| source | python |
|---|---|
client.py
|
import time
import json
import numbers
from six.moves.queue import Queue
import threading
import uuid
from eth_client_utils.utils import (
get_transaction_params,
construct_filter_args,
wait_for_transaction,
wait_for_block,
get_max_gas,
)
class BaseClient(object):
    def __init__(self, is_async=True, async_timeout=10):
        # `async` became a reserved keyword in Python 3.7, so the flag is named `is_async`.
        self.is_async = is_async
        self.async_timeout = async_timeout
if self.is_async:
self.request_queue = Queue()
self.results = {}
self.request_thread = threading.Thread(target=self.process_requests)
self.request_thread.daemon = True
self.request_thread.start()
def process_requests(self):
"""
Loop that runs in a thread to process requests synchronously.
"""
while True:
id, args, kwargs = self.request_queue.get()
try:
response = self._make_request(*args, **kwargs)
except Exception as e:
response = e
self.results[id] = response
def make_request(self, *args, **kwargs):
if self.is_async:
request_id = uuid.uuid4()
self.request_queue.put((request_id, args, kwargs))
start = time.time()
            while time.time() - start < self.async_timeout:
                if request_id in self.results:
                    result = self.results.pop(request_id)
                    if isinstance(result, Exception):
                        raise result
                    return result
                time.sleep(0.01)  # brief pause to avoid busy-waiting on the results dict
raise ValueError("Timeout waiting for {0}".format(request_id))
else:
return self._make_request(*args, **kwargs)
def _make_request(self, method, params):
raise NotImplementedError("Clients must implement this method")
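# --- Illustrative sketch (not part of the original module) ---------------------------
# A minimal subclass showing how `_make_request` plugs into the async request queue
# above. `EchoClient` is a hypothetical name used only for this example.
class EchoClient(BaseClient):
    def _make_request(self, method, params):
        # A real client would send `method` and `params` over its transport here.
        return {"result": [method, params]}

# Usage (commented out so importing this module has no side effects):
#   client = EchoClient()                 # starts the background request thread
#   client.make_request("ping", [])       # -> {"result": ["ping", []]}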
class JSONRPCBaseClient(BaseClient):
_nonce = 0
def get_nonce(self):
self._nonce += 1
return self._nonce
_coinbase_cache = None
_coinbase_cache_til = None
@property
def default_from_address(self):
"""
Cache the coinbase address so that we don't make two requests for every
single transaction.
"""
        if self._coinbase_cache_til is not None:
            if time.time() - self._coinbase_cache_til > 30:
                self._coinbase_cache_til = None
                self._coinbase_cache = None
        if self._coinbase_cache is None:
            self._coinbase_cache = self.get_coinbase()
            self._coinbase_cache_til = time.time()
        return self._coinbase_cache
def construct_json_request(self, method, params):
request = json.dumps({
"jsonrpc": "2.0",
"method": method,
"params": params,
"id": self.get_nonce(),
})
return request
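    # For illustration (assuming the nonce counter is currently at 0), the call
    # construct_json_request("eth_blockNumber", []) would produce:
    #   '{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}'
    # (key order may differ, since json.dumps is not asked to sort keys).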
#
# Utility Methods
#
get_max_gas = get_max_gas
wait_for_transaction = wait_for_transaction
wait_for_block = wait_for_block
#
# JSON-RPC Methods
#
def get_coinbase(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_coinbase
"""
response = self.make_request("eth_coinbase", [])
return response['result']
def get_gas_price(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gasprice
"""
response = self.make_request("eth_gasPrice", [])
return int(response['result'], 16)
def get_balance(self, address, block="latest"):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getbalance
"""
response = self.make_request("eth_getBalance", [address, block])
return int(response['result'], 16)
def get_code(self, address, block="latest"):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
"""
response = self.make_request("eth_getCode", [address, block])
return response['result']
def call(self, _from=None, to=None, gas=None, gas_price=None, value=0,
data=None, block="latest"):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call
"""
if _from is None:
_from = self.default_from_address
params = [
get_transaction_params(_from, to, gas, gas_price, value, data),
block,
]
response = self.make_request("eth_call", params)
return response['result']
def send_transaction(self, _from=None, to=None, gas=None, gas_price=None,
value=0, data=None):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction
"""
if _from is None:
_from = self.default_from_address
params = get_transaction_params(_from, to, gas, gas_price, value, data)
response = self.make_request("eth_sendTransaction", [params])
return response['result']
def get_transaction_receipt(self, txn_hash):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionreceipt
"""
response = self.make_request("eth_getTransactionReceipt", [txn_hash])
return response['result']
def get_transaction_by_hash(self, txn_hash):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyhash
"""
response = self.make_request("eth_getTransactionByHash", [txn_hash])
return response['result']
def get_block_number(self):
"""
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_blocknumber
"""
response = self.make_request("eth_blockNumber", [])
return int(response['result'], 16)
def get_block_by_hash(self, block_hash, full_transactions=True):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbyhash
"""
response = self.make_request("eth_getBlockByHash", [block_hash, full_transactions])
return response['result']
def get_block_by_number(self, block_number, full_transactions=True):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbynumber
"""
if isinstance(block_number, numbers.Number):
block_number_as_hex = hex(block_number)
else:
block_number_as_hex = block_number
response = self.make_request(
"eth_getBlockByNumber", [block_number_as_hex, full_transactions],
)
return response['result']
def get_accounts(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_accounts
"""
response = self.make_request("eth_accounts", [])
return response['result']
def new_filter(self, from_block=None, to_block=None, address=None, topics=None):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter
Create a new filter object to be notified of changes in the
state of the EVM through the logs.
This command returns a filter ID that can be referenced by
other commands to get log information.
"""
params = construct_filter_args(from_block, to_block, address, topics)
response = self.make_request("eth_newFilter", [params])
        return response['result']
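    # Illustrative lifecycle of a filter (a sketch; `client` is an instance of a concrete
    # subclass and `contract_address` is a hypothetical value):
    #   filter_id = client.new_filter(from_block="latest", address=contract_address)
    #   changes = client.get_filter_changes(filter_id)    # poll for new log entries
    #   client.uninstall_filter(filter_id)                # clean up when done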
def new_block_filter(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newblockfilter
"""
response = self.make_request("eth_newBlockFilter", [])
        return response['result']
def new_pending_transaction_filter(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter
"""
response = self.make_request("eth_newPendingTransactionFilter", [])
        return response['result']
def uninstall_filter(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_uninstallfilter
Removes a filter by ID
"""
if isinstance(filter_id, numbers.Number):
filt_hex = hex(filter_id)
else:
filt_hex = filter_id
response = self.make_request("eth_uninstallFilter", [filt_hex])
        return response['result']
def get_filter_changes(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterchanges
"""
if isinstance(filter_id, numbers.Number):
filt_hex = hex(filter_id)
else:
filt_hex = filter_id
response = self.make_request("eth_getFilterChanges", [filt_hex])
        return response['result']
def get_filter_logs(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs
"""
if isinstance(filter_id, numbers.Number):
filt_hex = hex(filter_id)
else:
filt_hex = filter_id
response = self.make_request("eth_getFilterLogs", [filt_hex])
        return response['result']
def get_logs(self, from_block=None, to_block=None, address=None, topics=None):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
"""
params = construct_filter_args(from_block, to_block, address, topics)
response = self.make_request("eth_getLogs", [params])
        return response['result']
|
python_ls.py
|
# Copyright 2017 Palantir Technologies, Inc.
import logging
import socketserver
import threading
from pyls_jsonrpc.dispatchers import MethodDispatcher
from pyls_jsonrpc.endpoint import Endpoint
from pyls_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)
LINT_DEBOUNCE_S = 0.5 # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10 # 10 s
MAX_WORKERS = 64
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
# pylint: disable=no-member
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
def handle(self):
self.delegate.start()
def start_tcp_lang_server(bind_addr, port, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be a subclass of PythonLanguageServer')
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + 'Handler',
(_StreamHandlerWrapper,),
{'DELEGATE_CLASS': handler_class}
)
server = socketserver.TCPServer((bind_addr, port), wrapper_class)
try:
log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
server.serve_forever()
finally:
log.info('Shutting down')
server.server_close()
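# Illustrative usage (a sketch; host and port are hypothetical):
#   start_tcp_lang_server('127.0.0.1', 2087, PythonLanguageServer)
# start_io_lang_server below is the stdin/stdout counterpart.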
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
        raise ValueError('Handler class must be a subclass of PythonLanguageServer')
log.info('Starting %s IO language server', handler_class.__name__)
server = handler_class(rfile, wfile, check_parent_process)
server.start()
class PythonLanguageServer(MethodDispatcher):
""" Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
"""
# pylint: disable=too-many-public-methods,redefined-builtin
def __init__(self, rx, tx, check_parent_process=False):
self.workspace = None
self.config = None
self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
self._check_parent_process = check_parent_process
self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS)
self._dispatchers = []
self._shutdown = False
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def __getitem__(self, item):
"""Override getitem to fallback through multiple dispatchers."""
if self._shutdown and item != 'exit':
# exit is the only allowed method during shutdown
log.debug("Ignoring non-exit method during shutdown: %s", item)
raise KeyError
try:
return super(PythonLanguageServer, self).__getitem__(item)
except KeyError:
# Fallback through extra dispatchers
for dispatcher in self._dispatchers:
try:
return dispatcher[item]
except KeyError:
continue
raise KeyError()
def m_shutdown(self, **_kwargs):
self._shutdown = True
return None
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def _hook(self, hook_name, doc_uri=None, **kwargs):
"""Calls hook_name and returns a list of results from all registered handlers"""
doc = self.workspace.get_document(doc_uri) if doc_uri else None
hook_handlers = self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
return hook_handlers(config=self.config, workspace=self.workspace, document=doc, **kwargs)
def capabilities(self):
server_capabilities = {
'codeActionProvider': True,
'codeLensProvider': {
'resolveProvider': False, # We may need to make this configurable
},
'completionProvider': {
'resolveProvider': False, # We know everything ahead of time
'triggerCharacters': ['.']
},
'documentFormattingProvider': True,
'documentHighlightProvider': True,
'documentRangeFormattingProvider': True,
'documentSymbolProvider': True,
'definitionProvider': True,
'executeCommandProvider': {
'commands': flatten(self._hook('pyls_commands'))
},
'hoverProvider': True,
'referencesProvider': True,
'renameProvider': True,
'signatureHelpProvider': {
'triggerCharacters': ['(', ',']
},
'textDocumentSync': lsp.TextDocumentSyncKind.INCREMENTAL,
'experimental': merge(self._hook('pyls_experimental_capabilities'))
}
log.info('Server capabilities: %s', server_capabilities)
return server_capabilities
def m_initialize(self, processId=None, rootUri=None, rootPath=None, initializationOptions=None, **_kwargs):
log.debug('Language server initialized with %s %s %s %s', processId, rootUri, rootPath, initializationOptions)
if rootUri is None:
rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ''
self.workspace = Workspace(rootUri, self._endpoint)
self.config = config.Config(rootUri, initializationOptions or {}, processId)
self._dispatchers = self._hook('pyls_dispatchers')
self._hook('pyls_initialize')
if self._check_parent_process and processId is not None:
def watch_parent_process(pid):
                # exit when the given pid is not alive
if not _utils.is_process_alive(pid):
log.info("parent process %s is not alive", pid)
self.m_exit()
log.debug("parent process %s is still alive", pid)
threading.Timer(PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]).start()
watching_thread = threading.Thread(target=watch_parent_process, args=(processId,))
watching_thread.daemon = True
watching_thread.start()
# Get our capabilities
return {'capabilities': self.capabilities()}
def m_initialized(self, **_kwargs):
pass
def code_actions(self, doc_uri, range, context):
return flatten(self._hook('pyls_code_actions', doc_uri, range=range, context=context))
def code_lens(self, doc_uri):
return flatten(self._hook('pyls_code_lens', doc_uri))
def completions(self, doc_uri, position):
completions = self._hook('pyls_completions', doc_uri, position=position)
return {
'isIncomplete': False,
'items': flatten(completions)
}
def definitions(self, doc_uri, position):
return flatten(self._hook('pyls_definitions', doc_uri, position=position))
def document_symbols(self, doc_uri):
return flatten(self._hook('pyls_document_symbols', doc_uri))
def execute_command(self, command, arguments):
return self._hook('pyls_execute_command', command=command, arguments=arguments)
def format_document(self, doc_uri):
return self._hook('pyls_format_document', doc_uri)
def format_range(self, doc_uri, range):
return self._hook('pyls_format_range', doc_uri, range=range)
def highlight(self, doc_uri, position):
return flatten(self._hook('pyls_document_highlight', doc_uri, position=position)) or None
def hover(self, doc_uri, position):
return self._hook('pyls_hover', doc_uri, position=position) or {'contents': ''}
@_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
def lint(self, doc_uri):
# Since we're debounced, the document may no longer be open
if doc_uri in self.workspace.documents:
self.workspace.publish_diagnostics(doc_uri, flatten(self._hook('pyls_lint', doc_uri)))
def references(self, doc_uri, position, exclude_declaration):
return flatten(self._hook(
'pyls_references', doc_uri, position=position,
exclude_declaration=exclude_declaration
))
def rename(self, doc_uri, position, new_name):
return self._hook('pyls_rename', doc_uri, position=position, new_name=new_name)
def signature_help(self, doc_uri, position):
return self._hook('pyls_signature_help', doc_uri, position=position)
def m_text_document__did_close(self, textDocument=None, **_kwargs):
self.workspace.rm_document(uris.translate_to_server_uri(textDocument['uri']))
def m_text_document__did_open(self, textDocument=None, **_kwargs):
self.workspace.put_document(uris.translate_to_server_uri(textDocument['uri']), textDocument['text'], version=textDocument.get('version'))
self._hook('pyls_document_did_open', uris.translate_to_server_uri(textDocument['uri']))
self.lint(uris.translate_to_server_uri(textDocument['uri']))
def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
for change in contentChanges:
self.workspace.update_document(
uris.translate_to_server_uri(textDocument['uri']),
change,
version=textDocument.get('version')
)
self.lint(uris.translate_to_server_uri(textDocument['uri']))
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(uris.translate_to_server_uri(textDocument['uri']))
def m_text_document__code_action(self, textDocument=None, range=None, context=None, **_kwargs):
return self.code_actions(uris.translate_to_server_uri(textDocument['uri']), range, context)
def m_text_document__code_lens(self, textDocument=None, **_kwargs):
return self.code_lens(uris.translate_to_server_uri(textDocument['uri']))
def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
return self.completions(uris.translate_to_server_uri(textDocument['uri']), position)
def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
return self.definitions(uris.translate_to_server_uri(textDocument['uri']), position)
def m_text_document__document_highlight(self, textDocument=None, position=None, **_kwargs):
return self.highlight(uris.translate_to_server_uri(textDocument['uri']), position)
def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
return self.hover(uris.translate_to_server_uri(textDocument['uri']), position)
def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
return self.document_symbols(uris.translate_to_server_uri(textDocument['uri']))
def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
# For now we're ignoring formatting options.
return self.format_document(uris.translate_to_server_uri(textDocument['uri']))
def m_text_document__rename(self, textDocument=None, position=None, newName=None, **_kwargs):
return self.rename(uris.translate_to_server_uri(textDocument['uri']), position, newName)
def m_text_document__range_formatting(self, textDocument=None, range=None, _options=None, **_kwargs):
# Again, we'll ignore formatting options for now.
return self.format_range(uris.translate_to_server_uri(textDocument['uri']), range)
def m_text_document__references(self, textDocument=None, position=None, context=None, **_kwargs):
exclude_declaration = not context['includeDeclaration']
return self.references(uris.translate_to_server_uri(textDocument['uri']), position, exclude_declaration)
def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
return self.signature_help(uris.translate_to_server_uri(textDocument['uri']), position)
def m_workspace__did_change_configuration(self, settings=None):
self.config.update((settings or {}).get('pyls', {}))
for doc_uri in self.workspace.documents:
self.lint(doc_uri)
def m_workspace__did_change_watched_files(self, **_kwargs):
# Externally changed files may result in changed diagnostics
for doc_uri in self.workspace.documents:
self.lint(doc_uri)
def m_workspace__execute_command(self, command=None, arguments=None):
return self.execute_command(command, arguments)
def flatten(list_of_lists):
return [item for lst in list_of_lists for item in lst]
def merge(list_of_dicts):
return {k: v for dictionary in list_of_dicts for k, v in dictionary.items()}
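# For illustration of the two helpers above:
#   flatten([[1, 2], [3]])        -> [1, 2, 3]
#   merge([{'a': 1}, {'b': 2}])   -> {'a': 1, 'b': 2}   (later dicts win on duplicate keys)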
|
__init__.py
|
import copy
import threading
import typing
StateType = typing.TypeVar('StateType')
class TimedThread(typing.Generic[StateType]):
"""
This is a "Thread" class that runs a job for a maximum period of time. The class provides concurrency-safe methods
to retrieve and persist a chunk of state.
"""
def __init__(self, timeout_seconds: float, state: StateType) -> None:
self.timeout_seconds = timeout_seconds
self.__state = copy.deepcopy(state)
self.lock = threading.Lock()
def run(self) -> StateType:
raise NotImplementedError()
def _run(self) -> None:
state = self.run()
self.save_state(state)
def _start_async(self) -> None:
self.thread = threading.Thread(target=self._run, daemon=True)
self.thread.start()
def _join(self) -> StateType:
self.thread.join(self.timeout_seconds)
with self.lock:
state = copy.deepcopy(self.__state)
return state
def start(self) -> StateType:
self._start_async()
return self._join()
def get_state_copy(self) -> StateType:
with self.lock:
state_copy = copy.deepcopy(self.__state)
return state_copy
def save_state(self, new_state: StateType) -> None:
new_state = copy.deepcopy(new_state)
with self.lock:
self.__state = new_state
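# Illustrative sketch (a hypothetical subclass, not part of the original module): run()
# computes a new state from the current one, and start() returns whatever state has been
# saved once the timeout elapses (or the thread finishes, whichever comes first).
class _CounterThread(TimedThread[int]):
    def run(self) -> int:
        return self.get_state_copy() + 1

# Usage (commented out to keep the module import side-effect free):
#   _CounterThread(timeout_seconds=1.0, state=0).start()   # -> 1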
|
regz_socket_MP_FD.py
|
# coding: utf-8
# # load package and settings
# In[ ]:
import cv2
import sys
import dlib
import time
import socket
import struct
import numpy as np
import tensorflow as tf
from win32api import GetSystemMetrics
import win32gui
from threading import Thread, Lock
import multiprocessing as mp
from config import get_config
import pickle
import math
# In[ ]:
conf,_ = get_config()
if conf.mod == 'flx':
import flx as model
else:
sys.exit("Wrong Model selection: flx or deepwarp")
# system parameters
model_dir = './'+conf.weight_set+'/warping_model/'+conf.mod+'/'+ str(conf.ef_dim) + '/'
size_video = [640,480]
# fps = 0
P_IDP = 5
depth = -50
# for monitoring
# environment parameter
Rs = (GetSystemMetrics(0),GetSystemMetrics(1))
# In[ ]:
print(model_dir)
print(Rs)
# In[ ]:
# video receiver
class video_receiver:
def __init__(self,shared_v,lock):
self.close = False
self.video_recv = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
print('Socket created')
# global remote_head_Center
self.video_recv.bind(('',conf.recver_port))
self.video_recv.listen(10)
print('Socket now listening')
self.conn, self.addr=self.video_recv.accept()
# face detection
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor("./lm_feat/shape_predictor_68_face_landmarks.dat")
self.face_detect_size = [320,240]
self.x_ratio = size_video[0]/self.face_detect_size[0]
self.y_ratio = size_video[1]/self.face_detect_size[1]
self.start_recv(shared_v,lock)
def face_detection(self,frame,shared_v,lock):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_detect_gray = cv2.resize(gray,(self.face_detect_size[0],self.face_detect_size[1]))
detections = self.detector(face_detect_gray, 0)
coor_remote_head_center=[0,0]
for k,bx in enumerate(detections):
coor_remote_head_center = [int((bx.left()+bx.right())*self.x_ratio/2),
int((bx.top()+bx.bottom())*self.y_ratio/2)]
break
# share remote participant's eye to the main process
lock.acquire()
shared_v[0] = coor_remote_head_center[0]
shared_v[1] = coor_remote_head_center[1]
lock.release()
def start_recv(self,shared_v,lock):
data = b""
payload_size = struct.calcsize("L")
print("payload_size: {}".format(payload_size))
while True:
while len(data) < payload_size:
data += self.conn.recv(4096)
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("L", packed_msg_size)[0]
while len(data) < msg_size:
data += self.conn.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data, fix_imports=True, encoding="bytes")
            if isinstance(frame, str) and frame == 'stop':
print('stop')
cv2.destroyWindow("Remote")
break
frame = cv2.imdecode(frame, cv2.IMREAD_COLOR)
# face detection
self.video_recv_hd_thread = Thread(target=self.face_detection, args=(frame,shared_v,lock))
self.video_recv_hd_thread.start()
cv2.imshow('Remote',frame)
cv2.waitKey(1)
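# Wire format note (inferred from the sender in gaze_redirection_system below, for
# illustration only): each message is struct.pack("L", len(payload)) followed by the
# payload, where payload = pickle.dumps(cv2.imencode('.jpg', frame, encode_param)[1], 0),
# or pickle.dumps('stop') when the session ends.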
# # Flx-gaze
# In[ ]:
class gaze_redirection_system:
def __init__(self,shared_v,lock):
#Landmark identifier. Set the filename to whatever you named the downloaded file
self.detector = dlib.get_frontal_face_detector()
self.predictor = dlib.shape_predictor("./lm_feat/shape_predictor_68_face_landmarks.dat")
self.size_df = (320,240)
self.size_I = (48,64)
# initial value
self.Rw = [0,0]
self.Pe_z = -60
#### get configurations
self.f = conf.f
self.Ps = (conf.S_W,conf.S_H)
self.Pc = (conf.P_c_x,conf.P_c_y,conf.P_c_z)
self.Pe = [self.Pc[0],self.Pc[1],self.Pe_z] # H,V,D
## start video sender
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.client_socket.connect((conf.tar_ip, conf.sender_port))
self.encode_param=[int(cv2.IMWRITE_JPEG_QUALITY),90]
# load model to gpu
print("Loading model of [L] eye to GPU")
with tf.Graph().as_default() as g:
# define placeholder for inputs to network
with tf.name_scope('inputs'):
self.LE_input_img = tf.placeholder(tf.float32, [None, conf.height, conf.width, conf.channel], name="input_img")
self.LE_input_fp = tf.placeholder(tf.float32, [None, conf.height, conf.width,conf.ef_dim], name="input_fp")
self.LE_input_ang = tf.placeholder(tf.float32, [None, conf.agl_dim], name="input_ang")
self.LE_phase_train = tf.placeholder(tf.bool, name='phase_train') # a bool for batch_normalization
self.LE_img_pred, _, _ = model.inference(self.LE_input_img, self.LE_input_fp, self.LE_input_ang, self.LE_phase_train, conf)
            # split model here
self.L_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False), graph = g)
# load model
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(model_dir+'L/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(self.L_sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
print("Loading model of [R] eye to GPU")
with tf.Graph().as_default() as g2:
# define placeholder for inputs to network
with tf.name_scope('inputs'):
self.RE_input_img = tf.placeholder(tf.float32, [None, conf.height, conf.width, conf.channel], name="input_img")
self.RE_input_fp = tf.placeholder(tf.float32, [None, conf.height, conf.width,conf.ef_dim], name="input_fp")
self.RE_input_ang = tf.placeholder(tf.float32, [None, conf.agl_dim], name="input_ang")
self.RE_phase_train = tf.placeholder(tf.bool, name='phase_train') # a bool for batch_normalization
self.RE_img_pred, _, _ = model.inference(self.RE_input_img, self.RE_input_fp, self.RE_input_ang, self.RE_phase_train, conf)
            # split model here
self.R_sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True,log_device_placement=False), graph = g2)
# load model
saver = tf.train.Saver(tf.global_variables())
ckpt = tf.train.get_checkpoint_state(model_dir+'R/')
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(self.R_sess, ckpt.model_checkpoint_path)
else:
print('No checkpoint file found')
self.run(shared_v,lock)
def monitor_para(self,frame,fig_alpha,fig_eye_pos,fig_R_w):
cv2.rectangle(frame,
(size_video[0]-150,0),(size_video[0],55),
(255,255,255),-1
)
cv2.putText(frame,
'Eye:['+str(int(fig_eye_pos[0])) +','+str(int(fig_eye_pos[1]))+','+str(int(fig_eye_pos[2]))+']',
(size_video[0]-140,15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,(0,0,255),1,cv2.LINE_AA)
cv2.putText(frame,
'alpha:[V='+str(int(fig_alpha[0])) + ',H='+ str(int(fig_alpha[1]))+']',
(size_video[0]-140,30),cv2.FONT_HERSHEY_SIMPLEX,0.4,(0,0,255),1,cv2.LINE_AA)
cv2.putText(frame,
'R_w:['+str(int(fig_R_w[0])) + ','+ str(int(fig_R_w[1]))+']',
(size_video[0]-140,45),cv2.FONT_HERSHEY_SIMPLEX,0.4,(0,0,255),1,cv2.LINE_AA)
return frame
def get_inputs(self, frame, shape, pos = "L", size_I = [48,64]):
if(pos == "R"):
lc = 36
rc = 39
FP_seq = [36,37,38,39,40,41]
elif(pos == "L"):
lc = 42
rc = 45
FP_seq = [45,44,43,42,47,46]
else:
print("Error: Wrong Eye")
eye_cx = (shape.part(rc).x+shape.part(lc).x)*0.5
eye_cy = (shape.part(rc).y+shape.part(lc).y)*0.5
eye_center = [eye_cx, eye_cy]
eye_len = np.absolute(shape.part(rc).x - shape.part(lc).x)
bx_d5w = eye_len*3/4
bx_h = 1.5*bx_d5w
sft_up = bx_h*7/12
sft_low = bx_h*5/12
img_eye = frame[int(eye_cy-sft_up):int(eye_cy+sft_low),int(eye_cx-bx_d5w):int(eye_cx+bx_d5w)]
ori_size = [img_eye.shape[0],img_eye.shape[1]]
LT_coor = [int(eye_cy-sft_up), int(eye_cx-bx_d5w)] # (y,x)
img_eye = cv2.resize(img_eye, (size_I[1],size_I[0]))
# create anchor maps
ach_map = []
for i,d in enumerate(FP_seq):
resize_x = int((shape.part(d).x-LT_coor[1])*size_I[1]/ori_size[1])
resize_y = int((shape.part(d).y-LT_coor[0])*size_I[0]/ori_size[0])
# y
ach_map_y = np.expand_dims(np.expand_dims(np.arange(0, size_I[0]) - resize_y, axis=1), axis=2)
ach_map_y = np.tile(ach_map_y, [1,size_I[1],1])
# x
ach_map_x = np.expand_dims(np.expand_dims(np.arange(0, size_I[1]) - resize_x, axis=0), axis=2)
ach_map_x = np.tile(ach_map_x, [size_I[0],1,1])
if (i ==0):
ach_map = np.concatenate((ach_map_x, ach_map_y), axis=2)
else:
ach_map = np.concatenate((ach_map, ach_map_x, ach_map_y), axis=2)
return img_eye/255, ach_map, eye_center, ori_size, LT_coor
def shifting_angles_estimator(self, R_le, R_re,shared_v,lock):
# get P_w
try:
tar_win = win32gui.FindWindow(None, "Remote")
            # left, top, right, bottom
Rw_lt = win32gui.GetWindowRect(tar_win)
size_window = (Rw_lt[2]-Rw_lt[0], Rw_lt[3]-Rw_lt[1])
        except:
            size_window = (659, 528)
            Rw_lt = [int(Rs[0]) - int(size_window[0] / 2), int(Rs[1]) - int(size_window[1] / 2)]
            print("Missing the window")
# get pos head
pos_remote_head = [int(size_window[0]/2),int(size_window[1]/2)]
try:
if ((shared_v[0] !=0) & (shared_v[1] !=0)):
pos_remote_head[0] = shared_v[0]
pos_remote_head[1] = shared_v[1]
except:
pos_remote_head = (int(size_window[0]/2),int(size_window[1]/2))
R_w = (Rw_lt[0]+pos_remote_head[0], Rw_lt[1]+pos_remote_head[1])
Pw = (self.Ps[0]*(R_w[0]-Rs[0]/2)/Rs[0], self.Ps[1]*(R_w[1]-Rs[1]/2)/Rs[1], 0)
# get Pe
self.Pe[2] = -(self.f*conf.P_IDP)/np.sqrt((R_le[0]-R_re[0])**2 + (R_le[1]-R_re[1])**2)
# x-axis needs flip
self.Pe[0] = -np.abs(self.Pe[2])*(R_le[0]+R_re[0]-size_video[0])/(2*self.f) + self.Pc[0]
self.Pe[1] = np.abs(self.Pe[2])*(R_le[1]+R_re[1]-size_video[1])/(2*self.f) + self.Pc[1]
        # calculate alpha
a_w2z_x = math.degrees(math.atan( (Pw[0]-self.Pe[0])/(Pw[2]-self.Pe[2])))
a_w2z_y = math.degrees(math.atan( (Pw[1]-self.Pe[1])/(Pw[2]-self.Pe[2])))
a_z2c_x = math.degrees(math.atan( (self.Pe[0]-self.Pc[0])/(self.Pc[2]-self.Pe[2])))
a_z2c_y = math.degrees(math.atan( (self.Pe[1]-self.Pc[1])/(self.Pc[2]-self.Pe[2])))
alpha = [int(a_w2z_y + a_z2c_y),int(a_w2z_x + a_z2c_x)] # (V,H)
return alpha, self.Pe, R_w
def flx_gaze(self, frame, gray, detections, shared_v, lock, pixel_cut=[3,4], size_I = [48,64]):
alpha_w2c = [0,0]
x_ratio = size_video[0]/self.size_df[0]
y_ratio = size_video[1]/self.size_df[1]
LE_M_A=[]
RE_M_A=[]
p_e=[0,0]
R_w=[0,0]
for k,bx in enumerate(detections):
# Get facial landmarks
time_start = time.time()
target_bx = dlib.rectangle(left=int(bx.left()*x_ratio),right =int(bx.right()*x_ratio),
top =int(bx.top()*y_ratio), bottom=int(bx.bottom()*y_ratio))
shape = self.predictor(gray, target_bx)
# get eye
LE_img, LE_M_A, LE_center, size_le_ori, R_le_LT = self.get_inputs(frame, shape, pos="L", size_I=size_I)
RE_img, RE_M_A, RE_center, size_re_ori, R_re_LT = self.get_inputs(frame, shape, pos="R", size_I=size_I)
# shifting angles estimator
alpha_w2c, p_e, R_w = self.shifting_angles_estimator(LE_center,RE_center,shared_v,lock)
time_get_eye = time.time() - time_start
# gaze manipulation
time_start = time.time()
# gaze redirection
# left Eye
LE_infer_img = self.L_sess.run(self.LE_img_pred, feed_dict= {
self.LE_input_img: np.expand_dims(LE_img, axis = 0),
self.LE_input_fp: np.expand_dims(LE_M_A, axis = 0),
self.LE_input_ang: np.expand_dims(alpha_w2c, axis = 0),
self.LE_phase_train: False
})
LE_infer = cv2.resize(LE_infer_img.reshape(size_I[0],size_I[1],3), (size_le_ori[1], size_le_ori[0]))
# right Eye
RE_infer_img = self.R_sess.run(self.RE_img_pred, feed_dict= {
self.RE_input_img: np.expand_dims(RE_img, axis = 0),
self.RE_input_fp: np.expand_dims(RE_M_A, axis = 0),
self.RE_input_ang: np.expand_dims(alpha_w2c, axis = 0),
self.RE_phase_train: False
})
RE_infer = cv2.resize(RE_infer_img.reshape(size_I[0],size_I[1],3), (size_re_ori[1], size_re_ori[0]))
            # replace eyes
frame[(R_le_LT[0]+pixel_cut[0]):(R_le_LT[0]+size_le_ori[0]-pixel_cut[0]),
(R_le_LT[1]+pixel_cut[1]):(R_le_LT[1]+size_le_ori[1]-pixel_cut[1])] = LE_infer[pixel_cut[0]:(-1*pixel_cut[0]), pixel_cut[1]:-1*(pixel_cut[1])]*255
frame[(R_re_LT[0]+pixel_cut[0]):(R_re_LT[0]+size_re_ori[0]-pixel_cut[0]),
(R_re_LT[1]+pixel_cut[1]):(R_re_LT[1]+size_re_ori[1]-pixel_cut[1])] = RE_infer[pixel_cut[0]:(-1*pixel_cut[0]), pixel_cut[1]:-1*(pixel_cut[1])]*255
frame = self.monitor_para(frame, alpha_w2c, self.Pe, R_w)
result, imgencode = cv2.imencode('.jpg', frame, self.encode_param)
data = pickle.dumps(imgencode, 0)
self.client_socket.sendall(struct.pack("L", len(data)) + data)
return True
def redirect_gaze(self, frame,shared_v,lock):
# head detection
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face_detect_gray = cv2.resize(gray,(self.size_df[0],self.size_df[1]))
detections = self.detector(face_detect_gray, 0)
rg_thread = Thread(target=self.flx_gaze, args=(frame, gray, detections,shared_v,lock))
rg_thread.start()
return True
def run(self,shared_v,lock):
# def main():
redir = False
size_window = [659,528]
vs = cv2.VideoCapture(0)
vs.set(3, size_video[0])
vs.set(4, size_video[1])
t = time.time()
cv2.namedWindow(conf.uid)
cv2.moveWindow(conf.uid, int(Rs[0]/2)-int(size_window[0]/2),int(Rs[1]/2)-int(size_window[1]/2));
while 1:
ret, recv_frame = vs.read()
if ret:
cv2.imshow(conf.uid,recv_frame)
if recv_frame is not None:
# redirected gaze
if redir:
frame = recv_frame.copy()
try:
tag = self.redirect_gaze(frame,shared_v,lock)
except:
pass
else:
result, imgencode = cv2.imencode('.jpg', recv_frame, self.encode_param)
data = pickle.dumps(imgencode, 0)
self.client_socket.sendall(struct.pack("L", len(data)) + data)
if (time.time() - t) > 1:
t = time.time()
k = cv2.waitKey(10)
if k == ord('q'):
data = pickle.dumps('stop')
self.client_socket.sendall(struct.pack("L", len(data))+data)
time.sleep(3)
cv2.destroyWindow(conf.uid)
self.client_socket.shutdown(socket.SHUT_RDWR)
self.client_socket.close()
vs.release()
self.L_sess.close()
self.R_sess.close()
break
elif k == ord('r'):
if redir:
redir = False
else:
redir = True
else:
pass
# In[ ]:
if __name__ == '__main__':
l = mp.Lock() # multi-process lock
v = mp.Array('i', [320,240]) # shared parameter
# start video receiver
# vs_thread = Thread(target=video_receiver, args=(conf.recver_port,))
vs_thread = mp.Process(target=video_receiver, args=(v,l))
vs_thread.start()
time.sleep(1)
gz_thread = mp.Process(target=gaze_redirection_system, args=(v,l))
gz_thread.start()
vs_thread.join()
gz_thread.join()
|
threadpool.py
|
"""
Generic thread pool class. Modeled after Java's ThreadPoolExecutor.
Please note that this ThreadPool does *not* fully implement the PEP 3148
ThreadPool!
"""
from threading import Thread, Lock, currentThread
from weakref import ref
import logging
from ambari_agent.ExitHelper import ExitHelper
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
logger = logging.getLogger(__name__)
_threadpools = set()
# Worker threads are daemonic in order to let the interpreter exit without
# an explicit shutdown of the thread pool. The following trick is necessary
# to allow worker threads to finish cleanly.
def _shutdown_all():
for pool_ref in tuple(_threadpools):
pool = pool_ref()
if pool:
pool.shutdown()
ExitHelper().register(_shutdown_all)
class ThreadPool(object):
def __init__(self, core_threads=0, max_threads=20, keepalive=1):
"""
:param core_threads: maximum number of persistent threads in the pool
:param max_threads: maximum number of total threads in the pool
:param thread_class: callable that creates a Thread object
:param keepalive: seconds to keep non-core worker threads waiting
for new tasks
"""
self.core_threads = core_threads
self.max_threads = max(max_threads, core_threads, 1)
self.keepalive = keepalive
self._queue = Queue()
self._threads_lock = Lock()
self._threads = set()
self._shutdown = False
_threadpools.add(ref(self))
logger.info('Started thread pool with %d core threads and %s maximum '
'threads', core_threads, max_threads or 'unlimited')
def _adjust_threadcount(self):
self._threads_lock.acquire()
try:
if self.num_threads < self.max_threads:
self._add_thread(self.num_threads < self.core_threads)
finally:
self._threads_lock.release()
def _add_thread(self, core):
t = Thread(target=self._run_jobs, args=(core,))
t.setDaemon(True)
t.start()
self._threads.add(t)
def _run_jobs(self, core):
logger.debug('Started worker thread')
block = True
timeout = None
if not core:
block = self.keepalive > 0
timeout = self.keepalive
while True:
try:
func, args, kwargs = self._queue.get(block, timeout)
except Empty:
break
if self._shutdown:
break
try:
logger.debug('Worker thread starting job %s', args[0])
func(*args, **kwargs)
except:
logger.exception('Error in worker thread')
self._threads_lock.acquire()
self._threads.remove(currentThread())
self._threads_lock.release()
logger.debug('Exiting worker thread')
@property
def num_threads(self):
return len(self._threads)
def submit(self, func, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new tasks after shutdown')
self._queue.put((func, args, kwargs))
self._adjust_threadcount()
def shutdown(self, wait=True):
if self._shutdown:
return
        logger.info('Shutting down thread pool')
self._shutdown = True
_threadpools.remove(ref(self))
self._threads_lock.acquire()
for _ in range(self.num_threads):
self._queue.put((None, None, None))
self._threads_lock.release()
if wait:
self._threads_lock.acquire()
threads = tuple(self._threads)
self._threads_lock.release()
for thread in threads:
thread.join()
def __repr__(self):
if self.max_threads:
threadcount = '%d/%d' % (self.num_threads, self.max_threads)
else:
threadcount = '%d' % self.num_threads
return '<ThreadPool at %x; threads=%s>' % (id(self), threadcount)
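# Illustrative usage (a sketch, commented out so importing the module stays side-effect free;
# the function and arguments are hypothetical):
#   pool = ThreadPool(core_threads=2, max_threads=10)
#   pool.submit(some_function, arg1, arg2)
#   pool.shutdown(wait=True)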
|
dmlc_local.py
|
#!/usr/bin/env python
"""
DMLC submission script, local machine version
"""
import argparse
import sys
import os
import subprocess
from threading import Thread
import tracker
import signal
import logging
keepalive = """
nrep=0
rc=254
while [ $rc -eq 254 ];
do
export DMLC_NUM_ATTEMPT=$nrep
%s
rc=$?;
nrep=$((nrep+1));
done
exit $rc
"""
class LocalLauncher(object):
def __init__(self, args, unknown):
self.args = args
self.cmd = ' '.join(args.command) + ' ' + ' '.join(unknown)
def exec_cmd(self, cmd, role, pass_env):
env = os.environ.copy()
for k, v in pass_env.items():
env[k] = str(v)
env['DMLC_ROLE'] = role
ntrial = 0
while True:
if os.name == 'nt':
env['DMLC_NUM_ATTEMPT'] = str(ntrial)
ret = subprocess.call(cmd, shell=True, env = env)
if ret == 254:
ntrial += 1
continue
else:
bash = keepalive % (cmd)
ret = subprocess.call(bash, shell=True, executable='bash', env = env)
if ret == 0:
                logging.debug('Command exited with return code 0')
return
else:
if os.name == 'nt':
                    sys.exit(-1)
else:
raise Exception('Get nonzero return code=%d' % ret)
def submit(self):
def mthread_submit(nworker, nserver, envs):
"""
customized submit script
"""
procs = {}
for i in range(nworker + nserver):
role = 'worker' if i < nworker else 'server'
procs[i] = Thread(target = self.exec_cmd, args = (self.cmd, role, envs))
procs[i].setDaemon(True)
procs[i].start()
return mthread_submit
def run(self):
tracker.config_logger(self.args)
tracker.submit(self.args.num_workers,
self.args.num_servers,
fun_submit = self.submit(),
pscmd = self.cmd)
def main():
parser = argparse.ArgumentParser(
description='DMLC script to submit dmlc jobs as local process')
parser.add_argument('-n', '--num-workers', default = 0, type=int,
help = 'number of worker nodes to be launched')
parser.add_argument('-s', '--num-servers', type=int,
help = 'number of server nodes to be launched')
parser.add_argument('--log-level', default='INFO', type=str,
choices=['INFO', 'DEBUG'],
help = 'logging level')
parser.add_argument('--log-file', type=str,
help = 'output log to the specific log file')
parser.add_argument('command', nargs='+',
help = 'command for launching the program')
args, unknown = parser.parse_known_args()
launcher = LocalLauncher(args, unknown)
launcher.run()
if __name__ == '__main__':
main()
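# Example invocation (hypothetical command, for illustration only):
#   ./dmlc_local.py -n 2 -s 1 python train.py
# launches two worker processes and one server process; on non-Windows hosts each command is
# wrapped in the `keepalive` retry loop above and re-run while it exits with code 254.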
|
worker_manager.py
|
"""
A manager for multiple workers.
-- kandasamy@cs.cmu.edu
"""
from __future__ import print_function
from __future__ import division
# pylint: disable=invalid-name
# pylint: disable=abstract-class-not-used
# pylint: disable=abstract-class-little-used
from argparse import Namespace
from multiprocessing import Process
import numpy as np
import os
import pickle
import shutil
import time
try:
from sets import Set
except ImportError:
Set = set
# Local
from .exd_utils import EVAL_ERROR_CODE
_TIME_TOL = 1e-5
class AbstractWorkerManager(object):
""" A Base class for a worker manager. """
def __init__(self, worker_ids):
""" Constructor. """
if hasattr(worker_ids, '__iter__'):
self.worker_ids = worker_ids
else:
self.worker_ids = list(range(worker_ids))
self.num_workers = len(self.worker_ids)
# These will be set in reset
self.experiment_designer = None
self.latest_results = None
# Reset
self.reset()
def reset(self):
""" Resets everything. """
self.experiment_designer = None
self.latest_results = [] # A list of namespaces
self._child_reset()
def _child_reset(self):
""" Child reset. """
raise NotImplementedError('Implement in a child class.')
def fetch_latest_results(self):
""" Returns the latest results. """
ret_idxs = []
for i in range(len(self.latest_results)):
if (self.latest_results[i].receive_time <=
self.experiment_designer.get_curr_spent_capital() + _TIME_TOL):
ret_idxs.append(i)
keep_idxs = [i for i in range(len(self.latest_results)) if i not in ret_idxs]
ret = [self.latest_results[i] for i in ret_idxs]
self.latest_results = [self.latest_results[i] for i in keep_idxs]
return ret
def close_all_queries(self):
""" Closes all queries. """
raise NotImplementedError('Implement in a child class.')
def set_experiment_designer(self, experiment_designer):
""" Set the experiment designer. """
self.experiment_designer = experiment_designer
def a_worker_is_free(self):
""" Returns true if a worker is free. """
raise NotImplementedError('Implement in a child class.')
def all_workers_are_free(self):
""" Returns true if all workers are free. """
raise NotImplementedError('Implement in a child class.')
def _dispatch_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches job. """
raise NotImplementedError('Implement in a child class.')
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches job. """
raise NotImplementedError('Implement in a child class.')
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches an entire batch of experiments. """
raise NotImplementedError('Implement in a child class.')
def get_time_distro_info(self):
""" Returns information on the time distribution. """
#pylint: disable=no-self-use
return ''
def get_poll_time_real(self):
""" Returns the poll time. """
raise NotImplementedError('Implement in a child class.')
# A synthetic worker manager - for simulating multiple workers ---------------------------
class SyntheticWorkerManager(AbstractWorkerManager):
""" A Worker manager for synthetic functions. Mostly to be used in simulations. """
def __init__(self, num_workers, time_distro='caller_eval_cost',
time_distro_params=None):
""" Constructor. """
self.worker_pipe = None
super(SyntheticWorkerManager, self).__init__(num_workers)
# Set up the time sampler
self.time_distro = time_distro
self.time_distro_params = time_distro_params
self.time_sampler = None
self._set_up_time_sampler()
def _set_up_time_sampler(self):
""" Set up the sampler for the time random variable. """
self.time_distro_params = Namespace() if self.time_distro_params is None else \
self.time_distro_params
if self.time_distro == 'caller_eval_cost':
pass
elif self.time_distro == 'const':
if not hasattr(self.time_distro_params, 'const_val'):
self.time_distro_params.const_val = 1
self.time_sampler = lambda num_samples: (np.ones((num_samples,)) *
self.time_distro_params.const_val)
elif self.time_distro == 'uniform':
if not hasattr(self.time_distro_params, 'ub'):
self.time_distro_params.ub = 2.0
self.time_distro_params.lb = 0.0
ub = self.time_distro_params.ub
lb = self.time_distro_params.lb
self.time_sampler = lambda num_samples: (np.random.random((num_samples,)) *
(ub - lb) + lb)
elif self.time_distro == 'halfnormal':
if not hasattr(self.time_distro_params, 'ub'):
self.time_distro_params.sigma = np.sqrt(np.pi/2)
self.time_sampler = lambda num_samples: np.abs(np.random.normal(
scale=self.time_distro_params.sigma, size=(num_samples,)))
else:
raise NotImplementedError('Not implemented time_distro = %s yet.'%(
self.time_distro))
def _child_reset(self):
""" Child reset. """
self.worker_pipe = [[wid, 0.0] for wid in self.worker_ids]
def sort_worker_pipe(self):
""" Sorts worker pipe by finish time. """
self.worker_pipe.sort(key=lambda x: x[-1])
def a_worker_is_free(self):
""" Returns true if a worker is free. """
return self.worker_pipe[0][-1] # Always return true as this is synthetic.
def all_workers_are_free(self):
""" Returns true if all workers are free. """
return self.worker_pipe[-1][-1]
def close_all_queries(self):
""" Close all queries. """
pass
def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
""" Dispatch experiment. """
# Set worker id and whether or not eval_time should be returned
qinfo.worker_id = worker_id # indicate which worker
qinfo = func_caller.eval_from_qinfo(qinfo, **kwargs)
if self.time_distro == 'caller_eval_cost':
if hasattr(qinfo, 'caller_eval_cost') and qinfo.caller_eval_cost is not None:
qinfo.eval_time = qinfo.caller_eval_cost
else:
qinfo.eval_time = 1.0
else:
qinfo.eval_time = float(self.time_sampler(1))
qinfo.receive_time = qinfo.send_time + qinfo.eval_time
# Store the result in latest_results
self.latest_results.append(qinfo)
return qinfo
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatch a single experiment. """
worker_id = self.worker_pipe[0][0]
qinfo = self._dispatch_experiment(func_caller, qinfo, worker_id, **kwargs)
# Sort the pipe
self.worker_pipe[0][-1] = qinfo.receive_time
self.sort_worker_pipe()
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches an entire batch of experiments. """
assert len(qinfos) == self.num_workers
for idx in range(self.num_workers):
qinfo = self._dispatch_experiment(func_caller, qinfos[idx],
self.worker_pipe[idx][0], **kwargs)
self.worker_pipe[idx][-1] = qinfo.receive_time
self.sort_worker_pipe()
def get_time_distro_info(self):
""" Returns information on the time distribution. """
return self.time_distro
def get_poll_time_real(self):
""" Return 0.0 as the poll time. """
return 0.0
# A worker manager which spawns a new thread for each process ---------------------------
class MultiProcessingWorkerManager(AbstractWorkerManager):
""" A worker manager which spawns a new thread for each worker. """
# pylint: disable=attribute-defined-outside-init
def __init__(self, worker_ids, tmp_dir,
poll_time=0.5, sleep_time_after_new_process=0.5):
""" Constructor. """
super(MultiProcessingWorkerManager, self).__init__(worker_ids)
self.poll_time = poll_time
self.sleep_time_after_new_process = sleep_time_after_new_process
self.tmp_dir = tmp_dir
self._rwm_set_up()
self._child_reset()
def _rwm_set_up(self):
""" Sets things up for the child. """
        # Create the result directories.
self.result_dir_names = {wid:'%s/result_%s'%(self.tmp_dir, str(wid)) for wid in
self.worker_ids}
# Create the working directories
self.working_dir_names = {wid:'%s/working_%s/tmp'%(self.tmp_dir,
str(wid)) for wid in self.worker_ids}
# Create the last receive times
self.last_receive_times = {wid:0.0 for wid in self.worker_ids}
# Create file names
self._result_file_name = 'result.p'
self._num_file_read_attempts = 10
@classmethod
def _delete_dirs(cls, list_of_dir_names):
""" Deletes a list of directories. """
for dir_name in list_of_dir_names:
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
@classmethod
def _delete_and_create_dirs(cls, list_of_dir_names):
""" Deletes a list of directories and creates new ones. """
for dir_name in list_of_dir_names:
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
os.makedirs(dir_name)
def _child_reset(self):
""" Resets child. """
# Delete/create the result and working directories.
if not hasattr(self, 'result_dir_names'): # Just for the super constructor.
return
self._delete_and_create_dirs(list(self.result_dir_names.values()))
self._delete_dirs(list(self.working_dir_names.values()))
self.free_workers = Set(self.worker_ids)
self.func_callers_for_each_worker = {wid:None for wid in self.worker_ids}
self.qinfos_in_progress = {wid:None for wid in self.worker_ids}
self.worker_processes = {wid:None for wid in self.worker_ids}
def _get_result_file_name_for_worker(self, worker_id):
""" Computes the result file name for the worker. """
return os.path.join(self.result_dir_names[worker_id], self._result_file_name)
def _read_result_from_file(self, result_file_name):
""" Reads the result from the file name. """
#pylint: disable=bare-except
num_attempts = 0
while num_attempts < self._num_file_read_attempts:
try:
file_reader = open(result_file_name, 'rb')
result = pickle.load(file_reader)
break
except:
print('Encountered error when reading %s. Trying again.'%(result_file_name))
time.sleep(self.poll_time)
file_reader.close()
result = EVAL_ERROR_CODE
return result
def _read_result_from_worker_and_update(self, worker_id):
""" Reads the result from the worker. """
# pylint: disable=maybe-no-member
# Read the file
result_file_name = self._get_result_file_name_for_worker(worker_id)
result_qinfo = self._read_result_from_file(result_file_name)
saved_qinfo = self.qinfos_in_progress[worker_id]
# Now update the relevant qinfo and put it to latest_results
if isinstance(result_qinfo, Namespace):
assert self.func_callers_for_each_worker[worker_id].domain.members_are_equal(
result_qinfo.point, saved_qinfo.point)
qinfo = result_qinfo
elif result_qinfo == EVAL_ERROR_CODE:
qinfo = saved_qinfo
qinfo.val = EVAL_ERROR_CODE
else:
            raise ValueError('Could not read qinfo object: %s.' % (str(result_qinfo)))
qinfo.receive_time = self.experiment_designer.get_curr_spent_capital()
qinfo.eval_time = qinfo.receive_time - qinfo.send_time
if not hasattr(qinfo, 'true_val'):
qinfo.true_val = qinfo.val
self.latest_results.append(qinfo)
# Update receive time
self.last_receive_times[worker_id] = qinfo.receive_time
# Delete the file.
os.remove(result_file_name)
# Delete content in a working directory.
shutil.rmtree(self.working_dir_names[worker_id])
# Add the worker to the list of free workers and clear qinfos in progress.
self.worker_processes[worker_id].terminate()
self.worker_processes[worker_id] = None
self.qinfos_in_progress[worker_id] = None
self.func_callers_for_each_worker[worker_id] = None
self.free_workers.add(worker_id)
def _worker_is_free(self, worker_id):
""" Checks if worker with worker_id is free. """
if worker_id in self.free_workers:
return True
worker_result_file_name = self._get_result_file_name_for_worker(worker_id)
        if os.path.exists(worker_result_file_name):
            self._read_result_from_worker_and_update(worker_id)
            return True
        else:
            return False
def _get_last_receive_time(self):
""" Returns the last time we received a job. """
all_receive_times = list(self.last_receive_times.values())
return max(all_receive_times)
def a_worker_is_free(self):
""" Returns true if a worker is free. """
for wid in self.worker_ids:
if self._worker_is_free(wid):
return self._get_last_receive_time()
return None
def all_workers_are_free(self):
""" Returns true if all workers are free. """
all_are_free = True
for wid in self.worker_ids:
all_are_free = self._worker_is_free(wid) and all_are_free
if all_are_free:
return self._get_last_receive_time()
else:
return None
def _dispatch_experiment(self, func_caller, qinfo, worker_id, **kwargs):
""" Dispatches experiment to worker_id. """
#pylint: disable=star-args
if self.qinfos_in_progress[worker_id] is not None:
err_msg = 'qinfos_in_progress: %s,\nfree_workers: %s.'%(
str(self.qinfos_in_progress), str(self.free_workers))
print(err_msg)
raise ValueError('Check if worker is free before sending experiment.')
# First add all the data to qinfo
qinfo.worker_id = worker_id
qinfo.working_dir = self.working_dir_names[worker_id]
qinfo.result_file = self._get_result_file_name_for_worker(worker_id)
# Create the working directory
os.makedirs(qinfo.working_dir)
# Dispatch the experiment in a new process
target_func = lambda: func_caller.eval_from_qinfo(qinfo, **kwargs)
self.worker_processes[worker_id] = Process(target=target_func)
self.worker_processes[worker_id].start()
time.sleep(self.sleep_time_after_new_process)
# Add the qinfo to the in progress bar and remove from free_workers
self.qinfos_in_progress[worker_id] = qinfo
self.func_callers_for_each_worker[worker_id] = func_caller
self.free_workers.discard(worker_id)
def dispatch_single_experiment(self, func_caller, qinfo, **kwargs):
""" Dispatches a single experiment to a free worker. """
worker_id = self.free_workers.pop()
self._dispatch_experiment(func_caller, qinfo, worker_id, **kwargs)
def dispatch_batch_of_experiments(self, func_caller, qinfos, **kwargs):
""" Dispatches a batch of experiments. """
assert len(qinfos) == self.num_workers
for idx in range(self.num_workers):
self._dispatch_experiment(func_caller, qinfos[idx], self.worker_ids[idx], **kwargs)
def close_all_queries(self):
""" Closes all queries. """
pass
def get_time_distro_info(self):
""" Returns information on the time distribution. """
return 'realtime'
def get_poll_time_real(self):
""" Return 0.0 as the poll time. """
return self.poll_time
# For legacy purposes ----------------------------------------------------------------
RealWorkerManager = MultiProcessingWorkerManager
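# Illustrative sketch (commented out; `designer`, `func_caller` and `qinfo` are assumed to be
# objects provided elsewhere that satisfy the interfaces used above):
#   wm = SyntheticWorkerManager(num_workers=4, time_distro='const')
#   wm.set_experiment_designer(designer)
#   wm.dispatch_single_experiment(func_caller, qinfo)
#   results = wm.fetch_latest_results()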
|
overlay.py
|
__author1__ = 'David Northcote'
__author2__ = 'Lewis McLaughlin'
__organisation__ = 'The University of Strathclyde'
__date__ = '22nd October 2021'
__version_name__ = '<a href="https://www.google.com/search?q=the+cobbler" target="_blank" rel="noopener noreferrer">The Cobbler</a>'
__version_number__ = '0.4.0'
__channels__ = 'Quad-channel'
__board__ = 'ZCU111'
__release__ = 'release'
__info__ = 'PYNQ on RFSoC: Spectrum Analyzer.'
__support__ = '<a href="https://github.com/strath-sdr/rfsoc_sam" target="_blank" rel="noopener noreferrer">https://github.com/strath-sdr/rfsoc_sam</a>'
about = ''.join(['<br><b>', __info__, '</b><br>', __channels__, ' ', __board__,
' ', __release__, '<br>', 'Version ', __version_number__,
': ', __version_name__, '<br>Date: ', __date__, '<br><br>',
'<b>Organisation:</b> <br>', __organisation__,
'<br><br>', '<b>Support</b>:<br>', __support__])
from pynq import Overlay, allocate
import xrfclk
import xrfdc
import os
from .hierarchies import *
from .quick_widgets import Image
from ipywidgets import IntProgress
from IPython.display import display
from IPython.display import clear_output
import time
import threading
load_progress = 0
max_count = 100
load_bar = IntProgress(min=load_progress, max=max_count) # instantiate the bar
def generate_about():
global about
about = ''.join(['<br><b>', __info__, '</b><br>', __channels__, ' ', __board__,
' ', __release__, '<br>', 'Version ', __version_number__,
': ', __version_name__, '<br>Date: ', __date__, '<br><br>',
'<b>Organisation:</b> <br>', __organisation__,
'<br><br>', '<b>Support</b>:<br>', __support__])
class Overlay(Overlay):
def __init__(self, overlay_system='sam', init_rf_clks=True, **kwargs):
global __channels__
if not isinstance(overlay_system, str):
raise TypeError("Argument overlay_system must be of type string.")
        if overlay_system == 'sam':
            this_dir = os.path.dirname(__file__)
            bitfile_name = os.path.join(this_dir, 'bitstream', 'rfsoc_sam.bit')
        elif overlay_system == 'ofdm':
this_dir = os.path.dirname(__file__)
bitfile_name = os.path.join(this_dir, 'bitstream', 'rfsoc_sam_ofdm.bit')
__channels__ = 'Single-channel OFDM'
generate_about()
else:
raise ValueError(''.join(["Unknown overlay design ", overlay_system]))
super().__init__(bitfile_name, **kwargs)
if init_rf_clks:
self.init_rf_clks()
def init_rf_clks(self, lmx_freq=409.6):
"""Initialise the LMX and LMK clocks for RF-DC operation.
"""
xrfclk.set_all_ref_clks(lmx_freq)
def _sam_generator(self, config=None):
def tab_handler(widget):
tab_idx = widget['new']
for i in range(0, len(self.radio.receiver.channels)):
                if i != tab_idx:
self.radio.receiver.channels[i].frontend.stop()
self.radio.receiver.channels[tab_idx].frontend.start()
sam = self.radio.receiver._get_spectrum_analyser(config)
tab_name = [''.join(['Spectrum Analyzer ', str(j)]) for j in range(0, len(sam))]
children = [sam[i] for i in range(0, len(sam))]
tab = ipw.Tab(children=children,
layout=ipw.Layout(height='initial',
width='initial'))
for i in range(0, len(children)):
tab.set_title(i, tab_name[i])
tab.observe(tab_handler, 'selected_index')
return tab
def _ctl_generator(self, config=None):
ctl = self.radio.transmitter._get_transmitter_control(config)
tab_name = [''.join(['Transmitter Control ', str(j)]) for j in range(0, len(ctl))]
children = [ctl[i] for i in range(0, len(ctl))]
tab = ipw.Tab(children=children,
layout=ipw.Layout(height='initial',
width='initial'))
for i in range(0, len(children)):
tab.set_title(i, tab_name[i])
return tab
def _app_generator(self, config_analyser=None, config_transmitter=None):
def tab_handler(widget):
tab_idx = widget['new']
for i in range(0, len(self.radio.receiver.channels)):
                if i != tab_idx:
self.radio.receiver.channels[i].frontend.stop()
if tab_idx < len(self.radio.receiver.channels):
self.radio.receiver.channels[tab_idx].frontend.start()
sam = self.radio.receiver._get_spectrum_analyser(config_analyser)
ctl = self.radio.transmitter._get_transmitter_control(config_transmitter)
tab_name = [''.join(['Spectrum Analyzer ', str(j)]) for j in range(0, len(sam))]
tab_name.extend([''.join(['Transmitter Control ', str(j)]) for j in range(0, len(ctl))])
children = [sam[i] for i in range(0, len(sam))]
children.extend([ctl[i] for i in range(0, len(ctl))])
tab = ipw.Tab(children=children,
layout=ipw.Layout(height='initial',
width='initial'))
for i in range(0, len(children)):
tab.set_title(i, tab_name[i])
tab.observe(tab_handler, 'selected_index')
return tab
def spectrum_analyzer(self, config=None):
display(load_bar) # display the bar
thread = threading.Thread(target=self._update_progress)
thread.start()
sam_tab = self._sam_generator([config, config, config, config])
ctl_tab = self._ctl_generator(config=[{'transmit_enable' : True},
{'transmit_enable' : True},
{'transmit_enable' : True},
{'transmit_enable' : True}])
this_dir = os.path.dirname(__file__)
img = os.path.join(this_dir, 'assets', 'pynq_logo_light.png')
if config is not None:
if 'plotly_theme' in config:
if config['plotly_theme'] == 'plotly_dark':
img = os.path.join(this_dir, 'assets', 'pynq_logo_dark.png')
about_html = ipw.HTML(value=about)
pynq_image = Image(image_file=img,
width=300,
height=200)
sidebar = ipw.VBox([pynq_image.get_widget(), about_html, ])
app = ipw.HBox([sidebar, sam_tab, ipw.VBox([ipw.HBox([ctl_tab])])])
load_bar.value = 100
clear_output(wait=True)
return app
def spectrum_analyzer_application(self, config=None):
display(load_bar) # display the bar
thread = threading.Thread(target=self._update_progress)
thread.start()
app_tab = self._app_generator(config_analyser=[config, config, config, config],
config_transmitter=[{'transmit_enable' : True},
{'transmit_enable' : True},
{'transmit_enable' : True},
{'transmit_enable' : True}])
this_dir = os.path.dirname(__file__)
img = os.path.join(this_dir, 'assets', 'pynq_logo_light.png')
if config is not None:
if 'plotly_theme' in config:
if config['plotly_theme'] == 'plotly_dark':
img = os.path.join(this_dir, 'assets', 'pynq_logo_dark.png')
about_html = ipw.HTML(value=about)
pynq_image = Image(image_file=img,
width=300,
height=200)
sidebar = ipw.VBox([pynq_image.get_widget(), about_html, ])
app = ipw.HBox([sidebar, app_tab])
load_bar.value = 100
clear_output(wait=True)
return app
def _update_progress(self):
        while load_bar.value != 100:
if load_bar.value < 100:
load_bar.value = load_bar.value + 1
time.sleep(1)
else:
pass
def _sam_ofdm_generator(self, config=None):
def tab_handler(widget):
tab_idx = widget['new']
for i in range(0, len(self.radio.receiver.channels)):
                if i != tab_idx:
self.radio.receiver.channels[i].frontend.stop()
self.radio.receiver.channels[tab_idx].frontend.start()
sam = self.radio.receiver._get_spectrum_analyser(config)
tab_name = [''.join(['Spectrum Analyzer ', str(j)]) for j in range(0, len(sam))]
children = [sam[i] for i in range(0, len(sam))]
tab = ipw.Tab(children=children,
layout=ipw.Layout(height='initial',
width='initial'))
for i in range(0, len(children)):
tab.set_title(i, tab_name[i])
tab.observe(tab_handler, 'selected_index')
return tab
def _ctl_ofdm_generator(self, config=None):
ctl = self.radio.transmitter._get_transmitter_control(config)
tab_name = [''.join(['Transmitter Control ', str(j)]) for j in range(0, len(ctl))]
children = [ctl[i] for i in range(0, len(ctl))]
tab = ipw.Tab(children=children,
layout=ipw.Layout(height='initial',
width='initial'))
for i in range(0, len(children)):
tab.set_title(i, tab_name[i])
return tab
def _app_ofdm_generator(self, config_analyser=None, config_transmitter=None):
def tab_handler(widget):
tab_idx = widget['new']
for i in range(0, len(self.radio.receiver.channels)):
                if i != tab_idx:
self.radio.receiver.channels[i].frontend.stop()
for i in range(len(self.radio.receiver.channels), len(self.radio.receiver.channels)*2):
                if i != tab_idx:
self.radio.receiver.channels[len(self.radio.receiver.channels)*2-1-i].frontend._widgets['constellation_enable'].configure_state(False)
if tab_idx < len(self.radio.receiver.channels):
self.radio.receiver.channels[tab_idx].frontend.start()
sam = self.radio.receiver._get_spectrum_analyser(config_analyser)
ctl = self.radio.transmitter._get_transmitter_control(config_transmitter)
iqp = self.radio.receiver._get_constellation_plot()
tab_name = [''.join(['Spectrum Analyzer ', str(j)]) for j in range(0, len(sam))]
tab_name.extend([''.join(['Constellation Plot ', str(j)]) for j in range(0, len(iqp))])
tab_name_tx = [''.join(['Transmitter ', str(j)]) for j in range(0, len(sam))]
children = [sam[i] for i in range(0, len(sam))]
children.extend([iqp[i] for i in range(0, len(iqp))])
tab = ipw.Tab(children=children,
layout=ipw.Layout(height='initial',
width='initial'))
tab_tx = ipw.Tab(children=[ctl[i] for i in range(0, len(ctl))],
layout=ipw.Layout(height='initial',
width='initial'))
for i in range(0, len(ctl)):
tab_tx.set_title(i, tab_name_tx[i])
for i in range(0, len(children)):
tab.set_title(i, tab_name[i])
tab.observe(tab_handler, 'selected_index')
return ipw.HBox([tab, tab_tx])
def spectrum_ofdm_analyzer(self, config=None):
display(load_bar) # display the bar
thread = threading.Thread(target=self._update_progress)
thread.start()
sam_tab = self._sam_ofdm_generator([config])
ctl_tab = self._ctl_ofdm_generator()
this_dir = os.path.dirname(__file__)
img = os.path.join(this_dir, 'assets', 'pynq_logo_light.png')
if config is not None:
if 'plotly_theme' in config:
if config['plotly_theme'] == 'plotly_dark':
img = os.path.join(this_dir, 'assets', 'pynq_logo_dark.png')
about_html = ipw.HTML(value=about)
pynq_image = Image(image_file=img,
width=300,
height=200)
sidebar = ipw.VBox([pynq_image.get_widget(), about_html, ])
app = ipw.HBox([sidebar, sam_tab, ipw.VBox([ipw.HBox([ctl_tab])])])
load_bar.value = 100
clear_output(wait=True)
return app
def spectrum_ofdm_analyzer_application(self, config_rx=None, config_tx=None):
display(load_bar) # display the bar
thread = threading.Thread(target=self._update_progress)
thread.start()
app_tab = self._app_ofdm_generator(config_analyser=[config_rx],
config_transmitter=[config_tx])
this_dir = os.path.dirname(__file__)
img = os.path.join(this_dir, 'assets', 'pynq_logo_light.png')
if config_rx is not None:
if 'plotly_theme' in config_rx:
if config_rx['plotly_theme'] == 'plotly_dark':
img = os.path.join(this_dir, 'assets', 'pynq_logo_dark.png')
about_html = ipw.HTML(value=about)
pynq_image = Image(image_file=img,
width=300,
height=200)
sidebar = ipw.VBox([pynq_image.get_widget(), about_html, ])
app = ipw.HBox([sidebar, app_tab])
load_bar.value = 100
clear_output(wait=True)
return app
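# A minimal usage sketch (assumptions: running in a Jupyter notebook on a
# supported RFSoC board with this package installed; the import path
# `rfsoc_sam.overlay` is illustrative):
#
#     from rfsoc_sam.overlay import Overlay
#     sam = Overlay(overlay_system='sam', init_rf_clks=True)
#     sam.spectrum_analyzer_application()   # last expression in the cell renders the ipywidgets app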
|
concurrencytest.py
|
#!/usr/bin/env python3
#
# Modified for use in OE by Richard Purdie, 2018
#
# Modified by: Corey Goldberg, 2013
# License: GPLv2+
#
# Original code from:
# Bazaar (bzrlib.tests.__init__.py, v2.6, copied Jun 01 2013)
# Copyright (C) 2005-2011 Canonical Ltd
# License: GPLv2+
import os
import sys
import traceback
import unittest
import subprocess
import testtools
import threading
import time
import io
from queue import Queue
from itertools import cycle
from subunit import ProtocolTestCase, TestProtocolClient
from subunit.test_results import AutoTimingTestResultDecorator
from testtools import ThreadsafeForwardingResult, iterate_tests
import bb.utils
import oe.path
__all__ = [
'ConcurrentTestSuite',
'fork_for_tests',
'partition_tests',
]
#
# Patch the version from testtools to allow access to _test_start and allow
# computation of timing information and threading progress
#
class BBThreadsafeForwardingResult(ThreadsafeForwardingResult):
def __init__(self, target, semaphore, threadnum, totalinprocess, totaltests):
super(BBThreadsafeForwardingResult, self).__init__(target, semaphore)
self.threadnum = threadnum
self.totalinprocess = totalinprocess
self.totaltests = totaltests
def _add_result_with_semaphore(self, method, test, *args, **kwargs):
self.semaphore.acquire()
try:
self.result.starttime[test.id()] = self._test_start.timestamp()
self.result.threadprogress[self.threadnum].append(test.id())
totalprogress = sum(len(x) for x in self.result.threadprogress.values())
self.result.progressinfo[test.id()] = "%s: %s/%s %s/%s (%ss) (%s)" % (
self.threadnum,
len(self.result.threadprogress[self.threadnum]),
self.totalinprocess,
totalprogress,
self.totaltests,
"{0:.2f}".format(time.time()-self._test_start.timestamp()),
test.id())
finally:
self.semaphore.release()
super(BBThreadsafeForwardingResult, self)._add_result_with_semaphore(method, test, *args, **kwargs)
#
# A dummy structure to add to io.StringIO so that the .buffer object
# is available and accepts writes. This allows unittest with buffer=True
# to interact ok with subunit which wants to access sys.stdout.buffer.
#
class dummybuf(object):
def __init__(self, parent):
self.p = parent
def write(self, data):
self.p.write(data.decode("utf-8"))
#
# Taken from testtools.ConcurrentTestSuite but modified for OE use
#
class ConcurrentTestSuite(unittest.TestSuite):
def __init__(self, suite, processes):
super(ConcurrentTestSuite, self).__init__([suite])
self.processes = processes
def run(self, result):
tests, totaltests = fork_for_tests(self.processes, self)
try:
threads = {}
queue = Queue()
semaphore = threading.Semaphore(1)
result.threadprogress = {}
for i, (test, testnum) in enumerate(tests):
result.threadprogress[i] = []
process_result = BBThreadsafeForwardingResult(result, semaphore, i, testnum, totaltests)
# Force buffering of stdout/stderr so the console doesn't get corrupted by test output
# as per default in parent code
process_result.buffer = True
# We have to add a buffer object to stdout to keep subunit happy
process_result._stderr_buffer = io.StringIO()
process_result._stderr_buffer.buffer = dummybuf(process_result._stderr_buffer)
process_result._stdout_buffer = io.StringIO()
process_result._stdout_buffer.buffer = dummybuf(process_result._stdout_buffer)
reader_thread = threading.Thread(
target=self._run_test, args=(test, process_result, queue))
threads[test] = reader_thread, process_result
reader_thread.start()
while threads:
finished_test = queue.get()
threads[finished_test][0].join()
del threads[finished_test]
except:
for thread, process_result in threads.values():
process_result.stop()
raise
def _run_test(self, test, process_result, queue):
try:
try:
test.run(process_result)
except Exception:
# The run logic itself failed
case = testtools.ErrorHolder(
"broken-runner",
error=sys.exc_info())
case.run(process_result)
finally:
queue.put(test)
def removebuilddir(d):
delay = 5
while delay and os.path.exists(d + "/bitbake.lock"):
time.sleep(1)
delay = delay - 1
bb.utils.prunedir(d)
def fork_for_tests(concurrency_num, suite):
result = []
test_blocks = partition_tests(suite, concurrency_num)
# Clear the tests from the original suite so it doesn't keep them alive
suite._tests[:] = []
totaltests = sum(len(x) for x in test_blocks)
for process_tests in test_blocks:
numtests = len(process_tests)
process_suite = unittest.TestSuite(process_tests)
        # Also clear each split list so the new suite holds the only reference
process_tests[:] = []
c2pread, c2pwrite = os.pipe()
# Clear buffers before fork to avoid duplicate output
sys.stdout.flush()
sys.stderr.flush()
pid = os.fork()
if pid == 0:
ourpid = os.getpid()
try:
newbuilddir = None
stream = os.fdopen(c2pwrite, 'wb', 1)
os.close(c2pread)
# Create a new separate BUILDDIR for each group of tests
if 'BUILDDIR' in os.environ:
builddir = os.environ['BUILDDIR']
newbuilddir = builddir + "-st-" + str(ourpid)
selftestdir = os.path.abspath(builddir + "/../meta-selftest")
newselftestdir = newbuilddir + "/meta-selftest"
bb.utils.mkdirhier(newbuilddir)
oe.path.copytree(builddir + "/conf", newbuilddir + "/conf")
oe.path.copytree(builddir + "/cache", newbuilddir + "/cache")
oe.path.copytree(selftestdir, newselftestdir)
for e in os.environ:
if builddir in os.environ[e]:
os.environ[e] = os.environ[e].replace(builddir, newbuilddir)
subprocess.check_output("git init; git add *; git commit -a -m 'initial'", cwd=newselftestdir, shell=True)
                    # Tried to use bitbake-layers add/remove but it requires recipe parsing and hence is too slow
subprocess.check_output("sed %s/conf/bblayers.conf -i -e 's#%s#%s#g'" % (newbuilddir, selftestdir, newselftestdir), cwd=newbuilddir, shell=True)
os.chdir(newbuilddir)
for t in process_suite:
if not hasattr(t, "tc"):
continue
cp = t.tc.config_paths
for p in cp:
if selftestdir in cp[p] and newselftestdir not in cp[p]:
cp[p] = cp[p].replace(selftestdir, newselftestdir)
if builddir in cp[p] and newbuilddir not in cp[p]:
cp[p] = cp[p].replace(builddir, newbuilddir)
# Leave stderr and stdout open so we can see test noise
# Close stdin so that the child goes away if it decides to
                # read from stdin (otherwise it's a roulette to see which
# child actually gets keystrokes for pdb etc).
newsi = os.open(os.devnull, os.O_RDWR)
os.dup2(newsi, sys.stdin.fileno())
subunit_client = TestProtocolClient(stream)
# Force buffering of stdout/stderr so the console doesn't get corrupted by test output
# as per default in parent code
subunit_client.buffer = True
subunit_result = AutoTimingTestResultDecorator(subunit_client)
process_suite.run(subunit_result)
if ourpid != os.getpid():
os._exit(0)
if newbuilddir:
removebuilddir(newbuilddir)
except:
# Don't do anything with process children
if ourpid != os.getpid():
os._exit(1)
# Try and report traceback on stream, but exit with error
# even if stream couldn't be created or something else
# goes wrong. The traceback is formatted to a string and
# written in one go to avoid interleaving lines from
# multiple failing children.
try:
stream.write(traceback.format_exc().encode('utf-8'))
except:
sys.stderr.write(traceback.format_exc())
finally:
if newbuilddir:
removebuilddir(newbuilddir)
stream.flush()
os._exit(1)
stream.flush()
os._exit(0)
else:
os.close(c2pwrite)
stream = os.fdopen(c2pread, 'rb', 1)
test = ProtocolTestCase(stream)
result.append((test, numtests))
return result, totaltests
def partition_tests(suite, count):
# Keep tests from the same class together but allow tests from modules
# to go to different processes to aid parallelisation.
modules = {}
for test in iterate_tests(suite):
m = test.__module__ + "." + test.__class__.__name__
if m not in modules:
modules[m] = []
modules[m].append(test)
# Simply divide the test blocks between the available processes
partitions = [list() for _ in range(count)]
for partition, m in zip(cycle(partitions), modules):
partition.extend(modules[m])
# No point in empty threads so drop them
return [p for p in partitions if p]
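# A minimal usage sketch (assumptions: an OE/bitbake environment so the bb/oe
# imports above resolve, and a hypothetical 'tests' directory). The wrapped
# suite is partitioned, each partition runs in a forked child streaming subunit
# over a pipe, and the parent merges everything into `result`:
#
#     import unittest
#     suite = unittest.TestLoader().discover('tests')
#     result = unittest.TestResult()
#     result.starttime = {}      # extra attributes BBThreadsafeForwardingResult fills in
#     result.progressinfo = {}
#     ConcurrentTestSuite(suite, processes=4).run(result)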
|
classifyFiles.py
|
import csv
import os
import shutil
from threading import Thread
file_csv = csv.reader(open('D:/dev/csv/train.csv', 'r'))
sourceDir = 'D:/dev/csv/data/'
targetDir = 'D:/dev/csv/data/target/'
suffix = '.tif'
i = 0
notExistList = []
existList = []
targetList = []
# Iterate over the CSV rows
for row in file_csv:
i = i + 1
    # Skip the header row
if i == 1:
continue
else:
fileName = row[0]
sourcePath = sourceDir + fileName + suffix
# print(filePath)
exists = os.path.exists(sourcePath)
        # If the file exists, copy it to the target directory
if exists:
existList.append(sourcePath)
            # Build the target path
fileType = row[1]
targetPath = targetDir + fileType + '/'
targetList.append(targetPath + fileName + suffix)
if not os.path.exists(targetPath):
os.makedirs(targetPath)
# print(filePath)
# Thread(target=shutil.copy, args=[sourcePath, targetPath]).start()
shutil.copy(sourcePath, targetPath)
        # If the file does not exist, record it
else:
notExistList.append(sourcePath)
print(len(existList))
print(existList)
print(notExistList)
print(targetList)
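# The commented-out Thread call above hints at copying files in parallel. A
# hedged sketch of that idea with a bounded worker pool (kept commented out so
# this script's behaviour is unchanged):
#
#     from concurrent.futures import ThreadPoolExecutor
#     with ThreadPoolExecutor(max_workers=8) as pool:
#         for src, dst in zip(existList, targetList):
#             pool.submit(shutil.copy, src, dst)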
|
tk_raw_image_analy_ver0.091.py
|
## Image processing and data analysis tool
from tkinter import *; import os.path ;import math
from tkinter.filedialog import *
from tkinter.simpledialog import *
## Function definitions
def loadImage(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    fsize = os.path.getsize(fname) # Check the file size
    inH = inW = int(math.sqrt(fsize)) # Determine the size of the input memory (important!)
inImage = []; tmpList = []
    for i in range(inH) : # Allocate input memory (initialised to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
    # Load data from the file into memory
    fp = open(fname, 'rb') # Open the file (binary mode)
for i in range(inH) :
for k in range(inW) :
inImage[i][k] = int(ord(fp.read(1)))
fp.close()
def openFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
    loadImage(filename) # file --> input memory
    equal() # input memory --> output memory
import threading
def display() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Tear down the existing canvas, if there is one.
if canvas != None :
canvas.destroy()
    # Prepare the display area (fixed size)
VIEW_X, VIEW_Y = 256, 256
    if VIEW_X >= outW or VIEW_Y >= outH : # if the image is smaller than the view
VIEW_X = outW
VIEW_Y = outH
        step = 1 # number of pixels to skip
else :
step = int(outW / VIEW_X)
window.geometry(str(VIEW_X*2) + 'x' + str(VIEW_Y*2))
canvas = Canvas(window, width=VIEW_X, height=VIEW_Y)
paper = PhotoImage(width=VIEW_X, height=VIEW_Y)
canvas.create_image((VIEW_X/2, VIEW_X/2), image=paper, state='normal')
    # Draw to the screen
def putPixel() :
for i in range(0, outH,step) :
for k in range(0, outW,step) :
data = outImage[i][k]
paper.put('#%02x%02x%02x' % (data, data, data),
( int(k/step),int(i/step)))
threading.Thread(target=putPixel).start()
canvas.pack(expand=1, anchor =CENTER)
status.configure(text='이미지 정보:' + str(outW) + 'x' + str(outH) )
def equal() : # identity (copy) algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Determine the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[i][k] = inImage[i][k]
display()
def addImage() : # brightening algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Determine the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
value = askinteger('밝게하기', '밝게할 값-->', minvalue=1, maxvalue=255)
for i in range(inH) :
for k in range(inW) :
if inImage[i][k] + value > 255 :
outImage[i][k] = 255
else :
outImage[i][k] = inImage[i][k] + value
display()
def a_average() : # average value of the input/output images
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
rawSum = 0
for i in range(inH) :
for k in range(inW) :
rawSum += inImage[i][k]
inRawAvg = int(rawSum / (inH*inW))
rawSum = 0
for i in range(outH) :
for k in range(outW) :
rawSum += outImage[i][k]
outRawAvg = int(rawSum / (outH*outW))
    subWindow = Toplevel(window) # sub-window attached to the parent (window)
subWindow.geometry('200x100')
label1 = Label(subWindow, text='입력영상 평균값 -->' + str(inRawAvg) ); label1.pack()
label2 = Label(subWindow, text='출력영상 평균값 -->' + str(outRawAvg)); label2.pack()
subWindow.mainloop()
def a_histogram() : # histogram
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
countList = [0] * 256; normalList = [0] * 256
for i in range(outH) :
for k in range(outW) :
value = outImage[i][k]
countList[value] += 1
    # normalised value = (count - min) * HIGH / (max - min)
maxVal = max (countList); minVal = min(countList)
for i in range(len(countList)) :
normalList[i] = (countList[i] - minVal) * 256 / (maxVal - minVal)
    # Draw to the screen
subWindow = Toplevel(window)
subWindow.geometry('256x256')
subCanvas = Canvas(subWindow, width=256, height=256)
subPaper = PhotoImage(width=256, height=256)
subCanvas.create_image((256/2,256/2), image=subPaper, state='normal')
for i in range(0, 256) :
for k in range(0, int(normalList[i])) :
data = 0
subPaper.put('#%02x%02x%02x' % (data, data, data), (i, 255-k))
subCanvas.pack(expand=1, anchor=CENTER)
subWindow.mainloop()
import matplotlib.pyplot as plt
def a_histogram2() : # histogram (matplotlib)
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
countList = [0] * 256
for i in range(outH) :
for k in range(outW) :
value = outImage[i][k]
countList[value] += 1
plt.plot(countList)
plt.show()
def upDown() : # vertical flip algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Determine the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
            outImage[outH-1-i][k] = inImage[i][k]
display()
def panImage() :
global panYN
panYN = True
def mouseClick(event) : # record the pan start position
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN :
return
sx = event.x; sy = event.y;
def mouseDrop(event): # apply the pan on mouse release
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global sx, sy, ex, ey, panYN
if not panYN:
return
ex = event.x; ey = event.y;
my = sx - ex ; mx = sy - ey
    # Important! Determine the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
if 0<= i-mx <outH and 0<= k-my < outW :
outImage[i-mx][k-my] = inImage[i][k]
panYN = False
display()
def zoomOut() : # shrink (zoom out) algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Determine the size of the output memory
    scale = askinteger('축소하기', '축소할 배수-->', minvalue=2, maxvalue=32)
    outW = int(inW/scale); outH = int(inH/scale);
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
for i in range(inH) :
for k in range(inW) :
outImage[int(i/scale)][int(k/scale)] = inImage[i][k]
display()
import struct
def saveFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
saveFp = asksaveasfile(parent=window, mode='wb',
defaultextension="*.raw", filetypes=(("RAW파일", "*.raw"), ("모든파일", "*.*")))
for i in range(outW):
for k in range(outH):
saveFp.write( struct.pack('B',outImage[i][k]))
saveFp.close()
def exitFile() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
pass
import csv
def saveCSV() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.csv", filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
output_file = output_file.name
    header = ['Row', 'Column', 'Value']
with open(output_file, 'w', newline='') as filewriter:
csvWriter = csv.writer(filewriter)
csvWriter.writerow(header)
for row in range(outW):
for col in range(outH):
data = outImage[row][col]
row_list = [row, col, data]
csvWriter.writerow(row_list)
print('OK!')
def saveShuffleCSV() :
pass
def loadCSV(fname) :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
fsize = -1
fp = open(fname, 'r')
for f in fp :
fsize += 1
fp.close()
    inH = inW = int(math.sqrt(fsize)) # Determine the size of the input memory (important!)
    inImage = []; tmpList = []
    for i in range(inH) : # Allocate input memory (initialised to 0)
tmpList = []
for k in range(inW) :
tmpList.append(0)
inImage.append(tmpList)
    # Load data from the file into memory
    fp = open(fname, 'r') # Open the file (text mode)
csvFP = csv.reader(fp)
next(csvFP)
for row_list in csvFP :
row= int(row_list[0]) ; col = int(row_list[1]) ; value=int(row_list[2])
inImage[row][col] = value
fp.close()
def openCSV() :
global window, canvas, paper, filename,inImage, outImage,inW, inH, outW, outH
filename = askopenfilename(parent=window,
filetypes=(("CSV파일", "*.csv"), ("모든파일", "*.*")))
    loadCSV(filename) # file --> input memory
    equal() # input memory --> output memory
import sqlite3
def saveSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
    con = sqlite3.connect('imageDB') # Open (or create) the database
    cur = con.cursor() # Create a cursor (the channel for issuing queries)
    # Build a list of column names
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(inW) + \
"," + str(i) + "," + str(k) + "," + str(inImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok!')
def openSQLite() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
    con = sqlite3.connect('imageDB') # Open (or create) the database
    cur = con.cursor() # Create a cursor (the channel for issuing queries)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## Inner function: a function defined inside another function (local function) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
            for i in range(inH): # Allocate input memory (initialised to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openSQLite")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openSQLite")
import pymysql
def saveMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
                          password='1234', db='imageDB', charset='utf8') # Connect to the database
    cur = con.cursor() # Create a cursor (the channel for issuing queries)
    # Build a list of column names
colList = []
fname = os.path.basename(filename).split(".")[0]
try:
sql = "CREATE TABLE imageTable( filename CHAR(20), resolution smallint" + \
", row smallint, col smallint, value smallint)"
cur.execute(sql)
except:
pass
try:
sql = "DELETE FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + str(outW)
cur.execute(sql)
con.commit()
except:
pass
for i in range(inW) :
for k in range(inH) :
sql = "INSERT INTO imageTable VALUES('" + fname + "'," + str(outW) + \
"," + str(i) + "," + str(k) + "," + str(outImage[i][k]) +")"
cur.execute(sql)
con.commit()
cur.close()
con.close() # 데이터베이스 연결 종료
print('Ok! saveMySQL')
def openMySQL() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
global csvList, input_file
con = pymysql.connect(host='192.168.174.129', user='root',
                          password='1234', db='imageDB', charset='utf8') # Connect to the database
    cur = con.cursor() # Create a cursor (the channel for issuing queries)
try :
sql = "SELECT DISTINCT filename, resolution FROM imageTable"
cur.execute(sql)
        tableNameList = [] # e.g. ['dog:128', 'dog:512', ...]
while True :
row = cur.fetchone()
if row == None :
break
tableNameList.append( row[0] + ':' + str(row[1]) )
        ######## Inner function: a function defined inside another function (local function) #######
def selectTable() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
selectedIndex = listbox.curselection()[0]
subWindow.destroy()
fname, res = tableNameList[selectedIndex].split(':')
filename = fname
sql = "SELECT row, col, value FROM imageTable WHERE filename='" + \
fname + "' AND resolution=" + res
print(sql)
cur.execute(sql)
inH = inW = int(res)
inImage = []; tmpList = []
            for i in range(inH): # Allocate input memory (initialised to 0)
tmpList = []
for k in range(inW):
tmpList.append(0)
inImage.append(tmpList)
while True :
row_tuple = cur.fetchone()
if row_tuple == None :
break
row, col, value = row_tuple
inImage[row][col] = value
cur.close()
con.close()
equal()
print("Ok! openMySQL")
################################################################
subWindow = Toplevel(window)
listbox = Listbox(subWindow)
button = Button(subWindow, text='선택', command=selectTable)
listbox.pack(); button.pack()
for sName in tableNameList :
listbox.insert(END, sName)
subWindow.lift()
except :
cur.close()
con.close()
print("Error! openMySQL")
import xlwt
def saveExcel1() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xls", filetypes=(("XLS파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlwt.Workbook()
ws = wb.add_sheet(sheetName)
for rowNum in range(outH):
for colNum in range(outW):
data = outImage[rowNum][colNum]
ws.write(rowNum, colNum, data)
wb.save(output_file)
print('OK! saveExcel1')
import xlsxwriter
def saveExcel2() :
global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
output_file = asksaveasfile(parent=window, mode='w',
defaultextension="*.xlsx", filetypes=(("XLSX파일", "*.xls"), ("모든파일", "*.*")))
output_file = output_file.name
sheetName = os.path.basename(output_file).split(".")[0]
wb = xlsxwriter.Workbook(output_file)
ws = wb.add_worksheet(sheetName)
    ws.set_column(0, outW, 1.0) # roughly 0.34
    for r in range(outH):
        ws.set_row(r, 9.5) # roughly 0.35
for rowNum in range(outW) :
for colNum in range(outH) :
data = outImage[rowNum][colNum]
            # Use the data value for the cell background colour (#000000~#FFFFFF)
if data > 15 :
hexStr = '#' + (hex(data)[2:])*3
else :
hexStr = '#' + ('0' + hex(data)[2:]) * 3
            # Prepare the cell format
cell_format = wb.add_format()
cell_format.set_bg_color(hexStr)
ws.write(rowNum, colNum, '', cell_format)
wb.close()
print('OK! saveExcel2')
def a_histoStretch() : # histogram stretching algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Determine the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
maxVal, minVal, HIGH = 0, 255, 255
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal :
maxVal = data
if data < minVal :
minVal = data
    # Histogram stretching
    # OUT = (IN - min) * HIGH / (max - min)
for i in range(inH) :
for k in range(inW) :
value = int( (inImage[i][k] - minVal) * HIGH / ( maxVal - minVal) )
if value < 0 :
value = 0
elif value > 255 :
value = 255
outImage[i][k] = value
display()
def a_endInSearch() : # end-in search algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Determine the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
maxVal, minVal, HIGH = 0, 255, 255
for i in range(inH) :
for k in range(inW) :
data = inImage[i][k]
if data > maxVal :
maxVal = data
if data < minVal :
minVal = data
limit = askinteger('엔드인', '상하 범위:', minvalue=1, maxvalue=127)
maxVal -= limit
minVal += limit
    # Histogram stretching
    # OUT = (IN - min) * HIGH / (max - min)
for i in range(inH) :
for k in range(inW) :
value = int( (inImage[i][k] - minVal) * HIGH / ( maxVal - minVal) )
if value < 0 :
value = 0
elif value > 255 :
value = 255
outImage[i][k] = value
display()
def a_histoEqual() : # histogram equalisation algorithm
    global window, canvas, paper, filename, inImage, outImage, inW, inH, outW, outH
    # Important! Determine the size of the output memory
    outW = inW; outH = inH;
    outImage = []; tmpList = []
    for i in range(outH): # Allocate output memory (initialised to 0)
tmpList = []
for k in range(outW):
tmpList.append(0)
outImage.append(tmpList)
#############################
    # Implement the actual image processing algorithm
############################
    histo = [0] * 256; sumHisto = [0] * 256; normalHisto = [0] * 256  # 256 bins for pixel values 0..255
HIGH = 255
    # Build the histogram
for i in range(inH) :
for k in range(inW) :
value = inImage[i][k]
histo[value] += 1
    # Build the cumulative histogram
sVal = 0
for i in range(len(histo)) :
sVal += histo[i]
sumHisto[i] = sVal
    # Normalised cumulative histogram: (cumulative sum / (rows * cols)) * HIGH
for i in range(len(sumHisto)) :
normalHisto[i] = int(sumHisto[i] / (outW * outH) * HIGH)
    # Output using the normalised values
for i in range(inH) :
for k in range(inW) :
index = inImage[i][k]
outImage[i][k] = normalHisto[index]
display()
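# Worked example of the equalisation mapping above (illustrative numbers): for
# a 2x2 image with pixel values [0, 0, 128, 255] the counts are histo[0]=2,
# histo[128]=1, histo[255]=1; the cumulative sums at those indices are 2, 3 and
# 4, so with HIGH = 255 the normalised mapping becomes
#     0   -> int(2/4 * 255) = 127
#     128 -> int(3/4 * 255) = 191
#     255 -> int(4/4 * 255) = 255
# i.e. the dark values are spread out towards the middle of the intensity range.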
## Global variables
window, canvas, paper, filename = [None] * 4
inImage, outImage = [], []; inW, inH, outW, outH = [0] * 4
panYN = False; sx, sy, ex, ey = [0] * 4
VIEW_X, VIEW_Y = 128, 128
status = None
## Main code
window = Tk(); window.geometry('400x400');
window.title('영상 처리&데이터 분석 Ver 0.91')
window.bind("<Button-1>", mouseClick)
window.bind("<ButtonRelease-1>", mouseDrop)
status = Label(window, text='이미지 정보:', bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
mainMenu = Menu(window);window.config(menu=mainMenu)
fileMenu = Menu(mainMenu);mainMenu.add_cascade(label='파일', menu=fileMenu)
fileMenu.add_command(label='열기', command=openFile)
fileMenu.add_command(label='저장', command=saveFile)
fileMenu.add_separator()
fileMenu.add_command(label='종료', command=exitFile)
pixelMenu = Menu(mainMenu);mainMenu.add_cascade(label='화소점처리', menu=pixelMenu)
pixelMenu.add_command(label='동일영상', command=equal)
pixelMenu.add_command(label='밝게하기', command=addImage)
geoMenu = Menu(mainMenu);mainMenu.add_cascade(label='기하학 처리', menu=geoMenu)
geoMenu.add_command(label='상하반전', command=upDown)
geoMenu.add_command(label='화면이동', command=panImage)
geoMenu.add_command(label='화면축소', command=zoomOut)
analyzeMenu = Menu(mainMenu);mainMenu.add_cascade(label='데이터분석', menu=analyzeMenu)
analyzeMenu.add_command(label='평균값', command=a_average)
analyzeMenu.add_command(label='히스토그램', command=a_histogram)
analyzeMenu.add_command(label='히스토그램(matplotlib)', command=a_histogram2)
analyzeMenu.add_separator()
analyzeMenu.add_command(label='히스토그램 스트래칭', command=a_histoStretch)
analyzeMenu.add_command(label='엔드-인 탐색', command=a_endInSearch)
analyzeMenu.add_command(label='히스토그램 평활화', command=a_histoEqual)
otherMenu = Menu(mainMenu);mainMenu.add_cascade(label='다른 포맷 처리', menu=otherMenu)
otherMenu.add_command(label='CSV로 내보내기', command=saveCSV)
otherMenu.add_command(label='CSV(셔플)로 내보내기', command=saveShuffleCSV)
otherMenu.add_command(label='CSV 불러오기', command=openCSV)
otherMenu.add_separator()
otherMenu.add_command(label='SQLite로 내보내기', command=saveSQLite)
otherMenu.add_command(label='SQLite에서 가져오기', command=openSQLite)
otherMenu.add_separator()
otherMenu.add_command(label='MySQL로 내보내기', command=saveMySQL)
otherMenu.add_command(label='MySQL에서 가져오기', command=openMySQL)
otherMenu.add_separator()
otherMenu.add_command(label='Excel로 내보내기(숫자)', command=saveExcel1)
otherMenu.add_command(label='Excel로 내보내기(음영)', command=saveExcel2)
window.mainloop()
|
CW_Remote_PySide2_Qt.py
|
#!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
# Y axis ticks/labels optimize
# Defer refresh if zoomed
# Tried these, can't do, limitations of PySide2:
# # X axis ticks/labels optimize
# # X/Y axis subticks
# $ pyinstaller -F -w --exclude-module _tkinter --exclude-module Tkinter --exclude-module enchant --exclude-module twisted --osx-bundle-identifier com.techview.cwremote -i CW_Remote.icns CW_Remote_Qt.py
from __future__ import print_function
from __future__ import division
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import QPoint, Qt, QDateTime, QRect, QRectF, QSize, QMargins
from PySide2.QtGui import QPainter, QPen, QColor
from PySide2.QtCharts import QtCharts
# import PySide2.QtCore.QString as QString
import os, sys
from os.path import isfile, join, expanduser # dirname
import platform
import datetime, calendar
import boto3
import json
import copy
from collections import OrderedDict
import re
# from Graph_Index_0 import Graph_Index_0
# from Graph_Index_1 import Graph_Index_1
#
# def Parse_ISO_DateTime_String ( ISO_DateTime_String ):
# if (ISO_DateTime_String.endswith('Z')):
# ISO_DateTime_String = ISO_DateTime_String[:-1] + "+00:00"
# # "2019-01-03T00:00:05.522864Z"
# # "2017-04-27T04:02:59.008000+00:00"
# # 00000000001111111111222222222233
# # 01234567890123456789012345678901
# iso_microseconds = 0
# iso_timezone_string = ''
# if (len(ISO_DateTime_String) == 19):
# # No microseconds and no time zone specification
# # Interpret this as NYC wall time
# iso_microseconds = 0
# iso_timezone_string = ''
# elif ((len(ISO_DateTime_String) == 26) and (ISO_DateTime_String[19] == '.')):
# # Microseconds but no time zone specification
# # Interpret this as NYC wall time
# iso_microseconds = int(ISO_DateTime_String[20:26])
# iso_timezone_string = ''
# elif ((ISO_DateTime_String[19] == '+') or (ISO_DateTime_String[19] == '-')):
# # No microseconds but having time zone specification
# iso_microseconds = 0
# iso_timezone_string = ISO_DateTime_String[19:]
# elif ((ISO_DateTime_String[19] == '.') and
# ((ISO_DateTime_String[26] == '+') or (ISO_DateTime_String[26] == '-'))):
# # Both microseconds plus time zone specification
# iso_microseconds = int(ISO_DateTime_String[20:26])
# iso_timezone_string = ISO_DateTime_String[26:]
# # "2016-07-09T03:27:27-0400"
# # 00000000001111111111222222
# # 01234567890123456789012345
# # "2016-07-09T03:27:27-04:00"
# # Compute UTC offset, supporting all forms: "+0400", "-0400", "+04:00", and "-04:00"
# if (len(iso_timezone_string) == 0):
# # In the US, since 2007, DST starts at 2am (standard time) on the second
# # Sunday in March, which is the first Sunday on or after Mar 8.
# # and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
# begin_daylight_savings = \
# datetime.datetime(year=int(ISO_DateTime_String[0:4]), month=3, day=8, hour=2, tzinfo=Eastern_Standard_Time_Zone)
# begin_daylight_savings += datetime.timedelta(days=(6 - begin_daylight_savings.date().weekday()))
#
# end_daylight_savings = \
# datetime.datetime(year=int(ISO_DateTime_String[0:4]), month=11, day=1, hour=1, tzinfo=Eastern_Standard_Time_Zone)
# end_daylight_savings += datetime.timedelta(days=(6 - end_daylight_savings.date().weekday()))
#
# datetime_EST = \
# datetime.datetime(int(ISO_DateTime_String[0:4]), # year
# int(ISO_DateTime_String[5:7]), # month
# int(ISO_DateTime_String[8:10]), # day
# int(ISO_DateTime_String[11:13]), # hour
# int(ISO_DateTime_String[14:16]), # minute
# int(ISO_DateTime_String[17:19]), # second
# iso_microseconds, # microseconds
# Eastern_Standard_Time_Zone)
#
# if ((datetime_EST >= begin_daylight_savings) and (datetime_EST <= end_daylight_savings)):
# minutes_offset = -4 * 60 # Eastern_Daylight_Time_Zone
# else: minutes_offset = -5 * 60 # Eastern_Standard_Time_Zone
#
# elif (iso_timezone_string[3] == ':'):
# minutes_offset = (60 * int(iso_timezone_string[1:3])) + int(iso_timezone_string[4:6])
# else:
# minutes_offset = (60 * int(iso_timezone_string[1:3])) + int(iso_timezone_string[3:5])
# if ((len(iso_timezone_string) > 0) and
# (iso_timezone_string[0] == '-')): minutes_offset = -minutes_offset
#
# # Return ISO_DateTime_String as UTC datetime
# return datetime.datetime(int(ISO_DateTime_String[0:4]), # year
# int(ISO_DateTime_String[5:7]), # month
# int(ISO_DateTime_String[8:10]), # day
# int(ISO_DateTime_String[11:13]), # hour
# int(ISO_DateTime_String[14:16]), # minute
# int(ISO_DateTime_String[17:19]), # second
# iso_microseconds, # microseconds
# Time_Zone(minutes_offset)).astimezone(UTC_Time_Zone)
#
# def Metric_Statistics_Datapoints_Time_and_Values(Metric_Statistics_Datapoints, Y_Factor=1):
# data_point_list = []
# for data_point in Metric_Statistics_Datapoints:
# data_datetime = Parse_ISO_DateTime_String(data_point["Timestamp"])
# nyc_wall_time_offset = NYC_Wall_DateTime_Offset(data_datetime)
# data_datetime = data_datetime + datetime.timedelta(hours=int(nyc_wall_time_offset) / 100)
# # datetime_str = data_point["Timestamp"][:-6]
# # format = "yyyy-MM-ddTHH:mm:ss"
# # data_datetime = QDateTime.fromString(datetime_str, format);
# data_maximum = data_point["Maximum"] * Y_Factor
# data_average = data_point["Average"] * Y_Factor
# data_point_list.append((data_datetime, data_maximum, data_average))
# data_point_list.sort()
#
# data_time_max_list = [(time, max) for time, max, avg in data_point_list]
# return data_time_max_list
# Convenience function to bound a value
def bound ( low, high, value ):
return max(low, min(high, value))
def Round_DateTime (Initial_DateTime, Round_to_Resolution_Seconds=60):
seconds = (Initial_DateTime - Initial_DateTime.min).seconds
rounding = (seconds + (Round_to_Resolution_Seconds / 2)) // Round_to_Resolution_Seconds * Round_to_Resolution_Seconds
return Initial_DateTime + datetime.timedelta(0, (rounding - seconds), -Initial_DateTime.microsecond)
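# For example (illustrative): with Round_to_Resolution_Seconds=60, 12:34:47
# rounds up to 12:35:00, 12:34:17 rounds down to 12:34:00, and any sub-second
# component is discarded.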
# Local wall time, this works for New York City
class Time_Zone ( datetime.tzinfo ):
def __init__(self, offset_in_minutes):
super(Time_Zone, self).__init__()
self.offset = offset_in_minutes
def utcoffset(self, dt):
return datetime.timedelta(minutes=self.offset)
def tzname(self, dt):
return ""
def dst(self, dt):
return datetime.timedelta(0)
UTC_Time_Zone = Time_Zone(0)
Eastern_Daylight_Time_Zone = Time_Zone(-4 * 60)
Eastern_Standard_Time_Zone = Time_Zone(-5 * 60)
def NYC_Wall_DateTime_Offset ( Time_Zone_Aware_DateTime ):
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
datetime_nyc_wall = Time_Zone_Aware_DateTime.astimezone(Eastern_Standard_Time_Zone)
    # Test whether the date falls within daylight saving time
begin_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=3, day=8, hour=2, tzinfo=Eastern_Standard_Time_Zone)
begin_daylight_savings += datetime.timedelta(days=(6 - begin_daylight_savings.date().weekday()))
end_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=11, day=1, hour=1, tzinfo=Eastern_Standard_Time_Zone)
end_daylight_savings += datetime.timedelta(days=(6 - end_daylight_savings.date().weekday()))
if ((datetime_nyc_wall >= begin_daylight_savings) and (datetime_nyc_wall <= end_daylight_savings)):
datetime_nyc_wall_offset = "-0400"
else: datetime_nyc_wall_offset = "-0500"
return datetime_nyc_wall_offset
def NYC_Wall_DateTime ( Time_Zone_Aware_DateTime ):
# In the US, since 2007, DST starts at 2am (standard time) on the second
# Sunday in March, which is the first Sunday on or after Mar 8.
# and ends at 2am (DST time; 1am standard time) on the first Sunday of Nov.
datetime_nyc_wall = Time_Zone_Aware_DateTime.astimezone(Eastern_Standard_Time_Zone)
    # Test whether the date falls within daylight saving time
begin_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=3, day=8, hour=2, tzinfo=Eastern_Standard_Time_Zone)
begin_daylight_savings += datetime.timedelta(days=(6 - begin_daylight_savings.date().weekday()))
end_daylight_savings = \
datetime.datetime(year=datetime_nyc_wall.year, month=11, day=1, hour=1, tzinfo=Eastern_Standard_Time_Zone)
end_daylight_savings += datetime.timedelta(days=(6 - end_daylight_savings.date().weekday()))
if ((datetime_nyc_wall >= begin_daylight_savings) and (datetime_nyc_wall <= end_daylight_savings)):
datetime_nyc_wall = Time_Zone_Aware_DateTime.astimezone(Eastern_Daylight_Time_Zone)
return datetime_nyc_wall
def Return_NYC_Wall_Time_String ( UTC_Datetime=None, NYC_Wall_Datetime=None, Time_Zone_Indicator="" ):
if (UTC_Datetime is not None):
datetime_NYC_Wall = NYC_Wall_DateTime(UTC_Datetime)
elif (NYC_Wall_Datetime is not None):
datetime_NYC_Wall = NYC_Wall_Datetime
else:
datetime_NYC_Wall = None
    if (datetime_NYC_Wall is None):
        return "Error"
    isoformatted_datetime_NYC_Wall = datetime_NYC_Wall.isoformat()
    if (Time_Zone_Indicator == "E"):
        return isoformatted_datetime_NYC_Wall[:-6]
    return (isoformatted_datetime_NYC_Wall + Time_Zone_Indicator)
def Period_Span_NYC_Wall_Time ( Period_Hours, Period_End_Hours_Ago ):
datetime_now_utc = datetime.datetime.now(UTC_Time_Zone)
period_end_utc = datetime_now_utc - datetime.timedelta(hours=Period_End_Hours_Ago)
period_begin_utc = period_end_utc - datetime.timedelta(hours=Period_Hours)
period_begin_NYC_Wall = NYC_Wall_DateTime(period_begin_utc)
period_end_NYC_Wall = NYC_Wall_DateTime(period_end_utc)
period_begin_nyc_wall_string = \
Return_NYC_Wall_Time_String(NYC_Wall_Datetime=period_begin_NYC_Wall, Time_Zone_Indicator="E")[:-10].replace("T", " ")
period_end_nyc_wall_string = \
Return_NYC_Wall_Time_String(NYC_Wall_Datetime=period_end_NYC_Wall, Time_Zone_Indicator="E")[:-10].replace("T", " ")
return (calendar.day_abbr[period_begin_NYC_Wall.weekday()] + " " + period_begin_nyc_wall_string + "NYC to " +
calendar.day_abbr[period_end_NYC_Wall.weekday()] + " " + period_end_nyc_wall_string + "NYC")
CW_Remote_Duplex_Layout = True
Force_Duplex_Layout = True
Force_GetMetricWidgetImage = False # True # False #
Cursor_Tracking = True # True # False #
Screen_Manager_App = True # True # False #
Defer_CWapi_Requests = False # True # False #
Defer_CWapi_Requests_by_Seconds = 0.5
Testing_Bypass_Initialization = False # True # False # Should be False unless testing
Testing_Bypass_Initialization_Delay_Seconds = 0 # Should be zero unless testing
# There is a limit of 20 transactions per second for this API.
# Each GetMetricWidgetImage action has the following limits:
# As many as 100 metrics in the graph.
# Up to 100 KB uncompressed payload.
# If zero, no auto-refresh, if greater than zero, the auto-refresh interval in seconds
cw_remote_refresh_interval_seconds = 0 # (1 * 60)
Force_Refresh_Interval_Seconds = -1
Initial_Period_Duration_Hours = 24
Initial_Period_End_Hours_Ago = 0
cw_remote_ini_json = ""
cw_remote_ini = None
path_to_time_slider_cursor = ""
path_to_time_slider_cursor_disabled = ""
path_to_cwremote_screen_image = ""
execution_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
execution_script = os.path.abspath(sys.argv[0])
os_platform = platform.system()
# message_text = execution_directory
# Find json initialization file and image resources
if (os_platform == "Darwin"):
# execution_directory = execution_directory.split("CW_Remote_Qt.app")[0]
execution_directory = re.split("/CW_Remote[0-9a-zA-Z_]*[.](?:app|py)", execution_script)[0] # [0-9a-zA-Z_]+\\.app
def resource_path ( relative_path ):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = execution_directory
return os.path.join(base_path, relative_path)
path_to_icon_image = resource_path(os.path.join("data", "cwremote-icon-512.png"))
path_to_time_slider_cursor = resource_path(os.path.join("data", "time_slider_cursor.png"))
path_to_time_slider_cursor_disabled = resource_path(os.path.join("data", "time_slider_cursor_disabled.png"))
path_to_cwremote_screen_image = resource_path(os.path.join("data", "CW_Remote_Screen.png"))
# Config.set('kivy','window_icon', path_to_icon_image)
# Config.write()
# Initialization_Example_Label_text = path_to_icon_image + ": isfile == " + str(isfile(path_to_icon_image))
try:
if (isfile(join(execution_directory, "CW_Remote.ini"))):
ini_directory = execution_directory
else:
home_dir = expanduser("~")
ini_dir = "Documents/CW_Remote"
ini_directory = join(home_dir, ini_dir)
cw_remote_ini_file = open(join(ini_directory, "CW_Remote.ini"), "r")
cw_remote_ini_json = cw_remote_ini_file.read()
cw_remote_ini_file.close()
except:
cw_remote_ini_json = ""
elif (os_platform == "Linux"):
cw_remote_ini_json = ""
# if (Kivy_Platform == "android"):
# # Pydroid 3 fails to install pycairo
# # Gtk backend requires PyGObject or pgi
# # "Pip" menu item, "Install": pygobject
# # "Running setup.py install for pycairo: finished with status 'error' "
# # ...
# # "No package 'cairo' found"
# # Fix:
# # import matplotlib
# # matplotlib.use('AGG')
#
# try:
# # To run from Pydroid 3
# if (isfile(join(execution_directory, "CW_Remote.ini"))):
# ini_directory = execution_directory
# else:
# home_dir = expanduser("~")
# ini_dir = "Documents/CW_Remote"
# ini_directory = join(home_dir, ini_dir)
#
# path_to_time_slider_cursor = join(ini_directory, "data", "time_slider_cursor.png")
# path_to_time_slider_cursor_disabled = join(ini_directory, "data", "time_slider_cursor_disabled.png")
#
# cw_remote_ini_file = open(join(ini_directory, "CW_Remote.ini"), "r")
# cw_remote_ini_json = cw_remote_ini_file.read()
# cw_remote_ini_file.close()
#
# # documents_dir = "/system/storage/emulated/0/Documents"
# # ini_dir = "CW_Remote"
# # ini_directory = join(documents_dir, ini_dir)
# #
# # cw_remote_ini_file = open(join(ini_directory, "CW_Remote.ini"), "r")
# # cw_remote_ini_json = cw_remote_ini_file.read()
# # cw_remote_ini_file.close()
#
# except:
# cw_remote_ini_json = ""
elif (os_platform == "Windows"):
cw_remote_ini_json = ""
else:
cw_remote_ini_json = ""
# Apply initialization ...
# ... the ini JSON is parsed into a nested structure of dictionaries and lists
if (len(cw_remote_ini_json) > 0):
# This should be the same for any OS platform, ...
# ... but may still fail by being ill-structured
try:
# Load initialization from the JSON ini file
cw_remote_ini = json.loads(cw_remote_ini_json, object_pairs_hook=OrderedDict)
cw_remote_layout = cw_remote_ini.get("layout", '')
if (cw_remote_layout == "paged"): CW_Remote_Duplex_Layout = False
elif (cw_remote_layout == "duplex"): CW_Remote_Duplex_Layout = True
cw_api = cw_remote_ini.get("cw_api", "image") # "image" or "statistics"
if (cw_api == "image"): Force_GetMetricWidgetImage = True
if ("refresh_interval_seconds" in cw_remote_ini):
cw_remote_refresh_interval_seconds = cw_remote_ini["refresh_interval_seconds"]
if (Force_Refresh_Interval_Seconds >= 0):
cw_remote_refresh_interval_seconds = Force_Refresh_Interval_Seconds
# Fractional seconds not supported
cw_remote_refresh_interval_seconds = int(round(cw_remote_refresh_interval_seconds))
if (Force_Duplex_Layout): CW_Remote_Duplex_Layout = True
Alarm_Name_List = cw_remote_ini.get("alarm_name_list", [])
Metric_Descriptor_List = []
ini_metric_descriptor_list = cw_remote_ini.get("metric_descriptor_list", [])
for metric_descr in ini_metric_descriptor_list:
# metric_descr is itself a list, potentially with one element, or with two
this_metric_descriptor = copy.deepcopy(metric_descr)
Metric_Descriptor_List.append(this_metric_descriptor)
Widget_Image_Descriptor_List = []
ini_widget_descriptor_list = cw_remote_ini.get("widget_descriptor_list", [])
for widget_descr in ini_widget_descriptor_list:
this_widget_descriptor = widget_descr.copy()
Widget_Image_Descriptor_List.append(this_widget_descriptor)
if (len(Widget_Image_Descriptor_List) < 2):
CW_Remote_Duplex_Layout = False
if (CW_Remote_Duplex_Layout):
pass
else:
# Not duplex, make graphs "higher", i.e. more vertical resolution
for widget_descr in Widget_Image_Descriptor_List:
widget_descr["height"] = 2 * widget_descr["height"]
# Initialize connection to CloudWatch.
cloudwatch_client = \
boto3.client('cloudwatch',
aws_access_key_id=cw_remote_ini.get("aws_access_id", ''),
aws_secret_access_key=cw_remote_ini.get("aws_secret_key", ''),
region_name=cw_remote_ini.get("region_name", ''))
except:
# except Exception, e:
# If initialization file is missing, don't build usual UI
cw_remote_ini = None
else:
cw_remote_ini = None
from threading import Thread
def Describe_Alarm_History ( Alarm_Name, Alarm_History_Results ):
datetime_now_utc = datetime.datetime.now(UTC_Time_Zone)
datetime_yesterday_utc = datetime_now_utc - datetime.timedelta(days=1)
alarm_history = \
cloudwatch_client.describe_alarm_history(AlarmName=Alarm_Name,
HistoryItemType="StateUpdate", # 'StateUpdate' | 'Action',
StartDate=datetime_yesterday_utc,
EndDate=datetime_now_utc,
MaxRecords=100,
NextToken='')
Alarm_History_Results[Alarm_Name] = alarm_history
def Alarm_History ( ):
if (len(Alarm_Name_List) == 0): return {}
alarm_history_threads = [ None ] * len(Alarm_Name_List)
alarm_history_results = { }
for alarm_index, alarm_name in enumerate(Alarm_Name_List):
alarm_history_threads[alarm_index] = \
Thread(target=Describe_Alarm_History, args=(alarm_name, alarm_history_results))
alarm_history_threads[alarm_index].start()
for alarm_index in range(len(Alarm_Name_List)):
alarm_history_threads[alarm_index].join()
return alarm_history_results
def Optimize_DataPoint_Summary_Seconds ( Period_Hours ):
datapoint_summary_seconds = 60
# The maximum number of data points returned from a single call is 1,440.
# The period for each datapoint can be 1, 5, 10, 30, 60, or any multiple of 60 seconds.
datapoint_count = (Period_Hours * 60 * 60) / datapoint_summary_seconds
while (datapoint_count > 1440):
datapoint_summary_seconds += 60
datapoint_count = (Period_Hours * 60 * 60) / datapoint_summary_seconds
return datapoint_summary_seconds
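# For example (illustrative): a 24-hour window at 60-second granularity is
# exactly 1440 datapoints, so 60 is returned; a 48-hour window would need 2880
# datapoints at 60 seconds, so the loop widens the period to 120 seconds,
# bringing the count back to 1440.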
# These are specific to cw_remote, parameters for generating figures, etc. ...
# There can be a maximum of two figures per page (duplex), or only one (simplex)
_graph_plot_metric_statistics_list = [None, None]
def get_Graph_Plot_Metric_Statistics(Plot_Figure_Index):
global _graph_plot_metric_statistics_list
return _graph_plot_metric_statistics_list[Plot_Figure_Index]
def set_Graph_Plot_Metric_Statistics(Plot_Figure_Index, Graph_Plot_Metric_Statistics):
global _graph_plot_metric_statistics_list
_graph_plot_metric_statistics_list[Plot_Figure_Index] = Graph_Plot_Metric_Statistics
# ... These are specific to cw_remote, parameters for generating figures, etc.
# Code specific to AWS API, fetch data and render into matplotlib friendly form
def Get_Metric_Statistics ( Metric_Descriptor, Metric_Statistics_List, Metric_Statistics_Index ):
raw_metric_statistics = \
cloudwatch_client.get_metric_statistics(MetricName=Metric_Descriptor["MetricName"],
Namespace=Metric_Descriptor["Namespace"],
Dimensions=Metric_Descriptor["Dimensions"],
StartTime=Metric_Descriptor["StartTime"],
EndTime=Metric_Descriptor["EndTime"],
Period=Metric_Descriptor["Period"],
Statistics=Metric_Descriptor["Statistics"],
Unit=Metric_Descriptor["Unit"])
raw_metric_statistics_datapoints = raw_metric_statistics.get("Datapoints", [])
nyc_wall_time_offset = NYC_Wall_DateTime_Offset(Metric_Descriptor["EndTime"])
nyc_wall_datetime_offset = datetime.timedelta(hours=int(nyc_wall_time_offset) / 100)
y_factor = Metric_Descriptor.get("YFactor", 1)
data_point_list = []
for data_point in raw_metric_statistics_datapoints:
data_datetime = data_point["Timestamp"]
# This will return some wrong local time values ...
# ... if StartTime and EndTime straddle standard <=> daylight savings
# The alternative will cause graph to have discontinuity (worse), ...
# ... or duplicates of time values (fatal)
data_datetime = data_datetime + nyc_wall_datetime_offset
data_maximum = data_point["Maximum"] * y_factor
data_average = data_point["Average"] * y_factor
data_point_list.append((data_datetime, data_maximum, data_average))
data_point_list.sort()
data_time_max_list = [(time, max) for time, max, avg in data_point_list]
data_time_avg_list = [(time, avg) for time, max, avg in data_point_list]
prepared_metric_statistics = {}
prepared_metric_statistics["Datapoints_Maximum_List"] = data_time_max_list
prepared_metric_statistics["Datapoints_Average_List"] = data_time_avg_list
prepared_metric_statistics["MetricDescriptor"] = Metric_Descriptor
Metric_Statistics_List[Metric_Statistics_Index] = prepared_metric_statistics
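# Shape of the prepared entry stored above (illustrative):
#     {
#       "Datapoints_Maximum_List": [(datetime, max_value), ...],  # sorted by time
#       "Datapoints_Average_List": [(datetime, avg_value), ...],
#       "MetricDescriptor": the descriptor dict that produced it,
#     }
# Both lists share the same (NYC wall-clock shifted) timestamps, so they can be
# plotted as two series on a single chart.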
Cache_Page_Metrics = True # False # True #
Cache_Page_Metric_Statistics = []
def Page_Get_Metric_Statistics_Datapoints ( Metric_Index_List, Period_End_UTC, Period_Hours ):
global Cache_Page_Metric_Statistics
if (len(Metric_Index_List) == 0): return False
Cache_Page_Metric_Statistics = []
if (not Cache_Page_Metrics): return False
# start_time = time.clock()
period_begin_utc = Period_End_UTC - datetime.timedelta(hours=Period_Hours)
datapoint_summary_seconds = Optimize_DataPoint_Summary_Seconds(Period_Hours)
page_metric_descriptor_list = {}
for list_index, metric_index in enumerate(Metric_Index_List):
metric_descr_list = Metric_Descriptor_List[metric_index]
metric_statistics_list = [None] * len(metric_descr_list)
for descr_index, metric_descr in enumerate(metric_descr_list):
page_metric_descriptor_list[(list_index, descr_index)] = metric_descr
            # The per-graph metric_statistics_list contains a placeholder ...
            # ... for each line on the graph, each line representing one set of metric statistics.
Cache_Page_Metric_Statistics.append(metric_statistics_list)
# For each graph on this page (either one or two) ...
# ... there must be one graph placeholder in the cache.
get_metric_statistics_threads = []
for list_descr_key, metric_descr in page_metric_descriptor_list.items():
list_index, descr_index = list_descr_key
metric_descr["StartTime"] = period_begin_utc
metric_descr["EndTime"] = Period_End_UTC
metric_descr["Period"] = datapoint_summary_seconds
this_thread = Thread(target=Get_Metric_Statistics,
args=(metric_descr, Cache_Page_Metric_Statistics[list_index], descr_index))
# print ("start (", len(get_metric_statistics_threads), "):", (time.clock() - start_time), sep='')
this_thread.start()
get_metric_statistics_threads.append(this_thread)
for thread_index, this_thread in enumerate(get_metric_statistics_threads):
this_thread.join()
# print("end (", thread_index, "):", (time.clock() - start_time), sep='')
class Control_Bar ( QtWidgets.QFrame ):
metricsUpdate = QtCore.Signal()
metricsPrevious = QtCore.Signal()
metricsNext = QtCore.Signal()
metricsDuplex = QtCore.Signal()
metricsSimplex = QtCore.Signal()
Period_Duration_Hours_Steps = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 18, 20, 22, 24, # 18
26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, # 12
50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, # 12
74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, # 12
100, 104, 108, 112, 116, 120, # 6
124, 128, 132, 136, 140, 144, # 6
148, 152, 156, 160, 164, 168] # 6
Period_End_Hours_Ago_Steps = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 18, 20, 22, 24, # 19
26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, # 12
50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, # 12
74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, # 12
100, 104, 108, 112, 116, 120, # 6
124, 128, 132, 136, 140, 144, # 6
148, 152, 156, 160, 164, 168] # 6
def __init__(self, **kwargs):
super(Control_Bar, self).__init__(**kwargs)
self._tooltip = QtWidgets.QToolTip()
self.setMouseTracking(True)
self.__period_duration_hours = Initial_Period_Duration_Hours
self.__period_end_hours_ago = Initial_Period_End_Hours_Ago
slider_minimum_value = 0
period_duration_slider_maximum_value = 999
self.period_duration_slider_value_span = period_duration_slider_maximum_value - slider_minimum_value
period_end_slider_maximum_value = 1000
self.period_end_slider_value_span = period_end_slider_maximum_value - slider_minimum_value
self.setContentsMargins(0, 0, 0, 0)
self.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.setAutoFillBackground(True)
self.setLineWidth(0)
self.setMaximumHeight(32)
# self.setFrameStyle(QtWidgets.QFrame.Plain)
control_bar_layout = QtWidgets.QHBoxLayout()
control_bar_layout.setMargin(0)
control_bar_layout.setContentsMargins(0, 0, 0, 0)
control_bar_layout.setSpacing(2)
control_bar_layout.setAlignment(Qt.AlignCenter)
self.__previous_pushbutton = QtWidgets.QPushButton("⬆", parent=self)
self.__previous_pushbutton.setFixedWidth(56)
self.__previous_pushbutton.clicked.connect(self.__emit_previous_signal)
self.__previous_pushbutton.setMouseTracking(True)
control_bar_layout.addWidget(self.__previous_pushbutton)
self.__period_duration_hours_label = QtWidgets.QLabel("24H", parent=self)
self.__period_duration_hours_label.setFixedWidth(76)
self.__period_duration_hours_label.setAlignment(Qt.AlignCenter)
control_bar_layout.addWidget(self.__period_duration_hours_label)
self.__period_duration_hours_slider = QtWidgets.QSlider(Qt.Horizontal, parent=self)
self.__period_duration_hours_slider.setMinimumWidth(360) # 5 pixels per step, 72 steps
self.__period_duration_hours_slider.setMinimum(slider_minimum_value)
self.__period_duration_hours_slider.setMaximum(period_duration_slider_maximum_value)
self.__period_duration_hours_slider.setValue(slider_minimum_value)
self.__period_duration_hours_slider.valueChanged.connect(self.__on_period_duration_hours_value_change)
self.__period_duration_hours_slider.sliderReleased.connect(self.__emit_update_signal)
self.__period_duration_hours_slider.setMouseTracking(True)
control_bar_layout.addWidget(self.__period_duration_hours_slider)
# min = slider_minimum_value, max = period_duration_slider_maximum_value,
# value = period_duration_slider_maximum_value, step = 1, size_hint = (0.4, 1))
self.__duplex_pushbutton = QtWidgets.QPushButton("2", parent=self)
self.__duplex_pushbutton.clicked.connect(self.__emit_duplex_signal)
self.__duplex_pushbutton.setFixedWidth(32)
self.__duplex_pushbutton.setDown(True)
self.__duplex_pushbutton.setMouseTracking(True)
control_bar_layout.addWidget(self.__duplex_pushbutton)
refresh_pushbutton = QtWidgets.QPushButton("Refresh", parent=self)
refresh_pushbutton.clicked.connect(self.__emit_update_signal)
refresh_pushbutton.setFixedWidth(104)
control_bar_layout.addWidget(refresh_pushbutton)
self.__simplex_pushbutton = QtWidgets.QPushButton("1", parent=self)
self.__simplex_pushbutton.clicked.connect(self.__emit_simplex_signal)
self.__simplex_pushbutton.setFixedWidth(32)
self.__simplex_pushbutton.setMouseTracking(True)
control_bar_layout.addWidget(self.__simplex_pushbutton)
self.__period_end_hours_ago_slider = QtWidgets.QSlider(Qt.Horizontal, parent=self)
self.__period_end_hours_ago_slider.setMinimumWidth(360) # 5 pixels per step, 72 steps
self.__period_end_hours_ago_slider.setMinimum(slider_minimum_value)
self.__period_end_hours_ago_slider.setMaximum(period_end_slider_maximum_value)
self.__period_end_hours_ago_slider.setValue(period_end_slider_maximum_value)
self.__period_end_hours_ago_slider.valueChanged.connect(self.__on_period_end_hours_ago_value_change)
self.__period_end_hours_ago_slider.sliderReleased.connect(self.__emit_update_signal)
self.__period_end_hours_ago_slider.setMouseTracking(True)
control_bar_layout.addWidget(self.__period_end_hours_ago_slider)
# min = slider_minimum_value, max = period_end_slider_maximum_value,
# value = period_end_slider_maximum_value, step = 1, size_hint = (0.4, 1))
self.__period_end_hours_ago_label = QtWidgets.QLabel("0H ago", parent=self)
self.__period_end_hours_ago_label.setFixedWidth(76)
self.__period_end_hours_ago_label.setAlignment(Qt.AlignCenter)
control_bar_layout.addWidget(self.__period_end_hours_ago_label)
self.__next_pushbutton = QtWidgets.QPushButton("⬇", parent=self)
self.__next_pushbutton.setFixedWidth(56)
self.__next_pushbutton.clicked.connect(self.__emit_next_signal)
self.__next_pushbutton.setMouseTracking(True)
control_bar_layout.addWidget(self.__next_pushbutton)
self.setLayout(control_bar_layout)
self.set_period_duration_hours_value(self.__period_duration_hours)
self.set_period_end_hours_ago_value(self.__period_end_hours_ago)
# Public functions (used to synchronize multiple TimeSpanControlBars) ...
def get_period_duration_hours_value ( self ):
return self.__period_duration_hours
def set_period_duration_hours_value ( self, period_duration_hours_value, *args ):
self.__period_duration_hours = period_duration_hours_value
self.__period_duration_hours_label.setText(self.__period_value_display(self.__period_duration_hours))
self.__period_duration_hours_slider.setValue \
(1000 - int(round(self.period_duration_slider_value_span *
(self.Period_Duration_Hours_Steps.index(self.__period_duration_hours) /
len(self.Period_Duration_Hours_Steps)))))
def get_period_end_hours_ago_value ( self ):
return self.__period_end_hours_ago
def set_period_end_hours_ago_value ( self, period_end_hours_ago_value, *args ):
self.__period_end_hours_ago = period_end_hours_ago_value
self.__period_end_hours_ago_slider.setValue \
(1000 - int(round(self.period_end_slider_value_span *
(self.Period_End_Hours_Ago_Steps.index(self.__period_end_hours_ago) /
len(self.Period_End_Hours_Ago_Steps)))))
        self.__period_end_hours_ago_label.setText(self.__period_value_display(self.__period_end_hours_ago) + " ago")
# ... Public functions (used to synchronize multiple TimeSpanControlBars)
# Private functions ...
def __period_value_display ( self, Period_Value ):
period_value_string = ""
if ((Period_Value // 24) > 0): period_value_string += str(Period_Value // 24) + "D"
if (((Period_Value % 24) > 0) or (len(period_value_string) == 0)):
if (len(period_value_string) > 0): period_value_string += " "
period_value_string += str(Period_Value % 24) + "H"
return period_value_string
def __on_period_duration_hours_value_change ( self, period_duration_slider_value, *args ):
# print (period_duration_slider_value)
period_value_index = \
int(round(len(self.Period_Duration_Hours_Steps) *
(abs(period_duration_slider_value - 1000) / self.period_duration_slider_value_span)))
self.__period_duration_hours = \
self.Period_Duration_Hours_Steps[bound(0, (len(self.Period_Duration_Hours_Steps) - 1), period_value_index)]
self.__period_duration_hours_label.setText(self.__period_value_display(self.__period_duration_hours))
# print (period_duration_slider_value, period_value_index, self.__period_duration_hours, self.period_duration_label.text)
return True
def __on_period_end_hours_ago_value_change ( self, period_end_slider_value, *args ):
period_end_value_index = \
int(round(len(self.Period_End_Hours_Ago_Steps) *
(abs(period_end_slider_value - 1000) / self.period_end_slider_value_span)))
self.__period_end_hours_ago = \
            self.Period_End_Hours_Ago_Steps[bound(0, (len(self.Period_End_Hours_Ago_Steps) - 1), period_end_value_index)]
self.__period_end_hours_ago_label.setText(self.__period_value_display(self.__period_end_hours_ago) + " ago")
return True
# ... Private functions
def __emit_update_signal ( self ):
self.metricsUpdate.emit()
def __emit_previous_signal ( self ):
self.metricsPrevious.emit()
def __emit_next_signal ( self ):
self.metricsNext.emit()
def __emit_duplex_signal ( self ):
if (not self.__duplex_pushbutton.isDown()):
self.__duplex_pushbutton.setDown(True)
self.__simplex_pushbutton.setDown(False)
self.metricsDuplex.emit()
def __emit_simplex_signal ( self ):
if (not self.__simplex_pushbutton.isDown()):
self.__duplex_pushbutton.setDown(False)
self.__simplex_pushbutton.setDown(True)
self.metricsSimplex.emit()
def mouseMoveEvent ( self, event ):
if (self.rect().contains(event.pos())):
control_bar_pos = self.pos()
tooltip_pos = event.pos()
tooltip_pos.setX(control_bar_pos.x() + tooltip_pos.x())
tooltip_pos.setY(control_bar_pos.y() + tooltip_pos.y() + 100)
if (self.__previous_pushbutton.geometry().contains(event.pos())):
self._tooltip.showText(tooltip_pos, "Previous graph page")
elif (self.__period_duration_hours_slider.geometry().contains(event.pos())):
self._tooltip.showText(tooltip_pos, "Adjust period of displayed metrics in hours")
elif (self.__duplex_pushbutton.geometry().contains(event.pos())):
self._tooltip.showText(tooltip_pos, "Duplex page (two graphs)")
elif (self.__simplex_pushbutton.geometry().contains(event.pos())):
self._tooltip.showText(tooltip_pos, "Simplex page (one graph)")
elif (self.__period_end_hours_ago_slider.geometry().contains(event.pos())):
self._tooltip.showText(tooltip_pos, "Adjust hours ago of end of displayed metrics period")
elif (self.__next_pushbutton.geometry().contains(event.pos())):
self._tooltip.showText(tooltip_pos, "Next graph page")
else: self._tooltip.hideText()
else: self._tooltip.hideText()
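# Both sliders in Control_Bar map a raw 0..~1000 position onto the nearest
# entry of a non-uniform step table (hours). A standalone sketch of that
# mapping, using a plain clamp in place of the bound() helper used above:
def Slider_Value_To_Step(Slider_Value, Steps, Slider_Span=999):
    step_index = int(round(len(Steps) * (abs(Slider_Value - 1000) / Slider_Span)))
    step_index = max(0, min((len(Steps) - 1), step_index))  # clamp to valid indices
    return Steps[step_index]
# Example: Slider_Value_To_Step(999, Control_Bar.Period_Duration_Hours_Steps) == 1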
class Zoomable_Chart ( QtCharts.QChartView ):
def __init__(self, Chart_Metric_Statistics_List, **kwargs):
super(Zoomable_Chart, self).__init__(**kwargs)
self.setContentsMargins(0, 0, 0, 0)
self._zoom_level = 0
self._zoom_factor = 4
self._tooltip = QtWidgets.QToolTip()
self._chart = QtCharts.QChart()
self._chart.setBackgroundRoundness(0)
self._chart.layout().setContentsMargins(0, 0, 0, 0)
chart_margins = self._chart.margins()
chart_margins.setTop(0)
chart_margins.setBottom(0)
self._chart.setMargins(chart_margins)
self._axisX = QtCharts.QDateTimeAxis()
self._chart.addAxis(self._axisX, Qt.AlignBottom)
self._axisY_left = QtCharts.QValueAxis()
self._chart.addAxis(self._axisY_left, Qt.AlignLeft)
self._axisY_right = QtCharts.QValueAxis()
self._chart.addAxis(self._axisY_right, Qt.AlignRight)
# metric_statistics_list = Graph_Index_0
self.__chart_data_load(Chart_Metric_Statistics_List)
self.setChart(self._chart)
self.setRenderHint(QPainter.Antialiasing)
def setChartData ( self, Chart_Metric_Statistics_List ):
self._chart.removeAllSeries()
# Axes "remember" their previous state, ...
# ... if not removed, previous min/max values will displayed
self._chart.removeAxis(self._axisX)
if (self._axisY_left is not None):
self._chart.removeAxis(self._axisY_left)
if (self._axisY_right is not None):
self._chart.removeAxis(self._axisY_right)
# Replace with new "stateless" axes
self._axisX = QtCharts.QDateTimeAxis()
self._chart.addAxis(self._axisX, Qt.AlignBottom)
self._axisY_left = QtCharts.QValueAxis()
self._chart.addAxis(self._axisY_left, Qt.AlignLeft)
self._axisY_right = QtCharts.QValueAxis()
self._chart.addAxis(self._axisY_right, Qt.AlignRight)
self.__chart_data_load(Chart_Metric_Statistics_List)
# self.repaint()
def __chart_data_load (self, Chart_Metric_Statistics_List ):
data_min_datetime = None
data_max_datetime = None
data_min_value_left = None
data_max_value_left = None
data_min_value_right = None
data_max_value_right = None
metric_statistics = Chart_Metric_Statistics_List[0]
datapoints = metric_statistics.get("Datapoints_Maximum_List", [])
# series_data = Metric_Statistics_Datapoints_Time_and_Values(datapoints)
# data_min_datetime = series_data[0][0]
# data_max_datetime = series_data[-1][0]
data_min_datetime = datapoints[0][0]
data_max_datetime = datapoints[-1][0]
data_min_datetime_quantized = \
data_min_datetime - datetime.timedelta(minutes=data_min_datetime.minute,
seconds=data_min_datetime.second)
data_max_datetime_quantized = \
data_max_datetime + datetime.timedelta(minutes=((60 - data_max_datetime.minute) % 60),
seconds=((60 - data_max_datetime.second) % 60))
workaround_line_series = QtCharts.QLineSeries()
workaround_line_series.append(QDateTime(data_min_datetime_quantized).toMSecsSinceEpoch(), 0)
workaround_line_series.append(QDateTime(data_max_datetime_quantized).toMSecsSinceEpoch(), 0)
pen = workaround_line_series.pen()
pen.setWidth(1)
pen.setColor("lightgray")
workaround_line_series.setPen(pen)
self._chart.addSeries(workaround_line_series)
workaround_line_series.attachAxis(self._axisX)
workaround_line_series.attachAxis(self._axisY_left)
left_y_axis_series_count = 0
right_y_axis_series_count = 0
for metric_index, metric_statistics in enumerate(reversed(Chart_Metric_Statistics_List)):
metric_statistics_descriptor = metric_statistics.get("MetricDescriptor", {})
datapoints = metric_statistics.get("Datapoints_Maximum_List", [])
which_y_axis = metric_statistics_descriptor.get("YAxis", "left")
if (which_y_axis == "left"):
left_y_axis_series_count += 1
elif (which_y_axis == "right"):
right_y_axis_series_count += 1
# datapoints = metric_statistics.get("Datapoints", [])
# series_data = Metric_Statistics_Datapoints_Time_and_Values(datapoints)
if ((data_min_datetime is None) or (datapoints[0][0] < data_min_datetime)):
data_min_datetime = datapoints[0][0]
if ((data_max_datetime is None) or (datapoints[-1][0] > data_max_datetime)):
data_max_datetime = datapoints[-1][0]
data_min_datetime_quantized = \
data_min_datetime - datetime.timedelta(minutes=data_min_datetime.minute,
seconds=data_min_datetime.second)
data_max_datetime_quantized = \
data_max_datetime + datetime.timedelta(minutes=((60 - data_max_datetime.minute) % 60),
seconds=((60 - data_max_datetime.second) % 60))
line_series = QtCharts.QLineSeries()
for point_time, point_value in datapoints:
if (which_y_axis == "right"):
if ((data_min_value_right is None) or (point_value < data_min_value_right)):
data_min_value_right = point_value
if ((data_max_value_right is None) or (point_value > data_max_value_right)):
data_max_value_right = point_value
elif (which_y_axis == "left"):
if ((data_min_value_left is None) or (point_value < data_min_value_left)):
data_min_value_left = point_value
if ((data_max_value_left is None) or (point_value > data_max_value_left)):
data_max_value_left = point_value
line_series.append(QDateTime(point_time).toMSecsSinceEpoch(), point_value)
line_color = tuple([int(255 * color_value)
for color_value in metric_statistics_descriptor.get("Color", [0, 0, 0])])
pen = line_series.pen()
pen.setWidth(0)
pen.setColor(QColor(*line_color))
line_series.setPen(pen)
self._chart.addSeries(line_series)
line_series.attachAxis(self._axisX)
if (which_y_axis == "right"):
line_series.attachAxis(self._axisY_right)
elif (which_y_axis == "left"):
line_series.attachAxis(self._axisY_left)
line_series.setName(metric_statistics_descriptor.get("MetricLabel", " "))
        self._axisX.setMin(QDateTime(data_min_datetime_quantized).toMSecsSinceEpoch())
self._axisX.setMax(QDateTime(data_max_datetime_quantized).toMSecsSinceEpoch())
delta_hours = (data_max_datetime_quantized - data_min_datetime_quantized).total_seconds() // (60 * 60)
delta_ticks = 12
for factor in [13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2]:
if ((delta_hours % factor) == 0):
delta_ticks = factor
break
self._axisX.setTickCount(delta_ticks + 1)
self._axisX.setFormat("hh:mm'\n'MM/dd")
if (left_y_axis_series_count == 0):
self._chart.removeAxis(self._axisY_left)
self._axisY_left = None
else:
# self._axisY_left.setTickAnchor(0)
# self._axisY_left.setTickInterval(1)
scale_factor = self._compute_optimal_axis_scale_factor(data_max_value_left)
data_max_value_left_scaled = int(round(((data_max_value_left * 1.025) / scale_factor) + 0.5))
self._axisY_left.setTickCount(data_max_value_left_scaled + 1)
self._axisY_left.setRange(0, (scale_factor * data_max_value_left_scaled))
pen_color = self._axisY_left.linePen().color()
pen_color.setNamedColor("#ccccff")
self._axisY_left.setLinePenColor(pen_color)
self._axisY_left.setGridLineColor(pen_color)
# self._axisY_left.setTickType(QtCharts.QValueAxis.TicksDynamic)
# self._axisY_left.setTickAnchor(0)
# unit_interval = 10**math.floor(math.log10(1.025 * data_max_value_left))
# for unit_interval_scale in [1, 2, 4, 5, 8]:
# if ((data_max_value_left / (unit_interval / unit_interval_scale)) >= 4):
# unit_interval = unit_interval / unit_interval_scale
# break
# self._axisY_left.setTickInterval(unit_interval)
# self._axisY_left.setRange(0, (1.025 * data_max_value_left))
#
# # scale_factor = self._compute_optimal_axis_scale_factor(data_max_value_left)
# # data_max_value_left_scaled = int(round(((data_max_value_left * 1.025) / scale_factor) + 0.5))
# # self._axisY_left.setTickCount(data_max_value_left_scaled + 1)
# # self._axisY_left.setRange(0, (scale_factor * data_max_value_left_scaled))
if (right_y_axis_series_count == 0):
self._chart.removeAxis(self._axisY_right)
self._axisY_right = None
else:
# self._axisY_right.setTickAnchor(0)
# self._axisY_right.setTickInterval(1)
scale_factor = self._compute_optimal_axis_scale_factor(data_max_value_right)
data_max_value_right_scaled = int(round(((data_max_value_right * 1.025) / scale_factor) + 0.5))
self._axisY_right.setTickCount(data_max_value_right_scaled + 1)
self._axisY_right.setRange(0, (scale_factor * data_max_value_right_scaled))
pen_color = self._axisY_right.linePen().color()
pen_color.setNamedColor("#ffcccc")
self._axisY_right.setLinePenColor(pen_color)
self._axisY_right.setGridLineColor(pen_color)
# self._chart.legend().setVisible(False)
legend = self._chart.legend()
legend.markers(workaround_line_series)[0].setVisible(False)
legend.setVisible(True)
legend.setAlignment(Qt.AlignTop)
legend.setContentsMargins(0, 0, 0, 0)
legend.layout().setContentsMargins(0, 0, 0, 0)
def _compute_optimal_axis_scale_factor ( self, data_maximum_value ):
scale_factor = 0
scale_factor_power = 1 / (10 * 1000 * 1000)
while ((scale_factor == 0) and (scale_factor_power <= (1000 * 1000))):
scale_factor_power *= 10
for scale_factor_multiplier in [1, 2, 4, 5, 8]:
scale_factor_ceiling = scale_factor_power * scale_factor_multiplier
if (data_maximum_value <= scale_factor_ceiling):
scale_factor = scale_factor_ceiling / 10
break
if (scale_factor == 0): scale_factor = (1000 * 1000)
return scale_factor
def mousePressEvent ( self, event ):
if (self._zoom_level == 0):
# chart_rect = self._rect()
# mouse_press_position = event.pos()
plotarea_rect = self._chart.plotArea()
zoom_area_left = \
max((event.pos().x() - (plotarea_rect.width() / (2 * self._zoom_factor))), plotarea_rect.left())
zoom_area_top = \
max((event.pos().y() - (plotarea_rect.height() / (2 * self._zoom_factor))), plotarea_rect.top())
zoom_area_width = plotarea_rect.width() / self._zoom_factor
zoom_area_height = plotarea_rect.height() / self._zoom_factor
if ((zoom_area_left + zoom_area_width) > (plotarea_rect.left() + plotarea_rect.width())):
zoom_area_left = (plotarea_rect.left() + plotarea_rect.width()) - zoom_area_width
if ((zoom_area_top + zoom_area_height) > (plotarea_rect.top() + plotarea_rect.height())):
zoom_area_top = (plotarea_rect.top() + plotarea_rect.height()) - zoom_area_height
zoom_rect = QRectF(zoom_area_left, zoom_area_top, zoom_area_width, zoom_area_height)
self._chart.zoomIn(zoom_rect)
self._zoom_level += 1
else:
self._chart.zoomReset()
self._zoom_level -= 1
def mouseMoveEvent ( self, event ):
self_pos = self.pos()
plotarea_rect = self._chart.plotArea()
if (not plotarea_rect.contains(event.pos())):
self._tooltip.hideText()
else:
mouse_move_position = event.pos()
tooltip_text = ''
chart_series_list = self._chart.series()
# First series is workaround for datetime axis labeling issue
mouse_move_point_left = self._chart.mapToValue(mouse_move_position, chart_series_list[1])
mouse_move_point_right = self._chart.mapToValue(mouse_move_position, chart_series_list[-1])
mouse_move_datetime = QDateTime()
mouse_move_datetime.setMSecsSinceEpoch(mouse_move_point_right.x())
tooltip_text += mouse_move_datetime.toString("yyyy-MM-dd HH:mm")
tooltip_text += '\n' + "L: " + str(round(mouse_move_point_left.y(), 2))
tooltip_text += " R: " + str(round(mouse_move_point_right.y(), 2))
mouse_move_plotarea_x = mouse_move_position.x() - plotarea_rect.left()
if (mouse_move_plotarea_x < (plotarea_rect.width() / 2)):
tooltip_pos_offset_x = 100
else:
tooltip_pos_offset_x = -25
mouse_move_plotarea_y = mouse_move_position.y() - plotarea_rect.top()
if (mouse_move_plotarea_y < (plotarea_rect.height() / 2)):
tooltip_pos_offset_y = 125
else:
tooltip_pos_offset_y = 50
tooltip_pos = event.pos()
tooltip_pos.setX(self_pos.x() + tooltip_pos.x() + tooltip_pos_offset_x)
tooltip_pos.setY(self_pos.y() + tooltip_pos.y() + tooltip_pos_offset_y)
self._tooltip.showText(tooltip_pos, tooltip_text)
# chart_rect = self.rect()
# plotarea_rect = self._chart.plotArea()
# for chart_series in chart_series_list[1:]: # First series workaround for datetime axis labeling issue
# point_list = chart_series.points()
# def mouseReleaseEvent ( self, event ):
# pass
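# Worked example of the "nice" axis scaling computed by
# _compute_optimal_axis_scale_factor above: for a data maximum of 735, the first
# ceiling from {1, 2, 4, 5, 8} x 10^n that covers it is 800, so the scale factor
# is 800 / 10 = 80; with the 1.025 headroom the axis then runs from 0 to 800
# with a gridline every 80 units (11 tick labels). A standalone sketch of the
# same idea, independent of the chart widget:
def Nice_Axis_Range(Data_Maximum_Value, Headroom=1.025):
    scale_factor_power = 1 / (10 * 1000 * 1000)
    scale_factor = 1000 * 1000  # fallback, as in the widget
    while scale_factor_power <= (1000 * 1000):
        scale_factor_power *= 10
        covering = [scale_factor_power * m for m in [1, 2, 4, 5, 8]
                    if Data_Maximum_Value <= scale_factor_power * m]
        if covering:
            scale_factor = covering[0] / 10
            break
    tick_steps = int(round(((Data_Maximum_Value * Headroom) / scale_factor) + 0.5))
    return (scale_factor * tick_steps), (tick_steps + 1)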
class CW_Remote_Screen ( QtWidgets.QWidget ):
def __init__(self, **kwargs):
super(CW_Remote_Screen, self).__init__(**kwargs)
self.Visible_Graph_Count = 2
self.Graph_Offset = 0
self.setContentsMargins(0, 0, 0, 0)
self.Period_Duration_Hours = Initial_Period_Duration_Hours
self.Period_End_Hours_Ago = Initial_Period_End_Hours_Ago
datetime_now_utc = datetime.datetime.now(UTC_Time_Zone)
period_end_utc = datetime_now_utc - datetime.timedelta(hours=self.Period_End_Hours_Ago)
Page_Get_Metric_Statistics_Datapoints([(self.Graph_Offset + 0), (self.Graph_Offset + 1)],
period_end_utc, self.Period_Duration_Hours)
main_window_layout = QtWidgets.QVBoxLayout()
main_window_layout.setMargin(0)
main_window_layout.setContentsMargins(0, 0, 0, 0)
main_window_layout.setSpacing(0)
self.__empty_label = QtWidgets.QLabel('')
self.__empty_label.setMaximumHeight(1)
# main_window_layout.addWidget(self.__empty_label)
# message_label = QtWidgets.QLabel(message_text)
# main_window_layout.addWidget(message_label)
if (Cache_Page_Metrics and (len(Cache_Page_Metric_Statistics) > 0)):
metric_statistics_list = Cache_Page_Metric_Statistics[0]
self.__zoomable_chart_upper = Zoomable_Chart(metric_statistics_list)
            main_window_layout.addWidget(self.__zoomable_chart_upper, stretch=1)
self.__control_bar = Control_Bar()
self.__control_bar.metricsUpdate.connect(self.__update_page_metrics)
self.__control_bar.metricsPrevious.connect(self.__previous_page_metrics)
self.__control_bar.metricsNext.connect(self.__next_page_metrics)
self.__control_bar.metricsDuplex.connect(self.__duplex_metrics)
self.__control_bar.metricsSimplex.connect(self.__simplex_metrics)
main_window_layout.addWidget(self.__control_bar, stretch=0)
if (Cache_Page_Metrics and (len(Cache_Page_Metric_Statistics) > 1)):
metric_statistics_list = Cache_Page_Metric_Statistics[1]
self.__zoomable_chart_lower = Zoomable_Chart(metric_statistics_list)
            main_window_layout.addWidget(self.__zoomable_chart_lower, stretch=1)
self.setLayout(main_window_layout)
self.__timer = QtCore.QTimer()
self.__timer.timeout.connect(self.__update_page_metrics)
self.__timer.start(60000) # 60 seconds in milliseconds
def __update_page_metrics ( self, *args ):
self.Period_Duration_Hours = self.__control_bar.get_period_duration_hours_value()
self.Period_End_Hours_Ago = self.__control_bar.get_period_end_hours_ago_value()
self.set_main_window_title()
datetime_now_utc = datetime.datetime.now(UTC_Time_Zone)
period_end_utc = datetime_now_utc - datetime.timedelta(hours=self.Period_End_Hours_Ago)
Page_Get_Metric_Statistics_Datapoints([(self.Graph_Offset + 0), (self.Graph_Offset + 1)],
period_end_utc, self.Period_Duration_Hours)
if (Cache_Page_Metrics and (len(Cache_Page_Metric_Statistics) > 0)):
metric_statistics_list = Cache_Page_Metric_Statistics[0]
self.__zoomable_chart_upper.setChartData(metric_statistics_list)
if (Cache_Page_Metrics and (len(Cache_Page_Metric_Statistics) > 1)):
metric_statistics_list = Cache_Page_Metric_Statistics[1]
self.__zoomable_chart_lower.setChartData(metric_statistics_list)
def __previous_page_metrics ( self, *args ):
if (self.Visible_Graph_Count == 2):
if ((self.Graph_Offset % 2) == 1):
# If odd, then 1, 3, 5 ..., move back to even alignment
self.Graph_Offset -= 1
elif (self.Graph_Offset > 1):
self.Graph_Offset -= 2
else: self.Graph_Offset = 0
else:
if (self.Graph_Offset > 0):
self.Graph_Offset -= 1
else: self.Graph_Offset = 0
self.__update_page_metrics()
def __next_page_metrics ( self, *args ):
descriptor_list_length = len(Metric_Descriptor_List)
if (self.Visible_Graph_Count == 2):
# We are displaying two graphs, must account for second graph after skipping ahead
if ((self.Graph_Offset + 2 + 1) < descriptor_list_length):
self.Graph_Offset += 2
elif ((self.Graph_Offset + 1 + 1) < descriptor_list_length):
                # If at the end save one graph, move ahead by one for odd alignment
self.Graph_Offset += 1
else:
if ((self.Graph_Offset + 1) < descriptor_list_length):
self.Graph_Offset += 1
self.__update_page_metrics()
def __duplex_metrics ( self, *args ):
# self.__zoomable_chart_upper.show()
main_window_layout = self.layout()
main_window_layout.replaceWidget(self.__empty_label, self.__zoomable_chart_upper, options=Qt.FindDirectChildrenOnly)
self.__zoomable_chart_lower.setMaximumHeight(self.__zoomable_chart_lower_height)
self.__zoomable_chart_upper.setMaximumHeight(self.__zoomable_chart_upper_height)
self.Graph_Offset = max((self.Graph_Offset - 1), 0)
self.Visible_Graph_Count = 2
# self.__update_page_metrics()
self.update()
def __simplex_metrics(self, *args):
descriptor_list_length = len(Metric_Descriptor_List)
self.__zoomable_chart_upper_height = self.__zoomable_chart_upper.height()
self.__zoomable_chart_lower_height = self.__zoomable_chart_lower.height()
main_window_layout = self.layout()
main_window_layout.replaceWidget(self.__zoomable_chart_upper, self.__empty_label, options=Qt.FindDirectChildrenOnly)
self.Graph_Offset = min((self.Graph_Offset + 1), (descriptor_list_length - 1))
self.Visible_Graph_Count = 1
self.update()
def set_main_window_title ( self ):
main_window = QtWidgets.QApplication.activeWindow()
if (main_window is not None):
main_window.setWindowTitle("CW_Remote" + " (" +
Period_Span_NYC_Wall_Time(self.Period_Duration_Hours,
self.Period_End_Hours_Ago) + ")")
class Main_Window ( QtWidgets.QMainWindow ):
def __init__(self, **kwargs):
super(Main_Window, self).__init__(**kwargs)
self.setWindowTitle("CW_Remote" + " (" +
Period_Span_NYC_Wall_Time(Initial_Period_Duration_Hours,
Initial_Period_End_Hours_Ago) + ")")
self.setContentsMargins(0, 0, 0, 0)
self.cw_remote_screen_widget = CW_Remote_Screen(parent=self)
self.cw_remote_screen_widget.setContentsMargins(0, 0, 0, 0)
self.setCentralWidget(self.cw_remote_screen_widget)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = Main_Window()
window.setContentsMargins(0, 0, 0, 0)
window.resize(1280, 800)
# window_size = window.frameSize()
# desktop_geometry = PySide2.QtWidgets.QDesktopWidget().screenGeometry()
# window.move((desktop_geometry.width() / 2) - (window_size.width() / 2),
# (desktop_geometry.height() / 2) - (window_size.height() / 2))
window_rect = window.frameGeometry()
desktop_center = QtWidgets.QDesktopWidget().screenGeometry().center()
window_rect.moveCenter(desktop_center)
window.move(window_rect.topLeft())
window.show()
sys.exit(app.exec_())
|
f.py
|
from pynput.keyboard import Key,Listener
import ctypes
from os import system
from threading import Thread
from win32gui import GetWindowText, GetForegroundWindow
from pywinauto.application import Application
from WConio2 import getkey,setcursortype,clrscr
system('mode 55,10')
system('color 04')
print('svasti')
ctypes.windll.kernel32.SetConsoleTitleW('vināsho\'sti')
class run():
def __init__(self):
self.__active_status = False
self.__index = 0
        self.__th = Thread(target=self.__change)
        self.__th.daemon = True
self.__th.start()
self.start_file = []
self.__active_status = False
self.__pass = False
with Listener(on_press=self.__wait) as shrotA:
shrotA.join()
if self.__active_status:
self.__dig.close()
def __wait(self,key):
try:
key = key.name
except AttributeError:
key = key.char
        if key is None:
            return
        if 'end' in key or 'esc' in key:
return False
elif 'shift' in key and self.__pass:
self.__play()
def __change(self):
setcursortype(0)
a = 'hariom'
index = 0
while index != len(a):
b = getkey()
if b == a[index]:
index += 1
else:
index = 0
print('susvasti')
self.__pass = True
system('color b')
def __play(self):
a = GetWindowText(GetForegroundWindow())
app = Application().connect(title=a)
self.__dig=app[a]
clrscr()
print(a)
self.__active_status = True
run()
|
auto_reset_event_test.py
|
import threading
import time
from unittest import TestCase
from puma.primitives import AutoResetEvent
class AutoResetEventTest(TestCase):
    # AutoResetEvent is derived from threading.Event and only overrides the wait() method.
    # We only test the modified behaviour, not the base class's behaviour.
def test_cleared_if_preset(self) -> None:
event = AutoResetEvent()
event.set()
self.assertTrue(event.is_set())
self.assertTrue(event.wait(timeout=None))
self.assertFalse(event.is_set())
def test_cleared_if_wait_successful(self) -> None:
event = AutoResetEvent()
self.assertFalse(event.is_set())
thread = threading.Thread(target=self._set_after_delay, args=[event])
thread.start()
try:
t1 = time.perf_counter()
ret = event.wait(timeout=10.0)
t2 = time.perf_counter()
self.assertTrue(ret)
self.assertFalse(event.is_set())
self.assertGreaterEqual(t2 - t1, 0.1)
self.assertLess(t2 - t1, 1.0)
finally:
thread.join()
@staticmethod
def _set_after_delay(event: AutoResetEvent) -> None:
time.sleep(0.1)
event.set()
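# The tests above only exercise the overridden wait(): a successful wait (or a
# flag that was already set) clears the event again so the next wait() blocks.
# A minimal sketch of that contract under a hypothetical name, so it does not
# shadow the real puma.primitives class, whose implementation may differ:
class _SketchAutoResetEvent(threading.Event):
    def wait(self, timeout=None):
        signalled = super().wait(timeout)
        if signalled:
            self.clear()  # auto-reset: subsequent wait() calls block until set() again
        return signalled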
|
client.py
|
# import blockchain
from ecdsa.keys import VerifyingKey
from hashutils import hash_object
import socket
import threading
import os
import time
import hashlib
import shutil
from blockchain import Blockchain, Block
from card import Card
import uuid
from pickle import dumps, loads
import pandas as pd
import numpy as np
import random
from uuid import uuid4
from ecdsa import SigningKey
import getopt
from pynput.keyboard import Key, Controller
import sys
from time import sleep
class Trainer:
def __init__(self, name, host, port):
self.host = host
self.port = port
self.name = name
self.my_cards = []
self.blockchain = None
self.members = {} # key: member_name, value: (addr, port, public_key)
self.private_key = SigningKey.generate() # need to update this
self.public_key = self.private_key.get_verifying_key()
self.stop = False # False: User is online
# self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# self.sock.settimeout(None)
# self.port = port
# self.sock.bind((host, port))
self.initial_block_chains = []
self.initial_block_chains_sizes = []
# self.initial_display()
# self.join()
self.auctioned_cards = {}
# only for genesis
self.isGenesis = False
self.cards = None
self.card_index = 0
self.pending_transaction = {} # transaction id: {approved: false, card_add: card, card_remove: card}
T1 = threading.Thread(target = self.listener)
T1.start()
# Blockchain methods
#Sign the pokemon card that is up for trade
def sign(self, hash_pokemon_card_id):
""" Sign a hash_pokemon_card_id using the private key """
return self.private_key.sign(hash_pokemon_card_id.encode('utf-8'))
# Add received block from the miner to the blockchain
def add_block(self, block):
self.blockchain.add_block(block)
def assign_blockchain(self, blockchain):
self.blockchain = blockchain
def verify_ownership(self, owner_public_key, card_id):
for i in reversed(range(len(self.blockchain.blocks))):
if i == 0:
break
contents = self.blockchain.blocks[i].content
if contents['pokemon_card_id'] == card_id:
key1 = self.public_key_to_str(contents['public_key_receiver'])
key2 = self.public_key_to_str(owner_public_key)
if key1 == key2:
return True
return False
def public_key_to_str(self, public_key):
return public_key.to_string().hex()
def public_key_to_obj(self, public_key):
return VerifyingKey.from_string(bytes.fromhex(public_key))
def private_key_to_str(self, private_key):
return private_key.to_string().hex()
def private_key_to_obj(self, private_key):
return SigningKey.from_string(bytes.fromhex(private_key))
def create_genesis_block(self, pokemon_master_key, pokemon_card_id_list):
content = {"pokemon_master_key": pokemon_master_key,
"pokemon_card_id_list": pokemon_card_id_list}
block = Block(content, block_type='genesis')
return block
# New member joins
def join(self):
lines = []
with open('./members.txt', "r") as f:
lines = f.readlines()
# genesis
if lines == []:
lines = []
with open("./pokemons.csv", "r") as f:
lines = f.readlines()
lines = lines[1:]
# striping /n, splitting on ",", specifying legendary as yes or no
lines = list(map(lambda x: x.rstrip(), lines))
lines = list(map(lambda x: x.split(","), lines))
            lines = list(map(lambda x: x[0:-1] + ["No"] if x[-1] == "0" else x[0:-1] + ["Yes"], lines))
cards = list(map(lambda x: Card(x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8]), lines))
self.isGenesis = True
self.cards = cards
f = open("./members.txt", "a")
temp_string = self.name + "," + self.host + "," + str(self.port) + "," + self.public_key_to_str(self.public_key)
f.write(temp_string)
f.close()
genesis_block = self.create_genesis_block(self.public_key, self.cards)
self.blockchain = Blockchain()
self.add_block(genesis_block)
public_key_str = self.public_key_to_str(self.public_key)
message = {"type": "add_member", "name": self.name, "host": self.host, "port": self.port, "public_key": public_key_str}
new_sock = socket.socket()
new_sock.connect(("localhost", 10000))
self.send_message(message, new_sock)
else:
lines = []
with open("./members.txt", "r") as f:
lines = f.readlines()
lines = list(map(lambda x: x.rstrip(), lines))
lines = list(map(lambda x: x.split(","), lines))
for i in range(len(lines)):
public_key_obj = self.public_key_to_obj(lines[i][3])
self.members[lines[i][0]] = (lines[i][1], int(lines[i][2]), public_key_obj)
public_key_str = self.public_key_to_str(self.public_key)
message = {"type": "add_member", "name": self.name, "host": self.host, "port": self.port, "public_key": public_key_str}
self.flood(message)
new_sock = socket.socket()
new_sock.connect(("localhost", 10000))
self.send_message(message, new_sock)
f = open("./members.txt", "a")
temp_string = "\n" + self.name + "," + self.host + "," + str(self.port) + "," + self.public_key_to_str(self.public_key)
f.write(temp_string)
f.close()
block_chains = []
block_chains_sizes = []
for key, val in self.members.items():
message = {"type": "send_blockchain", "host": self.host, "port": self.port}
new_sock = socket.socket()
new_sock.connect(('localhost', val[1]))
self.send_message(message, new_sock)
self.action()
def first_ten_cards(self):
message = {"type": "first_ten_cards", "host": self.host, "port": self.port, "public_key": self.public_key}
genesis = list(self.members.keys())[0]
new_sock = socket.socket()
new_sock.connect((self.members[genesis][0], self.members[genesis][1]))
self.send_message(message, new_sock)
# Always listening port
def listener(self):
listener = socket.socket()
listener.bind((self.host, self.port))
listener.listen(100)
while not self.stop:
client, addr = listener.accept()
threading.Thread(target = self.handleMessage, args = (client, addr)).start()
print("Shutting down node:", self.host, self.port)
try:
listener.shutdown(2)
listener.close()
except:
listener.close()
# General function to initiate an action (trade, gift, etc)
def action(self):
while not self.stop:
user_input = input()
input_split = user_input.split(" ")
if input_split[0] == '':
continue
elif input_split[0] == 'trade':
self.trade()
elif input_split[0] == 'gift':
self.gift_card()
elif input_split[0] == 'my_cards':
self.view_my_cards()
elif input_split[0] == 'blockchain':
print(self.blockchain)
elif input_split[0] == 'members':
self.view_members()
elif input_split[0] == 'help':
self.display_help()
elif input_split[0] == 'view_trade':
self.view_trade_offers()
elif input_split[0] == 'exit':
self.stop = True
else:
print("\nThe input keyword is not correct, please type again\n")
print("\nType 'help' for keyword information...")
def view_members(self):
print("\n**********MEMBERS**********\n\n")
for key, val in self.members.items():
print_str = key + " at address \"" + val[0] + "\" and port \"" + str(val[1]) + "\""
print(print_str)
print("\n**********MEMBERS**********\n\n")
# Deals with incoming messages
def handleMessage(self, client, addr):
message_arr = []
count = 0
while True:
packet = client.recv(4096)
if not packet: break
message_arr.append(packet)
count = count + 1
content = loads(b"".join(message_arr))
if content['type'] == "trade":
new_sock = socket.socket()
new_sock.connect(('localhost', content['port']))
print("\nTRADE OFFER!\nHere is the card up for trade\n")
content['card'].view_card()
print("\n 'accept'/'reject': ")
decision = input()
if decision == 'accept':
self.accept_trade(new_sock, content['card'])
else:
self.decline_trade(new_sock, content['card'])
elif content['type'] == "accept_trade":
self.auctioned_cards[content['key_card'].poke_id][(content['name'], content['port'])] = content['card']
self.check_response_count(content['key_card'].poke_id)
elif content['type'] == "decline_trade":
self.auctioned_cards[content['key_card'].poke_id][(content['name'], content['port'])] = None
self.check_response_count(content['key_card'].poke_id)
elif content['type'] == "verify_ownership":
check = self.verify_ownership(content["owner_public_key"], content["card_id"])
new_sock = socket.socket()
new_sock.connect(('localhost', 10000))
if check:
message = {"type": "ownership_verified", "transaction_id": content["transaction_id"], "trade_id": content["trade_id"]}
self.send_message(message, new_sock)
else:
message = {"type": "ownership_not_verified", "transaction_id": content["transaction_id"], "trade_id": content["trade_id"]}
self.send_message(message, new_sock)
elif content['type'] == "add_block":
self.add_block(content['block'])
elif content['type'] == "add_member":
self.members[content['name']] = (content['host'], int(content['port']), self.public_key_to_obj(content['public_key']))
elif content['type'] == "send_blockchain":
message = {"type": "blockchain", "blockchain": self.blockchain}
new_sock = socket.socket()
new_sock.connect((content['host'], content['port']))
self.send_message(message, new_sock)
elif content['type'] == "blockchain":
self.initial_block_chains.append(content["blockchain"])
self.initial_block_chains_sizes.append(len(content["blockchain"].blocks))
if len(self.initial_block_chains_sizes) == len(list(self.members.keys())):
self.accept_longest_blockchain()
elif content['type'] == "first_ten_cards":
self.initialize_cards(content["host"], content["port"], content["public_key"])
elif content['type'] == "transaction_approved":
self.approve_transaction(content["transaction_id"])
elif content['type'] == "add_initial_card":
self.my_cards.append(content["card"])
elif content['type'] == "twin_transaction":
message = content['dict']
message["signature"] = self.sign(message["hash_pokemon_card_id"])
message["type"] = "transaction"
new_sock = socket.socket()
new_sock.connect(('localhost', 10000))
self.send_message(message, new_sock)
elif content['type'] == "add_pending_transaction":
self.pending_transaction[content["transaction_id"]] = content["transaction_content"]
def accept_longest_blockchain(self):
index = self.initial_block_chains_sizes.index(max(self.initial_block_chains_sizes))
self.blockchain = self.initial_block_chains[index]
self.first_ten_cards()
def approve_transaction(self, transaction_id):
self.pending_transaction[transaction_id]["approve"] = True
self.my_cards.append(self.pending_transaction[transaction_id]["card_add"])
new_cards = []
for card in self.my_cards:
if card.poke_id != self.pending_transaction[transaction_id]["card_remove"].poke_id:
new_cards.append(card)
self.my_cards = new_cards
def initialize_cards(self, host, port, public_key_receiver):
ten_cards = self.cards[self.card_index: (self.card_index+10)]
self.card_index = self.card_index + 10
for card in ten_cards:
message = {"type": "send_ten_cards", "card": card, "host": host, "port": port, "public_key_sender": self.public_key, "public_key_receiver": public_key_receiver, "pokemon_card_id": card.poke_id, "hash_pokemon_card_id": hash_object(card.poke_id), "signature": self.sign(hash_object(card.poke_id))}
new_sock = socket.socket()
new_sock.connect(('localhost', 10000))
self.send_message(message, new_sock)
sleep(0.2)
# Checks whether each member has responded to the auctioned card
def check_response_count(self, key):
if (len(list(self.members.keys()))-1) == len(list(self.auctioned_cards[key].keys())):
self.evaluate_auction(key)
def evaluate_auction(self, key):
print("\n!___WOOHOO! All members have responded to trade offer__!\n")
print("\nYour card:\n")
trade_card = None
for card in self.my_cards:
if card.poke_id == key:
card.view_card()
trade_card = card
break
print("\nCards offered for trade against your card:\n")
for offer in list(self.auctioned_cards[key].keys()):
if self.auctioned_cards[key][offer] != None:
print("Name: " + offer[0] + "\n")
self.auctioned_cards[key][offer].view_card()
print("\n")
print("\nEnter the ID of card you want to accept trade of\n")
print("\nOtherwise enter 999\n")
user_input = input()
if user_input != "999":
temp_card = None
temp_name_port = None
temp_public_key = None
temp_trade_number = uuid4()
for dict_key, val in self.auctioned_cards[key].items():
if val != None:
if val.poke_id == user_input:
temp_card = val
temp_name_port = dict_key
for dict_key, val in self.members.items():
if dict_key == temp_name_port[0]:
temp_public_key = val[2]
sender_txn_id = uuid4()
receiver_txn_id = uuid4()
self.pending_transaction[sender_txn_id] = {"approve": False, "card_add": temp_card, "card_remove": trade_card}
temp_dict = {"approve": False, "card_add": trade_card, "card_remove": temp_card}
message = {"type": "add_pending_transaction", "transaction_id": receiver_txn_id, "transaction_content": temp_dict}
new_sock = socket.socket()
new_sock.connect(('localhost', temp_name_port[1]))
self.send_message(message, new_sock)
message_1 = {"type":"transaction","public_key_sender": self.public_key, "public_key_receiver": temp_public_key, "pokemon_card_id": trade_card.poke_id, "hash_pokemon_card_id": hash_object(trade_card.poke_id), "port": self.port, "trade_id": temp_trade_number, "transaction_id": sender_txn_id, "signature": self.sign(hash_object(trade_card.poke_id))}
message_2 = {"type":"twin_transaction", "dict": {"public_key_sender": temp_public_key, "public_key_receiver": self.public_key, "pokemon_card_id": temp_card.poke_id, "hash_pokemon_card_id": hash_object(temp_card.poke_id), "port": temp_name_port[1], "trade_id": temp_trade_number, "transaction_id": receiver_txn_id}}
new_sock1 = socket.socket()
new_sock1.connect(('localhost', 10000))
new_sock2 = socket.socket()
new_sock2.connect(('localhost', temp_name_port[1]))
self.send_message(message_1, new_sock1)
self.send_message(message_2, new_sock2)
    # Adds this trainer's credentials to the members text file upon join
def add_to_txt(self, name, host, port):
with open("./members.txt", 'a') as my_file:
to_write = name + ',' + 'localhost' + ',' + str(port) + ',' + str(self.public_key)
my_file.write(to_write)
    # Reads already-joined members and returns a dictionary
    # key: member name, value: other attributes
def read_text(self):
members_info = {}
with open("./members.txt", 'r') as my_file:
            lines = my_file.readlines()
for line in lines:
temp = line.split(',')
member = temp.pop(0)
members_info[member] = temp
return members_info
# Sends message to all members
def flood(self, message, include_genesis=True):
if include_genesis:
for key, val in self.members.items():
new_sock = socket.socket()
new_sock.connect((val[0], val[1]))
self.send_message(message, new_sock)
else:
key_list = list(self.members.keys())
key_list = key_list[1:]
for key in key_list:
new_sock = socket.socket()
new_sock.connect((self.members[key][0], self.members[key][1]))
self.send_message(message, new_sock)
# Put for trade
def trade(self):
print("\nCards list:\n")
self.view_my_cards()
print("\nEnter Card Pokemon ID: ")
poke_ID = input()
card_found = False
for card in self.my_cards:
if card.poke_id == poke_ID:
card_found = True
message = {"type":"trade", "addr":"localhost", 'port':self.port, 'card': card}
self.auctioned_cards[card.poke_id] = {}
self.flood(message, False)
break
if not card_found:
print("!__Invalid Pokemon ID entered__!")
# Bid card against a card on auction
def bid_card(self, addr, port):
print("\nCards list:\n")
self.view_my_cards()
print("\nEnter Card Pokemon ID: ")
        poke_ID = input()
        card_found = False
        for card in self.my_cards:
            if card.poke_id == poke_ID:
                card_found = True
                message = {"type":"trade", "addr":"localhost", 'port':self.port, 'card': card}
                new_sock = socket.socket()
                new_sock.connect((addr, port))
                self.send_message(message, new_sock)
                break
        if not card_found:
            print("!__Invalid Pokemon ID entered__!")
# Accept a card trade. Generate a block
def accept_trade(self, client, key_card):
print("\nCards list:\n")
self.view_my_cards()
print("\nEnter Card Pokemon ID you want to give for trade: \n")
poke_ID = input()
card_found = False
for card in self.my_cards:
if card.poke_id == poke_ID:
message = {"type":"accept_trade", "name": self.name, "addr":"localhost", 'port':self.port, 'card': card, 'key_card': key_card}
self.send_message(message, client)
card_found = True
break
if not card_found:
print("!__Invalid Pokemon ID entered__!")
self.decline_trade(client, key_card)
# Decline a card trade
def decline_trade(self, client, card):
card.view_card()
message = {"type":"decline_trade", "name": self.name, "addr":"localhost", 'port':self.port, 'key_card': card}
self.send_message(message, client)
# View my cards
def view_my_cards(self):
for card in self.my_cards:
card.view_card()
print("\n")
# View all trade offers
def view_trade_offers(self):
pass
# Gift a card
def gift_card(self):
members_info = self.read_text()
print("\n Type the member you want to gift card\n ")
print(list(members_info.keys()))
chosen_member = input()
print("\n Enter Card Name to gift\n")
self.view_my_cards()
card_name = input()
def send_message(self, content, client):
# packet = dumps(content).encode("utf-16")
# filename = str(uuid4())
# complete_path = "./temp_files/" + filename + ".bin"
# with open(complete_path, "wb") as f:
# f.write(packet)
# # self.sock.connect((addr, port))
# self.sock.sendto(filename.encode("utf-8"), (addr, port))
packet = dumps(content)
# print("packet in send_message:", packet)
client.send(packet)
client.close()
# Display the possible actions and format for the user's reference
def initial_display(self):
print("*****__Welcome to Pokemon Card Trading Marketplace__*****")
print("\nHello Pokemon Trainer ", self.name, ", here is how you become a Pokemon Master\n")
print("Option 1: Trade a card- type 'trade'\n")
print("Option 2: Gift a card - type 'gift' \n")
print("Option 3: View Cards - type 'my cards'\n")
print("Option 4: Get Help - type 'help'\n")
print("Option 5: View trade offers - type 'view trade'\n")
print("Option 6: Exit - type 'exit' ")
print("Disclamer --> Exiting will permanently result in loosing cards")
print("Also be on a lookout for trade requests, you don't waant to miss out on that fantastic deal!\n")
def display_help(self):
print("\nDo not worry Pokemon trainer :P, we are here to help!\n")
print("Here are the options available:\n")
print("Option 1: Trade a card- type 'trade'\n")
print("Option 2: Gift a card - type 'gift' \n")
print("Option 3: View Cards - type 'my cards'\n")
print("Option 4: Get Help - type 'help'\n")
print("Option 5: View trade offers - type 'view trade'\n")
print("Option 6: Exit - type 'exit' ")
# Do not change this part of code
if __name__ == "__main__":
def helper():
'''
This function is just for the sake of our Client module completion
'''
print("-u username | --user=username The username of Client")
print("-h | --help Print this help")
try:
OPTS, ARGS = getopt.getopt(sys.argv[1:],
"u:", ["user="])
except getopt.error:
helper()
exit(1)
PORT = random.randint(11000, 40000)
DEST = "localhost"
USER_NAME = None
WINDOW_SIZE = 3
for o, a in OPTS:
if o in ("-u", "--user="):
USER_NAME = a
if USER_NAME is None:
print("Missing Username.")
helper()
exit(1)
# create trainer class instance
print("Starting...\nName: " + str(USER_NAME) + "\nHost: " + str(DEST) + "\nPort: " + str(PORT) + "\n")
S = Trainer(USER_NAME, DEST, PORT)
try:
# Start receiving Messages
# initializations
# S.add_to_txt(USER_NAME, DEST, PORT)
S.initial_display()
# Start Action Window
S.join()
# T2 = threading.Thread(target = S.action)
# T2.daemon = True
# T2.start()
except (KeyboardInterrupt, SystemExit):
sys.exit()
|
record.py
|
from __future__ import print_function, division
import numpy as np
import cv2
import pyaudio
import wave
import threading
import time
import subprocess
import os
import keyboard
class VideoRecorder():
"Video class based on openCV"
def __init__(self, name="output.avi", fourcc="XVID", sizex=640, sizey=480, camindex=0, fps=30):
self.open = True
self.device_index = camindex
self.fps = fps # fps should be the minimum constant rate at which the camera can
self.fourcc = fourcc # capture images (with no decrease in speed over time; testing is required)
self.frameSize = (sizex, sizey) # video formats and sizes also depend and vary according to the camera used
self.video_filename = name
self.video_cap = cv2.VideoCapture(self.device_index)
self.video_writer = cv2.VideoWriter_fourcc(*self.fourcc)
self.video_out = cv2.VideoWriter(self.video_filename, self.video_writer, self.fps, self.frameSize)
self.frame_counts = 1
self.start_time = time.time()
def record(self):
"Video starts being recorded"
# counter = 1
timer_start = time.time()
timer_current = 0
while self.open:
ret, video_frame = self.video_cap.read()
if ret:
self.video_out.write(video_frame)
# print(str(counter) + " " + str(self.frame_counts) + " frames written " + str(timer_current))
                cv2.imshow('frame', video_frame)
                cv2.waitKey(1)  # needed for the preview window to refresh
# counter += 1
# timer_current = time.time() - timer_start
time.sleep(1/self.fps)
# gray = cv2.cvtColor(video_frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow('video_frame', gray)
# cv2.waitKey(1)
else:
break
def stop(self):
"Finishes the video recording therefore the thread too"
if self.open:
self.open=False
self.video_out.release()
self.video_cap.release()
cv2.destroyAllWindows()
def start(self):
"Launches the video recording function using a thread"
video_thread = threading.Thread(target=self.record)
video_thread.start()
class AudioRecorder():
"Audio class based on pyAudio and Wave"
def __init__(self, filename="output.wav", rate=44100, fpb=1024, channels=2):
self.open = True
self.rate = rate
self.frames_per_buffer = fpb
self.channels = channels
self.format = pyaudio.paInt16
self.audio_filename = filename
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=self.format,
channels=self.channels,
rate=self.rate,
input=True,
frames_per_buffer = self.frames_per_buffer)
self.audio_frames = []
def record(self):
"Audio starts being recorded"
self.stream.start_stream()
while self.open:
data = self.stream.read(self.frames_per_buffer)
self.audio_frames.append(data)
if not self.open:
break
def stop(self):
"Finishes the audio recording therefore the thread too"
if self.open:
self.open = False
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
waveFile = wave.open(self.audio_filename, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.rate)
waveFile.writeframes(b''.join(self.audio_frames))
waveFile.close()
def start(self):
"Launches the audio recording function using a thread"
audio_thread = threading.Thread(target=self.record)
audio_thread.start()
def start_AVrecording(filename="test"):
global video_thread
global audio_thread
video_thread = VideoRecorder()
audio_thread = AudioRecorder()
audio_thread.start()
video_thread.start()
return filename
def start_video_recording(filename="test"):
global video_thread
video_thread = VideoRecorder()
video_thread.start()
return filename
def start_audio_recording(filename="test"):
global audio_thread
audio_thread = AudioRecorder()
audio_thread.start()
return filename
def stop_AVrecording(filename="test"):
audio_thread.stop()
video_thread.stop()
def file_manager(filename="test"):
"Required and wanted processing of final files"
local_path = os.getcwd()
# if os.path.exists(str(local_path) + "/output.wav"):
# os.remove(str(local_path) + "/output.wav")
# if os.path.exists(str(local_path) + "/output.avi"):
# os.remove(str(local_path) + "/output.avi")
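# A minimal usage sketch for the helpers above: record for a few seconds, stop,
# then mux the separate audio and video files with ffmpeg. This assumes ffmpeg
# is available on PATH and that the default "output.avi"/"output.wav" names
# were used; the muxing flags are illustrative only.
def record_and_mux(duration_seconds=5, muxed_name="muxed_output.avi"):
    start_AVrecording()
    time.sleep(duration_seconds)
    stop_AVrecording()
    cmd = ["ffmpeg", "-y", "-i", "output.avi", "-i", "output.wav",
           "-c", "copy", muxed_name]
    subprocess.call(cmd)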
|
test_server_client_am.py
|
import multiprocessing as mp
import os
from functools import partial
from queue import Empty as QueueIsEmpty
import numpy as np
import pytest
from ucp._libs import ucx_api
from ucp._libs.arr import Array
from ucp._libs.utils_test import blocking_am_recv, blocking_am_send
mp = mp.get_context("spawn")
RNDV_THRESH = 8192
def get_data():
ret = {}
ret["bytearray"] = {
"allocator": bytearray,
"generator": lambda n: bytearray(os.urandom(n)),
"validator": lambda recv, exp: np.testing.assert_equal(recv, exp),
"memory_type": ucx_api.AllocatorType.HOST,
}
ret["numpy"] = {
"allocator": partial(np.ones, dtype=np.uint8),
"generator": partial(np.arange, dtype=np.int64),
"validator": lambda recv, exp: np.testing.assert_equal(
recv.view(np.int64), exp
),
"memory_type": ucx_api.AllocatorType.HOST,
}
try:
import cupy as cp
ret["cupy"] = {
"allocator": partial(cp.ones, dtype=np.uint8),
"generator": partial(cp.arange, dtype=np.int64),
"validator": lambda recv, exp: cp.testing.assert_array_equal(
recv.view(np.int64), exp
),
"memory_type": ucx_api.AllocatorType.CUDA,
}
except ImportError:
pass
return ret
def _echo_server(get_queue, put_queue, msg_size, datatype):
"""Server that send received message back to the client
Notice, since it is illegal to call progress() in call-back functions,
we use a "chain" of call-back functions.
"""
data = get_data()[datatype]
ctx = ucx_api.UCXContext(
config_dict={"RNDV_THRESH": str(RNDV_THRESH)},
feature_flags=(ucx_api.Feature.AM,),
)
worker = ucx_api.UCXWorker(ctx)
worker.register_am_allocator(data["allocator"], data["memory_type"])
# A reference to listener's endpoint is stored to prevent it from going
# out of scope too early.
ep = None
def _send_handle(request, exception, msg):
# Notice, we pass `msg` to the handler in order to make sure
# it doesn't go out of scope prematurely.
assert exception is None
def _recv_handle(recv_obj, exception, ep):
assert exception is None
msg = Array(recv_obj)
ucx_api.am_send_nbx(ep, msg, msg.nbytes, cb_func=_send_handle, cb_args=(msg,))
def _listener_handler(conn_request):
global ep
ep = worker.ep_create_from_conn_request(
conn_request, endpoint_error_handling=True
)
# Wireup
ucx_api.am_recv_nb(ep, cb_func=_recv_handle, cb_args=(ep,))
# Data
ucx_api.am_recv_nb(ep, cb_func=_recv_handle, cb_args=(ep,))
listener = ucx_api.UCXListener(worker=worker, port=0, cb_func=_listener_handler)
put_queue.put(listener.port)
while True:
worker.progress()
try:
get_queue.get(block=False, timeout=0.1)
except QueueIsEmpty:
continue
else:
break
def _echo_client(msg_size, datatype, port):
data = get_data()[datatype]
ctx = ucx_api.UCXContext(
config_dict={"RNDV_THRESH": str(RNDV_THRESH)},
feature_flags=(ucx_api.Feature.AM,),
)
worker = ucx_api.UCXWorker(ctx)
worker.register_am_allocator(data["allocator"], data["memory_type"])
ep = worker.ep_create("localhost", port, endpoint_error_handling=True)
# The wireup message is sent to ensure endpoints are connected, otherwise
# UCX may not perform any rendezvous transfers.
send_wireup = bytearray(b"wireup")
send_data = data["generator"](msg_size)
blocking_am_send(worker, ep, send_wireup)
blocking_am_send(worker, ep, send_data)
recv_wireup = blocking_am_recv(worker, ep)
recv_data = blocking_am_recv(worker, ep)
# Cast recv_wireup to bytearray when using NumPy as a host allocator,
# this ensures the assertion below is correct
if datatype == "numpy":
recv_wireup = bytearray(recv_wireup)
assert bytearray(recv_wireup) == send_wireup
if data["memory_type"] == "cuda" and send_data.nbytes < RNDV_THRESH:
# Eager messages are always received on the host, if no host
# allocator is registered UCX-Py defaults to `bytearray`.
assert recv_data == bytearray(send_data.get())
data["validator"](recv_data, send_data)
@pytest.mark.skipif(
not ucx_api.is_am_supported(), reason="AM only supported in UCX >= 1.11"
)
@pytest.mark.parametrize("msg_size", [10, 2 ** 24])
@pytest.mark.parametrize("datatype", get_data().keys())
def test_server_client(msg_size, datatype):
put_queue, get_queue = mp.Queue(), mp.Queue()
server = mp.Process(
target=_echo_server, args=(put_queue, get_queue, msg_size, datatype)
)
server.start()
port = get_queue.get()
client = mp.Process(target=_echo_client, args=(msg_size, datatype, port))
client.start()
client.join(timeout=10)
assert not client.exitcode
put_queue.put("Finished")
server.join(timeout=10)
assert not server.exitcode
|
cabinet.py
|
import ipaddress
import os.path
import threading
import time
import yaml
from enum import Enum
from typing import Dict, List, Optional, Sequence, Tuple, Union
from naomi import NaomiSettingsPatcher
from netdimm import NetDimmInfo, NetDimmException, NetDimmVersionEnum, CRCStatusEnum
from netboot.hostutils import Host, HostStatusEnum, TargetEnum
from netboot.log import log
class CabinetException(Exception):
pass
class CabinetStateEnum(Enum):
STATE_STARTUP = "startup"
STATE_WAIT_FOR_CABINET_POWER_ON = "wait_power_on"
STATE_SEND_CURRENT_GAME = "send_game"
STATE_CHECK_CURRENT_GAME = "check_game"
STATE_WAIT_FOR_CABINET_POWER_OFF = "wait_power_off"
class CabinetRegionEnum(Enum):
REGION_UNKNOWN = "unknown"
REGION_JAPAN = "japan"
REGION_USA = "usa"
REGION_EXPORT = "export"
REGION_KOREA = "korea"
REGION_AUSTRALIA = "australia"
class Cabinet:
def __init__(
self,
ip: str,
region: CabinetRegionEnum,
description: str,
filename: Optional[str],
patches: Dict[str, Sequence[str]],
settings: Dict[str, Optional[bytes]],
target: Optional[TargetEnum] = None,
version: Optional[NetDimmVersionEnum] = None,
quiet: bool = False,
) -> None:
self.description: str = description
self.region: CabinetRegionEnum = region
self.patches: Dict[str, List[str]] = {rom: [p for p in patches[rom]] for rom in patches}
self.settings: Dict[str, Optional[bytes]] = {rom: settings[rom] for rom in settings}
self.quiet = quiet
self.__host: Host = Host(ip, target=target, version=version, quiet=self.quiet)
self.__lock: threading.Lock = threading.Lock()
self.__current_filename: Optional[str] = filename
self.__new_filename: Optional[str] = filename
self.__state: Tuple[CabinetStateEnum, int] = (CabinetStateEnum.STATE_STARTUP, 0)
def __repr__(self) -> str:
return f"Cabinet(ip={repr(self.ip)}, description={repr(self.description)}, filename={repr(self.filename)}, patches={repr(self.patches)} settings={repr(self.settings)}, target={repr(self.target)}, version={repr(self.version)})"
@property
def ip(self) -> str:
return self.__host.ip
@property
def target(self) -> TargetEnum:
return self.__host.target
@target.setter
def target(self, newval: TargetEnum) -> None:
self.__host.target = newval
@property
def version(self) -> NetDimmVersionEnum:
return self.__host.version
@version.setter
def version(self, newval: NetDimmVersionEnum) -> None:
self.__host.version = newval
@property
def filename(self) -> Optional[str]:
with self.__lock:
return self.__new_filename
@filename.setter
def filename(self, new_filename: Optional[str]) -> None:
with self.__lock:
self.__new_filename = new_filename
def __print(self, string: str, newline: bool = True) -> None:
if not self.quiet:
log(string, newline=newline)
def tick(self) -> None:
"""
Tick the state machine forward.
"""
with self.__lock:
self.__host.tick()
current_state = self.__state[0]
# Startup state, only one transition to waiting for cabinet
if current_state == CabinetStateEnum.STATE_STARTUP:
self.__print(f"Cabinet {self.ip} waiting for power on.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
return
# Wait for cabinet to power on state, transition to sending game
# if the cabinet is active, transition to self if cabinet is not.
if current_state == CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON:
if self.__host.alive:
if self.__new_filename is None:
# Skip sending game, there's nothing to send
self.__print(f"Cabinet {self.ip} has no associated game, waiting for power off.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_OFF, 0)
else:
try:
info = self.__host.info()
except NetDimmException:
info = None
if info is not None and info.current_game_crc != 0:
# Its worth trying to CRC this game and seeing if it matches.
crc = self.__host.crc(self.__new_filename, self.patches.get(self.__new_filename, []), self.settings.get(self.__new_filename, None))
if crc == info.current_game_crc:
if info.game_crc_status == CRCStatusEnum.STATUS_VALID:
self.__print(f"Cabinet {self.ip} already running game {self.__new_filename}.")
self.__current_filename = self.__new_filename
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_OFF, 0)
return
elif info.game_crc_status == CRCStatusEnum.STATUS_CHECKING:
self.__print(f"Cabinet {self.ip} is already verifying game {self.__new_filename}.")
self.__current_filename = self.__new_filename
self.__state = (CabinetStateEnum.STATE_CHECK_CURRENT_GAME, 0)
return
self.__print(f"Cabinet {self.ip} sending game {self.__new_filename}.")
self.__current_filename = self.__new_filename
self.__host.send(self.__new_filename, self.patches.get(self.__new_filename, []), self.settings.get(self.__new_filename, None))
self.__state = (CabinetStateEnum.STATE_SEND_CURRENT_GAME, 0)
return
# Wait for send to complete state. Transition to waiting for
# cabinet power on if transfer failed. Stay in state if transfer
# continuing. Transition to waiting for CRC verification if transfer
# passes.
if current_state == CabinetStateEnum.STATE_SEND_CURRENT_GAME:
if self.__host.status == HostStatusEnum.STATUS_INACTIVE:
raise Exception("State error, shouldn't be possible!")
elif self.__host.status == HostStatusEnum.STATUS_TRANSFERRING:
current, total = self.__host.progress
self.__state = (CabinetStateEnum.STATE_SEND_CURRENT_GAME, int(float(current * 100) / float(total)))
elif self.__host.status == HostStatusEnum.STATUS_FAILED:
self.__print(f"Cabinet {self.ip} failed to send game, waiting for power on.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
elif self.__host.status == HostStatusEnum.STATUS_COMPLETED:
self.__print(f"Cabinet {self.ip} succeeded sending game, rebooting and verifying game CRC.")
self.__host.reboot()
self.__state = (CabinetStateEnum.STATE_CHECK_CURRENT_GAME, 0)
return
# Wait for the CRC verification screen to finish. Transition to waiting
# for cabinet power off if CRC passes. Transition to waiting for power
# on if CRC fails. If CRC is still in progress wait. If the cabinet
# is turned off or the game is changed, also move back to waiting for
# power on to send a new game.
if current_state == CabinetStateEnum.STATE_CHECK_CURRENT_GAME:
if not self.__host.alive:
self.__print(f"Cabinet {self.ip} turned off, waiting for power on.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
elif self.__current_filename != self.__new_filename:
self.__print(f"Cabinet {self.ip} changed games to {self.__new_filename}, waiting for power on.")
self.__current_filename = self.__new_filename
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
else:
try:
info = self.__host.info()
except NetDimmException:
info = None
if info is not None and info.current_game_crc != 0:
if info.game_crc_status == CRCStatusEnum.STATUS_VALID:
# Game passed onboard CRC, consider it running!
self.__print(f"Cabinet {self.ip} passed CRC verification for {self.__current_filename}, waiting for power off.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_OFF, 0)
elif info.game_crc_status == CRCStatusEnum.STATUS_DISABLED:
# Game failed onboard CRC, try sending again!
self.__print(f"Cabinet {self.ip} had CRC verification disabled for {self.__current_filename}, waiting for power on.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
elif info.game_crc_status in {CRCStatusEnum.STATUS_INVALID, CRCStatusEnum.STATUS_BAD_MEMORY}:
# Game failed onboard CRC, try sending again!
self.__print(f"Cabinet {self.ip} failed CRC verification for {self.__current_filename}, waiting for power on.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
return
# Wait for cabinet to turn off again. Transition to waiting for
# power to come on if the cabinet is inactive. Transition to
# waiting for power to come on if game changes. Stay in state
# if cabinet stays on.
if current_state == CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_OFF:
if not self.__host.alive:
self.__print(f"Cabinet {self.ip} turned off, waiting for power on.")
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
elif self.__current_filename != self.__new_filename:
self.__print(f"Cabinet {self.ip} changed games to {self.__new_filename}, waiting for power on.")
self.__current_filename = self.__new_filename
self.__state = (CabinetStateEnum.STATE_WAIT_FOR_CABINET_POWER_ON, 0)
return
raise Exception("State error, impossible state!")
@property
def state(self) -> Tuple[CabinetStateEnum, int]:
"""
Returns the current state as a string, and the progress through that state
as an integer, bounded between 0-100.
"""
with self.__lock:
return self.__state
def info(self) -> Optional[NetDimmInfo]:
with self.__lock:
return self.__host.info()
class CabinetManager:
def __init__(self, cabinets: Sequence[Cabinet]) -> None:
self.__cabinets: Dict[str, Cabinet] = {cab.ip: cab for cab in cabinets}
self.__lock: threading.Lock = threading.Lock()
self.__thread: threading.Thread = threading.Thread(target=self.__poll_thread)
        self.__thread.daemon = True
self.__thread.start()
def __repr__(self) -> str:
return f"CabinetManager([{', '.join(repr(cab) for cab in self.cabinets)}])"
@staticmethod
def from_yaml(yaml_file: str) -> "CabinetManager":
with open(yaml_file, "r") as fp:
data = yaml.safe_load(fp)
if data is None:
# Assume this is an empty file
return CabinetManager([])
if not isinstance(data, dict):
raise CabinetException(f"Invalid YAML file format for {yaml_file}, missing list of cabinets!")
cabinets: List[Cabinet] = []
for ip, cab in data.items():
try:
ip = str(ipaddress.IPv4Address(ip))
except ValueError:
raise CabinetException("Invalid YAML file format for {yaml_file}, IP address {ip} is not valid!")
if not isinstance(cab, dict):
raise CabinetException(f"Invalid YAML file format for {yaml_file}, missing cabinet details for {ip}!")
for key in ["description", "filename", "roms"]:
if key not in cab:
raise CabinetException(f"Invalid YAML file format for {yaml_file}, missing {key} for {ip}!")
if cab['filename'] is not None and not os.path.isfile(str(cab['filename'])):
raise CabinetException(f"Invalid YAML file format for {yaml_file}, file {cab['filename']} for {ip} is not a file!")
for rom, patches in cab['roms'].items():
if not os.path.isfile(str(rom)):
raise CabinetException(f"Invalid YAML file format for {yaml_file}, file {rom} for {ip} is not a file!")
for patch in patches:
if not os.path.isfile(str(patch)):
raise CabinetException(f"Invalid YAML file format for {yaml_file}, file {patch} for {ip} is not a file!")
cabinet = Cabinet(
ip=ip,
description=str(cab['description']),
region=CabinetRegionEnum(str(cab['region']).lower()),
filename=str(cab['filename']) if cab['filename'] is not None else None,
patches={str(rom): [str(p) for p in cab['roms'][rom]] for rom in cab['roms']},
# This is accessed differently since we have older YAML files that might need upgrading.
settings={str(rom): (bytes(data) or None) for (rom, data) in cab.get('settings', {}).items()},
target=TargetEnum(str(cab['target'])) if 'target' in cab else None,
version=NetDimmVersionEnum(str(cab['version'])) if 'version' in cab else None,
)
if cabinet.target == TargetEnum.TARGET_NAOMI:
# Make sure that the settings are correct for one of the possible patch types
cabinet.settings = {
name: None if (settings is not None and len(settings) not in {NaomiSettingsPatcher.SRAM_SIZE, NaomiSettingsPatcher.EEPROM_SIZE}) else settings
for name, settings in cabinet.settings.items()
}
else:
# Nothing can have settings outside of Naomi until we support it.
cabinet.settings = {name: None for name in cabinet.settings}
cabinets.append(cabinet)
return CabinetManager(cabinets)
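    # Illustrative YAML layout accepted by from_yaml (the address and paths below are
    # made-up examples, not values from the original project):
    #
    #   10.0.0.5:
    #     description: "Cabinet in the corner"
    #     region: japan
    #     filename: /roms/somegame.bin
    #     roms:
    #       /roms/somegame.bin:
    #         - /patches/freeplay.patch
    #     settings:
    #       /roms/somegame.bin: []
    #
    # 'target' and 'version' are optional keys. Note that 'region' is read as well even
    # though only description/filename/roms are validated explicitly, and settings values
    # are lists of ints that get converted to bytes (an empty list means no settings).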
def to_yaml(self, yaml_file: str) -> None:
data: Dict[str, Dict[str, Optional[Union[str, Dict[str, List[str]], Dict[str, List[int]]]]]] = {}
with self.__lock:
cabinets: List[Cabinet] = sorted([cab for _, cab in self.__cabinets.items()], key=lambda cab: cab.ip)
for cab in cabinets:
data[cab.ip] = {
'description': cab.description,
'region': cab.region.value,
'target': cab.target.value,
'version': cab.version.value,
'filename': cab.filename,
'roms': cab.patches,
# Bytes isn't a serializable type, so serialize it as a list of ints. If the settings is
# None for a ROM, serialize it as an empty list.
'settings': {rom: [x for x in (settings or [])] for (rom, settings) in cab.settings.items()},
}
with open(yaml_file, "w") as fp:
yaml.dump(data, fp)
def __poll_thread(self) -> None:
while True:
with self.__lock:
cabinets: List[Cabinet] = [cab for _, cab in self.__cabinets.items()]
for cabinet in cabinets:
cabinet.tick()
time.sleep(1)
@property
def cabinets(self) -> List[Cabinet]:
with self.__lock:
return sorted([cab for _, cab in self.__cabinets.items()], key=lambda cab: cab.ip)
def cabinet(self, ip: str) -> Cabinet:
with self.__lock:
if ip not in self.__cabinets:
raise CabinetException(f"There is no cabinet with the IP {ip}")
return self.__cabinets[ip]
def add_cabinet(self, cab: Cabinet) -> None:
with self.__lock:
if cab.ip in self.__cabinets:
raise CabinetException(f"There is already a cabinet with the IP {cab.ip}")
self.__cabinets[cab.ip] = cab
def remove_cabinet(self, ip: str) -> None:
with self.__lock:
if ip not in self.__cabinets:
raise CabinetException(f"There is no cabinet with the IP {ip}")
del self.__cabinets[ip]
def update_cabinet(self, cab: Cabinet) -> None:
with self.__lock:
ip = cab.ip
if ip not in self.__cabinets:
raise CabinetException(f"There is no cabinet with the IP {ip}")
# Make sure we don't reboot the cabinet if we update settings.
existing_cab = self.__cabinets[ip]
existing_cab.description = cab.description
existing_cab.target = cab.target
existing_cab.region = cab.region
existing_cab.version = cab.version
existing_cab.patches = cab.patches
existing_cab.settings = cab.settings
existing_cab.filename = cab.filename
def cabinet_exists(self, ip: str) -> bool:
with self.__lock:
return ip in self.__cabinets
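# Minimal usage sketch (not part of the original module; "cabinets.yaml" is a
# hypothetical path whose layout matches the example documented in from_yaml above).
if __name__ == "__main__":
    manager = CabinetManager.from_yaml("cabinets.yaml")
    while True:
        for cab in manager.cabinets:
            state, progress = cab.state
            print(f"{cab.ip}: {state.value} ({progress}%)")
        time.sleep(5)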
|
mc_test.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
.. _module_mc_test:
mc_test
===============================================
'''
import threading
import time
import Queue
import os
import traceback
import salt.exceptions
import salt.output
from salt.utils.odict import OrderedDict
from mc_states import api
from mc_states import saltapi
from mc_states.tests import utils
class TestError(salt.exceptions.SaltException):
"""."""
def _error(msg, ret=None):
return saltapi.rich_error(TestError, msg, ret)
def froot():
return __opts__['file_roots']['base'][0]
def mroot():
return os.path.join(froot(), 'makina-states')
def lint_tests(use_vt=True, logcapture=True):
try:
result = __salt__['cmd.run_all'](
'_scripts/pylint.sh -f colorized mc_states',
use_vt=use_vt, cwd=mroot())
if result['retcode']:
raise _error('Pylint tests failed', result)
except salt.exceptions.CommandExecutionError:
trace = traceback.format_exc()
raise _error('Problem with pylint install:\n {0}'.format(
api.magicstring(trace)))
def unit_tests(tests=None,
coverage=True,
doctests=True,
use_vt=True,
logcapture=True):
in_args = '--exe -e mc_test -v -s'
if not logcapture:
in_args += ' --nologcapture'
if isinstance(tests, basestring):
tests = tests.split(',')
if not tests:
tests = ['mc_states']
if coverage:
in_args += (' --with-xcoverage'
' --xcoverage-file=.coverage.xml')
if doctests:
in_args += ' --with-doctest'
failed = OrderedDict()
success = OrderedDict()
for test in tests:
try:
cmd = 'bin/nosetests {0} {1}'.format(
in_args, test)
result = __salt__['cmd.run_all'](
cmd,
output_loglevel='debug',
use_vt=use_vt, cwd=mroot())
if result['retcode']:
failed[test] = result
else:
success[test] = result
except salt.exceptions.CommandExecutionError:
trace = traceback.format_exc()
raise _error('Problem with nose install:\n {0}'.format(
api.magicstring(trace)))
if failed:
fail = failed.pop([a for a in failed][0])
for ffail in failed:
            fail = saltapi.concat_res_or_rets(fail, failed[ffail])
        raise _error('Unit tests failed', fail)
return success
def _echo(inq, outq):
stop = False
while not stop:
try:
test = inq.get_nowait() == 'STOP'
if test:
print('OK baby, finished !')
stop = True
continue
except Queue.Empty:
pass
if int(time.time()) % 50 == 0:
print('STATUS ECHO running...')
time.sleep(1)
def run_tests(flavors=None, use_vt=True, echo=False, logcapture=True):
if not flavors:
flavors = []
if isinstance(flavors, basestring):
flavors = flavors.split(',') # pylint: disable=E1101
success = OrderedDict()
failures = OrderedDict()
# for step in ['lint', 'unit']:
if echo:
inq = Queue.Queue()
outq = Queue.Queue()
pr = threading.Thread(target=_echo, args=(inq, outq))
pr.start()
for step in ['unit']:
try:
utils.test_setup()
success[step] = __salt__['mc_test.{0}_tests'.format(
step)](use_vt=use_vt, logcapture=logcapture)
except (TestError,) as exc:
failures[step] = exc
except (Exception, KeyboardInterrupt):
failures[step] = traceback.format_exc()
break
finally:
utils.test_teardown()
if echo:
inq.put('STOP')
pr.join()
# for now, lint is not a failure
acceptables = ['lint']
for i in acceptables:
failures.pop(i, None)
if failures:
_failures = dict([(a, "{0}".format(failures[a])) for a in failures])
salt.output.display_output(_failures, opts=__opts__)
raise TestError('test failure => non 0 exit code')
# if no failure, be sure not to mark retcode as a failure
__context__['retcode'] = 0
return success
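# Usage note (an assumption based on the __salt__/__opts__ dunders above): this file is
# a Salt execution module, so its functions would typically be invoked through Salt,
# for example:
#   salt-call mc_test.run_tests
#   salt-call mc_test.run_tests flavors=travis echo=True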
def run_travis_tests(use_vt=False, echo=True, logcapture=False):
use_vt = True
return run_tests(
'travis', use_vt=use_vt, echo=echo, logcapture=logcapture)
# vim:set et sts=4 ts=4 tw=80:
|
test_ucx_options.py
|
import multiprocessing as mp
import numpy
import pytest
import dask
from dask import array as da
from distributed import Client
from distributed.deploy.local import LocalCluster
from dask_cuda.utils import _ucx_110
mp = mp.get_context("spawn")
ucp = pytest.importorskip("ucp")
# Note: each of the following tests is executed in a new process so that the
# UCX options of different tests don't conflict with one another.
# Furthermore, all tests do some computation to trigger initialization
# of UCX before retrieving the current config.
def _test_global_option(seg_size):
"""Test setting UCX options through dask's global config"""
tls = "tcp,cuda_copy" if _ucx_110 else "tcp,sockcm,cuda_copy"
tls_priority = "tcp" if _ucx_110 else "sockcm"
dask.config.update(
dask.config.global_config,
{
"ucx": {
"SEG_SIZE": seg_size,
"TLS": tls,
"SOCKADDR_TLS_PRIORITY": tls_priority,
},
},
priority="new",
)
with LocalCluster(
protocol="ucx",
dashboard_address=None,
n_workers=1,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
res = da.from_array(numpy.arange(10000), chunks=(1000,))
res = res.sum().compute()
assert res == 49995000
conf = ucp.get_config()
assert conf["SEG_SIZE"] == seg_size
@pytest.mark.xfail(reason="https://github.com/rapidsai/dask-cuda/issues/627")
def test_global_option():
for seg_size in ["2K", "1M", "2M"]:
p = mp.Process(target=_test_global_option, args=(seg_size,))
p.start()
p.join()
assert not p.exitcode
|
00Template.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# -----------------------------------------------------------------------------------------------------------
# Name: 00Template.py
# Purpose: This is a copy of an early version of 01ThreadCounter.py
# saved as a template for other modules
#
# Author: Gabriel Marti Fuentes
# email: gabimarti at gmail dot com
# GitHub: https://github.com/gabimarti
# Created: 02/08/2019
# License: MIT
# -----------------------------------------------------------------------------------------------------------
#
import argparse
import random
import threading
import time
########################################################
# CONSTANTS
########################################################
DESCRIPTION = 'Template_Description'
EPILOG = 'What do you want me to tell you?'
MAXTHREADS = 10000
DELAYBETWEENTHREADS = 0 # Milliseconds of delay between threads
VERBOSE_LEVELS = [ "basic", "a few", "insane info" ] # Verbose levels description
MAXRANDOMSLEEP = 10 # Max time in seconds for random sleep
########################################################
# VARIABLES
########################################################
thread_counter = 0 # Total executed threads
thread_active_counter = 0 # Number of current active threads
thread_list = [] # List of active threads
max_threads = MAXTHREADS # Max threads
verbose = 0 # Verbose level
delay_threads = DELAYBETWEENTHREADS # Delay between threads
max_random_sleep = MAXRANDOMSLEEP # Max random sleep
########################################################
# FUNCTIONS
########################################################
# Wait for the indicated time in milliseconds
def delay_miliseconds(milisec):
if milisec == 0:
return None # Avoid making unnecessary call to time.sleep function
time.sleep(milisec/1000)
# Wait a random time
def do_something_more(thread_id, max_random_sleep, verbose):
global thread_active_counter
    seconds = random.randint(0, max_random_sleep)  # randint includes both endpoints
if verbose >= 2:
print("Begin thread id %d : Active counter %d : Random Sleep %d" % (thread_id, thread_active_counter, seconds))
time.sleep(seconds)
if verbose >= 2:
print("End thread id %d : Active counter %d " % (thread_id, thread_active_counter))
# Increase counters and call auxiliary function
def do_something(thread_id, max_random_sleep, verbose):
global thread_counter, thread_active_counter
thread_counter += 1
thread_active_counter += 1
do_something_more(thread_id, max_random_sleep, verbose)
thread_active_counter -= 1
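# Note (not part of the original template): thread_counter and thread_active_counter
# above are updated from many threads without a lock, so the totals can race under
# load. A minimal lock-protected variant could look like this; counter_lock and
# do_something_locked are illustrative names, not existing ones.
counter_lock = threading.Lock()
def do_something_locked(thread_id, max_random_sleep, verbose):
    global thread_counter, thread_active_counter
    with counter_lock:
        thread_counter += 1
        thread_active_counter += 1
    do_something_more(thread_id, max_random_sleep, verbose)
    with counter_lock:
        thread_active_counter -= 1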
# Parse command line parameters
def parse_params():
parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
parser.add_argument("-m", "--maxthreads", type=int, default=MAXTHREADS,
help="Indicates the maximum number of threads. Default value: " + str(MAXTHREADS))
parser.add_argument("-d", "--delay", type=int, default=DELAYBETWEENTHREADS,
help="Milliseconds of delay between threads call. Default value: " + str(DELAYBETWEENTHREADS))
parser.add_argument("-v", "--verbose", type=int, choices=[0, 1, 2], default=0,
help="Increase output verbosity. Default value: 0")
args = parser.parse_args()
return args
# Main
def main():
global max_threads, delay_threads, verbose, max_random_sleep
# Check and parse parameters
args = parse_params()
verbose = args.verbose
max_threads = args.maxthreads
delay_threads = args.delay
max_random_sleep = MAXRANDOMSLEEP
print("Verbose level "+str(VERBOSE_LEVELS[verbose]))
print("Max %d Threads " % (max_threads))
print("Delay between Threads %d milliseconds" % (delay_threads))
print("Launching ...")
start = time.perf_counter()
    # Launch threads and execute function do_something()
for t_id in range(1, int(max_threads)+1):
thread_handler = threading.Thread(target=do_something, args=(t_id, max_random_sleep, verbose))
thread_handler.start()
thread_list.append(thread_handler)
delay_miliseconds(delay_threads) # delay between threads
if verbose >= 1:
print("Finished threads launch.")
print("Total threads %d : Current active %d" % (thread_counter, thread_active_counter))
partialtime = time.perf_counter() - start
print("Launched %d threads in %6.2f seconds " % (thread_counter, partialtime))
# Wait to finish threads
for thread_wait in thread_list:
thread_wait.join()
totaltime = time.perf_counter() - start
print("Performed %d threads in %6.2f seconds " % (thread_counter, totaltime))
print("Current active threads %d" % (thread_active_counter))
if __name__ == '__main__':
main()
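# Example invocation (argument values are arbitrary): launch 100 threads with a
# 10 ms delay between them at verbosity level 1:
#   python3 00Template.py -m 100 -d 10 -v 1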
|
test_ftplib.py
|
"""Test script for ftplib module."""
# Modified by Giampaolo Rodola' to test FTP class, IPv6 and TLS
# environment
import ftplib
import asyncore
import asynchat
import socket
import StringIO
import errno
import os
try:
import ssl
except ImportError:
ssl = None
from unittest import TestCase, SkipTest, skipUnless
from test import test_support
from test.test_support import HOST, HOSTv6
threading = test_support.import_module('threading')
TIMEOUT = 3
# the dummy data returned by server over the data channel when
# RETR, LIST and NLST commands are issued
RETR_DATA = 'abcde12345\r\n' * 1000
LIST_DATA = 'foo\r\nbar\r\n'
NLST_DATA = 'foo\r\nbar\r\n'
class DummyDTPHandler(asynchat.async_chat):
dtp_conn_closed = False
def __init__(self, conn, baseclass):
asynchat.async_chat.__init__(self, conn)
self.baseclass = baseclass
self.baseclass.last_received_data = ''
def handle_read(self):
self.baseclass.last_received_data += self.recv(1024)
def handle_close(self):
# XXX: this method can be called many times in a row for a single
# connection, including in clear-text (non-TLS) mode.
# (behaviour witnessed with test_data_connection)
if not self.dtp_conn_closed:
self.baseclass.push('226 transfer complete')
self.close()
self.dtp_conn_closed = True
def handle_error(self):
raise
class DummyFTPHandler(asynchat.async_chat):
dtp_handler = DummyDTPHandler
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator("\r\n")
self.in_buffer = []
self.dtp = None
self.last_received_cmd = None
self.last_received_data = ''
self.next_response = ''
self.rest = None
self.next_retr_data = RETR_DATA
self.push('220 welcome')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = ''.join(self.in_buffer)
self.in_buffer = []
if self.next_response:
self.push(self.next_response)
self.next_response = ''
cmd = line.split(' ')[0].lower()
self.last_received_cmd = cmd
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('550 command "%s" not understood.' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data + '\r\n')
def cmd_port(self, arg):
addr = map(int, arg.split(','))
ip = '%d.%d.%d.%d' %tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_pasv(self, arg):
sock = socket.socket()
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
ip, port = sock.getsockname()[:2]
ip = ip.replace('.', ',')
p1, p2 = divmod(port, 256)
self.push('227 entering passive mode (%s,%d,%d)' %(ip, p1, p2))
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_eprt(self, arg):
af, ip, port = arg.split(arg[0])[1:-1]
port = int(port)
s = socket.create_connection((ip, port), timeout=10)
self.dtp = self.dtp_handler(s, baseclass=self)
self.push('200 active data connection established')
def cmd_epsv(self, arg):
sock = socket.socket(socket.AF_INET6)
sock.bind((self.socket.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(10)
port = sock.getsockname()[1]
self.push('229 entering extended passive mode (|||%d|)' %port)
conn, addr = sock.accept()
self.dtp = self.dtp_handler(conn, baseclass=self)
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
self.push('331 username ok')
def cmd_pass(self, arg):
self.push('230 password ok')
def cmd_acct(self, arg):
self.push('230 acct ok')
def cmd_rnfr(self, arg):
self.push('350 rnfr ok')
def cmd_rnto(self, arg):
self.push('250 rnto ok')
def cmd_dele(self, arg):
self.push('250 dele ok')
def cmd_cwd(self, arg):
self.push('250 cwd ok')
def cmd_size(self, arg):
self.push('250 1000')
def cmd_mkd(self, arg):
self.push('257 "%s"' %arg)
def cmd_rmd(self, arg):
self.push('250 rmd ok')
def cmd_pwd(self, arg):
self.push('257 "pwd ok"')
def cmd_type(self, arg):
self.push('200 type ok')
def cmd_quit(self, arg):
self.push('221 quit ok')
self.close()
def cmd_stor(self, arg):
self.push('125 stor ok')
def cmd_rest(self, arg):
self.rest = arg
self.push('350 rest ok')
def cmd_retr(self, arg):
self.push('125 retr ok')
if self.rest is not None:
offset = int(self.rest)
else:
offset = 0
self.dtp.push(self.next_retr_data[offset:])
self.dtp.close_when_done()
self.rest = None
def cmd_list(self, arg):
self.push('125 list ok')
self.dtp.push(LIST_DATA)
self.dtp.close_when_done()
def cmd_nlst(self, arg):
self.push('125 nlst ok')
self.dtp.push(NLST_DATA)
self.dtp.close_when_done()
def cmd_setlongretr(self, arg):
# For testing. Next RETR will return long line.
self.next_retr_data = 'x' * int(arg)
self.push('125 setlongretr ok')
class DummyFTPServer(asyncore.dispatcher, threading.Thread):
handler = DummyFTPHandler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
try:
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
except:
# unregister the server on bind() error,
# needed by TestIPv6Environment.setUpClass()
self.del_channel()
raise
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accept(self):
conn, addr = self.accept()
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
if ssl is not None:
CERTFILE = os.path.join(os.path.dirname(__file__), "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__), "pycacert.pem")
class SSLConnection(object, asyncore.dispatcher):
"""An asyncore.dispatcher subclass supporting TLS/SSL."""
_ssl_accepting = False
_ssl_closing = False
def secure_connection(self):
socket = ssl.wrap_socket(self.socket, suppress_ragged_eofs=False,
certfile=CERTFILE, server_side=True,
do_handshake_on_connect=False,
ssl_version=ssl.PROTOCOL_SSLv23)
self.del_channel()
self.set_socket(socket)
self._ssl_accepting = True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def _do_ssl_shutdown(self):
self._ssl_closing = True
try:
self.socket = self.socket.unwrap()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
except socket.error as err:
# Any "socket error" corresponds to a SSL_ERROR_SYSCALL return
# from OpenSSL's SSL_shutdown(), corresponding to a
# closed socket condition. See also:
# http://www.mail-archive.com/openssl-users@openssl.org/msg60710.html
pass
self._ssl_closing = False
if getattr(self, '_ccc', False) is False:
super(SSLConnection, self).close()
else:
pass
def handle_read_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_read_event()
def handle_write_event(self):
if self._ssl_accepting:
self._do_ssl_handshake()
elif self._ssl_closing:
self._do_ssl_shutdown()
else:
super(SSLConnection, self).handle_write_event()
def send(self, data):
try:
return super(SSLConnection, self).send(data)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN,
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return 0
raise
def recv(self, buffer_size):
try:
return super(SSLConnection, self).recv(buffer_size)
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return b''
if err.args[0] in (ssl.SSL_ERROR_EOF, ssl.SSL_ERROR_ZERO_RETURN):
self.handle_close()
return b''
raise
def handle_error(self):
raise
def close(self):
if (isinstance(self.socket, ssl.SSLSocket) and
self.socket._sslobj is not None):
self._do_ssl_shutdown()
else:
super(SSLConnection, self).close()
class DummyTLS_DTPHandler(SSLConnection, DummyDTPHandler):
"""A DummyDTPHandler subclass supporting TLS/SSL."""
def __init__(self, conn, baseclass):
DummyDTPHandler.__init__(self, conn, baseclass)
if self.baseclass.secure_data_channel:
self.secure_connection()
class DummyTLS_FTPHandler(SSLConnection, DummyFTPHandler):
"""A DummyFTPHandler subclass supporting TLS/SSL."""
dtp_handler = DummyTLS_DTPHandler
def __init__(self, conn):
DummyFTPHandler.__init__(self, conn)
self.secure_data_channel = False
def cmd_auth(self, line):
"""Set up secure control channel."""
self.push('234 AUTH TLS successful')
self.secure_connection()
def cmd_pbsz(self, line):
"""Negotiate size of buffer for secure data transfer.
For TLS/SSL the only valid value for the parameter is '0'.
Any other value is accepted but ignored.
"""
self.push('200 PBSZ=0 successful.')
def cmd_prot(self, line):
"""Setup un/secure data channel."""
arg = line.upper()
if arg == 'C':
self.push('200 Protection set to Clear')
self.secure_data_channel = False
elif arg == 'P':
self.push('200 Protection set to Private')
self.secure_data_channel = True
else:
self.push("502 Unrecognized PROT type (use C or P).")
class DummyTLS_FTPServer(DummyFTPServer):
handler = DummyTLS_FTPHandler
class TestFTPClass(TestCase):
def setUp(self):
self.server = DummyFTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP(timeout=10)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(), '220 welcome')
def test_sanitize(self):
self.assertEqual(self.client.sanitize('foo'), repr('foo'))
self.assertEqual(self.client.sanitize('pass 12345'), repr('pass *****'))
self.assertEqual(self.client.sanitize('PASS 12345'), repr('PASS *****'))
def test_exceptions(self):
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\n0')
self.assertRaises(ValueError, self.client.sendcmd, 'echo 40\r0')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 400')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'echo 499')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 500')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'echo 599')
self.assertRaises(ftplib.error_proto, self.client.sendcmd, 'echo 999')
def test_all_errors(self):
exceptions = (ftplib.error_reply, ftplib.error_temp, ftplib.error_perm,
ftplib.error_proto, ftplib.Error, IOError, EOFError)
for x in exceptions:
try:
raise x('exception not included in all_errors set')
except ftplib.all_errors:
pass
def test_set_pasv(self):
# passive mode is supposed to be enabled by default
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(True)
self.assertTrue(self.client.passiveserver)
self.client.set_pasv(False)
self.assertFalse(self.client.passiveserver)
def test_voidcmd(self):
self.client.voidcmd('echo 200')
self.client.voidcmd('echo 299')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 199')
self.assertRaises(ftplib.error_reply, self.client.voidcmd, 'echo 300')
def test_login(self):
self.client.login()
def test_acct(self):
self.client.acct('passwd')
def test_rename(self):
self.client.rename('a', 'b')
self.server.handler_instance.next_response = '200'
self.assertRaises(ftplib.error_reply, self.client.rename, 'a', 'b')
def test_delete(self):
self.client.delete('foo')
self.server.handler_instance.next_response = '199'
self.assertRaises(ftplib.error_reply, self.client.delete, 'foo')
def test_size(self):
self.client.size('foo')
def test_mkd(self):
dir = self.client.mkd('/foo')
self.assertEqual(dir, '/foo')
def test_rmd(self):
self.client.rmd('foo')
def test_cwd(self):
dir = self.client.cwd('/foo')
self.assertEqual(dir, '250 cwd ok')
def test_pwd(self):
dir = self.client.pwd()
self.assertEqual(dir, 'pwd ok')
def test_quit(self):
self.assertEqual(self.client.quit(), '221 quit ok')
# Ensure the connection gets closed; sock attribute should be None
self.assertEqual(self.client.sock, None)
def test_retrbinary(self):
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
def test_retrbinary_rest(self):
for rest in (0, 10, 20):
received = []
self.client.retrbinary('retr', received.append, rest=rest)
self.assertEqual(''.join(received), RETR_DATA[rest:],
msg='rest test case %d %d %d' % (rest,
len(''.join(received)),
len(RETR_DATA[rest:])))
def test_retrlines(self):
received = []
self.client.retrlines('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA.replace('\r\n', ''))
def test_storbinary(self):
f = StringIO.StringIO(RETR_DATA)
self.client.storbinary('stor', f)
self.assertEqual(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storbinary('stor', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_storbinary_rest(self):
f = StringIO.StringIO(RETR_DATA)
for r in (30, '30'):
f.seek(0)
self.client.storbinary('stor', f, rest=r)
self.assertEqual(self.server.handler_instance.rest, str(r))
def test_storlines(self):
f = StringIO.StringIO(RETR_DATA.replace('\r\n', '\n'))
self.client.storlines('stor', f)
self.assertEqual(self.server.handler_instance.last_received_data, RETR_DATA)
# test new callback arg
flag = []
f.seek(0)
self.client.storlines('stor foo', f, callback=lambda x: flag.append(None))
self.assertTrue(flag)
def test_nlst(self):
self.client.nlst()
self.assertEqual(self.client.nlst(), NLST_DATA.split('\r\n')[:-1])
def test_dir(self):
l = []
self.client.dir(lambda x: l.append(x))
self.assertEqual(''.join(l), LIST_DATA.replace('\r\n', ''))
def test_makeport(self):
self.client.makeport()
# IPv4 is in use, just make sure send_eprt has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'port')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
# IPv4 is in use, just make sure send_epsv has not been used
self.assertEqual(self.server.handler_instance.last_received_cmd, 'pasv')
def test_line_too_long(self):
self.assertRaises(ftplib.Error, self.client.sendcmd,
'x' * self.client.maxline * 2)
def test_retrlines_too_long(self):
self.client.sendcmd('SETLONGRETR %d' % (self.client.maxline * 2))
received = []
self.assertRaises(ftplib.Error,
self.client.retrlines, 'retr', received.append)
def test_storlines_too_long(self):
f = StringIO.StringIO('x' * self.client.maxline * 2)
self.assertRaises(ftplib.Error, self.client.storlines, 'stor', f)
@skipUnless(socket.has_ipv6, "IPv6 not enabled")
class TestIPv6Environment(TestCase):
@classmethod
def setUpClass(cls):
try:
DummyFTPServer((HOST, 0), af=socket.AF_INET6)
except socket.error:
raise SkipTest("IPv6 not enabled")
def setUp(self):
self.server = DummyFTPServer((HOSTv6, 0), af=socket.AF_INET6)
self.server.start()
self.client = ftplib.FTP()
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_af(self):
self.assertEqual(self.client.af, socket.AF_INET6)
def test_makeport(self):
self.client.makeport()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'eprt')
def test_makepasv(self):
host, port = self.client.makepasv()
conn = socket.create_connection((host, port), 10)
conn.close()
self.assertEqual(self.server.handler_instance.last_received_cmd, 'epsv')
def test_transfer(self):
def retr():
received = []
self.client.retrbinary('retr', received.append)
self.assertEqual(''.join(received), RETR_DATA)
self.client.set_pasv(True)
retr()
self.client.set_pasv(False)
retr()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClassMixin(TestFTPClass):
"""Repeat TestFTPClass tests starting the TLS layer for both control
and data connections first.
"""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=10)
self.client.connect(self.server.host, self.server.port)
# enable TLS
self.client.auth()
self.client.prot_p()
@skipUnless(ssl, "SSL not available")
class TestTLS_FTPClass(TestCase):
"""Specific TLS_FTP class tests."""
def setUp(self):
self.server = DummyTLS_FTPServer((HOST, 0))
self.server.start()
self.client = ftplib.FTP_TLS(timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
def tearDown(self):
self.client.close()
self.server.stop()
def test_control_connection(self):
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
def test_data_connection(self):
# clear text
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# secured, after PROT P
self.client.prot_p()
sock = self.client.transfercmd('list')
self.assertIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
# PROT C is issued, the connection must be in cleartext again
self.client.prot_c()
sock = self.client.transfercmd('list')
self.assertNotIsInstance(sock, ssl.SSLSocket)
sock.close()
self.assertEqual(self.client.voidresp(), "226 transfer complete")
def test_login(self):
# login() is supposed to implicitly secure the control connection
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.login()
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
# make sure that AUTH TLS doesn't get issued again
self.client.login()
def test_auth_issued_twice(self):
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
def test_auth_ssl(self):
try:
self.client.ssl_version = ssl.PROTOCOL_SSLv23
self.client.auth()
self.assertRaises(ValueError, self.client.auth)
finally:
self.client.ssl_version = ssl.PROTOCOL_TLSv1
def test_context(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, ftplib.FTP_TLS, keyfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
context=ctx)
self.assertRaises(ValueError, ftplib.FTP_TLS, certfile=CERTFILE,
keyfile=CERTFILE, context=ctx)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
self.client.connect(self.server.host, self.server.port)
self.assertNotIsInstance(self.client.sock, ssl.SSLSocket)
self.client.auth()
self.assertIs(self.client.sock.context, ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.client.prot_p()
sock = self.client.transfercmd('list')
try:
self.assertIs(sock.context, ctx)
self.assertIsInstance(sock, ssl.SSLSocket)
finally:
sock.close()
def test_check_hostname(self):
self.client.quit()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
ctx.load_verify_locations(CAFILE)
self.client = ftplib.FTP_TLS(context=ctx, timeout=TIMEOUT)
# 127.0.0.1 doesn't match SAN
self.client.connect(self.server.host, self.server.port)
with self.assertRaises(ssl.CertificateError):
self.client.auth()
# exception quits connection
self.client.connect(self.server.host, self.server.port)
self.client.prot_p()
with self.assertRaises(ssl.CertificateError):
self.client.transfercmd("list").close()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.auth()
self.client.quit()
self.client.connect("localhost", self.server.port)
self.client.prot_p()
self.client.transfercmd("list").close()
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
self.port = test_support.bind_port(self.sock)
threading.Thread(target=self.server, args=(self.evt,self.sock)).start()
# Wait for the server to be ready.
self.evt.wait()
self.evt.clear()
ftplib.FTP.port = self.port
def tearDown(self):
self.evt.wait()
def server(self, evt, serv):
# This method sets the evt 3 times:
# 1) when the connection is ready to be accepted.
# 2) when it is safe for the caller to close the connection
# 3) when we have closed the socket
serv.listen(5)
# (1) Signal the caller that we are ready to accept the connection.
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
conn.send("1 Hola mundo\n")
# (2) Signal the caller that it is safe to close the socket.
evt.set()
conn.close()
finally:
serv.close()
# (3) Signal the caller that we are done.
evt.set()
def testTimeoutDefault(self):
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutNone(self):
# no timeout -- do not use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
ftp = ftplib.FTP(HOST, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(ftp.sock.gettimeout())
self.evt.wait()
ftp.close()
def testTimeoutValue(self):
# a value
ftp = ftplib.FTP(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutConnect(self):
ftp = ftplib.FTP()
ftp.connect(HOST, timeout=30)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDifferentOrder(self):
ftp = ftplib.FTP(timeout=30)
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def testTimeoutDirectAccess(self):
ftp = ftplib.FTP()
ftp.timeout = 30
ftp.connect(HOST)
self.assertEqual(ftp.sock.gettimeout(), 30)
self.evt.wait()
ftp.close()
def test_main():
tests = [TestFTPClass, TestTimeouts,
TestIPv6Environment,
TestTLS_FTPClassMixin, TestTLS_FTPClass]
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
batch_ops_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the currently experimental in-graph batch ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from tensorflow.contrib.batching.python.ops import batch_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
def delayed_plus1(x):
"""Sleeps for 100ms then returns x+1."""
time.sleep(0.1)
return x + 1
class BatchOpsTest(test.TestCase):
"""Tests for batch_ops.{un,}batch."""
def testBasicUnbatchV1Decorated(self):
"""Tests that the batch_function_v1 decorator works."""
with self.cached_session() as sess:
@batch_ops.batch_function_v1(1, 10, 100000)
def computation(in_t):
return in_t + 1
inp = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
result = computation(inp)
thread_results = []
def worker():
thread_results.extend(sess.run([result], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([result], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [3])
def testUnbatchGrad(self):
"""Tests that batch and unbatch are differentiable."""
with self.cached_session() as sess:
inp = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
batched, index, id_t = batch_ops.batch(
[inp], num_batch_threads=1, max_batch_size=2,
batch_timeout_micros=36000000, grad_timeout_micros=1000000,
batching_queue="")
computation = batched[0] * batched[0]
result = batch_ops.unbatch(computation, index, id_t,
timeout_micros=1000000, shared_name="unbatch")
grad = gradients_impl.gradients(result, inp)
thread_results = []
def worker():
thread_results.extend(sess.run([grad], feed_dict={inp: [1]}))
worker_thread = threading.Thread(target=worker)
worker_thread.start()
main_results = sess.run([grad], feed_dict={inp: [2]})
worker_thread.join()
self.assertEqual(thread_results[0], [2])
self.assertEqual(main_results[0], [4])
if __name__ == "__main__":
test.main()
|
AIData.py
|
# import python libs
from threading import Thread
# import project libs
from controller import Client
from ai import AiDataEngine
class AIData:
"""start all the data threading
pass it the master signal class for emmission"""
def __init__(self, ai_signal_obj, harmony_signal):
self.ai_signal = ai_signal_obj
self.harmony_signal = harmony_signal
# instantiate the AI server
engine = AiDataEngine(self.ai_signal, self.harmony_signal, speed=1)
# instantiate the controller client and pass AI engine
cl = Client(engine)
# declares all threads
t1 = Thread(target=engine.make_data)
t2 = Thread(target=engine.affect)
t3 = Thread(target=cl.snd_listen)
# t4 = Thread(target=cl.data_exchange)
# t5 = Thread(target=cl.sound_bot)
        # mark them as daemon threads
t1.daemon = True
t2.daemon = True
# t3.daemon = True
# t4.daemon = True
# starts them all
t1.start()
t2.start()
t3.start()
# t4.start()
# t5.start()
|
pruebas.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
import os
import sys
import csv
import time
import datetime
import math
from tkinter import *
from PIL import Image, ImageTk
from reportlab.lib.units import mm, inch
from reportlab.pdfgen import canvas as pdf
from mbarete import geometria
global d,canvas_width,canvas_height
d={
'img':os.getcwd()+'\\'+"img"+'\\',
'audio':os.getcwd()+'\\'+"audio"+'\\'
}
canvas_width = 1100
canvas_height =1000
print(datetime.datetime.now())
def timeConOsPath():
import os, sys,time
    # time: quick experiments with the time library
    print('time.gmtime(0):', time.gmtime(0))
    print('time.ctime(0):', time.ctime(0))
    print('sys.argv[0]:', sys.argv[0])
    print('os.path.getatime(sys.argv[0]):', os.path.getatime(sys.argv[0]))
    print('time.ctime(os.path.getmtime(sys.argv[0])):', time.ctime(os.path.getmtime(sys.argv[0])), 'time of the last modification of path')
    print('time.ctime(os.path.getctime(sys.argv[0])):', time.ctime(os.path.getctime(sys.argv[0])), 'on some systems (such as Unix) this is the time of the last metadata change; on others (such as Windows) it is the creation time of path')
    print('time.ctime(os.path.getatime(sys.argv[0])):', time.ctime(os.path.getatime(sys.argv[0])), 'time of the last access of path')
print(r'strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime(os.path.getatime(sys.argv[0]))):',time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime(os.path.getatime(sys.argv[0]))))
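# Companion sketch (not in the original script): the os.path.get*time() values printed
# above are epoch seconds as floats; datetime.fromtimestamp() turns them into a readable
# local timestamp, which is often clearer than time.ctime().
def mtime_readable(path=sys.argv[0]):
    return datetime.datetime.fromtimestamp(os.path.getmtime(path))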
def VariablesDeEntorno():
import os
variables_de_entorno={env:os.environ[env] for env in os.environ}
for variable in variables_de_entorno:
print("%s: %s" % (variable, variables_de_entorno[variable]))
def powerPath(pwd=os.getcwd()):
    """
    The os.path module is always the right module for the operating system Python is
    running on, and is therefore the one to use for local paths. You can, however,
    import and use the individual modules if you need to manipulate a path that is
    always in one specific format. They all share the same interface:
    posixpath for UNIX-style paths
    ntpath for Windows paths
    """
    import os
    supportUnicode = os.path.supports_unicode_filenames  # True if arbitrary Unicode strings can be used as file names (within filesystem limits)
    print(supportUnicode)
    pwdA = pwd  # path of the current location where this script is running
    os.chdir(pwdA)
    os.chdir('..')
    pwdB = os.getcwd()  # path of the directory one level above the current one
    os.chdir(pwdA)
    fullPathA = [pwdA + '\\' + d for d in os.listdir(pwdA)]  # list the files in pwdA and prefix each with its full path
    fullPathB = [pwdB + '\\' + d for d in os.listdir(pwdB)]  # list the files in pwdB and prefix each with its full path
    print('os.path.commonpath(fullPathA+fullPathB)', os.path.commonpath(fullPathA + fullPathB))  # longest common sub-path of all the paths in the sequence
    print('os.path.commonprefix(fullPathA+fullPathB)', os.path.commonprefix(fullPathA + fullPathB))  # longest path prefix (character by character) common to all the paths
pathB=''
power={}
for path in fullPathA: #recorremos todos los archivos para obtener los datos de cada archivo
if not pathB:
pathB=path
        # collect metadata about the path
        power['basename'] = os.path.basename(path) # final component of pathname path
        power['abspath']= os.path.abspath(path) # normalized, absolute version of pathname path
        power['dirname']= os.path.dirname(path) # directory part of pathname path
        power['exists'] = os.path.exists(path) # True if path refers to an existing path or open file descriptor; False for broken symlinks, and on some platforms False when os.stat() is not permitted even though the path physically exists
        power['lexists'] = os.path.lexists(path) # True if path refers to an existing path; also True for broken symbolic links
        power['expanduser'] = os.path.expanduser(path) # on Unix and Windows, returns the argument with an initial ~ or ~user replaced by that user's home directory
        power['expandvars']=os.path.expandvars(path) # returns the argument with environment variables expanded
        power['ultimoAcceso'] = os.path.getatime(path) # time of last access of path, as a float giving seconds since the epoch
        power['ultimaModificacion'] = os.path.getmtime(path) # time of last modification of path, as a float giving seconds since the epoch
        power['creacionOModificacion']=os.path.getctime(path) # system ctime: last metadata change on some systems (e.g. Unix), creation time on others (e.g. Windows), as seconds since the epoch
        power['getsize'] =os.path.getsize(path) # size of path in bytes; raises OSError if the file does not exist or is inaccessible
        power['isabs'] = os.path.isabs(path) # True if path is absolute: it begins with a slash on Unix, or with a (back)slash after chopping off a potential drive letter on Windows
        power['isfile'] =os.path.isfile(path) # True if path is an existing file; follows symlinks, so islink() and isfile() can both be true for the same path
        power['isdir'] = os.path.isdir(path) # True if path is an existing directory; follows symlinks, so islink() and isdir() can both be true for the same path
        power['islink'] =os.path.islink(path) # True if path refers to an existing directory entry that is a symbolic link; always False if the Python runtime does not support symlinks
        power['ismount']=os.path.ismount(path) # True if path is a mount point: on POSIX it checks whether path/.. is on a different device than path, or whether path/.. and path point to the same i-node on the same device (bind mounts on the same filesystem are not reliably detected); on Windows a drive-letter root and a UNC share are always mount points, and GetVolumePathName is consulted for any other path
        power['join']=os.path.join(path, sys.argv[0])
        # Join one or more path components intelligently.  The return value is the concatenation of path and
        # every member of *paths with exactly one directory separator (os.sep) after each non-empty part
        # except the last, so the result only ends in a separator when the last part is empty.  If a
        # component is an absolute path, all previous components are discarded and joining continues from it.
        # On Windows the drive letter is not reset by a rooted component such as r'\foo', but a component
        # containing a drive letter discards everything before it and resets the drive.  Because every drive
        # has its own current directory, os.path.join("c:", "foo") yields c:foo, a path relative to the
        # current directory on drive C:, not c:\foo.
        power['normcase']=os.path.normcase(path) # normalizes the case of a pathname: on Windows it lowercases the path and converts forward slashes to backslashes; on other operating systems it returns the path unchanged
        power['normpath']=os.path.normpath(path) # normalizes a pathname by collapsing redundant separators and up-level references, so A//B, A/B/, A/./B and A/foo/../B all become A/B; this string manipulation may change the meaning of a path containing symlinks.  On Windows, forward slashes become backslashes.  Use normcase() to normalize case
        power['realpath']=os.path.realpath(path) # canonical path of the given filename, with any symbolic links resolved (where supported by the operating system)
        power['relpath']=os.path.relpath(path, start=os.curdir) # path relative to the current directory or to the optional start directory; this is a pure path computation, the filesystem is never consulted to confirm the existence or nature of path or start
        # start defaults to os.curdir
        power['samefile']=os.path.samefile(pathB, path) # True if both pathname arguments refer to the same file or directory, determined by device and i-node number; raises an exception if os.stat() fails on either pathname
        #sameOpenFile=os.path.sameopenfile(os.stat(pathB), os.stat(path)) # True if the file descriptors fp1 and fp2 refer to the same file
        power['samestat']=os.path.samestat(os.stat(pathB), os.stat(path)) # True if the stat tuples (as returned by os.fstat(), os.lstat() or os.stat()) refer to the same file; this is the underlying comparison used by samefile() and sameopenfile()
        power['split']=os.path.split(path) # splits path into a pair (head, tail) where tail is the last pathname component and head is everything leading up to it; tail never contains a slash, head is empty if path has no slash, and trailing slashes are stripped from head unless it is the root.  In every case join(head, tail) points to the same location as path (though the strings may differ)
        power['splitdrive']=os.path.splitdrive(path)
        # Splits path into a pair (drive, tail) where drive is either a mount point or an empty string; on
        # systems without drive specifications drive is always empty, and drive + tail always equals path.
        # On Windows this separates the drive / UNC share point from the relative path: if the path contains
        # a drive letter, drive holds everything up to and including the colon, e.g. splitdrive("c:/dir")
        # returns ("c:", "/dir"); for a UNC path, drive holds the host and share up to but not including the
        # fourth separator, e.g. splitdrive("//host/computer/dir") returns ("//host/computer", "/dir").
        power['splitext']=os.path.splitext(path) # splits path into a pair (root, ext) such that root + ext == path and ext is either empty or begins with a period and contains at most one period.  Leading periods of the basename are ignored: splitext('.cshrc') returns ('.cshrc', '')
print('\n')
for atributo in power:
print('.'+atributo+': ',power[atributo])
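# Note (added): a minimal sketch of how the os.path calls demonstrated above combine in practice,
# e.g. to find the most recently modified file in a directory.  Illustrative helper only; the name
# _archivo_mas_reciente is not part of the original test suite.
def _archivo_mas_reciente(directorio='.'):
    import os
    archivos = [os.path.join(directorio, f) for f in os.listdir(directorio)
                if os.path.isfile(os.path.join(directorio, f))]
    # getmtime gives seconds since the epoch, so max() picks the newest file
    return max(archivos, key=os.path.getmtime) if archivos else None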
def powerPDF():
import os,datetime
from reportlab.lib.units import mm, inch
from reportlab.pdfgen import canvas as pdf
    # start of the test code
    # Copyright ReportLab Europe Ltd. 2000-2017
    # see license.txt for license details
    # history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/lib/pagesizes.py
    # __version__='3.4.18'
    # ISO 216 standard paper sizes; see e.g. https://en.wikipedia.org/wiki/ISO_216
hoja={
'A0':(841*mm,1189*mm),
'A1':(594*mm,841*mm),
'A2':(420*mm,594*mm),
'A3':(297*mm,420*mm),
'A4':(210*mm,297*mm),
'A5':(148*mm,210*mm),
'A6':(105*mm,148*mm),
'A7':(74*mm,105*mm),
'A8':(52*mm,74*mm),
'A9':(37*mm,52*mm),
'A10':(26*mm,37*mm),
'B0':(1000*mm,1414*mm),
'B1':(707*mm,1000*mm),
'B2':(500*mm,707*mm),
'B3':(353*mm,500*mm),
'B4':(250*mm,353*mm),
'B5':(176*mm,250*mm),
'B6':(125*mm,176*mm),
'B7':(88*mm,125*mm),
'B8':(62*mm,88*mm),
'B9':(44*mm,62*mm),
'B10':(31*mm,44*mm),
'C0':(917*mm,1297*mm),
'C1':(648*mm,917*mm),
'C2':(458*mm,648*mm),
'C3':(324*mm,458*mm),
'C4':(229*mm,324*mm),
'C5':(162*mm,229*mm),
'C6':(114*mm,162*mm),
'C7':(81*mm,114*mm),
'C8':(57*mm,81*mm),
'C9':(40*mm,57*mm),
'C10':(28*mm,40*mm)
}
print([hj for hj in hoja])
txt={}
txt.setdefault('Nombre',input('Ingrese su nombre: '))
txt.setdefault('Direccion',input('Ingrese su direccon de domicilio : '))
txt.setdefault('Telefono',input('Ingrese su numero de telefono: '))
txt.setdefault('Nacimiento',input('Ingrese su Fecha Nacimiento: '))
txt.setdefault('Sangre',input('Ingrese su tipo de sangre: '))
label=[key for key in txt]
Lsize=700
print("Los valores ingresados son")
for key in txt:
print(key,':',txt[key])
canvas = pdf.Canvas(txt['Nombre']+".pdf", pagesize=hoja['A4'])
canvas.setLineWidth(.3)
canvas.setFont('Helvetica', 12)
canvas.drawString(30,750,'CARTA DE PRUEBA')
canvas.drawString(30,735,"setFont('Helvetica', 12)")
canvas.drawString(500,750,str(datetime.date.today()))
canvas.line(480,747,580,747)
canvas.drawString(275,725,'ESTIMADO:')
canvas.drawString(500-(len(txt['Nombre'])*3),725,txt['Nombre'])
canvas.line(378,723,580,723)
for x in range(0,len(txt),1):
canvas.drawString(30,Lsize-(x*15),str(label[x])+':')
canvas.line(120,Lsize-(x*15),580,Lsize-(x*15))
canvas.drawString(120,Lsize-(x*15),txt[label[x]])
canvas.save()
if os.path.isfile(os.getcwd()+'\\'+txt['Nombre']+".pdf"):
print("Abriendo el archivo "+'"'+txt['Nombre']+'.pdf"')
os.system('"'+txt['Nombre']+'.pdf"')
def showAlbum():
    from tkinter import Tk, Scrollbar, Canvas, Frame, IntVar, Entry, Button, Label, RIGHT, Y
from PIL import Image, ImageTk
import os
global d,canvas_width,canvas_height
def dibujar(alto,ancho):
my_canvas_height=int(int(len(lista)/int(range(0,canvas_width,ancho)[-1]/ancho))*alto+alto)
miFrameinicio.configure(bg='#090929',width = canvas_width, height=my_canvas_height)
canvas.configure(bg='#090929',width = canvas_width, height=my_canvas_height)
canvas.create_polygon((0,0,canvas_width,0,canvas_width,my_canvas_height,0,my_canvas_height),fill='#090929')
#miniatura={}
contador=0
for y in range(30,my_canvas_height,alto):
for x in range(0,canvas_width-ancho,ancho):
if contador==len(lista):
break
img=Image.open(pwd+str(lista[contador]))
redimencionar=(img.size[0]*(ancho/img.size[0]),img.size[1]*(ancho/img.size[0])) if img.size[0]>img.size[1] else (img.size[0]*(alto/img.size[1]),img.size[1]*(alto/img.size[1]))
redimencionar=(int(redimencionar[0]),int(redimencionar[1]))
print(redimencionar,str(lista[contador]))
if '.jpg' in str(lista[contador])[-5:]:
img.save(pwd+str(lista[contador]).replace('.jpg','.png'),'png')
img=Image.open(pwd+str(lista[contador]).replace('.jpg','.png'))
miniatura[contador]={'img':img.resize(redimencionar)}
miniatura[contador]['PhotoImage']=ImageTk.PhotoImage(miniatura[contador]['img'])
miniatura[contador]['widget']=Label(miFrameinicio,image=miniatura[contador]['PhotoImage'])
#miniatura[contador]['widget'].image=miniatura[contador]['PhotoImage']
#miniatura[contador]['widget'].place(x=x,y=y)
canvas.create_image(x+int((ancho-redimencionar[0])/2),y+int((alto-redimencionar[1])/2),image=miniatura[contador]['PhotoImage'],anchor='nw')
if ".jpg" in str(lista[contador])[-5:]:
os.remove(pwd+str(lista[contador]))
contador=contador+1
raiz.update()
c.config(scrollregion=c.bbox("all"))
raiz.geometry(str(canvas_width)+"x"+str(canvas_height)+"+10+10")
pwd=d['img']
alto=200
ancho=200
print('Ubicacion:',pwd)
    # start of the test code
lista=[img for img in os.listdir(pwd) if (((".jpg" in img[-5:]) and (not ".png" in img)) or ((".png" in img[-5:]) and (not ".jpg" in img)) or ((".jpeg" in img[-5:]) and (not ".png" in img)) )]
miniatura={}
contador=0
print(canvas_width//ancho)
my_canvas_height=int(int(len(lista)/int(range(0,canvas_width,ancho)[-1]/ancho))*alto+alto)
raiz=Tk()
scrollbar=Scrollbar(raiz)
c = Canvas(raiz, yscrollcommand=scrollbar.set)
scrollbar.config(command=c.yview)
scrollbar.pack(side=RIGHT, fill=Y)
miFrameinicio=Frame(c)
miFrameinicio.configure(width = canvas_width, height=my_canvas_height)
canvas = Canvas(miFrameinicio, width=canvas_width, height=my_canvas_height)
canvas.place(x=0, y=0)
c.pack(side="left" , fill="both", expand=True)
c.create_window(0,0,window=miFrameinicio, anchor='nw')
c.config(scrollregion=c.bbox("all"))
img_ancho=IntVar(value=ancho)
img_alto=IntVar(value=alto)
entry_ancho=Entry(miFrameinicio,textvariable=img_ancho,width=10)
entry_ancho.place(x=10, y=10)
entry_alto=Entry(miFrameinicio,textvariable=img_alto,width=10)
entry_alto.place(x=110, y=10)
btn_aplicar=Button(miFrameinicio,text='Aplicar',command=lambda: dibujar(img_alto.get(),img_ancho.get()),width=10)
btn_aplicar.place(x=210, y=10)
dibujar(img_alto.get(),img_ancho.get())
#raiz.update()
#c.config(scrollregion=c.bbox("all"))
#raiz.geometry(str(canvas_width)+"x"+str(canvas_height)+"+10+10")
print("Mostrando las imagenes con tkinter y PIL...")
raiz.focus_force()
raiz.mainloop()
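# Note (added): dibujar() above computes the resize ratio by hand; PIL can do the same
# aspect-preserving shrink with Image.thumbnail().  A minimal sketch (hypothetical helper,
# not called by showAlbum):
def _miniatura_demo(ruta_imagen, ancho=200, alto=200):
    from PIL import Image
    img = Image.open(ruta_imagen)
    img.thumbnail((ancho, alto))   # shrinks in place, keeping the aspect ratio
    return img                     # caller can wrap it in ImageTk.PhotoImage for tkinter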
def ButtonConImagen():
from tkinter import Tk,PhotoImage,Button
from PIL import Image
import os
global canvas_width,canvas_height,d
pwd=d['img']
file=[str(img) for img in os.listdir(d['img']) if (('.png' in img[-4:]) or ('.jpg' in img[-4:]))][0]
raiz = Tk()
raiz.geometry(str(canvas_width)+"x"+str(canvas_height))
try:
        # save the image in PNG format using the Image module; a PNG written this way is something tkinter can always load correctly
        # a new file produced with Image.save("nuevoArchivoMiFoto.PNG") normally loads fine in Tkinter programs
Image.open(pwd+file).save(pwd+file.replace('.jpg','.png'))
except IOError:
print("No se puede convertir la imagen")
    # the source file may be JPG or PNG; what matters is that the copy is saved as PNG
    # Image.resize((200,200)) resizes the image; the (width, height) tuple is in pixels,
    # so when Tkinter later loads and displays the image it will have exactly those dimensions.
    # The resized copy is written to a new PNG file with Image.save("myNuevaImagen.png","png")
    Image.open(pwd+file.replace('.jpg','.png')).resize((200,200)).save(pwd+'myNuevaImagen.png','png')
    # load the new file "myNuevaImagen.png" created on the previous line
    imgNueva = PhotoImage(file=pwd+"myNuevaImagen.png")
    # create a Button widget and pass it the variable holding the new image via image=imgNueva;
    # the text="..." argument is drawn below the image and aligned with it, so the button takes the size of the image plus the text
    # without a text argument, e.g. Button(raiz, image=imgNueva, bd=0, ...), the button takes the size of the image alone
boton = Button(raiz, image=imgNueva,text=pwd+"myNuevaImagen.png", bd=0, compound="top",command=lambda:print("Click XD"))
boton.place(x=0, y=50)
raiz.mainloop()
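# Note (added): tkinter only keeps a weak link to a PhotoImage, so if the variable holding it goes
# out of scope the button goes blank.  A common idiom is to attach the image to the widget itself
# (sketch, reusing the names from ButtonConImagen above):
#     boton = Button(raiz, image=imgNueva, ...)
#     boton.image = imgNueva   # keep a hard reference alive for as long as the widget lives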
def playlist(pwd='.'):
import os
    # start of the test code
tiempo=45
ignorarArchivo=['Crear Lista.py','0000_archivos.txt']
formatos=['.mp3','.mp4','.wav','.avi','.webm','.ogg','.m4a','.mkv','.rmvb','.vob','.wmv']
playListFile=pwd+'\\'+"000_playList.m3u"
    if os.path.isfile(playListFile):
os.remove(playListFile)
palabra=input("ingrese una palabra para filtrar los archivos : ")
midir=[archivo for archivo in os.listdir(pwd) if ((archivo not in ignorarArchivo) and (os.path.isfile(pwd+'\\'+archivo)))]
midir=[archivo for archivo in midir if [archivo for exten in formatos if (exten in archivo[-6:].lower()) ] ]
midir=[archivo for archivo in midir if (palabra.lower() in archivo.lower())]
filtro=[
('ú','u'),
('á','a'),
('é','e'),
('í','i'),
('ó','o'),
('ú','u'),
('_',''),
('-',''),
('¡',''),
('!',''),
(' ',' ')
]
for contador in range(0,len(midir),1):
archivo=str(midir[contador]).lower()
for change in filtro:
archivo=archivo.replace(change[0],change[1])
os.rename(pwd+'\\'+midir[contador], pwd+'\\'+archivo)
midir[contador]=archivo
playList=open(playListFile,"a")
playList.write('#EXTM3U\n')
for contador in range(0,len(midir),1):
playList.write('#EXTINF:'+str(tiempo)+','+str(midir[contador])+'\n')
playList.write(pwd+'\\'+str(midir[contador])+'\n')
playList.close()
if midir:
os.system(playListFile)
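# Note (added): the file written above follows the extended M3U format; for two entries it would
# look roughly like this (the paths depend on pwd and on the files actually found):
#     #EXTM3U
#     #EXTINF:45,cancion_uno.mp3
#     C:\musica\cancion_uno.mp3
#     #EXTINF:45,video_dos.mp4
#     C:\musica\video_dos.mp4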
def decoradores():
import time
"""
practica con decoradores
la funcion 'decorador' se ejecutara automaticamente cada ves que ejecutemos las funciones decoradas con esta funcion
la funcion 'decorador' recive como parametro la funcion 'funcion_parametro' que fue decorada, junto con sus argumentos de posicion *vars y los argumentos con palabras claves **vars_claves
la funcion 'decorador' retornara otra funcion 'funcionInterior', esta funcion interna ejecuta su propio codigo, y dentro de esta funcion intarna ejecutamos la funcion_parametro
"""
def decorador(funcion_parametro):
def funcionInterior(*args,**kwargs):
print("Funcion:",funcion_parametro.__name__)
print("Entradas:",*args,**kwargs)
ret=funcion_parametro(*args,**kwargs)
print("Salidas:",ret,'\n')
return ret
return funcionInterior
"""
la funcion 'decoradorPlus' recive la funcion decoradoa con sos respectivos
argumentos, aparte tambien recive un parametro asignada a la clave arg como
cualquier funcion normal. De acuerdo al valor de 'arg' decorara la funcion decorada con una funcion interna de 'decoradorPlus'
"""
def decoradorPlus(arg=""):
def decoradortiempo(funcion):
def funcionInterior(*args,**kwargs):
inicio=time.time()
print("Funcion:",funcion.__name__)
ret=funcion(*args,**kwargs)
print("Tiempo de Ejecucion:",time.time()-inicio)
return ret
return funcionInterior
def decoradorSimple(funcion):
def funcionInterior(*args,**kwargs):
print("Funcion:",funcion.__name__)
print("Entradas:",*args,**kwargs)
ret=funcion(*args,**kwargs)
print("Salidas:",ret)
return ret
return funcionInterior
if arg=="tiempo":
return decoradortiempo
else:
return decoradorSimple
"""
la funcion 'testFuncion' recive como argumento posicional, una
funcion, el valor que debe retornar dicha funcion, seguido de
los argumentos que recive dicha funcion
"""
def testFuncion(funcion,retorna,*test_args):
test=True
if retorna!=funcion(*test_args):
test=False
return test
"""
decoramos la funcion 'resta', con la funcion 'decoradorPlus' sin pasarle la clave opcional 'arg'
"""
@decoradorPlus()
def resta(a,b):
return (a-b)
"""
decoramos la funcion 'suma', con la funcion 'decoradorPlus' y le pasamos la clave opcional 'arg' igual a "tiempo"
"""
@decoradorPlus(arg='tiempo')
def suma(a,b):
return (a+b)
print(testFuncion(resta,40,50,10))
print(testFuncion(suma,40,50,10))
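# Note (added): the wrappers above replace the decorated function's __name__ and docstring with
# those of "funcionInterior".  functools.wraps copies that metadata back onto the wrapper; a
# minimal sketch (illustrative decorator, not used by the tests above):
def _decorador_con_wraps(funcion):
    import functools
    @functools.wraps(funcion)          # preserves funcion.__name__, __doc__, etc.
    def interior(*args, **kwargs):
        print("Funcion:", funcion.__name__)
        return funcion(*args, **kwargs)
    return interior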
def aspas(x=10,y=10,escalar=1.0,dividir=120,baseRadio=375.0,altura=375.0,revolucion=360,rotorRadio=12.0,fondo=200.0):
baseRadio=375.0*escalar
altura=375.0*escalar
rotorRadio=12.0*escalar
fondo=200.0*escalar
    # returns the outline polygon of one blade of a wind turbine
print('escalar:',escalar,'x:',x,'y:',y,'dividir:',dividir,'baseRadio:', baseRadio,'altura:', altura,'revolucion:', revolucion,'rotorRadio:', rotorRadio,'fondo:', fondo)
datos={'x':x,'y':y,'dividir':dividir,'baseRadio':baseRadio,'altura': altura,'revolucion': revolucion,'rotorRadio': rotorRadio,'fondo': fondo}
geo=geometria()
xR=[0.0]
yR=[rotorRadio]
zR=[-1*(fondo/baseRadio)*(baseRadio-rotorRadio)]
xA=[0.0]
yA=[baseRadio]
zA=[0.0]
tanA=baseRadio/altura
for ang in range(1,revolucion,1):
if (((altura-((altura/float(revolucion))*float(ang)))*tanA) >= rotorRadio):
rad=math.radians(float(ang))
zA.append((altura/float(revolucion))*float(ang))
p=((altura-zA[-1])*tanA)
yA.append(p*math.cos(rad))
xA.append(p*math.sin(rad))
zR.append((-1*(((fondo+zA[-1])/p)*(p-rotorRadio))+zA[-1]))
yR.append((rotorRadio)*math.cos(rad))
xR.append((rotorRadio)*math.sin(rad))
fin=ang
xOut=[0]
yOut=[geo.dist([xR[0],yR[0],zR[0]],[xA[0],yA[0],zA[0]])]
xIn =[0]
yIn =[0]
for n in range(1,fin+1,1):
        A=[xA[n-1],yA[n-1],zA[n-1]] # point already on the plane
        B=[xR[n-1],yR[n-1],zR[n-1]] # origin point already on the plane
        C=[xA[n],yA[n],zA[n]] # point that will be added to the plane
xO=geo.dist(geo.alt(C,A,B),C)
yO=geo.dist(geo.alt(C,A,B),B)
#print(math.degrees(angRad([0,1,0],resta([xIn[-1],yIn[-1],0],[xOut[-1],yOut[-1],0]))))
rot= -1*math.fabs(geo.angRad([0,1,0],geo.resta([xIn[-1],yIn[-1],0],[xOut[-1],yOut[-1],0])))
xRot, yRot=geo.rotar(rot,[xO,yO,0])
xTras, yTras=geo.trasladar([xIn[-1],yIn[-1],0],[xRot,yRot,0])
yOut.append(yTras)
xOut.append(xTras)
A=[xA[n],yA[n],zA[n]]
B=[xR[n-1],yR[n-1],zR[n-1]]
C=[xR[n],yR[n],zR[n]]
xO= geo.dist(geo.alt(C,A,B),C)
        yO= geo.dist(geo.alt(C,A,B),B) if geo.dist(geo.alt(C,A,B),A)<geo.dist(A,B) else geo.dist(geo.alt(C,A,B),B)*(-1)
rot= -1*math.fabs(geo.angRad([0,1,0],geo.resta([xIn[-1],yIn[-1],0],[xOut[-1],yOut[-1],0])))
xRot, yRot=geo.rotar(rot,[xO,yO,0])
xTras, yTras=geo.trasladar([xIn[-1],yIn[-1],0],[xRot,yRot,0])
yIn.append(yTras)
xIn.append(xTras)
for n in range(0,len(xOut),dividir):
datos[str(n)+'grados']=str(int(n))+' grados, alturaRelativa= '+str(int(fondo+zR[n]))+'mm, largo= '+str(geo.dist([xIn[n],yIn[n],0],[xOut[n],yOut[n],0]))+', angulo= '+str(math.degrees(geo.angRad([0,0,1],geo.resta([xR[n],yR[n],zR[n]],[xA[n],yA[n],zA[n]]))))
datos[str(fin)+'grados']=str(int(fin))+' grados, alturaRelativa= '+str(int(fondo+zR[fin]))+'mm, largo= '+str(geo.dist([xIn[fin],yIn[fin],0],[xOut[fin],yOut[fin],0]))+', angulo= '+str(math.degrees(geo.angRad([0,0,1],geo.resta([xR[fin],yR[fin],zR[fin]],[xA[fin],yA[fin],zA[fin]]))))
textos = [[xIn[n]+x, yIn[n]+y,geo.ang(geo.resta([xIn[n]+(x), yIn[n]+(y),0],[xOut[n]+x, yOut[n]+y,0]),[-1,0,0])-180,'__'+str(n)+' grados'] for n in range(0,len(xOut),dividir)]+[geo.medio([xOut[fin]+x, yOut[fin]+y,0], [xIn[fin]+(x), yIn[fin]+(y),0])[0:2]+[geo.ang(geo.resta([xIn[fin]+(x), yIn[fin]+(y),0],[xOut[fin]+x, yOut[fin]+y,0]),[-1,0,0])-180,'__'+str(fin)+' grados']]
lineas = [[xOut[n]+(x), yOut[n]+(y), xIn[n]+(x), yIn[n]+(y)] for n in range(0,len(xOut),dividir)]+[[xOut[fin]+(x), yOut[fin]+(y), xIn[fin]+(x), yIn[fin]+(y)]]
poligono = [[xOut[n]+(x),yOut[n]+(y)] for n in range(0,len(xOut),1)]+[[xIn[n]+(x),yIn[n]+(y)] for n in range(len(xIn)-1,0,-1)]+[[xIn[0]+(x),yIn[0]+(y)]]+[[xOut[0]+x,yOut[0]+y]]
datos['giroReal'] = fin
return poligono, lineas, datos,textos
def poligonoToPDF(debug=1,calibrar=1,miniatura=1,margenes=[],REALmm=[200.0,200.0],datos={},printTEXT=[[100,100,0,"hola"],[100,100,90,"hola"],[100,100,180,"hola"],[100,100,270,"hola"]],REALsize=[0.0,0.0],escalar=1.0,cuadricular=1,imprimirHojasEnblanco=0, poligonos=[[(10,10),(15,15),(15,20),(20,20),(10,20),(10,10)]], lineas=[(10,10,0,0),(15,15,0,0),(15,20,0,0),(20,20,0,0),(10,20,0,0),(10,10,0,0)], fin = 6):
from reportlab.lib.colors import tan, black, green
from reportlab.lib.units import mm
from reportlab.pdfgen import canvas
fontSize=10
hoja={
'A0':(841*mm,1189*mm),
'A1':(594*mm,841*mm),
'A2':(420*mm,594*mm),
'A3':(297*mm,420*mm),
'A4':(210*mm,297*mm),
'A5':(148*mm,210*mm),
'A6':(105*mm,148*mm),
'A7':(74*mm,105*mm),
'A8':(52*mm,74*mm),
'A9':(37*mm,52*mm),
'A10':(26*mm,37*mm),
'B0':(1000*mm,1414*mm),
'B1':(707*mm,1000*mm),
'B2':(500*mm,707*mm),
'B3':(353*mm,500*mm),
'B4':(250*mm,353*mm),
'B5':(176*mm,250*mm),
'B6':(125*mm,176*mm),
'B7':(88*mm,125*mm),
'B8':(62*mm,88*mm),
'B9':(44*mm,62*mm),
'B10':(31*mm,44*mm),
'C0':(917*mm,1297*mm),
'C1':(648*mm,917*mm),
'C2':(458*mm,648*mm),
'C3':(324*mm,458*mm),
'C4':(229*mm,324*mm),
'C5':(162*mm,229*mm),
'C6':(114*mm,162*mm),
'C7':(81*mm,114*mm),
'C8':(57*mm,81*mm),
'C9':(40*mm,57*mm),
'C10':(28*mm,40*mm),
'oficio':(216*mm,330*mm)
}
Xmm=200.0/REALmm[0]
Ymm=200.0/REALmm[1]
page=hoja['A4']
escala=escalar
escalaX,escalaY=(REALsize[0]/(page[0]/mm)) if REALsize[0]>0.0 else 1.0,(REALsize[1]/(page[1]/mm)) if REALsize[1]>0.0 else 1.0
menorX,menorY,mayorX,mayorY=0,0,0,0
poligono=poligonos
lines=lineas
textos=printTEXT
for polig in range(0,len(poligono),1):
for p in range(0,len(poligono[polig]),1):
poligono[polig][p]=[poligono[polig][p][0]*mm*Xmm , poligono[polig][p][1]*mm*Ymm]
for p in poligono[polig]:
if p[0]<menorX:
menorX=p[0]
if p[0]>mayorX:
mayorX=p[0]
if p[1]<menorY:
menorY=p[1]
if p[1]>mayorY:
mayorY=p[1]
for l in range(0,len(lines),1):
lines[l] = [lines[l][0]*mm*Xmm,lines[l][1]*mm*Ymm,lines[l][2]*mm*Xmm,lines[l][3]*mm*Ymm]
menorX=lines[l][0] if lines[l][0]<menorX else menorX
mayorX=lines[l][0] if lines[l][0]>mayorX else mayorX
menorX=lines[l][2] if lines[l][2]<menorX else menorX
mayorX=lines[l][2] if lines[l][2]>mayorX else mayorX
menorY=lines[l][1] if lines[l][1]<menorY else menorY
mayorY=lines[l][1] if lines[l][1]>mayorY else mayorY
menorY=lines[l][3] if lines[l][3]<menorY else menorY
mayorY=lines[l][3] if lines[l][3]>mayorY else mayorY
#print(lines[l])
mayorX=mayorX-menorX
mayorY=mayorY-menorY
divisionX=1
divisionY=1
if (page[0]*escalaX) < mayorX:
divisionX=int(mayorX/(page[0]*escalaX))+1
if (page[1]*escalaY) < mayorY:
divisionY=int(mayorY/(page[1]*escalaY))+1
geo=geometria()
planos=[]
    # translate every point of every polygon into the first quadrant
for t in range(0,len(textos),1):
print(textos[t])
textos[t][0]=textos[t][0]*mm*Xmm-(menorX)
textos[t][1]=textos[t][1]*mm*Ymm-(menorY)
for polig in range(0,len(poligono),1):
for p in range(0,len(poligono[polig]),1):
poligono[polig][p]=(poligono[polig][p][0]-(menorX),poligono[polig][p][1]-(menorY))
for l in range(0,len(lines),1):
lines[l] = [lines[l][0]-menorX,lines[l][1]-menorY,lines[l][2]-menorX,lines[l][3]-menorY]
    # convert the free-standing lines into polygons
divX=[x*page[0]*escalaX for x in range(0,divisionX,1)]+[mayorX]
divY=[y*page[1]*escalaY for y in range(0,divisionY,1)]+[mayorY]
print("cortando")
for l in range(0,len(lines),1):
v=[lines[l][2]-lines[l][0],lines[l][3]-lines[l][1],0]
oX = lines[l][0] if v[0]==0.0 else -1.0
oY = lines[l][1] if v[1]==0.0 else -1.0
Yf=lines[l][1] if lines[l][1]>lines[l][3] else lines[l][3]
Yi=lines[l][1] if lines[l][1]<lines[l][3] else lines[l][3]
Xf=lines[l][0] if lines[l][0]>lines[l][2] else lines[l][2]
Xi=lines[l][0] if lines[l][0]<lines[l][2] else lines[l][2]
        # split along Y
div=[divY[d] for d in range(0,len(divY)-1,1) if ((divY[d]<=Yi) and (divY[d+1]>=Yi))]
div = div[0] if div else 0.0
puntos = [[(((float(y)/10.0)-lines[l][1])/(v[1]/v[0])+lines[l][0]) if v[0]!=0.0 else lines[l][0],(float(y)/10.0)] for y in range(int(div*10),int(Yf*10),int(page[1]*escalaY*10))[1:]]
        # split along X
div=[divX[d] for d in range(0,len(divX)-1,1) if ((divX[d]<=Xi) and (divX[d+1]>=Xi))]
div = div[0] if div else 0.0
puntos += [[float(x)/10.0,(((float(x)/10.0)-lines[l][0])*(v[1]/v[0])+lines[l][1]) if v[1]!=0.0 else lines[l][1]] for x in range(int(div*10),int(Xf*10),int(page[0]*escalaX*10))[1:]]
polig=[lines[l][0:2]]
distancias=[[geo.dist(lines[l][0:2]+[0],p+[0]),p[0],p[1]] for p in puntos]
for p in range(0,len(puntos),1):
menor=geo.dist(lines[l][0:2]+[0],lines[l][2:]+[0])
pos=0
for x in range(0,len(distancias),1):
if distancias[x][0]<menor:
menor=distancias[x][0]
pos=x
polig += [[(((distancias[pos][2]-1.0)-lines[l][1])/(v[1]/v[0])+lines[l][0]) if v[0]!=0.0 else lines[l][0],distancias[pos][2]-1.0]]
polig += [[(((distancias[pos][2]+1.0)-lines[l][1])/(v[1]/v[0])+lines[l][0]) if v[0]!=0.0 else lines[l][0],distancias[pos][2]+1.0]]
distancias.pop(pos)
polig += [lines[l][2:]]
poligono += [polig]
margen=margenes if margenes else [((page[0]*(1.0-escalaX))/2),((page[1]*(1.0-escalaY))/2)]
txt=[]
for x in range(0,divisionX,1):
for y in range(0,divisionY,1):
txt= [[]]+txt
for t in textos:
if (((page[0]*escalaX)>=(t[0]-(x*page[0]*escalaX))) and ((t[0]-(x*page[0]*escalaX))>=0.0) ) and (((page[1]*escalaY) >= (t[1]-(y*page[1]*escalaY))) and ((t[1]-(y*page[1]*escalaY))>=0.0)) :
txt[0] += [[ t[0]-(x*page[0]*escalaX)+margen[0],t[1]-(y*page[1]*escalaY)+margen[1],t[2],t[3] ]]
for polig in range(0,len(poligono),1):
plano=[]
matriz=[]
for x in range(0,divisionX,1):
for y in range(0,divisionY,1):
plano = [[]] + plano
matriz += [[divisionX-x-1,divisionY-y-1]]
for p in poligono[polig]:
if (((page[0]*escalaX)>=(p[0]-(x*page[0]*escalaX))) and ((p[0]-(x*page[0]*escalaX))>=0.0) ) and (((page[1]*escalaY) >= (p[1]-(y*page[1]*escalaY))) and ((p[1]-(y*page[1]*escalaY))>=0.0)) :
plano[0] += [ [(p[0]-(x*page[0]*escalaX))+margen[0] , (p[1]-(y*page[1]*escalaY))+margen[1]]]
planos += [plano]
global canvas
canvas = canvas.Canvas(datos['archivo'], pagesize=page)
def texto(x,y,ang,txt):
print(x,y,ang,txt)
canvas.saveState()
rad=-1*math.radians(float(ang))
i, j=geo.rotar(rad,[x,y,0])
canvas.rotate(ang)
canvas.drawString(i,j,str(txt))
canvas.restoreState()
def cuadriculando(cuadricular):
if cuadricular:
canvas.saveState()
canvas.setDash(1,10)
canvas.setLineWidth(0.1)
            # vertical centre line
            canvas.line((page[0]/2), page[1]-margen[1], (page[0]/2), margen[1])
            # horizontal centre line
            canvas.line(margen[0],(page[1]/2),page[0]-margen[0],(page[1]/2))
            # diagonal from top-left to bottom-right
            canvas.line(margen[0], page[1]-margen[1], page[0]-margen[0],margen[1])
            # diagonal from bottom-left to top-right
            canvas.line(margen[0],margen[1],page[0]-margen[0],page[1]-margen[1])
            # boundary marks
            canvas.setDash(5,5)
            canvas.setLineWidth(0.3)
            # upper horizontal limit
            canvas.line((page[0]/2)-50, page[1]-margen[1],(page[0]/2)+50, page[1]-margen[1])
            # lower horizontal limit
            canvas.line((page[0]/2)-50, margen[1],(page[0]/2)+50, margen[1])
            # right vertical limit
            canvas.line((page[0]-margen[0]), (page[1]/2)-50,(page[0]-margen[0]), (page[1]/2)+50)
            # left vertical limit
            canvas.line((margen[0]), (page[1]/2)-50,(margen[0]), (page[1]/2)+50)
canvas.restoreState()
if not calibrar:
hojaConDibujo=[0]*(divisionY*divisionX)
print(hojaConDibujo)
canvas.setLineWidth(3)
for x in range(0,(divisionY*divisionX),1):
dibujado=0
if txt[x]:
for t in txt[x]:
texto(*t)
for p in range(0,len(planos),1):
if len(planos[p][x])>1:
hojaConDibujo[x] = 1
if len(planos[p][x])>1 or imprimirHojasEnblanco:
cuadriculando(cuadricular)
dibujado=1
for g in range(0,len(planos[p][x])-1,1):
canvas.line(planos[p][x][g][0], planos[p][x][g][1], planos[p][x][g+1][0], planos[p][x][g+1][1])
if dibujado:
canvas.drawString(10.0*mm,800,str(matriz[x]))
canvas.drawString(page[0]-20.0*mm,(15*mm),str(x+1))
canvas.showPage()
canvas.setLineWidth(3)
print(hojaConDibujo)
datos['cantidadTotalDeHojas']=divisionX*divisionY
datos['HojasEnBlanco']=str([x+1 for x in range(0,len(hojaConDibujo),1) if hojaConDibujo[x]==0 ])
datos['HojasConDibujo']=str([x+1 for x in range(0,len(hojaConDibujo),1) if hojaConDibujo[x]>0 ])
canvas.setLineWidth(0.3)
canvas.setFont('Helvetica', fontSize)
contador=1
datos['miniatura']=str(bool(miniatura))
datos['tipoDeHoja']='A4'
datos['medidasDeLaHoja']=[page[0]/mm,page[1]/mm]
datos['ALTOdelDibujo']=mayorY/mm
datos['ANCHOdelDibujo']=mayorX/mm
datos['imprimirHojasEnblanco']=str(bool(imprimirHojasEnblanco))
    canvas.setLineCap(1) # rounded line ends
    cuadriculando(cuadricular)
    canvas.setDash(6,3) # dash pattern: 6 points drawn, 3 points skipped
for x in datos:
if (page[1]-(contador*fontSize*mm))<(fontSize*mm):
canvas.showPage()
canvas.setLineCap(1)
contador=1
cuadriculando(cuadricular)
            canvas.setDash(6,3) # dash pattern: 6 points drawn, 3 points skipped
canvas.drawString(30+margen[0],page[1]-(contador*fontSize*2.0)-margen[1],str(x)+':')
#((0,len(str(datos[x])),40))
canvas.line(100+((len(x)-10)*5 if len(x)>10 else 0)+margen[0],page[1]-(contador*fontSize*2.0)-2-margen[1],580-margen[0],page[1]-(contador*fontSize*2.0)-2-margen[1])
canvas.drawString(100+((len(x)-10)*5 if len(x)>10 else 0)+margen[0],page[1]-(contador*fontSize*2.0)-margen[1],str(datos[x]))
contador += 1
canvas.setLineWidth(1)
    canvas.setDash(1,0) # draw 1 point and skip 0 points (solid line)
    canvas.setLineCap(2) # square line ends
    # horizontal line spanning the whole page from left to right
canvas.drawString((page[0]/2),page[1]-(contador*fontSize*2.0)+5,str(page[0]/mm)+'mm. RealANCHO')
canvas.line(0, page[1]-(contador*fontSize*2.0), page[0], page[1]-(contador*fontSize*2.0))
contador+=1
    # 100 mm horizontal reference
canvas.drawString((page[0]/2),page[1]-(contador*fontSize*2.0)+5,str(100)+'mm. REALmmX')
canvas.line((page[0]/2)-50.0*mm, page[1]-(contador*fontSize*2.0), (page[0]/2)+50.0*mm, page[1]-(contador*fontSize*2.0))
texto(page[0]-10.0*mm-margen[0],(page[1]/2),90.0,str(page[1]/mm)+'mm. RealALTO')
texto(page[0]-20.0*mm-margen[0],(page[1]/2),90.0,str(100)+'mm. REALmmY')
    # vertical line spanning the whole page from top to bottom (RealALTO reference)
    canvas.line(page[0]-10.0*mm-margen[0], page[1], page[0]-10.0*mm-margen[0], 0.0)
    # 100 mm vertical reference (REALmmY)
    canvas.line(page[0]-20.0*mm-margen[0], (page[1]/2)+50.0*mm, page[0]-20.0*mm-margen[0], (page[1]/2)-50.0*mm)
canvas.save()
print("Ok")
def planos():
escala=1.0
puntos,lineas,datos,textos = aspas(escalar=escala)
datos['plano']="Hoja de una turbina de viento"
datos['archivo']="plano_escala_1.0_Turbina_eolica.pdf"
datos['UnidadDeMedida']="milimetros"
datos['escala']=str(escala)
poligonoToPDF(calibrar=0,miniatura=1,poligonos=[puntos],lineas=lineas,datos=datos, printTEXT=textos)
if input("Ya imprimio y saco las medidas de la hoja de prueba? S/N:").lower()=="s":
RealANCHO=float(input("RealANCHO: "))
RealALTO=float(input("RealALTO: "))
REALmmX=float(input("REALmmX: "))
REALmmY=float(input("REALmmY: "))
margenX=float(input("margen superior: "))
margenY=float(input("margen Izquierdo: "))
miniatura=int(input("ImprmirMiniatura 1/0 : "))
#=float(input(": "))
poligonoToPDF(calibrar=0,miniatura=miniatura,REALsize=[RealANCHO,RealANCHO],margenes=[margenX,margenY],REALmm=[REALmmX,REALmmY],poligonos=[puntos],lineas=lineas,datos=datos)
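# Note (added): planos() works in two passes: it first emits a test PDF at scale 1.0 and, if the
# user confirms they printed and measured that sheet, re-runs poligonoToPDF() with the measured
# real width/height, millimetre references and margins, so the second PDF compensates for the
# printer's own scaling error.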
def red():
from mbarete import internet
ip=internet()
print('ip.lan_ip:',ip.lan_ip,'ip.wan_ip:',ip.wan_ip)
def servidor_HTTP_python(LAN=1):
import socket
import threading
if LAN:
s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('10.255.255.255',1))
ip=s.getsockname()
s.close()
host = ip[0]
else:
host = '127.0.0.1'
port = 8080
format_encode='utf-8'
numerosMagicos={
'png':{'inicio':b'\x89PNG\r\n'},
'gif1':{'inicio':b'GIF89a'},
'gif2':{'inicio':b'GIF87a'},
'jpg1':{'inicio':b'\xff\xd8\xff\xdb'},
'jpg2':{'inicio':b'\xff\xd8\xff\xe0'},
'jpg':{'inicio':b'\xff\xd8\xff\xee'},
'webp':{'inicio':b'RIFF\xb0y\x00\x00WEBPVP8'},
'exe':{'inicio':b'MZ'},
'pdf':{'inicio':b'%PDF-'},
'OggS':{'inicio':b'OggS'},
'matroska':{'inicio':b'\x1a\x45\xdf\xa3'},
'script':{'inicio':b'#!'},
'sql':{'inicio':b'SQLite format 3'},
'faxx':{'inicio':b'FORM????FAXX'},
'zip1':{'inicio':b'\x50\x4b\x03\x04'},
'zip2':{'inicio':b'\x50\x4b\x05\x06'},
#'zip3':{'inicio':b'PK␅␆'},
#'rar':{'inicio':b'Rar!␚␇␀'},
#'windowMedia':{'inicio':b'0&²uŽfÏ␑¦Ù␀ª␀bÎl'},
#'Photoshop':{'inicio':b'8BPS'},
'wav':{'inicio':b'RIFF????WAVE'},
#'avi':{'inicio':b'RIFF????AVI␠'},
#'1mp3':{'inicio':b'ÿû'},
#'2mp3':{'inicio':b'ÿó'},
#'3mp3':{'inicio':b'ÿò'},
'mp3':{'inicio':b'ID3'},
'CD_DVD':{'inicio':b'CD001'},
'midi':{'inicio':b'MThd'},
#'MicrosoftOffice':{'inicio':b'ÐÏ␑ࡱ␚á'},
#'debutante':{'inicio':b'!␊'},
'webpGoogle':{'inicio':b'RIFF????WEBP'},
'mp4':{'inicio':b'ftypisom'},
'blender':{'inicio':b'BLENDER'}
}
#'':{'inicio':b''},
#'':{'inicio':b''}
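    # Illustrative helper (added, not called by respond() below): shows how the signature table
    # above can be used to guess a payload's file type, using the same substring test that the
    # upload path in respond() performs.  Entries containing '????' wildcards only match literally.
    def detectar_firma(datos_bytes):
        for nombre in numerosMagicos:
            if numerosMagicos[nombre]['inicio'] in datos_bytes:
                return nombre
        return None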
global status
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
print(f"\nServidor HTTP corriendo en la direccion 'http://{host}:{port}/'")
status=True
clients = []
usernames = []
def requestToDictionary(request,add={}):
"""
b'POST /subir HTTP/1.1'
b'Host: 127.0.0.1:8080'
b'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0'
b'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
b'Accept-Language: es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3'
b'Accept-Encoding: gzip, deflate'
b'Content-Type: multipart/form-data; boundary=---------------------------1262949829386019333586660223'
b'Content-Length: 225'
b'Origin: http://127.0.0.1:8080'
b'Connection: keep-alive'
b'Referer: http://127.0.0.1:8080/'
b'Upgrade-Insecure-Requests: 1'
b'Sec-Fetch-Dest: document'
b'Sec-Fetch-Mode: navigate'
b'Sec-Fetch-Site: same-origin'
b'Sec-Fetch-User: ?1'
b''
b'-----------------------------1262949829386019333586660223'
b'Content-Disposition: form-data; name="archivo"; filename=""'
b'Content-Type: application/octet-stream'
b''
b''
b'-----------------------------1262949829386019333586660223--'
"""
if b'\r\n\r\n' in request:
post=[ request.split(b'\r\n\r\n')[-1]]
requ=[r.decode(format_encode) for r in request.split(b'\r\n')]
ret={}
for i in requ:
if ('POST' in i) or ('GET' in i):
ret['method']=i.split(' ')[0]
ret['sub_dominio']=i.split(' ')[1]
ret['http']=i.split(' ')[2]
if 'User-Agent:' in i:
ret['User_Agent']=i[len('User_Agent: '):-1]
if 'Content-Disposition:' in i:
ret['form_data']={
                'name':i.split('form-data; ')[-1].split(';')[0][len('name="'):-1],
'filename':i.split('form-data; ')[-1].split(';')[1][len(' filename="'):-1]
}
if 'Content-Type: multipart/form-data;' in i:
ret['boundary']=i.split('boundary=')[-1]
if add:
for a in add:
ret[a]=add[a]
return ret
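    # Note (added): for the sample request shown in the docstring above, requestToDictionary
    # returns roughly:
    #   {'method': 'POST', 'sub_dominio': '/subir', 'http': 'HTTP/1.1',
    #    'boundary': '---------------------------1262949829386019333586660223',
    #    'form_data': {'name': 'archivo', 'filename': ''}, ...}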
def respond(client, address):
responder=False
request=b''
ok=True
cabezera=True
porcion=1024*5
binario=None
info={}
while ok:
datos_Bytes=client.recv(porcion)
if (b'Android' in datos_Bytes) and (b'boundary' in datos_Bytes):
info=requestToDictionary(datos_Bytes)
datos_Bytes=client.recv(porcion)
if (porcion > len(datos_Bytes)) and (b'\r\n' in datos_Bytes):
ok = False
if cabezera:
if (b'\r\n\r\n' in datos_Bytes) or info:
cabezera=False
for b in numerosMagicos:
if numerosMagicos[b]['inicio'] in datos_Bytes:
binario=b
if binario:
request=datos_Bytes.split(numerosMagicos[binario]['inicio'])[0]
info=requestToDictionary(request,add=info)
subiendo = open(info['form_data']['filename'],"wb")
subiendo.write(numerosMagicos[binario]['inicio']+datos_Bytes.split(numerosMagicos[binario]['inicio'])[-1])
while binario:
datos_Bytes=client.recv(porcion)
if info['boundary'].encode(format_encode) in datos_Bytes:
subiendo.write(datos_Bytes.split(info['boundary'].encode(format_encode))[0])
request=info['boundary'].encode(format_encode)+datos_Bytes.split(info['boundary'].encode(format_encode))[-1]
binario=False
else:
subiendo.write(datos_Bytes)
subiendo.close()
print("Subido:",info['form_data']['filename'])
else:
request+=datos_Bytes
else:
request+=datos_Bytes
else:
request+=datos_Bytes
print('request:',request)
info = requestToDictionary(request,add=info)
        if request:
print(info)
if '/cerrar' in info['sub_dominio']:
print('Servidor Apagado')
client.close()
server.close()
                global status
                status=False
elif ('GET' in info['method']):
if '/' == info['sub_dominio']:
myfile = 'index.html'
elif 'pruebaGet' in info['sub_dominio']:
myfile='index.html'
elif 'video' in info['sub_dominio']:
myfile='bibliografia/ONE_PUNCH_PARTE_9.mp4'
else:
myfile='media/GET.html'
file=open(myfile,'wb')
file.write(request)
file.close()
elif ('POST' in info['method']):
if 'pruebaPost' in info['sub_dominio']:
myfile='media/pruebaPost.html'
file=open(myfile,'wb')
file.write(request)
file.close()
else:
myfile='media/POST.html'
file=open(myfile,'wb')
file.write(request)
file.close()
try:
print('myfile',myfile)
header='HTTP/1.1 200 OK\n'
if myfile.endswith('.jpg'):
mimetype='image/jpg'
elif myfile.endswith('.css'):
mimetype='text/css'
elif myfile.endswith('.pdf'):
mimetype='application/pdf'
elif myfile.endswith('.mp4'):
mimetype='video/mp4'
else:
mimetype='text/html'
header += 'Content-Type: '+str(mimetype)+'\n\n'
except Exception as e:
            header='HTTP/1.1 404 Not Found\n\n'
            response=f'<html><body>Error 404: File Not Found<br> {e} </body></html>'.encode(format_encode)
header=header.encode(format_encode)
client.send(header)
file=open(myfile,'rb')
client.send(file.read())
file.close()
client.close()
print("fin de coneccion")
def receive_connections():
while status:
client, address = server.accept()
thread = threading.Thread(target=respond, args=(client, address))
thread.start()
print("fin de servicio")
receive_connections()
server.close()
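# Note (added): once servidor_HTTP_python() is running, a multipart upload similar to the sample
# request in requestToDictionary's docstring can be sent from another terminal, e.g. with curl
# (illustrative command; the /subir path and field name "archivo" come from that sample request):
#     curl -F "archivo=@foto.png" http://<host>:8080/subir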
def administrador_servidor_HTTP_python(LAN=1):
import socket
import threading
if LAN:
s=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('10.255.255.255',1))
ip=s.getsockname()
s.close()
host = ip[0]
else:
host = '127.0.0.1'
port = 8080
format_encode='utf-8'
def iniciar():
thread = threading.Thread(target=servidor_HTTP_python)
thread.start()
servidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(host, port)
servidor.connect((host, port))
return servidor
def cerrar(servidor):
servidor.send('cerrar'.encode(format_encode))
return servidor
def recargar(servidor):
servidor.send('reiniciar'.encode(format_encode))
return servidor
def info(servidor):
servidor.send('info'.encode(format_encode))
return servidor
comandos={'cerrar':cerrar,'recargar':recargar,'info':info}
servidor=iniciar()
while True:
command=input('Ingrese un comando:'+str([c for c in comandos]))
if command in comandos:
servidor=comandos[command](servidor)
if command=='':
cerrar(servidor)
break
def servidor_CHAT_socket_python():
import socket
import threading
host = '192.168.43.134'
port = 8080
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
print(f"Servidor corriendo en la direccion {host}:{port}")
clients = []
usernames = []
def broadcast(message, _client):
for client in clients:
if client != _client:
client.send(message)
def handle_messages(client):
while True:
try:
message = client.recv(1024)
broadcast(message, client)
except:
index = clients.index(client)
username = usernames[index]
broadcast(f"ChatBot: {username} se desconecto".encode('utf-8'), client)
clients.remove(client)
usernames.remove(username)
client.close()
break
def receive_connections():
while True:
client, address = server.accept()
client.send("@username".encode("utf-8"))
username = client.recv(1024).decode('utf-8')
clients.append(client)
usernames.append(username)
print(f"{username} esta conectado desde {str(address)}")
message = f"ChatBot: {username} se unio al chat!".encode("utf-8")
broadcast(message, client)
thread = threading.Thread(target=handle_messages, args=(client,))
thread.start()
receive_connections()
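# Note (added): the chat server and client above agree on a tiny handshake: on connect the server
# sends the literal string "@username", the client answers with its chosen name, and every later
# message is relayed verbatim to all other connected clients through broadcast().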
def cliente_CHAT_socket_python():
import socket
import threading
#username = input("Ingresa tu nombre de usuario: ")
username = "Ingresa"
host = '192.168.43.134'
port = 8080
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect((host, port))
def receive_messages():
while True:
try:
message = client.recv(1024).decode('utf-8')
if message == "@username":
client.send(username.encode("utf-8"))
else:
print('\n'+message+'\n<<< Tu:',end='')
except:
print("Houston! Tenemos Problemas")
client.close()
break
def write_messages():
while True:
tu=input('<<< Tu:')
if tu=='salir':
client.close()
break
else:
message = f"{username}: {tu}"
client.send(message.encode('utf-8'))
receive_thread = threading.Thread(target=receive_messages)
receive_thread.start()
write_thread = threading.Thread(target=write_messages)
write_thread.start()
def ahorcado(pwd=d['img']+"palabras.txt"):
import random
vidas='******'
letrasCantadas=''
file=open(pwd,'r')
secretos=[line[:-1] for line in file]
file.close()
secreto=secretos[random.randrange(0,len(secretos))].upper()
letra_inicio=random.randrange(0,len(secreto))
palabra='_ '*len(secreto)
letra=secreto[letra_inicio]
while True:
if (letra in secreto) and (letra not in letrasCantadas):
letrasCantadas+=letra
for x in range(len(secreto)):
if secreto[x]==letra:
palabra=palabra[:2*x]+letra[0]+palabra[2*x+1:]
elif letra in letrasCantadas:
print("La letra '"+letra+"', ya fue Cantada...")
vidas=vidas[:-1]
else:
letrasCantadas+=letra
vidas=vidas[:-1]
print('\n\nPalabra Secreta: '+palabra)
print('Vidas: '+vidas+', te quedan '+str(len(vidas))+' vidas.')
print('Letras Cantadas: '+letrasCantadas)
if vidas:
if [l for l in secreto if l not in letrasCantadas]:
letra=input('Siguiente Letra: <<< ')[0].upper()
else:
if input("Muchas Felicidades Lograste Descubrir la palabra secreta "+secreto.upper()+". \n¿Si queres volver a jugar ingresa cualquier letra, sino es asi presiona enter? :<<<"):
secreto=secretos[random.randrange(0,len(secretos))]
letra_inicio=random.randrange(0,len(secreto))
palabra='_ '*len(secreto)
letra=secreto[letra_inicio]
letrasCantadas=''
vidas='******'
else:
break
else:
print("Te quedaste sin vidas JAJAJA. \nLa palabra Secreta es: "+secreto)
break
def manipularArchivos(pwd=d['img'],f='',ret=0):
bi=b''
    if f:
        file=f
    else:
        file='subiendo'
binario=open(file,'rb')
for b in binario:
bi+=b
binario.close()
print(bi[:1024])
print(bi[-1024:])
def capturarNumerosMagicos(pwd=d['img'],f='',ret=0):
bi={}
miDir=os.listdir(pwd)
muestra=100
contador=0
for file in miDir:
bi[contador]={'name':file,'inicio':b''}
binario=open(pwd+'\\'+file,'rb')
for inicio in binario:
bi[contador]['inicio']+=inicio
binario.close()
bi[contador]['fin']=bi[contador]['inicio'][-muestra:]
bi[contador]['inicio']=bi[contador]['inicio'][:muestra]
print('name',bi[contador]['name'])
print('inicio',bi[contador]['inicio'])
print('fin',bi[contador]['fin'])
contador+=1
print(__name__)
if 'main' in __name__:
import threading
pruebas=[
{'titulo':"salir",'f':exit},
{'titulo':"Lista de las variables del sistema",'f':VariablesDeEntorno},
{'titulo':"os.path, Manipulaciones comunes de nombre de ruta:",'f':powerPath},
{'titulo':"Prueba para generar archivo pdf desde una variable de tipo diccionario",'f':powerPDF},
{'titulo':'Visor de Imagenes Pro v0.1 XD hecho en Tkinter','f':showAlbum},
{'titulo':"Modificar una imagen, redimensionar la imagen guardar como otro archivo nuevo, luego usar ese nuevo archivo dentro de widget Button",'f':ButtonConImagen},
{'titulo':"Crear un archivo PLayList para reproducir con el 'Reproductor Multimedia' del sistema operativo",'f':playlist},
{'titulo':"retorna un poligono, que seria una hoja de unas aspas de un aerogenerador de viento ",'f':aspas},
{'titulo':"Genera un PDF del poligono que vos le pases",'f':poligonoToPDF},
{'titulo':"generamos un plano en un PDF con los dos ejemplos 9 y 10",'f':planos},
{'titulo':"time ,pruebas con la libreria time:",'f':timeConOsPath},
{'titulo':"obtener ip publica y pribada:",'f':red},
{'titulo':"Decoradores y funciones y parametros",'f':decoradores},
{'titulo':"Servidor Socket Python",'f':servidor_CHAT_socket_python},
{'titulo':"Cliente Socket Python",'f':cliente_CHAT_socket_python},
{'titulo':"Administrador Servidor HTTP Python",'f':administrador_servidor_HTTP_python},
{'titulo':"Servidor HTTP Python",'f':servidor_HTTP_python},
{'titulo':"Ahorcado",'f':ahorcado},
{'titulo':"leerBinario",'f':manipularArchivos},
{'titulo':"Capturar numeros magicos:",'f':capturarNumerosMagicos}
]
def f(num):
print('######################################################################')
print("PRUEBA",num,"Inicianda: "+pruebas[num]['titulo'])
print('######################################################################'+'\n')
hilo=threading.Thread(target=pruebas[num]['f'])
#llamamos a la funcion
hilo.start()
#esperamos que termine
hilo.join()
#Aviso de que la funcion termino.
print('\n'+"PRUEBA Terminada...")
print('\n')
if len(sys.argv)>1:
f(int(sys.argv[1]))
exit()
num=1
while num > 0:
num=0
for prueba in range(0,len(pruebas),1):
print(prueba,pruebas[prueba]['titulo'])
inpu=input('Ingrese el numero de la siguiente prueba: ').split(' ')
num=int(inpu[0] if inpu[0] != '' else 0)
if num > 0:
f(num)
else:
exit()
#https://youtube.com/playlist?list=PLU8oAlHdN5BlvPxziopYZRd55pdqFwkeS
|
legion.py
|
#!/usr/bin/env python3
import time
import sys
import select
import re
import getopt
import uuid
import socket
import hashlib
import random
import datetime
import os
import netifaces
import fnmatch
from comms import Comms
from multicast import MultiCast
from multicast import continuousTimer
from multiprocessing import Process, Manager
from utils import Utils
from node import Node
from splitjobs import Split
from portscan import Scanner
class Legion():
def __init__(self, ip, port, mcastChannel="234.233.232.231", mcastPort=8193):
# standard meta info
self.ip = ip
self.port = port
self.neighbors = {} # dict of neighbor nodes
self.exit = None # Node to send exit traffic to
self.exitWeight = 0 # exit weight for calculations
self.uid = self.genUID() # personal id
self.nodeCount = 0
# CLIENT specific variables
self.outputBuf = ""
self.proclist = {}
self.pushedfiles = {}
# multiprocessing stuff
self.manager = Manager()
self.que = self.manager.Queue()
# multicast stuff
self.mcast = None
self.mcastListenerThread = None
self.mcastProbeThread = None
self.mcastChannel = mcastChannel
self.mcastPort = mcastPort
self.setupMulticaster()
# meshnet forwarding
self.meshPort = random.randint(40000, 65000)
self.hashMsgs = dict()
self.meshListener = None
self.startMeshListener()
self.meshServerIP = ""
self.meshServerPort = 0
# -----------------------------------------------------
# Meshnet code
# -----------------------------------------------------
# start a meshnet listener
def startMeshListener(self):
# make sure we find an unused port
while Comms.test_port(self.meshPort):
self.meshPort = random.randint(40000, 65000)
self.meshListener = Comms.create_server_socket("0.0.0.0", self.meshPort)
# forward any meshnet traffic to all neighbors
def forwardTraffic(self, srcip, srcport, dstip, dstport, msg):
for uid in self.neighbors:
if (self.neighbors[uid].location == "Mesh"):
if (self.neighbors[uid].ip == dstip) and (self.neighbors[uid].port == dstport):
# connect and send
remote_sock = Comms.create_direct_socket(self.neighbors[uid].ip, self.neighbors[uid].port)
if (remote_sock):
Comms.sendMsg(remote_sock, srcip + ":" + str(srcport) + ":" + dstip + ":" + str(dstport) + ":" + msg)
msg = srcip + ":" + str(srcport) + ":" + dstip + ":" + str(dstport) + ":" + msg
remote_sock.close()
else:
print ("FAILED TO SEND")
return
for uid in self.neighbors:
if (self.neighbors[uid].location == "Mesh"):
if not ((self.neighbors[uid].ip == srcip) and (self.neighbors[uid].port == srcport)):
# connect and send
remote_sock = Comms.create_direct_socket(self.neighbors[uid].ip, self.neighbors[uid].port)
if (remote_sock):
Comms.sendMsg(remote_sock, srcip + ":" + str(srcport) + ":" + dstip + ":" + str(dstport) + ":" + msg)
remote_sock.close()
else:
print ("FAILED TO SEND")
return
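    # Note (added): mesh frames are plain strings of the form
    #   "<srcip>:<srcport>:<dstip>:<dstport>:<payload>"
    # and the client() loop below de-duplicates them with a sha256 digest remembered for ten
    # minutes, so a frame that loops back through the mesh is only processed once.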
# TODO LATER
def findExitRoute(self, ip, port):
if self.testConnectivity(ip, port):
self.exit = None
else:
best = None
lowest = None
#for uid, neighbor in self.neighbors:
# True
# # ask each neighbor for their exit node and weight, then pick the lowest
# # if weight < lowest ... lowest = weight ... best = uid
if best:
self.exit = best
self.exitWeight = lowest
# -----------------------------------------------------
# Multicast Code
# -----------------------------------------------------
# setup multicaster configuration
def setupMulticaster(self):
if (self.mcastChannel and self.mcastPort):
self.mcast = MultiCast(self.mcastPort, self.mcastChannel, 1)
# setup listener
self.mcastListenerThread = Process(target=self.mcast.recv, args=(self.que,))
self.mcastListenerThread.start()
# used for generating unique self id
def genUID(self):
return str(uuid.uuid1())
# send out multicast probe
def probeNeighbors(self):
if (self.mcast):
self.mcastProbeThread = continuousTimer(1, self.mcast.send, self.uid, str(self.meshPort))
# test to see if we can connect to a neighbor/node
def testConnection(self, neighbor):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((neighbor.ip,neighbor.port))
if result == 0:
return True
else:
return False
# test to see if we can still communicate to our neighbors
def testNeighbors(self):
for uid in self.neighbors:
if not self.testConnection(self.neighbors[uid].ip, self.neighbors[uid].port):
self.rmNeighbor(uid)
def isNeighbor(self, uid):
for key in self.neighbors:
if uid == self.neighbors[key].uid:
return True
return False
# add a new neighbor
def addNeighbor(self):
while not self.que.empty():
t = self.que.get()
ip = str(t[0][0])
data = t[1].split(':')
uid = str(data[0][1:])
port = int(data[1])
temp = Node(ip, port, uid, 1, None, "Mesh")
if (not self.isNeighbor(uid)):
print ("\nFound Neighbor...........")
self.neighbors[self.nodeCount] = temp
self.nodeCount+= 1
else:
None
# delete a neighbor we can no longer access
def rmNeighbor(self, uid):
del self.neighbors[uid]
# return a list of neighbor nodes
def listNeighbors(self):
n_list = []
for uid in self.neighbors:
tmp_str = str(uid) + "::" + self.neighbors[uid].ip + ":" + str(self.neighbors[uid].port)
n_list.append(tmp_str)
return n_list
# -----------------------------------------------------
# Client/Server code
# -----------------------------------------------------
def client_process_cmds(self, msg, sock):
if (msg.startswith("EXIT")):
sys.stdout.write("Client Terminated!!!\r\n")
self.cleanup()
elif (msg.startswith("SCAN")):
p = re.compile("SCAN:(.+?):(.+?):(.+)")
m = p.match(msg)
if (m):
print("Scanning: " + m.group(1) + " " + m.group(2) + "-" + m.group(3))
self.outputBuf += '\r\n'.join(str(x) for x in Scanner.scan(m.group(1), range(int(m.group(2)), int(m.group(3)))))
print ("finished")
print (self.outputBuf)
elif (msg.startswith("WGET")):
p = re.compile("WGET\s+(.+)")
m = p.match(msg)
if (m):
print("Getting: " + m.group(1))
Utils.wget(m.group(1))
elif (msg.startswith("EXEC")):
p = re.compile("EXEC\s+(.+)")
m = p.match(msg)
if (m):
sys.stdout.write("Executing [%s]\n" % m.group(1))
self.outputBuf += "\n\n" + Utils.execWait(m.group(1)).decode('unicode_escape')
#pid = Utils.exec(m.group(1))
#self.proclist[pid] = m.group(1)
else:
None
elif (msg.startswith("PROCLIST")):
for pid in self.proclist:
self.outputBuf += str(pid) + " " + self.proclist[pid] + '\n'
elif (msg.startswith("EXIST")):
(tmp, cmd) = msg.split(':', 1)
result = "false"
(short_cmd, args) = cmd.split(' ')
if (Utils.which(short_cmd)):
result = "true"
Comms.sendMsg(sock, result)
elif (msg.startswith("CLEARBUFFER")):
self.outputBuf = ""
elif (msg.startswith("GETBUFFER")):
created_sock = False
if not sock:
# assume we are returning something to the mesh server
sock = Comms.create_direct_socket(self.meshServerIP, int(self.meshServerPort))
created_sock = True
self.outputBuf = self.ip + ":" + str(self.meshPort) + ":" + self.meshServerIP + ":" + str(self.meshServerPort) + ":" +self.outputBuf
Comms.sendMsg(sock, self.outputBuf)
if created_sock:
sock.close()
elif (msg.startswith("PUSH")):
(tmp, filename, file_len, file_data) = msg.split(':', 3)
filename = os.path.basename(filename)
print("received file: " + filename)
filename_orig = filename
filename = "tmp/" + filename
if (Utils.fileExists(filename)):
filename += "_" + Utils.getRandStr(5)
self.pushedfiles[filename_orig] = filename
Utils.writeFile(file_data, filename, "ab")
elif (msg.startswith("PULL")):
(tmp, filename) = msg.split(':', 1)
file_data = ""
if (Utils.fileExists(filename)):
file_t = open(filename, "rb")
file_data = file_t.read()
self.outputBuf = file_data.decode()
elif (msg.startswith("NEIGHBORS")):
self.outputBuf = '\n'.join(self.listNeighbors())
else:
sys.stdout.write(msg)
def client(self, host, port):
slist = [] #array of client sockets
# start the multicast probes
self.probeNeighbors()
if (host and port):
remote_server_sock = Comms.create_direct_socket(host, port)
slist.append(remote_server_sock)
# add mesh listener if necessary
if (self.meshListener):
slist.append(self.meshListener)
print ("MeshNet Listener started on port: " + str(self.meshPort))
while(1):
self.addNeighbor()
# get the list sockets which are ready to be read through select
# 4th arg, time_out = 0 : poll and never block
ready_to_read,ready_to_write,in_error = select.select(slist,[],[],0)
for sock in ready_to_read:
if (self.meshListener) and (sock == self.meshListener):
sockfd, addr = sock.accept()
msg = Comms.readMsg(sockfd, 4096)
# construct msg hash
m = hashlib.sha256()
m.update(msg.encode('ISO-8859-1'))
hash_key = m.hexdigest()
timestamp = datetime.datetime.now()
good = True
if hash_key in self.hashMsgs:
stored_timestamp = datetime.datetime.strptime(self.hashMsgs[hash_key], '%Y-%m-%d %H:%M:%S.%f')
if (timestamp <= (stored_timestamp + datetime.timedelta(minutes = 10))):
good = False
# if we have not seen the message before then process it
if good:
self.hashMsgs[hash_key] = str(timestamp)
(srcip, srcport, dstip, dstport, data) = msg.split(':', 4)
# set mesh server ip:port
self.meshServerIP = srcip
self.meshServerPort = int(srcport)
if (dstip == self.ip) and (int(dstport) == self.meshPort):
#process msg
self.client_process_cmds(data, None)
else:
self.forwardTraffic(srcip, srcport, dstip, dstport, data)
elif (sock == remote_server_sock): # a new connection request received
msg = Comms.readMsg(sock, 4096)
msg = msg.lstrip('\r\n')
self.client_process_cmds(msg, sock)
def rmtsh(self, tmp_sock, slist, server_sock):
prompt = "rmtsh (EXIT to quit) "
cwd = "" # used to keep track of current working dir
        # attempt to get the pwd/cwd so we can use it in our commands
Comms.sendMsg(tmp_sock, "EXEC pwd")
Comms.sendMsg(tmp_sock, "GETBUFFER")
while (1):
displayPrompt = False
ready_to_read,ready_to_write,in_error = select.select(slist,[],[],0)
for sock in ready_to_read:
displayPrompt = False
if (sock == sys.stdin): # server sending message
msg = sys.stdin.readline()
msg = msg.lstrip('\r\n ') # clean up line removing any starting spaces and CRLF
if 'EXIT' in msg: # did we enter EXIT?
return
else: #must have entered some other command
msg = msg.rstrip('\r\n')
if len(msg) > 0: # is this a blank line? just a return?
if (cwd): # do we have a stored cwd?
msg = "cd " + cwd + " ; " + msg # if so, change command to prepend a "cd <cwd> ; "
Comms.sendMsg(tmp_sock, "EXEC " + msg + " ; pwd") # append a "; pwd" to the command so we can find out the ending working directory
Comms.sendMsg(tmp_sock, "GETBUFFER")
else:
displayPrompt = True
elif (sock != server_sock) and (sock != sys.stdin):
msg = Comms.readMsg(sock, 4096)
msg = msg.rstrip('\r\n')
indx = msg.rfind('\n') # what is the ending line break?
if indx == -1:
indx = 0
cwd = msg[indx:].lstrip('\r\n').rstrip('\r\n')
msg = msg[:indx]
sys.stdout.write("\r\n")
sys.stdout.write(msg)
sys.stdout.write("\r\n")
displayPrompt = True
else:
displayPrompt = False
if (displayPrompt):
sys.stdout.write(prompt + cwd + "> ")
sys.stdout.flush()
def server_process_cmds(self, slist, ignore_list, msg, server_sock):
displayPrompt = False
if (msg.startswith("NODE:")):
p = re.compile("NODE:(\d+)\s+(.*)")
m = p.match(msg)
if (m):
if (self.neighbors[int(m.group(1))].location == "Direct"):
displayPrompt = self.server_process_cmds([self.neighbors[int(m.group(1))].socket], ignore_list, m.group(2), server_sock)
else:
sys.stdout.write("Can only use NODE on Direct connections" + '\r\n')
displayPrompt = True
else:
displayPrompt = True
elif (msg.startswith("HELP") or msg.startswith("help") or msg.startswith("?") or msg.startswith("/?")):
sys.stdout.write(getHelp())
displayPrompt = True
elif (msg.startswith("LIST")):
sys.stdout.write("List of current client/slave nodes:\r\n")
sys.stdout.write("------------------------------------------------------\r\n")
sys.stdout.write('{:5} {:21} {:13}'.format("<#>", "<IP>:<PORT>", "<Direct/Mesh>"))
sys.stdout.write("\r\n")
for uid in self.neighbors:
sys.stdout.write('{:5} {:21} {:13}'.format(str(uid), self.neighbors[uid].ip + ":" + str(self.neighbors[uid].port), self.neighbors[uid].location))
sys.stdout.write("\r\n")
displayPrompt = True
elif (msg.startswith("PUSH")):
(tmp, filename) = msg.split(':', 1)
for key in self.neighbors:
if (self.neighbors[key].location == "Direct"):
if self.neighbors[key].socket in slist:
Comms.sendFile(self.neighbors[key].socket, filename)
displayPrompt = True
elif (msg.startswith("PULL")):
p = re.compile("PULL:(.*)")
m = p.match(msg)
if (m):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("SCAN")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("WGET")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("EXEC")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("SHELL")):
p = re.compile("SHELL:(\d+)")
m = p.match(msg)
if (m):
if (self.neighbors[int(m.group(1))].location == "Direct"):
self.rmtsh(self.neighbors[int(m.group(1))].socket, slist, server_sock)
else:
sys.stdout.write("Can only use SHELL on Direct connections" + '\r\n')
displayPrompt = True
elif (msg.startswith("CLEARBUFFER")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("GETBUFFER")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = False
elif (msg.startswith("EXIT")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("QUIT")):
self.cleanup()
elif (msg.startswith("PROCLIST")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("NEIGHBORS")):
Comms.broadcast(slist, ignore_list, msg)
displayPrompt = True
elif (msg.startswith("MESH:")):
p = re.compile("MESH:(\d+)\s+(.*)")
m = p.match(msg)
if (m):
if (self.neighbors[int(m.group(1))].location == "Mesh"):
n = self.neighbors[int(m.group(1))]
self.forwardTraffic(self.ip, self.meshPort, n.ip, n.port, m.group(2))
displayPrompt = True
elif (msg.startswith("DIST")):
            #match DIST:<file to split> <command>  (see HELP)
            #(.*?) captures the file to split (everything up to the first space)
            #(.*) captures the command to run on each node
            #p compiles the regex
p = re.compile("DIST:(.*?) (.*)")
#m matches DIST <something> <something>.
m = p.match(msg)
            #m.group(1) is the file to split, m.group(2) is the command
#correct command matches? then lets go!
if (m):
# make a local list to use
clist = dict()
# only copy the Direct neighbors into the new list
count = 0
for key in self.neighbors:
if self.neighbors[key].location == "Direct":
print ("testing if command exists : ")
Comms.sendMsg(self.neighbors[key].socket, "EXIST:" + m.group(2))
if Comms.readMsg(self.neighbors[key].socket) == "true":
clist[count] = self.neighbors[key]
print ('clist '+clist[count].ip)
count += 1
#check for clients, sending commands is pointless if no clients
if len(clist) < 1:
#is this the best way? probably not
print('no clients!')
#give the user back their prompt
displayPrompt = True
#ok, we have clients...now what?
else:
#first, split the input file into n parts, where n is count of nodes
#splitjobs.Split takes clist to count nodes, and the filename to split
#splitjobs.Split will then write files to ./tmp called 0.splitout 1.splitout ,etc
s = Split(clist, m.group(1))
files = s.getFiles()
print(files)
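                    # e.g. with two Direct nodes, files would be something like
                    # ['0.splitout', '1.splitout'] (one chunk per node, per the Split
                    # behaviour described above); names shown purely as an illustration.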
#command logic check--todo
#if m.group(2) is nmap, then xx, if its hashcat, then... etc
#for now assume any command we want to distribute accepts a text file
#for each client in clist
for i in range (0,len(clist)):
filename = files.pop()
for key in self.neighbors:
if self.neighbors[key].uid == clist[i].uid:
#send this file to a node. file 0.splitout would go to node 0
#PUSH code goes here to transfer 0.splitout to node 1 (0th node), etc
#issue PUSH as a server command
print("running NODE:" + str(key) + " PUSH:tmp/"+filename)
displayPrompt = self.server_process_cmds([clist[i].socket], ignore_list, "NODE:%s PUSH:%s" % (key,filename), server_sock)
time.sleep(2)
print("running NODE:%s %s " % (i,m.group(2))+filename)
displayPrompt = self.server_process_cmds([clist[i].socket], ignore_list, "NODE:%s EXEC %s" % (key,m.group(2))+''+filename, server_sock)
time.sleep(2)
break
else:
# do nothing for now
displayPrompt = True
return displayPrompt
def server(self, host, port):
slist = [] #array of client sockets
nid = 0
prompt = "# (stuck? type HELP)> "
server_sock = Comms.create_server_socket(host, port)
slist.append(server_sock)
slist.append(sys.stdin)
print ("Server started on IPs : " + str(host))
print ("Server started on port: " + str(port))
# add mesh listener if necessary
if (self.meshListener):
slist.append(self.meshListener)
print ("MeshNet Listener started on port: " + str(self.meshPort))
sys.stdout.write(prompt)
sys.stdout.flush()
displayPrompt = False
while(1):
self.addNeighbor()
displayPrompt = False
            # get the list of sockets which are ready to be read through select
# 4th arg, time_out = 0 : poll and never block
ready_to_read,ready_to_write,in_error = select.select(slist,[],[],0)
for sock in ready_to_read:
displayPrompt = False
if (self.meshListener) and (sock == self.meshListener):
sockfd, addr = sock.accept()
msg = Comms.readMsg(sockfd, 4096)
# construct msg hash
m = hashlib.sha256()
m.update(msg.encode('ISO-8859-1'))
hash_key = m.hexdigest()
timestamp = datetime.datetime.now()
good = True
if hash_key in self.hashMsgs:
stored_timestamp = datetime.datetime.strptime(self.hashMsgs[hash_key], '%Y-%m-%d %H:%M:%S.%f')
                        if (timestamp <= (stored_timestamp + datetime.timedelta(minutes = 10))):
good = False
# if we have not seen the message before then process it
if good:
                        self.hashMsgs[hash_key] = str(timestamp)
(srcip, srcport, dstip, dstport, data) = msg.split(':', 4)
if (dstip == self.ip) and (int(dstport) == self.meshPort):
#process msg
sys.stdout.write(data)
sys.stdout.write("\r\n")
displayPrompt = True
else:
# the server does not forward messages
None
elif (sock == server_sock): # a new connection request received
nid = nid+1
sockfd, addr = sock.accept()
slist.append(sockfd)
self.neighbors[self.nodeCount] = Node(addr[0], addr[1], self.genUID(), 1, sockfd, "Direct")
self.nodeCount += 1
sys.stdout.write("\r" + "Client %i : (%s, %s) connected\n" % (nid, addr[0], addr[1]))
displayPrompt = True
                elif (sock == sys.stdin): # operator typed a command on stdin
msg = sys.stdin.readline()
msg = msg.lstrip('\r\n')
msg = msg.rstrip('\r\n')
displayPrompt = self.server_process_cmds(slist, [server_sock, self.meshListener], msg, server_sock)
elif (sock != server_sock) and (sock != sys.stdin):
msg = Comms.readMsg(sock, 4096)
sys.stdout.write("=====================")
sys.stdout.write("\r\n")
(ip, port) = sock.getpeername()
sys.stdout.write(ip + ":" + str(port))
sys.stdout.write("\r\n")
sys.stdout.write("---------------------")
sys.stdout.write("\r\n")
sys.stdout.write(msg)
sys.stdout.write("\r\n")
displayPrompt = True
else:
sys.stdout.write("[UNKNOWN SOCKET]")
sys.stdout.write("\r\n")
displayPrompt = True
if (displayPrompt):
sys.stdout.write(prompt)
sys.stdout.flush()
# ----------------------------
# CTRL-C display and exit
# ----------------------------
def ctrlc(self):
print("Ctrl-C caught!!!")
self.cleanup()
def cleanup(self):
try:
if self.meshListener:
self.meshListener.close()
if self.mcastProbeThread:
self.mcastProbeThread.stop()
if self.mcastListenerThread:
self.mcastListenerThread.terminate()
except: # bad form, but just a general catch all for now
None
sys.exit(0)
def getLocalIP():
default_iface = netifaces.gateways()['default'][netifaces.AF_INET][1]
default_data = netifaces.ifaddresses(default_iface).setdefault(netifaces.AF_INET, [{'addr':'No IP addr'}])
for i in default_data:
return (i['addr'])
def selectLocalIP():
ips = list()
print ("------------------------")
i = 1
for ifaceName in netifaces.interfaces():
tmp_ips = [i['addr'] for i in netifaces.ifaddresses(ifaceName).setdefault(netifaces.AF_INET, [{'addr':'No IP addr'}] )]
ips += tmp_ips
print(str(i) + ": " + str(tmp_ips) + " (" + ifaceName +")")
i = i + 1
print ("------------------------")
print ("")
answer = input("Which IP to use as Server IP? ")
return ips[int(answer)-1]
def getHelp():
tmp = "HELP = List of Available Commands\r\n"
tmp += "---------------------------------------------------------------------\r\n"
tmp += "HELP displays this message\r\n"
tmp += "LIST displays a list of all nodes\r\n"
tmp += "EXEC executes a command on all nodes\r\n"
tmp += "DIST:<file to split> <cmd> distributes a job on all nodes and returns data\r\n"
tmp += "GETBUFFER pulls any output from a node\r\n"
tmp += "CLEARBUFFER clears any buffered output from a node\r\n"
tmp += "PUSH:<filename> push a file to remote node(s)\r\n"
tmp += "PULL:<filename> pull a file from remote node(s)\r\n"
tmp += "NEIGHBORS displays a list of all neighbor nodes\r\n"
tmp += "SHELL:# opens remote shell on client #\r\n"
tmp += "NODE:# <cmd> issues another command only to node #\r\n"
tmp += "MESH:# <cmd> send message/cmd to remote node\r\n"
tmp += "SCAN:<ip>:<start port>:<stop port> perform port scan\r\n"
tmp += "WGET <url> download file from URL\r\n"
tmp += "EXIT shut down the nodes\r\n"
tmp += "QUIT EXITS server\r\n"
tmp += "\r\n"
return tmp
def usage():
print('python3 legion.py -i <server ip> -p <server port>')
    print(' -i, --serverip= ip of the server node (not used when -s is set)')
print(' -p, --serverport= port the server node is listening on')
print(' -s run in server mode')
print(' -h help (this message)')
if __name__ == "__main__":
n_ip = getLocalIP()
n_port = None
s_ip = None
s_port = None
s_flag = False
try:
opts, args = getopt.getopt(sys.argv[1:],"hsi:p:",["serverip=","serverport="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
usage()
sys.exit()
elif opt in ("-i", "--serverip"):
s_ip = arg
elif opt in ("-p", "--serverport"):
s_port = int(arg)
elif opt in ("-s"):
s_flag = True
mnet = Legion(n_ip, n_port)
try:
if (s_flag):
if not s_ip:
#s_ip = "0.0.0.0"
s_ip = selectLocalIP()
sys.exit(mnet.server(s_ip, s_port))
else:
sys.exit(mnet.client(s_ip, s_port))
except KeyboardInterrupt:
mnet.ctrlc()
except:
sys.exit(0)
|
test__geometric_intersection.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import unittest
import numpy as np
from tests.unit import utils
from tests.unit.hazmat import test_geometric_intersection
@utils.needs_speedup
class Test_speedup_bbox_intersect(
test_geometric_intersection.Test_bbox_intersect
):
@staticmethod
def _call_function_under_test(nodes1, nodes2):
from bezier import _speedup
return _speedup.bbox_intersect(nodes1, nodes2)
@utils.needs_speedup
class Test_speedup_all_intersections(
test_geometric_intersection.Test_all_intersections
):
@staticmethod
def _call_function_under_test(nodes_first, nodes_second, **kwargs):
from bezier import _speedup
return _speedup.curve_intersections(
nodes_first, nodes_second, **kwargs
)
@staticmethod
def reset_curves_workspace(workspace_size):
from bezier import _speedup
return _speedup.reset_curves_workspace(workspace_size)
@staticmethod
def curves_workspace_size():
from bezier import _speedup
return _speedup.curves_workspace_size()
def test_workspace_resize(self):
nodes1 = np.asfortranarray([[-3.0, 5.0], [0.0, 0.0]])
nodes2 = np.asfortranarray(
[[-7.0, 9.0, -7.0, 9.0], [-9.0, 13.0, -13.0, 9.0]]
)
# NOTE: These curves intersect 3 times, so a workspace of
# 2 is not large enough.
self.reset_curves_workspace(2)
intersections, coincident = self._call_function_under_test(
nodes1, nodes2
)
expected = np.asfortranarray([[0.5, 0.375, 0.625], [0.5, 0.25, 0.75]])
self.assertEqual(intersections, expected)
self.assertFalse(coincident)
# Make sure the workspace was resized.
self.assertEqual(self.curves_workspace_size(), 3)
def test_workspace_too_small(self):
from bezier import _speedup
nodes1 = np.asfortranarray([[-3.0, 5.0], [0.0, 0.0]])
nodes2 = np.asfortranarray(
[[-7.0, 9.0, -7.0, 9.0], [-9.0, 13.0, -13.0, 9.0]]
)
# NOTE: These curves intersect 3 times, so a workspace of
# 2 is not large enough.
self.reset_curves_workspace(2)
with self.assertRaises(ValueError) as exc_info:
self._call_function_under_test(nodes1, nodes2, allow_resize=False)
exc_args = exc_info.exception.args
expected = _speedup.TOO_SMALL_TEMPLATE.format(3, 2)
self.assertEqual(exc_args, (expected,))
# Make sure the workspace was **not** resized.
self.assertEqual(self.curves_workspace_size(), 2)
@utils.needs_speedup
class Test_reset_curves_workspace(unittest.TestCase):
@staticmethod
def _call_function_under_test(workspace_size):
from bezier import _speedup
return _speedup.reset_curves_workspace(workspace_size)
def test_it(self):
from bezier import _speedup
size = 5
return_value = self._call_function_under_test(size)
self.assertIsNone(return_value)
self.assertEqual(_speedup.curves_workspace_size(), size)
@unittest.expectedFailure
def test_threadsafe(self):
from bezier import _speedup
size_main = 3
self._call_function_under_test(size_main)
worker = WorkspaceThreadedAccess()
self.assertIsNone(worker.size1)
self.assertIsNone(worker.size2)
size1 = 7
size2 = 8
thread1 = threading.Thread(target=worker.task1, args=(size1,))
thread2 = threading.Thread(target=worker.task2, args=(size2,))
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# This check demonstrates the **broken-ness** of the implementation.
# The sizes for each thread should be the sizes actually **set** in
# the given thread and the workspace in the main thread should be
# unchanged (i.e. should have ``size_main``). What we'll actually
# observe is ``(size2, size1, size2)``.
expected = (size1, size2, size_main)
actual = (worker.size1, worker.size2, _speedup.curves_workspace_size())
self.assertEqual(actual, expected)
@utils.needs_speedup
class Test_curves_workspace_size(unittest.TestCase):
@staticmethod
def _call_function_under_test():
from bezier import _speedup
return _speedup.curves_workspace_size()
def test_it(self):
from bezier import _speedup
size = 5
_speedup.reset_curves_workspace(size)
self.assertEqual(self._call_function_under_test(), size)
class WorkspaceThreadedAccess:
def __init__(self):
self.barrier1 = threading.Event()
self.barrier2 = threading.Event()
self.barrier3 = threading.Event()
self.size1 = None
self.size2 = None
def event1(self, size):
from bezier import _speedup
# NOTE: There is no need to ``wait`` since this is the first event.
_speedup.reset_curves_workspace(size)
self.barrier1.set()
def event2(self):
from bezier import _speedup
self.barrier1.wait()
result = _speedup.curves_workspace_size()
self.barrier2.set()
return result
def event3(self, size):
from bezier import _speedup
self.barrier2.wait()
_speedup.reset_curves_workspace(size)
self.barrier3.set()
def event4(self):
from bezier import _speedup
self.barrier3.wait()
# NOTE: There is no barrier to ``set`` since this is the last event.
return _speedup.curves_workspace_size()
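    # The intended interleaving, enforced by the events above: thread1 sets its size
    # (event1), thread2 reads it (event2) and then sets its own size (event3), and
    # thread1 reads back last (event4). With a truly thread-local workspace each
    # thread would read back the size it set itself; the expectedFailure test above
    # documents that the shared workspace in the compiled extension does not behave
    # that way.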
def task1(self, size):
self.event1(size)
self.size1 = self.event4()
def task2(self, size):
self.size2 = self.event2()
self.event3(size)
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.test.process_replay.helpers import OpenpilotPrefix
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
PROC_REPLAY_DIR = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(PROC_REPLAY_DIR, "fakedata/")
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster', 'submaster_config'], defaults=({},))
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
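# A rough sketch of the FakeSocket rendezvous above: the replayed process calls
# receive(), which sets recv_called and then blocks on recv_ready; the test driver's
# send() waits for recv_called, appends the data, and sets recv_ready so the blocked
# receive() returns it. This lock-steps the process under test with the replayed log.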
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services, ignore_alive=None, ignore_avg_freq=None):
super().__init__(services, ignore_alive=ignore_alive, ignore_avg_freq=ignore_avg_freq, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super().update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
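# For example, if a published service runs at the same rate as "can" the ratio above
# is 1 and every can frame expects one output, while a service published at half the
# can rate is only expected every other frame (rates come from service_list, not this
# file). The 2000-frame guard mirrors the comment above: sendcan is not expected until
# controlsd has finished initializing.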
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
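# Concretely: cameraOdometry frames 5, 10, 15, ... are the ones expected to carry a
# liveCalibration response (with frame 0 special-cased), and should_recv stays True
# for every cameraOdometry message so the fake SubMaster's frame counter keeps advancing.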
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaStates": [], "peripheralState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "wideRoadCameraState": [], "managerState": [], "testJoystick": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
submaster_config={
'ignore_avg_freq': ['radarState', 'longitudinalPlan', 'driverCameraState', 'driverMonitoringState'], # dcam is expected at 20 Hz
'ignore_alive': ['wideRoadCameraState'], # TODO: Add to regen
}
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan", "longitudinalPlan"],
"carState": [], "controlsState": [], "radarState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay", "longitudinalPlan.solverExecutionTime", "lateralPlan.solverExecutionTime"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverStateV2": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
def replay_process(cfg, lr, fingerprint=None):
with OpenpilotPrefix():
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
def setup_env(simulation=False):
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("DisengageOnAccelerator", True)
params.put_bool("WideCameraOnly", False)
params.put_bool("DisableLogging", False)
os.environ["NO_RADAR_SLEEP"] = "1"
os.environ["REPLAY"] = "1"
if simulation:
os.environ["SIMULATION"] = "1"
elif "SIMULATION" in os.environ:
del os.environ["SIMULATION"]
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets, **cfg.submaster_config)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
setup_env()
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
"Mazda CX-9 2021": "MAZDA CX-9 2021",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if msg.carParams.fingerprintSource == "fw" and (car_fingerprint in FW_VERSIONS):
Params().put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
log_msgs, msg_queue = [], []
for msg in pub_msgs:
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(msg.logMonoTime / 1e9, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg().as_builder()
m.logMonoTime = msg.logMonoTime
m = m.as_reader()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
# We need to fake SubMaster alive since we can't inject a fake clock
setup_env(simulation=True)
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(pub_msgs):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
response = response.as_builder()
response.logMonoTime = msg.logMonoTime
response = response.as_reader()
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
def check_enabled(msgs):
for msg in msgs:
if msg.which() == "carParams":
if msg.carParams.notCar:
return True
elif msg.which() == "controlsState":
if msg.controlsState.active:
return True
return False
|
background_task.py
|
#!/usr/bin/env python
# Run a background task while the main task loop progresses
import numpy as np
from threading import Thread
import rospy
import actionlib
from task_executor.abstract_step import AbstractStep
from actionlib_msgs.msg import GoalStatus
from task_executor.msg import ExecuteAction, ExecuteGoal
# Helpers
def goal_status_from_code(status):
mapping = {
GoalStatus.SUCCEEDED: "SUCCEEDED",
GoalStatus.PREEMPTED: "PREEMPTED",
GoalStatus.ABORTED: "ABORTED",
}
return mapping.get(status, status)
# The class definition
class BackgroundTaskAction(AbstractStep):
BACKGROUND_TASK_SERVER = "/idle_executor"
def init(self, name):
self.name = name
# Is this background behaviour enabled or is it disabled?
        self._enabled = False
# The background thread to keep track of the task
self._background_thread = None
# The action client
self._background_client = actionlib.SimpleActionClient(
BackgroundTaskAction.BACKGROUND_TASK_SERVER,
ExecuteAction
)
# Initialize the action client
rospy.loginfo("Connecting to the background task executor...")
self._background_client.wait_for_server()
rospy.loginfo("...background task executor connected")
def run(self, task):
if isinstance(task, (list, tuple,)):
task = np.random.choice(task)
rospy.loginfo("Action {}: Starting background task {}".format(self.name, task))
# Wait for an old thread to complete, if it must
self.stop()
while self._background_thread is not None:
rospy.sleep(0.5)
yield self.set_running()
# Create the goal, send it, and update in a new thread
goal = ExecuteGoal(name=task)
self._background_client.send_goal(goal)
self.notify_action_send_goal(BackgroundTaskAction.BACKGROUND_TASK_SERVER, goal)
self._enabled = True
self._background_thread = Thread(target=self._check_on_goal)
self._background_thread.start()
# Yield a success
        yield self.set_succeeded()
def stop(self):
self._enabled = False
def _check_on_goal(self):
# Wait on the background task to complete
while self._background_client.get_state() in AbstractStep.RUNNING_GOAL_STATES:
if not self._enabled or rospy.is_shutdown():
self._background_client.cancel_goal()
self.notify_action_cancel(BackgroundTaskAction.BACKGROUND_TASK_SERVER)
rospy.sleep(0.5)
# Get the state, result
status = self._background_client.get_state()
self._background_client.wait_for_result()
result = self._background_client.get_result()
self.notify_action_recv_result(BackgroundTaskAction.BACKGROUND_TASK_SERVER, status, result)
# Exit cleanly
rospy.loginfo("Action {}: Finished background task with result {}"
.format(self.name, goal_status_from_code(status)))
self._enabled = False
self._background_thread = None
|
explicit_threading.py
|
# Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from threading import Thread
from opencensus.common.runtime_context import RuntimeContext
RuntimeContext.register_slot('operation_id', '<empty>')
def work(name):
print('Entering worker:', RuntimeContext)
RuntimeContext.operation_id = name
print('Exiting worker:', RuntimeContext)
if __name__ == '__main__':
print('Main thread:', RuntimeContext)
RuntimeContext.operation_id = 'main'
print('Main thread:', RuntimeContext)
# by default context is not propagated to worker thread
thread = Thread(target=work, args=('foo',))
thread.start()
thread.join()
print('Main thread:', RuntimeContext)
# user can propagate context explicitly
thread = Thread(
target=RuntimeContext.with_current_context(work),
args=('bar',),
)
thread.start()
thread.join()
print('Main thread:', RuntimeContext)
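    # What the prints above should show, per the comments in this example: the first
    # worker starts from the registered default ('<empty>') because the context is not
    # copied into a plain Thread, while the second worker, wrapped with
    # with_current_context, starts from the main thread's 'main' before setting 'bar'.
    # The main thread's own operation_id remains 'main' throughout.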
|
tests.py
|
"""
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-view-class',
views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, tuple(), {'arg2': '37'}
),
(
'/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
views.empty_view, tuple(), {'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
tuple(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('42', '37'), {}
),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
views.empty_view, ('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
'inc-ns1', 'inc-ns1:inc-normal-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3',
'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3',
'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
(
'/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
views.empty_view, tuple(), {'outer': '70'}
),
(
'/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}
),
)
test_data = (
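    # Each entry is (name, expected, args, kwargs): reverse(name, args=args, kwargs=kwargs)
    # should return `expected`, or raise NoReverseMatch when `expected` is the
    # NoReverseMatch class (see test_urlpattern_reverse below).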
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
(
'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
dict(drive_name='C', path=r'Documents and Settings\spam')
),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('test', '/test/1', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import."
):
getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_view_not_found_message(self):
msg = (
"Reverse for 'non-existent-view' not found. 'non-existent-view' "
"is not a valid view function or pattern name."
)
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('non-existent-view')
def test_no_args_message(self):
msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places')
def test_illegal_args_message(self):
msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', args=(1, 2))
def test_illegal_kwargs_message(self):
msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
with self.assertRaisesMessage(NoReverseMatch, msg):
reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver('urlpatterns_reverse.named_urls')
self.assertEqual(resolver.reverse('named-url1'), '')
self.assertEqual(resolver.reverse('named-url2', 'arg'), 'extra/arg/')
self.assertEqual(resolver.reverse('named-url2', extra='arg'), 'extra/arg/')
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
TypeError from occurring later (#10834).
"""
with self.assertRaises(Resolver404):
resolve('')
with self.assertRaises(Resolver404):
resolve('a')
with self.assertRaises(Resolver404):
resolve('\\')
with self.assertRaises(Resolver404):
resolve('.')
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
with self.assertRaisesMessage(Resolver404, 'tried') as cm:
resolve('/included/non-existent-url', urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
self.assertEqual(
len(e.args[0]['tried']),
len(url_types_names),
'Wrong number of tried URLs returned. Expected %s, got %s.' % (
len(url_types_names), len(e.args[0]['tried'])
)
)
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
                self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(
t.name,
e['name'],
'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)
)
def test_namespaced_view_detail(self):
resolver = get_resolver('urlpatterns_reverse.nested_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver('urlpatterns_reverse.method_view_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))
def test_populate_concurrency(self):
"""
RegexURLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = RegexURLResolver(r'^/', 'urlpatterns_reverse.urls')
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
alfred = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.force_login(alfred)
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
self.write_settings('settings.py', extra="""
from django.urls import reverse_lazy
LOGIN_URL = reverse_lazy('login')""")
def tearDown(self):
self.remove_settings('settings.py')
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj:
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
with self.assertRaises(NoReverseMatch):
redirect('not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
# (previously, the below would resolve in a UnicodeEncodeError from __import__ )
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view')
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view', args=[37, 42])
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing')
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing', args=[37, 42])
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing', kwargs={'arg1': 42, 'arg2': 37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
with self.assertRaises(NoReverseMatch):
reverse('blahblah:urlobject-view')
with self.assertRaises(NoReverseMatch):
reverse('test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('included_namespace_urls:inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('included_namespace_urls:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/included/normal/42/37/',
reverse('included_namespace_urls:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/included/+%5C$*/', reverse('included_namespace_urls:inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_app_object(self):
"Dynamic URL objects can return a (pattern, app_name) 2-tuple, and include() can set the namespace"
self.assertEqual('/newapp1/inner/', reverse('new-ns1:urlobject-view'))
self.assertEqual('/newapp1/inner/37/42/', reverse('new-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/newapp1/inner/42/37/', reverse('new-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/newapp1/inner/+%5C$*/', reverse('new-ns1:urlobject-special-view'))
def test_app_object_default_namespace(self):
"Namespace defaults to app_name when including a (pattern, app_name) 2-tuple"
self.assertEqual('/new-default/inner/', reverse('newapp:urlobject-view'))
self.assertEqual('/new-default/inner/37/42/', reverse('newapp:urlobject-view', args=[37, 42]))
self.assertEqual(
'/new-default/inner/42/37/', reverse('newapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/new-default/inner/+%5C$*/', reverse('newapp:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('included_namespace_urls:test-ns3:urlobject-view'))
self.assertEqual(
'/included/test3/inner/37/42/', reverse('included_namespace_urls:test-ns3:urlobject-view', args=[37, 42])
)
self.assertEqual(
'/included/test3/inner/42/37/',
reverse('included_namespace_urls:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual(
'/included/test3/inner/+%5C$*/', reverse('included_namespace_urls:test-ns3:urlobject-special-view')
)
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_app_name_pattern(self):
"Namespaces can be applied to include()'d urlpatterns that set an app_name attribute"
self.assertEqual('/app-included1/normal/', reverse('app-ns1:inc-normal-view'))
self.assertEqual('/app-included1/normal/37/42/', reverse('app-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/app-included1/normal/42/37/', reverse('app-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/app-included1/+%5C$*/', reverse('app-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using an include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual(
'/ns-outer/42/normal/37/4/',
reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4})
)
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view')
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42])
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view')
)
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual(
'/default/inner/37/42/',
reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3')
)
self.assertEqual(
'/default/inner/42/37/',
reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3')
)
self.assertEqual(
'/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3')
)
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual(
'/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1')
)
self.assertEqual(
'/other1/inner/42/37/',
reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1')
)
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:included_namespace_urls:inc-normal-view'))
self.assertEqual(
'/+%5C$*/included/normal/37/42/',
reverse('special:included_namespace_urls:inc-normal-view', args=[37, 42])
)
self.assertEqual(
'/+%5C$*/included/normal/42/37/',
reverse('special:included_namespace_urls:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:included_namespace_urls:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual(
'/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'})
)
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))
def test_nested_app_lookup(self):
"A nested current_app should be split in individual namespaces (#24904)"
self.assertEqual('/ns-included1/test4/inner/', reverse('inc-ns1:testapp:urlobject-view'))
self.assertEqual('/ns-included1/test4/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/test4/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view'))
self.assertEqual(
'/ns-included1/test3/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='inc-ns1:test-ns3')
)
def test_current_app_no_partial_match(self):
"current_app should either match the whole path or shouldn't be used"
self.assertEqual(
'/ns-included1/test4/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='non-existent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='non-existent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37},
current_app='non-existent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='non-existent:test-ns3')
)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve_error_handler(400), handler)
self.assertEqual(self.resolver.resolve_error_handler(404), handler)
self.assertEqual(self.resolver.resolve_error_handler(500), handler)
def test_callable_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve_error_handler(400), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(404), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(500), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
with self.assertRaisesMessage(ValueError, "I don't think I'm getting good"):
self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
with self.assertRaises(ImproperlyConfigured):
self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
def test_urlpattern_resolve(self):
for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, 'view must be a callable'):
url(r'uncallable-object/$', views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
# passing a callable should return the callable
self.assertEqual(get_callable(empty_view), empty_view)
def test_exceptions(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
with self.assertRaisesMessage(ViewDoesNotExist, "View does not exist in"):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
with self.assertRaises(AttributeError):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
class IncludeTests(SimpleTestCase):
url_patterns = [
url(r'^inner/$', views.empty_view, name='urlobject-view'),
url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
def test_include_namespace(self):
msg = (
"Specifying a namespace in django.conf.urls.include() without "
"providing an app_name is not supported."
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include(self.url_patterns, 'namespace')
def test_include_3_tuple(self):
msg = 'Passing a 3-tuple to django.conf.urls.include() is not supported.'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
include((self.url_patterns, 'app_name', 'namespace'))
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
match = resolve(test_url)
self.assertEqual(match.kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
url = reverse('lookahead-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead+/a-city/')
url = reverse('lookahead-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead-/a-city/')
url = reverse('lookbehind-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind+/a-city/')
url = reverse('lookbehind-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind-/a-city/')
def test_invalid_reverse(self):
with self.assertRaises(NoReverseMatch):
reverse('lookahead-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookahead-negative', kwargs={'city': 'not-a-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-negative', kwargs={'city': 'not-a-city'})
|
foldbot.py
|
#!/usr/bin/env python
from threading import Thread
from bottle import get, post, run, request, response
from time import sleep
from dotenv import load_dotenv
from sys import exit
import requests
import os
load_dotenv()
port = 3000
username = os.getenv('USERNAME')
api_token = os.getenv('API_TOKEN')
bot_endpoint = os.getenv('BOT_ENDPOINT')
notifications = False
@post('/pokerwars.io/play')
def play():
    # This endpoint is called by pokerwars.io to request your bot's next move in a tournament.
# You have the current state of the table in the game_info object, which you can use to decide
# your next move.
game_info = request.json
print('Game info received for tournament ' + str(game_info["tournamentId"]) + ' and round ' + str(game_info["roundId"]) + ', let\'s decide the next bot move for this hand')
print('Current round turn is ' + str(game_info["roundTurn"]))
print('Cards on the table are ' + str(game_info["tableCards"]))
print('Your bot cards are ' + str(game_info["yourCards"]))
if game_info["canCheckOrBet"]:
        # remember: in poker you can check or bet only if no bot has bet yet in the current turn
# if a bot bet already, you'll need to call or raise.
print('In this hand, your bot can check or bet')
print('If you bet, the minimum bet is ' + str(game_info["minBet"]))
if game_info["canCallOrRaise"]:
# remember: in poker you can call or raise only if there has been a bet before
print('In this hand, your bot can call or raise')
print('If you call, you will spend ' + str(game_info["chipsToCall"]) + ' chips')
print('If you raise, the minimum raise is ' + str(game_info["minRaise"]))
print('The value of small blind now is ' + str(game_info["smallBlindValue"]))
print('The value of big blind now is ' + str(game_info["bigBlindValue"]))
print('Small blind player is ' + str(game_info["smallBlindPlayer"]))
print('Big blind player is ' + str(game_info["bigBlindPlayer"]))
print('Players in turn order with their info are: ' + str(game_info["players"]))
    # implement your strategy here; for now we always return fold, which is not great
    # for your leaderboard! (see the illustrative example_strategy sketch below)
response.content_type = 'application/json'
return {"action": "fold"}
@get('/pokerwars.io/ping')
def ping():
    # This is used by pokerwars.io when your bot subscribes, to verify that it is alive and responding
print('Received ping from pokerwars.io, responding with a pong')
response.content_type = 'application/json'
return {"pong": True}
@post('/pokerwars.io/notifications')
def notifications_handler():  # renamed so it does not shadow the `notifications` flag defined above
print('Received notification')
print(request.json)
response.content_type = 'application/json'
return
def subscribe():
down = True
while down:
try:
print('Trying to subscribe to pokerwars.io ...')
r = requests.get(bot_endpoint + '/pokerwars.io/ping')
if r.status_code == 200:
down = False
r = requests.post('https://play.pokerwars.io/v1/pokerwars/subscribe', json={'username': username, 'token': api_token, 'botEndpoint': bot_endpoint, 'notifications': bool(notifications)})
print('Subscription --> Status code: ' + str(r.status_code))
print('Subscription --> Body: ' + str(r.json()))
if r.status_code != 202:
print('Failed to subscribe, aborting ...')
exit()
        except requests.exceptions.RequestException:
            # The bot's own endpoint may not be up yet (the bottle server starts after
            # this thread); keep retrying instead of exiting on the first failure.
            pass
sleep(2)
if __name__ == '__main__':
s = Thread(target=subscribe)
s.daemon = True
s.start()
run(port=port)
|
runner.py
|
#!/usr/bin/env python2
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import PIPE, STDOUT
from functools import wraps
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import re
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
import urllib
import webbrowser
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from urllib import unquote
else:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import parallel_runner
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, DEBUG, PYTHON, LLVM_TARGET, ASM_JS_TARGET, EMSCRIPTEN_TEMP_DIR, WASM_TARGET, SPIDERMONKEY_ENGINE, WINDOWS, V8_ENGINE, NODE_JS, EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, Building, run_process, try_delete, to_cc, asbytes, safe_copy, Settings
from tools import jsrun, shared, line_endings
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger(__file__)
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
EMTEST_WASM_PTHREADS = int(os.getenv('EMTEST_WASM_PTHREADS', '1'))
# Also support the old name: EM_SAVE_DIR
EMTEST_SAVE_DIR = os.getenv('EMTEST_SAVE_DIR', os.getenv('EM_SAVE_DIR'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0'))
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
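# Illustrative usage (the wrapped test name is hypothetical): skip a test whenever
# self.is_wasm() returns True on the test class:
#   test_something = skip_if(test_something, 'is_wasm', 'not supported under wasm')
# Passing negate=True flips the check, skipping the test when the condition is False.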
def needs_dlfcn(func):
assert callable(func)
@wraps(func)
def decorated(self):
self.check_dlfcn()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def no_wasm_backend(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note)
return decorated
def no_fastcomp(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note, negate=True)
return decorated
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
# used for tests that fail now and then on CI, due to timing or other
# random causes. this tries the test a few times, looking for at least
# one pass
def flaky(f):
assert callable(f)
max_tries = 3
@wraps(f)
def decorated(self):
for i in range(max_tries - 1):
try:
f(self)
return
except Exception:
print('flaky...')
continue
# run the last time normally, to get a simpler stack trace
f(self)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
  # Setting a value to None means clearing that environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
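# Illustrative usage: set one variable and clear another for the duration of the
# block; the previous environment is restored when the block exits. The variable
# names here are examples only.
#   with env_modify({'EMCC_DEBUG': '1', 'SOME_OTHER_VAR': None}):
#       ...  # code in here sees EMCC_DEBUG=1 and no SOME_OTHER_VAR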
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
@contextlib.contextmanager
def chdir(dir):
"""A context manager that performs actions in the given directory."""
orig_cwd = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(orig_cwd)
def limit_size(string, MAX=800 * 20):
if len(string) < MAX:
return string
return string[0:MAX // 2] + '\n[..]\n' + string[-MAX // 2:]
def create_test_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
# The core test modes
core_test_modes = [
'asm0',
'asm1',
'asm2',
'asm3',
'asm2g',
'asm2f',
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
'wasm2js0',
'wasm2js1',
'wasm2js2',
'wasm2js3',
'wasm2jss',
'wasm2jsz',
'asmi',
'asm2i',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
'benchmark',
]
test_index = 0
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
resulting_test.__name__ = '%s_%s' % (name, suffix)
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the function.
# We add the suffix to it as well.
if hasattr(func, '__qualname__'):
resulting_test.__qualname__ = '%s_%s' % (func.__qualname__, suffix)
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
# This is a hack to make the metaclass work on both python 2 and python 3.
#
# On python 3, the code should be:
# class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# ...
#
# On python 2, the code should be:
# class RunnerCore(unittest.TestCase):
# __metaclass__ = RunnerMeta
# ...
#
# To be compatible with both python 2 and python 3, we create a class by directly invoking the
# metaclass, which is done in the same way on both python 2 and 3, and inherit from it,
# since a class inherits the metaclass by default.
class RunnerCore(RunnerMeta('TestCase', (unittest.TestCase,), {})):
emcc_args = []
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
save_dir = EMTEST_SAVE_DIR
save_JS = 0
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
env = {}
settings_mods = {}
temp_files_before_run = []
def is_emterpreter(self):
return self.get_setting('EMTERPRETIFY')
def is_wasm(self):
return self.is_wasm_backend() or self.get_setting('WASM') != 0
def is_wasm_backend(self):
return self.get_setting('WASM_BACKEND')
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
if self.get_setting('WASM_BACKEND') and not self.get_setting('WASM'):
self.skipTest('no dynamic library support in wasm2js yet')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or \
(self.get_setting('WASM') and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if self.save_dir:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
# Use emscripten root for node module lookup
os.environ['NODE_PATH'] = path_from_root('node_modules')
if not self.save_dir:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not self.save_dir:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir. They may not be due to
# us, but e.g. the browser when running browser tests. Until we figure out a proper solution,
# ignore some temp file names that we see on our CI infrastructure.
ignorable_files = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout'
]
left_over_files = list(set(temp_files_after_run) - set(self.temp_files_before_run) - set(ignorable_files))
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
# Make sure we don't leave stuff around
# if not self.has_prev_ll:
# for temp_file in os.listdir(TEMP_DIR):
# assert not temp_file.endswith('.ll'), temp_file
# # TODO assert not temp_file.startswith('emscripten_'), temp_file
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
else:
ret += ['-s', '{}={}'.format(key, json.dumps(value))]
return ret
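  # For example (illustrative values): settings_mods of {'WASM': 1, 'TOTAL_MEMORY': 33554432}
  # serializes to ['-s', 'WASM', '-s', 'TOTAL_MEMORY=33554432'] -- a value of exactly 1 is
  # emitted as a bare -s flag, anything else becomes a JSON-encoded KEY=value pair.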
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def get_stdout_path(self):
return os.path.join(self.get_dir(), 'stdout')
def hardcode_arguments(self, filename, args):
    # Hardcode in the arguments, so the js is portable without manually passing command-line arguments
if not args:
return
js = open(filename).read()
create_test_file(filename, js.replace('run();', 'run(%s + Module["arguments"]);' % str(args)))
def prep_ll_file(self, output_file, input_file, force_recompile=False, build_ll_hook=None):
# force_recompile = force_recompile or os.path.getsize(filename + '.ll') > 50000
# If the file is big, recompile just to get ll_opts
# Recompiling just for dfe in ll_opts is too costly
def fix_target(ll_filename):
if LLVM_TARGET == ASM_JS_TARGET:
return
with open(ll_filename) as f:
contents = f.read()
if LLVM_TARGET in contents:
return
asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
assert(ASM_JS_TARGET in contents)
assert(asmjs_layout in contents)
contents = contents.replace(asmjs_layout, wasm_layout)
contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
with open(ll_filename, 'w') as f:
f.write(contents)
output_obj = output_file + '.o'
output_ll = output_file + '.ll'
if force_recompile or build_ll_hook:
if input_file.endswith(('.bc', '.o')):
if input_file != output_obj:
shutil.copy(input_file, output_obj)
Building.llvm_dis(output_obj, output_ll)
else:
shutil.copy(input_file, output_ll)
fix_target(output_ll)
if build_ll_hook:
need_post = build_ll_hook(output_file)
Building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.pre') # for comparisons later
Building.llvm_dis(output_obj, output_ll)
if build_ll_hook and need_post:
build_ll_hook(output_file)
Building.llvm_as(output_ll, output_obj)
shutil.move(output_ll, output_ll + '.post') # for comparisons later
Building.llvm_dis(output_obj, output_ll)
Building.llvm_as(output_ll, output_obj)
else:
if input_file.endswith('.ll'):
safe_copy(input_file, output_ll)
fix_target(output_ll)
Building.llvm_as(output_ll, output_obj)
else:
safe_copy(input_file, output_obj)
return output_obj
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False):
args = self.serialize_settings() + self.emcc_args
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
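  # For example (illustrative file name): with emcc_args of ['--pre-js', 'pre.js', '-O2'],
  # get_emcc_args(main_file=True) keeps all three entries, while get_emcc_args() drops the
  # '--pre-js'/'pre.js' pair, since library builds do not need it.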
# Build JavaScript code from source code
def build(self, src, dirname, filename, main_file=None,
additional_files=[], libraries=[], includes=[], build_ll_hook=None,
post_build=None, js_outfile=True):
# Copy over necessary files for compiling the source
if main_file is None:
with open(filename, 'w') as f:
f.write(src)
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
suffix = '.o.js' if js_outfile else '.o.wasm'
if build_ll_hook:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in [filename] + additional_files:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except:
pass
args = [PYTHON, EMCC] + self.get_emcc_args(main_file=True) + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(f + '.o')
# Link all files
object_file = filename + '.o'
if len(additional_files) + len(libraries):
shutil.move(object_file, object_file + '.alone')
inputs = [object_file + '.alone'] + [f + '.o' for f in additional_files] + libraries
Building.link_to_object(inputs, object_file)
if not os.path.exists(object_file):
print("Failed to link LLVM binaries:\n\n", object_file)
self.fail("Linkage error")
# Finalize
self.prep_ll_file(filename, object_file, build_ll_hook=build_ll_hook)
# BC => JS
Building.emcc(object_file, self.get_emcc_args(main_file=True), object_file + '.js')
else:
# "fast", new path: just call emcc and go straight to JS
all_files = [filename] + additional_files + libraries
for i in range(len(all_files)):
if '.' not in all_files[i]:
shutil.move(all_files[i], all_files[i] + '.bc')
all_files[i] += '.bc'
args = [PYTHON, EMCC] + self.get_emcc_args(main_file=True) + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + ['-o', filename + suffix]
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(filename + suffix)
if post_build:
post_build(filename + suffix)
if js_outfile and self.uses_memory_init_file():
src = open(filename + suffix).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def validate_asmjs(self, err):
m = re.search(r"asm.js type error: '(\w+)' is not a (standard|supported) SIMD type", err)
if m:
# Bug numbers for missing SIMD types:
bugs = {
'Int8x16': 1136226,
'Int16x8': 1136226,
'Uint8x16': 1244117,
'Uint16x8': 1244117,
'Uint32x4': 1240796,
'Float64x2': 1124205,
}
simd = m.group(1)
if simd in bugs:
print(("\nWARNING: ignoring asm.js type error from {} due to implementation not yet available in SpiderMonkey." +
" See https://bugzilla.mozilla.org/show_bug.cgi?id={}\n").format(simd, bugs[simd]), file=sys.stderr)
err = err.replace(m.group(0), '')
# check for asm.js validation
if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err:
print("[was asm.js'ified]", file=sys.stderr)
# check for an asm.js validation error, if we expect one
elif 'asm.js' in err and not self.is_wasm() and self.get_setting('ASM_JS') == 1:
self.fail("did NOT asm.js'ify: " + err)
err = '\n'.join([line for line in err.split('\n') if 'uccessfully compiled asm.js code' not in line])
return err
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_generated_code(self, engine, filename, args=[], check_timeout=True, output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
# Make sure that we produced proper line endings to the .js file we are about to run.
self.assertEqual(line_endings.check_line_endings(filename), 0)
if EMTEST_VERBOSE:
print("Running '%s' under '%s'" % (filename, engine))
with chdir(self.get_dir()):
jsrun.run_js(filename, engine, args, check_timeout,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if engine == SPIDERMONKEY_ENGINE and self.get_setting('ASM_JS') == 1:
err = self.validate_asmjs(err)
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
if EMTEST_VERBOSE:
      print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg)
def assertIdentical(self, values, y, msg=None):
if type(values) not in [list, tuple]:
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.split('\n'), y.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
fail_message = "Expected to have '%s' == '%s', diff:\n\n%s" % (limit_size(values[0]), limit_size(y), limit_size(diff))
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
values = list(map(asstr, values))
if callable(string):
string = string()
for value in values:
if value in string:
return # success
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')])),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
if not os.path.exists(ret):
os.makedirs(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args='help',
env_init={}, cache_name_extra='', native=False):
if make_args == 'help':
make_args = ['-j', str(multiprocessing.cpu_count())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
emcc_args = self.get_emcc_args()
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
return build_library(name, build_dir, output_dir, generated_libs, configure,
configure_args, make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native, cflags=self.get_emcc_args())
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name))
if EMSCRIPTEN_TEMP_DIR:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
# Shared test code between main suite and others
def setup_runtimelink_test(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
create_test_file('supp.cpp', supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
return (main, supp)
  # exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
  # when run under the browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_test_file('libb.cpp', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
void bfunc() {
afunc("b");
}
''')
create_test_file('libc.cpp', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.clear_setting('RUNTIME_LINKED_LIBS')
# XXX in wasm each lib load currently takes 5MB; default TOTAL_MEMORY=16MB is thus not enough
self.set_setting('TOTAL_MEMORY', 32 * 1024 * 1024)
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [PYTHON, EMCC, src, '-o', os.path.splitext(src)[0] + so] + self.get_emcc_args()
cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE', 1)
self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
do_run(r'''
void bfunc();
void cfunc();
int _main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.set_setting('RUNTIME_LINKED_LIBS', [])
self.emcc_args += ['--embed-file', '.@/']
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int _main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
        // FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "_Z5bfuncv");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "_Z5cfuncv");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = shared.JS_ENGINES
for engine in js_engines:
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) == list
js_engines = [engine for engine in js_engines if engine and engine[0] not in [banned[0] for banned in self.banned_js_engines if banned]]
return js_engines
def do_run_from_file(self, src, expected_output, *args, **kwargs):
if 'force_c' not in kwargs and os.path.splitext(src)[1] == '.c':
kwargs['force_c'] = True
self.do_run(open(src).read(), open(expected_output).read(), *args, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
no_build=False, main_file=None, additional_files=[],
js_engines=None, post_build=None, basename='src.cpp', libraries=[],
includes=[], force_c=False, build_ll_hook=None,
assert_returncode=None, assert_identical=False):
if self.get_setting('ASYNCIFY') == 1 and self.is_wasm_backend():
self.skipTest("wasm backend doesn't support ASYNCIFY yet")
    if force_c or (main_file is not None and main_file[-2:] == '.c'):
basename = 'src.c'
Building.COMPILER = to_cc(Building.COMPILER)
if no_build:
if src:
js_file = src
else:
js_file = basename + '.o.js'
else:
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
self.build(src, dirname, filename, main_file=main_file,
additional_files=additional_files, libraries=libraries,
includes=includes,
build_ll_hook=build_ll_hook, post_build=post_build)
js_file = filename + '.o.js'
self.assertExists(js_file)
# Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
js_engines = self.filtered_js_engines(js_engines)
if len(js_engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
if len(js_engines) > 1 and not self.use_all_engines:
if SPIDERMONKEY_ENGINE in js_engines: # make sure to get asm.js validation checks, using sm
js_engines = [SPIDERMONKEY_ENGINE]
else:
js_engines = js_engines[:1]
for engine in js_engines:
# print 'test in', engine
js_output = self.run_generated_code(engine, js_file, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
else:
self.assertContained(expected_output, js_output)
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
if self.save_JS:
global test_index
self.hardcode_arguments(js_file, args)
shutil.copyfile(js_file, os.path.join(TEMP_DIR, str(test_index) + '.js'))
test_index += 1
def get_freetype_library(self):
return self.get_library('freetype', os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared'])
def get_poppler_library(self):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + path_from_root('tests', 'freetype', 'include'),
'-I' + path_from_root('tests', 'poppler', 'include')
]
freetype = self.get_freetype_library()
    # Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
poppler = self.get_library(
'poppler',
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init={'FONTCONFIG_CFLAGS': ' ', 'FONTCONFIG_LIBS': ' '},
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library('zlib', os.path.join('libz.a'),
configure=[path_from_root('emconfigure.bat')],
configure_args=['cmake', '.'],
make=['mingw32-make'],
make_args=[])
return self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
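# Roughly, the handler below exposes:
#   /run_harness        - serves tests/browser_harness.html to the browser
#   /check              - polled by the harness page; answers 'COMMAND:<url>'
#                         when a test is queued (and chdirs to its directory),
#                         or '(wait)' otherwise
#   report_* requests   - carry the test result, which is pushed onto out_queue
#                         for run_browser() to read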
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header("Content-type", 'application/javascript')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', urllib.unquote_plus(self.path), ']')
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
          self.wfile.write(b'COMMAND:' + url)
# move us to the right place to serve the files
os.chdir(dir)
else:
# the browser must keep polling
          self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', urllib.unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
    def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
if not EMTEST_BROWSER:
print("Using default system browser")
else:
cmd = shlex.split(EMTEST_BROWSER)
def run_in_other_browser(url):
subprocess.Popen(cmd + [url])
webbrowser.open_new = run_in_other_browser
print("Using Emscripten browser: " + str(cmd))
cls.browser_timeout = 30
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
webbrowser.open_new('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
def run_browser(self, html_file, message, expectedResult=None, timeout=None):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
asbytes('http://localhost:%s/%s' % (self.port, html_file)),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
self.assertIdentical(expectedResult, output)
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
def with_report_result(self, code):
return '#define EMTEST_PORT_NUMBER %d\n#include "%s"\n' % (self.port, path_from_root('tests', 'report_result.h')) + code
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args):
run_process([PYTHON, EMCC] + args + ['--pre-js', path_from_root('tests', 'browser_reporting.js')])
def btest(self, filename, expected=None, reference=None, force_c=False,
reference_slack=0, manual_reference=False, post_build=None,
args=[], outfile='test.html', message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
# if we are provided the source and not a path, use that
filename_is_src = '\n' in filename
src = filename if filename_is_src else ''
original_args = args[:]
if 'USE_PTHREADS=1' in args and not self.is_wasm_backend() and 'ALLOW_MEMORY_GROWTH=1' not in args:
if EMTEST_WASM_PTHREADS:
also_asmjs = True
elif 'WASM=0' not in args:
args += ['-s', 'WASM=0']
if 'WASM=0' not in args:
# Filter out separate-asm, which is implied by wasm
args = [a for a in args if a != '--separate-asm']
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port, '-include', path_from_root('tests', 'report_result.h')]
if filename_is_src:
filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
with open(filepath, 'w') as f:
f.write(src)
else:
filepath = path_from_root('tests', filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
all_args = ['-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
# print('all args:', all_args)
try_delete(outfile)
self.compile_btest(all_args)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
configure_args=[],
make=['make'],
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False,
cflags=[]):
"""Build a library into a .bc file. We build the .bc file once and cache it
for all our tests. (We cache in memory since the test directory is destroyed
and recreated for each test. Note that we cache separately for different
compilers). This cache is just during the test runner. There is a different
concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = path_from_root('tests', name.replace('_native', ''))
temp_dir = build_dir
project_dir = os.path.join(temp_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
  shutil.copytree(source_dir, project_dir)  # sometimes useful in debugging to comment this out, along with the two lines above
with chdir(project_dir):
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
env = Building.get_building_env(native, True, cflags=cflags)
for k, v in env_init.items():
env[k] = v
if configure:
# Useful in debugging sometimes to comment this out (and the lines below
# up to and including the |link| call)
if EM_BUILD_VERBOSE < 2:
stdout = open(os.path.join(project_dir, 'configure_out'), 'w')
else:
stdout = None
if EM_BUILD_VERBOSE < 1:
stderr = open(os.path.join(project_dir, 'configure_err'), 'w')
else:
stderr = None
try:
Building.configure(configure + configure_args, env=env, stdout=stdout, stderr=stderr)
except subprocess.CalledProcessError as e:
pass # Ignore exit code != 0
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
Building.make(make + make_args, stdout=stdout, stderr=stderr, env=env)
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
except Exception as e:
if EM_BUILD_VERBOSE == 0:
with open_make_err() as ferr:
for line in ferr:
sys.stderr.write(line)
raise Exception('could not build library ' + name + ' due to exception ' + str(e))
return generated_libs
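# For reference, a typical invocation (normally made via RunnerCore.get_library,
# which supplies build_dir, output_dir and the cache) looks roughly like:
#   build_library('zlib', build_dir, output_dir, ['libz.a'], make_args=['libz.a'])
# (illustrative only; the exact arguments come from the get_*_library helpers above)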
def check_js_engines():
total_engines = len(shared.JS_ENGINES)
shared.JS_ENGINES = list(filter(jsrun.check_engine, shared.JS_ENGINES))
if not shared.JS_ENGINES:
print('WARNING: None of the JS engines in JS_ENGINES appears to work.')
elif len(shared.JS_ENGINES) < total_engines:
    print('WARNING: Not all of the JS engines in JS_ENGINES appear to work; ignoring the ones that do not.')
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
try:
suite = getattr(m, suite_name)
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
break
except:
pass
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
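# Examples: 'random5' picks 5 random core-mode tests, 'randomother3' picks 3
# random tests from the 'other' suite, and 'randombrowser2' picks 2 random
# browser tests (args_for_random_tests strips the 'random' prefix before
# calling this function).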
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
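# For example, with num_tests = 100 the term std = 0.5 / sqrt(100) = 0.05, so the
# message above reports an expected pass rate of 100 * (1 - 0.05) = 95.00%.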
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other')
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_runner.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_runner.ParallelTestSuite()
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('-j', '--js-engine', help='Set JS_ENGINE_OVERRIDE')
parser.add_argument('tests', nargs='*')
return parser.parse_args()
def main(args):
options = parse_args(args)
if options.js_engine:
if options.js_engine == 'SPIDERMONKEY_ENGINE':
Building.JS_ENGINE_OVERRIDE = SPIDERMONKEY_ENGINE
elif options.js_engine == 'V8_ENGINE':
Building.JS_ENGINE_OVERRIDE = V8_ENGINE
elif options.js_engine == 'NODE_JS':
Building.JS_ENGINE_OVERRIDE = NODE_JS
else:
print('Unknown js engine override: ' + options.js_engine)
return 1
print("Overriding JS engine: " + Building.JS_ENGINE_OVERRIDE[0])
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
paho_mqtt_client.py
|
#!/usr/bin/env python
import paho.mqtt.client as mqtt_client
import asyncio
import traceback
import threading
import functools
from uuid import getnode as get_mac
from homie.mqtt.mqtt_base import MQTT_Base
import logging
logger = logging.getLogger(__name__)
mqtt_logger = logging.getLogger("MQTT")
mqtt_logger.setLevel("INFO")
CONNECTION_RESULT_CODES = {
0: "Connection successful",
1: "Connection refused - incorrect protocol version",
2: "Connection refused - invalid client identifier",
3: "Connection refused - server unavailable",
4: "Connection refused - bad username or password",
5: "Connection refused - not authorised",
}
class PAHO_MQTT_Client(MQTT_Base):
def __init__(self, mqtt_settings):
MQTT_Base.__init__(self, mqtt_settings)
self.mqtt_client = None
def connect(self):
MQTT_Base.connect(self)
self.mqtt_client = mqtt_client.Client(
client_id=self.mqtt_settings["MQTT_CLIENT_ID"]
)
self.mqtt_client.on_connect = self._on_connect
self.mqtt_client.on_message = self._on_message
# self.mqtt_client.on_publish = self._on_publish
self.mqtt_client.on_disconnect = self._on_disconnect
#self.mqtt_client.enable_logger(mqtt_logger)
#self.mqtt_client.enable_logger()
if self.mqtt_settings["MQTT_USERNAME"]:
self.mqtt_client.username_pw_set(
self.mqtt_settings["MQTT_USERNAME"],
password=self.mqtt_settings["MQTT_PASSWORD"],
)
try:
self.mqtt_client.connect(
self.mqtt_settings["MQTT_BROKER"],
port=self.mqtt_settings["MQTT_PORT"],
keepalive=self.mqtt_settings["MQTT_KEEPALIVE"],
)
self.mqtt_client.loop_start()
except Exception as e:
logger.warning("MQTT Unable to connect to Broker {}".format(e))
def start():
            try:
                logger.info('Publisher loop')
                asyncio.set_event_loop(self.event_loop)
                logger.info('Looping forever')
                self.event_loop.run_forever()
                logger.warning('Event loop stopped')
            except Exception as e:
                logger.error('Error in event loop {}'.format(e))
self.event_loop = asyncio.new_event_loop()
logger.warning("Starting MQTT publish thread")
self._ws_thread = threading.Thread(target=start, args=())
self._ws_thread.daemon = True
self._ws_thread.start()
def publish(self, topic, payload, retain, qos):
MQTT_Base.publish(self, topic, payload, retain, qos)
        def p():
            self.mqtt_client.publish(topic, payload, retain=retain, qos=qos)
        self.event_loop.call_soon_threadsafe(p)
def subscribe(self, topic, qos): # subclass to provide
MQTT_Base.subscribe(self, topic, qos)
self.mqtt_client.subscribe(topic, qos)
def unsubscribe(self, topic): # subclass to provide
MQTT_Base.unsubscribe(self, topic)
self.mqtt_client.unsubscribe(topic)
def set_will(self, will, topic, retain, qos):
MQTT_Base.set_will(self, will, topic, retain, qos)
        self.mqtt_client.will_set(topic, will, qos=qos, retain=retain)  # paho expects (topic, payload, qos, retain)
def _on_connect(self, client, userdata, flags, rc):
logger.debug("MQTT On Connect: {}".format(rc))
self.mqtt_connected = rc == 0
def _on_message(self, client, userdata, msg):
topic = msg.topic
payload = msg.payload.decode("utf-8")
MQTT_Base._on_message(self, topic, payload, msg.retain, msg.qos)
def _on_disconnect(self, client, userdata, rc):
        self.mqtt_connected = False  # note: this assignment goes through the property setter, so the base class does not need to catch it separately
        if rc > 0:  # unexpected disconnect
            rc_text = "Unknown result code {}".format(rc)
            if rc in CONNECTION_RESULT_CODES:
                rc_text = CONNECTION_RESULT_CODES[rc]
logger.warning(
"MQTT Unexpected disconnection {} {} {}".format(
client, userdata, rc_text
)
)
MQTT_Base._on_disconnect(self, rc)
def close(self):
MQTT_Base.close(self)
self.event_loop.stop()
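# --- Illustrative usage sketch, not part of the original module ---
# The settings below cover only the keys accessed in this file; MQTT_Base may
# require additional settings in practice, and the broker address is made up.
if __name__ == "__main__":
    demo_settings = {
        "MQTT_CLIENT_ID": "homie-demo",
        "MQTT_BROKER": "localhost",
        "MQTT_PORT": 1883,
        "MQTT_KEEPALIVE": 60,
        "MQTT_USERNAME": None,
        "MQTT_PASSWORD": None,
    }
    client = PAHO_MQTT_Client(demo_settings)
    client.connect()
    # In real use you would wait for the connection before publishing.
    client.publish("homie/demo/$state", "ready", retain=True, qos=1)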
|
server.py
|
from fastapi import FastAPI, WebSocket
import uvicorn
import torch
import time
import json
from torch import optim
from torch.utils.data import DataLoader
import logging
from aux import train_one_epoch, get_lr
from threading import Thread
import copy
from utils.AverageMeter import AverageMeter
import itertools
import numpy as np
import os
class Server():
    def update_model(self, items_count=None):
        """Retrain the early-exit heads on the samples seen so far, save their
        weights under ./server/, reload them into the live model and bump the
        model version counters."""
self.updating = True
self.new_items = 0
item_limit = (items_count or self.seen_items)
item_limit -= item_limit%1000
self.logger.info(f"GOING FOR **** {item_limit}")
self.test_data.item_limit = item_limit
train_loader = DataLoader(self.test_data, self.conf.batch_size, True, num_workers=0)
model_copy = copy.deepcopy(self.model).to(self.conf.train_device) #self.model.to(self.conf.train_device) #
backbone = model_copy.backbone
criterion = torch.nn.KLDivLoss(log_target=True).to(self.conf.train_device)
filenames = [f"./server/{self.conf.backbone_type}_exit{num_exit}_{item_limit}.pt" for num_exit in range(len(backbone.cached_layers))]
for num_exit in range(len(backbone.cached_layers)):
fn = filenames[num_exit]
if os.path.exists(fn):
continue
exit_model = backbone.cache_exits[num_exit]
idx = backbone.cached_layers[num_exit]
for p in model_copy.parameters():
p.requires_grad = False
for p in exit_model.parameters():
p.requires_grad = True
parameters = [p for p in exit_model.parameters()
if p.requires_grad]
optimizer = optim.SGD(parameters, lr=self.conf.lr,
momentum=self.conf.momentum, weight_decay=1e-4)
lr_schedule = optim.lr_scheduler.MultiStepLR(
optimizer, milestones=self.conf.milestones, gamma=0.1)
loss_meter = AverageMeter()
exit_model.train()
for epoch in range(self.conf.online_train_epochs):
train_one_epoch(train_loader, model_copy, optimizer,
criterion, epoch, loss_meter, self.conf,
exit_model, num_exit, idx, self.logger, save_model=fn if epoch == self.conf.online_train_epochs -1 else None)
lr_schedule.step()
for idx, exit_model in enumerate(self.model.backbone.cache_exits):
exit_model.load_state_dict(torch.load(filenames[idx])['state_dict'], strict=True)
del model_copy
self.model.version+=1
self.model_version +=1
self.logger.info(f"Update done! {self.model_version} {self.model.version}", )
self.updating = False
def __init__(self, conf, artifacts):
torch.cuda.set_device(0)
self.confidence_threshold = 0.45
self.logger = logging.getLogger("Model server")
app = FastAPI()
model, test_data = artifacts
self.updating = False
self.conf = conf
self.model_version = 0
self.model = model.to(conf.test_device)
self.test_data = test_data
self.sampler = torch.utils.data.RandomSampler(test_data, replacement=True, num_samples=int(1e6))
self.queue = []
self.new_items = 0
self.seen_items = 0
self.logger.info(f"Available samples: {len(self.test_data)}")
self.test_iter = iter(DataLoader(test_data, conf.test_batch_size, num_workers=0, sampler=self.sampler))
# m = 0
# while next(self.test_iter):
# m+=1
# print("ITER", m)
bs = conf.test_batch_size
# self.update_model(1000)
@app.websocket("/ws")
async def websocket_endpoint(ws: WebSocket):
await ws.accept()
while True:
try:
data = json.loads(await ws.receive_text())
except json.decoder.JSONDecodeError as e:
await ws.send_text(str(e))
continue
# await ws.send_text("RECEIVED")
count = data["count"]
if "action" in data and data["action"] == "update":
await ws.send_text("Update message received")
self.update_model(count)
self.logger.info("MODEL UPDATED")
continue
self.logger.info(f"MSG: {data}")
images, labels = next(self.test_iter)
cc = count - images.size(0)
while cc > 1:
i, l = next(self.test_iter)
images = torch.cat((images, i), dim=0)
labels = torch.cat((labels, l), dim=0)
cc-=i.size(0)
self.logger.info(f"BATCH SIZE: {images.shape}")
images = images.to(self.conf.test_device)
tt = time.perf_counter()
out, results = model.forward(images, self.conf, cache=False, threshold=self.confidence_threshold)#, logger=self.logger)
tt = time.perf_counter() - tt
confidence, pred = torch.max(out, 1)
confidence = torch.round(torch.exp(confidence) * 100) / 100
pred = pred.to('cpu')
confidence = confidence.to('cpu')
correct = torch.logical_or((pred == labels), confidence < self.confidence_threshold)
resp = {
"MsgID": data["id"],
"corrects": correct.sum().item(), #or conf < self.confidence_threshold,
"confidence": str(list(np.around(confidence.cpu().detach().numpy(), decimals=2))),
"exit_id": str(list(results['item_exits'].cpu().detach().numpy().astype(int))),
"V": self.model.version,
"U": self.model_version,
"T": round(tt, 4)
}
await ws.send_json(resp)
self.new_items += count
self.seen_items += count
if not self.updating and self.new_items > 1000: #and self.new_items * 5 > self.seen_items:
th = Thread(target=self.update_model)
th.start()
# th.join()
# self.test_iter = iter(DataLoader(test_data, conf.test_batch_size, True, num_workers=0))
else:
self.logger.info(f"{self.new_items} & {self.seen_items} & {self.updating}")
uvicorn.run(app, host="0.0.0.0", port=9090)
#{"id": 1, "count": 10000, "action": "update"}
#{"id": 1, "count": 100}
|
build_data.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from datetime import datetime
import os.path
import random
import sys
import threading
from PIL import Image
import numpy as np
import json
import tensorflow as tf
from flickr8k import load_flickr8k_dataset
from coco import load_coco_dataset
from common import Vocabulary, ImageMetadata
tf.flags.DEFINE_string("graph_path", "/home/hillyess/ai/project-image-caption/faster_rcnn_resnet50_coco/exported_graphs/frozen_inference_graph.pb",
"Faster rcnn forzen graph.")
tf.flags.DEFINE_string('dataset', "coco",
"Must be flickr8k, flickr30k, or coco")
# coco path
tf.flags.DEFINE_string("train_image_dir", "/home/hillyess/ai/coco/images/train2014",
"Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/home/hillyess/ai/coco/images/val2014",
"Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/home/hillyess/ai/coco/annotations/captions_train2014.json",
"Training captions JSON file.")
tf.flags.DEFINE_string("val_captions_file", "/home/hillyess/ai/coco/annotations/captions_val2014.json",
"Validation captions JSON file.")
# flickr8k path
tf.flags.DEFINE_string("image_dir", "/home/hillyess/ai/project-image-caption/Flickr8k/Flicker8k_Dataset",
"Directory containing the image files.")
tf.flags.DEFINE_string("text_path", "/home/hillyess/ai/project-image-caption/Flickr8k/Flickr8k_text",
"containing txt files about image caption annotations.")
tf.flags.DEFINE_string("output_dir", "/home/hillyess/coco_tfrecord", "Output data directory.")
tf.flags.DEFINE_integer("train_shards", 256,
"Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 8,
"Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 16,
"Number of shards in testing TFRecord files.")
tf.flags.DEFINE_string("start_word", "<S>",
"Useless! Directly assigned in common.py. Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
"Useless! Directly assigned in common.py. Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
"Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
"The minimum number of occurrences of each word in the "
"training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/home/hillyess/coco_tfrecord/word_counts.txt",
"Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 4,
"Number of threads to preprocess the images.")
FLAGS = tf.flags.FLAGS
# A map of dataset names to dataset loading functions.
LOAD_DATASET_MAP = {
'flickr30k': load_flickr8k_dataset,
'flickr8k': load_flickr8k_dataset,
'coco': load_coco_dataset,
}
class ImageDecoder(object):
"""Helper class for decoding images in TensorFlow."""
def __init__(self,graph_path):
# TensorFlow ops for JPEG decoding.
self._encoded_jpeg = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(graph_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
    with detection_graph.as_default():
      # Create a single TensorFlow Session for all image decoding calls.
      # The session must outlive __init__, so it is created directly instead of
      # via a 'with' block (which would close it on exit).
      self._sess = tf.Session(config=sess_config, graph=detection_graph)
      self._image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
      self._proposal_boxes = detection_graph.get_tensor_by_name('proposal_boxes:0')
      # self._detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
      # self._detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
      # self._num_detections = detection_graph.get_tensor_by_name('num_detections:0')
      self._feature = detection_graph.get_tensor_by_name('SecondStageBoxPredictor/AvgPool:0')
def decode_jpeg(self, encoded_jpeg):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._encoded_jpeg: encoded_jpeg})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def extract_faster_rcnn_feature(self, filename):
try:
image = Image.open(filename)
except FileNotFoundError:
return None
try:
image_np = self.load_image_into_numpy_array(image)
except ValueError:
return None
image_np_expanded = np.expand_dims(image_np, axis=0)
(boxes,feat) = self._sess.run(
[self._proposal_boxes,self._feature],
feed_dict={self._image_tensor: image_np_expanded})
return np.squeeze(boxes), np.squeeze(feat)
def load_image_into_numpy_array(self, image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_list_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_list_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_list_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[
bytes(v, encoding = "utf8") for v in value]))
def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _float_feature_list(values):
"""Wrapper for inserting an float FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_float_feature(v) for v in values])
def _bytes_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_bytes_feature(bytes(v, encoding = "utf8")) for v in values])
def _int64_list_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_int64_list_feature(v) for v in values])
def _float_list_feature_list(values):
"""Wrapper for inserting an float FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_float_list_feature(v) for v in values])
def _bytes_list_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
return tf.train.FeatureList(feature=[_bytes_list_feature(v) for v in values])
def fix_length_list(lista, fixed_length):
  """Truncates or cyclically extends `lista` so that it has exactly `fixed_length` elements."""
  if len(lista) > fixed_length:
return lista[:fixed_length]
elif len(lista)<fixed_length:
i=0
while len(lista)<fixed_length:
lista.append(lista[i])
i = i + 1
return lista
else:
return lista
def _to_sequence_example(image, decoder, vocab):
"""Builds a SequenceExample proto for an image-caption pair.
Args:
image: An ImageMetadata object.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
Returns:
A SequenceExample proto.
"""
# code from tensorflow/models im2txt modified by zisang 20180418
# with tf.gfile.FastGFile(image.filename, "rb") as f:
# encoded_image = f.read()
# try:
# encoded_image = decoder.decode_jpeg(encoded_image)
# except (tf.errors.InvalidArgumentError, AssertionError):
# print("Skipping file with invalid JPEG data: %s" % image.filename)
# return
#
#if not (len(image.captions) == 5):
# print(image.captions)
# pass
image_captions = fix_length_list(image.captions, 5)
assert len(image_captions) == 5
try:
bounding_box, feature_map = decoder.extract_faster_rcnn_feature(image.filename)
except TypeError:
return None
context = tf.train.Features(feature={
"image/image_id": _int64_feature(image.image_id),
"image/filename": _bytes_feature(bytes(image.filename, encoding="utf8")),
"image/data": _bytes_feature(feature_map.tostring()),
"iamge/bounding_box": _bytes_feature(bounding_box.tostring())
})
img_captions_ids = []
img_captions_mask = []
for i in range(len(image_captions)):
caption = image_captions[i]
caption_ids = [vocab.word_to_id(word) for word in caption]
caption_num_words = len(caption_ids)
if caption_num_words > 21:
caption_ids = caption_ids[:21]
caption_num_words = 21
caption_fix_len = np.zeros(21,dtype = np.int32)
current_masks = np.zeros(21,dtype=np.float32)
caption_fix_len[:caption_num_words] = np.array(caption_ids)
current_masks[:caption_num_words] = 1.0
img_captions_ids.append(caption_fix_len)
img_captions_mask.append(current_masks)
feature_lists = tf.train.FeatureLists(feature_list={
"iamge/raw_caption":_bytes_feature_list(image.raw_captions),
"image/caption": _bytes_list_feature_list(image_captions),
"image/caption_ids": _int64_list_feature_list(img_captions_ids),
"image/caption_mask": _float_list_feature_list(img_captions_mask)
})
sequence_example = tf.train.SequenceExample(
context=context, feature_lists=feature_lists)
return sequence_example
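def _parse_sequence_example_sketch(serialized):
  """Illustrative sketch, not used by this script: shows how a record written by
  _to_sequence_example above could be read back with the TF 1.x parsing API.
  The feature names and the fixed caption length of 21 mirror the proto built
  above; 'image/data' holds the serialized Faster R-CNN feature map bytes.
  """
  context, sequence = tf.parse_single_sequence_example(
      serialized,
      context_features={
          "image/image_id": tf.FixedLenFeature([], dtype=tf.int64),
          "image/data": tf.FixedLenFeature([], dtype=tf.string),
      },
      sequence_features={
          "image/caption_ids": tf.FixedLenSequenceFeature([21], dtype=tf.int64),
          "image/caption_mask": tf.FixedLenSequenceFeature([21], dtype=tf.float32),
      })
  return (context["image/image_id"], context["image/data"],
          sequence["image/caption_ids"], sequence["image/caption_mask"])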
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
num_shards):
"""Processes and saves a subset of images as TFRecord files in one thread.
Args:
thread_index: Integer thread identifier within [0, len(ranges)].
ranges: A list of pairs of integers specifying the ranges of the dataset to
process in parallel.
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
decoder: An ImageDecoder object.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# Each thread produces N shards where N = num_shards / num_threads. For
# instance, if num_shards = 128, and num_threads = 2, then the first thread
# would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_dir, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in images_in_shard:
image = images[i]
sequence_example = _to_sequence_example(image, decoder, vocab)
if sequence_example is not None:
writer.write(sequence_example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print("%s [thread %d]: Processed %d of %d items in thread batch." %
(datetime.now(), thread_index, counter, num_images_in_thread))
sys.stdout.flush()
writer.close()
print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
(datetime.now(), thread_index, counter, num_shards_per_batch))
sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
"""Processes a complete data set and saves it as a TFRecord.
Args:
name: Unique identifier specifying the dataset.
images: List of ImageMetadata.
vocab: A Vocabulary object.
num_shards: Integer number of shards for the output files.
"""
# # Break up each image into a separate entity for each caption.
# images = [ImageMetadata(image.image_id, image.filename, [caption])
# for image in images for caption in image.captions]
# # Shuffle the ordering of images. Make the randomization repeatable.
# random.seed(12345)
# random.shuffle(images)
# Break the images into num_threads batches. Batch i is defined as
# images[ranges[i][0]:ranges[i][1]].
num_threads = min(num_shards, FLAGS.num_threads)
spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a utility for decoding JPEG images to run sanity checks.
decoder = ImageDecoder(FLAGS.graph_path)
# Launch a thread for each batch.
print("Launching %d threads for spacings: %s" % (num_threads, ranges))
for thread_index in range(len(ranges)):
args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
t = threading.Thread(target=_process_image_files, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
(datetime.now(), len(images), name))
def _create_vocab(dataset, filename='word_counts.txt'):
  """Creates the vocabulary of word to word_id.
  The vocabulary is saved to disk in a text file of word counts. The id of each
  word in the file is its corresponding 0-based line number.
  Args:
    dataset: A list of ImageMetadata whose captions are used to build the vocabulary.
    filename: Name of the word-counts file written to FLAGS.output_dir.
  Returns:
    A Vocabulary object.
  """
print("Creating vocabulary.")
captions = [c for image in dataset for c in image.captions]
counter = Counter()
for c in captions:
counter.update(c)
print("Total words:", len(counter))
# Filter uncommon words and sort by descending count.
word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
word_counts.sort(key=lambda x: x[1], reverse=True)
print("Words in vocabulary:", len(word_counts))
# Write out the word counts file.
vocab_file_path = os.path.join(FLAGS.output_dir, filename)
with tf.gfile.FastGFile(vocab_file_path, "w") as f:
f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
print("Wrote vocabulary file:", vocab_file_path)
# Create the vocabulary dictionary.
reverse_vocab = [x[0] for x in word_counts]
unk_id = len(reverse_vocab)
vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
vocab = Vocabulary(vocab_dict, unk_id)
return vocab
def _create_image_id_to_captions(dataset, filename):
id_to_cap = {}
for image in dataset:
id_to_cap[image.image_id] = image.raw_captions
file_path = os.path.join(FLAGS.output_dir, filename)
fp = open(file_path, 'w')
json.dump(id_to_cap, fp)
fp.close()
def main(unused_argv):
def _is_valid_num_shards(num_shards):
"""Returns True if num_shards is compatible with FLAGS.num_threads."""
return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads
assert _is_valid_num_shards(FLAGS.train_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
assert _is_valid_num_shards(FLAGS.val_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
assert _is_valid_num_shards(FLAGS.test_shards), (
"Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
assert (FLAGS.dataset in LOAD_DATASET_MAP), (
"Unknown dataset! Must be flickr8k, flickr30k, or coco")
load_dataset = LOAD_DATASET_MAP[FLAGS.dataset]
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
train_dataset,val_dataset,test_dataset = load_dataset(FLAGS)
# Create vocabulary from the training captions.
vocab = _create_vocab(train_dataset)
# Create image id to captions dict for evaluation
_create_image_id_to_captions(train_dataset,filename='train_id_captions.json')
_create_image_id_to_captions(val_dataset,filename='val_id_captions.json')
_create_image_id_to_captions(test_dataset,filename='test_id_captions.json')
_process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
_process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
_process_dataset("test", test_dataset, vocab, FLAGS.test_shards)
if __name__ == "__main__":
tf.app.run()
|
personnages3d.py
|
"""
Echap pour finir proprement le script
Capture de 1 à 4 squelettes
avec
camera Intel RealSense D455, Google posenet et Google Coral.
"""
import os
from time import time, sleep
from threading import Thread
import json
import enum
import numpy as np
import cv2
import pyrealsense2 as rs
from posenet.this_posenet import ThisPosenet
from posenet.pose_engine import EDGES
from myconfig import MyConfig
from post_capture import PostCaptureProcessing
COLORS = [(0, 0, 255), (0, 255, 0), (255, 255, 0), (255, 0, 255)]
class KeypointType(enum.IntEnum):
"""Pose kepoints."""
NOSE = 0
LEFT_EYE = 1
RIGHT_EYE = 2
LEFT_EAR = 3
RIGHT_EAR = 4
LEFT_SHOULDER = 5
RIGHT_SHOULDER = 6
LEFT_ELBOW = 7
RIGHT_ELBOW = 8
LEFT_WRIST = 9
RIGHT_WRIST = 10
LEFT_HIP = 11
RIGHT_HIP = 12
LEFT_KNEE = 13
RIGHT_KNEE = 14
LEFT_ANKLE = 15
RIGHT_ANKLE = 16
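# The integer values above follow the PoseNet keypoint order; PoseNetConversion
# below uses them (label.value) as the keys of each per-skeleton dict.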
class PoseNetConversion:
"""Conversion de posenet vers ma norme
1 ou 2 squelettes capturés:
[Pose(keypoints={
<KeypointType.NOSE: 0>: Keypoint(point=Point(x=652.6, y=176.6),
score=0.8),
<KeypointType.LEFT_EYE: 1>: Keypoint(point=Point(x=655.9, y=164.3),
score=0.9)},
score=0.53292614),
Pose(keypoints={
<KeypointType.NOSE: 0>: Keypoint(point=Point(x=329.2562, y=18.127075),
score=0.91656697),
<KeypointType.LEFT_EYE: 1>: Keypoint(point=Point(x=337.1971, y=4.7381477),
score=0.14472471)},
score=0.35073516)]
    converted to:
    skeleton1 = {0: (x=652.6, y=176.6), ...}
    and
    skeleton2 = {0: (x=329.2, y=18.1), ...}  and so on up to index 16,
    i.e.
    skeleton2 = {0: (329.2, 18.1), ...}
    skeletons = list of skeletons = [skeleton1, skeleton2]
"""
def __init__(self, outputs, threshold):
self.outputs = outputs
self.threshold = threshold
self.skeletons = []
self.conversion()
def conversion(self):
"""Convertit les keypoints posenet dans ma norme"""
self.skeletons = []
for pose in self.outputs:
xys = self.get_points_2D(pose)
self.skeletons.append(xys)
def get_points_2D(self, pose):
""" ma norme = dict{index du keypoint: (x, y), }
xys = {0: (698, 320), 1: (698, 297), 2: (675, 295), .... }
"""
xys = {}
for label, keypoint in pose.keypoints.items():
if keypoint.score > self.threshold:
xys[label.value] = [int(keypoint.point[0]),
int(keypoint.point[1])]
return xys
class Personnage:
"""Permet de stocker facilement les attributs d'un personnage,
et de les reset-er.
"""
def __init__(self, **kwargs):
self.config = kwargs
self.len_histo = int(self.config['pose']['len_histo'])
self.reset()
def reset(self):
self.who = None
self.xys = None
self.points_3D = None
self.center = [100000]*3
# 10 x, 10 y and 10 z values, i.e. 1 second
self.historic = [0]*3
self.historic[0] = [0]*self.len_histo
self.historic[1] = [0]*self.len_histo
self.historic[2] = [0]*self.len_histo
self.stability = 0
# Distance, for display only
self.gap = 100000
def add_historic(self, centre):
"""Push onto the stack, drop the oldest entry"""
for i in range(3):
self.historic[i].append(centre[i])
del self.historic[i][0]
class Personnages3D:
""" Capture avec Camera RealSense D455
Détection de la pose avec Coral USB Stick
Calcul des coordonnées 3D de chaque personnage, puis suite
dans PostCaptureProcessing.
La profondeur est le 3ème dans les coordonnées d'un point 3D,
x = horizontale, y = verticale
points_3D = liste de 17 articulations, soit [x,y,z], soit None
colors = liste de 17 valeurs de couleurs, [127,127,127], soit None
mode de l'image à trouver !!!!!!!!!!!! BGR ?
"""
def __init__(self, **kwargs):
"""Les paramètres sont définis dans le fichier personnages3d.ini"""
self.stability_on = 0
self.config = kwargs
print(f"Configuration:\n{self.config}\n\n")
# 1 pour avoir les print et pour enregistrer
self.debug = 0
self.print_data = {}
# Seuil de confiance de reconnaissance du squelette
self.threshold = float(self.config['pose']['threshold'])
# Nombre de pixels autour du point pour moyenne du calcul de profondeur
self.around = int(self.config['pose']['around'])
# Distance de rémanence pour attribution des squelettes
self.distance = float(self.config['pose']['distance'])
# Stabilité
self.stability = int(self.config['pose']['stability'])
# Nombre de personnes à capter
self.person_nbr = min(int(self.config['pose']['person_nbr']), 4)
self.whos = [0]*self.person_nbr
# Taille d'image possible: 1280x720, 640x480 seulement
# 640x480 est utile pour fps > 30
# Les modèles posenet imposent une taille d'image
self.width = int(self.config['camera']['width_input'])
self.height = int(self.config['camera']['height_input'])
# Plein écran de la fenêtre OpenCV
self.full_screen = int(self.config['camera']['full_screen'])
# Le client va utiliser l'ip et port du *.ini
self.post_capture = PostCaptureProcessing(**self.config)
self.create_window()
self.set_pipeline()
self.this_posenet = ThisPosenet(self.width, self.height)
# Toutes les datas des personnages dans un dict self.personnages
self.personnages = []
for i in range(self.person_nbr):
self.personnages.append(Personnage(**self.config))
self.skelet_nbr = 0
self.new_centers = None
# GUI
self.loop = 1
self.client = TcpClient3('192.168.1.223', 8000)
def create_window(self):
cv2.namedWindow('color', cv2.WND_PROP_FULLSCREEN)
# Window for the top-down view
cv2.namedWindow('vue du dessus', cv2.WND_PROP_FULLSCREEN)
self.black = np.zeros((720, 1280, 3), dtype="uint8")
def set_window(self):
if self.full_screen:
cv2.setWindowProperty('color', cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
cv2.setWindowProperty('vue du dessus', cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
else:
cv2.setWindowProperty( 'color',
cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_NORMAL)
cv2.setWindowProperty( 'vue du dessus',
cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_NORMAL)
def set_pipeline(self):
"""Crée le flux d'image avec la caméra D455
1. ( self: pyrealsense2.pyrealsense2.config,
stream_type: pyrealsense2.pyrealsense2.stream,
stream_index: int,
width: int,
height: int,
format: pyrealsense2.pyrealsense2.format = <format.any: 0>,
framerate: int = 0) -> None
format=rs.format.z16
"""
self.pipeline = rs.pipeline()
config = rs.config()
pipeline_wrapper = rs.pipeline_wrapper(self.pipeline)
try:
pipeline_profile = config.resolve(pipeline_wrapper)
except Exception:
print('No RealSense sensor connected')
os._exit(0)
device = pipeline_profile.get_device()
config.enable_stream( rs.stream.color,
width=self.width,
height=self.height,
format=rs.format.bgr8,
framerate=30)
config.enable_stream( rs.stream.depth,
width=self.width,
height=self.height,
format=rs.format.z16,
framerate=30)
self.pipeline.start(config)
self.align = rs.align(rs.stream.color)
unaligned_frames = self.pipeline.wait_for_frames()
frames = self.align.process(unaligned_frames)
depth = frames.get_depth_frame()
self.depth_intrinsic = depth.profile.as_video_stream_profile().intrinsics
# Print the image size
color_frame = frames.get_color_frame()
img = np.asanyarray(color_frame.get_data())
print(f"Image size:"
f" {img.shape[1]}x{img.shape[0]}")
def get_body_in_center(self, skelets_3D):
"""Recherche du perso le plus près du centre, pour 1 seul perso"""
who = []
# Tous les décalage sur x
all_x_decal = []
if skelets_3D:
for perso in skelets_3D:
# Le x est la 1ère valeur dans perso
if perso:
decal = get_moyenne(perso, 0)
if decal:
all_x_decal.append(decal)
else:
all_x_decal.append(100000)
if all_x_decal:
all_x_decal_sorted = sorted(all_x_decal)
decal_mini = all_x_decal_sorted[0]
who.append(all_x_decal.index(decal_mini))
return who
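# Note (illustrative values): the skeleton with the smallest mean x offset,
# not the smallest absolute offset, is selected; e.g. mean offsets of
# [-350, 120] give who == [0].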
def main_frame(self, outputs):
""" Appelé depuis la boucle infinie, c'est le main d'une frame.
Récupération de tous les squelettes
Definition de who
"""
skelets_2D, skelets_3D = None, None
# Retrieve all the skeletons
if outputs:
# the xys
skelets_2D = PoseNetConversion(outputs, self.threshold).skeletons
if skelets_2D:
# Add depth to get 3D
skelets_3D = self.get_skelets_3D(skelets_2D)
# Determine who, apply to self.personnages
if skelets_3D:
self.skelet_nbr = min(len(skelets_3D), 4)
# If only a single person is to be detected
if self.person_nbr == 1:
self.whos = self.get_body_in_center(skelets_3D)
else:
self.who_is_who(skelets_3D)
if self.stability_on:
self.apply_to_personnages_with_stability(skelets_2D, skelets_3D)
else:
self.apply_to_personnages(skelets_2D, skelets_3D)
# Display
self.draw_all_poses()
self.draw_all_textes()
self.draw_all_personnages()
def update_centers(self, skelets_3D):
"""
last_centers = liste des centres tirée de l'historique des piles de centre
self.new_centers = liste des centres des squelettes de la frame
"""
self.last_centers = []
for i in range(self.person_nbr):
self.last_centers.append(self.personnages[i].center)
self.new_centers = []
for i in range(self.skelet_nbr):
self.new_centers.append(get_center(skelets_3D[i]))
def who_is_who(self, skelets_3D):
# Preliminary step
self.update_centers(skelets_3D)
print("\nNumber of skeletons ------------------------>", self.skelet_nbr)
# Iterate over the skeletons to compute the distances to the
# centers of the persons
# dists[0] = list of distances between skeleton 0 and the persons
dists = {}
for skel in range(self.skelet_nbr):
dists[skel] = []
# Find the persons close to this skeleton
for perso in range(self.person_nbr):
dist = get_distance(self.new_centers[skel], self.last_centers[perso])
if dist > 100000:
dist = 100000
dists[skel].append(dist)
print("distances:", dists) # {0: [41, 41, 41, 41]}
# Assign the skeleton to the nearest person
whos, TODO = self.attribution_with_nearest(dists)
self.whos = self.default_attribution(whos, TODO)
def default_attribution(self, whos, TODO):
""" Attribution par défaut si pas attribué avant
whos: [1, None, None, None] TODO: 2
objectif --> [1, 0, 2, None]
liste des déjà attribués: done = [1]
à attribuer 0 et 2:
possible = [0, 2, 3]
moins whos
liste des numéros à attribuer: dispo = [0, 2]
len(dispo) = TODO
"""
done = [x for x in whos if x is not None]
dispo = [x for x in range(self.person_nbr) if x not in whos]
print("whos avec nearest:", whos, "TODO:", TODO, "done:", done, "dispo", dispo)
# Attribution importante
d = 0
while TODO > 0:
for i, who in enumerate(whos):
if who is None:
whos[i] = dispo[d]
TODO -= 1
d += 1
break
print("whos final:", whos)
return whos
def attribution_with_nearest(self, dists):
""" Attribution avec le plus près
Nombre de squelette ------------------------> 2
distances: {0: [2, 1091, 1557, 100000], 1: [1092, 3, 1415, 100000]}
whos: [0, 1, None, None] TODO: 0
whos final [0, 1, None, None]
Nombre de squelette ------------------------> 2
distances: {0: [1091, 2, 1413, 100000], 1: [3, 1096, 1556, 100000]}
whos: [1, 0, None, None] TODO: 0
whos final [1, 0, None, None]
"""
whos = [None]*self.person_nbr
gaps = []
# Nombre de squelette qui reste à attribuer
TODO = self.skelet_nbr
for i in range(self.skelet_nbr):
if i in dists:
# Le mini dans la liste
mini = min(dists[i])
# Position du mini dans la liste
index = dists[i].index(mini)
if mini < self.distance:
gaps.append(mini)
whos[index] = i
TODO -= 1
# Ne sert que pour l'affichage
try:
for i in range(len(gaps)):
self.personnages[whos[i]].gap = gaps[i]
except:
pass
return whos, TODO
def apply_to_personnages_with_stability(self, skelets_2D, skelets_3D):
"""
Dans perso.histo, affichage de perso si les 3 derniers sont valides
soit pas de 100000 dans les 3 derniers, 3 = stability
Si un perso est valide, et que pas de valeur en cours, utilisation de la
précédente.
Un perso est valide si perso.stability == 3
Suite à voir ...
"""
for i in range(self.person_nbr):
# Perso bien suivi
if self.whos[i] is not None:
if self.personnages[i].stability == self.stability:
self.personnages[i].who = self.whos[i]
self.personnages[i].xys = skelets_2D[self.whos[i]]
self.personnages[i].points_3D = skelets_3D[self.whos[i]]
c = get_center(skelets_3D[self.whos[i]])
self.personnages[i].center = c
self.personnages[i].add_historic(c)
self.personnages[i].stability += 1
else:
self.personnages[i].stability += 1
if self.personnages[i].stability > self.stability:
self.personnages[i].stability = self.stability
# No data on this frame
else:
# The person is still valid
if self.personnages[i].stability == 3:
self.personnages[i].add_historic(self.personnages[i].center)
self.personnages[i].stability -= 1
else:
self.personnages[i].who = None
self.personnages[i].xys = None
self.personnages[i].points_3D = None
self.personnages[i].add_historic(self.personnages[i].center)
def apply_to_personnages(self, skelets_2D, skelets_3D):
""" whos du type [1, 0, None, 2]
1 attribué au perso 0
0 attribué au perso 1 ... etc ...
"""
for i in range(self.person_nbr):
# Data valide
if self.whos[i] is not None:
self.personnages[i].who = self.whos[i]
self.personnages[i].xys = skelets_2D[self.whos[i]]
self.personnages[i].points_3D = skelets_3D[self.whos[i]]
c = get_center(skelets_3D[self.whos[i]])
self.personnages[i].center = c
self.personnages[i].add_historic(c)
# No data on this frame
else:
self.personnages[i].who = None
self.personnages[i].xys = None
self.personnages[i].points_3D = None
self.personnages[i].add_historic(self.personnages[i].center)
def get_skelets_3D(self, skelets_2D):
skelets_3D = []
for xys in skelets_2D:
# #print("xys", xys)
pts = self.get_points_3D(xys)
if pts:
skelets_3D.append(pts)
return skelets_3D
def get_points_3D(self, xys):
"""Calcul des coordonnées 3D dans un repère centré sur la caméra,
avec le z = profondeur
La profondeur est une moyenne de la profondeur des points autour,
sauf les extrêmes, le plus petit et le plus gand.
points_3D est une liste de 17 items, soit (x,y,z), soit None
y est la verticale
z est la profondeur
"""
points_3D = [None]*17
colors = [None]*17
# Parcours des squelettes
for key, xy in xys.items():
x = xy[0]
y = xy[1]
if x and y:
# Compute the depth of the point
profondeur = self.get_profondeur(x, y)
if profondeur:
# Compute the 3D coordinates from the x, y image coordinates
# and the depth of the point
# (alias because the original function name is long)
point_2D_to_3D = rs.rs2_deproject_pixel_to_point
point_with_deph = point_2D_to_3D(self.depth_intrinsic,
[x, y],
profondeur)
# Convert meters to millimeters
points_3D[key] = [int(1000*x) for x in point_with_deph]
# Drop skeletons with no joint at all, which happens when every
# joint is below the confidence threshold
if points_3D == [None]*17:
points_3D = None
return points_3D
def get_profondeur(self, x, y):
"""Calcul la moyenne des profondeurs des pixels auour du point considéré
Filtre les absurdes et les trop loins
"""
profondeur = None
distances = []
# around = nombre de pixel autour du points
x_min = max(x - self.around, 0)
x_max = min(x + self.around, self.depth_frame.width)
y_min = max(y - self.around, 0)
y_max = min(y + self.around, self.depth_frame.height)
for u in range(x_min, x_max):
for v in range(y_min, y_max):
# Depth of the pixel at coordinates (u, v) in the image
distances.append(self.depth_frame.get_distance(u, v))
# If no value is found, [0.0, 0.0, 0.0, 0.0] is returned
# Remove all occurrences of the value 0.0
dists = [i for i in distances if i != 0.0]
dists_sort = sorted(dists)
if len(dists_sort) > 2:
# Drop the smallest and the largest value
goods = dists_sort[1:-1]
# TODO: add a filter for absurd values?
# Average the remaining depths
somme = 0
for item in goods:
somme += item
profondeur = somme/len(goods)
return profondeur
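# A small worked example (values are illustrative): with around = 2 the window
# spans 4x4 pixels (ranges are half-open), zero depths are discarded, the
# smallest and largest remaining values are dropped, and the rest are averaged.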
def draw_all_poses(self):
for i, perso in enumerate(self.personnages):
if perso.xys:
self.draw_pose(perso.xys, COLORS[i])
def draw_pose(self, xys, color):
"""Affiche les points 2D, et les 'os' dans l'image pour un personnage
xys = {0: [790, 331], 2: [780, 313], ... }
"""
points = []
for xy in xys.values():
points.append(xy)
# Dessin des points
for point in points:
x = point[0]
y = point[1]
cv2.circle(self.color_arr, (x, y), 5, color=(100, 100, 100),
thickness=-1)
cv2.circle(self.color_arr, (x, y), 6, color=color, thickness=1)
# Draw the bones
for a, b in EDGES:
if a not in xys or b not in xys:
continue
ax, ay = xys[a]
bx, by = xys[b]
cv2.line(self.color_arr, (ax, ay), (bx, by), color, 2)
def draw_all_textes(self):
"""
for i, perso in enumerate(self.personnages):
if perso.dist:
if perso.dist != 100000:
text = perso.dist
x = 30
y = 200 + i*100
self.draw_texte(text, x, y, COLORS[i])
"""
text = "Distance: " + str(self.distance)
x = 30
y = 50
self.draw_texte(text, x, y, COLORS[3])
text = "Confiance: " + str(self.threshold)
x = 30
y = 100
self.draw_texte(text, x, y, COLORS[2])
text = "Stability: " + str(self.stability)
x = 30
y = 150
self.draw_texte(text, x, y, COLORS[1])
text = "Gap:"
x = 800
y = 50
self.draw_texte(text, x, y, (125,125,125))
for i in range(self.person_nbr):
text = str(self.personnages[i].gap)
x = 800
y = 100 + 50*i
self.draw_texte(text, x, y, COLORS[i])
def draw_texte(self, depth, x, y, color):
"""Affichage d'un texte"""
cv2.putText(self.color_arr, # image
str(depth), # text
(x, y), # position
cv2.FONT_HERSHEY_SIMPLEX, # police
1, # taille police
color, # couleur
2) # épaisseur
def draw_all_personnages(self):
self.black = np.zeros((720, 1280, 3), dtype = "uint8")
cv2.line(self.black, (0, 360), (1280, 360), (255, 255, 255), 2)
for i, perso in enumerate(self.personnages):
if perso.center and perso.center[0] and perso.center[2]:
x = 360 + int(perso.center[0]*160/1000)
if x < 0: x = 0
if x > 1280: x = 1280
y = int(perso.center[2]*160/1000)
if y < 0: y = 0
if y > 720: y = 720
self.draw_personnage(y, x, COLORS[i])
def draw_personnage(self, x, y, color):
cv2.circle(self.black, (x, y), 10, (100, 100, 100), -1)
cv2.circle(self.black, (x, y), 12, color=color, thickness=2)
def post_capture_update(self):
"""Traitement post capture"""
self.post_capture.update(self.personnages)
def run(self, conn):
"""Boucle infinie, quitter avec Echap dans la fenêtre OpenCV"""
t0 = time()
nbr = 0
if conn:
self.receive_thread(conn)
while self.loop:
nbr += 1
frames = self.pipeline.wait_for_frames(timeout_ms=80)
# Align the depth frame to color frame
aligned_frames = self.align.process(frames)
color = aligned_frames.get_color_frame()
self.depth_frame = aligned_frames.get_depth_frame()
if not self.depth_frame or not color:
continue
color_data = color.as_frame().get_data()
self.color_arr = np.asanyarray(color_data)
outputs = self.this_posenet.get_outputs(self.color_arr)
# Find the captured persons
self.main_frame(outputs)
# Post-capture processing
self.post_capture_update()
# Display the images
cv2.imshow('color', self.color_arr)
cv2.imshow('vue du dessus', self.black)
# FPS computation, printed every 10 s
if time() - t0 > 10:
# #print("FPS =", int(nbr/10))
t0, nbr = time(), 0
k = cv2.waitKey(1)
# Space toggles full screen
if k == 32: # space
if self.full_screen == 1:
self.full_screen = 0
elif self.full_screen == 0:
self.full_screen = 1
self.set_window()
# To quit
if k == 27: # Esc
break
# Clean OpenCV shutdown
cv2.destroyAllWindows()
def receive_thread(self, conn):
t = Thread(target=self.receive, args=(conn, ))
t.start()
def receive(self, conn):
while self.loop:
data = conn.recv()
print("dans run processus =", data)
if data[0] == 'quit':
self.loop = 0
elif data[0] == 'threshold':
self.threshold = data[1]
elif data[0] == 'around':
self.around = data[1]
elif data[0] == 'distance':
self.distance = data[1]
elif data[0] == 'stability':
self.stability = data[1]
for i in range(len(self.personnages)):
self.personnages[i].stability = self.stability
sleep(0.001)
def get_teinte_average(all_colors):
"""Calcul la moyenne des 3 couleurs
all_colors est comme color_data ??
retourne teinte moyenne
"""
average = [[], [], []]
for i in range(3):
average[i] = get_moyenne(all_colors, i)
return average
def get_distance(p1, p2):
"""Distance entre les points p1 et p2, dans le plan horizontal,
sans prendre en compte le y qui est la verticale.
"""
if p1 and p2:
if None not in p1 and None not in p2:
d = ((p1[0] - p2[0])**2 + (p1[2] - p2[2])**2)**0.5
return int(d)
return 100000
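# A minimal worked example (hypothetical points): with p1 = [300, 0, 400] and
# p2 = [0, 0, 0], only x and z are used, so get_distance returns
# int(sqrt(300**2 + 400**2)) = 500 (millimeters, like the 3D points).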
def get_center(points_3D):
"""Le centre est le centre de vue du dessus,
la verticale (donc le y) n'est pas prise en compte.
"""
center = []
if points_3D:
for i in range(3):
center.append(get_moyenne(points_3D, i))
return center
def get_moyenne(points_3D, indice):
"""Calcul la moyenne d'une coordonnée des points,
la profondeur est le 3 ème = z, le y est la verticale
indice = 0 pour x, 1 pour y, 2 pour z
"""
somme = 0
n = 0
for i in range(17):
if points_3D[i]:
n += 1
somme += points_3D[i][indice]
if n != 0:
moyenne = int(somme/n)
else:
moyenne = None
return moyenne
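# For example, if only two joints are present with x values 100 and 200,
# get_moyenne(points_3D, 0) returns int(300/2) = 150; it returns None when
# no joint is present at all.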
def main():
ini_file = 'personnages3d.ini'
config_obj = MyConfig(ini_file)
config = config_obj.conf
# Create the object
p3d = Personnages3D(**config)
# Rolling: quiet on set, camera, action!!!
conn = None
p3d.run(conn)
def run_in_Process(config, conn):
p3d = Personnages3D(**config)
p3d.run(conn)
if __name__ == '__main__':
"""Excécution de ce script en standalone"""
main()
|
webhook.py
|
'''
webhook.py
pj@mrpjevans.com
Create a WebHook at ifttt.com to do, well, whatever you want! Maybe send
an email to begin with. You'll give it a trigger name which is used to
create a URL something like the following:
https://maker.ifttt.com/trigger/{trigger_name}/with/key/{your_key}
Replace those two values in {} and go to the URL to check it's working.
Once you're happy, change WEBHOOK below to match. Run the script. Every time
the light is switched on, your WebHook is called.
Make sure you update the IP address to match your gateway's and
run $ python3 -i -m pytradfri IP to (re)create your
tradfri_standalone_psk.conf (Credentials file)
'''
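# A minimal sanity check (hypothetical trigger name and key shown): once the
# URL below is filled in, you can confirm the WebHook fires from a Python
# shell before running this script.
#
#   import urllib.request
#   urllib.request.urlopen('https://maker.ifttt.com/trigger/light_on/with/key/abc123XYZ')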
from pytradfri import Gateway
from pytradfri.api.libcoap_api import APIFactory
from pytradfri.util import load_json, save_json
from time import sleep
import urllib.request
import threading
# Change this IP address to your gateway
IP_ADDRESS = '192.168.0.158'
# Make sure you're in the same directory as this file
CONFIG_FILE = 'tradfri_standalone_psk.conf'
WEBHOOK = 'https://maker.ifttt.com/trigger/{trigger_name}/with/key/{your_key}'
# Load in the file, get our password for the gateway and create an API
conf = load_json(CONFIG_FILE)
identity = conf[IP_ADDRESS].get('identity')
psk = conf[IP_ADDRESS].get('key')
api_factory = APIFactory(host=IP_ADDRESS, psk_id=identity, psk=psk)
# This section connects to the gateway and gets information on devices
api = api_factory.request
gateway = Gateway()
devices_command = gateway.get_devices()
devices_commands = api(devices_command)
devices = api(devices_commands)
# Create an array of objects that are lights
lights = [dev for dev in devices if dev.has_light_control]
def observe(api, device):
def callback(updated_device):
light = updated_device.light_control.lights[0]
if (light.state):
# Light has been switched on
print('Light on! Calling WebHook...')
urllib.request.urlopen(WEBHOOK)
else:
print('Light off')
def err_callback(err):
print(err)
def worker():
api(device.observe(callback, err_callback, duration=120))
threading.Thread(target=worker, daemon=True).start()
print('Sleeping to start observation task')
sleep(1)
observe(api, lights[0])
while True:
sleep(0.01)
|
test_futures.py
|
import os
import subprocess
import sys
import threading
import functools
import contextlib
import logging
import re
import time
import gc
import traceback
from StringIO import StringIO
from test import test_support
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future)
from concurrent.futures.thread import cpu_count
try:
import unittest2 as unittest
except ImportError:
import unittest
def reap_threads(func):
"""Use this function when threads are being used. This will
ensure that the threads are cleaned up even when the test fails.
If threading is unavailable this function does nothing.
"""
@functools.wraps(func)
def decorator(*args):
key = test_support.threading_setup()
try:
return func(*args)
finally:
test_support.threading_cleanup(*key)
return decorator
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
cmd_line = [sys.executable]
if not env_vars:
cmd_line.append('-E')
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
# But a special flag that can be set to override -- in this case, the
# caller is responsible to pass the full environment.
if env_vars.pop('__cleanenv', None):
env = {}
env.update(env_vars)
cmd_line.extend(args)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
err = strip_python_stderr(err)
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
"""
Assert that running the interpreter with `args` and optional environment
variables `env_vars` is ok and return a (return code, stdout, stderr) tuple.
"""
return _assert_python(True, *args, **env_vars)
def strip_python_stderr(stderr):
"""Strip the stderr of a Python process from potential debug output
emitted by the interpreter.
This will typically be run on the result of the communicate() method
of a subprocess.Popen object.
"""
stderr = re.sub(r"\[\d+ refs\]\r?\n?$".encode(), "".encode(), stderr).strip()
return stderr
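# For example, a debug build of Python 2 appends a line like "[8270 refs]" to
# stderr on exit; strip_python_stderr(b"hello\n[8270 refs]\n") returns b"hello".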
@contextlib.contextmanager
def captured_stderr():
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO."""
logging_stream = StringIO()
handler = logging.StreamHandler(logging_stream)
logging.root.addHandler(handler)
try:
yield logging_stream
finally:
logging.root.removeHandler(handler)
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=IOError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
class ExecutorMixin:
worker_count = 5
def setUp(self):
self.t1 = time.time()
try:
self.executor = self.executor_type(max_workers=self.worker_count)
except NotImplementedError:
e = sys.exc_info()[1]
self.skipTest(str(e))
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
dt = time.time() - self.t1
if test_support.verbose:
print("%.2fs" % dt)
self.assertLess(dt, 60, "synchronization issue: test lasted too long")
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
class ExecutorShutdownTest(unittest.TestCase):
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import %s
from time import sleep
from test_futures import sleep_and_print
t = %s(5)
t.submit(sleep_and_print, 1.0, "apple")
""" % (self.executor_type.__name__, self.executor_type.__name__))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), "apple".encode())
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_threads_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._threads), 3)
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
gc.collect()
for t in threads:
t.join()
class ProcessPoolShutdownTest(ProcessPoolMixin, ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes:
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes:
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
del executor
gc.collect()
queue_management_thread.join()
for p in processes:
p.join()
class WaitTests(unittest.TestCase):
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 3)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=1.5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getcheckinterval()
sys.setcheckinterval(1)
try:
fs = set(self.executor.submit(future_func) for i in range(100))
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setcheckinterval(oldswitchinterval)
class ProcessPoolWaitTests(ProcessPoolMixin, WaitTests):
pass
class AsCompletedTests(unittest.TestCase):
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 2)
completed = [f for f in futures.as_completed([future1,future1])]
self.assertEqual(len(completed), 1)
class ThreadPoolAsCompletedTests(ThreadPoolMixin, AsCompletedTests):
pass
class ProcessPoolAsCompletedTests(ProcessPoolMixin, AsCompletedTests):
pass
class ExecutorTest(unittest.TestCase):
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(next(i), (0, 1))
self.assertEqual(next(i), (0, 1))
self.assertRaises(ZeroDivisionError, next, i)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 3],
timeout=1.5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaises(ValueError) as cm:
self.executor_type(max_workers=number)
assert str(cm.exception) == "max_workers must be greater than 0"
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertEqual(len(finished), 10)
def test_default_workers(self):
executor = self.executor_type()
self.assertEqual(executor._max_workers,
(cpu_count() or 1) * 5)
class ProcessPoolExecutorTest(ProcessPoolMixin, ExecutorTest):
pass
class FutureTests(unittest.TestCase):
def test_done_callback_with_result(self):
callback_result = [None]
def fn(callback_future):
callback_result[0] = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result[0])
def test_done_callback_with_exception(self):
callback_exception = [None]
def fn(callback_future):
callback_exception[0] = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception[0].args)
def test_done_callback_with_cancel(self):
was_cancelled = [None]
def fn(callback_future):
was_cancelled[0] = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled[0])
def test_done_callback_raises(self):
with captured_stderr() as stderr:
raising_was_called = [False]
raising_old_style_was_called = [False]
fn_was_called = [False]
def raising_fn(callback_future):
raising_was_called[0] = True
raise Exception('doh!')
def raising_old_style_fn(callback_future):
raising_old_style_was_called[0] = True
class OldStyle: # Does not inherit from object
def __str__(self):
return 'doh!'
raise OldStyle()
def fn(callback_future):
fn_was_called[0] = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(raising_old_style_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(raising_old_style_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
self.assertIn('OldStyle: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = [None]
def fn(callback_future):
callback_result[0] = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result[0])
def test_done_callback_already_failed(self):
callback_exception = [None]
def fn(callback_future):
callback_exception[0] = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception[0].args)
def test_done_callback_already_cancelled(self):
was_cancelled = [None]
def fn(callback_future):
was_cancelled[0] = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled[0])
def test_repr(self):
self.assertRegexpMatches(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+L? state=pending>')
self.assertRegexpMatches(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+L? state=running>')
self.assertRegexpMatches(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+L? state=cancelled>')
self.assertRegexpMatches(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+L? state=cancelled>')
self.assertRegexpMatches(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+L? state=finished raised IOError>')
self.assertRegexpMatches(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+L? state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=IOError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(IOError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependant.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependant.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
IOError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = IOError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), IOError))
def test_old_style_exception(self):
class OldStyle: # Does not inherit from object
def __str__(self):
return 'doh!'
callback_exc_info = [None]
def fn(callback_future):
callback_exc_info[0] = callback_future.exception_info()
f = Future()
f.add_done_callback(fn)
try:
raise OldStyle()
except OldStyle:
want_exc_info = sys.exc_info()
f.set_exception_info(*want_exc_info[1:])
self.assertEqual(f.exception_info(), want_exc_info[1:])
self.assertEqual(callback_exc_info[0], want_exc_info[1:])
try:
f.result()
except OldStyle:
got_exc_info = sys.exc_info()
else:
self.fail('OldStyle exception not raised')
self.assertEqual(got_exc_info[:2], want_exc_info[:2])
got_tb = traceback.extract_tb(got_exc_info[2])
want_tb = traceback.extract_tb(want_exc_info[2])
self.assertEqual(got_tb[-len(want_tb):], want_tb)
@reap_threads
def test_main():
try:
test_support.run_unittest(ProcessPoolExecutorTest,
ThreadPoolExecutorTest,
ProcessPoolWaitTests,
ThreadPoolWaitTests,
ProcessPoolAsCompletedTests,
ThreadPoolAsCompletedTests,
FutureTests,
ProcessPoolShutdownTest,
ThreadPoolShutdownTest)
finally:
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
render.py
|
#!/usr/bin/python
import pygame
import sys
import time
import dpkt
import socket
import struct
import psu
from board import Board
pygame.init()
board = Board(height=45) # we will show the diagonal
# actually only 44x57, but the diagonal makes it 45x57, where the diagonal is always off
for x in xrange(57):
for y in xrange(45):
board.set_light(x, y, (0, 0, 255))
board.display()
def parse_pkt(board, psu_id, pkt, display=True):
strand_id, bulbs_len, = struct.unpack('>II', pkt[13:21])
for bulb_id in xrange(0, bulbs_len/3):
r, g, b, = struct.unpack('>BBB', pkt[24+bulb_id*3:27+bulb_id*3])
y = bulb_id
x = (psu_id*7 + 8) - (strand_id)
# Might seem weird, but basically the left-most PSU (id=0)
# has 8 strands, but all the others have 7
# (for a total of 57)
#
#0, 8 => 0
#...
# , 1 => 7
#1, 7 => 8
# , 6 => 9
# , 5 => 10
# , 4 => 11
# , 3 => 12
# , 2 => 13
# , 1 => 14
#2, 7 => 15
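# In other words x = psu_id*7 + 8 - strand_id: PSU 0 (with 8 strands) fills
# columns 0-7 and every later PSU (7 strands each) fills the next 7 columns.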
try:
board.set_light(x, y, (r, g, b))
except Exception:
print 'Error: %d %d %d => %d, %d' % (psu_id, bulb_id, strand_id, x, y)
sys.exit(1)
if display:
board.display()
initial_ts = None
initial_rt = None
def wait_for(ts):
global initial_ts
global initial_rt
if initial_ts is None:
initial_ts = ts
initial_rt = time.time()
return
offset = (ts - initial_ts)
sleep_time = (offset + initial_rt) - time.time()
if sleep_time < 0.01:
return
time.sleep(sleep_time)
#dest_psu_ips = ['10.4.57.127', '10.4.57.131', '10.4.57.134', '10.4.57.120', '10.4.57.133', '10.4.132.113', '10.4.163.250', '10.4.135.141']
dest_psu_addrs = [socket.inet_aton(x) for x in psu.dest_ips]
def handle_pkt(ts, buf):
eth = dpkt.ethernet.Ethernet(buf)
if eth.type == dpkt.ethernet.ETH_TYPE_IP:
ip = eth.data
if ip.p == dpkt.ip.IP_PROTO_UDP:
udp = ip.data
#if ip.src == socket.inet_aton("10.1.3.100") and udp.dport == 6038:
if udp.dport == 6038:
if ts is not None:
wait_for(ts)
parse_pkt(board, dest_psu_addrs.index(ip.dst), udp.data)
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit();
sys.exit();
#time.sleep(.05)
running = True
def psu_worker(board, psu_idx, sock, lock):
while running:
data, addr = sock.recvfrom(1024)
# take lock?
lock.acquire()
parse_pkt(board, psu_idx, data, True)
# release lock?
lock.release()
import threading
if len(sys.argv) > 1:
f = open(sys.argv[1], 'r')
pcap = dpkt.pcap.Reader(f)
for ts, buf in pcap:
handle_pkt(ts, buf)
else:
# bind to ifaces
listeners = []
for ip in psu.dest_ips:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
s.bind((ip, 6038))
listeners.append(s)
threads = []
lock = threading.Lock()
for psu_idx in xrange(len(listeners)):
t = threading.Thread(target=psu_worker, args=(board, psu_idx, listeners[psu_idx], lock,))
threads.append(t)
t.start()
while 1:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit();
running = False
sys.exit();
time.sleep(0.01)
if False:
# read from iface
s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0x0300)
while True:
buf = s.recv(0xffff)
handle_pkt(None, buf)
|
docker.py
|
# Copyright 2016 Koichi Shiraishi. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import re
import threading
from .base import Base
from deoplete.util import load_external_module
load_external_module(__file__, 'urllib3')
load_external_module(__file__, 'dockerhub')
load_external_module(__file__, 'certifi')
from dockerhub.dockerhub import DockerHub
KEYWORD = [
'ADD', 'ARG', 'CMD', 'COPY', 'ENTRYPOINT', 'ENV', 'EXPOSE', 'FROM',
'HEALTHCHECK', 'LABEL', 'MAINTAINER', 'RUN', 'SHELL', 'STOPSIGNAL', 'USER',
'VOLUME', 'WORKDIR'
]
class Source(Base):
def __init__(self, vim):
Base.__init__(self, vim)
self.name = 'docker'
self.mark = '[Docker]'
self.filetypes = ['dockerfile']
self.input_pattern = r'[a-zA-Z_]\w*[:/]\w*|' + \
r'^\s*[' + '|'.join(KEYWORD) + r']\s+(?:[\w\.]*(?:,\s*)?)*'
self.rank = 500
self.debug_enabled = 1
self.hub = DockerHub()
self.cache_images = dict()
self.cache_tags = dict()
self.keyword_result = [{'word': x} for x in KEYWORD]
def init(self, context):
try:
images = self.hub.search('library')
except Exception:
pass
else:
self.cache_images['library'] = []
for i in images:
self.cache_images['library'].append({
'word': i['name'],
'kind': i['description'],
'dup': 1,
})
def on_init(self, context):
th = threading.Thread(target=self.init, name='init', args=(context, ))
th.start()
def get_complete_position(self, context):
m = re.search(r'\w*$', context['input'])
return m.start() if m else -1
def gather_candidates(self, context):
input_text = context['input']
if 'FROM' in input_text:
return self.result_from(context['input'])
elif 'ONBUILD' in input_text:
return self.keyword_result
else:
return self.keyword_result + [{'word': 'ONBUILD'}]
def result_from(self, input_text):
t = input_text.strip('FROM ')
if t:
if t.find(':') != -1:
name = t.split(':')[0]
if self.cache_tags.get(name):
return self.cache_tags[name]
else:
tags = self.hub.tags(name)
out = []
for i in tags:
out.append({
'word': i['name'],
'dup': 1,
})
self.cache_tags[name] = out
return out
elif t.find('/') != -1:
user = t.split('/')[0]
if self.cache_images.get(user):
return self.cache_images[user]
else:
images = self.hub.search(user)
out = []
for i in images:
out.append({
'word': i['name'],
'kind': i['description'],
'dup': 1,
})
self.cache_images[user] = out
return self.cache_images[user]
else:
return self.cache_images['library']
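# For example, with "FROM python:" on the current line, gather_candidates()
# routes to result_from(), which asks Docker Hub for the tags of the "python"
# image and caches them in self.cache_tags for later completions.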
|
AppServer.py
|
# coding: utf-8
import json  # used by json.loads below
import socket
import subprocess
import threading
from time import ctime, sleep
from util import *
import yaml
print('========= SDP Application Server =========')
# Read the configuration file
try:
f = open('config.yaml', 'r')
global_config = yaml.load(f.read(), Loader=yaml.FullLoader)
# {'AuthServer': {'port': 6789, 'id': 1, 'db_host': 'localhost', 'db_user': 'root', 'db_password': '', 'db_database': 'SDP', 'certificate_validity': 60}, 'AppServer': {'port': 6790, 'id': 1}, 'Client': {'id': 1}}
print('========== Reading configuration ==========')
f = open('config.yaml', 'r')
print(f.read() + '\n===============================')
f.close()
except Exception as e:
print('Configuration read error! Details:')
print(e)
exit(1)
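# A config.yaml matching the keys used below would look roughly like this
# (hostnames, ports and listen_num are illustrative, not taken from the project):
#
# AuthServer:
#   ip: 127.0.0.1
#   port: 6789
# AppServer:
#   ip: 0.0.0.0
#   port: 6790
#   listen_num: 5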
def appInstance(client_socket, ip_addr, userInfo: dict):
# out_text = subprocess.check_output(['python', '/Users/ylcao/Documents/code/python/github/SDP/src/app.py'])
# client_socket.send(pack_mess(uIP=userInfo['userIP'], uID=userInfo['userID'], sIP=userInfo['serverIP'], sID=userInfo['serverID'], cre='', mess_type='con', mess=out_text))
while True:
try:
# Receive a message
date = client_socket.recv(1024)
if not date:
print('[%s] Lost connection to the user client: %s ' % (ctime(), str(ip_addr)))
break
# Decode the message
date_str = date.decode('utf-8').strip()
# Print the message
print(f'[{ctime()}] Message from ' + ip_addr[0] + ': ' + date_str)
# Parse the message
message = json.loads(date_str)
# Check the credential on every message
Request_result = accessRequest(message, message['credential'])
# Invalid credential
if Request_result == 'invalid':
# Send an invalid-credential message
client_socket.send(pack_mess(uIP=message['userIP'], uID=message['userID'], sIP=message['serverIP'], sID=userInfo['serverID'], cre='', mess_type='pol', mess=Request_result))
break
# Valid credential
else:
client_socket.send(pack_mess(uIP=message['userIP'], uID=message['userID'], sIP=message['serverIP'], sID=userInfo['serverID'], cre='', mess_type='con', mess='Application accessed successfully'))
except Exception as e:
print('Session error:')
print(e)
break
client_socket.close()
def accessRequest(message: dict, current_credential: str) -> str:
# Connect to the authorization server
while True:
try:
ssl_authServer = ssl_client(global_config['AuthServer']['ip'], global_config['AuthServer']['port'])
print('Connected to the authorization server')
# Receive the greeting message
ssl_authServer.recv(1024)
break
except Exception as e:
print(f'[{ctime()}] Failed to connect to the authorization server, retrying in five seconds...')
ssl_authServer.close()
sleep(5)
continue
while True:
try:
# Send the user credential message
ssl_authServer.send(pack_mess(uIP=message['userIP'], uID=message['userID'], sIP=message['serverIP'], sID=message['serverID'], cre='', mess_type='cre', mess=current_credential))
# The server returns the verification result
date = ssl_authServer.recv(1024)
if not date:
print('[%s] Lost connection to the authorization server: %s ' % (ctime(), global_config['AuthServer']['ip']))
break
# Decode the message
date_str = date.decode('utf-8').strip()
# Print the message
print(f'[{ctime()}] Message from ' + global_config['AuthServer']['ip'] + ': ' + date_str)
# Parse the message
server_result = json.loads(date_str)
if server_result['content'] != 'invalid':
ssl_authServer.close()
return server_result['content']
return 'invalid'
except Exception as e:
print('Session error:')
print(e)
break
ssl_authServer.close()
return 'invalid'
# Handle a newly established connection
def tcp_link(client_socket, ip_addr):
print("New user resource-access connection: %s" % str(ip_addr))
msg = 'Welcome to the SDP application server!' + "\r\n"
client_socket.send(msg.encode('utf-8'))
# Handle client requests in a loop
while True:
# Receive data from the client
date = client_socket.recv(1024)
if not date:
print('[%s] Connection closed: %s ' % (ctime(), str(ip_addr)))
break
try:
# Decode the message
date_str = date.decode('utf-8').strip()
# Print the message
print(f'[{ctime()}] Message from {ip_addr}: {date_str}')
# Parse the message into a dict
message = json.loads(date_str)
# Handle the message
if message['mess_type'] == 'cre':
# Call the access-request function
Request_result = accessRequest(message, message['content'])
# Invalid credential
if Request_result == 'invalid':
# Send an invalid-credential message
client_socket.send(pack_mess(uIP=message['userIP'], uID=message['userID'], sIP=message['serverIP'], sID=message['serverID'], cre='', mess_type='pol', mess=Request_result))
break
# Valid credential
else:
# Send the credential's permission info
client_socket.send(pack_mess(uIP=message['userIP'], uID=message['userID'], sIP=message['serverIP'], sID=message['serverID'], cre='', mess_type='pol', mess=Request_result))
# Hand over to the app instance
appInstance(client_socket, ip_addr, message)
# Disconnect once the instance finishes
break
except Exception as e:
client_socket.send(f'Request handling error! Closing connection: {ip_addr}\n'.encode('utf-8'))
print(f'Request handling error! Closing connection: {ip_addr}')
print(e)
break
# Close the socket and release resources
client_socket.close()
def main():
global db
ssl_socket = ssl_server(global_config['AppServer']['ip'], global_config['AppServer']['port'], global_config['AppServer']['listen_num'])
# Accept new connections in a loop
while True:
try:
# Establish the client connection
client_socket, ip_addr = ssl_socket.accept()
t = threading.Thread(
target=tcp_link, args=(client_socket, ip_addr))
t.daemon = True
t.start()
except Exception as e:
print(e)
break
# Close the listening socket
ssl_socket.close()
# Close the database
db.close()
if __name__ == '__main__':
main()
|
email.py
|
from email import message_from_bytes
# noinspection PyProtectedMember
from email.message import EmailMessage, MIMEPart
from email.utils import parsedate_to_datetime
from typing import Callable, Type
import datetime
import imaplib
import logging
import smtplib
import threading
import time
import tzlocal
from chatty.exceptions import OperationNotSupported, SignalTypeNotSupported
from chatty.sessions.interface import Session
from chatty.signals.interface import Signal
from chatty.signals.message import Message
from chatty.signals.delivery_failure import DeliveryFailure
from chatty.signals.metadata import SignalMetaData
from chatty.types import LoginConfig
LOGGER = logging.getLogger(__name__)
def parse_email_address_list(value):
if not value or not value.strip():
return None
return [address.strip() for address in value.split(';') if address.strip()]
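# For example, parse_email_address_list('alice@example.com; bob@example.com')
# returns ['alice@example.com', 'bob@example.com']; addresses are assumed to be
# separated by semicolons, matching how send() joins them below.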
def parse_email_datetime(value, default=None):
if value is None:
return default
try:
return parsedate_to_datetime(value)
except (TypeError, ValueError):
pass
result = None
for piece in value.split(';'):
try:
parsed = parsedate_to_datetime(piece)
except (TypeError, ValueError):
pass
else:
if result is None or result < parsed:
result = parsed
if result is None:
result = default
return result
def is_delivery_status_notification(message: EmailMessage) -> bool:
# noinspection SpellCheckingInspection
"""See https://stackoverflow.com/questions/5298285/detecting-if-an-email-is-a-delivery-status-notification-and-extract-informatio
for details on how to detect delivery status notifications.
"""
return bool('mailer-daemon@' in message['from'].lower() or message['x-failed-recipients'] or
'multipart/report' in message.get_content_type() or 'delivery-status' in message.get_content_type() or
(message['action'] or '').lower() == 'failed' or
(message['subject'] or '').lower().startswith('delivery status notification') or
any('delivery-status' in part.get_content_type() for part in message.get_payload()
if not isinstance(part, str)))
class EmailSession(Session):
"""
An SMTP/IMAP4 email session. The smtp_factory and imap_factory arguments should be functions which
take no arguments and return fully initialized SMTP and IMAP4 connections, respectively. Connections should
already be authenticated before being returned, and the IMAP4 connection should have the appropriate folder
selected.
"""
@classmethod
def email_to_signal(cls, message: EmailMessage) -> 'Signal':
meta_data = SignalMetaData(
identifier=message['message-id'],
origin=message['from'],
addressees=parse_email_address_list(message['to']),
visible_to=parse_email_address_list(message['cc']),
response_to=message['in-reply-to'],
sent_at=parse_email_datetime(message['date']),
received_at=parse_email_datetime(message['received'])
)
# Check if it's a delivery failure notification.
if is_delivery_status_notification(message):
return DeliveryFailure(meta_data, content=message)
return Message(meta_data, message)
def __init__(self, smtp_factory: Callable[[], smtplib.SMTP], imap_factory: Callable[[], imaplib.IMAP4],
starting: datetime.datetime = None, rate: float = 300):
super().__init__()
self._smtp_factory = smtp_factory
self._imap_factory = imap_factory
self._starting = datetime.datetime.now(tzlocal.get_localzone()) if starting is None else starting
self._rate = rate
self._imap_thread = threading.Thread(target=self._imap_thread_main, daemon=True)
self._alive = True
self._imap_thread.start()
def close(self):
self._alive = False
self._imap_thread.join(timeout=1)
def join(self, timeout=None):
self._imap_thread.join(timeout)
def send(self, signal: Signal) -> None:
if not isinstance(signal, Signal):
raise TypeError(type(signal))
if not isinstance(signal, Message):
raise SignalTypeNotSupported(type(signal))
meta_data = signal.meta_data
content = signal.content
if isinstance(content, str):
content_string = content
content = MIMEPart()
content.set_payload(content_string)
if meta_data.room:
raise OperationNotSupported("Chat rooms are not supported for email sessions.")
if meta_data.identifier:
content['message-id'] = meta_data.identifier
if meta_data.origin:
content['from'] = meta_data.origin
if meta_data.addressees:
content['to'] = ';'.join(meta_data.addressees)
if meta_data.visible_to:
content['cc'] = ';'.join(meta_data.visible_to)
if meta_data.response_to:
content['reply-to'] = meta_data.response_to
with self._smtp_factory() as connection:
connection.send_message(content)
def _imap_thread_main(self):
seen = set()
while self._alive:
# noinspection PyBroadException
try:
with self._imap_factory() as connection:
while self._alive:
where = '(SENTSINCE {date:%d-%b-%Y})'.format(date=self._starting - datetime.timedelta(1))
result, data = connection.uid('search', None, where)
if result != 'OK':
raise RuntimeError("Unexpected response to search command: %s" % result)
if data[0] is None:
message_ids = []
else:
message_ids = data[0].split()
for message_id in message_ids:
if message_id in seen:
continue
result, data = connection.uid('fetch', message_id, '(RFC822)')
if result != 'OK':
raise RuntimeError("Unexpected response to fetch command: %s" % result)
email_message = message_from_bytes(data[0][1])
sent_date = parse_email_datetime(email_message['date'], self._starting)
if sent_date >= self._starting:
seen.add(message_id)
self.receive(self.email_to_signal(email_message))
time.sleep(self._rate)
except Exception:
LOGGER.exception("Error while trying to read email.")
time.sleep(self._rate)
class SMTPFactory:
"""Convenience class for creating"""
def __init__(self, connection_info: LoginConfig, connection_type: Type[smtplib.SMTP] = smtplib.SMTP_SSL):
self.connection_info = connection_info
self.connection_type = connection_type
def __call__(self) -> smtplib.SMTP:
connection = self.connection_type(host=self.connection_info.host, port=self.connection_info.port)
connection.connect(host=self.connection_info.host, port=self.connection_info.port)
connection.ehlo()
try:
connection.login(self.connection_info.user, self.connection_info.password)
except smtplib.SMTPNotSupportedError:
LOGGER.critical("Login not supported for %s on %s:%s." %
(self.connection_type.__name__, self.connection_info.host, self.connection_info.port))
return connection
class IMAPFactory:
def __init__(self, connection_info: LoginConfig, mailbox: str = 'inbox',
connection_type: Type[imaplib.IMAP4] = imaplib.IMAP4_SSL):
assert isinstance(connection_info, LoginConfig)
self.connection_info = connection_info
self.mailbox = mailbox
self.connection_type = connection_type
def __call__(self) -> imaplib.IMAP4:
connection = self.connection_type(host=self.connection_info.host, port=self.connection_info.port)
connection.login(self.connection_info.user, self.connection_info.password)
connection.select(self.mailbox)
return connection
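# Illustrative usage sketch (not part of the original module): wiring the two factories into an
# EmailSession. The LoginConfig keyword arguments below are assumptions based on the attributes
# used above (host, port, user, password); adjust them to the real LoginConfig signature.
#
#   smtp_login = LoginConfig(host='smtp.example.com', port=465, user='me@example.com', password='secret')
#   imap_login = LoginConfig(host='imap.example.com', port=993, user='me@example.com', password='secret')
#   session = EmailSession(SMTPFactory(smtp_login), IMAPFactory(imap_login), rate=60)
#   # ... register receivers / send Message signals ...
#   session.close()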
|
serverMediaPulse.py
|
from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread
import numpy as np
import zlib
import struct
import cv2
from detector.processor import getCustomPulseApp
HOST = input("Enter Host IP\n")
PORT_VIDEO = 3000
PORT_AUDIO = 4000
lnF = 640*480*3
CHUNK = 1024
BufferSize = 4096
addressesAudio = {}
addresses = {}
threads = {}
class Server:
def __init__(self):
        args = {
            "serial": "",
            "baud": "",
            "udp": ""
        }
        self.pulse = getCustomPulseApp(args)
def ConnectionsVideo():
while True:
try:
clientVideo, addr = serverVideo.accept()
print("{} is connected!!".format(addr))
addresses[clientVideo] = addr
if len(addresses) > 1:
for sockets in addresses:
if sockets not in threads:
threads[sockets] = True
sockets.send(("start").encode())
Thread(target=ClientConnectionVideo, args=(sockets, )).start()
else:
continue
except:
continue
def ConnectionsSound():
while True:
try:
clientAudio, addr = serverAudio.accept()
print("{} is connected!!".format(addr))
addressesAudio[clientAudio] = addr
Thread(target=ClientConnectionSound, args=(clientAudio, )).start()
except:
continue
def ClientConnectionVideo(clientVideo):
while True:
try:
lengthbuf = recvall(clientVideo, 4)
length, = struct.unpack('!I', lengthbuf)
recvall(clientVideo, length)
except:
continue
def ClientConnectionSound(clientAudio):
while True:
try:
data = clientAudio.recv(BufferSize)
broadcastSound(clientAudio, data)
except:
continue
def recvall(clientVideo, BufferSize):
databytes = b''
i = 0
while i != BufferSize:
to_read = BufferSize - i
if to_read > (1000 * CHUNK):
databytes = clientVideo.recv(1000 * CHUNK)
i += len(databytes)
processVideo(clientVideo, databytes)
else:
if BufferSize == 4:
databytes += clientVideo.recv(to_read)
else:
databytes = clientVideo.recv(to_read)
i += len(databytes)
if BufferSize != 4:
processVideo(clientVideo, databytes)
print("YES!!!!!!!!!" if i == BufferSize else "NO!!!!!!!!!!!!")
if BufferSize == 4:
processVideo(clientVideo, databytes)
return databytes
def broadcastVideo(clientSocket, data_to_be_sent):
for clientVideo in addresses:
if clientVideo != clientSocket:
clientVideo.sendall(data_to_be_sent)
def processVideo(clientSocket, data_to_process):
    img = zlib.decompress(data_to_process)
    if len(img) == lnF:
        print("Receiving Media..")
        print("Image Frame Size:- {}".format(len(img)))
        img = np.array(list(img))
        frame = np.array(img, dtype=np.uint8).reshape(480, 640, 3)
        # NOTE: the original referenced self.pulse from this module-level function; a module-level
        # Server() instance named `server` is assumed here to provide the pulse processor.
        server.pulse.process(frame)
    else:
        print("Data CORRUPTED")
def broadcastSound(clientSocket, data_to_be_sent):
for clientAudio in addressesAudio:
if clientAudio != clientSocket:
clientAudio.sendall(data_to_be_sent)
serverVideo = socket(family=AF_INET, type=SOCK_STREAM)
try:
serverVideo.bind((HOST, PORT_VIDEO))
except OSError:
print("Server Busy")
serverAudio = socket(family=AF_INET, type=SOCK_STREAM)
try:
serverAudio.bind((HOST, PORT_AUDIO))
except OSError:
print("Server Busy")
serverAudio.listen(2)
print("Waiting for connection..")
AcceptThreadAudio = Thread(target=ConnectionsSound)
AcceptThreadAudio.start()
serverVideo.listen(2)
print("Waiting for connection..")
AcceptThreadVideo = Thread(target=ConnectionsVideo)
AcceptThreadVideo.start()
AcceptThreadVideo.join()
serverVideo.close()
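# Client-side framing sketch (illustrative, not part of this script): ClientConnectionVideo/recvall
# above expect each frame as a 4-byte big-endian length prefix (struct '!I') followed by the
# zlib-compressed raw 480x640x3 frame bytes.
#
#   import socket, struct, zlib
#   payload = zlib.compress(frame.tobytes())            # frame: 480x640x3 uint8 numpy array
#   sock = socket.create_connection((HOST, PORT_VIDEO))
#   sock.sendall(struct.pack('!I', len(payload)) + payload)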
|
executor.py
|
"""HighThroughputExecutor builds on the Swift/T EMEWS architecture to use MPI for fast task distribution
"""
from concurrent.futures import Future
import typeguard
import logging
import threading
import queue
import pickle
from multiprocessing import Process, Queue
from typing import Any, Dict, List, Optional, Tuple, Union
from ipyparallel.serialize import pack_apply_message # ,unpack_apply_message
from ipyparallel.serialize import deserialize_object # ,serialize_object
from parsl.executors.high_throughput import zmq_pipes
from parsl.executors.high_throughput import interchange
from parsl.executors.errors import *
from parsl.executors.base import ParslExecutor
from parsl.dataflow.error import ConfigurationError
from parsl.providers.provider_base import ExecutionProvider
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
BUFFER_THRESHOLD = 1024 * 1024
ITEM_THRESHOLD = 1024
class HighThroughputExecutor(ParslExecutor, RepresentationMixin):
"""Executor designed for cluster-scale
The HighThroughputExecutor system has the following components:
1. The HighThroughputExecutor instance which is run as part of the Parsl script.
    2. The Interchange, which acts as a load-balancing proxy between workers and Parsl.
3. The multiprocessing based worker pool which coordinates task execution over several
cores on a node.
4. ZeroMQ pipes connect the HighThroughputExecutor, Interchange and the process_worker_pool
Here is a diagram
.. code:: python
| Data | Executor | Interchange | External Process(es)
| Flow | | |
Task | Kernel | | |
+----->|-------->|------------>|->outgoing_q---|-> process_worker_pool
| | | | batching | | |
Parsl<---Fut-| | | load-balancing| result exception
^ | | | watchdogs | | |
| | | Q_mngmnt | | V V
| | | Thread<--|-incoming_q<---|--- +---------+
| | | | | |
| | | | | |
+----update_fut-----+
Parameters
----------
provider : :class:`~parsl.providers.provider_base.ExecutionProvider`
Provider to access computation resources. Can be one of :class:`~parsl.providers.aws.aws.EC2Provider`,
:class:`~parsl.providers.cobalt.cobalt.Cobalt`,
:class:`~parsl.providers.condor.condor.Condor`,
:class:`~parsl.providers.googlecloud.googlecloud.GoogleCloud`,
:class:`~parsl.providers.gridEngine.gridEngine.GridEngine`,
:class:`~parsl.providers.jetstream.jetstream.Jetstream`,
:class:`~parsl.providers.local.local.Local`,
:class:`~parsl.providers.sge.sge.GridEngine`,
:class:`~parsl.providers.slurm.slurm.Slurm`, or
:class:`~parsl.providers.torque.torque.Torque`.
label : str
Label for this executor instance.
launch_cmd : str
Command line string to launch the process_worker_pool from the provider. The command line string
will be formatted with appropriate values for the following values (debug, task_url, result_url,
        cores_per_worker, nodes_per_block, heartbeat_period, heartbeat_threshold, logdir). For example:
launch_cmd="process_worker_pool.py {debug} -c {cores_per_worker} --task_url={task_url} --result_url={result_url}"
address : string
An address to connect to the main Parsl process which is reachable from the network in which
workers will be running. This can be either a hostname as returned by `hostname` or an
IP address. Most login nodes on clusters have several network interfaces available, only
some of which can be reached from the compute nodes. Some trial and error might be
        necessary to identify what addresses are reachable from compute nodes.
worker_ports : (int, int)
Specify the ports to be used by workers to connect to Parsl. If this option is specified,
worker_port_range will not be honored.
worker_port_range : (int, int)
Worker ports will be chosen between the two integers provided.
interchange_port_range : (int, int)
Port range used by Parsl to communicate with the Interchange.
working_dir : str
Working dir to be used by the executor.
worker_debug : Bool
Enables worker debug logging.
managed : Bool
If this executor is managed by the DFK or externally handled.
cores_per_worker : float
cores to be assigned to each worker. Oversubscription is possible
by setting cores_per_worker < 1.0. Default=1
max_workers : int
Caps the number of workers launched by the manager. Default: infinity
prefetch_capacity : int
Number of tasks that could be prefetched over available worker capacity.
When there are a few tasks (<100) or when tasks are long running, this option should
be set to 0 for better load balancing. Default is 0.
suppress_failure : Bool
If set, the interchange will suppress failures rather than terminate early. Default: False
heartbeat_threshold : int
Seconds since the last message from the counterpart in the communication pair:
        (interchange, manager) after which the counterpart is assumed to be unavailable. Default: 120s
heartbeat_period : int
Number of seconds after which a heartbeat message indicating liveness is sent to the
counterpart (interchange, manager). Default:30s
poll_period : int
Timeout period to be used by the executor components in milliseconds. Increasing poll_periods
trades performance for cpu efficiency. Default: 10ms
worker_logdir_root : string
In case of a remote file system, specify the path to where logs will be kept.
"""
@typeguard.typechecked
def __init__(self,
label: str = 'HighThroughputExecutor',
provider: ExecutionProvider = LocalProvider(),
launch_cmd: Optional[str] = None,
address: str = "127.0.0.1",
worker_ports: Optional[Tuple[int, int]] = None,
worker_port_range: Optional[Tuple[int, int]] = (54000, 55000),
interchange_port_range: Optional[Tuple[int, int]] = (55000, 56000),
storage_access: Optional[List[Any]] = None,
working_dir: Optional[str] = None,
worker_debug: bool = False,
cores_per_worker: float = 1.0,
max_workers: Union[int, float] = float('inf'),
prefetch_capacity: int = 0,
heartbeat_threshold: int = 120,
heartbeat_period: int = 30,
poll_period: int = 10,
suppress_failure: bool = False,
managed: bool = True,
worker_logdir_root: Optional[str] = None):
logger.debug("Initializing HighThroughputExecutor")
self.label = label
self.launch_cmd = launch_cmd
self.provider = provider
self.worker_debug = worker_debug
self.storage_access = storage_access if storage_access is not None else []
if len(self.storage_access) > 1:
raise ConfigurationError('Multiple storage access schemes are not supported')
self.working_dir = working_dir
self.managed = managed
self.blocks = {} # type: Dict[str, str]
self.tasks = {} # type: Dict[str, Future]
self.cores_per_worker = cores_per_worker
self.max_workers = max_workers
self.prefetch_capacity = prefetch_capacity
self._task_counter = 0
self.address = address
self.worker_ports = worker_ports
self.worker_port_range = worker_port_range
self.interchange_port_range = interchange_port_range
self.heartbeat_threshold = heartbeat_threshold
self.heartbeat_period = heartbeat_period
self.poll_period = poll_period
self.suppress_failure = suppress_failure
self.run_dir = '.'
self.worker_logdir_root = worker_logdir_root
if not launch_cmd:
self.launch_cmd = ("process_worker_pool.py {debug} {max_workers} "
"-p {prefetch_capacity} "
"-c {cores_per_worker} "
"--poll {poll_period} "
"--task_url={task_url} "
"--result_url={result_url} "
"--logdir={logdir} "
"--block_id={{block_id}} "
"--hb_period={heartbeat_period} "
"--hb_threshold={heartbeat_threshold} ")
def initialize_scaling(self):
""" Compose the launch command and call the scale_out
This should be implemented in the child classes to take care of
executor specific oddities.
"""
debug_opts = "--debug" if self.worker_debug else ""
max_workers = "" if self.max_workers == float('inf') else "--max_workers={}".format(self.max_workers)
worker_logdir = "{}/{}".format(self.run_dir, self.label)
if self.worker_logdir_root is not None:
worker_logdir = "{}/{}".format(self.worker_logdir_root, self.label)
l_cmd = self.launch_cmd.format(debug=debug_opts,
prefetch_capacity=self.prefetch_capacity,
task_url=self.worker_task_url,
result_url=self.worker_result_url,
cores_per_worker=self.cores_per_worker,
max_workers=max_workers,
nodes_per_block=self.provider.nodes_per_block,
heartbeat_period=self.heartbeat_period,
heartbeat_threshold=self.heartbeat_threshold,
poll_period=self.poll_period,
logdir=worker_logdir)
self.launch_cmd = l_cmd
logger.debug("Launch command: {}".format(self.launch_cmd))
self._scaling_enabled = self.provider.scaling_enabled
logger.debug("Starting HighThroughputExecutor with provider:\n%s", self.provider)
if hasattr(self.provider, 'init_blocks'):
try:
self.scale_out(blocks=self.provider.init_blocks)
except Exception as e:
logger.error("Scaling out failed: {}".format(e))
raise e
def start(self):
"""Create the Interchange process and connect to it.
"""
self.outgoing_q = zmq_pipes.TasksOutgoing("127.0.0.1", self.interchange_port_range)
self.incoming_q = zmq_pipes.ResultsIncoming("127.0.0.1", self.interchange_port_range)
self.command_client = zmq_pipes.CommandClient("127.0.0.1", self.interchange_port_range)
self.is_alive = True
self._executor_bad_state = threading.Event()
self._executor_exception = None
self._queue_management_thread = None
self._start_queue_management_thread()
self._start_local_queue_process()
logger.debug("Created management thread: {}".format(self._queue_management_thread))
if self.provider:
self.initialize_scaling()
else:
self._scaling_enabled = False
logger.debug("Starting HighThroughputExecutor with no provider")
def _queue_management_worker(self):
"""Listen to the queue for task status messages and handle them.
Depending on the message, tasks will be updated with results, exceptions,
or updates. It expects the following messages:
.. code:: python
{
"task_id" : <task_id>
"result" : serialized result object, if task succeeded
... more tags could be added later
}
{
"task_id" : <task_id>
"exception" : serialized exception object, on failure
}
We do not support these yet, but they could be added easily.
.. code:: python
{
"task_id" : <task_id>
"cpu_stat" : <>
"mem_stat" : <>
"io_stat" : <>
"started" : tstamp
}
The `None` message is a die request.
"""
logger.debug("[MTHREAD] queue management worker starting")
while not self._executor_bad_state.is_set():
try:
msgs = self.incoming_q.get(timeout=1)
# logger.debug("[MTHREAD] get has returned {}".format(len(msgs)))
except queue.Empty:
logger.debug("[MTHREAD] queue empty")
# Timed out.
pass
except IOError as e:
logger.exception("[MTHREAD] Caught broken queue with exception code {}: {}".format(e.errno, e))
return
except Exception as e:
logger.exception("[MTHREAD] Caught unknown exception: {}".format(e))
return
else:
if msgs is None:
logger.debug("[MTHREAD] Got None, exiting")
return
else:
for serialized_msg in msgs:
try:
msg = pickle.loads(serialized_msg)
tid = msg['task_id']
except pickle.UnpicklingError:
raise BadMessage("Message received could not be unpickled")
except Exception:
raise BadMessage("Message received does not contain 'task_id' field")
if tid == -1 and 'exception' in msg:
logger.warning("Executor shutting down due to exception from interchange")
self._executor_exception, _ = deserialize_object(msg['exception'])
logger.exception("Exception: {}".format(self._executor_exception))
# Set bad state to prevent new tasks from being submitted
self._executor_bad_state.set()
# We set all current tasks to this exception to make sure that
# this is raised in the main context.
for task in self.tasks:
self.tasks[task].set_exception(self._executor_exception)
break
task_fut = self.tasks[tid]
if 'result' in msg:
result, _ = deserialize_object(msg['result'])
task_fut.set_result(result)
elif 'exception' in msg:
try:
s, _ = deserialize_object(msg['exception'])
# s should be a RemoteExceptionWrapper... so we can reraise it
try:
s.reraise()
except Exception as e:
task_fut.set_exception(e)
except Exception as e:
# TODO could be a proper wrapped exception?
task_fut.set_exception(
DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
else:
raise BadMessage("Message received is neither result or exception")
if not self.is_alive:
break
logger.info("[MTHREAD] queue management worker finished")
# When the executor gets lost, the weakref callback will wake up
# the queue management thread.
def weakref_cb(self, q=None):
"""We do not use this yet."""
q.put(None)
def _start_local_queue_process(self):
""" Starts the interchange process locally
Starts the interchange process locally and uses an internal command queue to
get the worker task and result ports that the interchange has bound to.
"""
comm_q = Queue(maxsize=10)
self.queue_proc = Process(target=interchange.starter,
args=(comm_q,),
kwargs={"client_ports": (self.outgoing_q.port,
self.incoming_q.port,
self.command_client.port),
"worker_ports": self.worker_ports,
"worker_port_range": self.worker_port_range,
"logdir": "{}/{}".format(self.run_dir, self.label),
"suppress_failure": self.suppress_failure,
"heartbeat_threshold": self.heartbeat_threshold,
"poll_period": self.poll_period,
"logging_level": logging.DEBUG if self.worker_debug else logging.INFO
},
)
self.queue_proc.start()
try:
(worker_task_port, worker_result_port) = comm_q.get(block=True, timeout=120)
except queue.Empty:
logger.error("Interchange has not completed initialization in 120s. Aborting")
raise Exception("Interchange failed to start")
self.worker_task_url = "tcp://{}:{}".format(self.address, worker_task_port)
self.worker_result_url = "tcp://{}:{}".format(self.address, worker_result_port)
def _start_queue_management_thread(self):
"""Method to start the management thread as a daemon.
Checks if a thread already exists, then starts it.
Could be used later as a restart if the management thread dies.
"""
if self._queue_management_thread is None:
logger.debug("Starting queue management thread")
self._queue_management_thread = threading.Thread(target=self._queue_management_worker)
self._queue_management_thread.daemon = True
self._queue_management_thread.start()
logger.debug("Started queue management thread")
else:
logger.debug("Management thread already exists, returning")
def hold_worker(self, worker_id):
"""Puts a worker on hold, preventing scheduling of additional tasks to it.
This is called "hold" mostly because this only stops scheduling of tasks,
and does not actually kill the worker.
Parameters
----------
worker_id : str
Worker id to be put on hold
"""
c = self.command_client.run("HOLD_WORKER;{}".format(worker_id))
logger.debug("Sent hold request to worker: {}".format(worker_id))
return c
@property
def outstanding(self):
outstanding_c = self.command_client.run("OUTSTANDING_C")
# logger.debug("Got outstanding count: {}".format(outstanding_c))
return outstanding_c
@property
def connected_workers(self):
workers = self.command_client.run("WORKERS")
return workers
@property
def connected_managers(self):
workers = self.command_client.run("MANAGERS")
return workers
def _hold_block(self, block_id):
""" Sends hold command to all managers which are in a specific block
Parameters
----------
block_id : str
Block identifier of the block to be put on hold
"""
managers = self.connected_managers
for manager in managers:
if manager['block_id'] == block_id:
logger.debug("[HOLD_BLOCK]: Sending hold to manager:{}".format(manager['manager']))
self.hold_worker(manager['manager'])
def submit(self, func, *args, **kwargs):
"""Submits work to the the outgoing_q.
The outgoing_q is an external process listens on this
queue for new work. This method behaves like a
submit call as described here `Python docs: <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.ThreadPoolExecutor>`_
Args:
- func (callable) : Callable function
- *args (list) : List of arbitrary positional arguments.
Kwargs:
- **kwargs (dict) : A dictionary of arbitrary keyword args for func.
Returns:
Future
"""
if self._executor_bad_state.is_set():
raise self._executor_exception
self._task_counter += 1
task_id = self._task_counter
logger.debug("Pushing function {} to queue with args {}".format(func, args))
self.tasks[task_id] = Future()
fn_buf = pack_apply_message(func, args, kwargs,
buffer_threshold=1024 * 1024,
item_threshold=1024)
msg = {"task_id": task_id,
"buffer": fn_buf}
        # Post task to the outgoing queue
self.outgoing_q.put(msg)
# Return the future
return self.tasks[task_id]
@property
def scaling_enabled(self):
return self._scaling_enabled
def scale_out(self, blocks=1):
"""Scales out the number of blocks by "blocks"
Raises:
NotImplementedError
"""
r = []
for i in range(blocks):
if self.provider:
external_block_id = str(len(self.blocks))
launch_cmd = self.launch_cmd.format(block_id=external_block_id)
internal_block = self.provider.submit(launch_cmd, 1, 1)
logger.debug("Launched block {}->{}".format(external_block_id, internal_block))
if not internal_block:
raise(ScalingFailed(self.provider.label,
"Attempts to provision nodes via provider has failed"))
r.extend([external_block_id])
self.blocks[external_block_id] = internal_block
else:
logger.error("No execution provider available")
r = None
return r
def scale_in(self, blocks=None, block_ids=[]):
"""Scale in the number of active blocks by specified amount.
The scale in method here is very rude. It doesn't give the workers
the opportunity to finish current tasks or cleanup. This is tracked
in issue #530
Parameters
----------
blocks : int
Number of blocks to terminate and scale_in by
block_ids : list
List of specific block ids to terminate. Optional
Raises:
NotImplementedError
"""
if block_ids:
block_ids_to_kill = block_ids
else:
block_ids_to_kill = list(self.blocks.keys())[:blocks]
# Hold the block
for block_id in block_ids_to_kill:
self._hold_block(block_id)
# Now kill via provider
to_kill = [self.blocks.pop(bid) for bid in block_ids_to_kill]
if self.provider:
r = self.provider.cancel(to_kill)
return r
def status(self):
"""Return status of all blocks."""
status = []
if self.provider:
status = self.provider.status(self.blocks.values())
return status
def shutdown(self, hub=True, targets='all', block=False):
"""Shutdown the executor, including all workers and controllers.
This is not implemented.
Kwargs:
- hub (Bool): Whether the hub should be shutdown, Default:True,
- targets (list of ints| 'all'): List of block id's to kill, Default:'all'
- block (Bool): To block for confirmations or not
Raises:
NotImplementedError
"""
logger.info("Attempting HighThroughputExecutor shutdown")
# self.outgoing_q.close()
# self.incoming_q.close()
self.queue_proc.terminate()
logger.info("Finished HighThroughputExecutor shutdown attempt")
return True
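# Minimal usage sketch (illustrative, not part of this module): registering the executor in a Parsl
# Config and submitting work through an app, which routes calls into submit() above. Assumes a
# matching parsl installation; LocalProvider is only one of the supported providers.
#
#   import parsl
#   from parsl.config import Config
#   from parsl.app.app import python_app
#
#   parsl.load(Config(executors=[HighThroughputExecutor(label='htex_local',
#                                                       provider=LocalProvider(),
#                                                       max_workers=2)]))
#
#   @python_app
#   def double(x):
#       return 2 * x
#
#   print(double(21).result())   # blocks on the Future returned by submit()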
|
main.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import re
import time
import datetime
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import threading
def comment(browser):
browser.get('https://www.instagram.com/')
time.sleep(5)
username = browser.find_element_by_name("username")
username.send_keys('YOUR USERNAME')
passw = browser.find_element_by_name("password")
passw.send_keys('YOUR PASSWORD')
passw.send_keys(Keys.RETURN)
time.sleep(40)
for i in range(5):
browser.get('POST LINK')
commentArea = browser.find_element_by_class_name('Ypffh')
commentArea.click()
time.sleep(5)
commentArea = browser.find_element_by_class_name('Ypffh')
commentArea.click()
commentArea.send_keys("Using selenium to comment in burst of 5 ")
commentArea.send_keys(Keys.RETURN)
time.sleep(5)
if __name__ == '__main__':
browser1 = webdriver.Chrome()
browser2 = webdriver.Chrome()
threading.Thread(target=comment, args=[browser1]).start()
threading.Thread(target=comment, args=[browser2]).start()
# comment(browser)
|
routes.py
|
from .entities import Board
from .strategy import need_food, check_attack, detect_wall_tunnels
from .utils import timing, get_direction, add, neighbours, touching, food_in_box, available_next_positions
from .algorithms import bfs, find_safest_positions, rate_food, flood_fill, rate_cell, longest_path
from .constants import SNAKE_TAUNT, SNAKE_NAME, SNAKE_COLOR, SNAKE_HEAD, SNAKE_TAIL, SNAKE_IMAGE, DIR_NAMES, DIR_VECTORS, FOOD_BOXED_IN_HEALTH,\
SNAKE_SECONDARY_COLOR, DISABLE_ATTACKING, FOOD_HUNGRY_HEALTH, SAFE_SPACE_FACTOR, TAIL_PREFERENCE_FACTOR, LOG_LEVEL,\
SNAKE, FOOD, SPOILED, EMPTY, START_TAUNT
from threading import Thread
from math import floor
from copy import deepcopy
import bottle
import logging
import traceback
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
@bottle.route('/static/<path:path>')
def static(path):
return bottle.static_file(path, root='static/')
@bottle.route('/')
@bottle.post('/start')
@bottle.post('/start/')
def start():
logger.info("GAME START")
return {
'color': SNAKE_COLOR,
'secondary_color': SNAKE_SECONDARY_COLOR,
'taunt': START_TAUNT,
'head_url': ('http://%s/static/%s' % (bottle.request.get_header('host'), SNAKE_IMAGE)),
'name': SNAKE_NAME,
'head_type': SNAKE_HEAD,
'tail_type': SNAKE_TAIL
}
@bottle.post('/move')
@bottle.post('/move/')
def move():
data = {}
move = None
time_remaining = [150] # leave 50ms for network
potential_snake_positions = []
bad_positions = []
with timing("bottle", time_remaining):
data = bottle.request.json
try:
# Get general direction and fallback move
with timing("data parsing", time_remaining):
board = Board(**data)
snake = board.get_snake(data['you']['id'])
except Exception as e:
logger.error("Failure handling request - %s" % str(e))
return {'move': 'up'} # In this case we don't know what the board looks like so guess
try:
# Get spots that an enemy snake could move into and also set snakes that are guaranteed to die as empty squares
with timing("setup board and gather data", time_remaining):
initial_floodfill_board = deepcopy(board)
for enemy in board.enemies:
enemy_options = available_next_positions(board, enemy)
if (len(enemy_options) == 0 and snake.head not in neighbours(enemy.head)) or enemy.attributes['health'] == 0:
for pos in enemy.coords:
board.set_cell(pos, EMPTY)
initial_floodfill_board.set_cell(pos, EMPTY)
continue
potential_snake_positions.extend(enemy_options)
# floodfill in each direction with potential attack positions so we don't
# do something dumb (predicts dead ends that can be seen next turn)
for pos in enemy_options:
if pos not in neighbours(snake.head) or enemy.attributes['length'] >= snake.attributes['length']:
initial_floodfill_board.set_cell(pos, SNAKE)
with timing("detect dangerous tunnels against walls (snake head at other end)", time_remaining):
bad_positions.extend(detect_wall_tunnels(board))
for pos in bad_positions:
initial_floodfill_board.set_cell(pos, SNAKE)
# Flood fill in each direction to find bad directions
with timing("intial flood fill detection", time_remaining):
number_of_squares = []
boxed_in = False
# Get size of space we can safely move into (should be larger than body size)
safe_space_size = snake.attributes.get('length') * SAFE_SPACE_FACTOR
for pos in available_next_positions(board, snake):
if pos in bad_positions:
continue
flooded_squares = flood_fill(initial_floodfill_board, pos, False)
square_count = len(flooded_squares)
number_of_squares.append([pos, square_count, any(x in neighbours(snake.head) for x in flooded_squares)])
if square_count <= safe_space_size:
bad_positions.append(pos)
# If all are bad don't set the largest as bad
if all(pos[1] <= safe_space_size for pos in number_of_squares):
boxed_in = True
for square in number_of_squares:
# if tail present then scale region size by TAIL_PREFERENCE_FACTOR
if square[2]:
square[1] *= TAIL_PREFERENCE_FACTOR
# go through each option and remove the largest from bad positions
number_of_squares = sorted(number_of_squares, key=lambda x: x[1], reverse=True)
for x in range(0, len(number_of_squares)):
# remove from bad_positions if it's the largest or has the same length as the largest
if number_of_squares[0][1] == number_of_squares[x][1]:
bad_positions.remove(number_of_squares[x][0])
# Check if we have the opportunity to attack
with timing("check_attack", time_remaining):
attack = check_attack(board, bad_positions, snake)
# combine and get rid of duplicates
bad_positions = list(set(potential_snake_positions + bad_positions))
# Check if we need food (or if there is any that we can reach)
with timing("need_food", time_remaining):
food = need_food(board, bad_positions, snake)
# If we have the opportunity to attack and are not starving then attack
if attack and not DISABLE_ATTACKING and (snake.attributes['health'] > FOOD_HUNGRY_HEALTH or not food):
logger.info("ATTACKING")
move = get_direction(snake.head, attack)
# if we are boxed in, not attacking, and are in good health then we need to find an exit and max our movement
if boxed_in and not move and (snake.attributes['health'] > FOOD_BOXED_IN_HEALTH or not food_in_box(flood_fill(board, snake.head, True), board)):
logger.info("BOXED IN")
with timing("boxed_in", time_remaining):
                # get the flooded squares of the initial floodfill board as that signifies boxed_in
flooded_squares = flood_fill(initial_floodfill_board, snake.head, True)
exit = None
                # loop through all snakes starting from the tail; if a piece is adjacent to the flooded region, choose the closest one that will be available by the time we get there
for snek in board.snakes:
turns_until_space = 0
for piece in list(reversed(snek.body)):
turns_until_space += 1
for pos in flooded_squares:
if touching(pos, piece) and (exit is None or exit[1] > turns_until_space):
exit = (pos, turns_until_space)
break
# if there isn't a good exit then we need to fallback
if exit:
# if the area is more than a certain amount the longest path takes too long
if len(flooded_squares) > 20:
directions = []
thread_pool = []
next_move = []
for position in [v[0] for v in number_of_squares if v[1] > 0]:
if position in bad_positions:
continue
directions.append((position, get_direction(snake.head, position)))
                            t = Thread(target=bfs, args=(position, exit[0], board, bad_positions, next_move), kwargs={'include_start': True, 'boxed': True})
thread_pool.append(t)
for thread in thread_pool:
thread.start()
thread.join()
next_move = [path for path in next_move if not len(path) == 0]
if len(next_move) > 0:
path = max([move for move in next_move], key=len)
move = get_direction(snake.head, path[0])
else:
path = longest_path(snake.head, exit[0], board, bad_positions)
if len(path) > 0:
move = get_direction(snake.head, path[0])
# If we need food find a good path to said food (prioritized over attacking/boxed in when hungry)
if food and not move:
logger.info("FOOD")
with timing("find_food", time_remaining):
food_positions_ratings = rate_food(snake, board, food)
thread_pool = []
next_move = []
for position in [position[0] for position in food_positions_ratings]:
                    t = Thread(target=bfs, args=(snake.head, position, board, bad_positions, next_move))
thread_pool.append(t)
for thread in thread_pool:
thread.start()
thread.join()
next_move = [path for path in next_move if not len(path) == 0]
if len(next_move) > 0:
food_moves = []
                    # verify that at least one path returned had a positive rating
for move in next_move:
for pos in food_positions_ratings:
if pos[0] == move[-1]:
food_moves.append((move, pos[1]))
# if we have more than one option then figure out if we want to get rid of any poor ratings
if len(food_moves) > 1:
pos_moves = [move for move in food_moves if move[1] > 0]
food_moves = pos_moves if pos_moves else food_moves
path = min([move[0] for move in food_moves], key=len)
move = get_direction(snake.head, path[0])
# If we don't need food, don't have the opportunity to attack, and are not boxed in then find a path to a "good" position on the board
if not move:
logger.info("SAFEST")
with timing("find_safest_positions", time_remaining):
positions = find_safest_positions(snake, board, bad_positions)
positions = [position[0] for position in positions]
thread_pool = []
next_move = []
for position in positions:
                    t = Thread(target=bfs, args=(snake.head, position, board, bad_positions, next_move))
thread_pool.append(t)
for thread in thread_pool:
thread.start()
thread.join()
if len(next_move) > 0:
# instead of max or min choose path with the best rated average
path = max([(path, sum(rate_cell(point, board, snake) for point in path)/len(path)) for path in next_move], key=lambda x: x[1])[0]
move = get_direction(snake.head, path[0])
except Exception as e:
logger.error("Code failure - %s \n %s" % (str(e), str(traceback.format_exc())))
try:
# If code above failed then fallback to a floodfill style move
if not move:
logger.info("FALLBACK")
with timing("floodfill fallback", time_remaining):
temp_board = deepcopy(board)
for pos in potential_snake_positions:
temp_board.set_cell(pos, SNAKE)
# try flood fill with bad positions and no worry tails included
floods = {
"up": len(flood_fill(temp_board, (snake.head[0], snake.head[1]-1))),
"down": len(flood_fill(temp_board, (snake.head[0], snake.head[1]+1))),
"right": len(flood_fill(temp_board, (snake.head[0]+1, snake.head[1]))),
"left": len(flood_fill(temp_board, (snake.head[0]-1, snake.head[1])))
}
# less restrictive as it doesn't look at the potential next move off of food
if all(direction == 0 for direction in floods.values()):
for pos in potential_snake_positions:
if board.get_cell(pos) == EMPTY:
temp_board.set_cell(pos, EMPTY)
floods = {
"up": len(flood_fill(temp_board, (snake.head[0], snake.head[1]-1))),
"down": len(flood_fill(temp_board, (snake.head[0], snake.head[1]+1))),
"right": len(flood_fill(temp_board, (snake.head[0]+1, snake.head[1]))),
"left": len(flood_fill(temp_board, (snake.head[0]-1, snake.head[1])))
}
# less restrictive as it doesn't look at the potential next move on food
if all(direction == 0 for direction in floods.values()):
for pos in potential_snake_positions:
if board.get_cell(pos) in [FOOD, SPOILED]:
temp_board.set_cell(pos, EMPTY)
floods = {
"up": len(flood_fill(temp_board, (snake.head[0], snake.head[1]-1))),
"down": len(flood_fill(temp_board, (snake.head[0], snake.head[1]+1))),
"right": len(flood_fill(temp_board, (snake.head[0]+1, snake.head[1]))),
"left": len(flood_fill(temp_board, (snake.head[0]-1, snake.head[1])))
}
move = max(iter(floods.keys()), key=(lambda key: floods[key]))
except Exception as e:
logger.error("Fallback failure - %s \n %s" % (str(e), str(traceback.format_exc())))
move = "up" # Something is really messed up if this happens
# Verify we didn't pick a bad move (wall or snake) - shouldn't happen but there if needed
with timing("verify move", time_remaining):
m_move = add(snake.head, DIR_VECTORS[DIR_NAMES.index(move)])
if board.inside(m_move) and board.get_cell(m_move) == SNAKE:
logger.info("CHANGED MOVE - verify fallback.")
for direction in DIR_NAMES:
m_move = add(snake.head, DIR_VECTORS[DIR_NAMES.index(direction)])
if board.inside(m_move) and board.get_cell(m_move) != SNAKE:
move = direction
break
turn = floor(data.get('turn', 0)/5)
return {
'move': move, # 'up' | 'down' | 'left' | 'right'
'taunt': SNAKE_TAUNT[turn % 40]
}
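# Serving sketch (illustrative, not part of this module): these bottle routes are typically started
# from a separate entry point, e.g.
#
#   bottle.run(host='0.0.0.0', port=8080)
#
# /start returns the snake's appearance metadata and /move returns {'move': ..., 'taunt': ...} each turn.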
|
helper.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 22 11:53:52 2017
@author: GustavZ
"""
import datetime
import cv2
import threading
import time
import tensorflow as tf
import rospy
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
import Queue
elif PY3:
import queue as Queue
class FPS:
# from https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
class FPS2:
def __init__(self, interval):
self._glob_start = None
self._glob_end = None
self._glob_numFrames = 0
self._local_start = None
self._local_numFrames = 0
self._interval = interval
self.curr_local_elapsed = None
self.first = False
def start(self):
self._glob_start = datetime.datetime.now()
self._local_start = self._glob_start
return self
def stop(self):
self._glob_end = datetime.datetime.now()
def update(self):
self.first = True
curr_time = datetime.datetime.now()
self.curr_local_elapsed = (curr_time - self._local_start).total_seconds()
self._glob_numFrames += 1
self._local_numFrames += 1
if self.curr_local_elapsed > self._interval:
rospy.loginfo("FPS: {}".format(self.fps_local()))
self._local_numFrames = 0
self._local_start = curr_time
def elapsed(self):
return (self._glob_end - self._glob_start).total_seconds()
def fps(self):
return self._glob_numFrames / self.elapsed()
def fps_local(self):
if self.first:
return round(self._local_numFrames / self.curr_local_elapsed,1)
else:
return 0.0
class WebcamVideoStream:
# with modifications from https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/
def __init__(self, src, width, height):
# initialize the video camera stream and read the first frame
# from the stream
self.frame_counter = 1
self.width = width
self.height = height
self.stream = cv2.VideoCapture(src)
self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
#Debug stream shape
self.real_width = int(self.stream.get(3))
self.real_height = int(self.stream.get(4))
print("> Start video stream with shape: {},{}".format(self.real_width,self.real_height))
def start(self):
# start the thread to read frames from the video stream
threading.Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
self.stream.release()
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
self.frame_counter += 1
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def isActive(self):
# check if VideoCapture is still Opened
        return self.stream.isOpened()
def resize(self):
try:
self.frame = cv2.resize(self.frame, (self.width, self.height))
except:
print("> Error resizing video stream")
class SessionWorker():
# from https://github.com/naisy/realtime_object_detection/blob/master/lib/session_worker.py
# TensorFlow Session Thread
#
# usage:
# before:
# results = sess.run([opt1,opt2],feed_dict={input_x:x,input_y:y})
# after:
# opts = [opt1,opt2]
# feeds = {input_x:x,input_y:y}
    # worker = SessionWorker("TAG",graph,config)
# worker.put_sess_queue(opts,feeds)
# q = worker.get_result_queue()
# if q is None:
# continue
# results = q['results']
# extras = q['extras']
#
# extras: None or frame image data for draw. GPU detection thread doesn't wait result. Therefore, keep frame image data if you want to draw detection result boxes on image.
#
def __init__(self,tag,graph,config):
self.lock = threading.Lock()
self.sess_queue = Queue.Queue()
self.result_queue = Queue.Queue()
self.tag = tag
t = threading.Thread(target=self.execution,args=(graph,config))
t.setDaemon(True)
t.start()
return
def execution(self,graph,config):
self.is_thread_running = True
try:
with tf.Session(graph=graph,config=config) as sess:
while self.is_thread_running:
while not self.sess_queue.empty():
q = self.sess_queue.get(block=False)
opts = q["opts"]
feeds= q["feeds"]
extras= q["extras"]
if feeds is None:
results = sess.run(opts)
else:
results = sess.run(opts,feed_dict=feeds)
self.result_queue.put({"results":results,"extras":extras})
self.sess_queue.task_done()
time.sleep(0.005)
except:
import traceback
traceback.print_exc()
self.stop()
return
def is_sess_empty(self):
if self.sess_queue.empty():
return True
else:
return False
def put_sess_queue(self,opts,feeds=None,extras=None):
self.sess_queue.put({"opts":opts,"feeds":feeds,"extras":extras})
return
def is_result_empty(self):
if self.result_queue.empty():
return True
else:
return False
def get_result_queue(self):
result = None
if not self.result_queue.empty():
result = self.result_queue.get(block=False)
self.result_queue.task_done()
return result
def stop(self):
self.is_thread_running=False
with self.lock:
while not self.sess_queue.empty():
q = self.sess_queue.get(block=False)
self.sess_queue.task_done()
return
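# Usage sketch (illustrative, following the comment block above): feed (opts, feeds) pairs in and
# poll results out. The detection_graph/tf_config/input-output tensors are placeholders supplied by
# the caller.
#
#   worker = SessionWorker("GPU", detection_graph, tf_config)
#   worker.put_sess_queue([output_tensor], {input_tensor: image}, extras=frame)
#   q = worker.get_result_queue()
#   if q is not None:
#       results, extras = q['results'], q['extras']
#   worker.stop()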
|
AutDriver.py
|
from threading import Thread
from typing import List, Union
import time
import onnxruntime as rt
import cv2
import os
from PIL import Image
import numpy as np
from autcar import Camera, Car
class Model:
def __init__(self, model_file_path: str, execution_interval: float = 2, name = None):
"""
Model objects are used to define where a model is located and how often the model should be called during driving
@param model_file_path: A file path that points to the .onnx model file
@param execution_interval: Defines a timeout in seconds during repeated model calls
@param name: Is used to give your model a short name to identify it when multiple models are used
"""
self.name = name
self.model_file_path = model_file_path
self.last_command = None
self.execution_interval = execution_interval
def preprocess(self, image: Image):
"""
Preprocess is used to adjust (e.g. scale) the image before it is handed over to the neural network for prediction. It must return a numpy array representation of the image: [1 x channels x image_height x image_width]
        @param image: You get a Pillow Image object from the car; you can use it for scaling, normalization etc.
"""
processed_image = image.resize((224,168), Image.LINEAR)
X = np.array([np.moveaxis((np.array(processed_image).astype('float32')), -1, 0)])
X -= np.mean(X, keepdims=True)
X /= (np.std(X, keepdims=True) + 1e-6)
return X
class Driver:
def __init__(self, model_instances: Union[Model, List[Model]], car: Car, camera: Camera, execution_function = None, execution_interval: int = 2):
"""
A Driver object is used to autonomously drive a car. It needs a car object and a path to a model file
        @param model_instances: Either a Model object or a list of Model objects that point to the location of the .onnx model files
@param car: The car object which is used to control the motor
@param camera: A camera object used to let the car take pictures from the environment
@param execution_interval: Defines how often the model is executed. Default is 2 seconds
"""
self.__car = car
self.__cam = camera
self.__prediction_dict = dict()
self.__command_history_len = 5
if(isinstance(model_instances, list) == False):
model_instance_list = [model_instances]
else:
model_instance_list = model_instances
threads = []
for i, model_instance in enumerate(model_instance_list):
if(os.path.isfile(model_instance.model_file_path) == False):
raise Exception("Error: File %s does not exist. Did you train and create a model file?"%model_instance.model_file_path)
if(model_instance.name is None):
modelname = model_instance.model_file_path
else:
modelname = model_instance.name
thread = Thread(target=self.__predict_onnx, args=(model_instance,modelname,))
self.__prediction_dict[modelname] = [None, None]
threads.append(thread)
if(execution_function is None):
def execute(model_prediction: dict, car: Car, variables: dict):
prediction = model_prediction[list(model_prediction.keys())[0]][0]
last_command = model_prediction[list(model_prediction.keys())[0]][1]
if(prediction == last_command):
return
if(prediction == 0):
print("prediction 0: left light backwards")
car.left("light", "backwards")
elif(prediction == 1):
print("prediction 1: left light forward")
car.left("light", "forward")
elif(prediction == 2):
print("prediction 2: left medium backwards")
car.left("medium", "backwards")
elif(prediction == 3):
print("prediction 3: left medium forward")
car.left("medium", "forward")
elif(prediction == 4):
print("prediction 4: move fast forward")
car.move("forward", "fast")
elif(prediction == 5):
print("prediction 5: move medium backwards")
car.move("backwards", "medium")
elif(prediction == 6):
print("prediction 6: move medium forward")
car.move("forward", "medium")
elif(prediction == 7):
print("prediction 7: right light backwards")
car.right("light", "backwards")
elif(prediction == 8):
print("prediction 8: right light forward")
car.right("light", "forward")
elif(prediction == 9):
print("prediction 9: left medium backwards")
car.right("medium", "backwards")
elif(prediction == 10):
print("prediction 10: right medium forward")
car.right("medium", "forward")
elif(prediction == 11):
print("prediction 11: stop")
car.stop()
execution_function = execute
self.__model_threads = threads
self.__execution_function = execution_function
self.__execution_thread = Thread(target=self.__execute)
self.__variables = dict()
self.__frame = None
self.__stop_driving = False
self.__capture_interval = execution_interval
self.__counter = 0
self.__last_timestamp = 0
def __pad_image(self, image):
target_size = max(image.size)
result = Image.new('RGB', (target_size, target_size), "white")
try:
result.paste(image, (int((target_size - image.size[0]) / 2), int((target_size - image.size[1]) / 2)))
except:
print("Error on image " + image)
raise Exception('pad_image error')
return result
def __normalize(self, arr, desired_mean = 0, desired_std = 1):
arr = arr.astype('float')
for i in range(3):
mean = arr[...,i].mean()
std = arr[...,i].std()
arr[...,i] = (arr[...,i] - mean)*(desired_std/std) + desired_mean
return arr
def __scale_image(self, image, scaling=(224,168)):
try:
return image.resize(scaling, Image.LINEAR)
except:
raise Exception('pad_image error')
# def __drive_tensorflow(self):
# import tensorflow as tf
# from tensorflow.python.platform import gfile
# self.__last_timestamp = time.time()
# with tf.Session() as sess:
# print("load graph")
# with gfile.FastGFile(self.__model_file, 'rb') as f:
# graph_def = tf.GraphDef()
# graph_def.ParseFromString(f.read())
# sess.graph.as_default()
# tf.import_graph_def(graph_def, name='')
# graph_nodes=[n for n in graph_def.node]
# softmax_tensor = sess.graph.get_tensor_by_name('Softmax612:0')
# while True:
# if(self.__stop_driving):
# break
# # We constantly read new images from the cam to empty the VideoCapture buffer
# ret, frame = self.__cam.read()
# self.__frame = frame
# current_time = time.time()
# if(current_time - self.__last_timestamp > self.__capture_interval):
# self.__last_timestamp = current_time
# try:
# img = Image.fromarray(self.__frame)
# except Exception as e:
# print("Cant read image")
# try:
# processed_image = equalize(self.__scale_image(self.__pad_image(img)))
# except Exception as e:
# print("Err while reading image")
# X = np.array([np.moveaxis(np.array(processed_image), -1, 0)])/255.0
# pred = sess.run(softmax_tensor, {'Input501:0': X})
# print(pred)
# index = np.argmax(pred)
# if(index == 0):
# if(self.__last_command == "forward"):
# continue
# print("forward")
# self.__last_command = "forward"
# self.__car.move("forward", "medium")
# elif(index == 1):
# if(self.__last_command == "left"):
# continue
# print("left")
# self.__last_command = "left"
# self.__car.left("light", "forward")
# elif(index == 2):
# if(self.__last_command == "right"):
# continue
# print("right")
# self.__last_command = "right"
# self.__car.right("light", "forward")
def __predict_onnx(self, model_instance: Model, modelname: str):
#last_timestamp = time.time()
sess = rt.InferenceSession(model_instance.model_file_path)
input_name = sess.get_inputs()[0].name
label_name = sess.get_outputs()[0].name
while True:
if(self.__stop_driving):
break
            if(isinstance(self.__frame,(np.ndarray, np.generic)) == False):
                time.sleep(model_instance.execution_interval)
                continue
#if(current_time - last_timestamp > self.__capture_interval):
#last_timestamp = current_time
try:
# OpenCV reads BGR, Pillow reads RGB -> convert
imgconv = cv2.cvtColor(self.__frame, cv2.COLOR_BGR2RGB)
img = Image.fromarray(imgconv)
except Exception as e:
print("Cant read image: " + str(e))
try:
X = model_instance.preprocess(img)
except Exception as e:
print("Error while preprocessing image: "+str(e))
#r, g, b = processed_image.split()
#processed_image = Image.merge("RGB", (b, g, r))
#X = np.array([np.moveaxis((np.array(processed_image).astype('float32')-128), -1, 0)])
pred = sess.run([label_name], {input_name: X.astype(np.float32)})[0]
prediction = int(np.argmax(pred))
if(len(self.__prediction_dict[modelname]) >= self.__command_history_len):
del self.__prediction_dict[modelname][-1]
self.__prediction_dict[modelname] = [prediction] + self.__prediction_dict[modelname]
# print(self.__prediction_dict[modelname])
time.sleep(model_instance.execution_interval)
def __execute(self):
self.__last_timestamp = time.time()
while True:
if(self.__stop_driving):
break
# We constantly read new images from the cam to empty the VideoCapture buffer
ret, frame = self.__cam.read()
self.__frame = frame
current_time = time.time()
if(current_time - self.__last_timestamp > self.__capture_interval):
self.__last_timestamp = current_time
self.__execution_function(self.__prediction_dict, self.__car, self.__variables)
def start(self):
"""
Start the self driving methods
"""
print("Driver started")
try:
for model_thread in self.__model_threads:
model_thread.start()
self.__execution_thread.start()
except KeyboardInterrupt:
print("Keyboard interrupt")
exit()
def stop(self):
self.__stop_driving = True
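# Usage sketch (illustrative, not part of the original module): wiring a trained ONNX model into the
# Driver. The file name and the Car()/Camera() constructor calls are placeholders; adjust them to
# your autcar setup.
#
#   car = Car()
#   camera = Camera()
#   model = Model("driver_model.onnx", execution_interval=1, name="lane_follower")
#   driver = Driver(model, car, camera)
#   driver.start()
#   # ... drive ...
#   driver.stop()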
|
01_calc_square.py
|
import time
import threading
def calc_sq(num):
print("Calculate Square numbers: ")
for n in num:
time.sleep(1)
print(f"Square {n*n}\n")
def calc_cube(num):
print("Calculate Cube numbers: ")
for n in num:
time.sleep(1)
print(f"Cube {n*n*n}\n")
arr = [2, 3, 8, 9]
start = time.time()
t1 = threading.Thread(target=calc_sq, args=(arr,))
t2 = threading.Thread(target=calc_cube, args=(arr,))
t1.start()
t2.start()
t1.join()
t2.join()
end = time.time()
print(f"In the main thread, time taken {end-start}")
|
test_socket.py
|
##
# Copyright (c) 2013 Yury Selivanov
# License: Apache 2.0
##
import asyncio
import unittest
import grenado
import grenado.socket as greensocket
class SocketTests(unittest.TestCase):
def setUp(self):
        asyncio.set_event_loop_policy(grenado.GreenEventLoopPolicy())
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
def tearDown(self):
self.loop.close()
asyncio.set_event_loop_policy(None)
def test_socket_wrong_event_loop(self):
loop = asyncio.DefaultEventLoopPolicy().new_event_loop()
asyncio.set_event_loop(loop)
self.assertRaises(AssertionError, greensocket.socket)
def test_socket_docs(self):
self.assertIn('accept connections', greensocket.socket.listen.__doc__)
self.assertIn('Receive', greensocket.socket.recv.__doc__)
def test_socket_setblocking(self):
sock = greensocket.socket()
self.assertEquals(sock.gettimeout(), 0)
with self.assertRaisesRegex(
greensocket.error, 'does not support blocking mode'):
sock.setblocking(True)
def test_socket_echo(self):
import socket as std_socket
import threading
import time
check = 0
ev = threading.Event()
def server(sock_factory):
socket = sock_factory()
socket.bind(('127.0.0.1', 0))
assert socket.fileno() is not None
nonlocal addr
addr = socket.getsockname()
socket.listen(1)
ev.set()
sock, client_addrs = socket.accept()
assert isinstance(sock, sock_factory)
data = b''
while not data.endswith(b'\r'):
data += sock.recv(1024)
sock.sendall(data)
ev.wait()
ev.clear()
sock.close()
socket.close()
def client(sock_factory):
ev.wait()
ev.clear()
time.sleep(0.1)
assert addr
sock = sock_factory()
sock.connect(addr)
data = b'hello greenlets\r'
sock.sendall(data)
rep = b''
while not rep.endswith(b'\r'):
rep += sock.recv(1024)
self.assertEqual(data, rep)
ev.set()
nonlocal check
check += 1
sock.close()
addr = None
ev.clear()
thread = threading.Thread(target=client, args=(std_socket.socket,))
thread.setDaemon(True)
thread.start()
self.loop.run_until_complete(
            grenado.task(server)(greensocket.socket))
thread.join(1)
self.assertEqual(check, 1)
addr = None
ev.clear()
thread = threading.Thread(target=server, args=(std_socket.socket,))
thread.setDaemon(True)
thread.start()
self.loop.run_until_complete(
            grenado.task(client)(greensocket.socket))
thread.join(1)
self.assertEqual(check, 2)
def test_files_socket_echo(self):
import socket as std_socket
import threading
import time
check = 0
ev = threading.Event()
def server(sock_factory):
socket = sock_factory()
socket.bind(('127.0.0.1', 0))
assert socket.fileno() is not None
nonlocal addr
addr = socket.getsockname()
socket.listen(1)
ev.set()
sock, client_addrs = socket.accept()
assert isinstance(sock, sock_factory)
rfile = sock.makefile('rb')
data = rfile.read(1024)
while not data.endswith(b'\r'):
data += rfile.read(1024)
wfile = sock.makefile('wb')
wfile.write(data)
ev.wait()
ev.clear()
sock.close()
socket.close()
def client(sock_factory):
ev.wait()
ev.clear()
time.sleep(0.1)
assert addr
sock = sock_factory()
sock.connect(addr)
data = b'hello greenlets\r'
sock.sendall(data)
rep = b''
while not rep.endswith(b'\r'):
rep += sock.recv(1024)
self.assertEqual(data, rep)
ev.set()
nonlocal check
check += 1
sock.close()
addr = None
ev.clear()
thread = threading.Thread(target=client, args=(std_socket.socket,))
thread.setDaemon(True)
thread.start()
self.loop.run_until_complete(
            grenado.task(server)(greensocket.socket))
thread.join(1)
self.assertEqual(check, 1)
|
common.py
|
#common
import sys, argparse, os.path, json, io, glob, time
import pathlib, urllib.request, shutil
from collections import OrderedDict
def as_bytes(x, encoding='utf-8'):
if isinstance(x, str):
return x.encode(encoding)
if isinstance(x, bytes):
return x
if isinstance(x, bytearray):
return bytes(x)
if isinstance(x, memoryview):
return x.tobytes()
ans = str(x)
if isinstance(ans, str):
ans = ans.encode(encoding)
return ans
def as_unicode(x, encoding='utf-8', errors='strict'):
if isinstance(x, bytes):
return x.decode(encoding, errors)
return str(x)
def is_binary(stream):
mode = getattr(stream, "mode", None)
if mode:
return "b" in mode
return not isinstance(stream, io.TextIOBase)
def prints(*a, **kw):
" Print either unicode or bytes to either binary or text mode streams "
import sys
stream = kw.get('file', sys.stdout)
if stream is None:
return
sep, end = kw.get('sep'), kw.get('end')
if sep is None:
sep = ' '
if end is None:
end = '\n'
if is_binary(stream):
encoding = getattr(stream, 'encoding', None) or 'utf-8'
a = (as_bytes(x, encoding=encoding) for x in a)
sep = as_bytes(sep)
end = as_bytes(end)
else:
a = (as_unicode(x, errors='replace') for x in a)
sep = as_unicode(sep)
end = as_unicode(end)
for i, x in enumerate(a):
if sep and i != 0:
stream.write(sep)
stream.write(x)
if end:
stream.write(end)
if kw.get('flush'):
try:
stream.flush()
except Exception:
pass
# dependency package
missing_package = False
def install(package, test=None):
global missing_package
    import sys, importlib.util, subprocess
test = test or package
spam_spec = importlib.util.find_spec(package)
found = spam_spec is not None
if not found:
if missing_package:
prints('Missing dependency')
missing_package = True
        prints('Installing:', package)
subprocess.check_call([sys.executable, '-m', 'pip', '-q', 'install', package])
install('keyboard')
if missing_package:
    prints('All dependencies are installed')
prints()
import keyboard
from github import GitHub
GITHUB_DATA = GitHub('un-pogaz', 'MC-generated-data')
GITHUB_BUILDER = GitHub('un-pogaz', 'MC-utility-tools')
animation_loop = ['. ',' . ',' .']
def run_animation(awaitable, text_wait, text_end=None):
import asyncio
global animation_run
def start_animation():
global animation_run
idx = 0
while animation_run:
print(text_wait + animation_loop[idx % len(animation_loop)], end="\r")
idx += 1
            if idx == len(animation_loop): idx = 0
time.sleep(0.2)
from threading import Thread
animation_run = True
t = Thread(target=start_animation)
t.start()
asyncio.run(awaitable())
animation_run = False
prints(text_wait, text_end or '', ' ' * len(animation_loop[0]))
time.sleep(0.3)
del t
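# Illustrative usage sketch (not invoked by this module): run_animation() expects
# a zero-argument callable returning a coroutine; the spinner thread prints over
# the same line until asyncio.run() finishes awaiting it.
def _run_animation_example():
    import asyncio
    async def slow_task():
        await asyncio.sleep(1)
    run_animation(slow_task, 'Working on a slow task', 'done.')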
def make_dirname(path):
dir = os.path.dirname(path)
if dir and not os.path.exists(dir):
os.makedirs(dir)
def read_json(path, default=None):
try:
with open(path, 'r') as f:
return json.load(f)
    except Exception:
return default or {}
def write_json(path, obj):
make_dirname(path)
    with open(path, 'w') as f:
json.dump(obj, f, indent=2)
def write_lines(path, lines):
make_dirname(path)
with open(path, 'w') as f:
if len(lines) == 0:
f.write('')
else:
f.writelines(l+'\n' for l in lines[:-1])
f.write(lines[-1])
def safe_del(path):
def remove(a):
pass
if os.path.exists(path):
if os.path.isfile(path):
remove = os.remove
if os.path.isdir(path):
remove = shutil.rmtree
if os.path.islink(path):
remove = os.unlink
try:
remove(path)
except Exception as ex:
pass
BUF_SIZE = 65536
def hash_file(algo, path):
import hashlib
if os.path.exists(path):
with open(path, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
algo.update(data)
return algo.hexdigest()
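# Illustrative usage sketch (not invoked by this module): hash_file() takes an
# already-constructed hashlib object, feeds it the file in BUF_SIZE chunks and
# returns the hex digest (or None if the path does not exist).
def _hash_file_example(path='version_manifest.json'):
    import hashlib
    return hash_file(hashlib.sha256(), path)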
VERSION_MANIFEST = None
def update_version_manifest():
global VERSION_MANIFEST
version_manifest_path = os.path.join('version_manifest.json')
VERSION_MANIFEST = read_json(version_manifest_path, { 'latest':{'release': None, 'snapshot': None}, 'versions':[], 'pack_format':{}, 'versioning':{}})
edited = False
def update_version_manifest(read_manifest):
edited = False
if VERSION_MANIFEST['latest']['release'] != read_manifest['latest']['release']:
VERSION_MANIFEST['latest']['release'] = read_manifest['latest']['release']
edited = True
if VERSION_MANIFEST['latest']['snapshot'] != read_manifest['latest']['snapshot']:
VERSION_MANIFEST['latest']['snapshot'] = read_manifest['latest']['snapshot']
edited = True
versions = { v['id']:v for v in VERSION_MANIFEST['versions'] }
for k,v in { v['id']:v for v in read_manifest['versions'] }.items():
if 'sha1' in v: del v['sha1']
if 'complianceLevel' in v: del v['complianceLevel']
if k not in versions:
versions[k] = v
edited = True
VERSION_MANIFEST['versions'] = versions.values()
return edited
with urllib.request.urlopen(GITHUB_DATA.get_raw('main', 'version_manifest.json')) as fl:
github_manifest = json.load(fl)
if update_version_manifest(github_manifest):
edited = True
    def sub_tree(sub_name):
        nonlocal edited
        for v in github_manifest[sub_name]:
i = VERSION_MANIFEST[sub_name]
ni = github_manifest[sub_name]
if v == 'special':
if v not in i:
i[v] = []
edited = True
iv = i[v]
for idx, e in enumerate(ni[v], start=0):
if e not in iv:
iv.insert(idx, e)
edited = True
else:
if v not in i:
i[v] = {}
edited = True
iv = i[v]
niv = ni[v]
for t in niv:
if t not in iv:
iv[t] = []
edited = True
ivt = iv[t]
nivt = niv[t]
for idx, e in enumerate(nivt, start=0):
if e not in ivt:
ivt.insert(idx, e)
edited = True
sub_tree('versioning')
sub_tree('pack_format')
with urllib.request.urlopen('https://launchermeta.mojang.com/mc/game/version_manifest_v2.json') as fl:
if update_version_manifest(json.load(fl)):
edited = True
if edited:
VERSION_MANIFEST['versions'] = sorted(VERSION_MANIFEST['versions'], key=lambda item: item['releaseTime'], reverse=True)
write_json(version_manifest_path, VERSION_MANIFEST)
update_version_manifest()
LATEST_RELEASE = VERSION_MANIFEST.get('latest', {}).get('release', None)
LATEST_SNAPSHOT = VERSION_MANIFEST.get('latest', {}).get('snapshot', None)
def find_output(version):
output = glob.glob(f'**/{version}/', root_dir='.', recursive=True)
if len(output):
return output[0]
def get_latest(version, manifest_json_path=None):
if manifest_json_path:
return read_json(manifest_json_path, {'id': None})['id']
if version in ['r','release']:
return LATEST_RELEASE
if version in ['s','snapshot', 'l', 'latest']:
return LATEST_SNAPSHOT
return version
def valide_version(version, quiet = False, manifest_json_path = None):
if manifest_json_path:
return read_json(manifest_json_path, {'id': None})['id']
else:
if not version:
if quiet:
                prints('No version or "manifest_json.json" is declared. One of them is required in quiet mode.')
else:
prints(f'Enter the version:\nid of the version / r or release for the latest release "{LATEST_RELEASE}" / s or snapshot for the latest snapshot "{LATEST_SNAPSHOT}"')
version = input()
version = get_latest(version)
for v in VERSION_MANIFEST['versions']:
if v['id'] == version:
return version
    prints(f'The version {version} is invalid.', '' if quiet else 'Press any key to exit.')
if not quiet:
keyboard.read_key()
sys.exit(-1)
def valide_output(args):
if args.output and os.path.exists(args.output):
        prints(f'The {args.version} already exists at "{args.output}".', 'This output will be overwritten.' if args.overwrite else '' if args.quiet else 'Do you want to overwrite it?')
if (args.quiet and args.overwrite) or input()[:1] == 'y':
args.overwrite = True
else:
sys.exit(-1)
def read_manifest_json(temp, version, manifest_json_path = None):
manifest_url = None
for v in VERSION_MANIFEST['versions']:
if v['id'] == version:
manifest_url = v['url']
break
if not manifest_json_path and not manifest_url:
        prints(f'Impossible to build Generated data for {version}. The requested version is not in the "version_manifest.json".')
return -1
if not manifest_json_path:
manifest_json_path = os.path.join(temp, version+'.json')
urllib.request.urlretrieve(manifest_url, manifest_json_path)
if os.path.splitext(manifest_url)[1].lower() == '.zip':
            with zipfile.ZipFile(manifest_json_path) as zip:
for file in zip.filelist:
if os.path.splitext(file.filename)[1].lower() == '.json':
with zip.open(file) as zi:
manifest_json = json.load(zi)
break
write_json(manifest_json_path, manifest_json)
return read_json(manifest_json_path), manifest_url
def work_done(error, quiet = False):
prints()
if not error:
        prints('Work done successfully.', '' if quiet else 'Press any key to exit.')
if not quiet:
keyboard.read_key()
|
rtk_provider_base.py
|
import os
import time
import json
import datetime
import threading
import math
import re
import collections
import serial
import serial.tools.list_ports
from ..widgets import NTRIPClient
from ...framework.utils import (
helper, resource
)
from ...framework.context import APP_CONTEXT
from ...framework.utils.firmware_parser import parser as firmware_content_parser
from ...framework.utils.print import (print_green, print_yellow, print_red)
from ..base import OpenDeviceBase
from ..configs.openrtk_predefine import (
APP_STR, get_openrtk_products, get_configuratin_file_mapping
)
from ..decorator import with_device_message
from ...models import InternalCombineAppParseRule
from ..upgrade_workers import (
FirmwareUpgradeWorker,
JumpApplicationWorker,
JumpBootloaderWorker
)
from ..parsers.open_field_parser import encode_value
from abc import ABCMeta, abstractmethod
class RTKProviderBase(OpenDeviceBase):
'''
RTK Series UART provider
'''
__metaclass__ = ABCMeta
def __init__(self, communicator, *args):
super(RTKProviderBase, self).__init__(communicator)
self.type = 'RTK'
self.server_update_rate = 100
self.sky_data = []
self.pS_data = []
self.ps_dic = collections.OrderedDict()
self.inspva_flag = 0
self.bootloader_baudrate = 115200
self.app_config_folder = ''
self.device_info = None
self.app_info = None
self.parameters = None
self.setting_folder_path = None
self.data_folder = None
self.debug_serial_port = None
self.rtcm_serial_port = None
self.user_logf = None
self.debug_logf = None
self.rtcm_logf = None
self.debug_c_f = None
self.ntrip_rtcm_logf = None
self.enable_data_log = False
self.is_app_matched = False
self.ntrip_client_enable = False
self.nmea_buffer = []
self.nmea_sync = 0
self.config_file_name = 'openrtk.json'
self.device_category = 'RTK'
self.prepare_folders()
self.ntrip_client = None
self.rtk_log_file_name = ''
self.connected = False
self.port_index_define = {
'user': 0,
'rtcm': 1,
'debug': 2,
}
def prepare_folders(self):
'''
Prepare folders for data storage and configuration
'''
executor_path = resource.get_executor_path()
setting_folder_name = 'setting'
data_folder_path = os.path.join(executor_path, 'data')
if not os.path.isdir(data_folder_path):
os.makedirs(data_folder_path)
self.data_folder = data_folder_path
# copy contents of app_config under executor path
self.setting_folder_path = os.path.join(
executor_path, setting_folder_name)
all_products = get_openrtk_products()
config_file_mapping = get_configuratin_file_mapping()
for product in all_products:
product_folder = os.path.join(self.setting_folder_path, product)
if not os.path.isdir(product_folder):
os.makedirs(product_folder)
for app_name in all_products[product]:
app_name_path = os.path.join(product_folder, app_name)
app_name_config_path = os.path.join(
app_name_path, config_file_mapping[product])
if not os.path.isfile(app_name_config_path):
if not os.path.isdir(app_name_path):
os.makedirs(app_name_path)
app_config_content = resource.get_content_from_bundle(
setting_folder_name,
os.path.join(product,
app_name,
config_file_mapping[product]))
if app_config_content is None:
continue
with open(app_name_config_path, "wb") as code:
code.write(app_config_content)
@property
def is_in_bootloader(self):
''' Check if the connected device is in bootloader mode
'''
if not self.app_info or not self.app_info.__contains__('version'):
return False
version = self.app_info['version']
version_splits = version.split(',')
if len(version_splits) == 1:
if 'bootloader' in version_splits[0].lower():
return True
return False
def bind_device_info(self, device_access, device_info, app_info):
self._build_device_info(device_info)
self._build_app_info(app_info)
self.connected = True
port_name = device_access.port
self._device_info_string = '# Connected {0} with UART on {1} #\nDevice: {2} \nFirmware: {3}'\
.format(self.device_category, port_name, device_info, app_info)
return self._device_info_string
def _build_device_info(self, text):
'''
Build device info
'''
split_text = [x for x in text.split(' ') if x != '']
sn = split_text[4]
# remove the prefix of SN
if sn.find('SN:') == 0:
sn = sn[3:]
self.device_info = {
'name': split_text[0],
'imu': split_text[1],
'pn': split_text[2],
'firmware_version': split_text[3],
'sn': sn
}
def _build_app_info(self, text):
'''
Build app info
'''
app_version = text
split_text = app_version.split(' ')
app_name = next(
(item for item in APP_STR if item in split_text), None)
if not app_name:
app_name = 'RTK_INS'
self.is_app_matched = False
else:
self.is_app_matched = True
self.app_info = {
'app_name': app_name,
'version': text
}
def load_properties(self):
product_name = self.device_info['name']
app_name = self.app_info['app_name']
# Load config from user working path
local_config_file_path = os.path.join(
os.getcwd(), self.config_file_name)
if os.path.isfile(local_config_file_path):
with open(local_config_file_path) as json_data:
self.properties = json.load(json_data)
return
# Load the openimu.json based on its app
app_file_path = os.path.join(
self.setting_folder_path, product_name, app_name, self.config_file_name)
if not self.is_app_matched:
print_yellow(
'Failed to extract app version information from unit.' +
'\nThe supported application list is {0}.'.format(APP_STR) +
                '\nTo keep running, use the INS configuration as default.' +
'\nYou can choose to place your json file under execution path if it is an unknown application.')
with open(app_file_path) as json_data:
self.properties = json.load(json_data)
def ntrip_client_thread(self):
# print('new ntrip client')
self.ntrip_client = NTRIPClient(self.properties)
self.ntrip_client.on('parsed', self.handle_rtcm_data_parsed)
if self.device_info.__contains__('sn') and self.device_info.__contains__('pn'):
self.ntrip_client.set_connect_headers({
'Ntrip-Sn': self.device_info['sn'],
'Ntrip-Pn': self.device_info['pn']
})
self.ntrip_client.run()
def handle_rtcm_data_parsed(self, data):
bytes_data = bytearray(data)
if self.communicator.can_write() and not self.is_upgrading:
self.communicator.write(bytes_data)
self.ntrip_rtcm_logf.write(bytes_data)
def build_connected_serial_port_info(self):
if not self.communicator.serial_port:
return None, None
user_port = self.communicator.serial_port.port
user_port_num = ''
port_name = ''
for i in range(len(user_port)-1, -1, -1):
if (user_port[i] >= '0' and user_port[i] <= '9'):
user_port_num = user_port[i] + user_port_num
else:
port_name = user_port[:i+1]
break
return user_port_num, port_name
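    # Worked example (illustrative): for a user port of 'COM12' this returns
    # ('12', 'COM'); for '/dev/ttyUSB0' it returns ('0', '/dev/ttyUSB'). The
    # numeric suffix is later offset by port_index_define to derive the other
    # UARTs, e.g. 'COM' + str(12 + 2) == 'COM14' for the DEBUG port.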
def after_setup(self):
local_time = time.localtime()
formatted_dir_time = time.strftime("%Y%m%d_%H%M%S", local_time)
formatted_file_time = time.strftime("%Y_%m_%d_%H_%M_%S", local_time)
debug_port = ''
rtcm_port = ''
set_user_para = self.cli_options and self.cli_options.set_user_para
if self.data_folder is None:
raise Exception(
                'Data folder does not exist, please check if the application has permission to create folders')
try:
self.rtk_log_file_name = os.path.join(
self.data_folder, '{0}_log_{1}'.format(self.device_category.lower(), formatted_dir_time))
os.mkdir(self.rtk_log_file_name)
        except Exception:
raise Exception(
                'Cannot create log folder, please check if the application has permission to create folders')
# set parameters from predefined parameters
if set_user_para:
result = self.set_params(
self.properties["initial"]["userParameters"])
if (result['packetType'] == 'success'):
self.save_config()
# check saved result
self.check_predefined_result()
# start ntrip client
if self.properties["initial"].__contains__("ntrip") and not self.ntrip_client and not self.is_in_bootloader:
self.ntrip_rtcm_logf = open(os.path.join(self.rtk_log_file_name, 'ntrip_rtcm_{0}.bin'.format(
formatted_file_time)), "wb")
thead = threading.Thread(target=self.ntrip_client_thread)
thead.start()
try:
if (self.properties["initial"]["useDefaultUart"]):
user_port_num, port_name = self.build_connected_serial_port_info()
if not user_port_num or not port_name:
return False
debug_port = port_name + \
str(int(user_port_num) + self.port_index_define['debug'])
rtcm_port = port_name + \
str(int(user_port_num) + self.port_index_define['rtcm'])
else:
for x in self.properties["initial"]["uart"]:
if x['enable'] == 1:
if x['name'] == 'DEBUG':
debug_port = x["value"]
elif x['name'] == 'GNSS':
rtcm_port = x["value"]
self.user_logf = open(os.path.join(
self.rtk_log_file_name, 'user_{0}.bin'.format(formatted_file_time)), "wb")
if rtcm_port != '':
print_green('{0} log GNSS UART {1}'.format(
self.device_category, rtcm_port))
self.rtcm_serial_port = serial.Serial(
rtcm_port, '460800', timeout=0.1)
if self.rtcm_serial_port.isOpen():
self.rtcm_logf = open(
os.path.join(self.rtk_log_file_name, 'rtcm_rover_{0}.bin'.format(
formatted_file_time)), "wb")
thead = threading.Thread(
target=self.thread_rtcm_port_receiver, args=(self.rtk_log_file_name,))
thead.start()
if debug_port != '':
print_green('{0} log DEBUG UART {1}'.format(
self.device_category, debug_port))
self.debug_serial_port = serial.Serial(
debug_port, '460800', timeout=0.1)
if self.debug_serial_port.isOpen():
self.debug_logf = open(
os.path.join(self.rtk_log_file_name, 'rtcm_base_{0}.bin'.format(
formatted_file_time)), "wb")
thead = threading.Thread(
target=self.thread_debug_port_receiver, args=(self.rtk_log_file_name,))
thead.start()
#self.save_device_info()
except Exception as ex:
if self.debug_serial_port is not None:
if self.debug_serial_port.isOpen():
self.debug_serial_port.close()
if self.rtcm_serial_port is not None:
if self.rtcm_serial_port.isOpen():
self.rtcm_serial_port.close()
self.debug_serial_port = None
self.rtcm_serial_port = None
APP_CONTEXT.get_logger().logger.error(ex)
print_red(
                'Cannot log GNSS UART or DEBUG UART, please check the UART driver and connection!')
return False
def nmea_checksum(self, data):
data = data.replace("\r", "").replace("\n", "").replace("$", "")
nmeadata, cksum = re.split('\*', data)
calc_cksum = 0
for s in nmeadata:
calc_cksum ^= ord(s)
return int(cksum, 16), calc_cksum
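    # Worked example (illustrative): nmea_checksum('$GPGGA,1*4B') strips '$' and
    # any line endings, splits on '*', XORs every character of the payload
    # 'GPGGA,1' (0x47 ^ 0x50 ^ 0x47 ^ 0x47 ^ 0x41 ^ 0x2C ^ 0x31 == 0x4B) and
    # returns (expected, computed) == (75, 75).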
def on_read_raw(self, data):
for bytedata in data:
if bytedata == 0x24:
self.nmea_buffer = []
self.nmea_sync = 0
self.nmea_buffer.append(chr(bytedata))
else:
self.nmea_buffer.append(chr(bytedata))
if self.nmea_sync == 0:
if bytedata == 0x0D:
self.nmea_sync = 1
elif self.nmea_sync == 1:
if bytedata == 0x0A:
try:
str_nmea = ''.join(self.nmea_buffer)
cksum, calc_cksum = self.nmea_checksum(
str_nmea)
if cksum == calc_cksum:
if str_nmea.find("$GPGGA") != -1 or str_nmea.find("$GNGGA") != -1:
if self.ntrip_client:
self.ntrip_client.send(str_nmea)
#self.add_output_packet('gga', str_nmea)
# print(str_nmea, end='')
APP_CONTEXT.get_print_logger().info(str_nmea.replace('\r\n', ''))
# else:
# print("nmea checksum wrong {0} {1}".format(cksum, calc_cksum))
except Exception as e:
# print('NMEA fault:{0}'.format(e))
pass
self.nmea_buffer = []
self.nmea_sync = 0
if self.user_logf is not None:
self.user_logf.write(data)
@abstractmethod
def thread_debug_port_receiver(self, *args, **kwargs):
pass
@abstractmethod
def thread_rtcm_port_receiver(self, *args, **kwargs):
pass
def on_receive_output_packet(self, packet_type, data, error=None):
'''
Listener for getting output packet
'''
# $GPGGA,080319.00,3130.4858508,N,12024.0998832,E,4,25,0.5,12.459,M,0.000,M,2.0,*46
if packet_type == 'gN':
if self.ntrip_client:
# $GPGGA
gpgga = '$GNGGA' #'$GPGGA'
# time
timeOfWeek = float(data['GPS_TimeofWeek']) - 18
dsec = int(timeOfWeek)
msec = timeOfWeek - dsec
sec = dsec % 86400
hour = int(sec / 3600)
minute = int(sec % 3600 / 60)
second = sec % 60
gga_time = format(hour*10000 + minute*100 +
second + msec, '09.2f')
gpgga = gpgga + ',' + gga_time
# latitude
latitude = float(data['latitude']) * 180 / 2147483648.0
if latitude >= 0:
latflag = 'N'
else:
latflag = 'S'
latitude = math.fabs(latitude)
lat_d = int(latitude)
lat_m = (latitude-lat_d) * 60
lat_dm = format(lat_d*100 + lat_m, '012.7f')
gpgga = gpgga + ',' + lat_dm + ',' + latflag
# longitude
longitude = float(data['longitude']) * 180 / 2147483648.0
if longitude >= 0:
lonflag = 'E'
else:
lonflag = 'W'
longitude = math.fabs(longitude)
lon_d = int(longitude)
lon_m = (longitude-lon_d) * 60
lon_dm = format(lon_d*100 + lon_m, '013.7f')
gpgga = gpgga + ',' + lon_dm + ',' + lonflag
# positionMode
gpgga = gpgga + ',' + str(data['positionMode'])
# svs
gpgga = gpgga + ',' + str(data['numberOfSVs'])
            # hdop
gpgga = gpgga + ',' + format(float(data['hdop']), '03.1f')
# height
gpgga = gpgga + ',' + \
format(float(data['height']), '06.3f') + ',M'
#
gpgga = gpgga + ',0.000,M'
# diffage
gpgga = gpgga + ',' + \
format(float(data['diffage']), '03.1f') + ','
            # checksum
checksum = 0
for i in range(1, len(gpgga)):
checksum = checksum ^ ord(gpgga[i])
str_checksum = hex(checksum)
if str_checksum.startswith("0x"):
str_checksum = str_checksum[2:]
gpgga = gpgga + '*' + str_checksum + '\r\n'
APP_CONTEXT.get_print_logger().info(gpgga)
self.ntrip_client.send(gpgga)
return
elif packet_type == 'pS':
try:
if data['latitude'] != 0.0 and data['longitude'] != 0.0:
if self.pS_data:
if self.pS_data['GPS_Week'] == data['GPS_Week']:
if data['GPS_TimeofWeek'] - self.pS_data['GPS_TimeofWeek'] >= 0.2:
self.add_output_packet('pos', data)
self.pS_data = data
if data['insStatus'] >= 3 and data['insStatus'] <= 5:
ins_status = 'INS_INACTIVE'
if data['insStatus'] == 3:
ins_status = 'INS_SOLUTION_GOOD'
elif data['insStatus'] == 4:
ins_status = 'INS_SOLUTION_FREE'
elif data['insStatus'] == 5:
ins_status = 'INS_ALIGNMENT_COMPLETE'
ins_pos_type = 'INS_INVALID'
if data['insPositionType'] == 1:
ins_pos_type = 'INS_SPP'
elif data['insPositionType'] == 4:
ins_pos_type = 'INS_RTKFIXED'
elif data['insPositionType'] == 5:
ins_pos_type = 'INS_RTKFLOAT'
inspva = '#INSPVA,%s,%10.2f, %s, %s,%12.8f,%13.8f,%8.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f,%9.3f' %\
(data['GPS_Week'], data['GPS_TimeofWeek'], ins_status, ins_pos_type,
data['latitude'], data['longitude'], data['height'],
data['velocityNorth'], data['velocityEast'], data['velocityUp'],
data['roll'], data['pitch'], data['heading'])
APP_CONTEXT.get_print_logger().info(inspva)
else:
self.add_output_packet('pos', data)
self.pS_data = data
else:
self.add_output_packet('pos', data)
self.pS_data = data
except Exception as e:
pass
elif packet_type == 'sK':
if self.sky_data:
if self.sky_data[0]['timeOfWeek'] == data[0]['timeOfWeek']:
self.sky_data.extend(data)
else:
self.add_output_packet('skyview', self.sky_data)
self.add_output_packet('snr', self.sky_data)
self.sky_data = []
self.sky_data.extend(data)
else:
self.sky_data.extend(data)
elif packet_type == 'g1':
self.ps_dic['positionMode'] = data['position_type']
self.ps_dic['numberOfSVs'] = data['number_of_satellites_in_solution']
self.ps_dic['hdop'] = data['hdop']
self.ps_dic['age'] = data['diffage']
if self.inspva_flag == 0:
self.ps_dic['GPS_Week'] = data['GPS_Week']
self.ps_dic['GPS_TimeofWeek'] = data['GPS_TimeOfWeek'] * 0.001
self.ps_dic['latitude'] = data['latitude']
self.ps_dic['longitude'] = data['longitude']
self.ps_dic['height'] = data['height']
self.ps_dic['velocityMode'] = 1
self.ps_dic['velocityNorth'] = data['north_vel']
self.ps_dic['velocityEast'] = data['east_vel']
self.ps_dic['velocityUp'] = data['up_vel']
self.ps_dic['latitude_std'] = data['latitude_standard_deviation']
self.ps_dic['longitude_std'] = data['longitude_standard_deviation']
self.ps_dic['height_std'] = data['height_standard_deviation']
self.ps_dic['north_vel_std'] = data['north_vel_standard_deviation']
self.ps_dic['east_vel_std'] = data['east_vel_standard_deviation']
self.ps_dic['up_vel_std'] = data['up_vel_standard_deviation']
self.add_output_packet('pos', self.ps_dic)
elif packet_type == 'i1':
self.inspva_flag = 1
if data['GPS_TimeOfWeek'] % 200 == 0:
self.ps_dic['GPS_Week'] = data['GPS_Week']
self.ps_dic['GPS_TimeofWeek'] = data['GPS_TimeOfWeek'] * 0.001
self.ps_dic['latitude'] = data['latitude']
self.ps_dic['longitude'] = data['longitude']
self.ps_dic['height'] = data['height']
if data['ins_position_type'] != 1 and data['ins_position_type'] != 4 and data['ins_position_type'] != 5:
self.ps_dic['velocityMode'] = 2
else:
self.ps_dic['velocityMode'] = 1
self.ps_dic['insStatus'] = data['ins_status']
self.ps_dic['insPositionType'] = data['ins_position_type']
self.ps_dic['velocityNorth'] = data['north_velocity']
self.ps_dic['velocityEast'] = data['east_velocity']
self.ps_dic['velocityUp'] = data['up_velocity']
self.ps_dic['roll'] = data['roll']
self.ps_dic['pitch'] = data['pitch']
self.ps_dic['heading'] = data['heading']
self.ps_dic['latitude_std'] = data['latitude_std']
self.ps_dic['longitude_std'] = data['longitude_std']
self.ps_dic['height_std'] = data['height_std']
self.ps_dic['north_vel_std'] = data['north_velocity_std']
self.ps_dic['east_vel_std'] = data['east_velocity_std']
self.ps_dic['up_vel_std'] = data['up_velocity_std']
self.ps_dic['roll_std'] = data['roll_std']
self.ps_dic['pitch_std'] = data['pitch_std']
self.ps_dic['heading_std'] = data['heading_std']
self.add_output_packet('pos', self.ps_dic)
elif packet_type == 'y1':
if self.sky_data:
if self.sky_data[0]['GPS_TimeOfWeek'] == data[0]['GPS_TimeOfWeek']:
self.sky_data.extend(data)
else:
self.add_output_packet('skyview', self.sky_data)
self.add_output_packet('snr', self.sky_data)
self.sky_data = []
self.sky_data.extend(data)
else:
self.sky_data.extend(data)
else:
output_packet_config = next(
(x for x in self.properties['userMessages']['outputPackets']
if x['name'] == packet_type), None)
if output_packet_config and output_packet_config.__contains__('active') \
and output_packet_config['active']:
timeOfWeek = int(data['GPS_TimeOfWeek']) % 60480000
data['GPS_TimeOfWeek'] = timeOfWeek / 1000
self.add_output_packet('imu', data)
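    # Worked example (illustrative) of the GGA latitude encoding above: the raw
    # value arrives as a signed 32-bit fraction of 180 degrees, so it is scaled
    # by 180 / 2147483648.0 first. A latitude of 31.5 degrees then splits into
    # 31 degrees and 0.5 * 60 = 30.0 minutes, and format(31*100 + 30.0, '012.7f')
    # yields '3130.0000000' (ddmm.mmmmmmm), followed by the 'N'/'S' flag.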
@abstractmethod
def build_worker(self, rule, content):
        ''' Build upgrade worker by rule and content
'''
pass
# override
def get_upgrade_workers(self, firmware_content):
workers = []
rules = [
InternalCombineAppParseRule('rtk', 'rtk_start:', 4),
InternalCombineAppParseRule('ins', 'ins_start:', 4),
InternalCombineAppParseRule('sdk', 'sdk_start:', 4),
]
parsed_content = firmware_content_parser(firmware_content, rules)
        # for each parsed content entry; if empty, skip registering it into the upgrade center
device_info = self.get_device_connection_info()
for _, rule in enumerate(parsed_content):
content = parsed_content[rule]
if len(content) == 0:
continue
worker = self.build_worker(rule, content)
if not worker:
continue
if (device_info['modelName'] == 'RTK330L') and (rule == 'sdk') and ((int(device_info['serialNumber']) <= 2178200080) and (int(device_info['serialNumber']) >= 2178200001)):
continue
else:
workers.append(worker)
        # prepare jump bootloader worker and jump application worker
        # append a jump bootloader worker before the first firmware upgrade worker
        # append a jump application worker after the last firmware upgrade worker
start_index = -1
end_index = -1
for i, worker in enumerate(workers):
if isinstance(worker, FirmwareUpgradeWorker):
start_index = i if start_index == -1 else start_index
end_index = i
if start_index > -1 and end_index > -1:
workers.insert(
start_index, JumpBootloaderWorker(self.communicator))
workers.insert(
end_index+2, JumpApplicationWorker(self.communicator, self.bootloader_baudrate))
return workers
def get_device_connection_info(self):
return {
'modelName': self.device_info['name'],
'deviceType': self.type,
'serialNumber': self.device_info['sn'],
'partNumber': self.device_info['pn'],
'firmware': self.device_info['firmware_version']
}
def check_predefined_result(self):
local_time = time.localtime()
formatted_file_time = time.strftime("%Y_%m_%d_%H_%M_%S", local_time)
file_path = os.path.join(
self.rtk_log_file_name,
'parameters_predefined_{0}.json'.format(formatted_file_time)
)
# save parameters to data log folder after predefined parameters setup
result = self.get_params()
if result['packetType'] == 'inputParams':
with open(file_path, 'w') as outfile:
json.dump(result['data'], outfile)
# compare saved parameters with predefined parameters
hashed_predefined_parameters = helper.collection_to_dict(
self.properties["initial"]["userParameters"], key='paramId')
hashed_current_parameters = helper.collection_to_dict(
result['data'], key='paramId')
success_count = 0
fail_count = 0
fail_parameters = []
for key in hashed_predefined_parameters:
if hashed_current_parameters[key]['value'] == \
hashed_predefined_parameters[key]['value']:
success_count += 1
else:
fail_count += 1
fail_parameters.append(
hashed_predefined_parameters[key]['name'])
check_result = 'Predefined Parameters are saved. Success ({0}), Fail ({1})'.format(
success_count, fail_count)
if success_count == len(hashed_predefined_parameters.keys()):
print_green(check_result)
if fail_count > 0:
print_yellow(check_result)
print_yellow('The failed parameters: {0}'.format(fail_parameters))
def save_device_info(self):
''' Save device configuration
File name: configuration.json
'''
if self.is_in_bootloader:
return
result = self.get_params()
device_configuration = None
file_path = os.path.join(
self.data_folder, self.rtk_log_file_name, 'configuration.json')
if not os.path.exists(file_path):
device_configuration = []
else:
with open(file_path) as json_data:
device_configuration = (list)(json.load(json_data))
if result['packetType'] == 'inputParams':
session_info = dict()
session_info['time'] = time.strftime(
"%Y-%m-%d %H:%M:%S", time.localtime())
session_info['device'] = self.device_info
session_info['app'] = self.app_info
session_info['interface'] = self.cli_options.interface
if session_info['interface'] == 'uart':
session_info['path'] = self.communicator.serial_port.port
parameters_configuration = dict()
for item in result['data']:
param_name = item['name']
param_value = item['value']
parameters_configuration[param_name] = param_value
session_info['parameters'] = parameters_configuration
device_configuration.append(session_info)
with open(file_path, 'w') as outfile:
json.dump(device_configuration, outfile,
indent=4, ensure_ascii=False)
def after_upgrade_completed(self):
# start ntrip client
if self.properties["initial"].__contains__("ntrip") and not self.ntrip_client and not self.is_in_bootloader:
thead = threading.Thread(target=self.ntrip_client_thread)
thead.start()
#self.save_device_info()
def get_operation_status(self):
if self.is_logging:
return 'LOGGING'
return 'IDLE'
# command list
def server_status(self, *args): # pylint: disable=invalid-name
'''
Get server connection status
'''
return {
'packetType': 'ping',
'data': {'status': '1'}
}
def get_device_info(self, *args): # pylint: disable=invalid-name
'''
Get device information
'''
return {
'packetType': 'deviceInfo',
'data': [
{'name': 'Product Name', 'value': self.device_info['name']},
{'name': 'IMU', 'value': self.device_info['imu']},
{'name': 'PN', 'value': self.device_info['pn']},
{'name': 'Firmware Version',
'value': self.device_info['firmware_version']},
{'name': 'SN', 'value': self.device_info['sn']},
{'name': 'App Version', 'value': self.app_info['version']}
]
}
def get_log_info(self):
'''
Build information for log
'''
return {
"type": self.type,
"model": self.device_info['name'],
"logInfo": {
"pn": self.device_info['pn'],
"sn": self.device_info['sn'],
"rtkProperties": json.dumps(self.properties)
}
}
def get_conf(self, *args): # pylint: disable=unused-argument
'''
Get json configuration
'''
return {
'packetType': 'conf',
'data': {
'outputs': self.properties['userMessages']['outputPackets'],
'inputParams': self.properties['userConfiguration']
}
}
@with_device_message
def get_params(self, *args): # pylint: disable=unused-argument
'''
Get all parameters
'''
has_error = False
parameter_values = []
if self.app_info['app_name'] == 'RTK_INS':
conf_parameters = self.properties['userConfiguration']
conf_parameters_len = len(conf_parameters)-1
step = 10
for i in range(2, conf_parameters_len, step):
start_byte = i
end_byte = i+step-1 if i+step < conf_parameters_len else conf_parameters_len
time.sleep(0.1)
command_line = helper.build_packet(
'gB', [start_byte, end_byte])
result = yield self._message_center.build(command=command_line, timeout=10)
if result['error']:
has_error = True
break
parameter_values.extend(result['data'])
else:
command_line = helper.build_input_packet('gA')
result = yield self._message_center.build(command=command_line, timeout=3)
if result['error']:
has_error = True
parameter_values = result['data']
if not has_error:
self.parameters = parameter_values
yield {
'packetType': 'inputParams',
'data': parameter_values
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def get_param(self, params, *args): # pylint: disable=unused-argument
'''
        Update parameter value
'''
command_line = helper.build_input_packet(
'gP', properties=self.properties, param=params['paramId'])
# self.communicator.write(command_line)
# result = self.get_input_result('gP', timeout=1)
result = yield self._message_center.build(command=command_line)
data = result['data']
error = result['error']
if error:
yield {
'packetType': 'error',
'data': 'No Response'
}
if data:
self.parameters = data
yield {
'packetType': 'inputParam',
'data': data
}
yield {
'packetType': 'error',
'data': 'No Response'
}
@with_device_message
def set_params(self, params, *args): # pylint: disable=unused-argument
'''
        Update parameters value
'''
input_parameters = self.properties['userConfiguration']
grouped_parameters = {}
for parameter in params:
exist_parameter = next(
(x for x in input_parameters if x['paramId'] == parameter['paramId']), None)
if exist_parameter:
has_group = grouped_parameters.__contains__(
exist_parameter['category'])
if not has_group:
grouped_parameters[exist_parameter['category']] = []
current_group = grouped_parameters[exist_parameter['category']]
current_group.append(
{'paramId': parameter['paramId'], 'value': parameter['value'], 'type': exist_parameter['type']})
for group in grouped_parameters.values():
message_bytes = []
for parameter in group:
message_bytes.extend(
encode_value('int8', parameter['paramId'])
)
message_bytes.extend(
encode_value(parameter['type'], parameter['value'])
)
# print('parameter type {0}, value {1}'.format(
# parameter['type'], parameter['value']))
# result = self.set_param(parameter)
command_line = helper.build_packet(
'uB', message_bytes)
# for s in command_line:
# print(hex(s))
result = yield self._message_center.build(command=command_line)
packet_type = result['packet_type']
data = result['data']
if packet_type == 'error':
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
if data > 0:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
break
yield {
'packetType': 'success',
'data': {
'error': 0
}
}
@with_device_message
def set_param(self, params, *args): # pylint: disable=unused-argument
'''
        Update parameter value
'''
command_line = helper.build_input_packet(
'uP', properties=self.properties, param=params['paramId'], value=params['value'])
# self.communicator.write(command_line)
# result = self.get_input_result('uP', timeout=1)
result = yield self._message_center.build(command=command_line)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': data
}
}
yield {
'packetType': 'success',
'data': {
'error': data
}
}
@with_device_message
def save_config(self, *args): # pylint: disable=unused-argument
'''
Save configuration
'''
command_line = helper.build_input_packet('sC')
# self.communicator.write(command_line)
# result = self.get_input_result('sC', timeout=2)
result = yield self._message_center.build(command=command_line, timeout=2)
data = result['data']
error = result['error']
if data:
yield {
'packetType': 'success',
'data': error
}
yield {
'packetType': 'success',
'data': error
}
@with_device_message
def reset_params(self, params, *args): # pylint: disable=unused-argument
'''
Reset params to default
'''
command_line = helper.build_input_packet('rD')
result = yield self._message_center.build(command=command_line, timeout=2)
error = result['error']
data = result['data']
if error:
yield {
'packetType': 'error',
'data': {
'error': error
}
}
yield {
'packetType': 'success',
'data': data
}
def upgrade_framework(self, params, *args): # pylint: disable=unused-argument
'''
Upgrade framework
'''
file = ''
if isinstance(params, str):
file = params
if isinstance(params, dict):
file = params['file']
# start a thread to do upgrade
if not self.is_upgrading:
self.is_upgrading = True
self._message_center.pause()
if self._logger is not None:
self._logger.stop_user_log()
while not self._message_center.paused:
time.sleep(0.1)
thread = threading.Thread(
target=self.thread_do_upgrade_framework, args=(file,))
thread.start()
# print("Upgrade OpenRTK firmware started at:[{0}].".format(
# datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
return {
'packetType': 'success'
}
|
test_flight.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import ast
import base64
import os
import struct
import tempfile
import threading
import time
import traceback
import numpy as np
import pytest
import pyarrow as pa
from pyarrow.lib import tobytes
from pyarrow.util import pathlib, find_free_port
try:
from pyarrow import flight
from pyarrow.flight import (
FlightClient, FlightServerBase,
ServerAuthHandler, ClientAuthHandler,
ServerMiddleware, ServerMiddlewareFactory,
ClientMiddleware, ClientMiddlewareFactory,
)
except ImportError:
flight = None
FlightClient, FlightServerBase = object, object
ServerAuthHandler, ClientAuthHandler = object, object
ServerMiddleware, ServerMiddlewareFactory = object, object
ClientMiddleware, ClientMiddlewareFactory = object, object
# Marks all of the tests in this module
# Ignore these with pytest ... -m 'not flight'
pytestmark = pytest.mark.flight
def test_import():
# So we see the ImportError somewhere
import pyarrow.flight # noqa
def resource_root():
"""Get the path to the test resources directory."""
if not os.environ.get("ARROW_TEST_DATA"):
raise RuntimeError("Test resources not found; set "
"ARROW_TEST_DATA to <repo root>/testing/data")
return pathlib.Path(os.environ["ARROW_TEST_DATA"]) / "flight"
def read_flight_resource(path):
"""Get the contents of a test resource file."""
root = resource_root()
if not root:
return None
try:
with (root / path).open("rb") as f:
return f.read()
except FileNotFoundError:
raise RuntimeError(
"Test resource {} not found; did you initialize the "
"test resource submodule?\n{}".format(root / path,
traceback.format_exc()))
def example_tls_certs():
"""Get the paths to test TLS certificates."""
return {
"root_cert": read_flight_resource("root-ca.pem"),
"certificates": [
flight.CertKeyPair(
cert=read_flight_resource("cert0.pem"),
key=read_flight_resource("cert0.key"),
),
flight.CertKeyPair(
cert=read_flight_resource("cert1.pem"),
key=read_flight_resource("cert1.key"),
),
]
}
def simple_ints_table():
data = [
pa.array([-10, -5, 0, 5, 10])
]
return pa.Table.from_arrays(data, names=['some_ints'])
def simple_dicts_table():
dict_values = pa.array(["foo", "baz", "quux"], type=pa.utf8())
data = [
pa.chunked_array([
pa.DictionaryArray.from_arrays([1, 0, None], dict_values),
pa.DictionaryArray.from_arrays([2, 1], dict_values)
])
]
return pa.Table.from_arrays(data, names=['some_dicts'])
class ConstantFlightServer(FlightServerBase):
"""A Flight server that always returns the same data.
See ARROW-4796: this server implementation will segfault if Flight
does not properly hold a reference to the Table object.
"""
CRITERIA = b"the expected criteria"
def __init__(self, location=None, **kwargs):
super().__init__(location, **kwargs)
# Ticket -> Table
self.table_factories = {
b'ints': simple_ints_table,
b'dicts': simple_dicts_table,
}
def list_flights(self, context, criteria):
if criteria == self.CRITERIA:
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
def do_get(self, context, ticket):
# Return a fresh table, so that Flight is the only one keeping a
# reference.
table = self.table_factories[ticket.ticket]()
return flight.RecordBatchStream(table)
class MetadataFlightServer(FlightServerBase):
"""A Flight server that numbers incoming/outgoing data."""
def do_get(self, context, ticket):
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
return flight.GeneratorStream(
table.schema,
self.number_batches(table))
def do_put(self, context, descriptor, reader, writer):
counter = 0
expected_data = [-10, -5, 0, 5, 10]
while True:
try:
batch, buf = reader.read_chunk()
assert batch.equals(pa.RecordBatch.from_arrays(
[pa.array([expected_data[counter]])],
['a']
))
assert buf is not None
client_counter, = struct.unpack('<i', buf.to_pybytes())
assert counter == client_counter
writer.write(struct.pack('<i', counter))
counter += 1
except StopIteration:
return
@staticmethod
def number_batches(table):
for idx, batch in enumerate(table.to_batches()):
buf = struct.pack('<i', idx)
yield batch, buf
class EchoFlightServer(FlightServerBase):
"""A Flight server that returns the last data uploaded."""
def __init__(self, location=None, expected_schema=None, **kwargs):
super().__init__(location, **kwargs)
self.last_message = None
self.expected_schema = expected_schema
def do_get(self, context, ticket):
return flight.RecordBatchStream(self.last_message)
def do_put(self, context, descriptor, reader, writer):
if self.expected_schema:
assert self.expected_schema == reader.schema
self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
"""An echo server that streams individual record batches."""
def do_get(self, context, ticket):
return flight.GeneratorStream(
self.last_message.schema,
self.last_message.to_batches(max_chunksize=1024))
def list_actions(self, context):
return []
def do_action(self, context, action):
if action.type == "who-am-i":
return [context.peer_identity(), context.peer().encode("utf-8")]
raise NotImplementedError
class GetInfoFlightServer(FlightServerBase):
"""A Flight server that tests GetFlightInfo."""
def get_flight_info(self, context, descriptor):
return flight.FlightInfo(
pa.schema([('a', pa.int32())]),
descriptor,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
def get_schema(self, context, descriptor):
info = self.get_flight_info(context, descriptor)
return flight.SchemaResult(info.schema)
class ListActionsFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
@classmethod
def expected_actions(cls):
return [
("action-1", "description"),
("action-2", ""),
flight.ActionType("action-3", "more detail"),
]
def list_actions(self, context):
yield from self.expected_actions()
class ListActionsErrorFlightServer(FlightServerBase):
"""A Flight server that tests ListActions."""
def list_actions(self, context):
yield ("action-1", "")
yield "foo"
class CheckTicketFlightServer(FlightServerBase):
"""A Flight server that compares the given ticket to an expected value."""
def __init__(self, expected_ticket, location=None, **kwargs):
super().__init__(location, **kwargs)
self.expected_ticket = expected_ticket
def do_get(self, context, ticket):
assert self.expected_ticket == ticket.ticket
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
return flight.RecordBatchStream(table)
def do_put(self, context, descriptor, reader):
self.last_message = reader.read_all()
class InvalidStreamFlightServer(FlightServerBase):
"""A Flight server that tries to return messages with differing schemas."""
schema = pa.schema([('a', pa.int32())])
def do_get(self, context, ticket):
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        assert data1[0].type != data2[0].type
table1 = pa.Table.from_arrays(data1, names=['a'])
table2 = pa.Table.from_arrays(data2, names=['a'])
assert table1.schema == self.schema
return flight.GeneratorStream(self.schema, [table1, table2])
class SlowFlightServer(FlightServerBase):
"""A Flight server that delays its responses to test timeouts."""
def do_get(self, context, ticket):
return flight.GeneratorStream(pa.schema([('a', pa.int32())]),
self.slow_stream())
def do_action(self, context, action):
time.sleep(0.5)
return []
@staticmethod
def slow_stream():
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
yield pa.Table.from_arrays(data1, names=['a'])
# The second message should never get sent; the client should
# cancel before we send this
time.sleep(10)
yield pa.Table.from_arrays(data1, names=['a'])
class ErrorFlightServer(FlightServerBase):
"""A Flight server that uses all the Flight-specific errors."""
def do_action(self, context, action):
if action.type == "internal":
raise flight.FlightInternalError("foo")
elif action.type == "timedout":
raise flight.FlightTimedOutError("foo")
elif action.type == "cancel":
raise flight.FlightCancelledError("foo")
elif action.type == "unauthenticated":
raise flight.FlightUnauthenticatedError("foo")
elif action.type == "unauthorized":
raise flight.FlightUnauthorizedError("foo")
elif action.type == "protobuf":
err_msg = b'this is an error message'
raise flight.FlightUnauthorizedError("foo", err_msg)
raise NotImplementedError
def list_flights(self, context, criteria):
yield flight.FlightInfo(
pa.schema([]),
flight.FlightDescriptor.for_path('/foo'),
[],
-1, -1
)
raise flight.FlightInternalError("foo")
class ExchangeFlightServer(FlightServerBase):
"""A server for testing DoExchange."""
def do_exchange(self, context, descriptor, reader, writer):
if descriptor.descriptor_type != flight.DescriptorType.CMD:
raise pa.ArrowInvalid("Must provide a command descriptor")
elif descriptor.command == b"echo":
return self.exchange_echo(context, reader, writer)
elif descriptor.command == b"get":
return self.exchange_do_get(context, reader, writer)
elif descriptor.command == b"put":
return self.exchange_do_put(context, reader, writer)
elif descriptor.command == b"transform":
return self.exchange_transform(context, reader, writer)
else:
raise pa.ArrowInvalid(
"Unknown command: {}".format(descriptor.command))
def exchange_do_get(self, context, reader, writer):
"""Emulate DoGet with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
writer.begin(data.schema)
writer.write_table(data)
def exchange_do_put(self, context, reader, writer):
"""Emulate DoPut with DoExchange."""
num_batches = 0
for chunk in reader:
if not chunk.data:
raise pa.ArrowInvalid("All chunks must have data.")
num_batches += 1
writer.write_metadata(str(num_batches).encode("utf-8"))
def exchange_echo(self, context, reader, writer):
"""Run a simple echo server."""
started = False
for chunk in reader:
if not started and chunk.data:
writer.begin(chunk.data.schema)
started = True
if chunk.app_metadata and chunk.data:
writer.write_with_metadata(chunk.data, chunk.app_metadata)
elif chunk.app_metadata:
writer.write_metadata(chunk.app_metadata)
elif chunk.data:
writer.write_batch(chunk.data)
else:
assert False, "Should not happen"
def exchange_transform(self, context, reader, writer):
"""Sum rows in an uploaded table."""
for field in reader.schema:
if not pa.types.is_integer(field.type):
raise pa.ArrowInvalid("Invalid field: " + repr(field))
table = reader.read_all()
sums = [0] * table.num_rows
for column in table:
for row, value in enumerate(column):
sums[row] += value.as_py()
result = pa.Table.from_arrays([pa.array(sums)], names=["sum"])
writer.begin(result.schema)
writer.write_table(result)
class HttpBasicServerAuthHandler(ServerAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
buf = incoming.read()
auth = flight.BasicAuth.deserialize(buf)
if auth.username not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
if self.creds[auth.username] != auth.password:
raise flight.FlightUnauthenticatedError("wrong password")
outgoing.write(tobytes(auth.username))
def is_valid(self, token):
if not token:
raise flight.FlightUnauthenticatedError("token not provided")
if token not in self.creds:
raise flight.FlightUnauthenticatedError("unknown user")
return token
class HttpBasicClientAuthHandler(ClientAuthHandler):
"""An example implementation of HTTP basic authentication."""
def __init__(self, username, password):
super().__init__()
self.basic_auth = flight.BasicAuth(username, password)
self.token = None
def authenticate(self, outgoing, incoming):
auth = self.basic_auth.serialize()
outgoing.write(auth)
self.token = incoming.read()
def get_token(self):
return self.token
class TokenServerAuthHandler(ServerAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, creds):
super().__init__()
self.creds = creds
def authenticate(self, outgoing, incoming):
username = incoming.read()
password = incoming.read()
if username in self.creds and self.creds[username] == password:
outgoing.write(base64.b64encode(b'secret:' + username))
else:
raise flight.FlightUnauthenticatedError(
"invalid username/password")
def is_valid(self, token):
token = base64.b64decode(token)
if not token.startswith(b'secret:'):
raise flight.FlightUnauthenticatedError("invalid token")
return token[7:]
class TokenClientAuthHandler(ClientAuthHandler):
"""An example implementation of authentication via handshake."""
def __init__(self, username, password):
super().__init__()
self.username = username
self.password = password
self.token = b''
def authenticate(self, outgoing, incoming):
outgoing.write(self.username)
outgoing.write(self.password)
self.token = incoming.read()
def get_token(self):
return self.token
class HeaderServerMiddleware(ServerMiddleware):
"""Expose a per-call value to the RPC method body."""
def __init__(self, special_value):
self.special_value = special_value
class HeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Expose a per-call hard-coded value to the RPC method body."""
def start_call(self, info, headers):
return HeaderServerMiddleware("right value")
class HeaderFlightServer(FlightServerBase):
"""Echo back the per-call hard-coded value."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
if middleware:
return [middleware.special_value.encode()]
return [b""]
class MultiHeaderFlightServer(FlightServerBase):
"""Test sending/receiving multiple (binary-valued) headers."""
def do_action(self, context, action):
middleware = context.get_middleware("test")
headers = repr(middleware.client_headers).encode("utf-8")
return [headers]
class SelectiveAuthServerMiddlewareFactory(ServerMiddlewareFactory):
"""Deny access to certain methods based on a header."""
def start_call(self, info, headers):
if info.method == flight.FlightMethod.LIST_ACTIONS:
# No auth needed
return
token = headers.get("x-auth-token")
if not token:
raise flight.FlightUnauthenticatedError("No token")
token = token[0]
if token != "password":
raise flight.FlightUnauthenticatedError("Invalid token")
return HeaderServerMiddleware(token)
class SelectiveAuthClientMiddlewareFactory(ClientMiddlewareFactory):
def start_call(self, info):
return SelectiveAuthClientMiddleware()
class SelectiveAuthClientMiddleware(ClientMiddleware):
def sending_headers(self):
return {
"x-auth-token": "password",
}
class RecordingServerMiddlewareFactory(ServerMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info, headers):
self.methods.append(info.method)
return None
class RecordingClientMiddlewareFactory(ClientMiddlewareFactory):
"""Record what methods were called."""
def __init__(self):
super().__init__()
self.methods = []
def start_call(self, info):
self.methods.append(info.method)
return None
class MultiHeaderClientMiddlewareFactory(ClientMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self):
# Read in test_middleware_multi_header below.
# The middleware instance will update this value.
self.last_headers = {}
def start_call(self, info):
return MultiHeaderClientMiddleware(self)
class MultiHeaderClientMiddleware(ClientMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
EXPECTED = {
"x-text": ["foo", "bar"],
"x-binary-bin": [b"\x00", b"\x01"],
}
def __init__(self, factory):
self.factory = factory
def sending_headers(self):
return self.EXPECTED
def received_headers(self, headers):
# Let the test code know what the last set of headers we
# received were.
self.factory.last_headers = headers
class MultiHeaderServerMiddlewareFactory(ServerMiddlewareFactory):
"""Test sending/receiving multiple (binary-valued) headers."""
def start_call(self, info, headers):
return MultiHeaderServerMiddleware(headers)
class MultiHeaderServerMiddleware(ServerMiddleware):
"""Test sending/receiving multiple (binary-valued) headers."""
def __init__(self, client_headers):
self.client_headers = client_headers
def sending_headers(self):
return MultiHeaderClientMiddleware.EXPECTED
def test_flight_server_location_argument():
locations = [
None,
'grpc://localhost:0',
('localhost', find_free_port()),
]
for location in locations:
with FlightServerBase(location) as server:
assert isinstance(server, FlightServerBase)
def test_server_exit_reraises_exception():
with pytest.raises(ValueError):
with FlightServerBase():
raise ValueError()
@pytest.mark.slow
def test_client_wait_for_available():
location = ('localhost', find_free_port())
server = None
def serve():
        nonlocal server
time.sleep(0.5)
server = FlightServerBase(location)
server.serve()
client = FlightClient(location)
thread = threading.Thread(target=serve, daemon=True)
thread.start()
started = time.time()
client.wait_for_available(timeout=5)
elapsed = time.time() - started
assert elapsed >= 0.5
def test_flight_list_flights():
"""Try a simple list_flights call."""
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
assert list(client.list_flights()) == []
flights = client.list_flights(ConstantFlightServer.CRITERIA)
assert len(list(flights)) == 1
def test_flight_do_get_ints():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.pandas
def test_do_get_ints_pandas():
"""Try a simple do_get call."""
table = simple_ints_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'ints')).read_pandas()
assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
table = simple_dicts_table()
with ConstantFlightServer() as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'dicts')).read_all()
assert data.equals(table)
def test_flight_do_get_ticket():
"""Make sure Tickets get passed to the server."""
data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
table = pa.Table.from_arrays(data1, names=['a'])
with CheckTicketFlightServer(expected_ticket=b'the-ticket') as server:
client = flight.connect(('localhost', server.port))
data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
assert data.equals(table)
def test_flight_get_info():
"""Make sure FlightEndpoint accepts string and object URIs."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
assert info.total_records == -1
assert info.total_bytes == -1
assert info.schema == pa.schema([('a', pa.int32())])
assert len(info.endpoints) == 2
assert len(info.endpoints[0].locations) == 1
assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
assert info.endpoints[1].locations[0] == \
flight.Location.for_grpc_tcp('localhost', 5005)
def test_flight_get_schema():
"""Make sure GetSchema returns correct schema."""
with GetInfoFlightServer() as server:
client = FlightClient(('localhost', server.port))
info = client.get_schema(flight.FlightDescriptor.for_command(b''))
assert info.schema == pa.schema([('a', pa.int32())])
def test_list_actions():
"""Make sure the return type of ListActions is validated."""
# ARROW-6392
with ListActionsErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(
flight.FlightServerError,
match=("Results of list_actions must be "
"ActionType or tuple")
):
list(client.list_actions())
with ListActionsFlightServer() as server:
client = FlightClient(('localhost', server.port))
assert list(client.list_actions()) == \
ListActionsFlightServer.expected_actions()
class ConvenienceServer(FlightServerBase):
"""
Server for testing various implementation conveniences (auto-boxing, etc.)
"""
@property
def simple_action_results(self):
return [b'foo', b'bar', b'baz']
def do_action(self, context, action):
if action.type == 'simple-action':
return self.simple_action_results
elif action.type == 'echo':
return [action.body]
elif action.type == 'bad-action':
return ['foo']
elif action.type == 'arrow-exception':
raise pa.ArrowMemoryError()
def test_do_action_result_convenience():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
# do_action as action type without body
results = [x.body for x in client.do_action('simple-action')]
assert results == server.simple_action_results
# do_action with tuple of type and body
body = b'the-body'
results = [x.body for x in client.do_action(('echo', body))]
assert results == [body]
def test_nicer_server_exceptions():
with ConvenienceServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightServerError,
match="a bytes-like object is required"):
list(client.do_action('bad-action'))
# While Flight/C++ sends across the original status code, it
# doesn't get mapped to the equivalent code here, since we
# want to be able to distinguish between client- and server-
# side errors.
with pytest.raises(flight.FlightServerError,
match="ArrowMemoryError"):
list(client.do_action('arrow-exception'))
def test_get_port():
"""Make sure port() works."""
server = GetInfoFlightServer("grpc://localhost:0")
try:
assert server.port > 0
finally:
server.shutdown()
@pytest.mark.skipif(os.name == 'nt',
reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
"""Try a simple do_get call over a Unix domain socket."""
with tempfile.NamedTemporaryFile() as sock:
sock.close()
location = flight.Location.for_grpc_unix(sock.name)
with ConstantFlightServer(location=location):
client = FlightClient(location)
reader = client.do_get(flight.Ticket(b'ints'))
table = simple_ints_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
reader = client.do_get(flight.Ticket(b'dicts'))
table = simple_dicts_table()
assert reader.schema.equals(table.schema)
data = reader.read_all()
assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
"""Try sending/receiving a large message via Flight.
See ARROW-4421: by default, gRPC won't allow us to send messages >
4MiB in size.
"""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024 * 1024))
], names=['a'])
with EchoFlightServer(expected_schema=data.schema) as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
# Write a single giant chunk
writer.write_table(data, 10 * 1024 * 1024)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_generator_stream():
"""Try downloading a flight of RecordBatches in a GeneratorStream."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=['a'])
with EchoStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
data.schema)
writer.write_table(data)
writer.close()
result = client.do_get(flight.Ticket(b'')).read_all()
assert result.equals(data)
def test_flight_invalid_generator_stream():
"""Try streaming data with mismatched schemas."""
with InvalidStreamFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(pa.ArrowException):
client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
"""Make sure timeouts fire on slow requests."""
# Do this in a separate thread so that if it fails, we don't hang
# the entire test process
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("", b"")
options = flight.FlightCallOptions(timeout=0.2)
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightTimedOutError):
list(client.do_action(action, options=options))
def test_timeout_passes():
"""Make sure timeouts do not fire on fast requests."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
options = flight.FlightCallOptions(timeout=5.0)
client.do_get(flight.Ticket(b'ints'), options=options).read_all()
basic_auth_handler = HttpBasicServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
"""Test that auth fails when not authenticated."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*unauthenticated.*"):
list(client.do_action(action))
def test_http_basic_auth():
"""Test a Python implementation of HTTP basic authentication."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
results = client.do_action(action)
identity = next(results)
assert identity.body.to_pybytes() == b'test'
peer_address = next(results)
assert peer_address.body.to_pybytes() != b''
def test_http_basic_auth_invalid_password():
"""Test that auth fails with the wrong password."""
with EchoStreamFlightServer(auth_handler=basic_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
with pytest.raises(flight.FlightUnauthenticatedError,
match=".*wrong password.*"):
client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
next(client.do_action(action))
def test_token_auth():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
action = flight.Action("who-am-i", b"")
client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
identity = next(client.do_action(action))
assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
"""Test an auth mechanism that uses a handshake."""
with EchoStreamFlightServer(auth_handler=token_auth_handler) as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightUnauthenticatedError):
client.authenticate(TokenClientAuthHandler('test', 'wrong'))
def test_location_invalid():
"""Test constructing invalid URIs."""
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
flight.connect("%")
with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
ConstantFlightServer("%")
def test_location_unknown_scheme():
"""Test creating locations for unknown schemes."""
assert flight.Location("s3://foo").uri == b"s3://foo"
assert flight.Location("https://example.com/bar.parquet").uri == \
b"https://example.com/bar.parquet"
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
"""Make sure clients cannot connect when cert verification fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Ensure client doesn't connect when certificate verification
# fails (this is a slow test since gRPC does retry a few times)
client = FlightClient("grpc+tls://localhost:" + str(s.port))
# gRPC error messages change based on version, so don't look
# for a particular error
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints')).read_all()
@pytest.mark.requires_testing_data
def test_tls_do_get():
"""Try a simple do_get call over TLS."""
table = simple_ints_table()
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = FlightClient(('localhost', s.port),
tls_root_certs=certs["root_cert"])
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
"""Check that incorrectly overriding the hostname fails."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
override_hostname="fakehostname")
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
"""Try a simple do_get call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
batches = []
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b''))
idx = 0
while True:
try:
batch, metadata = reader.read_chunk()
batches.append(batch)
server_idx, = struct.unpack('<i', metadata.to_pybytes())
assert idx == server_idx
idx += 1
except StopIteration:
break
data = pa.Table.from_batches(batches)
assert data.equals(table)
def test_flight_do_put_metadata():
"""Try a simple do_put call with metadata."""
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
with writer:
for idx, batch in enumerate(table.to_batches(max_chunksize=1)):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
buf = metadata_reader.read()
assert buf is not None
server_idx, = struct.unpack('<i', buf.to_pybytes())
assert idx == server_idx
def test_flight_do_put_limit():
"""Try a simple do_put call with a size limit."""
large_batch = pa.RecordBatch.from_arrays([
pa.array(np.ones(768, dtype=np.int64())),
], names=['a'])
with EchoFlightServer() as server:
client = FlightClient(('localhost', server.port),
write_size_limit_bytes=4096)
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
large_batch.schema)
with writer:
with pytest.raises(flight.FlightWriteSizeExceededError,
match="exceeded soft limit") as excinfo:
writer.write_batch(large_batch)
assert excinfo.value.limit == 4096
smaller_batches = [
large_batch.slice(0, 384),
large_batch.slice(384),
]
for batch in smaller_batches:
writer.write_batch(batch)
expected = pa.Table.from_batches([large_batch])
actual = client.do_get(flight.Ticket(b'')).read_all()
assert expected == actual
@pytest.mark.slow
def test_cancel_do_get():
"""Test canceling a DoGet operation on the client side."""
with ConstantFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
reader.cancel()
with pytest.raises(flight.FlightCancelledError, match=".*Cancel.*"):
reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
"""Test canceling a DoGet operation from another thread."""
with SlowFlightServer() as server:
client = FlightClient(('localhost', server.port))
reader = client.do_get(flight.Ticket(b'ints'))
read_first_message = threading.Event()
stream_canceled = threading.Event()
result_lock = threading.Lock()
raised_proper_exception = threading.Event()
def block_read():
reader.read_chunk()
read_first_message.set()
stream_canceled.wait(timeout=5)
try:
reader.read_chunk()
except flight.FlightCancelledError:
with result_lock:
raised_proper_exception.set()
thread = threading.Thread(target=block_read, daemon=True)
thread.start()
read_first_message.wait(timeout=5)
reader.cancel()
stream_canceled.set()
thread.join(timeout=1)
with result_lock:
assert raised_proper_exception.is_set()
def test_roundtrip_types():
"""Make sure serializable types round-trip."""
ticket = flight.Ticket("foo")
assert ticket == flight.Ticket.deserialize(ticket.serialize())
desc = flight.FlightDescriptor.for_command("test")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
desc = flight.FlightDescriptor.for_path("a", "b", "test.arrow")
assert desc == flight.FlightDescriptor.deserialize(desc.serialize())
info = flight.FlightInfo(
pa.schema([('a', pa.int32())]),
desc,
[
flight.FlightEndpoint(b'', ['grpc://test']),
flight.FlightEndpoint(
b'',
[flight.Location.for_grpc_tcp('localhost', 5005)],
),
],
-1,
-1,
)
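    # Note: the two -1 arguments mark total_records and total_bytes as unknown,
    # which is the Flight convention for "totals not known ahead of time".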
info2 = flight.FlightInfo.deserialize(info.serialize())
assert info.schema == info2.schema
assert info.descriptor == info2.descriptor
assert info.total_bytes == info2.total_bytes
assert info.total_records == info2.total_records
assert info.endpoints == info2.endpoints
def test_roundtrip_errors():
"""Ensure that Flight errors propagate from server to client."""
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.do_action(flight.Action("internal", b"")))
with pytest.raises(flight.FlightTimedOutError, match=".*foo.*"):
list(client.do_action(flight.Action("timedout", b"")))
with pytest.raises(flight.FlightCancelledError, match=".*foo.*"):
list(client.do_action(flight.Action("cancel", b"")))
with pytest.raises(flight.FlightUnauthenticatedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthenticated", b"")))
with pytest.raises(flight.FlightUnauthorizedError, match=".*foo.*"):
list(client.do_action(flight.Action("unauthorized", b"")))
with pytest.raises(flight.FlightInternalError, match=".*foo.*"):
list(client.list_flights())
def test_do_put_independent_read_write():
"""Ensure that separate threads can read/write on a DoPut."""
# ARROW-6063: previously this would cause gRPC to abort when the
# writer was closed (due to simultaneous reads), or would hang
# forever.
data = [
pa.array([-10, -5, 0, 5, 10])
]
table = pa.Table.from_arrays(data, names=['a'])
with MetadataFlightServer() as server:
client = FlightClient(('localhost', server.port))
writer, metadata_reader = client.do_put(
flight.FlightDescriptor.for_path(''),
table.schema)
count = [0]
def _reader_thread():
while metadata_reader.read() is not None:
count[0] += 1
thread = threading.Thread(target=_reader_thread)
thread.start()
batches = table.to_batches(max_chunksize=1)
with writer:
for idx, batch in enumerate(batches):
metadata = struct.pack('<i', idx)
writer.write_with_metadata(batch, metadata)
# Causes the server to stop writing and end the call
writer.done_writing()
# Thus reader thread will break out of loop
thread.join()
# writer.close() won't segfault since reader thread has
# stopped
assert count[0] == len(batches)
def test_server_middleware_same_thread():
"""Ensure that server middleware run on the same thread as the RPC."""
with HeaderFlightServer(middleware={
"test": HeaderServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
results = list(client.do_action(flight.Action(b"test", b"")))
assert len(results) == 1
value = results[0].body.to_pybytes()
assert b"right value" == value
def test_middleware_reject():
"""Test rejecting an RPC with server middleware."""
with HeaderFlightServer(middleware={
"test": SelectiveAuthServerMiddlewareFactory(),
}) as server:
client = FlightClient(('localhost', server.port))
# The middleware allows this through without auth.
with pytest.raises(pa.ArrowNotImplementedError):
list(client.list_actions())
# But not anything else.
with pytest.raises(flight.FlightUnauthenticatedError):
list(client.do_action(flight.Action(b"", b"")))
client = FlightClient(
('localhost', server.port),
middleware=[SelectiveAuthClientMiddlewareFactory()]
)
response = next(client.do_action(flight.Action(b"", b"")))
assert b"password" == response.body.to_pybytes()
def test_middleware_mapping():
"""Test that middleware records methods correctly."""
server_middleware = RecordingServerMiddlewareFactory()
client_middleware = RecordingClientMiddlewareFactory()
with FlightServerBase(middleware={"test": server_middleware}) as server:
client = FlightClient(
('localhost', server.port),
middleware=[client_middleware]
)
descriptor = flight.FlightDescriptor.for_command(b"")
with pytest.raises(NotImplementedError):
list(client.list_flights())
with pytest.raises(NotImplementedError):
client.get_flight_info(descriptor)
with pytest.raises(NotImplementedError):
client.get_schema(descriptor)
with pytest.raises(NotImplementedError):
client.do_get(flight.Ticket(b""))
with pytest.raises(NotImplementedError):
writer, _ = client.do_put(descriptor, pa.schema([]))
writer.close()
with pytest.raises(NotImplementedError):
list(client.do_action(flight.Action(b"", b"")))
with pytest.raises(NotImplementedError):
list(client.list_actions())
with pytest.raises(NotImplementedError):
writer, _ = client.do_exchange(descriptor)
writer.close()
expected = [
flight.FlightMethod.LIST_FLIGHTS,
flight.FlightMethod.GET_FLIGHT_INFO,
flight.FlightMethod.GET_SCHEMA,
flight.FlightMethod.DO_GET,
flight.FlightMethod.DO_PUT,
flight.FlightMethod.DO_ACTION,
flight.FlightMethod.LIST_ACTIONS,
flight.FlightMethod.DO_EXCHANGE,
]
assert server_middleware.methods == expected
assert client_middleware.methods == expected
def test_extra_info():
with ErrorFlightServer() as server:
client = FlightClient(('localhost', server.port))
try:
list(client.do_action(flight.Action("protobuf", b"")))
assert False
except flight.FlightUnauthorizedError as e:
assert e.extra_info is not None
ei = e.extra_info
assert ei == b'this is an error message'
@pytest.mark.requires_testing_data
def test_mtls():
"""Test mutual TLS (mTLS) with gRPC."""
certs = example_tls_certs()
table = simple_ints_table()
with ConstantFlightServer(
tls_certificates=[certs["certificates"][0]],
verify_client=True,
root_certificates=certs["root_cert"]) as s:
client = FlightClient(
('localhost', s.port),
tls_root_certs=certs["root_cert"],
cert_chain=certs["certificates"][0].cert,
private_key=certs["certificates"][0].key)
data = client.do_get(flight.Ticket(b'ints')).read_all()
assert data.equals(table)
def test_doexchange_get():
"""Emulate DoGet with DoExchange."""
expected = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"get")
writer, reader = client.do_exchange(descriptor)
with writer:
table = reader.read_all()
assert expected == table
def test_doexchange_put():
"""Emulate DoPut with DoExchange."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"put")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
writer.done_writing()
chunk = reader.read_chunk()
assert chunk.data is None
expected_buf = str(len(batches)).encode("utf-8")
assert chunk.app_metadata == expected_buf
def test_doexchange_echo():
"""Try a DoExchange echo server."""
data = pa.Table.from_arrays([
pa.array(range(0, 10 * 1024))
], names=["a"])
batches = data.to_batches(max_chunksize=512)
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"echo")
writer, reader = client.do_exchange(descriptor)
with writer:
# Read/write metadata before starting data.
for i in range(10):
buf = str(i).encode("utf-8")
writer.write_metadata(buf)
chunk = reader.read_chunk()
assert chunk.data is None
assert chunk.app_metadata == buf
# Now write data without metadata.
writer.begin(data.schema)
for batch in batches:
writer.write_batch(batch)
assert reader.schema == data.schema
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata is None
# And write data with metadata.
for i, batch in enumerate(batches):
buf = str(i).encode("utf-8")
writer.write_with_metadata(batch, buf)
chunk = reader.read_chunk()
assert chunk.data == batch
assert chunk.app_metadata == buf
def test_doexchange_transform():
"""Transform a table with a service."""
data = pa.Table.from_arrays([
pa.array(range(0, 1024)),
pa.array(range(1, 1025)),
pa.array(range(2, 1026)),
], names=["a", "b", "c"])
expected = pa.Table.from_arrays([
pa.array(range(3, 1024 * 3 + 3, 3)),
], names=["sum"])
with ExchangeFlightServer() as server:
client = FlightClient(("localhost", server.port))
descriptor = flight.FlightDescriptor.for_command(b"transform")
writer, reader = client.do_exchange(descriptor)
with writer:
writer.begin(data.schema)
writer.write_table(data)
writer.done_writing()
table = reader.read_all()
assert expected == table
def test_middleware_multi_header():
"""Test sending/receiving multiple (binary-valued) headers."""
with MultiHeaderFlightServer(middleware={
"test": MultiHeaderServerMiddlewareFactory(),
}) as server:
headers = MultiHeaderClientMiddlewareFactory()
client = FlightClient(('localhost', server.port), middleware=[headers])
response = next(client.do_action(flight.Action(b"", b"")))
# The server echoes the headers it got back to us.
raw_headers = response.body.to_pybytes().decode("utf-8")
client_headers = ast.literal_eval(raw_headers)
# Don't directly compare; gRPC may add headers like User-Agent.
for header, values in MultiHeaderClientMiddleware.EXPECTED.items():
assert client_headers.get(header) == values
assert headers.last_headers.get(header) == values
@pytest.mark.requires_testing_data
def test_generic_options():
"""Test setting generic client options."""
certs = example_tls_certs()
with ConstantFlightServer(tls_certificates=certs["certificates"]) as s:
# Try setting a string argument that will make requests fail
options = [("grpc.ssl_target_name_override", "fakehostname")]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(flight.FlightUnavailableError):
client.do_get(flight.Ticket(b'ints'))
# Try setting an int argument that will make requests fail
options = [("grpc.max_receive_message_length", 32)]
client = flight.connect(('localhost', s.port),
tls_root_certs=certs["root_cert"],
generic_options=options)
with pytest.raises(pa.ArrowInvalid):
client.do_get(flight.Ticket(b'ints'))
|
Main_Program_v10.py
|
'''
Main Program v10
--- Features:
- Support for reading GPS position values through LCM
- Support for reading GPS command values through LCM
- Support for Mode change
- Support for infrared lane following - transferred to the Uno
- Support for Odometry.
- Serial interface with the Arduino has been disabled
- Fuse together odometry and GPS values to give a better
  estimate of the robot location and orientation
- Fixed speed update bug on lane following
- Add support for reading ultrasonic distance
- PID controller for keeping distance to ultrasonic implemented
- Support for lane change implemented: changing to the left and changing to the right.
------------------------------------------------------------------------------------------
- Support for cyborg mode: the robot receives information about the detected cyborg
  through LCM and attempts to move towards it while keeping a set distance to the object.
- Platooning mode implemented : Follow the Leader, Side Formation and Change of Leader
Uses:
- Uses the Arduino code - Robot_Arduino_v6
'''
#############
# Import libraries
from multiprocessing import Process, Value, Array
import numpy as np
import matplotlib.pyplot as plt
import time
import pygame
from random import randint
import lcm
from bumblee import gps_command, action_command, arduino_in, arduino_out, collision_robots, cyborg_detection, platooning
from threading import Timer, Thread
from Odometry import get_motor_state
from Robot_Helper import ThreadWithReturnValue
###############################################################
### Define all global variables used here
##############################################################
# This variable holds the raw data sent from the Arduino as a comma-separated string.
# Sample layout: the first 5 values are the infrared sensors, the next 2 the encoders,
# the next value is the motor status, and the remaining values are extras.
Data_Unpacked = "0,0,0,0,0,0,0,0,0,0,0"
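# Illustrative sketch (not called anywhere in this program): how a raw comma-separated
# packet of the shape above could be unpacked into the arrays used below. In this version
# the Arduino data actually arrives as a decoded LCM message (see arduino_dataIn), so this
# helper is for reference only.
def _unpack_raw_packet(raw):
    values = [int(v) for v in raw.split(',')]
    ir = np.array(values[0:5])        # five infrared sensor readings
    enc = np.array(values[5:7])       # left/right encoder ticks
    status = values[7]                # motor status flag
    extras = np.array(values[8:10])   # extra IO values
    return ir, enc, status, extras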
# stores all the infrared sensor values
sensors_ir = np.array([0, 0, 0, 0, 0])
# stores the encoder values: left = index 0, right = index 1
encoders = np.array([0, 0])
# store the motor status
motor_status = 0
# stores the remaining IO extra values
extra_io = np.array([0, 0])
# store the ultrasonic sensor distance being sent
sensor_distance = 0
# this variable holds the data to be sent out.
Data_toPack = '0,0,0,0,0,0'
# holds the motor speeds: left = index 0, right = index 1
motor_speeds = np.array([0, 0])
controller_speeds = [0, 0]
# variable holds the motor enable, 1 = ACTIVATE motor, 0 = Disable motor
motor_enable = 1
# stores the value for the extra IO to be sent out
extra_io_out = np.array([0, 0, 0])
#############
#############
# Button variables
button_forward = 0
button_turn = 0
button_speedUp = 0
button_speedDown = 0
currentSpeed = 0
pygame.init()
############## Odometry and GPS position related variables
# ticks variables
pre_left_ticks = 0
pre_right_ticks = 0
new_left_ticks = 0
new_right_ticks = 0
# Measured start position. (x, y, theta)
initial_state = [0, 0, 0]
# Covariance at start position: initial standard deviations of 10, 10 and 1 degree (squared below)
odo_cov = np.diag([10 ** 2, 10 ** 2, np.deg2rad(1) ** 2])
# odometry based position data
odo_pose = initial_state
# tells us if there is information from the GPS
gps_available = False
# GPS based position data
gps_pose = [0, 0, 0]
# create data history
odo_pose_stack = []
gps_pose_stack = []
final_pose_stack = []
odo_cov_stack = []
gps_cov_stack = []
final_cov_stack = []
data_logger_odo = []
data_logger_gps = []
data_logger_eky = []
###################################################
# Global readiness flag
am_i_ready = False
# handles terminating other threads and processes
global_finish = False
# used across multiple running processes
global_finish_process = Value('i', 0)
global_finish_process.value = 0
weAreDoneHere = False
# for debugging
Program_DEBUG = True
### GPS Server related variables
LINETHRESHOLD = 800
reduceFactor = 0.35
# robot mode
# mode 0 - Manual control
# mode 1 - GPS control
# mode 2 - Line Following
robot_mode = 0
# servo state
current_servo_state = False
# distanceThread
distanceThread = False
# min working speed
speed_min_robot = 35
speed_max_robot = 150
set_point = 15
#####################################
# variable for storing the insect detections
insect_detections = list()
# for handling the cyborg thread
stop_cyborg = True
cyborg_speed = [0,0]
#####################################
#Global Platooning Values
pmyId = 0
pplatoonId = 10
prole = 0
pstartStop = 0
planeId = 0
planeChange = 0
pmySpeed = 0
pcurrentLeader = 0
#robot collision data
robots_collision = list()
leaderAngle = 0
leaderDistance = 0
platoonMode = 0
myangle = 0
# sideThread
sideThread = False
side_speeds = [0, 0]
# followThread
followThread = False
# follow speed
follow_speeds = [0, 0]
# center coordinates for referencing angles from
center_x = 314
center_y = 437
# PID controller to keep the robots moving side by side
def keepAngle():
global angle_speeds
print('INSIDE KEEEP ANGLE THREADDDDDDDDDDDD')
leaderTemp=0
angleTemp=0
prevDistance=0
prevAngle=0
# the PID parameters
kp_angle = 5
ki_angle = 0
kd_angle = 1.5
integral_angle = 0
derivative_angle = 0
lasterror_angle = 0
time_change = 0.1
while angleThread:
# find the error difference
#print('myangle inside thread',myangle)
angleDifference=leaderAngle-myangle
error_angle = abs(leaderAngle-myangle)
if error_angle>300:
if(angleDifference>0):
error_angle=abs(leaderAngle-myangle-360)
else:
error_angle=abs(leaderAngle-myangle+360)
if leaderDistance > prevDistance:
#leaderTemp=1
error_angle=-1*error_angle
else:
error_angle=error_angle
if leaderAngle>prevAngle:
angleTemp=1
else:
angleTemp=2
integral_angle = (integral_angle + error_angle)
derivative_angle = (error_angle - lasterror_angle)/time_change
lasterror_angle = error_angle
pid_speed = kp_angle * error_angle + integral_angle * ki_angle + derivative_angle * kd_angle
print ('pid speed - :', pid_speed)
print ('error - :', error_angle)
print ('angle_leader - :', leaderAngle)
print ('my angle - :', myangle)
# now we need to interpolate this speed value to something that won't kill the robot
if pid_speed > 0:
sign = 1
else:
sign = -1
# need to find the max speed value and the min speed value pid results
# check if currentSpeed is low or not set
if currentSpeed <= speed_min_robot:
max_speed = speed_min_robot + 50
else:
max_speed = currentSpeed
if sign == 1:
outputSpeed = int(np.interp(pid_speed, [0, 550], [speed_min_robot, max_speed]))
else:
outputSpeed = int(np.interp(pid_speed, [-150, 0], [-abs(max_speed), -speed_min_robot]))
prevDistance=leaderDistance
prevAngle=leaderAngle
#print (outputSpeed)
# send out the speed
        # here, we stop the robot if the error difference is 5 or less
if abs(error_angle) <= 5:
angle_speeds = [0, 0]
else:
angle_speeds = [outputSpeed, outputSpeed]
print (angle_speeds)
time.sleep(time_change)
# function keeps distance to the nearest robot
# this should run as a thread when it is being called
def keepDistance_orginal():
global controller_speeds
# the PID parameters
kp = 5
ki = 0
kd = 1.5
integral = 0
derivative = 0
lasterror = 0
time_change = 0.1
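    # Control law used below (a discrete PID update with sample time time_change):
    #   error      = sensor_distance - set_point
    #   integral  += error
    #   derivative = (error - lasterror) / time_change
    #   pid_speed  = kp * error + ki * integral + kd * derivative
    # pid_speed is then mapped into the robot's usable speed band with np.interp
    # before being sent out as controller_speeds.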
while distanceThread:
# find the error difference
error = sensor_distance - set_point
integral = (integral + error)
derivative = (error - lasterror)/time_change
lasterror = error
pid_speed = kp * error + integral * ki + derivative * kd
print ('pid speed - :', pid_speed)
print ('error - :', error)
print ('distance - :', sensor_distance)
# now we need to interpolate this speed value to something that won't kill the robot
if pid_speed > 0:
sign = 1
else:
sign = -1
# need to find the max speed value and the min speed value pid results
# check if currentSpeed is low or not set
if currentSpeed <= speed_min_robot:
max_speed = speed_min_robot + 30
else:
max_speed = currentSpeed
if sign == 1:
outputSpeed = int(np.interp(pid_speed, [0, 600], [speed_min_robot, max_speed]))
else:
outputSpeed = int(np.interp(pid_speed, [-100, 0], [-abs(max_speed), -speed_min_robot]))
#print (outputSpeed)
# send out the speed
# here, we stop the robot if the error difference is less than 3
if abs(error) <= 3:
controller_speeds = [0, 0]
else:
controller_speeds = [outputSpeed, outputSpeed]
print (controller_speeds)
time.sleep(time_change)
# function keeps distance to the nearest robot
# this should run as a thread when it is being called
def sideDistance():
global side_speeds
# the PID parameters
kp = 7
ki = 0
kd = 1.5
integral = 0
derivative = 0
lasterror = 0
time_change = 0.05
print('INSIDE SIDEFORMATION', leaderDistance)
while sideThread:
# find the error difference
error = leaderDistance - 5
integral = (integral + error)
derivative = (error - lasterror)/time_change
lasterror = error
pid_speed = kp * error + integral * ki + derivative * kd
print ('pid speed - :', pid_speed)
print ('error - :', error)
print ('leader distance - :', leaderDistance)
# now we need to interpolate this speed value to something that won't kill the robot
if pid_speed > 0:
sign = 1
else:
sign = -1
# need to find the max speed value and the min speed value pid results
# check if currentSpeed is low or not set
if currentSpeed <= speed_min_robot:
max_speed = speed_min_robot + 50
else:
max_speed = currentSpeed
if sign == 1:
outputSpeed = int(np.interp(pid_speed, [0, 550], [speed_min_robot, max_speed]))
else:
outputSpeed = int(np.interp(pid_speed, [-150, 0], [-abs(max_speed), -speed_min_robot]))
#print (outputSpeed)
# send out the speed
        # here, we stop the robot if the error difference is 13 or less
if abs(error) <= 13:
side_speeds = [0, 0]
else:
side_speeds = [outputSpeed, outputSpeed]
#print (controller_speeds)
time.sleep(time_change)
# function keeps distance to the nearest robot
# this should run as a thread when it is being called
def followDistance():
global follow_speeds
# the PID parameters
kp = 5
ki = 0
kd = 1.5
integral = 0
derivative = 0
lasterror = 0
time_change = 0.05
print('INSIDE KEEPDISTANCE', leaderDistance)
while followThread:
# find the error difference
error = leaderDistance - 50
integral = (integral + error)
derivative = (error - lasterror)/time_change
lasterror = error
pid_speed = kp * error + integral * ki + derivative * kd
print ('pid speed - :', pid_speed)
print ('error - :', error)
print ('leader distance - :', leaderDistance)
# now we need to interpolate this speed value to something that won't kill the robot
if pid_speed > 0:
sign = 1
else:
sign = -1
# need to find the max speed value and the min speed value pid results
# check if currentSpeed is low or not set
if currentSpeed <= speed_min_robot:
max_speed = speed_min_robot + 50
else:
max_speed = currentSpeed
if sign == 1:
outputSpeed = int(np.interp(pid_speed, [0, 600], [speed_min_robot, max_speed]))
else:
outputSpeed = int(np.interp(pid_speed, [-150, 0], [-abs(max_speed), -speed_min_robot]))
#print (outputSpeed)
# send out the speed
        # here, we stop the robot if the error difference is 20 or less
if abs(error) <= 20:
follow_speeds = [0, 0]
else:
follow_speeds = [outputSpeed, outputSpeed]
#print (controller_speeds)
time.sleep(time_change)
# transform odometry position - transforms an odometry value into the real-world coordinates
def transformOdo(position, mode):
x, y, theta = position
    # two modes are supported here:
    # transforming to the world coordinates, or transforming back to the original coordinates
    # mode 1 transforms to the world (GPS) coordinates
if mode == 1:
newX = -1 * y
newY = x
return (newX, newY, theta)
# transform from the world (GPS) coordinates to odometry coordinates
elif mode == 2:
newX = y
newY = -1 * x
return (newX, newY, theta)
    else:
        # unknown mode: return the position unchanged
        return (x, y, theta)
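# Worked example (illustrative): transformOdo((1.0, 2.0, 0.5), mode=1) returns
# (-2.0, 1.0, 0.5), i.e. a 90-degree rotation of the x/y axes into the GPS frame;
# mode=2 applies the inverse rotation.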
# Odometry Thread
def OdoThread():
global pre_left_ticks, pre_right_ticks
# get latest encoder value
new_left_ticks = encoders[0]
new_right_ticks = encoders[1]
motor_ticks = ((new_left_ticks - pre_left_ticks), (new_right_ticks - pre_right_ticks))
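    # motor_ticks is the (left, right) tick increment since the previous odometry update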
    # gets the globally saved position data
# first transform position back to odo coordinates
pose_temp = transformOdo(odo_pose, mode=2)
# updates the latest value
pose, cov = get_motor_state(pose_temp, motor_ticks, odo_cov)
    # here we update the stored ticks of the robot
pre_left_ticks = new_left_ticks
pre_right_ticks = new_right_ticks
# odo_pose = pose
return pose, cov
# Kalman filter thread
def KalmarFilter():
    # Continuously fuses the available position-related data
# define the global variables that needs to be updated
global odo_pose, odo_cov, odo_pose_flip, odo_pose_stack, gps_pose_stack
    # This thread always runs until told to stop
print("Kalmar launched")
# get the current time for reference purpose
time_ref = time.time()
while True:
try:
if global_finish_process.value == 1:
break
# now = time.time()
# starts the odo_thread
odo_thread = ThreadWithReturnValue(name="OdometryThread", target=OdoThread)
# starts the odo thread now
odo_thread.start()
# waits for it to finish
pose, odo_cov = odo_thread.join()
# transform the value to the GPS coordinates system
odo_pose = transformOdo(pose, mode=1)
# updates data history
odo_pose_stack.append(odo_pose)
gps_pose_stack.append(gps_pose)
# wait for 20 ms
time.sleep(0.02)
except KeyboardInterrupt: # handles error
print("Exception in Kalmar")
break
def saveLogger():
    # creates the log arrays and saves them to text files
    odo_logs = np.array([(log[0], log[1], log[2], log[3], log[4], log[5], log[6], log[7], log[8], log[9], log[10], log[11], log[12]) for log in data_logger_odo])
    gps_logs = np.array([(log[0], log[1], log[2], log[3], log[4], log[5], log[6], log[7], log[8], log[9], log[10], log[11], log[12]) for log in data_logger_gps])
    eky_logs = np.array([(log[0], log[1], log[2], log[3], log[4], log[5], log[6], log[7], log[8], log[9], log[10], log[11], log[12]) for log in data_logger_eky])
    f_odo = open("odologs.txt", "wb")
    f_gps = open("gpslogs.txt", "wb")
    f_eky = open("ekylogs.txt", "wb")
    # save results
    np.savetxt(f_odo, odo_logs)
    np.savetxt(f_gps, gps_logs)
    np.savetxt(f_eky, eky_logs)
    f_odo.close()
    f_gps.close()
    f_eky.close()
# this function handles the robot lane following.
def laneFollowing():
'''
The flow is like this:
- Use the IR sensors to detect the black track.
- Check if the leftmost sensor detects a black track, if yes,
turn the robot towards the right side by some offset
- check if the rightmost sensor detects a black track, if yes,
turn the robot towards the left side by some offset
- check if both the middle three sensors are detecting the black track,
then stop the robot from moving.
- if the left most, right most, and middle sensors are not detecting anything then
- move forward.
:return:
'''
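    # A sensor reading above LINETHRESHOLD (800) is treated as "black line detected
    # under that sensor" in the checks below.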
global motor_speeds, currentSpeed
    # assign the sensor variables
leftmost, middle_left, middle_center, middle_right, rightmost = sensors_ir
if leftmost > LINETHRESHOLD:
motor_speeds = np.array([currentSpeed, int(reduceFactor*currentSpeed)])
time.sleep(0.2)
elif rightmost > LINETHRESHOLD:
motor_speeds = np.array([int(reduceFactor*currentSpeed), currentSpeed])
time.sleep(0.2)
elif middle_center > LINETHRESHOLD and middle_left > LINETHRESHOLD and middle_right > LINETHRESHOLD:
currentSpeed = 0
motor_speeds = np.array([currentSpeed, currentSpeed])
elif middle_right > LINETHRESHOLD:
motor_speeds = np.array([0, currentSpeed])
time.sleep(0.5)
elif middle_left > LINETHRESHOLD:
motor_speeds = np.array([currentSpeed, 0])
time.sleep(0.5)
else:
motor_speeds = np.array([currentSpeed, currentSpeed])
# Platooning Manager
def toPlatoon():
global prole, motor_speeds, lanechange, pcurrentLeader, platoonMode, pstartStop
#print('robot_collision',len(robots_collision))
global sideThread, leaderDistance
global followThread
#print(gps_pose)
def find_my_angle():
#print('inside following role')
constX=float(center_x/2.5)
constY=float(center_y/2.5)
# fetch my GPS position
myX=gps_pose[0]
myY=gps_pose[1]
x_diff= float(myX-constX)
y_diff=float(myY-constY)
global myangle, leaderAngle
# calculate my robot angle
myangle = np.rad2deg(np.arctan2(x_diff,y_diff))
if myangle<0:
myangle=myangle+360
#print('My angle', myangle)
if leaderAngle<0:
leaderAngle=leaderAngle+360
# debugging
# prole = 2
# pcurrentLeader = 6
    # Check the robot role: prole = 4 is leader, prole = 2 is follower
if prole==4: # leader mode
print ('leading mode')
followThread = False
sideThread = False
        # check for change-of-leader mode
if pstartStop == 22:
global extra_io_out
laneChange=1 # change lane
extra_io_out[2] = 1
motor_speeds=np.array([70, 70])
time.sleep(10)
extra_io_out[2] = 2
pstartStop = 0
if pstartStop==0:
motor_speeds=np.array([0,0])
if pmySpeed==65:
# leader speed is set, starts moving the leader
# check the given speed of the leader
if currentSpeed <= pmySpeed:
max_speed = pmySpeed + 5
else:
max_speed = currentSpeed
motor_speeds=np.array([max_speed, max_speed])
else:
motor_speeds=np.array([0,0])
if planeChange==10:
lanechange=1
if planeChange==20:
lanechange=2
# check if robot is a follower
elif prole==2:
print ('Follower mode')
# calculates leader parameters
global leaderAngle
# find the leader distance and angle data
for i in range(len(robots_collision)):
b=robots_collision[i]
if b[0]==pcurrentLeader:
leaderAngle=b[4]
leaderDistance=b[3]
break
# no leader set
if pcurrentLeader == -1:
motor_speeds = np.array([0,0])
            followThread = False
sideThread = False
print ('no leader set')
else:
#print ('leader distance - ', leaderDistance)
#print ('leader angle - ', leaderAngle)
# platoonMode = 0 is for side formation
if platoonMode == 0:
# update my robot angles and leader angle
#find_my_angle()
# disable the followThread
if followThread == True:
followThread = False
motor_speeds=np.array([0, 0])
#print ('Side formation')
if sideThread == False:
#print('inside angleThread false')
sideThread = True
sideController = Thread(target=sideDistance)
sideController.start()
#print('angle thread start')
if sideThread == True:
# update the motor speeds
motor_speeds = np.array([side_speeds[0], side_speeds[1]])
#print('side formation - motor_speeds', motor_speeds)
            # platoonMode = 1 is for following the leader on a straight path
if platoonMode == 1:
# stops the side thread if still running
sideThread=False
print ('Follow formation')
#print ('--- followThread --', followThread)
                # check for change-of-leader mode - stop follower for now
if pstartStop == 22:
motor_speeds = np.array([0, 0])
time.sleep(8)
pstartStop = 0
if followThread == False:
followThread = True
followController=Thread(target=followDistance)
followController.start()
# print('after thread start')
if followThread == True:
# update the motor speeds
motor_speeds=np.array([follow_speeds[0], follow_speeds[1]])
#print('follow formation - motor_speeds', motor_speeds)
## Correct robot angle - not working properly
def correctAngle(target_angle, threshold=4):
# reads our position
theta = odo_pose[2]
turnLeft = -1
turnRight = 1
if (target_angle < 0):
speedModifier = turnRight
else:
speedModifier = turnLeft
global motor_speeds
# we need to transform our angle to degree
theta = np.rad2deg(odo_pose[2])
goal_theta = theta + target_angle
alpha = goal_theta - theta
fixed_speed = 52
while abs(alpha) > threshold:
try:
            # update the motor speed to turn at a fixed rate
leftSpeed = speedModifier * fixed_speed
rightSpeed = -speedModifier * fixed_speed
motor_speeds = np.array([leftSpeed, rightSpeed])
            # find the current heading (converted to degrees to match goal_theta)
            theta = np.rad2deg(odo_pose[2])
print ('my theta - ',theta)
alpha = goal_theta - theta
print ('correction', alpha)
# sleep
time.sleep(0.03)
if stop_cyborg == True:
motor_speeds = np.array([0, 0])
break
except KeyboardInterrupt:
break
# stops the robot
motor_speeds = np.array([0, 0])
def cyborg_mode():
    # this function handles controlling the robot towards the detected insects
# the PID parameters
global stop_cyborg, cyborg_speed
time_change = 0.2
# setpoints to use
distance_setpoint = 20
angle_setpoint = 10
    # rate of turning
turn_rate = 0.9
forward_speed = 0.8
backward_speed = 0.7
def robot_left(turn_rate, max_speed):
# turns the robot at a rate
right_speed = int(turn_rate * max_speed)
left_speed = int(-(turn_rate * max_speed))
return [0, right_speed]
def robot_right(turn_rate, max_speed):
right_speed = int(-(turn_rate * max_speed))
left_speed = int(turn_rate * max_speed)
return [left_speed, 0]
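    # Note: both helpers pivot the robot by driving only one wheel forward;
    # the negative speed computed for the opposite wheel is currently unused.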
def robot_forward(speed_rate, max_speed):
final_speed = int(speed_rate * max_speed)
return [final_speed, final_speed]
# fetch the latest detection results
# only the first value should be used
    # check if there is data available first
if len(insect_detections) > 0:
_, insect_angle, insect_distance = insect_detections[0]
print ('now in insect mode: ',insect_detections[0])
# only follow to about 20 cm distance
error_distance = insect_distance - distance_setpoint
while abs(error_distance) > 5 and insect_angle != -1:
try:
# fetch the latest value
if len(insect_detections) == 0:
stop_cyborg = True
break
_, insect_angle, insect_distance = insect_detections[0]
error_distance = insect_distance - distance_setpoint
# breaks if nan received
if insect_angle == -1:
stop_cyborg = True
break
                # we have two goals: optimize the angle to almost zero, i.e. the robot is
                # perfectly aligned with the detected object, and reduce the distance to the object.
# conditions for operation
# if distance is greater than set point - keep going forward
# if angle is negative, turn right
# if angle is positive, turn left
# check if currentSpeed is low or not set
if currentSpeed <= speed_min_robot:
max_speed = speed_min_robot + 38
else:
max_speed = currentSpeed
# only correct orientation when going forward
if error_distance >= 5:
# checking for negative angle
if insect_angle < -(angle_setpoint) or insect_angle > angle_setpoint:
# checking for negative angle
if insect_angle < -(angle_setpoint):
print ('Turning right')
cyborg_speed = robot_right(turn_rate, 65)
# for positive angles
elif insect_angle > angle_setpoint:
print ('Turning left')
cyborg_speed = robot_left(turn_rate, 65)
else:
# we go forward
print ('going forward mode')
cyborg_speed = robot_forward(forward_speed, max_speed)
else:
print ('going backward')
cyborg_speed = robot_forward(backward_speed, -max_speed)
                # if none of the conditions above hold, we stop the robot: either the target is too close or not found
# debugging
print ('error :',insect_angle)
print ('distance to object:',insect_distance)
print ('Left and Right speed values (updated)', cyborg_speed)
if stop_cyborg == True:
break
time.sleep(time_change)
except Exception:
print ('Error in Cyborg mode')
# reset cyborg mode to being ready again
stop_cyborg = True
    # should consider stopping the robot if this loop ends
cyborg_speed = [0, 0]
# This function uses a timer interrupt or thread to continuously send a heartbeat signal
def send_heartbeat():
while True:
try:
# now = time.time()
# runs every 5 ms or 10 ms
time.sleep(0.009)
sendUpdate()
# print ("heart2: sampling time: ",time.time() - now)
if global_finish_process.value == 1:
print(" Heartbeat stopping now")
break
except KeyboardInterrupt:
break
# this needs to run continuously in a thread at a 30 - 50 ms interval
def sendUpdate():
global extra_io_out, Data_toPack
# also we send out the current data available
# build the data pack together
#print (motor_speeds, motor_enable, extra_io_out)
# updates servo value
if current_servo_state == True:
extra_io_out[0] = 150
else:
extra_io_out[0] = 300
# updates lane following based on mode 2 or 4
if robot_mode==2 or robot_mode==4:
extra_io_out[1]=99
else:
extra_io_out[1]=0
dataOut = arduino_out()
dataOut.leftspeed = motor_speeds[0]
dataOut.rightspeed = motor_speeds[1]
dataOut.motorEnable = motor_enable
dataOut.io_device1 = extra_io_out[0]
dataOut.io_device2 = extra_io_out[1]
dataOut.io_device3 = extra_io_out[2]
# send out the data pack
lc.publish("Arduino_In", dataOut.encode())
# for debugging
Data_toPack = str(dataOut.leftspeed) + ',' + str(dataOut.rightspeed) + ',' + str(dataOut.motorEnable) + ',' + str(dataOut.io_device1) + ',' + str(dataOut.io_device2) + ',' + str(dataOut.io_device3)
    # reset the lane change values being sent so it doesn't keep issuing those commands
'''
if extra_io_out[2] == 1 or extra_io_out[2] == 2:
# then reset those values
extra_io_out[2] = 0
'''
def teleOperateThread():
while True:
try:
# now = time.time()
# runs every 5 ms or 10 ms
time.sleep(0.02)
pygame.event.get()
teleOperate()
# print ("heart2: sampling time: ",time.time() - now)
if global_finish_process.value == 1:
print(" Teleoperate stopping now")
break
except KeyboardInterrupt:
break
def teleOperate():
global button_forward, button_turn, button_speedUp, button_speedDown, currentSpeed, motor_speeds, robot_mode, current_servo_state
global distanceThread, extra_io_out, stop_cyborg, sideThread, lanechange, followThread
# get the axis button if value = -1 move forward, if 1 move backward, else stop
button_forward = (joystick.get_axis(1))
# if value is -1 turn left if 1 turn right
button_turn = (joystick.get_axis(0))
# value = 1 increase speed up or down by a factor of 1
button_speedUp = joystick.get_button(9)
button_speedDown = joystick.get_button(8)
button_stop = (joystick.get_button(7))
#print ('dad - ', button_forward, button_turn, button_speedUp, button_speedDown)
# gets hat value
hats = joystick.get_hat(0)
mode_up = hats[1]
mode_left = hats[0]
# gets select and start button
button_select = joystick.get_button(10)
button_start = joystick.get_button(11)
# servo button
button_servo = joystick.get_button(0)
# lane change button
button_lane_left = joystick.get_button(13)
button_lane_right = joystick.get_button(14)
# platoon button
platoon_button = button_select
if button_servo == 1:
current_servo_state = not(current_servo_state)
time.sleep(0.1)
# check mode
if mode_up == -1:
# manual mode
robot_mode = 0
elif mode_up == 1:
# GPS controller mode ---> now cyborg mode
robot_mode = 1
elif mode_left == -1:
# auto mode
robot_mode = 2
elif mode_left == 1:
# pattern mode ---> now keeping distance mode
robot_mode = 3
# set platoon button
if platoon_button == 1:
robot_mode = 4
if robot_mode == 4:
# platooning mode
toPlatoon()
elif robot_mode == 2:
# auto mode
# send that value to the uno board
stop_cyborg = True
extra_io_out[1] = 99
# only update the motor speed manually if keep distance isn't running
if distanceThread == False:
motor_speeds = np.array([currentSpeed, currentSpeed])
else:
motor_speeds = np.array([controller_speeds[0], controller_speeds[1]])
# check for lane change buttons being pressed
if button_lane_left == 1:
extra_io_out[2] = 1
if button_lane_right == 1:
extra_io_out[2] = 2
elif robot_mode == 3:
# stop platooning related threads
followThread = False
sideThread = False
        # run the keep-distance controller as a thread and let it do its thing
if distanceThread == False:
distanceThread = True
distanceController = Thread(target=keepDistance_orginal)
distanceController.start()
#robot_mode = 2
# update the motor speed value from the controller thread
if distanceThread == True:
motor_speeds = np.array([controller_speeds[0], controller_speeds[1]])
elif robot_mode == 1:
# cyborg mode
if stop_cyborg == True:
stop_cyborg = False
cyborgController = Thread(target=cyborg_mode)
cyborgController.start()
if stop_cyborg == False:
# update the motor speed
motor_speeds = np.array([cyborg_speed[0], cyborg_speed[1]])
else:
# manual mode
distanceThread = False
stop_cyborg = True
sideThread = False
followThread = False
extra_io_out[1] = 0
if button_forward < -0.5:
# time to move forward:
speed = currentSpeed
motor_speeds = np.array([speed, speed])
elif button_forward > 0.5:
speed = -currentSpeed
motor_speeds = np.array([speed, speed])
if button_stop == 1:
currentSpeed = 0
motor_speeds = np.array([currentSpeed, currentSpeed])
if button_turn < -0.5:
# time to turn left:
speed = currentSpeed
motor_speeds = np.array([-speed, speed])
elif button_turn > 0.5:
# turn right
speed = currentSpeed
motor_speeds = np.array([speed, -speed])
if button_forward != 0 and button_speedUp == 1:
# increase the speed
currentSpeed = currentSpeed + 1
if currentSpeed >= speed_max_robot:
currentSpeed = speed_max_robot
# decreases the speed
if button_forward != 0 and button_speedDown == 1:
currentSpeed = currentSpeed - 1
if currentSpeed <= 0:
currentSpeed = 0
def lcmThread():
while True:
try:
lc.handle()
#time.sleep(0.01)
if global_finish_process.value == 1:
print(" LCM Shutting down now")
break
except KeyboardInterrupt:
break
def platoon_manager(channel,data):
if robot_mode==4:
platoon=platooning.decode(data)
global pmyId,pplatoonId,prole,pstartStop,planeId,planeChange,pmySpeed,pcurrentLeader
pmyId = platoon.myId
pplatoonId = platoon.platoonId
prole=platoon.role
pstartStop=platoon.startStop
planeId=platoon.laneId
planeChange=platoon.laneChange
pmySpeed=platoon.mySpeed
pcurrentLeader=platoon.currentLeader
def insect_manager(channel, data):
insects = cyborg_detection.decode(data)
global insect_detections
insect_detections = insects.data
#print (insect_detections)
def collison_manager(channel, data):
mgs = collision_robots.decode(data)
'''
print("Received message on channel \"%s\"" % channel)
print(msg.x, msg.y, msg.p, msg.q)
print("")
'''
robot_len = mgs.robots_len
global robots_collision
robots_collision = mgs.collision_array
    # transform the robot collision data to be able to find the location of each robot
'''
data collision structure
robot id, distance, location
0 10cm FRONT
3 20cm BACK
8 28cm LEFT
'''
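    # Illustrative note (assumption, based on how toPlatoon() indexes these entries):
    # each element b of robots_collision appears to expose b[0] = robot id,
    # b[3] = distance and b[4] = angle, e.g. the closest robot could be found with
    #   closest = min(robots_collision, key=lambda b: b[3]) if robots_collision else None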
#if robot_len > 0:
def gps_manager(channel, data):
gps = gps_command.decode(data)
'''
print("Received message on channel \"%s\"" % channel)
print(msg.x, msg.y, msg.p, msg.q)
print("")
'''
global gps_pose, gps_available
gps_pose = (gps.x, gps.y, 0)
gps_available = True
global odo_pose
### Here we will fuse together both the GPS and Odo position
### since we believe the GPS value is super accurate,
### we automatically take the values of the GPS when available
odo_pose = (gps.x, gps.y, odo_pose[2])
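    # Only the x/y position is overwritten with the GPS fix; the heading estimate
    # odo_pose[2] from odometry is kept, since no orientation is taken from the GPS message.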
def action_manager(channel, data):
action = action_command.decode(data)
'''
print("Received message on channel \"%s\"" % channel)
print(action.leftspeed, action.rightspeed)
print("")
'''
global motor_speeds, currentSpeed
# only update if in GPS mode
if robot_mode == 1:
if action.leftspeed == action.rightspeed:
            currentSpeed = action.rightspeed
motor_speeds = np.array([action.leftspeed+10, action.rightspeed+10])
elif action.leftspeed > action.rightspeed:
motor_speeds = np.array([action.leftspeed+10, action.rightspeed])
else:
motor_speeds = np.array([action.leftspeed, action.rightspeed+10])
# this function is the LCM handler for managing data coming from the sensors
def arduino_dataIn(channel, data):
# read the available data
data_available = arduino_in.decode(data)
global sensors_ir, encoders, motor_status, extra_io, Data_Unpacked, sensor_distance
# update the IR sensors values
sensors_ir = np.array([data_available.extreme_left, data_available.left, data_available.center, data_available.right, data_available.extreme_right])
# update the encoders
encoders = np.array([np.long(data_available.encoder_left), np.long(data_available.encoder_right)])
# update the motor status
motor_status = data_available.motorEnable
# update the extra IOs data
extra_io = np.array([data_available.extra_io1, data_available.extra_io2])
# update the sensor distance read
temp = data_available.distance
if temp < 450:
sensor_distance = temp
# for debugging
Data_Unpacked = str(sensors_ir) + ',' + str(encoders) + ',' + str(motor_status) + ',' + str(extra_io) + ',' + str(sensor_distance)
##################################################################################
##################################################################################
####################### Main Program Starts Here ################################
# initialize LCM
lc = lcm.LCM()
if __name__ == '__main__':
# starts the send_heartbeat timer thread
heart = Thread(name='HeartBeat', target=send_heartbeat)
    # starts the heartbeat signal
heart.setDaemon(True)
heart.start()
# initialize our joystick
# joystick config
# Initialize the joysticks.
pygame.joystick.init()
joystick = pygame.joystick.Joystick(0)
joystick.init()
# starts the send_teleoperate thread
teleOperation = Thread(name='TeleOperate', target=teleOperateThread)
    # starts the teleoperate thread
teleOperation.setDaemon(True)
teleOperation.start()
# Starts the LCM Channels for handling GPS stuffs
lc = lcm.LCM()
lc.subscribe("BumbleBee_Action", action_manager)
lc.subscribe("BumbleBee_GPS", gps_manager)
lc.subscribe("Arduino_Out", arduino_dataIn)
lc.subscribe("BumbleBee_Collision", collison_manager)
lc.subscribe("BumbleBee_Insects", insect_manager)
lc.subscribe("Platooning_Action", platoon_manager)
# starts the LCM thread
lcm_thread = Thread(name='Lcm_Manager', target=lcmThread)
# starts LCM thread
lcm_thread.setDaemon(True)
lcm_thread.start()
if Program_DEBUG:
print("LCM Thread Started ")
# needs to wait to ensure LCM has connected to the data streaming source
time.sleep(2)
# get latest encoder value
pre_left_ticks = encoders[0]
pre_right_ticks = encoders[1]
    # here we will update the initial value for the robot from the GPS position given
    # update initial odo_pose
    # waits a couple of seconds for the GPS to be ready
time.sleep(2)
if gps_available == True:
odo_pose = (gps_pose[0], gps_pose[1], odo_pose[2])
    # starts the Kalman filter thread
kalmar = Thread(name="KalmarThread", target=KalmarFilter)
kalmar.setDaemon(True)
kalmar.start()
    # delay for about 100 ms and activate the green light to signal readiness
time.sleep(0.1)
# activates green light
#extra_io_out[0] = 1
# Now I am ready
am_i_ready = True
if Program_DEBUG:
print("Now am Ready", am_i_ready)
print ()
# activates the running light
#extra_io_out[1] = 1
# running now
while am_i_ready:
try:
# let's do stuff here
time.sleep(1)
if Program_DEBUG:
print("Recieved - ", Data_Unpacked)
#print ("Odometry Position - ", odo_pose)
#print ("Sending out - ", Data_toPack)
print ("Odo Position - ", odo_pose )
print ("Robot Mode - ", robot_mode)
except KeyboardInterrupt:
print ("Stop")
global_finish_process.value = 1
global_finish = True
am_i_ready = False
break
# cleaning up stage
distanceThread = False
followThread = False
sideThread = False
heart.join()
teleOperation.join()
lcm_thread.join()
kalmar.join()
    # pause briefly before exiting
time.sleep(1)
pygame.quit()
print("odo pose - ", odo_pose)
print("GPS pose - ", gps_pose)
print("Finished")
    ####################### Plotting results ######################
pos_x = []
pos_y = []
pos_x_2 = []
pos_y_2 = []
pos_x_3 = []
pos_y_3 = []
for i in odo_pose_stack:
pos_x.append(i[0])
pos_y.append(i[1])
for i in gps_pose_stack:
pos_x_2.append(i[0])
pos_y_2.append(i[1])
#for i in final_pose_stack:
# pos_x_3.append(i[0])
# pos_y_3.append(i[1])
'''
plt.figure()
plt.plot(pos_x, pos_y, "r-", label='Odometry Movement')
plt.plot(pos_x_2, pos_y_2, "g-", label='GPS Movement')
#plt.plot(pos_x_3, pos_y_3, "b-")
#plt.plot(refscan[:, 0], refscan[:, 1], 'b.')
plt.axis("equal")
plt.grid(True)
plt.legend()
plt.show()
'''
|
wsdump.py
|
#!c:\users\connexall\desktop\welcomebot\welcomebot\scripts\python.exe
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
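# Example invocation (sketch only; the echo server URL comes from the help text above):
#   python wsdump.py ws://echo.websocket.org/ -v -t "hello"
# connects, sends "hello", and prints every received frame (with opcodes at -v).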
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
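        # ANSI escapes: clear the current line and return to column 0, print the
        # incoming message in blue, then redraw the "> " prompt for the user.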
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = map(str.strip, args.headers.split(','))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
gameserver.py
|
#!/usr/bin/python3
from engine.event import *
from engine.action import *
from engine.code import *
from engine.player import *
from engine.round import *
from engine.team import *
import configparser
from flask import Flask, json, jsonify, make_response, session, render_template, request, send_file
import json
import os
import psycopg2
import queue
class App:
app = Flask(__name__, static_url_path = "", static_folder = "www")
SESSION_TYPE = 'Redis'
app.config.from_object(__name__)
app.secret_key = "ExtraSecretSessionKey"#os.urandom(24)
# START BLOCK
# Player registration
def registration_template(error):
return render_template("registration.html", error=error)
def pending_template():
if App.logged_in():
return render_template("pending.html", user=request.cookies.get("user"), phone=request.cookies.get("phone"))
else:
return "403 Connection Forbidden"
def playing_template():
if App.logged_in():
return render_template("game_view.html")
else:
return "403 Connection Forbidden"
@app.route("/isJailed")
def jailed():
if App.logged_in():
return str(Event.isPlayerJailed(Player._getIdByName(request.cookies.get("user"))))
else:
return "403 Connection Forbidden"
def logged_in():
try:
if request.cookies.get("user") == None or request.cookies.get("web_hash") == None:
return False
else:
return True
except KeyError:
return False
@app.route("/login", methods=["GET"])
def login():
web_hash = request.args.get("hash")
phone = Player.getMobileById(Player.getIdByHash(web_hash))
user = Player.getNameById(Player.getIdByHash(web_hash))
return App.add_cookies(user, phone, web_hash)
@app.route("/")
def index():
if App.logged_in():
if not Event.isPlayerJailed(Player._getIdByName(request.cookies.get("user"))):
return App.playing_template()
return App.pending_template()
else:
return App.registration_template(" ")
    # Set HTTP headers so these files are never cached
@app.route("/events.json")
def events():
response = make_response(send_file("www/events.json"))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'
return response
@app.route("/stats.json")
def stats():
response = make_response(send_file("www/stats.json"))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'
return response
@app.route("/register", methods=["GET"])
def new_player():
user = request.args.get("user")
phone = request.args.get("phone")
if user and phone:
if Action.addPlayer(user, phone, ''):
return App.add_cookies(user, phone, Player.getHashById(Player._getIdByName(user)))
else:
return App.registration_template("Probleem registreerimisel, kontrolli sisestatud andmeid.")
else:
return App.registration_template("Mõlemad väljad on kohustuslikud.")
@app.route("/cookie")
def add_cookies(user, phone, web_hash):
try:
expire_date = datetime.datetime.now()
expire_date = expire_date + datetime.timedelta(days=1)
cookies = make_response(render_template("to_game.html"))
cookies.set_cookie("user", user, expires=expire_date)
cookies.set_cookie("phone", phone, expires=expire_date)
cookies.set_cookie("web_hash", web_hash, expires=expire_date)
return cookies
except:
return "Problem adding cookies"
@app.route("/delCookies")
def delete_cookies():
try:
cookies = make_response(render_template("to_game.html"))
cookies.set_cookie("user", "", expires=0)
cookies.set_cookie("phone", "", expires=0)
cookies.set_cookie("web_hash", "", expires=0)
return cookies
except:
return "Problem adding cookies"
@app.route("/wrongInfo")
def wrong_info():
if App.logged_in():
phone = request.args.get("phone")
if phone == request.cookies.get("phone"):
Player.delPlayer(request.cookies.get("user"))
return App.delete_cookies()
else:
return "User data preserved"
else:
return "403 Connection Forbidden"
# Player registration
# END BLOCK
# START BLOCK
# Player actions
@app.route("/flee")
def flee_jail():
fleeing_code = request.args.get("fleeingCode")
if Action.fleePlayerWithCode(fleeing_code):
return "You got out"
else:
return "Your escape failed"
@app.route("/tag")
def tag():
if App.logged_in():
tag_code = request.args.get("tagCode")
if Action.handleWeb(request.cookies.get("web_hash"), tag_code):
return "Hit"
else:
return "Your attempt to catch them failed"
else:
return "403 Connection Forbidden"
@app.route("/messageTeam", methods=["GET"])
def messageTeam():
if App.logged_in():
team_message = request.args.get("message")
player_id = Player.getIdByHash(request.cookies.get("web_hash"))
if team_message and player_id:
if Action.sayToMyTeam(player_id, team_message):
return "Message sent"
else:
return "Error sending message"
else:
return "Message missing, or invalid player info"
else:
return "403 Connection Forbidden"
# Player actions
# END BLOCK
# START BLOCK
# Getting data
@app.route("/user")
def username():
if App.logged_in():
return request.cookies.get("user")
else:
return "403 Connection Forbidden"
@app.route("/userTeam")
def user_team():
if App.logged_in():
if Team.getPlayerTeamId(Player.getIdByHash(request.cookies.get("web_hash")),Round.getActiveId()):
return str(Team.getPlayerTeamId(Player.getIdByHash(request.cookies.get("web_hash")),Round.getActiveId()))
else:
return "Player is not currently in a team"
else:
return "403 Connection Forbidden"
@app.route("/baseMessage")
def base_message():
try:
return Action.base_msg_get()["text"]
except KeyError:
return ""
@app.route("/message")
def personal_message():
if App.logged_in():
data = {}
data['jailed'] = str(Event.isPlayerJailed(Player._getIdByName(request.cookies.get("user"))))
message = Action.browserRequestsMessages(request.cookies.get("web_hash"))
data['message'] = message
return jsonify(data)
else:
return "403 Connection Forbidden"
@app.route("/teams")
def teams():
all_teams = []
for team in game_config.teams:
all_teams.append(team['name'])
return jsonify(all_teams)
# Getting data
# END BLOCK
# START BLOCK
# Spawnmaster screen
def spawn_view():
if App.is_master():
Round.updateActiveId()
players, teamless = Stats.playersDetailed()
rounds = Round.getRounds()
return render_template("spawn.html", rounds=rounds, teamless=teamless, players = players)
else:
return "403 Connection Forbidden"
def is_master():
try:
if session["master"] == 1:
return True
else:
return False
except KeyError:
return False
@app.route("/spawn")
def spawnmaster():
if App.is_master():
return App.spawn_view()
else:
return render_template("spawn_login.html")
@app.route("/masterLogin", methods=["GET"])
def master_login():
user = request.args.get("user")
password = request.args.get("pw")
if user == config['users']['spawnuser'] and \
password == config['users']['spawnpassword']:
session["master"] = 1
return App.spawnmaster()
else:
return "403 Connection Forbidden"
@app.route("/getcode", methods=["GET"])
def getcode():
# expects request /getcode?site=A
site = request.args.get("site")
if site not in ['A', 'B']:
return "403 Connection Forbidden"
code, shortcode = game.sites[site].lock()
data = {'code': code, 'shortcode': shortcode}
return jsonify(data)
@app.route("/unlock", methods=["GET"])
def unlock():
        # expects request /unlock?s=A&c=<code>
site = request.args.get("s")
code = request.args.get("c")
if site not in ['A', 'B']:
return "403 Connection Forbidden"
print(site, code)
data = {}
data['response'] = game.sites[site].unlock(code)
return jsonify(data)
@app.route("/pollsite", methods=["GET"])
def pollsite():
site = request.args.get("site")
if site not in ['A', 'B']:
return "403 Connection Forbidden"
data = {}
s = game.sites[site]
# Check if keypad was unlocked
data['lock'] = s.locked
if s.starting:
data['startround'] = True
s.starting = False
return jsonify(data)
@app.route("/masterout")
def master_logout():
if App.is_master():
session.clear()
return "Spanwmaster has logged out"
else:
return "403 Connection Forbidden"
# Spawnmaster screen
# END BLOCK
# START BLOCK
# Stats screens
@app.route("/baseLogin", methods=["GET"])
def base_login():
user = request.args.get("user")
password = request.args.get("pw")
if user == config['users']['baseuser'] and \
password == config['users']['basepassword']:
session["base"] = 1
return App.base_template()
else:
return "403 Connection Forbidden"
def is_base():
try:
if session["base"] == 1:
return True
else:
return False
except KeyError:
return False
@app.route("/base")
def base_template():
if App.is_base():
return render_template("base.html")
else:
return render_template("base_login.html")
@app.route("/spectate")
def spectator_template():
return render_template("spectate.html")
@app.route("/baseout")
def base_logout():
if App.is_base():
session.clear()
return "Basemaster has logged out"
else:
return "403 Connection Forbidden"
# Stats screens
# END BLOCK
# START BLOCK
# Spawnmaster's actions
# Adding a new round
@app.route("/addRound", methods=["GET"])
def startRound():
roundName = request.args.get("roundName")
# How many minutes does the round last
roundLength = request.args.get("roundLength")
# In how many minutes does the round begin
startsAt = request.args.get("startsAt")
try:
int(roundLength)
#int(startsIn)
except ValueError:
return "Round length and starttime has to be entered as integers."
startTime = datetime.datetime.now()
startTime = startTime.replace(hour=int(startsAt[0:2]), minute=int(startsAt[3:5]), second=0, microsecond=0)
endTime = startTime + datetime.timedelta(seconds = int(roundLength) * 60)
startTimeString = format(startTime, dateformat)
endTimeString = format(endTime, dateformat)
if not roundName or not roundLength or not startsAt:
return "Insufficient info for a new round"
else:
if Round.add(roundName, startTimeString, endTimeString):
Action.addTeamsToAllRounds()
return "New round \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
else:
return "Error: New round has overlapping time. not added: \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
# Adding player to a team in active round
@app.route("/addToTeam", methods = ["GET"])
def addToTeam():
team_name = request.args.get("teamName")
player_id = request.args.get("playerId")
if team_name and player_id:
try:
Action.addPlayerToTeam(Player.getNameById(player_id), team_name)
return "Player " + Player.getNameById(player_id) + " added to team" + team_name
except:
return "Team or player id were given as invalid values."
else:
return "Missing team or player id."
# Spawnmaster's actions
# END BLOCK
# Routes for SMS
@app.route("/sms", methods=['GET'])
def smsserver():
        # Check the shared-secret "password" sent by the SMS gateway
if request.args.get('pass') != 'avf2DA3XeJZmqy9KKVjFdGfU':
return jsonify({'error': 'error'})
# Receive incoming SMSes
incoming = json.loads(request.data.decode('utf8'))
for message in incoming['incoming']:
# Act on the message, it's something similar to
# {'number': 512314, 'contents': 'Welcome here',
# 'sent': sent, 'received': received}
#print(message)
Action.handleSms(message['number'], message['contents'])
# Mark all the old enough messages ready for SMSing
Action.messages_timeout_check()
out = []
try:
while True:
element = sms_queue.get_nowait()
out.append(element)
except queue.Empty:
pass
return jsonify({'outgoing': out})
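    # Example /sms exchange (sketch): the SMS gateway sends a JSON body such as
    #   {"incoming": [{"number": 512314, "contents": "Welcome here",
    #                  "sent": "...", "received": "..."}]}
    # and gets back {"outgoing": [...]} drained from sms_queue. The exact shape of
    # the outgoing elements depends on Action.*, which is not shown in this file.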
# Routes for printing
@app.route("/print", methods=['GET'])
def printserver():
if request.args.get('pass') != 'htpT2U8UMpApV852DGSncBP7':
return jsonify({'error': 'error'})
data = []
try:
while True:
element = printer_queue.get_nowait()
data.append(element)
except queue.Empty:
pass
return jsonify({'print': data})
if __name__ == "__main__":
# Start program
config = configparser.ConfigParser()
config.read('config.ini')
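    # Expected config.ini layout (sketch, inferred from the keys used below and in
    # the login routes; actual values are deployment specific):
    #   [users]
    #   spawnuser = ...   spawnpassword = ...
    #   baseuser = ...    basepassword = ...
    #   [database]
    #   host = ...  dbname = ...  user = ...  password = ...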
# Connect to database
try:
db = config['database']
parameters = "host='%s' dbname='%s' user='%s' password='%s'" % (
db['host'], db['dbname'], db['user'], db['password'])
connection = psycopg2.connect(parameters)
connection.set_session(autocommit=True)
cursor = connection.cursor()
except:
print ("Error. Unable to connect to the database. If losing data is acceptable, try running 'python reset_db.py'")
exit()
# Queues
sms_queue = queue.Queue()
printer_queue = queue.Queue()
game = Game(config, cursor)
Action.initAllConnect(cursor, sms_queue, printer_queue)
Round.updateActiveId()
Stats.updateStats()
Stats.printPlayersDetailed()
debug = False
if debug:
App.app.run(debug=True)
else:
import logging
from threading import Thread
from engine.cli import processInput
logging.basicConfig(filename='flask.log', level=logging.DEBUG)
appthread = Thread(target=App.app.run, args=())
appthread.setDaemon(True)
appthread.start()
while True:
processInput()
|
aria2_download.py
|
from bot import aria2, download_dict_lock, STOP_DUPLICATE_MIRROR
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.bot_utils import *
from .download_helper import DownloadHelper
from bot.helper.mirror_utils.status_utils.aria_download_status import AriaDownloadStatus
from bot.helper.telegram_helper.message_utils import *
import threading
from aria2p import API
from time import sleep
class AriaDownloadHelper(DownloadHelper):
def __init__(self):
super().__init__()
@new_thread
def __onDownloadStarted(self, api, gid):
sleep(1)
LOGGER.info(f"onDownloadStart: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
self.name = download.name
sname = download.name
gdrive = GoogleDriveHelper(None)
smsg, button = gdrive.drive_list(sname)
if STOP_DUPLICATE_MIRROR:
if smsg:
                dl.getListener().onDownloadError(f'😡 𝑭𝒊𝒍𝒆 𝒊𝒔 𝒂𝒍𝒓𝒆𝒂𝒅𝒚 𝒂𝒗𝒂𝒊𝒍𝒂𝒃𝒍𝒆 𝒊𝒏 𝑫𝒓𝒊𝒗𝒆\n𝑭𝒊𝒓𝒔𝒕 𝒔𝒆𝒂𝒓𝒄𝒉 𝑩𝒆𝒇𝒐𝒓𝒆 𝑴𝒊𝒓𝒓𝒐𝒓𝒊𝒏𝒈 𝒂𝒏𝒚𝒕𝒉𝒊𝒏𝒈 😡\n𝑰𝒇 𝒚𝒐𝒖 𝒅𝒐 𝒕𝒉𝒊𝒔 𝒂𝒈𝒂𝒊𝒏❗ 𝒀𝒐𝒖 𝒘𝒊𝒍𝒍 𝒃𝒆 𝑩𝒂𝒏𝒏𝒆𝒅 😐.\n\n')
print(dl.getListener())
sendMarkup(" 𝐇𝐞𝐫𝐞 𝐚𝐫𝐞 𝐭𝐡𝐞 𝐒𝐞𝐚𝐫𝐜𝐡 🔍 𝐑𝐞𝐬𝐮𝐥𝐭𝐬:👇👇", dl.getListener().bot, dl.getListener().update, button)
aria2.remove([download])
return
update_all_messages()
def __onDownloadComplete(self, api: API, gid):
LOGGER.info(f"onDownloadComplete: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
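        # When a magnet/metadata download completes, aria2 starts the real torrent
        # under a new gid listed in followed_by_ids; re-point the status entry so
        # progress tracking follows the new download instead of the finished stub.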
if download.followed_by_ids:
new_gid = download.followed_by_ids[0]
new_download = api.get_download(new_gid)
with download_dict_lock:
download_dict[dl.uid()] = AriaDownloadStatus(new_gid, dl.getListener())
if new_download.is_torrent:
download_dict[dl.uid()].is_torrent = True
update_all_messages()
LOGGER.info(f'Changed gid from {gid} to {new_gid}')
else:
if dl: threading.Thread(target=dl.getListener().onDownloadComplete).start()
@new_thread
def __onDownloadPause(self, api, gid):
LOGGER.info(f"onDownloadPause: {gid}")
dl = getDownloadByGid(gid)
dl.getListener().onDownloadError('Download stopped by user!🌜🌛')
@new_thread
def __onDownloadStopped(self, api, gid):
LOGGER.info(f"onDownloadStop: {gid}")
dl = getDownloadByGid(gid)
if dl: dl.getListener().onDownloadError('𝐘𝐨𝐮𝐫 𝐋𝐢𝐧𝐤 𝐢𝐬 𝐃𝐄𝐀𝐃 ❗ 😒 𝐃𝐨𝐧❜𝐭 𝐮𝐬𝐞 𝐋𝐨𝐰 𝐒𝐞𝐞𝐝𝐬 𝐓𝐨𝐫𝐫𝐞𝐧𝐭')
@new_thread
def __onDownloadError(self, api, gid):
sleep(0.5) #sleep for split second to ensure proper dl gid update from onDownloadComplete
LOGGER.info(f"onDownloadError: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
error = download.error_message
LOGGER.info(f"Download Error: {error}")
if dl: dl.getListener().onDownloadError(error)
def start_listener(self):
aria2.listen_to_notifications(threaded=True, on_download_start=self.__onDownloadStarted,
on_download_error=self.__onDownloadError,
on_download_pause=self.__onDownloadPause,
on_download_stop=self.__onDownloadStopped,
on_download_complete=self.__onDownloadComplete)
def add_download(self, link: str, path,listener):
if is_magnet(link):
download = aria2.add_magnet(link, {'dir': path})
else:
download = aria2.add_uris([link], {'dir': path})
if download.error_message: #no need to proceed further at this point
listener.onDownloadError(download.error_message)
return
with download_dict_lock:
download_dict[listener.uid] = AriaDownloadStatus(download.gid,listener)
LOGGER.info(f"Started: {download.gid} DIR:{download.dir} ")
|
ddqn_nstep_per_verup_lstm.py
|
import math, random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import visdom
import time
import os
vis = visdom.Visdom(port = 8097)
import torch.multiprocessing as mp
#win_4 = vis.line(Y=torch.tensor([0]),opts=dict(title='reward'))
import time
ttime= time.time()
def time_check(num=0):
global ttime
print(f'{num} time:{time.time()-ttime}')
ttime = time.time()
"""
+double
+dueling
+episodic mem
+nstep
+per
+image version
+frame_stack
+RND
+lstm
"""
max_shared_q_size = 5
frame_stack = 1
n_step = 5
PER_alpha = 0.9 # PER priority exponent; 0 gives uniform (non-prioritized) sampling
count_episode = False
RND_const = 0
start_frame = 1000
num_frames = 50000
batch_size =32
vis_render=True
EPS_CONST = 1
lr = 0.0006
rnd_lr = 0.00001
burn_in_len = 5
mem_size = 20000
seq_len = 7
env_id = 'CartPole-v0'
#env = gym.make(env_id)
cnn_enable=True
s_dim = 1*frame_stack
a_dim = 2
state_shape = (1,1,84,84)
import torchvision
togray = torchvision.transforms.Grayscale()
toten = torchvision.transforms.ToTensor()
resize = torchvision.transforms.Resize((84,84))
topil = torchvision.transforms.ToPILImage()
def obs_preproc(x):
xten = toten(togray(resize(topil(x))))
return xten.reshape(state_shape)
class env_cover():
def __init__(self,env_id):
self.env = gym.make(env_id)
def reset(self):
ss = self.env.reset()
#ss = np.delete(ss,[1,3])
return torch.from_numpy(ss).float().view(1,s_dim).to(dev)
#return obs_preproc(env.render(mode='rgb_array')).to(dev)
def step(self,act):
ss,rr,dd,_ = self.env.step(act)
#ss = np.delete(ss,[1,3])
return torch.from_numpy(ss).float().view(1,s_dim).to(dev),rr,dd,0
def close(self):
self.env.close()
cnn_enable = False
vis_render=False
s_dim = 2
state_shape = (1,1,s_dim)
a_dim = 3
env_id = 'MountainCar-v0'
env = env_cover(env_id)
use_cuda = False
use_cuda = torch.cuda.is_available()
dev = torch.device('cuda' if use_cuda else 'cpu')
print(dev)
import torch.utils.data
from collections import deque
class ReplayBuffer():
def __init__(self,capacity, mainQ, targetQ, shared_state):
self.win_bar = vis.bar(X=torch.rand([10]))
self.win_bar_td = vis.bar(X=torch.rand([10]))
self.count = 0
self.capacity = capacity
self.buffer = deque(maxlen= capacity)
self.mainQ = mainQ
self.targetQ= targetQ
self.shared_state = shared_state
def push(self, data ):
# [[state ,action,reward,gamma,ireward,igamma ],state_mem]
with torch.no_grad():
state = data[0].to(dev)
action = data[1].to(dev)
reward = data[2].to(dev)
gamma = data[3].to(dev)
ireward = data[4].to(dev)
igamma = data[5].to(dev)
self.mainQ.reset_state()
self.targetQ.reset_state()
mhx, mcx = self.mainQ.get_state()
thx, tcx = self.targetQ.get_state()
b_len = state.size(0)
td_loss, state_mem = calc_td(self.mainQ,self.targetQ,state,action,reward,gamma,ireward,igamma,
mhx.to(dev),mcx.to(dev),thx.to(dev),tcx.to(dev),
b_len-n_step, stored_state=True)
self.count += data[0].size(0) if not count_episode else 1
priority = []
eta = 0.9
td_loss = td_loss.view(-1)
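            # R2D2-style sequence priority: for every length-seq_len window mix the
            # max and mean absolute TD error (weighted by eta), then raise to PER_alpha.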
for i in range(len(td_loss)-seq_len):
p = (eta*td_loss[i:i+seq_len].max()+(1.-eta)*td_loss[i:i+seq_len].mean())**PER_alpha
priority.append(p)
priority = torch.stack(priority).view(-1)
# td_loss_total = sum(priority)/len(priority)
td_loss_total = priority.max()
with self.shared_state["vis"].get_lock():
vis.bar(X=td_loss.cpu().view(-1,1), win= self.win_bar_td, opts=dict(title='push td_loss'))
self.buffer.append([data,td_loss,priority,td_loss_total,state_mem])
while self.count > self.capacity:
self.count -= self.buffer.popleft()[0][0].size(0) if not count_episode else 1
def sample(self,batch_size):
weight = [self.buffer[i][3] for i in range(len(self.buffer))]
batch_epi = list(torch.utils.data.WeightedRandomSampler(torch.stack(weight),batch_size, True))
s = []
for episode_idx in batch_epi:
episode = self.buffer[episode_idx][0]
priority = self.buffer[episode_idx][2]
state_mem = self.buffer[episode_idx][4]
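            # Pick a start index inside the episode by per-step priority, then keep up
            # to burn_in_len earlier steps (plus the LSTM states stored at 'start') so
            # the recurrent state can be warmed up before computing the training loss.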
ii = list(torch.utils.data.WeightedRandomSampler(priority , 1, True))[0]
start = ii - burn_in_len if ii-burn_in_len>=0 else 0
            burn_state = episode[0][start:ii].to(dev)
mhx = state_mem[start][0].to(dev)
mcx = state_mem[start][1].to(dev)
thx = state_mem[start][2].to(dev)
tcx = state_mem[start][3].to(dev)
state =episode[0][ii:ii+seq_len+n_step]
action =episode[1][ii:ii+seq_len+n_step]
reward =episode[2][ii:ii+seq_len+n_step]
gamma =episode[3][ii:ii+seq_len+n_step]
ireward =episode[4][ii:ii+seq_len+n_step]
igamma =episode[5][ii:ii+seq_len+n_step]
            s.append([episode_idx,ii,state,action,reward,gamma,ireward,igamma,mhx,mcx, thx,tcx ,burn_state])
epi_idx,seq_idx,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, burn_state = zip(*s)
shape = (batch_size,-1)
state = torch.cat(state,1).to(dev)
action = torch.cat(action,1).to(dev)
reward = torch.cat(reward,1).to(dev)
gamma = torch.cat(gamma,1).to(dev)
ireward = torch.cat(ireward,1).to(dev)
igamma = torch.cat(igamma,1).to(dev)
epi_idx = torch.LongTensor(epi_idx).reshape(shape).to(dev)
seq_idx = torch.LongTensor(seq_idx).reshape(shape).to(dev)
mhx = torch.cat(mhx,0).reshape((batch_size,1,-1 )).to(dev)
mcx = torch.cat(mcx,0).reshape((batch_size,1,-1 )).to(dev)
thx = torch.cat(thx,0).reshape((batch_size,1,-1 )).to(dev)
tcx = torch.cat(tcx,0).reshape((batch_size,1,-1 )).to(dev)
return epi_idx,seq_idx,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, burn_state
def priority_update(self,epi_idx,seq_idx,loss):
td_array = self.buffer[epi_idx][1]
# priority = self.buffer[epi_idx][2]
# total_priority = self.buffer[epi_idx][3]
for i in range(seq_len):
td_array[seq_idx+i] = loss[i].abs()
# for i in range(seq_len):
# priority[seq_idx+i] = loss[i]
start = seq_idx-seq_len
start = start if start>=0 else 0
end = seq_idx+seq_len
end = end if end<= len(td_array)-seq_len else len(td_array)-seq_len
eta = 0.9
for i in range(start, end):
p = (eta*td_array[i:i+seq_len].max()+(1.-eta)*td_array[i:i+seq_len].mean())**PER_alpha
self.buffer[epi_idx][2][i] = p.view(-1)
self.buffer[epi_idx][3] = sum(self.buffer[epi_idx][2])/len(self.buffer[epi_idx][2])
bar = []
for i in range(len(self.buffer)):
bar.append(self.buffer[i][3])
with self.shared_state["vis"].get_lock():
vis.bar(X=torch.stack(bar), win= self.win_bar, opts=dict(title='total priority'))
def __len__(self):
return self.count
def __repr__(self):
return '\rmem size: {}/{} ' .format(self.count, self.capacity)
class Flatten(nn.Module):
def forward(self,inputs):
return inputs.view(inputs.size(0),-1)
class DQN(nn.Module):
def __init__(self, num_inputs, num_outputs, dev ):
super(DQN,self).__init__()
if cnn_enable:
size=7*7*64
self.feature = nn.Sequential(
nn.Conv2d(num_inputs,64,8,stride= 4),nn.ReLU(),
nn.Conv2d(64,64,4,stride=2),nn.ReLU(),
nn.Conv2d(64,64,3,stride=1),nn.ReLU(),
Flatten(),
nn.Linear(size,128),nn.ReLU(),
)
else :
self.feature = nn.Sequential(
nn.Linear(s_dim,128),nn.ReLU(),
)
self.lstm_size = 128
self.lstm = nn.LSTMCell(self.lstm_size, self.lstm_size)
self.advantage = nn.Sequential(
nn.Linear(self.lstm_size,128),nn.ReLU(),
nn.Linear(128,128),nn.ReLU(),
nn.Linear(128,num_outputs),
)
self.value = nn.Sequential(
nn.Linear(self.lstm_size,128),nn.ReLU(),
nn.Linear(128,128),nn.ReLU(),
nn.Linear(128,1),
)
self.iadvantage = nn.Sequential(
nn.Linear(self.lstm_size,128),nn.ReLU(),
nn.Linear(128,128),nn.ReLU(),
nn.Linear(128,num_outputs),
)
self.ivalue = nn.Sequential(
nn.Linear(self.lstm_size,128),nn.ReLU(),
nn.Linear(128,128),nn.ReLU(),
nn.Linear(128,1),
)
self.hx = None
self.cx = None
self.dev = dev
def forward(self,x):
x = self.feature(x)
if self.hx is None:
self.hx = torch.zeros((x.size(0) ,self.lstm_size)).to(self.dev)
self.cx = torch.zeros((x.size(0) ,self.lstm_size)).to(self.dev)
self.hx, self.cx = self.lstm(x , (self.hx, self.cx))
x= self.hx
adv = self.advantage(x)
val = self.value(x)
iadv = self.iadvantage(x)
ival = self.ivalue(x)
Q = val + adv - adv.mean()
iQ = ival + iadv - iadv.mean()
Qa = Q.argmax(1).view(-1,1)
iQa = iQ.argmax(1).view(-1,1)
return Q,Qa,iQ,iQa
def set_state(self, hx, cx):
self.hx = hx
self.cx = cx
def reset_state(self):
self.hx = None
self.cx = None
def get_state(self):
if self.hx is None:
return torch.zeros((1 ,self.lstm_size)).to(self.dev), torch.zeros((1 ,self.lstm_size)).to(self.dev)
else:
return self.hx.detach(), self.cx.detach()
class RND(nn.Module):
def __init__(self,num_inputs):
super(RND,self).__init__()
self.target= nn.Sequential(
nn.Conv2d(num_inputs*2,64,8,stride=4),nn.ReLU(),
nn.Conv2d(64,64,4,stride=2),nn.ReLU(),
nn.Conv2d(64,64,3,stride=1),nn.ReLU(),
Flatten(),
nn.Linear(64*7*7,128),nn.ReLU(),
nn.Linear(128,128),
)
self.predictor = nn.Sequential(
nn.Conv2d(num_inputs*2,64,8,stride=4),nn.ReLU(),
nn.Conv2d(64,64,4,stride=2),nn.ReLU(),
nn.Conv2d(64,64,3,stride=1),nn.ReLU(),
Flatten(),
nn.Linear(64*7*7,128),nn.ReLU(),
nn.Linear(128,128),nn.ReLU(),
nn.Linear(128,128),
)
for m in self.modules():
if isinstance(m,nn.Linear):
nn.init.orthogonal_(m.weight,np.sqrt(2))
m.bias.data.zero_()
for param in self.target.parameters():
param.requires_grad =False
def forward(self, obs, next_obs):
Tobs = torch.cat([obs,next_obs],dim=1)
target_feature = self.target(Tobs)
predict_feature = self.predictor(Tobs)
return predict_feature*RND_const, target_feature*RND_const
def update_target(tar,cur):
tar.load_state_dict(cur.state_dict())
def calc_td(main_model,target_model,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, story_len, stored_state =False):
y_t_hat = []
iy_t_hat = []
state_mem = []
with torch.no_grad():
main_model.set_state(mhx,mcx)
target_model.set_state(thx,tcx)
if stored_state:
mhx,mcx = main_model.get_state()
thx,tcx = target_model.get_state()
state_mem.append([mhx,mcx,thx,tcx])
for i in range(n_step):
_,_,_,_ = main_model(state[i])
_,_,_,_ = target_model(state[i])
if stored_state:
mhx,mcx = main_model.get_state()
thx,tcx = target_model.get_state()
state_mem.append([mhx,mcx,thx,tcx])
for i in range(story_len):
qv,_,iqv,_ = main_model(state[i+n_step])
_,tqa,_,tiqa = target_model(state[i+n_step])
if stored_state:
mhx,mcx = main_model.get_state()
thx,tcx = target_model.get_state()
state_mem.append([mhx,mcx,thx,tcx])
y_t_hat.append(reward[i] + (gamma[i+n_step]**n_step)*qv.gather(1,tqa))
iy_t_hat.append(ireward[i] + (igamma[i+n_step]**n_step)*iqv.gather(1,tiqa))
losses=[]
main_model.reset_state()
target_model.reset_state()
main_model.set_state(mhx,mcx)
target_model.set_state(thx,tcx)
for i in range(story_len):
q,_,iq,_ = main_model(state[i])
td = q.gather(1,action[i]) - y_t_hat[i]
itd = iq.gather(1,action[i]) - iy_t_hat[i]
losses.append(td+itd)
return torch.cat(losses,1).abs(), state_mem
def actor_process(a_id,num_frames,shared_state,shared_queue,block=True, eps=0.1):
print(f'#{a_id} start')
win_epsil = vis.line(Y=torch.tensor([0]),opts=dict(title='epsilon'+str(a_id)))
win_r = vis.line(Y=torch.tensor([0]),opts=dict(title='reward'+str(a_id)))
win_exp_q = vis.line(Y=torch.tensor([0]),opts=dict(title='exp_q'+str(a_id)))
mainQ = DQN(s_dim, a_dim, dev ).to(dev)
rnd_model = RND(s_dim).to(dev)
mainQ.load_state_dict(shared_state["mainQ"].state_dict())
episode_reward=0
local_mem = []
epsilon = 1
state_mem = []
done = True
gamma = 0.997
state = env.reset()
q_val=[]
for frame_idx in range(num_frames):
if done:
if len(local_mem)!=0:
with shared_state["vis"].get_lock():
vis.line(X=torch.tensor([frame_idx]), Y=torch.tensor([episode_reward]), win = win_r, update='append')
# vis.line(X=torch.tensor([frame_idx]), Y=torch.tensor([epsilon]), win = win_epsil, update='append')
vis.line(Y=torch.cat(q_val,0), win= win_exp_q, opts=dict(title='exp_q'+str(a_id)))
for i in range(n_step):
local_mem.append([torch.zeros(state.size()).to(dev),0,0,0,0,0])
# for i in range(len(local_mem)-n_step):
# local_mem[i][5] = 0.99 if local_mem[i][3]!=0 else 0
# state = local_mem[i][0]
# next_state = local_mem[i+n_step][0]
#
## state = torch.cat([local_mem[j if j>=0 else 0][0] for j in range(i-frame_stack+1,i+1)],1)
## next_state = torch.cat([local_mem[j if j>=0 else 0][0] for j in range(i-frame_stack+1+n_step,i+1+n_step)],1)
# pred , targ = rnd_model(state.to(dev),next_state.to(dev))
# i_reward = ((pred-targ)**2).mean().item()
# local_mem[i][4] = i_reward
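                # n-step return: R_t = sum_{j=0..n_step-1} gamma_{t+j}^j * r_{t+j},
                # using each transition's stored per-step discount local_mem[i+j][3].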
for i in range(len(local_mem)-n_step):
local_mem[i][2] = sum([local_mem[i+j][2] *(local_mem[i+j][3]**j) for j in range(n_step)])
# local_mem[i][4] = sum([local_mem[i+j][4] *(0.99**j) for j in range(n_step)])
# ll = []
# for i in range(len(local_mem)-n_step):
# ll.append(local_mem[i][4])
# win_ir = vis.line(Y=torch.tensor(ll),win= win_ir)
with torch.no_grad():
mainQ.reset_state()
# targetQ.reset_state()
mhx,mcx= mainQ.get_state()
# thx,tcx= targetQ.get_state()
state,action,reward,gamma,ireward,igamma = zip(*local_mem)
b_len = len(local_mem)
state = torch.stack(state)
action = torch.LongTensor(action).reshape((b_len,1,1))
reward = torch.Tensor(reward).reshape((b_len,1,1))
gamma = torch.Tensor(gamma).reshape((b_len,1,1))
ireward = torch.Tensor(ireward).reshape((b_len,1,1))
igamma = torch.Tensor(igamma).reshape((b_len,1,1))
blocking = True if shared_queue.qsize()>max_shared_q_size and block else False
shared_queue.put([state.cpu() ,action,reward,gamma,ireward,igamma ],block=blocking)
while True:
with shared_state["wait"].get_lock():
if shared_state["wait"].value > 0:
shared_state["wait"].value -=1
break
time.sleep(0.01)
if block == False:
return 0
state = env.reset()
episode_reward=0
gamma = 0.997
local_mem = []
state_mem = []
mainQ.reset_state()
# targetQ.reset_state()
q_val = []
# epsilon= 0.01**(EPS_CONST*frame_idx/num_frames)
epsilon= eps
with torch.no_grad():
mhx,mcx = mainQ.get_state()
# thx,tcx = targetQ.get_state()
# state_mem.append([mhx,mcx,thx,tcx])
# state_mem.append([mhx,mcx])
qv,qa,iqv,iqa = mainQ(state)
# _,_,_,_ = targetQ(state)
action = qa.item() if random.random() > epsilon else random.randrange(a_dim)
q_val.append(qv.detach())
# if vis_render:
# vis.image(state.view(84,84),win = win_img)
next_state , reward, done ,_ = env.step(action)
local_mem.append([state, action ,reward, gamma, 0 , 0])
state = next_state
episode_reward += reward
if shared_state["update"][a_id]:
mainQ.load_state_dict(shared_state["mainQ"].state_dict())
# targetQ.load_state_dict(shared_state["targetQ"].state_dict())
shared_state["update"][a_id]=False
print('actor_update',mainQ.value[0].weight[0][0:5].detach())
print('done')
env.close()
def learner_process(max_id,num_frames,shared_state,shared_queue,block=True):
try:
win_ir = vis.line(Y=torch.tensor([0]),opts=dict(title='ireward'))
win_l0 = vis.line(Y=torch.tensor([0]),opts=dict(title='loss'))
win_l1 = vis.line(Y=torch.tensor([0]),opts=dict(title='rnd_loss'))
mainQ = DQN(s_dim, a_dim, dev ).to(dev)
targetQ = DQN(s_dim, a_dim, dev ).to(dev)
rnd_model = RND(s_dim).to(dev)
mainQ.load_state_dict(shared_state["mainQ"].state_dict())
targetQ.load_state_dict(shared_state["targetQ"].state_dict())
optimizer = optim.Adam(mainQ.parameters(),lr)
rnd_optimizer = optim.Adam(rnd_model.parameters(),rnd_lr)
replay_buffer = ReplayBuffer(mem_size,mainQ , targetQ,shared_state)
def soft_update(target_model, model, tau):
for target_param, param in zip(target_model.parameters(), model.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def update():
epi_idx,seq_idx,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx, burn_state = replay_buffer.sample(batch_size)
burned_hx = []
burned_cx = []
burned_thx = []
burned_tcx = []
with torch.no_grad():
for i in range(batch_size):
mainQ.reset_state()
targetQ.reset_state()
mainQ.set_state(mhx[i],mcx[i])
targetQ.set_state(thx[i],tcx[i])
for j in range(len(burn_state[i])):
_,_,_,_ = mainQ(burn_state[i][j])
_,_,_,_ = targetQ(burn_state[i][j])
t_mhx,t_mcx = mainQ.get_state()
burned_hx.append(t_mhx)
burned_cx.append(t_mcx)
t_thx,t_tcx = targetQ.get_state()
burned_thx.append(t_thx)
burned_tcx.append(t_tcx)
mhx = torch.cat(burned_hx,0).to(dev)
mcx = torch.cat(burned_cx,0).to(dev)
thx = torch.cat(burned_thx,0).to(dev)
tcx = torch.cat(burned_tcx,0).to(dev)
loss,_ = calc_td(mainQ,targetQ,state, action, reward,gamma,ireward,igamma,mhx,mcx, thx,tcx,seq_len)
optimizer.zero_grad()
loss.pow(2).mean().backward()
optimizer.step()
# pm,tm = rnd_model(state,nstate)
# rnd_loss = ((pm-tm)**2).mean()
# rnd_optimizer.zero_grad()
# rnd_loss.backward()
# rnd_optimizer.step()
for i in range(len(epi_idx)):
replay_buffer.priority_update(epi_idx[i],seq_idx[i],loss[i].detach())
return loss.pow(2).mean().item(),0
# if len(replay_buffer)==0:
if block==False:
if shared_queue.qsize()<2 :
print('return shared q size > 2 ')
return 0
data = shared_queue.get(block=True)
replay_buffer.push(data)
while len(replay_buffer) < start_frame and block:
data = shared_queue.get(block=True)
replay_buffer.push(data)
print(repr(replay_buffer),end='\r')
for frame_idx in range(num_frames):
print(repr(replay_buffer),end='\r')
if shared_queue.qsize()!=0:
# while shared_queue.qsize() != 0:
data = shared_queue.get()
replay_buffer.push(data)
loss, rnd_loss = update()
print(f'#learner l:{loss:.5f}')
with shared_state["vis"].get_lock():
vis.line(X=torch.tensor([frame_idx]),Y=torch.tensor([loss]),win=win_l0,update ='append')
vis.line(X=torch.tensor([frame_idx]),Y=torch.tensor([rnd_loss]),win=win_l1,update ='append')
with shared_state["wait"].get_lock():
shared_state["wait"].value +=1
if frame_idx % 4 == 0:
# if random.random() < 1/10 :
soft_update(targetQ,mainQ,0.3)
# update_target(targetQ,mainQ)
if frame_idx % 3 == 0:
# if random.random() < 1/20 :
shared_state["mainQ"].load_state_dict(mainQ.state_dict())
shared_state["targetQ"].load_state_dict(targetQ.state_dict())
for i in range(max_id):
shared_state["update"][i]=True
if block == False:
return 0
except Exception as e:
print(e)
if __name__ == '__main__':
os.system('cls')
vis.close()
num_processes = 2
shared_queue = mp.Queue()
shared_state = dict()
shared_state["mainQ"] = DQN(s_dim, a_dim, dev ).share_memory()
shared_state["targetQ"] = DQN(s_dim, a_dim, dev ).share_memory()
shared_state["update"] = mp.Array('i', [0 for i in range(num_processes)])
# shared_state["wait"] = mp.Array('i', [0 for i in range(num_processes)])
shared_state["vis"] = mp.Value('i',0)
shared_state["wait"] = mp.Value('i',0)
shared_state["wait"].value = start_frame//10
# for i in range(100):
# actor_process(0,num_frames,shared_state,shared_queue,False)
# actor_process(0,num_frames,shared_state,shared_queue,False)
# learner_process(1,num_frames,shared_state,shared_queue,False)
# time.sleep(10)
##
proc_list = []
proc_list.append(mp.Process(target=learner_process, args=(num_processes,num_frames,shared_state,shared_queue)))
eps = [0.1,0.2,0.4,0.3,0.2,0.6,0.4,0.6,0.2,0.4]
for i in range(num_processes):
        proc_list.append(mp.Process(target=actor_process, args=(i, num_frames, shared_state, shared_queue), kwargs={"eps": eps[i]}))
for proc in proc_list:
proc.start()
try:
for proc in proc_list:
proc.join()
except:
print('qclose')
shared_queue.close()
print('process close')
for proc in proc_list:
proc.terminate()
shared_queue.join_thread()
|
basic_gpu_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = sess.run(out)
with self.cached_session(use_gpu=False) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = sess.run(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64)
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64)
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.cached_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = sess.run(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
data_gt_1 = data + 2 # for x > 1
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.cosh, math_ops.cosh, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sinh, math_ops.sinh, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
self._compare(data, np.arctanh, math_ops.atanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
np_out = np.floor_divide(x, y + 0.1)
with self.session(use_gpu=True) as sess:
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y + 0.1)
ofunc = inx / iny
out_func2 = math_ops.floor(ofunc)
tf_out = sess.run(out_func2)
self.assertAllClose(np_out, tf_out)
class BroadcastSimpleTest(test.TestCase):
def _GetGradientArgs(self, xs, ys):
with self.cached_session(use_gpu=True) as sess:
return sess.run(broadcast_gradient_args(xs, ys))
def testBroadcast(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
_GRAD_TOL = {dtypes.float32: 1e-3}
def _compareGradientX(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
xs = list(x.shape)
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, out, zs, x_init_value=x)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGradientY(self,
x,
y,
np_func,
tf_func,
numeric_gradient_type=None):
z = np_func(x, y)
zs = list(z.shape)
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
if x.dtype in (np.float32, np.float64):
out = 1.1 * tf_func(inx, iny)
else:
out = tf_func(inx, iny)
ys = list(np.shape(y))
jacob_t, jacob_n = gradient_checker.compute_gradient(
iny, ys, out, zs, x_init_value=y)
tol = self._GRAD_TOL[dtypes.as_dtype(x.dtype)]
self.assertAllClose(jacob_t, jacob_n, rtol=tol, atol=tol)
def _compareGpu(self, x, y, np_func, tf_func):
np_ans = np_func(x, y)
with self.cached_session(use_gpu=True):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = out.eval()
self.assertAllClose(np_ans, tf_gpu)
self.assertShapeEqual(np_ans, out)
# TODO(zhifengc/ke): make gradient checker work on GPU.
def testGradient(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
y = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(
[1, 3, 2])
self._compareGradientX(x, y, np.true_divide, math_ops.truediv)
self._compareGradientY(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y, np.true_divide, math_ops.truediv)
self._compareGpu(x, y + 0.1, np.floor_divide, math_ops.floordiv)
class GpuMultiSessionMemoryTest(test_util.TensorFlowTestCase):
"""Tests concurrent sessions executing on the same GPU."""
def _run_session(self, session, results):
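    # Runs a small fixed-seed matmul graph many times in the given session and
    # records every scalar result in `results`; concurrent sessions should all
    # produce the same single value if GPU execution is deterministic here.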
n_iterations = 500
with session as s:
data = variables.Variable(1.0)
with ops.device('/device:GPU:0'):
random_seed.set_random_seed(1)
matrix1 = variables.Variable(
random_ops.truncated_normal([1024, 1]), name='matrix1')
matrix2 = variables.Variable(
random_ops.truncated_normal([1, 1024]), name='matrix2')
x1 = math_ops.multiply(data, matrix1, name='x1')
x3 = math_ops.matmul(x1, math_ops.matmul(matrix2, matrix1))
x4 = math_ops.matmul(array_ops.transpose(x3), x3, name='x4')
s.run(variables.global_variables_initializer())
for _ in xrange(n_iterations):
value = s.run(x4)
results.add(value.flat[0])
if len(results) != 1:
break
def testConcurrentSessions(self):
n_threads = 4
threads = []
results = []
for _ in xrange(n_threads):
session = self.session(graph=ops.Graph(), use_gpu=True)
results.append(set())
args = (session, results[-1])
threads.append(threading.Thread(target=self._run_session, args=args))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
flat_results = set([x for x in itertools.chain(*results)])
self.assertEqual(1,
len(flat_results),
'Expected single value, got %r' % flat_results)
if __name__ == '__main__':
test.main()
|
smoother.py
|
"""this is a prototype ensemble smoother based on the LM-EnRML
algorithm of Chen and Oliver 2013. It requires the pest++ "sweep" utility
to propagate the ensemble forward.
"""
from __future__ import print_function, division
import os
from datetime import datetime
import shutil
import threading
import time
import numpy as np
import pandas as pd
import pyemu
from pyemu.en import ParameterEnsemble,ObservationEnsemble
from pyemu.mat import Cov,Matrix
from pyemu.pst import Pst
from .logger import Logger
class EnsembleMethod(object):
"""Base class for ensemble-type methods. Should not be instantiated directly
Parameters
----------
pst : pyemu.Pst or str
a control file instance or filename
parcov : pyemu.Cov or str
a prior parameter covariance matrix or filename. If None,
parcov is constructed from parameter bounds (diagonal)
obscov : pyemu.Cov or str
a measurement noise covariance matrix or filename. If None,
obscov is constructed from observation weights.
num_slaves : int
        number of slaves to use in (local machine) parallel evaluation of the parameter
ensemble. If 0, serial evaluation is used. Ignored if submit_file is not None
submit_file : str
the name of a HTCondor submit file. If not None, HTCondor is used to
evaluate the parameter ensemble in parallel by issuing condor_submit
as a system command
port : int
the TCP port number to communicate on for parallel run management
slave_dir : str
path to a directory with a complete set of model files and PEST
interface files
"""
def __init__(self,pst,parcov=None,obscov=None,num_slaves=0,use_approx_prior=True,
submit_file=None,verbose=False,port=4004,slave_dir="template"):
self.logger = Logger(verbose)
if verbose is not False:
self.logger.echo = True
self.num_slaves = int(num_slaves)
if submit_file is not None:
if not os.path.exists(submit_file):
self.logger.lraise("submit_file {0} not found".format(submit_file))
elif num_slaves > 0:
if not os.path.exists(slave_dir):
self.logger.lraise("template dir {0} not found".format(slave_dir))
self.slave_dir = slave_dir
self.submit_file = submit_file
self.port = int(port)
self.paren_prefix = ".parensemble.{0:04d}.csv"
self.obsen_prefix = ".obsensemble.{0:04d}.csv"
if isinstance(pst,str):
pst = Pst(pst)
assert isinstance(pst,Pst)
self.pst = pst
self.sweep_in_csv = pst.pestpp_options.get("sweep_parameter_csv_file","sweep_in.csv")
self.sweep_out_csv = pst.pestpp_options.get("sweep_output_csv_file","sweep_out.csv")
if parcov is not None:
assert isinstance(parcov,Cov)
else:
parcov = Cov.from_parameter_data(self.pst)
if obscov is not None:
assert isinstance(obscov,Cov)
else:
obscov = Cov.from_observation_data(pst)
self.parcov = parcov
self.obscov = obscov
# if restart_iter > 0:
# self.restart_iter = restart_iter
# paren = self.pst.filename+self.paren_prefix.format(restart_iter)
# assert os.path.exists(paren),\
# "could not find restart par ensemble {0}".format(paren)
# obsen0 = self.pst.filename+self.obsen_prefix.format(0)
# assert os.path.exists(obsen0),\
# "could not find restart obs ensemble 0 {0}".format(obsen0)
# obsen = self.pst.filename+self.obsen_prefix.format(restart_iter)
# assert os.path.exists(obsen),\
# "could not find restart obs ensemble {0}".format(obsen)
# self.restart = True
self.__initialized = False
self.iter_num = 0
self.raw_sweep_out = None
@property
def current_phi(self):
""" the current phi vector
Returns
-------
current_phi : pandas.DataFrame
the current phi vector as a pandas dataframe
"""
return pd.DataFrame(data={"phi":self._calc_phi_vec(self.obsensemble)},\
index=self.obsensemble.index)
@property
def current_actual_phi(self):
return self.obsensemble.phi_vector
def initialize(self,*args,**kwargs):
raise Exception("EnsembleMethod.initialize() must be implemented by the derived types")
def _calc_delta(self,ensemble,scaling_matrix=None):
'''
calc the scaled ensemble differences from the mean
'''
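        # Anomaly matrix: each realization's deviation from the ensemble mean; if a
        # scaling matrix is given, the transposed deviations are pre-multiplied by it.
        # The result is scaled by 1/sqrt(N - 1) so its outer product approximates the
        # (scaled) ensemble covariance.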
mean = np.array(ensemble.mean(axis=0))
delta = ensemble.as_pyemu_matrix()
for i in range(ensemble.shape[0]):
delta.x[i,:] -= mean
if scaling_matrix is not None:
delta = scaling_matrix * delta.T
delta *= (1.0 / np.sqrt(float(ensemble.shape[0] - 1.0)))
return delta
def _calc_obs(self,parensemble):
self.logger.log("removing existing sweep in/out files")
try:
os.remove(self.sweep_in_csv)
except Exception as e:
self.logger.warn("error removing existing sweep in file:{0}".format(str(e)))
try:
os.remove(self.sweep_out_csv)
except Exception as e:
self.logger.warn("error removing existing sweep out file:{0}".format(str(e)))
self.logger.log("removing existing sweep in/out files")
if parensemble.isnull().values.any():
parensemble.to_csv("_nan.csv")
self.logger.lraise("_calc_obs() error: NaNs in parensemble (written to '_nan.csv')")
if self.submit_file is None:
self._calc_obs_local(parensemble)
else:
self._calc_obs_condor(parensemble)
# make a copy of sweep out for restart purposes
# sweep_out = str(self.iter_num)+"_raw_"+self.sweep_out_csv
# if os.path.exists(sweep_out):
# os.remove(sweep_out)
# shutil.copy2(self.sweep_out_csv,sweep_out)
self.logger.log("reading sweep out csv {0}".format(self.sweep_out_csv))
failed_runs,obs = self._load_obs_ensemble(self.sweep_out_csv)
self.logger.log("reading sweep out csv {0}".format(self.sweep_out_csv))
self.total_runs += obs.shape[0]
self.logger.statement("total runs:{0}".format(self.total_runs))
return failed_runs,obs
def _load_obs_ensemble(self,filename):
if not os.path.exists(filename):
self.logger.lraise("obsensemble file {0} does not exists".format(filename))
obs = pd.read_csv(filename)
obs.columns = [item.lower() for item in obs.columns]
self.raw_sweep_out = obs.copy() # save this for later to support restart
assert "input_run_id" in obs.columns,\
"'input_run_id' col missing...need newer version of sweep"
obs.index = obs.input_run_id
failed_runs = None
if 1 in obs.failed_flag.values:
failed_runs = obs.loc[obs.failed_flag == 1].index.values
self.logger.warn("{0} runs failed (indices: {1})".\
format(len(failed_runs),','.join([str(f) for f in failed_runs])))
obs = ObservationEnsemble.from_dataframe(df=obs.loc[:,self.obscov.row_names],
pst=self.pst)
if obs.isnull().values.any():
self.logger.lraise("_calc_obs() error: NaNs in obsensemble")
return failed_runs, obs
def _get_master_thread(self):
master_stdout = "_master_stdout.dat"
master_stderr = "_master_stderr.dat"
def master():
try:
#os.system("sweep {0} /h :{1} 1>{2} 2>{3}". \
# format(self.pst.filename, self.port, master_stdout, master_stderr))
pyemu.helpers.run("sweep {0} /h :{1} 1>{2} 2>{3}". \
format(self.pst.filename, self.port, master_stdout, master_stderr))
except Exception as e:
self.logger.lraise("error starting condor master: {0}".format(str(e)))
with open(master_stderr, 'r') as f:
err_lines = f.readlines()
if len(err_lines) > 0:
self.logger.warn("master stderr lines: {0}".
format(','.join([l.strip() for l in err_lines])))
master_thread = threading.Thread(target=master)
master_thread.start()
time.sleep(2.0)
return master_thread
def _calc_obs_condor(self,parensemble):
self.logger.log("evaluating ensemble of size {0} with htcondor".\
format(parensemble.shape[0]))
parensemble.to_csv(self.sweep_in_csv)
master_thread = self._get_master_thread()
condor_temp_file = "_condor_submit_stdout.dat"
condor_err_file = "_condor_submit_stderr.dat"
self.logger.log("calling condor_submit with submit file {0}".format(self.submit_file))
try:
os.system("condor_submit {0} 1>{1} 2>{2}".\
format(self.submit_file,condor_temp_file,condor_err_file))
except Exception as e:
self.logger.lraise("error in condor_submit: {0}".format(str(e)))
self.logger.log("calling condor_submit with submit file {0}".format(self.submit_file))
time.sleep(2.0) #some time for condor to submit the job and echo to stdout
condor_submit_string = "submitted to cluster"
with open(condor_temp_file,'r') as f:
lines = f.readlines()
self.logger.statement("condor_submit stdout: {0}".\
format(','.join([l.strip() for l in lines])))
with open(condor_err_file,'r') as f:
err_lines = f.readlines()
if len(err_lines) > 0:
self.logger.warn("stderr from condor_submit:{0}".\
format([l.strip() for l in err_lines]))
cluster_number = None
for line in lines:
if condor_submit_string in line.lower():
cluster_number = int(float(line.split(condor_submit_string)[-1]))
if cluster_number is None:
self.logger.lraise("couldn't find cluster number...")
self.logger.statement("condor cluster: {0}".format(cluster_number))
master_thread.join()
self.logger.statement("condor master thread exited")
self.logger.log("calling condor_rm on cluster {0}".format(cluster_number))
os.system("condor_rm cluster {0}".format(cluster_number))
self.logger.log("calling condor_rm on cluster {0}".format(cluster_number))
self.logger.log("evaluating ensemble of size {0} with htcondor".\
format(parensemble.shape[0]))
def _calc_obs_local(self,parensemble):
'''
propagate the ensemble forward using sweep.
'''
self.logger.log("evaluating ensemble of size {0} locally with sweep".\
format(parensemble.shape[0]))
parensemble.to_csv(self.sweep_in_csv)
if self.num_slaves > 0:
master_thread = self._get_master_thread()
pyemu.utils.start_slaves(self.slave_dir,"sweep",self.pst.filename,
self.num_slaves,slave_root='..',port=self.port)
master_thread.join()
else:
os.system("sweep {0}".format(self.pst.filename))
self.logger.log("evaluating ensemble of size {0} locally with sweep".\
format(parensemble.shape[0]))
def _calc_phi_vec(self,obsensemble):
obs_diff = self._get_residual_matrix(obsensemble)
q = np.diagonal(self.obscov_inv_sqrt.get(row_names=obs_diff.col_names,col_names=obs_diff.col_names).x)
phi_vec = []
for i in range(obs_diff.shape[0]):
o = obs_diff.x[i,:]
phi_vec.append(((obs_diff.x[i,:] * q)**2).sum())
return np.array(phi_vec)
def _phi_report(self,phi_csv,phi_vec,cur_lam):
#print(phi_vec.min(),phi_vec.max())
phi_csv.write("{0},{1},{2},{3},{4},{5},{6},".format(self.iter_num,
self.total_runs,
cur_lam,
phi_vec.min(),
phi_vec.max(),
phi_vec.mean(),
np.median(phi_vec),
phi_vec.std()))
#[print(phi) for phi in phi_vec]
phi_csv.write(",".join(["{0:20.8}".format(phi) for phi in phi_vec]))
phi_csv.write("\n")
phi_csv.flush()
# def _phi_report(self,phi_vec,cur_lam):
# self.phi_csv.write("{0},{1},{2},{3},{4},{5},{6}".format(self.iter_num,
# self.total_runs,
# cur_lam,
# phi_vec.min(),
# phi_vec.max(),
# phi_vec.mean(),
# np.median(phi_vec),
# phi_vec.std()))
# self.phi_csv.write(",".join(["{0:20.8}".format(phi) for phi in phi_vec]))
# self.phi_csv.write("\n")
# self.phi_csv.flush()
def _apply_inequality_constraints(self,res_mat):
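# zero out residuals that already satisfy an inequality constraint so they do
# not contribute to phi or to the upgrade calculations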
obs = self.pst.observation_data.loc[res_mat.col_names]
gt_names = obs.loc[obs.obgnme.apply(lambda x: x.startswith("g_") or x.startswith("less")), "obsnme"]
lt_names = obs.loc[obs.obgnme.apply(lambda x: x.startswith("l_") or x.startswith("greater")), "obsnme"]
if gt_names.shape[0] == 0 and lt_names.shape[0] == 0:
return res_mat
res_df = res_mat.to_dataframe()
if gt_names.shape[0] > 0:
for gt_name in gt_names:
#print(res_df.loc[:,gt_name])
#if the residual is greater than zero, this means the ineq is satisfied
res_df.loc[res_df.loc[:,gt_name] > 0,gt_name] = 0.0
#print(res_df.loc[:,gt_name])
#print()
if lt_names.shape[0] > 0:
for lt_name in lt_names:
#print(res_df.loc[:,lt_name])
#if the residual is less than zero, this means the ineq is satisfied
res_df.loc[res_df.loc[:,lt_name] < 0,lt_name] = 0.0
#print(res_df.loc[:,lt_name])
#print()
# rebuild the matrix from the adjusted dataframe so the zeroed constraint
# residuals are actually propagated back to the caller
return Matrix.from_dataframe(res_df)
def _get_residual_matrix(self, obsensemble):
obs_matrix = obsensemble.nonzero.as_pyemu_matrix()
res_mat = obs_matrix - self.obs0_matrix.get(col_names=obs_matrix.col_names,row_names=obs_matrix.row_names)
#print(res_mat)
res_mat = self._apply_inequality_constraints(res_mat)
#print(res_mat)
return res_mat
def update(self,lambda_mults=[1.0],localizer=None,run_subset=None,use_approx=True):
raise Exception("EnsembleMethod.update() must be implemented by the derived types")
class EnsembleSmoother(EnsembleMethod):
"""an implementation of the GLM iterative ensemble smoother
Parameters
----------
pst : pyemu.Pst or str
a control file instance or filename
parcov : pyemu.Cov or str
a prior parameter covariance matrix or filename. If None,
parcov is constructed from parameter bounds (diagonal)
obscov : pyemu.Cov or str
a measurement noise covariance matrix or filename. If None,
obscov is constructed from observation weights.
num_slaves : int
number of slaves to use in (local machine) parallel evaluation of the parameter
ensemble. If 0, serial evaluation is used. Ignored if submit_file is not None
use_approx_prior : bool
a flag to use the MLE (approximate) upgrade solution. If False, a MAP
solution upgrade (which includes the prior parameter covariance) is used
submit_file : str
the name of a HTCondor submit file. If not None, HTCondor is used to
evaluate the parameter ensemble in parallel by issuing condor_submit
as a system command
port : int
the TCP port number to communicate on for parallel run management
slave_dir : str
path to a directory with a complete set of model files and PEST
interface files
drop_bad_reals : float
drop realizations with phi greater than drop_bad_reals. If None, all
realizations are kept. Default is None
Example
-------
``>>>import pyemu``
``>>>es = pyemu.EnsembleSmoother(pst="pest.pst")``
"""
def __init__(self,pst,parcov=None,obscov=None,num_slaves=0,use_approx_prior=True,
submit_file=None,verbose=False,port=4004,slave_dir="template",drop_bad_reals=None):
super(EnsembleSmoother,self).__init__(pst=pst,parcov=parcov,obscov=obscov,num_slaves=num_slaves,
submit_file=submit_file,verbose=verbose,port=port,slave_dir=slave_dir)
self.use_approx_prior = bool(use_approx_prior)
self.half_parcov_diag = None
self.half_obscov_diag = None
self.delta_par_prior = None
self.drop_bad_reals = drop_bad_reals
def initialize(self,num_reals=1,init_lambda=None,enforce_bounds="reset",
parensemble=None,obsensemble=None,restart_obsensemble=None,
):
"""Initialize the iES process. Depending on arguments, draws or loads
initial parameter observations ensembles and runs the initial parameter
ensemble
Parameters
----------
num_reals : int
the number of realizations to draw. Ignored if parensemble/obsensemble
are not None
init_lambda : float
the initial lambda to use. During subsequent updates, the lambda is
updated according to upgrade success
enforce_bounds : str
how to enforce parameter bound transgressions. Options are
reset, drop, or None
parensemble : pyemu.ParameterEnsemble or str
a parameter ensemble or filename to use as the initial
parameter ensemble. If not None, then obsensemble must not be
None
obsensemble : pyemu.ObservationEnsemble or str
an observation ensemble or filename to use as the initial
observation ensemble. If not None, then parensemble must
not be None
restart_obsensemble : pyemu.ObservationEnsemble or str
an observation ensemble or filename to use as an
evaluated observation ensemble. If not None, this will skip the initial
parameter ensemble evaluation - user beware!
Example
-------
``>>>import pyemu``
``>>>es = pyemu.EnsembleSmoother(pst="pest.pst")``
``>>>es.initialize(num_reals=100)``
"""
'''
(re)initialize the process
'''
# initialize the phi report csv
self.enforce_bounds = enforce_bounds
self.total_runs = 0
# this matrix gets used a lot, so only calc once and store
self.obscov_inv_sqrt = self.obscov.get(self.pst.nnz_obs_names).inv.sqrt
if parensemble is not None and obsensemble is not None:
self.logger.log("initializing with existing ensembles")
if isinstance(parensemble,str):
self.logger.log("loading parensemble from file")
if not os.path.exists(parensemble):
self.logger.lraise("can not find parensemble file: {0}".\
format(parensemble))
df = pd.read_csv(parensemble,index_col=0)
#df.index = [str(i) for i in df.index]
self.parensemble_0 = ParameterEnsemble.from_dataframe(df=df,pst=self.pst)
self.logger.log("loading parensemble from file")
elif isinstance(parensemble,ParameterEnsemble):
self.parensemble_0 = parensemble.copy()
else:
raise Exception("unrecognized arg type for parensemble, " +\
"should be filename or ParameterEnsemble" +\
", not {0}".format(type(parensemble)))
self.parensemble = self.parensemble_0.copy()
if isinstance(obsensemble,str):
self.logger.log("loading obsensemble from file")
if not os.path.exists(obsensemble):
self.logger.lraise("can not find obsensemble file: {0}".\
format(obsensemble))
df = pd.read_csv(obsensemble,index_col=0).loc[:,self.pst.nnz_obs_names]
#df.index = [str(i) for i in df.index]
self.obsensemble_0 = ObservationEnsemble.from_dataframe(df=df,pst=self.pst)
self.logger.log("loading obsensemble from file")
elif isinstance(obsensemble,ObservationEnsemble):
self.obsensemble_0 = obsensemble.copy()
else:
raise Exception("unrecognized arg type for obsensemble, " +\
"should be filename or ObservationEnsemble" +\
", not {0}".format(type(obsensemble)))
assert self.parensemble_0.shape[0] == self.obsensemble_0.shape[0]
#self.num_reals = self.parensemble_0.shape[0]
num_reals = self.parensemble.shape[0]
self.logger.log("initializing with existing ensembles")
else:
self.logger.log("initializing smoother with {0} realizations".format(num_reals))
#self.num_reals = int(num_reals)
#assert self.num_reals > 1
self.logger.log("initializing parensemble")
#self.parensemble_0 = ParameterEnsemble(self.pst)
#self.parensemble_0.draw(cov=self.parcov,num_reals=num_reals)
self.parensemble_0 = pyemu.ParameterEnsemble.from_gaussian_draw(ParameterEnsemble(self.pst),
self.parcov,num_reals=num_reals)
self.parensemble_0.enforce(enforce_bounds=enforce_bounds)
self.logger.log("initializing parensemble")
self.parensemble = self.parensemble_0.copy()
self.parensemble_0.to_csv(self.pst.filename +\
self.paren_prefix.format(0))
self.logger.log("initializing parensemble")
self.logger.log("initializing obsensemble")
#self.obsensemble_0 = ObservationEnsemble(self.pst)
#self.obsensemble_0.draw(cov=self.obscov,num_reals=num_reals)
self.obsensemble_0 = pyemu.ObservationEnsemble.from_id_gaussian_draw(ObservationEnsemble(self.pst),
num_reals=num_reals)
#self.obsensemble = self.obsensemble_0.copy()
# save the base obsensemble
self.obsensemble_0.to_csv(self.pst.filename +\
self.obsen_prefix.format(-1))
self.logger.log("initializing obsensemble")
self.logger.log("initializing smoother with {0} realizations".format(num_reals))
self.obs0_matrix = self.obsensemble_0.nonzero.as_pyemu_matrix()
self.enforce_bounds = enforce_bounds
self.phi_csv = open(self.pst.filename + ".iobj.csv", 'w')
self.phi_csv.write("iter_num,total_runs,lambda,min,max,mean,median,std,")
self.phi_csv.write(','.join(["{0:010d}". \
format(i + 1) for i in range(num_reals)]))
self.phi_csv.write('\n')
self.phi_act_csv = open(self.pst.filename + ".iobj.actual.csv", 'w')
self.phi_act_csv.write("iter_num,total_runs,lambda,min,max,mean,median,std,")
self.phi_act_csv.write(','.join(["{0:010d}". \
format(i + 1) for i in range(num_reals)]))
self.phi_act_csv.write('\n')
if restart_obsensemble is not None:
self.logger.log("loading restart_obsensemble {0}".format(restart_obsensemble))
failed_runs,self.obsensemble = self._load_obs_ensemble(restart_obsensemble)
assert self.obsensemble.shape[0] == self.obsensemble_0.shape[0]
assert list(self.obsensemble.columns) == list(self.obsensemble_0.columns)
self.logger.log("loading restart_obsensemble {0}".format(restart_obsensemble))
else:
# run the initial parameter ensemble
self.logger.log("evaluating initial ensembles")
failed_runs, self.obsensemble = self._calc_obs(self.parensemble)
self.obsensemble.to_csv(self.pst.filename +\
self.obsen_prefix.format(0))
self.logger.log("evaluating initial ensembles")
if failed_runs is not None:
self.logger.warn("dropping failed realizations")
#failed_runs_str = [str(f) for f in failed_runs]
#self.parensemble = self.parensemble.drop(failed_runs)
#self.obsensemble = self.obsensemble.drop(failed_runs)
self.parensemble.loc[failed_runs,:] = np.NaN
self.parensemble = self.parensemble.dropna()
self.obsensemble.loc[failed_runs,:] = np.NaN
self.obsensemble = self.obsensemble.dropna()
self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
if self.drop_bad_reals is not None:
drop_idx = np.argwhere(self.current_phi_vec > self.drop_bad_reals).flatten()
run_ids = self.obsensemble.index.values
drop_idx = run_ids[drop_idx]
if len(drop_idx) == self.obsensemble.shape[0]:
raise Exception("dropped all realizations as 'bad'")
if len(drop_idx) > 0:
self.logger.warn("{0} realizations dropped as 'bad' (indices :{1})".\
format(len(drop_idx),','.join([str(d) for d in drop_idx])))
self.parensemble.loc[drop_idx,:] = np.NaN
self.parensemble = self.parensemble.dropna()
self.obsensemble.loc[drop_idx,:] = np.NaN
self.obsensemble = self.obsensemble.dropna()
self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
self._phi_report(self.phi_csv,self.current_phi_vec,0.0)
self._phi_report(self.phi_act_csv, self.obsensemble.phi_vector.values, 0.0)
self.last_best_mean = self.current_phi_vec.mean()
self.last_best_std = self.current_phi_vec.std()
self.logger.statement("initial phi (mean, std): {0:15.6G},{1:15.6G}".\
format(self.last_best_mean,self.last_best_std))
if init_lambda is not None:
self.current_lambda = float(init_lambda)
else:
#following chen and oliver
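# initial lambda = 10**floor(log10(mean(phi) / (2 * n_obs)))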
x = self.last_best_mean / (2.0 * float(self.obsensemble.shape[1]))
self.current_lambda = 10.0**(np.floor(np.log10(x)))
# if using the approximate form of the algorithm, let
# the parameter scaling matrix be the identity matrix
# jwhite - dec 5 2016 - using the actual parcov inv
# for upgrades seems to be pushing parameters around
# too much. for now, just not using it, maybe
# better choices of lambda will tame it
self.logger.statement("current lambda:{0:15.6g}".format(self.current_lambda))
if self.use_approx_prior:
self.logger.statement("using approximate parcov in solution")
self.half_parcov_diag = 1.0
else:
#self.logger.statement("using full parcov in solution")
# if self.parcov.isdiagonal:
# self.half_parcov_diag = self.parcov.sqrt.inv
# else:
# self.half_parcov_diag = Cov(x=np.diag(self.parcov.x),
# names=self.parcov.col_names,
# isdiagonal=True).inv.sqrt
self.half_parcov_diag = 1.0
self.delta_par_prior = self._calc_delta_par(self.parensemble_0)
u,s,v = self.delta_par_prior.pseudo_inv_components()
self.Am = u * s.inv
self.__initialized = True
def get_localizer(self):
""" get an empty/generic localizer matrix that can be filled
Returns
-------
localizer : pyemu.Matrix
matrix with nnz obs names for rows and adj par names for columns
"""
onames = self.pst.nnz_obs_names
pnames = self.pst.adj_par_names
localizer = Matrix(x=np.ones((len(onames),len(pnames))),row_names=onames,col_names=pnames)
return localizer
def _calc_delta_par(self,parensemble):
'''
calc the scaled parameter ensemble differences from the mean
'''
return self._calc_delta(parensemble, self.half_parcov_diag)
def _calc_delta_obs(self,obsensemble):
'''
calc the scaled observation ensemble differences from the mean
'''
return self._calc_delta(obsensemble.nonzero, self.obscov.inv.sqrt)
def update(self,lambda_mults=[1.0],localizer=None,run_subset=None,use_approx=True,
calc_only=False):
"""update the iES one GLM cycle
Parameters
----------
lambda_mults : list
a list of lambda multipliers to test. Each lambda mult value will require
evaluating (a subset of) the parameter ensemble.
localizer : pyemu.Matrix
a jacobian localizing matrix
run_subset : int
the number of realizations to test for each lambda_mult value. For example,
if run_subset = 30 and num_reals=100, the first 30 realizations will be run (in
parallel) for each lambda_mult value. Then the best lambda_mult is selected and the
remaining 70 realizations for that lambda_mult value are run (in parallel).
use_approx : bool
a flag to use the MLE or MAP upgrade solution. If True, the MLE (approximate) solution is used
calc_only : bool
a flag to calculate the upgrade matrix only (not run the ensemble). This is mostly for
debugging and testing on travis. Default is False
Example
-------
``>>>import pyemu``
``>>>es = pyemu.EnsembleSmoother(pst="pest.pst")``
``>>>es.initialize(num_reals=100)``
``>>>es.update(lambda_mults=[0.1,1.0,10.0],run_subset=30)``
"""
if run_subset is not None:
if run_subset >= self.obsensemble.shape[0]:
self.logger.warn("run_subset ({0}) >= num of active reals ({1})...ignoring ".\
format(run_subset,self.obsensemble.shape[0]))
run_subset = None
self.iter_num += 1
self.logger.log("iteration {0}".format(self.iter_num))
self.logger.statement("{0} active realizations".format(self.obsensemble.shape[0]))
if self.obsensemble.shape[0] < 2:
self.logger.lraise("at least active 2 realizations (really like 300) are needed to update")
if not self.__initialized:
#raise Exception("must call initialize() before update()")
self.logger.lraise("must call initialize() before update()")
self.logger.log("calculate scaled delta obs")
scaled_delta_obs = self._calc_delta_obs(self.obsensemble)
self.logger.log("calculate scaled delta obs")
self.logger.log("calculate scaled delta par")
scaled_delta_par = self._calc_delta_par(self.parensemble)
self.logger.log("calculate scaled delta par")
self.logger.log("calculate pseudo inv comps")
u,s,v = scaled_delta_obs.pseudo_inv_components()
self.logger.log("calculate pseudo inv comps")
self.logger.log("calculate obs diff matrix")
obs_diff = self.obscov_inv_sqrt * self._get_residual_matrix(self.obsensemble).T
self.logger.log("calculate obs diff matrix")
# here is the math part...calculate upgrade matrices
mean_lam,std_lam,paren_lam,obsen_lam = [],[],[],[]
lam_vals = []
for ilam,cur_lam_mult in enumerate(lambda_mults):
parensemble_cur_lam = self.parensemble.copy()
#print(parensemble_cur_lam.isnull().values.any())
cur_lam = self.current_lambda * cur_lam_mult
lam_vals.append(cur_lam)
self.logger.log("calcs for lambda {0}".format(cur_lam_mult))
scaled_ident = Cov.identity_like(s) * (cur_lam+1.0)
scaled_ident += s**2
scaled_ident = scaled_ident.inv
# build up this matrix as a single element so we can apply
# localization
self.logger.log("building upgrade_1 matrix")
upgrade_1 = -1.0 * (self.half_parcov_diag * scaled_delta_par) *\
v * s * scaled_ident * u.T
self.logger.log("building upgrade_1 matrix")
# apply localization
if localizer is not None:
self.logger.log("applying localization")
upgrade_1.hadamard_product(localizer)
self.logger.log("applying localization")
# apply residual information
self.logger.log("applying residuals")
upgrade_1 *= obs_diff
self.logger.log("applying residuals")
self.logger.log("processing upgrade_1")
upgrade_1 = upgrade_1.to_dataframe()
upgrade_1.index.name = "parnme"
upgrade_1 = upgrade_1.T
upgrade_1.index = [int(i) for i in upgrade_1.index]
upgrade_1.to_csv(self.pst.filename+".upgrade_1.{0:04d}.csv".\
format(self.iter_num))
if upgrade_1.isnull().values.any():
self.logger.lraise("NaNs in upgrade_1")
self.logger.log("processing upgrade_1")
#print(upgrade_1.isnull().values.any())
#print(parensemble_cur_lam.index)
#print(upgrade_1.index)
parensemble_cur_lam += upgrade_1
# parameter-based upgrade portion
if not use_approx and self.iter_num > 1:
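# upgrade_2 is the prior-anchoring (MAP) correction: the deviation of the
# current ensemble from the prior ensemble, projected through Am (the
# pseudo-inverse components of the prior parameter deviations)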
self.logger.log("building upgrade_2 matrix")
par_diff = (self.parensemble - self.parensemble_0.loc[self.parensemble.index,:]).\
as_pyemu_matrix().T
x4 = self.Am.T * self.half_parcov_diag * par_diff
x5 = self.Am * x4
x6 = scaled_delta_par.T * x5
x7 = v * scaled_ident * v.T * x6
upgrade_2 = -1.0 * (self.half_parcov_diag *
scaled_delta_par * x7).to_dataframe()
upgrade_2.index.name = "parnme"
upgrade_2 = upgrade_2.T
upgrade_2.to_csv(self.pst.filename+".upgrade_2.{0:04d}.csv".\
format(self.iter_num))
upgrade_2.index = [int(i) for i in upgrade_2.index]
if upgrade_2.isnull().values.any():
self.logger.lraise("NaNs in upgrade_2")
parensemble_cur_lam += upgrade_2
self.logger.log("building upgrade_2 matrix")
parensemble_cur_lam.enforce(self.enforce_bounds)
# this is for testing failed runs on upgrade testing
# works with the 10par_xsec smoother test
#parensemble_cur_lam.iloc[:,:] = -1000000.0
paren_lam.append(pd.DataFrame(parensemble_cur_lam.loc[:,:]))
self.logger.log("calcs for lambda {0}".format(cur_lam_mult))
if calc_only:
return
# subset if needed
# and combine lambda par ensembles into one par ensemble for evaluation
if run_subset is not None and run_subset < self.parensemble.shape[0]:
#subset_idx = ["{0:d}".format(i) for i in np.random.randint(0,self.parensemble.shape[0]-1,run_subset)]
subset_idx = self.parensemble.iloc[:run_subset,:].index.values
self.logger.statement("subset idxs: " + ','.join([str(s) for s in subset_idx]))
paren_lam_subset = [pe.loc[subset_idx,:] for pe in paren_lam]
paren_combine = pd.concat(paren_lam_subset,ignore_index=True)
paren_lam_subset = None
else:
subset_idx = self.parensemble.index.values
paren_combine = pd.concat(paren_lam,ignore_index=True)
self.logger.log("evaluating ensembles for lambdas : {0}".\
format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
failed_runs, obsen_combine = self._calc_obs(paren_combine)
#if failed_runs is not None:
# obsen_combine.loc[failed_runs,:] = np.NaN
self.logger.log("evaluating ensembles for lambdas : {0}".\
format(','.join(["{0:8.3E}".format(l) for l in lam_vals])))
paren_combine = None
if failed_runs is not None and len(failed_runs) == obsen_combine.shape[0]:
self.logger.lraise("all runs failed - cannot continue")
# unpack lambda obs ensembles from combined obs ensemble
nrun_per_lam = self.obsensemble.shape[0]
if run_subset is not None:
nrun_per_lam = run_subset
obsen_lam = []
for i in range(len(lam_vals)):
sidx = i * nrun_per_lam
eidx = sidx + nrun_per_lam
oe = ObservationEnsemble.from_dataframe(df=obsen_combine.iloc[sidx:eidx,:].copy(),
pst=self.pst)
oe.index = subset_idx
# check for failed runs in this set - drop failed runs from obs ensembles
if failed_runs is not None:
failed_runs_this = np.array([f for f in failed_runs if f >= sidx and f < eidx]) - sidx
if len(failed_runs_this) > 0:
if len(failed_runs_this) == oe.shape[0]:
self.logger.warn("all runs failed for lambda {0}".format(lam_vals[i]))
else:
self.logger.warn("{0} run failed for lambda {1}".\
format(len(failed_runs_this),lam_vals[i]))
oe.iloc[failed_runs_this,:] = np.NaN
oe = oe.dropna()
# don't drop bad reals here, instead, mask bad reals in the lambda
# selection and drop later
# if self.drop_bad_reals is not None:
# assert isinstance(drop_bad_reals, float)
# drop_idx = np.argwhere(self.current_phi_vec > self.drop_bad_reals).flatten()
# run_ids = self.obsensemble.index.values
# drop_idx = run_ids[drop_idx]
# if len(drop_idx) == self.obsensemble.shape[0]:
# raise Exception("dropped all realizations as 'bad'")
# if len(drop_idx) > 0:
# self.logger.warn("{0} realizations dropped as 'bad' (indices :{1})". \
# format(len(drop_idx), ','.join([str(d) for d in drop_idx])))
# self.parensemble.loc[drop_idx, :] = np.NaN
# self.parensemble = self.parensemble.dropna()
# self.obsensemble.loc[drop_idx, :] = np.NaN
# self.obsensemble = self.obsensemble.dropna()
#
# self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
obsen_lam.append(oe)
obsen_combine = None
# here is where we need to select out the "best" lambda par and obs
# ensembles
self.logger.statement("\n**************************")
self.logger.statement(str(datetime.now()))
self.logger.statement("total runs:{0}".format(self.total_runs))
self.logger.statement("iteration: {0}".format(self.iter_num))
self.logger.statement("current lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
format(self.current_lambda,
self.last_best_mean,self.last_best_std))
phi_vecs = [self._calc_phi_vec(obsen) for obsen in obsen_lam]
if self.drop_bad_reals is not None:
for i,pv in enumerate(phi_vecs):
#for testing the drop_bad_reals functionality
#pv[[0,3,7]] = self.drop_bad_reals + 1.0
pv[pv>self.drop_bad_reals] = np.NaN
pv = pv[~np.isnan(pv)]
if len(pv) == 0:
raise Exception("all realization for lambda {0} dropped as 'bad'".\
format(lam_vals[i]))
phi_vecs[i] = pv
mean_std = [(pv.mean(),pv.std()) for pv in phi_vecs]
update_pars = False
update_lambda = False
# accept a new best if it's within 10%
best_mean = self.last_best_mean * 1.1
best_std = self.last_best_std * 1.1
best_i = 0
for i,(m,s) in enumerate(mean_std):
self.logger.statement(" tested lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
format(self.current_lambda * lambda_mults[i],m,s))
if m < best_mean:
update_pars = True
best_mean = m
best_i = i
if s < best_std:
update_lambda = True
best_std = s
if np.isnan(best_mean):
self.logger.lraise("best mean = NaN")
if np.isnan(best_std):
self.logger.lraise("best std = NaN")
if not update_pars:
self.current_lambda *= max(lambda_mults) * 10.0
self.current_lambda = min(self.current_lambda,100000)
self.logger.statement("not accepting iteration, increased lambda:{0}".\
format(self.current_lambda))
else:
self.parensemble = ParameterEnsemble.from_dataframe(df=paren_lam[best_i],pst=self.pst)
if run_subset is not None:
failed_runs, self.obsensemble = self._calc_obs(self.parensemble)
if failed_runs is not None:
self.logger.warn("dropping failed realizations")
self.parensemble.loc[failed_runs, :] = np.NaN
self.parensemble = self.parensemble.dropna()
self.obsensemble.loc[failed_runs, :] = np.NaN
self.obsensemble = self.obsensemble.dropna()
self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
#self._phi_report(self.current_phi_vec,self.current_lambda * lambda_mults[best_i])
best_mean = self.current_phi_vec.mean()
best_std = self.current_phi_vec.std()
else:
self.obsensemble = obsen_lam[best_i]
# reindex parensemble in case failed runs
self.parensemble = ParameterEnsemble.from_dataframe(df=self.parensemble.loc[self.obsensemble.index],pst=self.pst)
self.current_phi_vec = phi_vecs[best_i]
if self.drop_bad_reals is not None:
# for testing drop_bad_reals functionality
# self.current_phi_vec[::2] = self.drop_bad_reals + 1.0
drop_idx = np.argwhere(self.current_phi_vec > self.drop_bad_reals).flatten()
run_ids = self.obsensemble.index.values
drop_idx = run_ids[drop_idx]
if len(drop_idx) > self.obsensemble.shape[0] - 3:
raise Exception("dropped too many realizations as 'bad'")
if len(drop_idx) > 0:
self.logger.warn("{0} realizations dropped as 'bad' (indices :{1})". \
format(len(drop_idx), ','.join([str(d) for d in drop_idx])))
self.parensemble.loc[drop_idx, :] = np.NaN
self.parensemble = self.parensemble.dropna()
self.obsensemble.loc[drop_idx, :] = np.NaN
self.obsensemble = self.obsensemble.dropna()
self.current_phi_vec = self._calc_phi_vec(self.obsensemble)
best_mean = self.current_phi_vec.mean()
best_std = self.current_phi_vec.std()
self._phi_report(self.phi_csv,self.current_phi_vec,self.current_lambda * lambda_mults[best_i])
self._phi_report(self.phi_act_csv, self.obsensemble.phi_vector.values,self.current_lambda * lambda_mults[best_i])
self.logger.statement(" best lambda:{0:15.6G}, mean:{1:15.6G}, std:{2:15.6G}".\
format(self.current_lambda*lambda_mults[best_i],
best_mean,best_std))
self.logger.statement(" actual mean phi: {0:15.6G}".format(float(self.current_actual_phi.mean())))
self.last_best_mean = best_mean
self.last_best_std = best_std
if update_lambda:
# be aggressive
self.current_lambda *= (lambda_mults[best_i] * 0.75)
# but don't let lambda get too small
self.current_lambda = max(self.current_lambda,0.00001)
self.logger.statement("updating lambda: {0:15.6G}".\
format(self.current_lambda ))
self.logger.statement("**************************\n")
self.parensemble.to_csv(self.pst.filename+self.paren_prefix.\
format(self.iter_num))
self.obsensemble.to_csv(self.pst.filename+self.obsen_prefix.\
format(self.iter_num))
if self.raw_sweep_out is not None:
self.raw_sweep_out.to_csv(self.pst.filename+"_raw{0}".\
format(self.iter_num))
self.logger.log("iteration {0}".format(self.iter_num))
|
regen.py
|
#!/usr/bin/env python3
import os
import time
import multiprocessing
from tqdm import tqdm
import argparse
# run DM procs
os.environ["USE_WEBCAM"] = "1"
import cereal.messaging as messaging
from cereal.services import service_list
from cereal.visionipc.visionipc_pyx import VisionIpcServer, VisionStreamType # pylint: disable=no-name-in-module, import-error
from common.params import Params
from common.realtime import Ratekeeper, DT_MDL, DT_DMON
from common.transformations.camera import eon_f_frame_size, eon_d_frame_size
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.manager.process import ensure_running
from selfdrive.manager.process_config import managed_processes
from selfdrive.test.update_ci_routes import upload_route
from tools.lib.route import Route
from tools.lib.framereader import FrameReader
from tools.lib.logreader import LogReader
process_replay_dir = os.path.dirname(os.path.abspath(__file__))
FAKEDATA = os.path.join(process_replay_dir, "fakedata/")
def replay_service(s, msgs):
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(service_list[s].frequency, print_delay_threshold=None)
smsgs = [m for m in msgs if m.which() == s]
while True:
for m in smsgs:
# TODO: use logMonoTime
pm.send(s, m.as_builder())
rk.keep_time()
vs = None
def replay_cameras(lr, frs):
cameras = [
("roadCameraState", DT_MDL, eon_f_frame_size, VisionStreamType.VISION_STREAM_ROAD),
("driverCameraState", DT_DMON, eon_d_frame_size, VisionStreamType.VISION_STREAM_DRIVER),
]
def replay_camera(s, stream, dt, vipc_server, fr, size):
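# each camera is replayed in its own process: frames are read from the
# FrameReader (or a blank frame is used) and published over both the messaging
# PubMaster and the VisionIpc server at the camera's frame rate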
pm = messaging.PubMaster([s, ])
rk = Ratekeeper(1 / dt, print_delay_threshold=None)
img = b"\x00" * int(size[0]*size[1]*3/2)
while True:
if fr is not None:
img = fr.get(rk.frame % fr.frame_count, pix_fmt='yuv420p')[0]
img = img.flatten().tobytes()
rk.keep_time()
m = messaging.new_message(s)
msg = getattr(m, s)
msg.frameId = rk.frame
pm.send(s, m)
vipc_server.send(stream, img, msg.frameId, msg.timestampSof, msg.timestampEof)
# init vipc server and cameras
p = []
global vs
vs = VisionIpcServer("camerad")
for (s, dt, size, stream) in cameras:
fr = frs.get(s, None)
vs.create_buffers(stream, 40, False, size[0], size[1])
p.append(multiprocessing.Process(target=replay_camera,
args=(s, stream, dt, vs, fr, size)))
# hack to make UI work
vs.create_buffers(VisionStreamType.VISION_STREAM_RGB_BACK, 4, True, eon_f_frame_size[0], eon_f_frame_size[1])
vs.start_listener()
return p
def regen_segment(lr, frs=None, outdir=FAKEDATA):
lr = list(lr)
if frs is None:
frs = dict()
# setup env
params = Params()
params.clear_all()
params.put_bool("Passive", False)
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("CommunityFeaturesToggle", True)
params.put_bool("CommunityFeaturesToggle", True)
cal = messaging.new_message('liveCalibration')
cal.liveCalibration.validBlocks = 20
cal.liveCalibration.rpyCalib = [0.0, 0.0, 0.0]
params.put("CalibrationParams", cal.to_bytes())
os.environ["LOG_ROOT"] = outdir
os.environ["SIMULATION"] = "1"
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = msg.carParams.carFingerprint
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
params.put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
#TODO: init car, make sure starts engaged when segment is engaged
fake_daemons = {
'sensord': [
multiprocessing.Process(target=replay_service, args=('sensorEvents', lr)),
],
'pandad': [
multiprocessing.Process(target=replay_service, args=('can', lr)),
multiprocessing.Process(target=replay_service, args=('pandaStates', lr)),
],
#'managerState': [
# multiprocessing.Process(target=replay_service, args=('managerState', lr)),
#],
'thermald': [
multiprocessing.Process(target=replay_service, args=('deviceState', lr)),
],
'camerad': [
*replay_cameras(lr, frs),
],
# TODO: fix these and run them
'paramsd': [
multiprocessing.Process(target=replay_service, args=('liveParameters', lr)),
],
'locationd': [
multiprocessing.Process(target=replay_service, args=('liveLocationKalman', lr)),
],
}
try:
# start procs up
ignore = list(fake_daemons.keys()) + ['ui', 'manage_athenad', 'uploader']
ensure_running(managed_processes.values(), started=True, not_run=ignore)
for procs in fake_daemons.values():
for p in procs:
p.start()
for _ in tqdm(range(60)):
# ensure all procs are running
for d, procs in fake_daemons.items():
for p in procs:
if not p.is_alive():
raise Exception(f"{d}'s {p.name} died")
time.sleep(1)
finally:
# kill everything
for p in managed_processes.values():
p.stop()
for procs in fake_daemons.values():
for p in procs:
p.terminate()
r = params.get("CurrentRoute", encoding='utf-8')
return os.path.join(outdir, r + "--0")
def regen_and_save(route, sidx, upload=False, use_route_meta=True):
if use_route_meta:
r = Route(route)
lr = LogReader(r.log_paths()[sidx])
fr = FrameReader(r.camera_paths()[sidx])
else:
lr = LogReader(f"cd:/{route.replace('|', '/')}/{sidx}/rlog.bz2")
fr = FrameReader(f"cd:/{route.replace('|', '/')}/{sidx}/fcamera.hevc")
rpath = regen_segment(lr, {'roadCameraState': fr})
relr = os.path.relpath(rpath)
print("\n\n", "*"*30, "\n\n")
print("New route:", relr, "\n")
if upload:
upload_route(relr)
return relr
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate new segments from old ones")
parser.add_argument("--upload", action="store_true", help="Upload the new segment to the CI bucket")
parser.add_argument("route", type=str, help="The source route")
parser.add_argument("seg", type=int, help="Segment in source route")
args = parser.parse_args()
regen_and_save(args.route, args.seg, args.upload)
|
scheduler.py
|
'''
This is the application scheduler.
It defines scheduled tasks and runs them as per their defined schedule.
This scheduler is started and stopped when the app is started and stopped, unless RUN_SCHEDULE is set to False
in the config, in which case it must be started manually / managed by supervisor.
It is presumed to run on one machine at present.
If scaling later requires having multiple machines, then this scheduler should only run on the machine that has access to
the relevant directories. There is a task that moves files from ftp user jail directories to tmp processing locations, and
this is the limitation - creating sftp accounts has to happen on one machine or across machines, but that would increase
attack surface for security vulnerability. So probably better to have only one machine open to sftp, and if necessary for
later scale the script that is called to move data from the sftp jails to processing locations could do so by round-robin
to multiple processing machines. The jper app config has settings for running this scheduler and what frequencies to run each
process, so it is just a case of installing jper on each machine but only setting the frequencies for the processes desired to
be scheduled on each given machine.
Or, if scheduled tasks themselves also need to be scaled up, the scheduler can continue to run on
all machines but some synchronisation would have to be added so that tasks were not run on every machine. Also, each machine
running the schedule would need access to any relevant directories.
'''
import schedule, time, os, shutil, requests, datetime, tarfile, zipfile, subprocess, getpass, uuid, json, csv
from threading import Thread
from octopus.core import app, initialise
from service import reports
import models
if app.config.get('DEEPGREEN_EZB_ROUTING',False):
import routing_deepgreen as routing
else:
import routing
# functions for the checkftp to unzip and move stuff up then zip again in incoming packages
def zip(src, dst):
zf = zipfile.ZipFile(dst, "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(src)
for dirname, subdirs, files in os.walk(src):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
zf.write(absname, arcname)
zf.close()
def extract(fl,path):
app.logger.debug('Extracting ' + fl)
try:
# TODO the tar method has not yet been tested...
tar = tarfile.open(fl)
# 2019-11-18 TD : add the actual path for extraction here
tar.extractall(path=path)
#tar.extractall()
tar.close()
app.logger.debug('Extracted tar ' + fl)
return True
except:
try:
with zipfile.ZipFile(fl) as zf:
# 2019-11-18 TD : replace the 'hand made' routine by the library call
zf.extractall(path=path)
# 2019-11-18 TD : this loop apparently does not work with nested folder
# structures, so we discard it
# for member in zf.infolist():
# # Path traversal defense copied from
# # http://hg.python.org/cpython/file/tip/Lib/http/server.py#l789
# words = member.filename.split('/')
# for word in words[:-1]:
# drive, word = os.path.splitdrive(word)
# head, word = os.path.split(word)
# if word in (os.curdir, os.pardir, ''): continue
# path = os.path.join(path, word)
# zf.extract(member, path)
app.logger.debug('Extracted zip ' + fl)
return True
except Exception as e:
app.logger.error('Scheduler - Extraction could not be done for ' + fl + ' : "{x}"'.format(x=str(e)))
return False
def flatten(destination, depth=None):
if depth is None:
depth = destination
app.logger.debug('Flatten depth set ' + destination + ' ' + depth)
#
# 2019-11-18 TD : Introducing the '.xml' file as recursion stop.
# If an .xml file is found in a folder in the .zip file then this
# *is* a single publication to be separated from the enclosing .zip file
has_xml = False
for fl in os.listdir(depth):
if 'article_metadata.xml' in fl: # De Gruyter provides a second .xml sometimes, sigh.
os.remove(depth + '/' + fl)
continue
if not has_xml and '.xml' in fl:
app.logger.debug('Flatten ' + fl + ' found in folder')
has_xml = True
words = destination.split('/')
stem = words[-1] + '/' + os.path.splitext(fl)[0]
if not os.path.exists(destination + '/' + stem):
os.makedirs(destination + '/' + stem)
app.logger.debug('Flatten new ' + destination + '/' + stem + ' created')
# 2019-11-18 TD : end of recursion stop marker search
#
for fl in os.listdir(depth):
app.logger.debug('Flatten at ' + fl)
# 2019-11-18 TD : Additional check for 'has_xml' (the stop marker)
# if '.zip' in fl: # or '.tar' in fl:
if not has_xml and '.zip' in fl: # or '.tar' in fl:
app.logger.debug('Flatten ' + fl + ' is an archive')
extracted = extract(depth + '/' + fl, depth)
if extracted:
app.logger.debug('Flatten ' + fl + ' is extracted')
os.remove(depth + '/' + fl)
flatten(destination,depth)
# 2019-11-18 TD : Additional check for 'has_xml' (the stop marker)
# elif os.path.isdir(depth + '/' + fl):
elif os.path.isdir(depth + '/' + fl) and not has_xml:
app.logger.debug('Flatten ' + fl + ' is not a file, flattening')
flatten(destination, depth + '/' + fl)
else:
try:
# shutil.move(depth + '/' + fl, destination)
# 2019-11-18 TD : Some 'new' +stem dst place to move all the single pubs into
if has_xml and os.path.isdir(destination + '/' + stem):
shutil.move(depth + '/' + fl, destination + '/' + stem)
else:
shutil.move(depth + '/' + fl, destination)
except:
pass
# 2016-11-30 TD : routine to peek into flattened packages, looking for a .xml file floating around
def pkgformat(src):
# our first best guess...
### pkg_fmt = "https://datahub.deepgreen.org/FilesAndJATS"
pkg_fmt = "unknown"
for fl in os.listdir(src):
app.logger.debug('Pkgformat at ' + fl)
if '.xml' in fl:
app.logger.debug('Pkgformat tries to open ' + src + '/' + fl)
try:
with open(src + '/' + fl,'r') as f:
for line in f:
if "//NLM//DTD Journal " in line:
pkg_fmt = "https://datahub.deepgreen.org/FilesAndJATS"
break
elif "//NLM//DTD JATS " in line:
pkg_fmt = "https://datahub.deepgreen.org/FilesAndJATS"
break
elif "//RSC//DTD RSC " in line:
pkg_fmt = "https://datahub.deepgreen.org/FilesAndRSC"
break
except:
app.logger.info('Pkgformat could not open ' + src + '/' + fl)
# there shall only be *one* .xml per package
break
app.logger.debug('Pkgformat returns ' + pkg_fmt)
return pkg_fmt
#
# 2019-07-17 TD : change target of the move operation to the big dg_storage for all deliveries
#
def moveftp():
try:
# # move any files in the jail of ftp users into the temp directory for later processing
# tmpdir = app.config.get('TMP_DIR','/tmp')
pubstoredir = app.config.get('PUBSTOREDIR','/data/dg_storage')
userdir = app.config.get('USERDIR','/home/sftpusers')
userdirs = os.listdir(userdir)
app.logger.info("Scheduler - from FTP folders found " + str(len(userdirs)) + " user directories")
for dir in userdirs:
# 2019-07-30 TD : One more loop over possible subfolders of the user
# Please note: They are *exclusively* created by 'createFTPuser.sh'
# At least, there should be the (old) 'xfer' folder
founditems = False
for xfer in os.listdir(userdir + '/' + dir):
# if len(os.listdir(userdir + '/' + dir + '/xfer')):
if len(os.listdir(userdir + '/' + dir + '/' + xfer)):
founditems = True
# for thisitem in os.listdir(userdir + '/' + dir + '/xfer'):
for thisitem in os.listdir(userdir + '/' + dir + '/' + xfer):
app.logger.info('Scheduler - moving file ' + thisitem + ' for Account:' + dir)
fl = os.path.dirname(os.path.abspath(__file__)) + '/models/moveFTPfiles.sh'
try:
newowner = getpass.getuser()
except:
newowner = 'green'
uniqueid = uuid.uuid4().hex
# targetdir = tmpdir + '/' + dir
# uniquedir = tmpdir + '/' + dir + '/' + uniqueid
targetdir = pubstoredir + '/' + dir
# 2019-07-17 TD : introduce a new directory that will indicate pending items
pendingdir = pubstoredir + '/' + dir + '/pending'
uniquedir = pubstoredir + '/' + dir + '/' + uniqueid
# moveitem = userdir + '/' + dir + '/xfer/' + thisitem
moveitem = userdir + '/' + dir + '/' + xfer + '/' + thisitem
subprocess.call( [ 'sudo', fl, dir, newowner, targetdir, uniqueid, uniquedir, moveitem, pendingdir ] )
if founditems is False:
app.logger.debug('Scheduler - found nothing to move for Account:' + dir)
except:
app.logger.error("Scheduler - move from FTP failed")
if app.config.get('MOVEFTP_SCHEDULE',10) != 0:
schedule.every(app.config.get('MOVEFTP_SCHEDULE',10)).minutes.do(moveftp)
#
# 2019-07-17 TD : process the big delivery/publisher dg_storage for all pending items
#
def copyftp():
try:
# copy any files in the big delivery/publisher dg_storage into the temp dir for processing
tmpdir = app.config.get('TMP_DIR','/tmp')
maxtransacts = app.config.get('MAX_TMPDIR_TRANSACTS_PER_ACC',99)
pubstoredir = app.config.get('PUBSTOREDIR','/data/dg_storage')
pubstoredirs = os.listdir(pubstoredir)
app.logger.info("Scheduler - from DG-STORAGE folders found " + str(len(pubstoredirs)) + " user directories")
for dir in pubstoredirs:
# 2019-07-29 TD : check if 'tmpdir/dir' exists at all
if os.path.exists(tmpdir + '/' + dir) is False:
os.makedirs(tmpdir + '/' + dir)
# 2019-07-17 TD : limit temp dir to 100 transactions per account
if len(os.listdir(tmpdir + '/' + dir)) > maxtransacts:
app.logger.info('Scheduler - skipping this copy process because len(transactions)>' + str(maxtransacts) + ' in temp directory for Account:' + dir)
continue
if len(os.listdir(pubstoredir + '/' + dir + '/pending')):
for transact in os.listdir(pubstoredir + '/' + dir + '/pending'):
if len(os.listdir(tmpdir + '/' + dir)) > maxtransacts:
break
app.logger.info('Scheduler - copying folder of transaction ' + transact + ' for Account:' + dir)
src = pubstoredir + '/' + dir + '/pending/' + transact
dst = tmpdir + '/' + dir + '/' + transact
# subprocess.call( [ 'cp -R', ...] )
shutil.rmtree(dst, ignore_errors=True) # target MUST NOT exist!
shutil.copytree(src, dst)
try:
os.remove(src) # try to take the pending symlink away
except Exception as e:
app.logger.error("Scheduler - failed to delete pending entry: '{x}'".format(x=e.message))
else:
app.logger.debug('Scheduler - currently, nothing to copy for Account:' + dir)
except:
app.logger.error("Scheduler - copy from DG-STORAGE failed")
if app.config.get('COPYFTP_SCHEDULE',10) != 0:
schedule.every(app.config.get('COPYFTP_SCHEDULE',10)).minutes.do(copyftp)
def processftp():
try:
# list all directories in the temp dir - one for each ftp user for whom files have been moved from their jail
userdir = app.config.get('TMP_DIR','/tmp')
userdirs = os.listdir(userdir)
app.logger.debug("Scheduler - processing for FTP found " + str(len(userdirs)) + " temp user directories")
for dir in userdirs:
# configure for sending anything for the user of this dir
apiurl = app.config['API_URL']
acc = models.Account().pull(dir)
if acc is None:
continue
apiurl += '?api_key=' + acc.data['api_key']
# there is a uuid dir for each item moved in a given operation from the user jail
for udir in os.listdir(userdir + '/' + dir):
thisdir = userdir + '/' + dir + '/' + udir
app.logger.debug('Scheduler - processing ' + thisdir + ' for Account:' + dir)
for xpub in os.listdir(thisdir):
pub = xpub
# should be a dir per publication notification - that is what they are told to provide
# and at this point there should just be one pub in here, whether it be a file or directory or archive
# if just a file, even an archive, dump it into a directory so it can be zipped easily
if os.path.isfile(thisdir + '/' + pub):
nf = uuid.uuid4().hex
os.makedirs(thisdir + '/' + nf)
shutil.move(thisdir + '/' + pub, thisdir + '/' + nf + '/')
pub = nf
# by now this should look like this:
# /Incoming/ftptmp/<useruuid>/<transactionuuid>/<uploadeddirORuuiddir>/<thingthatwasuploaded>
# they should provide a directory of files or a zip, but it could just be one file
# but we don't know the hierarchy of the content, so we have to unpack and flatten it all
# unzip and pull all docs to the top level then zip again. Should be jats file at top now
flatten(thisdir + '/' + pub)
# 2019-11-18 TD : 'flatten' has been modified to process bulk deliveries
# (i.e. more then one pub per zip file!) as well.
# If it is bulk, there maybe a lot of zip files, and
# we need a loop:
pdir = thisdir
if os.path.isdir(thisdir + '/' + pub + '/' + pub):
pdir = thisdir + '/' + pub + '/' + pub
#
for singlepub in os.listdir(pdir):
# 2016-11-30 TD : Since there are (at least!?) 2 formats now available, we have to find out
## 2019-11-18 TD : original path without loop where zip file is packed
## from source folder "thisdir + '/' + pub"
## pkg_fmt = pkgformat(thisdir + '/' + pub)
## #
## pkg = thisdir + '/' + pub + '.zip'
## zip(thisdir + '/' + pub, pkg)
##
pkg_fmt = pkgformat(pdir + '/' + singlepub)
#
pkg = pdir + '/' + singlepub + '.zip'
zip(pdir + '/' + singlepub, pkg)
# create a notification and send to the API to join the unroutednotification index
notification = {
"content": { "packaging_format": pkg_fmt }
#"content": {"packaging_format": "https://datahub.deepgreen.org/FilesAndJATS"}
## "content": {"packaging_format": "https://pubrouter.jisc.ac.uk/FilesAndJATS"}
}
files = [
("metadata", ("metadata.json", json.dumps(notification), "application/json")),
("content", ("content.zip", open(pkg, "rb"), "application/zip"))
]
app.logger.debug('Scheduler - processing POSTing ' + pkg + ' ' + json.dumps(notification))
resp = requests.post(apiurl, files=files, verify=False)
if str(resp.status_code).startswith('4') or str(resp.status_code).startswith('5'):
app.logger.error('Scheduler - processing completed with POST failure to ' + apiurl + ' - ' + str(resp.status_code) + ' - ' + resp.text)
else:
app.logger.info('Scheduler - processing completed with POST to ' + apiurl + ' - ' + str(resp.status_code))
shutil.rmtree(userdir + '/' + dir + '/' + udir, ignore_errors=True) # 2019-12-02 TD : kill "udir" folder no matter what status
except Exception as e:
app.logger.error('Scheduler - failed scheduled process for FTP temp directories: "{x}"'.format(x=str(e)))
if app.config.get('PROCESSFTP_SCHEDULE',10) != 0:
schedule.every(app.config.get('PROCESSFTP_SCHEDULE',10)).minutes.do(processftp)
def checkunrouted():
urobjids = []
robjids = []
counter = 0
limit = app.config.get('CHECKUNROUTED_SCHEDULE',10) * 5
# 2019-06-13 TD : to cope with mass deliveries, we have to limit the next loop
# (factor 10 times the time to the next call seems reasonable...)
try:
app.logger.debug("Scheduler - check for unrouted notifications")
# query the service.models.unroutednotification index
# returns a list of unrouted notification from the last three up to four months
counter = 0
for obj in models.UnroutedNotification.scroll():
counter += 1
res = routing.route(obj)
if res:
robjids.append(obj.id)
else:
urobjids.append(obj.id)
# 2019-06-13 TD : to cope with mass deliveries, we have to limit
# the loop over the unrouted notifs
if counter >= limit:
break
# 2017-06-06 TD : replace str() by .format() string interpolation
app.logger.debug("Scheduler - routing sent {cnt} notification(s) for routing".format(cnt=counter))
if app.config.get("DELETE_ROUTED", False) and len(robjids) > 0:
# 2017-06-06 TD : replace str() by .format() string interpolation
app.logger.debug("Scheduler - routing deleting {x} of {cnt} unrouted notification(s) that have been processed and routed".format(x=len(robjids),cnt=counter))
models.UnroutedNotification.bulk_delete(robjids)
# 2017-05-17 TD :
time.sleep(2) # 2 seconds grace time
if app.config.get("DELETE_UNROUTED", False) and len(urobjids) > 0:
# 2017-06-06 TD : replace str() by .format() string interpolation
app.logger.debug("Scheduler - routing deleting {x} of {cnt} unrouted notifications that have been processed and were unrouted".format(x=len(urobjids),cnt=counter))
models.UnroutedNotification.bulk_delete(urobjids)
# 2017-05-17 TD :
time.sleep(2) # again, 2 seconds grace
except Exception as e:
app.logger.error("Scheduler - Failed scheduled check for unrouted notifications: cnt={cnt}, len(robjids)={a}, len(urobjids)={b}".format(cnt=counter,a=len(robjids),b=len(urobjids)))
app.logger.error("Scheduler - Failed scheduled check for unrouted notifications: '{x}'".format(x=e.message))
if app.config.get('CHECKUNROUTED_SCHEDULE',10) != 0:
schedule.every(app.config.get('CHECKUNROUTED_SCHEDULE',10)).minutes.do(checkunrouted)
def monthly_reporting():
# python schedule does not actually handle months, so this will run every day and check whether the current month has rolled over or not
try:
app.logger.debug('Scheduler - Running monthly reporting')
# create / update a monthly deliveries by institution report
# it should have the columns HEI, Jan, Feb...
# and rows are HEI names then count for each month
# finally ends with sum total (total of all numbers above)
# and unique total (total unique objects accessed - some unis may have accessed the same one)
# query the retrieval index to see which institutions have retrieved content from the router in the last month
month = datetime.datetime.now().strftime("%B")[0:3]
year = str(datetime.datetime.now().year)
app.logger.debug('Scheduler - checking monthly reporting for ' + month + ' ' + year)
reportsdir = app.config.get('REPORTSDIR','/home/green/jper_reports')
if not os.path.exists(reportsdir): os.makedirs(reportsdir)
monthtracker = reportsdir + '/monthtracker.cfg'
try:
lm = open(monthtracker,'r')
lastmonth = lm.read().strip('\n')
lm.close()
except:
lm = open(monthtracker,'w')
lm.close()
lastmonth = ''
if lastmonth != month:
app.logger.debug('Scheduler - updating monthly report of notifications delivered to institutions')
lmm = open(monthtracker,'w')
lmm.write(month)
lmm.close()
# get the month number that we are reporting on
tmth = datetime.datetime.utcnow().month - 1
# if the month is zero, it means the year just rolled over
if tmth == 0:
tmth = 12
lastyear = int(year) - 1
frm = str(lastyear) + "-" + str(tmth) + "-01T00:00:00Z"
to_date = str(year) + "-01-01T00:00:00Z"
else:
mnthstr = str(tmth) if tmth > 9 else "0" + str(tmth)
nexmnth = str(tmth + 1) if tmth + 1 > 9 else "0" + str(tmth + 1)
frm = str(year) + "-" + mnthstr + "-01T00:00:00Z"
if tmth == 12:
nextyear = int(year) + 1
to_date = str(nextyear) + "-01-01T00:00:00Z"
else:
to_date = str(year) + "-" + nexmnth + "-01T00:00:00Z"
# specify the file that we're going to output to
reportfile = reportsdir + '/monthly_notifications_to_institutions_' + year + '.csv'
# run the delivery report
reports.delivery_report(frm, to_date, reportfile)
# necessary tasks for other monthly reporting could be defined here
# reporting that has to run more regularly could be defined as different reporting methods altogether
# and controlled with different settings in the config
except Exception as e:
app.logger.error("Scheduler - Failed scheduled reporting job: '{x}'".format(x=e.message))
if app.config.get('SCHEDULE_MONTHLY_REPORTING',False):
schedule.every().day.at("00:05").do(monthly_reporting)
def delete_old_routed():
app.logger.info('Scheduler - checking for old routed indexes to delete')
try:
# each day send a delete to the index name that is beyond the range of those to keep
# so only actually has an effect on the first day of each month - other days in the month it is sending a delete to an index that is already gone
# index names look like routed201601
# so read from config how many months to keep, and add 1 to it
# so if in March, and keep is 3, then it becomes 4
keep = app.config.get('SCHEDULE_KEEP_ROUTED_MONTHS',3) + 1
year = datetime.datetime.utcnow().year
# subtracting the keep gives us a month of -1 if now March
month = datetime.datetime.utcnow().month - keep
if month < 1:
# so roll back the year, and set the month to 11 (if now March)
year = year - 1
month = 12 + month
# so idx would look like routed201511 if now March - meaning we would keep Dec, Jan, and Feb (and Mar currently in use of course)
idx = 'routed' + str(year) + '{0:02d}'.format(month)
addr = app.config['ELASTIC_SEARCH_HOST'] + '/' + app.config['ELASTIC_SEARCH_INDEX'] + '/' + idx
app.logger.debug('Scheduler - sending delete to ' + addr)
# send the delete - at the start of a month this would delete an index. Other days it will just fail
requests.delete(addr)
except Exception as e:
app.logger.error("Scheduler - Failed monthly routed index deletion: '{x}'".format(x=e.message))
if app.config.get('SCHEDULE_DELETE_OLD_ROUTED',False):
schedule.every().day.at("03:00").do(delete_old_routed)
def cheep():
app.logger.debug("Scheduled cheep")
print("Scheduled cheep")
#schedule.every(1).minutes.do(cheep)
def run():
while True:
schedule.run_pending()
time.sleep(1)
def go():
thread = Thread(target = run)
thread.daemon = True
thread.start()
if __name__ == "__main__":
initialise()
print("starting scheduler")
app.logger.debug("Scheduler - starting up directly in own process.")
run()
|
ftumixer.py
|
#!/usr/bin/env python
# Copyright 2013 Jonas Schulte-Coerne
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
try: # for python3 compatibility
import ConfigParser as configparser
except ImportError:
import configparser
import functools
import re
import os
import select
import threading
import wx
import alsaaudio
class Mixer(object):
"""
This class wraps the interaction with ALSA.
It is responsible for:
- setting the volume of an ALSA control
- asking for the volume of an ALSA control
- polling for changes of ALSA controls (does not work. Probably due to a
bug in pyalsaaudio)
If a method of this class needs a channel as a parameter, it is given as an
integer starting with 0. This differs from the GUI and the names of the
Fast Track Ultra's ALSA controls, where channel numbers start with 1.
"""
def __init__(self, card_index, disable_effects, mute_most_digital_routes):
"""
@param card_index: the card index of the Fast Track Ultra that shall be
controlled. (hw:1 means that the card index is 1)
@param disable_effects: if True, all controls for the effects of the
Fast Track Ultra will be muted
@param mute_most_digital_routes: if True, all digital routes are muted,
except for routes from a digital input
to an output with the same number. This
way the routing of the digital signals
can be done with JACK
"""
self.__card_index = card_index
# poll for mixer value changes
self.__observers = [] # a list of functions that are called when a mixer value changes
self.__descriptors_to_routes = {}
self.__poll = select.epoll()
self.__polling_active = True
self.__polling_thread = threading.Thread(target=self.__PollForChanges)
self.__polling_thread.start()
# create mixer objects
regex_analog = re.compile(r"AIn\d - Out\d")
regex_digital = re.compile(r"DIn\d - Out\d")
self.__analog_routes = []
self.__digital_routes = []
self.__fx_control_names = []
for name in alsaaudio.mixers(self.__card_index):
if regex_analog.match(name):
self.__CreateRoute(name=name, digital=False)
elif regex_digital.match(name):
self.__CreateRoute(name=name, digital=True)
else:
self.__fx_control_names.append(name)
if disable_effects:
self.DisableEffects()
if mute_most_digital_routes:
self.MuteMostDigitalRoutes()
def Destroy(self):
"""
Stops the polling thread, that polls for changes of ALSA controls.
"""
self.__polling_active = False
self.__polling_thread.join()
def GetNumberOfChannels(self):
"""
Returns the number of channels of the audio interface.
It is assumed that the audio interface has as many input channels as it
has output channels.
"""
return len(self.__analog_routes)
def GetVolume(self, output_channel, input_channel, digital=False):
"""
Returns the volume of the ALSA control that is specified by the parameters.
The result is an integer between 0 and 100.
The channel numbers for the input and output channels start with 0.
"""
if digital:
return self.__digital_routes[output_channel][input_channel].getvolume()[0]
else:
return self.__analog_routes[output_channel][input_channel].getvolume()[0]
def SetVolume(self, value, output_channel, input_channel, digital=False):
"""
Sets the volume of the ALSA control that is specified by the parameters.
The given value shall be an integer between 0 and 100.
The channel numbers for the input and output channels start with 0.
"""
if digital:
self.__digital_routes[output_channel][input_channel].setvolume(value, 0)
else:
self.__analog_routes[output_channel][input_channel].setvolume(value, 0)
def AddObserver(self, function):
"""
Adds an observer function that will be called when an ALSA control has
been changed by an external program.
This function has to accept the two arguments "changed_analog_routes" and
"changed_digital_routes". These arguments are lists of tuples of integers
(output, input), that specify the routes that have changed.
The channel numbers for the input and output channels start with 0.
"""
self.__observers.append(function)
def DisableEffects(self):
"""
This method mutes all ALSA controls that are related to the Fast Track
Ultra's built in effects processor.
"""
for n in self.__fx_control_names:
c = alsaaudio.Mixer(n, cardindex=self.__card_index)
if c.volumecap() != []:
c.setvolume(0, 0)
def MuteMostDigitalRoutes(self):
"""
This method mutes all digital routes, except for routes from a digital
input to an output with the same number ("DIn1 - Out1", "DIn2 - Out2"...).
This way the routing of the digital signals can be done with JACK or alike.
"""
for o in range(len(self.__digital_routes)):
for i in range(len(self.__digital_routes[o])):
if o != i:
self.__digital_routes[o][i].setvolume(0, 0)
def GetConfigDict(self):
"""
Returns a dictionary with the values of all ALSA controls for the Fast
Track Ultra, including the effects controls.
This dictionary can then be saved to a config file.
"""
result = {}
result["Analog"] = {}
for o in range(len(self.__analog_routes)):
for i in range(len(self.__analog_routes[o])):
result["Analog"]["ain%i_to_out%i" % (i + 1, o + 1)] = self.GetVolume(output_channel=o, input_channel=i, digital=False)
result["Digital"] = {}
for o in range(len(self.__digital_routes)):
for i in range(len(self.__digital_routes[o])):
result["Digital"]["din%i_to_out%i" % (i + 1, o + 1)] = self.GetVolume(output_channel=o, input_channel=i, digital=True)
result["Effects"] = {}
for n in self.__fx_control_names:
mixer = alsaaudio.Mixer(n, cardindex=self.__card_index)
cname = n.replace(" ", "_").lower()
if mixer.getenum() == ():
result["Effects"][cname] = mixer.getvolume()[0]
else:
result["Effects"][cname] = mixer.getenum()[0]
return result
def ParseConfigDict(self, configdict):
"""
Sets the values of ALSA controls according to the values in the given
dictionary.
Only the controls for which the dictionary contains a value are changed.
"""
changed_analog_routes = []
changed_digital_routes = []
if "Analog" in configdict:
for key in configdict["Analog"]:
i, o = [int(s) - 1 for s in key.split("ain")[1].split("_to_out")]
self.SetVolume(value=int(configdict["Analog"][key]), output_channel=o, input_channel=i, digital=False)
changed_analog_routes.append((o, i))
if "Digital" in configdict:
for key in configdict["Digital"]:
i, o = [int(s) - 1 for s in key.split("din")[1].split("_to_out")]
self.SetVolume(value=int(configdict["Digital"][key]), output_channel=o, input_channel=i, digital=True)
changed_digital_routes.append((o, i))
if "Effects" in configdict:
for n in self.__fx_control_names:
cname = n.replace(" ", "_").lower()
if cname in configdict["Effects"]:
mixer = alsaaudio.Mixer(n, cardindex=self.__card_index)
if mixer.getenum() == ():
mixer.setvolume(int(configdict["Effects"][cname]), 0)
elif configdict["Effects"][cname] in mixer.getenum()[1]:
# I have not found a way to do this with pyalsaaudio, yet
import subprocess
call = []
call.append("amixer")
call.append("-c%i" % self.__card_index)
call.append("sset")
call.append(str(n))
call.append(str(configdict["Effects"][cname]))
subprocess.check_output(call)
for o in self.__observers:
o(changed_analog_routes, changed_digital_routes)
def __CreateRoute(self, name, digital):
"""
Used internally to set up the alsaaudio.Mixer objects and the select.epoll
object that polls for changes in the ALSA controls.
"""
out_index = int(name[10]) - 1
in_index = int(name[3]) - 1
list_of_routes = self.__analog_routes
if digital:
list_of_routes = self.__digital_routes
# create data structure
for i in range(len(list_of_routes), out_index + 1):
list_of_routes.append([])
for i in range(len(list_of_routes[out_index]), in_index + 1):
list_of_routes[out_index].append(None)
# create mixer
route = alsaaudio.Mixer(name, cardindex=self.__card_index)
list_of_routes[out_index][in_index] = route
# enable poll for changes
descriptor = route.polldescriptors()[0]
self.__poll.register(*descriptor)
self.__descriptors_to_routes[descriptor[0]] = (out_index, in_index, digital, descriptor[1], descriptor[0])
def __PollForChanges(self):
"""
This method is run in a separate thread. It polls for changes in the
ALSA controls, so this program can update itself when an external program
changes a control.
"""
while self.__polling_active:
changed_analog_routes = []
changed_digital_routes = []
for d in self.__poll.poll(700):
if d[1] & select.POLLIN:
route = self.__descriptors_to_routes[d[0]]
if route[2]:
changed_digital_routes.append(route[0:2])
else:
changed_analog_routes.append(route[0:2])
os.read(d[0], 512)
if changed_analog_routes != [] or changed_digital_routes != []:
for o in self.__observers:
o(changed_analog_routes, changed_digital_routes)
class Gui(object):
"""
This class sets up the GUI for the mixer.
It is responsible for:
- initializing the GUI's window
- adding itself to the given Config object
- running the GUI's main loop
For information about how to use the GUI, see the README file.
"""
def __init__(self, mixer, config):
"""
@param mixer: a Mixer object
@param config: a Config object
"""
self.__mixer = mixer
self.__config = config
self.__config.SetGui(self)
self.__app = wx.PySimpleApp()
self.__frame = wx.Frame(parent=None, title="Fast Track Ultra Mixer", size=(480, 320))
self.__app.SetTopWindow(self.__frame)
# menu
menubar = wx.MenuBar()
self.__frame.SetMenuBar(menubar)
filemenu = wx.Menu()
menubar.Append(filemenu, "File")
loaditem = filemenu.Append(id=wx.ID_ANY, text="Load config")
self.__frame.Bind(wx.EVT_MENU, self.__OnLoadConfig, loaditem)
saveitem = filemenu.Append(id=wx.ID_ANY, text="Save config")
self.__frame.Bind(wx.EVT_MENU, self.__OnSaveConfig, saveitem)
helpmenu = wx.Menu()
menubar.Append(helpmenu, "Help")
infoitem = helpmenu.Append(id=wx.ID_ANY, text="Info")
self.__frame.Bind(wx.EVT_MENU, self.__OnInfo, infoitem)
# notebook
mainsizer = wx.BoxSizer(wx.VERTICAL)
self.__frame.SetSizer(mainsizer)
notebook = wx.Notebook(parent=self.__frame)
mainsizer.Add(notebook, 1, wx.EXPAND)
# master slider
masterpanel = wx.Panel(parent=notebook)
notebook.AddPage(masterpanel, "Master")
masterpanelsizer = wx.BoxSizer(wx.HORIZONTAL)
masterpanel.SetSizer(masterpanelsizer)
mastersizer = wx.BoxSizer(wx.VERTICAL)
masterpanelsizer.Add(mastersizer, 1, wx.EXPAND)
mlabel = wx.StaticText(parent=masterpanel, label="Master")
mastersizer.Add(mlabel, 0, wx.ALIGN_CENTER_HORIZONTAL)
self.__masterslider = wx.Slider(parent=masterpanel, style=wx.SL_VERTICAL | wx.SL_INVERSE)
mastersizer.Add(self.__masterslider, 1, wx.EXPAND)
self.__masterslider.SetMin(0)
self.__masterslider.SetMax(100)
mastervalue = 0
self.__masterlabel = wx.StaticText(parent=masterpanel)
mastersizer.Add(self.__masterlabel, 0, wx.ALIGN_CENTER_HORIZONTAL)
self.__masterslider.Bind(wx.EVT_SLIDER, self.__OnMaster)
# macros
buttonbox = wx.StaticBox(parent=masterpanel, label="Macros")
buttonsizer = wx.StaticBoxSizer(box=buttonbox, orient=wx.VERTICAL)
masterpanelsizer.Add(buttonsizer, 1, wx.EXPAND)
mute_hardware_routes = wx.Button(parent=masterpanel, label="Mute hardware routes")
buttonsizer.Add(mute_hardware_routes, 0, wx.EXPAND)
mute_hardware_routes.Bind(wx.EVT_BUTTON, self.MuteHardwareRoutes)
pass_through_inputs = wx.Button(parent=masterpanel, label="Pass through inputs")
buttonsizer.Add(pass_through_inputs, 0, wx.EXPAND)
pass_through_inputs.Bind(wx.EVT_BUTTON, self.PassThroughInputs)
disable_effects = wx.Button(parent=masterpanel, label="Disable effects")
buttonsizer.Add(disable_effects, 0, wx.EXPAND)
disable_effects.Bind(wx.EVT_BUTTON, self.__DisableEffects)
mute_most_digital_routes = wx.Button(parent=masterpanel, label="Mute most digital routes")
buttonsizer.Add(mute_most_digital_routes, 0, wx.EXPAND)
mute_most_digital_routes.Bind(wx.EVT_BUTTON, self.__MuteMostDigitalRoutes)
# hardware routing sections
self.__hardwarerouting_sliders = []
self.__links = []
self.__linkchoices = []
for o in range(self.__mixer.GetNumberOfChannels()):
self.__hardwarerouting_sliders.append([])
panel = wx.Panel(parent=notebook)
notebook.AddPage(panel, "Out%i" % (o + 1))
panelsizer = wx.BoxSizer(wx.VERTICAL)
panel.SetSizer(panelsizer)
psizer = wx.BoxSizer(wx.HORIZONTAL)
panelsizer.Add(psizer, 1, wx.EXPAND)
for i in range(self.__mixer.GetNumberOfChannels()):
ssizer = wx.BoxSizer(wx.VERTICAL)
psizer.Add(ssizer, 1, wx.EXPAND)
clabel = wx.StaticText(parent=panel, label="AIn%i" % (i + 1))
ssizer.Add(clabel, 0, wx.ALIGN_CENTER_HORIZONTAL)
slider = wx.Slider(parent=panel, style=wx.SL_VERTICAL | wx.SL_INVERSE)
ssizer.Add(slider, 1, wx.EXPAND)
slider.SetMin(0)
slider.SetMax(100)
slider.SetValue(self.__mixer.GetVolume(output_channel=o, input_channel=i))
vlabel = wx.StaticText(parent=panel)
vlabel.SetLabel(str(self.__mixer.GetVolume(output_channel=o, input_channel=i)))
ssizer.Add(vlabel, 0, wx.ALIGN_CENTER_HORIZONTAL)
partial = functools.partial(self.__OnHardwareRouting, output_channel=o, input_channel=i)
slider.Bind(wx.EVT_SLIDER, partial)
self.__hardwarerouting_sliders[o].append((slider, vlabel))
# linking of output channels
self.__links.append(None)
lsizer = wx.BoxSizer(wx.HORIZONTAL)
panelsizer.Add(lsizer, 0, wx.EXPAND)
linklabel = wx.StaticText(parent=panel, label="Link to")
lsizer.Add(linklabel, 0, wx.ALIGN_CENTER_VERTICAL)
linkchoices = ["Out%i" % (i + 1) for i in range(0, self.__mixer.GetNumberOfChannels()) if i != o]
linkchoices.insert(0, "None")
linkchoice = wx.Choice(parent=panel, choices=linkchoices)
lsizer.Add(linkchoice)
partial = functools.partial(self.__OnLink, output_channel=o, choice=linkchoice)
linkchoice.Bind(wx.EVT_CHOICE, partial)
self.__linkchoices.append(linkchoice)
# calculating value for master slider
mastervalue += self.__mixer.GetVolume(output_channel=o, input_channel=o, digital=True)
mastervalue /= float(self.__mixer.GetNumberOfChannels())
self.__masterslider.SetValue(int(round(mastervalue)))
self.__masterlabel.SetLabel(str(self.__masterslider.GetValue()))
self.__mixer.AddObserver(self.__OnMixerEvent)
def MainLoop(self):
"""
Lays out the main window, shows it, and runs the wx main loop.
This method blocks until the window is closed.
"""
self.__frame.Layout()
self.__frame.Show()
self.__app.MainLoop()
def GetConfigDict(self):
"""
Returns a dictionary with the configuration values for the GUI.
The dictionary will contain information about which outputs are linked.
The dictionary can be saved with the configuration.
"""
result = {}
result["GUI"] = {}
for i in range(len(self.__links)):
if self.__links[i] is None:
result["GUI"]["link%ito" % (i + 1)] = "0"
else:
result["GUI"]["link%ito" % (i + 1)] = str(self.__links[i] + 1)
return result
def ParseConfigDict(self, configdict):
"""
Parses a configuration dictionary and sets up the GUI accordingly.
"""
if "GUI" in configdict:
for key in configdict["GUI"]:
link = int(key.lstrip("link").rstrip("to")) - 1
to = int(configdict["GUI"][key]) - 1
if to < 0:
self.__links[link] = None
self.__linkchoices[link].SetStringSelection("None")
else:
self.__links[link] = to
self.__linkchoices[link].SetStringSelection("Out%i" % (to + 1))
def __OnMaster(self, event):
"""
This will be called when the "master"-slider is moved.
It sets the values of the ALSA controls for the routes from a digital input
to its respective output (with the same number as the input) to the value
of the slider.
"""
for c in range(self.__mixer.GetNumberOfChannels()):
self.__mixer.SetVolume(value=self.__masterslider.GetValue(), output_channel=c, input_channel=c, digital=True)
self.__masterlabel.SetLabel(str(self.__masterslider.GetValue()))
def __OnHardwareRouting(self, event, output_channel, input_channel):
"""
This will be called when one of the sliders for the routing of the analog
signals is moved.
"""
slider, vlabel = self.__hardwarerouting_sliders[output_channel][input_channel]
volume = slider.GetValue()
self.__mixer.SetVolume(value=volume, output_channel=output_channel, input_channel=input_channel)
vlabel.SetLabel(str(volume))
linked_output = self.__links[output_channel]
if linked_output is not None and event is not None:
linked_slider = self.__hardwarerouting_sliders[linked_output][input_channel][0]
if event.GetId() != linked_slider.GetId():
linked_slider.SetValue(volume)
self.__OnHardwareRouting(event=event, output_channel=linked_output, input_channel=input_channel)
def __OnMixerEvent(self, changed_analog_routes, changed_digital_routes):
"""
This will be passed to the mixer as an observer; it is called when an
external program changes an ALSA control of the Fast Track Ultra.
This method can be called from a different thread, because all accesses to
the GUI are encapsulated in a nested function that is executed via wx.CallAfter.
"""
def worker():
for o, i in changed_analog_routes:
volume = self.__mixer.GetVolume(output_channel=o, input_channel=i)
slider, vlabel = self.__hardwarerouting_sliders[o][i]
if volume != slider.GetValue():
# print "A change in route from input %i to output %i" % (i, o)
slider.SetValue(volume)
vlabel.SetLabel(str(volume))
for o, i in changed_digital_routes:
if o == i:
mastervolume = 0
for c in range(self.__mixer.GetNumberOfChannels()):
mastervolume += self.__mixer.GetVolume(output_channel=c, input_channel=c, digital=True)
mastervolume /= float(self.__mixer.GetNumberOfChannels())
self.__masterslider.SetValue(int(round(mastervolume)))
self.__masterlabel.SetLabel(str(self.__masterslider.GetValue()))
break
wx.CallAfter(worker)
def __OnLink(self, event, output_channel, choice):
"""
This method is called when one of the "link to"-dropdown selectors has
changed.
"""
selection = choice.GetStringSelection()
if selection == "None":
self.__links[output_channel] = None
else:
self.__links[output_channel] = int(selection[-1]) - 1
def __OnLoadConfig(self, event):
"""
This method is called when the menu's "Load config" item is clicked.
It shows a file selector dialog and loads the config from the selected file.
"""
dialog = wx.FileDialog(parent=self.__frame, style=wx.FD_OPEN)
if dialog.ShowModal() == wx.ID_OK:
self.__config.Load(filename=dialog.GetPath())
dialog.Destroy()
def __OnSaveConfig(self, event):
"""
This method is called when the menu's "Save config" item is clicked.
It shows a file selector dialog and saves the config to the selected file.
"""
dialog = wx.FileDialog(parent=self.__frame, style=wx.FD_SAVE)
if dialog.ShowModal() == wx.ID_OK:
self.__config.Save(filename=dialog.GetPath())
dialog.Destroy()
def __OnInfo(self, event):
"""
This method is called when the menu's "Info" item is clicked.
It shows a message box that displays information about the license of this
program and where to get help.
"""
text = []
text.append("Fast Track Ultra Mixer")
text.append("")
text.append("(c) Copyright Jonas Schulte-Coerne")
text.append("This program is licensed under the terms of the Apache License, version 2.")
text.append("For more information about the license see: http://www.apache.org/licenses/LICENSE-2.0")
text.append("")
text.append("For help about how to use this program, see https://github.com/JonasSC/FTU-Mixer")
wx.MessageBox("\n".join(text), "Info", wx.OK | wx.ICON_INFORMATION)
def MuteHardwareRoutes(self, event=None):
"""
A method for a button in the "Macros" box in the "master" tab of the notebook.
It mutes all routes for the analog inputs.
"""
for o in range(self.__mixer.GetNumberOfChannels()):
for i in range(self.__mixer.GetNumberOfChannels()):
self.__hardwarerouting_sliders[o][i][0].SetValue(0)
self.__OnHardwareRouting(event=None, output_channel=o, input_channel=i)
def PassThroughInputs(self, event=None):
"""
A method for a button in the "Macros" box in the "master" tab of the notebook.
It turns all routes from analog inputs to outputs with the same number
to full volume. Other routes are not changed.
"""
for c in range(self.__mixer.GetNumberOfChannels()):
self.__hardwarerouting_sliders[c][c][0].SetValue(100)
self.__OnHardwareRouting(event=None, output_channel=c, input_channel=c)
def __DisableEffects(self, event):
"""
A method for a button in the "Macros" box in the "master" tab of the notebook.
It mutes all ALSA controls that are related to the Fast Track Ultra's
built-in effects processor.
"""
self.__mixer.DisableEffects()
def __MuteMostDigitalRoutes(self, event):
"""
A method for a button in the "Macros" box in the "master" tab of the notebook.
This method mutes all digital routes, except for routes from a digital
input to an output with the same number ("DIn1 - Out1", "DIn2 - Out2"...).
This way, the routing of the digital signals can be done with JACK or the like.
"""
self.__mixer.MuteMostDigitalRoutes()
class Config(object):
"""
This class wraps the config file handling.
It is responsible for:
- gathering config dictionaries from the mixer and the GUI, joining them
and saving them to a config file
- loading a config file to a dictionary and passing that to the mixer and
the GUI
"""
def __init__(self, mixer):
"""
@param mixer: a Mixer instance
"""
self.__mixer = mixer
self.__gui = None
def Load(self, filename):
"""
Loads a config file to a dictionary and passes that to the mixer and the
GUI objects.
"""
configdict = {}
parser = configparser.ConfigParser()
parser.read(filename)
for s in parser.sections():
configdict[s] = {}
for o in parser.options(s):
configdict[s][o] = parser.get(s, o)
self.__mixer.ParseConfigDict(configdict)
if self.__gui is not None:
self.__gui.ParseConfigDict(configdict)
def Save(self, filename):
"""
Retrieves the config dictionaries from the mixer and the GUI and saves
them to a config file.
"""
# generate configdict
configdict = self.__mixer.GetConfigDict()
if self.__gui is not None:
gui_configdict = self.__gui.GetConfigDict()
for section in gui_configdict:
configdict[section] = gui_configdict[section]
# write it to a config file
parser = configparser.ConfigParser()
for s in configdict:
parser.add_section(s)
for v in configdict[s]:
parser.set(s, v, configdict[s][v])
with open(filename, 'w') as configfile:
parser.write(configfile)
def SetGui(self, gui):
"""
Sets the GUI object.
"""
self.__gui = gui
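# For reference, a saved config file produced by Save() looks roughly like the
# sketch below. The section and key names follow Mixer.GetConfigDict() and
# Gui.GetConfigDict(); the values and the effects control name are illustrative.
#
# [Analog]
# ain1_to_out1 = 100
# ain2_to_out1 = 0
#
# [Digital]
# din1_to_out1 = 80
# din2_to_out1 = 0
#
# [Effects]
# effect_volume = 0
#
# [GUI]
# link1to = 2
# link2to = 1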
if __name__ == "__main__":
card_index = None
i = 0
for c in alsaaudio.cards():
if c == "Ultra" or c == "F8R":
card_index = i
i += 1
if card_index is None:
print "No M-Audio Fast Track Ultra or Ultra 8R found. Exiting..."
else:
# parse command line arguments
parser = argparse.ArgumentParser(description="A little mixer for the M-Audio Fast Track Ultra audio interfaces.")
parser.add_argument("-c, --card", dest="card_index", action="store", default=card_index, help="The card index of the interface that shall be controlled.")
parser.add_argument("-l, --load-config", dest="config", action="store", default="", help="A configuration file that shall be loaded on startup.")
parser.add_argument("-X, --no-gui", dest="show_gui", action="store_false", default=True, help="Do not show the mixer GUI.")
parser.add_argument("-F, --dont-disable-fx", dest="disable_effects", action="store_false", default=True, help="Do not disable all effects on startup.")
parser.add_argument("-M, --dont-mute-most-digital-outputs", dest="mute_most_digital_routes", action="store_false", default=True, help="Do not mute most digital outputs on startup. Without this all digital outputs will be muted except for 'DIn1 - Out1', 'Din2 - Out2'... so the routing of the digital signals can be done with JACK.")
parser.add_argument("-m, --mute-hardware-routes", dest="mute_hardware_routes", action="store_true", default=False, help="Mute all hardware routes of the analog signals.")
parser.add_argument("-p, --pass-through-inputs", dest="pass_through_inputs", action="store_true", default=False, help="Route all analog inputs to their respective outputs. This does not affect other routes.")
args = parser.parse_args()
# setup necessary objects
mixer = Mixer(card_index=args.card_index, disable_effects=args.disable_effects, mute_most_digital_routes=args.mute_most_digital_routes)
config = Config(mixer=mixer)
if args.show_gui:
gui = Gui(mixer=mixer, config=config)
# configure objects according to the command line arguments
if args.mute_hardware_routes:
gui.MuteHardwareRoutes()
if args.pass_through_inputs:
gui.PassThroughInputs()
configpath = os.path.normpath(os.path.abspath(os.path.expanduser(os.path.expandvars(args.config))))
if os.path.exists(configpath):
config.Load(filename=configpath)
# run the GUI if necessary
if args.show_gui:
gui.MainLoop()
# clean up
mixer.Destroy()
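# Example invocation (the script filename is illustrative):
#   python ftu_mixer.py --card 1 --load-config ~/ftu_mixer.conf --no-gui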
|
threading_progress.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# threading_progress.py
"""
Generic classes to perform threading with a progress frame
"""
# Copyright (c) 2020 Dan Cutright
# This file is part of DVHA DICOM Editor, released under a BSD license.
# See the file LICENSE included with this distribution, also
# available at https://github.com/cutright/DVHA-DICOM-Editor
import wx
from pubsub import pub
from queue import Queue
from threading import Thread
from time import sleep
class ProgressFrame(wx.Dialog):
"""Create a window to display progress and begin provided worker"""
def __init__(
self,
obj_list,
action,
close_msg=None,
action_msg=None,
action_gui_phrase="Processing",
title="Progress",
sub_gauge=False,
kwargs=False,
star_map=False,
):
wx.Dialog.__init__(self, None)
self.close_msg = close_msg
self.worker_args = [
obj_list,
action,
action_msg,
action_gui_phrase,
title,
kwargs,
star_map,
]
self.action_gui_phrase = action_gui_phrase
self.gauge = wx.Gauge(self, wx.ID_ANY, 100)
self.sub_gauge = wx.Gauge(self, wx.ID_ANY, 100) if sub_gauge else None
self.label = wx.StaticText(self, wx.ID_ANY, "Initializing...")
self.sub_label = (
wx.StaticText(self, wx.ID_ANY, "") if sub_gauge else None
)
self.__set_properties()
self.__do_subscribe()
self.__do_layout()
self.run()
def run(self):
"""Initiate layout in GUI and begin thread"""
self.Show()
ProgressFrameWorker(*self.worker_args)
def __set_properties(self):
self.SetMinSize((672, 100))
def __do_subscribe(self):
pub.subscribe(self.update, "progress_update")
pub.subscribe(self.sub_update, "sub_progress_update")
pub.subscribe(self.set_title, "progress_set_title")
pub.subscribe(self.close, "progress_close")
@staticmethod
def __do_unsubscribe():
pub.unsubAll(topicName="progress_update")
pub.unsubAll(topicName="sub_progress_update")
pub.unsubAll(topicName="progress_set_title")
pub.unsubAll(topicName="progress_close")
def __do_layout(self):
sizer_wrapper = wx.BoxSizer(wx.VERTICAL)
sizer_objects = wx.BoxSizer(wx.VERTICAL)
sizer_objects.Add(self.label, 0, 0, 0)
sizer_objects.Add(self.gauge, 0, wx.EXPAND, 0)
if self.sub_gauge is not None:
sizer_objects.Add(self.sub_label, 0, 0, 0)
sizer_objects.Add(self.sub_gauge, 0, wx.EXPAND, 0)
sizer_wrapper.Add(sizer_objects, 0, wx.ALL | wx.EXPAND, 10)
self.SetSizer(sizer_wrapper)
self.Fit()
self.Layout()
self.Center()
def set_title(self, msg):
"""
Parameters
----------
msg :
Returns
-------
"""
wx.CallAfter(self.SetTitle, msg)
def update(self, msg):
"""Update the progress message and gauge
Parameters
----------
msg : dict
a dictionary whose 'label' and 'gauge' keys hold the progress text and fraction, respectively
Returns
-------
"""
label = msg["label"]
wx.CallAfter(self.label.SetLabelText, label)
wx.CallAfter(self.gauge.SetValue, int(100 * msg["gauge"]))
def sub_update(self, msg):
"""
Parameters
----------
msg :
Returns
-------
"""
label = msg["label"]
wx.CallAfter(self.sub_label.SetLabelText, label)
wx.CallAfter(self.sub_gauge.SetValue, int(100 * msg["gauge"]))
def close(self):
"""Destroy layout in GUI and send message close message for parent"""
if self.close_msg is not None:
wx.CallAfter(pub.sendMessage, self.close_msg)
self.__do_unsubscribe()
wx.CallAfter(self.Destroy)
class ProgressFrameWorker(Thread):
"""Create a thread, perform action on each item in obj_list"""
def __init__(
self,
obj_list,
action,
action_msg,
action_gui_phrase,
title,
kwargs,
star_map,
):
Thread.__init__(self)
pub.sendMessage("progress_set_title", msg=title)
self.obj_list = obj_list
self.obj_count = len(self.obj_list)
self.action = action
self.action_msg = action_msg
self.action_gui_phrase = action_gui_phrase
self.kwargs = kwargs
self.star_map = star_map
self.start()
def run(self):
""" """
queue = self.get_queue()
worker = Thread(target=self.target, args=[queue])
worker.daemon = True
worker.start()
queue.join()
sleep(0.3) # Allow time for user to see final progress in GUI
pub.sendMessage("progress_close")
def get_queue(self):
""" """
queue = Queue()
for i, obj in enumerate(self.obj_list):
msg = {
"label": "%s (%s of %s)"
% (self.action_msg, i + 1, len(self.obj_list)),
"gauge": float(i / len(self.obj_list)),
}
queue.put((obj, msg))
return queue
def target(self, queue):
"""
Parameters
----------
queue :
Returns
-------
"""
while queue.qsize():
parameters = queue.get()
self.do_action(*parameters)
queue.task_done()
msg = {"label": "Process Complete", "gauge": 1.0}
wx.CallAfter(pub.sendMessage, "progress_update", msg=msg)
def do_action(self, obj, msg):
"""
Parameters
----------
obj :
msg :
Returns
-------
"""
wx.CallAfter(pub.sendMessage, "progress_update", msg=msg)
if self.kwargs:
result = self.action(**obj)
elif self.star_map:
result = self.action(*obj)
else:
result = self.action(obj)
if self.action_msg is not None:
msg = {"obj": obj, "data": result}
wx.CallAfter(pub.sendMessage, self.action_msg, msg=msg)
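# --- Example usage (a minimal sketch) ---
# Shows how ProgressFrame might be driven from a small wx application.
# `slow_action`, `on_item_done`, and the "demo.item_done" topic are illustrative
# names introduced only for this sketch.
if __name__ == "__main__":
    def slow_action(item):
        # pretend to do some work on one item and return a result
        sleep(0.2)
        return item * 2

    def on_item_done(msg):
        # msg is {"obj": <input item>, "data": <result of slow_action>}
        print("finished", msg["obj"], "->", msg["data"])

    pub.subscribe(on_item_done, "demo.item_done")
    demo_app = wx.App()
    ProgressFrame(
        list(range(10)),
        slow_action,
        action_msg="demo.item_done",  # used both as the label prefix and as the result topic
        title="Demo progress",
    )
    demo_app.MainLoop()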
|
utils.py
|
#================================================================
#
# File name : utils.py
# Author : PyLessons
# Created date: 2020-09-27
# Website : https://pylessons.com/
# GitHub : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
# Description : additional yolov3 and yolov4 functions
#
#================================================================
from multiprocessing import Process, Queue, Pipe
import cv2
import time
import random
import colorsys
import numpy as np
import tensorflow as tf
import sys
sys.path.append('/home/dylan/catkin_ws/src/yolo_ros/src/TensorFlow_Yolo') # Need to edit to own path
from yolov3.configs import *
from yolov3.yolov4 import *
from tensorflow.python.saved_model import tag_constants
checkpoint_path = f"/home/dylan/catkin_ws/src/yolo_ros/src/TensorFlow_Yolo/checkpoints/{TRAIN_MODEL_NAME}" # Need to edit to own path
def load_yolo_weights(model, weights_file):
tf.keras.backend.clear_session() # used to reset layer names
# load Darknet original weights to TensorFlow model
if YOLO_TYPE == "yolov3":
range1 = 75 if not TRAIN_YOLO_TINY else 13
range2 = [58, 66, 74] if not TRAIN_YOLO_TINY else [9, 12]
if YOLO_TYPE == "yolov4":
range1 = 110 if not TRAIN_YOLO_TINY else 21
range2 = [93, 101, 109] if not TRAIN_YOLO_TINY else [17, 20]
with open(weights_file, 'rb') as wf:
major, minor, revision, seen, _ = np.fromfile(wf, dtype=np.int32, count=5)
j = 0
for i in range(range1):
if i > 0:
conv_layer_name = 'conv2d_%d' %i
else:
conv_layer_name = 'conv2d'
if j > 0:
bn_layer_name = 'batch_normalization_%d' %j
else:
bn_layer_name = 'batch_normalization'
conv_layer = model.get_layer(conv_layer_name)
filters = conv_layer.filters
k_size = conv_layer.kernel_size[0]
in_dim = conv_layer.input_shape[-1]
if i not in range2:
# darknet weights: [beta, gamma, mean, variance]
bn_weights = np.fromfile(wf, dtype=np.float32, count=4 * filters)
# tf weights: [gamma, beta, mean, variance]
bn_weights = bn_weights.reshape((4, filters))[[1, 0, 2, 3]]
bn_layer = model.get_layer(bn_layer_name)
j += 1
else:
conv_bias = np.fromfile(wf, dtype=np.float32, count=filters)
# darknet shape (out_dim, in_dim, height, width)
conv_shape = (filters, in_dim, k_size, k_size)
conv_weights = np.fromfile(wf, dtype=np.float32, count=np.prod(conv_shape))
# tf shape (height, width, in_dim, out_dim)
conv_weights = conv_weights.reshape(conv_shape).transpose([2, 3, 1, 0])
if i not in range2:
conv_layer.set_weights([conv_weights])
bn_layer.set_weights(bn_weights)
else:
conv_layer.set_weights([conv_weights, conv_bias])
assert len(wf.read()) == 0, 'failed to read all data'
def Load_Yolo_model():
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
print(f'GPUs {gpus}')
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: pass
if YOLO_FRAMEWORK == "tf": # TensorFlow detection
if YOLO_TYPE == "yolov4":
Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
if YOLO_CUSTOM_WEIGHTS == False:
print("Loading Darknet_weights from:", Darknet_weights)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
else:
checkpoint = checkpoint_path
if TRAIN_YOLO_TINY:
checkpoint += "_Tiny"
print("\nLOADING CUSTOM WEIGHTS FROM:", checkpoint)
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(checkpoint) # use custom weights
elif YOLO_FRAMEWORK == "trt": # TensorRT detection
saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
signature_keys = list(saved_model_loaded.signatures.keys())
yolo = saved_model_loaded.signatures['serving_default']
return yolo
def image_preprocess(image, target_size, gt_boxes=None):
ih, iw = target_size
h, w, _ = image.shape
scale = min(iw/w, ih/h)
nw, nh = int(scale * w), int(scale * h)
image_resized = cv2.resize(image, (nw, nh))
image_paded = np.full(shape=[ih, iw, 3], fill_value=128.0)
dw, dh = (iw - nw) // 2, (ih-nh) // 2
image_paded[dh:nh+dh, dw:nw+dw, :] = image_resized
image_paded = image_paded / 255.
if gt_boxes is None:
return image_paded
else:
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]] * scale + dw
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]] * scale + dh
return image_paded, gt_boxes
def draw_bbox(image, bboxes, CLASSES=YOLO_COCO_CLASSES, show_label=True, show_confidence = True, Text_colors=(255,255,0), rectangle_colors='', tracking=False):
NUM_CLASS = read_class_names(CLASSES)
num_classes = len(NUM_CLASS)
image_h, image_w, _ = image.shape
hsv_tuples = [(1.0 * x / num_classes, 1., 1.) for x in range(num_classes)]
#print("hsv_tuples", hsv_tuples)
colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors))
random.seed(0)
random.shuffle(colors)
random.seed(None)
for i, bbox in enumerate(bboxes):
coor = np.array(bbox[:4], dtype=np.int32)
score = bbox[4]
class_ind = int(bbox[5])
bbox_color = rectangle_colors if rectangle_colors != '' else colors[class_ind]
bbox_thick = int(0.6 * (image_h + image_w) / 1000)
if bbox_thick < 1: bbox_thick = 1
fontScale = 0.75 * bbox_thick
(x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2], coor[3])
# put object rectangle
cv2.rectangle(image, (x1, y1), (x2, y2), bbox_color, bbox_thick*2)
if show_label:
# get text label
score_str = " {:.2f}".format(score) if show_confidence else ""
if tracking: score_str = " "+str(score)
try:
label = "{}".format(NUM_CLASS[class_ind]) + score_str
except KeyError:
print("You received KeyError, this might be that you are trying to use yolo original weights")
print("while using custom classes, if using custom model in configs.py set YOLO_CUSTOM_WEIGHTS = True")
# get text size
(text_width, text_height), baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, thickness=bbox_thick)
# put filled text rectangle
cv2.rectangle(image, (x1, y1), (x1 + text_width, y1 - text_height - baseline), bbox_color, thickness=cv2.FILLED)
# put text above rectangle
cv2.putText(image, label, (x1, y1-4), cv2.FONT_HERSHEY_COMPLEX_SMALL,
fontScale, Text_colors, bbox_thick, lineType=cv2.LINE_AA)
return image
def bboxes_iou(boxes1, boxes2):
boxes1 = np.array(boxes1)
boxes2 = np.array(boxes2)
boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])
left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])
inter_section = np.maximum(right_down - left_up, 0.0)
inter_area = inter_section[..., 0] * inter_section[..., 1]
union_area = boxes1_area + boxes2_area - inter_area
ious = np.maximum(1.0 * inter_area / union_area, np.finfo(np.float32).eps)
return ious
def nms(bboxes, iou_threshold, sigma=0.3, method='nms'):
"""
:param bboxes: (xmin, ymin, xmax, ymax, score, class)
Note: soft-nms, https://arxiv.org/pdf/1704.04503.pdf
https://github.com/bharatsingh430/soft-nms
"""
classes_in_img = list(set(bboxes[:, 5]))
best_bboxes = []
for cls in classes_in_img:
cls_mask = (bboxes[:, 5] == cls)
cls_bboxes = bboxes[cls_mask]
# Process 1: Determine whether the number of bounding boxes is greater than 0
while len(cls_bboxes) > 0:
# Process 2: Select the bounding box A with the highest score
max_ind = np.argmax(cls_bboxes[:, 4])
best_bbox = cls_bboxes[max_ind]
best_bboxes.append(best_bbox)
cls_bboxes = np.concatenate([cls_bboxes[: max_ind], cls_bboxes[max_ind + 1:]])
# Process 3: Compute the IoU between box A and the remaining boxes,
# and remove (or down-weight, for soft-nms) those whose IoU exceeds the threshold
iou = bboxes_iou(best_bbox[np.newaxis, :4], cls_bboxes[:, :4])
weight = np.ones((len(iou),), dtype=np.float32)
assert method in ['nms', 'soft-nms']
if method == 'nms':
iou_mask = iou > iou_threshold
weight[iou_mask] = 0.0
if method == 'soft-nms':
weight = np.exp(-(1.0 * iou ** 2 / sigma))
cls_bboxes[:, 4] = cls_bboxes[:, 4] * weight
score_mask = cls_bboxes[:, 4] > 0.
cls_bboxes = cls_bboxes[score_mask]
return best_bboxes
def postprocess_boxes(pred_bbox, original_image, input_size, score_threshold):
valid_scale=[0, np.inf]
pred_bbox = np.array(pred_bbox)
pred_xywh = pred_bbox[:, 0:4]
pred_conf = pred_bbox[:, 4]
pred_prob = pred_bbox[:, 5:]
# 1. (x, y, w, h) --> (xmin, ymin, xmax, ymax)
pred_coor = np.concatenate([pred_xywh[:, :2] - pred_xywh[:, 2:] * 0.5,
pred_xywh[:, :2] + pred_xywh[:, 2:] * 0.5], axis=-1)
# 2. (xmin, ymin, xmax, ymax) -> (xmin_org, ymin_org, xmax_org, ymax_org)
org_h, org_w = original_image.shape[:2]
resize_ratio = min(input_size / org_w, input_size / org_h)
dw = (input_size - resize_ratio * org_w) / 2
dh = (input_size - resize_ratio * org_h) / 2
pred_coor[:, 0::2] = 1.0 * (pred_coor[:, 0::2] - dw) / resize_ratio
pred_coor[:, 1::2] = 1.0 * (pred_coor[:, 1::2] - dh) / resize_ratio
# 3. clip boxes that are out of range
pred_coor = np.concatenate([np.maximum(pred_coor[:, :2], [0, 0]),
np.minimum(pred_coor[:, 2:], [org_w - 1, org_h - 1])], axis=-1)
invalid_mask = np.logical_or((pred_coor[:, 0] > pred_coor[:, 2]), (pred_coor[:, 1] > pred_coor[:, 3]))
pred_coor[invalid_mask] = 0
# 4. discard some invalid boxes
bboxes_scale = np.sqrt(np.multiply.reduce(pred_coor[:, 2:4] - pred_coor[:, 0:2], axis=-1))
scale_mask = np.logical_and((valid_scale[0] < bboxes_scale), (bboxes_scale < valid_scale[1]))
# 5. discard boxes with low scores
classes = np.argmax(pred_prob, axis=-1)
scores = pred_conf * pred_prob[np.arange(len(pred_coor)), classes]
score_mask = scores > score_threshold
mask = np.logical_and(scale_mask, score_mask)
coors, scores, classes = pred_coor[mask], scores[mask], classes[mask]
return np.concatenate([coors, scores[:, np.newaxis], classes[:, np.newaxis]], axis=-1)
def detect_image(Yolo, image_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
if output_path != '': cv2.imwrite(output_path, image)
if show:
# Show the image
cv2.imshow("predicted image", image)
# Load and hold the image
cv2.waitKey(0)
# To close the window after the required kill value was provided
cv2.destroyAllWindows()
return image
def Predict_bbox_mp(Frames_data, Predicted_data, Processing_times):
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
try: tf.config.experimental.set_memory_growth(gpus[0], True)
except RuntimeError: print("RuntimeError in tf.config.experimental.list_physical_devices('GPU')")
Yolo = Load_Yolo_model()
times = []
while True:
if Frames_data.qsize()>0:
image_data = Frames_data.get()
t1 = time.time()
Processing_times.put(time.time())
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
Predicted_data.put(pred_bbox)
def postprocess_mp(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime):
times = []
while True:
if Predicted_data.qsize()>0:
pred_bbox = Predicted_data.get()
if realtime:
while original_frames.qsize() > 1:
original_image = original_frames.get()
else:
original_image = original_frames.get()
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
times.append(time.time()-Processing_times.get())
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
#print("Time: {:.2f}ms, Final FPS: {:.1f}".format(ms, fps))
Processed_frames.put(image)
def Show_Image_mp(Processed_frames, show, Final_frames):
while True:
if Processed_frames.qsize()>0:
image = Processed_frames.get()
Final_frames.put(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
# detect from webcam
def detect_video_realtime_mp(video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors='', realtime=False):
if realtime:
vid = cv2.VideoCapture(0)
else:
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
no_of_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
original_frames = Queue()
Frames_data = Queue()
Predicted_data = Queue()
Processed_frames = Queue()
Processing_times = Queue()
Final_frames = Queue()
p1 = Process(target=Predict_bbox_mp, args=(Frames_data, Predicted_data, Processing_times))
p2 = Process(target=postprocess_mp, args=(Predicted_data, original_frames, Processed_frames, Processing_times, input_size, CLASSES, score_threshold, iou_threshold, rectangle_colors, realtime))
p3 = Process(target=Show_Image_mp, args=(Processed_frames, show, Final_frames))
p1.start()
p2.start()
p3.start()
while True:
ret, img = vid.read()
if not ret:
break
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_frames.put(original_image)
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
Frames_data.put(image_data)
while True:
if original_frames.qsize() == 0 and Frames_data.qsize() == 0 and Predicted_data.qsize() == 0 and Processed_frames.qsize() == 0 and Processing_times.qsize() == 0 and Final_frames.qsize() == 0:
p1.terminate()
p2.terminate()
p3.terminate()
break
elif Final_frames.qsize()>0:
image = Final_frames.get()
if output_path != '': out.write(image)
cv2.destroyAllWindows()
def detect_video(Yolo, video_path, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times, times_2 = [], []
vid = cv2.VideoCapture(video_path)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, img = vid.read()
try:
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_image, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
image = draw_bbox(original_image, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
t3 = time.time()
times.append(t2-t1)
times_2.append(t3-t1)
times = times[-20:]
times_2 = times_2[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
fps2 = 1000 / (sum(times_2)/len(times_2)*1000)
image = cv2.putText(image, "Time: {:.1f}FPS".format(fps), (0, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_image, bboxes, read_class_names(CLASSES))
print("Time: {:.2f}ms, Detection FPS: {:.1f}, total FPS: {:.1f}".format(ms, fps, fps2))
if output_path != '': out.write(image)
if show:
cv2.imshow('output', image)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
# detect from webcam
def detect_realtime(Yolo, output_path, input_size=416, show=False, CLASSES=YOLO_COCO_CLASSES, score_threshold=0.3, iou_threshold=0.45, rectangle_colors=''):
times = []
vid = cv2.VideoCapture(0)
# by default VideoCapture returns float instead of int
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(vid.get(cv2.CAP_PROP_FPS))
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(output_path, codec, fps, (width, height)) # output_path must be .mp4
while True:
_, frame = vid.read()
try:
original_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
original_frame = cv2.cvtColor(original_frame, cv2.COLOR_BGR2RGB)
except:
break
image_data = image_preprocess(np.copy(original_frame), [input_size, input_size])
image_data = image_data[np.newaxis, ...].astype(np.float32)
t1 = time.time()
if YOLO_FRAMEWORK == "tf":
pred_bbox = Yolo.predict(image_data)
elif YOLO_FRAMEWORK == "trt":
batched_input = tf.constant(image_data)
result = Yolo(batched_input)
pred_bbox = []
for key, value in result.items():
value = value.numpy()
pred_bbox.append(value)
t2 = time.time()
pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
pred_bbox = tf.concat(pred_bbox, axis=0)
bboxes = postprocess_boxes(pred_bbox, original_frame, input_size, score_threshold)
bboxes = nms(bboxes, iou_threshold, method='nms')
times.append(t2-t1)
times = times[-20:]
ms = sum(times)/len(times)*1000
fps = 1000 / ms
print("Time: {:.2f}ms, {:.1f} FPS".format(ms, fps))
frame = draw_bbox(original_frame, bboxes, CLASSES=CLASSES, rectangle_colors=rectangle_colors)
# CreateXMLfile("XML_Detections", str(int(time.time())), original_frame, bboxes, read_class_names(CLASSES))
image = cv2.putText(frame, "Time: {:.1f}FPS".format(fps), (0, 30),
cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
if output_path != '': out.write(frame)
if show:
cv2.imshow('output', frame)
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
break
cv2.destroyAllWindows()
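# --- Example usage (a minimal sketch) ---
# Runs a one-off detection on a single image with whatever weights are selected
# in yolov3/configs.py. The image paths below are illustrative placeholders.
if __name__ == "__main__":
    yolo = Load_Yolo_model()
    detect_image(yolo, "./IMAGES/street.jpg", "./IMAGES/street_pred.jpg",
                 input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255, 0, 0))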
|
app.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import logging
import json
import os
import pathlib
import pprint
import subprocess
import threading
import time
import boto3
import srt
from flask import Flask, jsonify, Response
from flask_cors import CORS
# -- Environment variables - defined by CloudFormation when deployed
VIDEO_BUCKET = os.environ.get('RESOURCE_BUCKET')
SSM_VIDEO_CHANNEL_MAP_PARAM = os.environ.get('PARAMETER_IVS_VIDEO_CHANNEL_MAP', 'retaildemostore-ivs-video-channel-map')
USE_DEFAULT_IVS_STREAMS = os.environ.get('USE_DEFAULT_IVS_STREAMS') == 'true'
DEFAULT_THUMB_FNAME = 'default_thumb.png'
STATIC_FOLDER = '/app/static'
STATIC_URL_PATH = '/static'
SUBTITLE_FORMAT = 'srt'
LOCAL_VIDEO_DIR = '/app/video-files/'
DEFAULT_STREAMS_CONFIG_S3_PATH = 'videos/default_streams/default_streams.json'
# -- Parameterised ffmpeg commands
FFMPEG_STREAM_CMD = """ffmpeg -loglevel panic -hide_banner -re -stream_loop -1 -i \"{video_filepath}\" \
-r 30 -c:v copy -f flv rtmps://{ingest_endpoint}:443/app/{stream_key} -map 0:s -f {subtitle_format} -"""
FFMPEG_SUBS_COMMAND = "ffmpeg -i \"{video_filepath}\" \"{subtitle_path}\""
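# For reference, with illustrative values FFMPEG_STREAM_CMD expands to roughly:
#   ffmpeg -loglevel panic -hide_banner -re -stream_loop -1 -i "/app/video-files/demo.mp4" \
#     -r 30 -c:v copy -f flv rtmps://<ingest-endpoint>:443/app/<stream-key> -map 0:s -f srt -
# i.e. the video loops into the IVS RTMPS ingest while its embedded subtitle
# track is written to stdout in SRT form, which feeds the metadata loop below.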
# Globally accessed variable to store stream metadata (URLs & associated product IDs). Returned via `stream_details`
# endpoint
stream_details = {}
ivs_client = boto3.client('ivs')
ssm_client = boto3.client('ssm')
s3_client = boto3.client('s3')
# -- Load default streams config
def load_default_streams_config():
app.logger.info(f"Downloading default streams config from from bucket {VIDEO_BUCKET} with key {DEFAULT_STREAMS_CONFIG_S3_PATH}.")
config_response = s3_client.get_object(Bucket=VIDEO_BUCKET, Key=DEFAULT_STREAMS_CONFIG_S3_PATH)
config = json.loads(config_response['Body'].read().decode('utf-8'))
for (key, entry) in config.items():
app.logger.info(f"{key}, {entry}")
config[key] = {**entry, 'thumb_url': STATIC_URL_PATH + '/' + entry['thumb_fname']}
config[key].pop('thumb_fname', None)
app.logger.info("Pulled config:")
app.logger.info(config)
return config
# -- Video streaming
def download_video_file(s3_key):
"""
Downloads a video file and associated thumbnail from S3. Thumbnails are identified by a .png file with the same
name and in the same location as the video.
"""
local_path = LOCAL_VIDEO_DIR + s3_key.split('/')[-1]
app.logger.info(f"Downloading file {s3_key} from bucket {VIDEO_BUCKET} to {local_path}.")
s3_client.download_file(Bucket=VIDEO_BUCKET, Key=s3_key, Filename=local_path)
app.logger.info(f"File {s3_key} downloaded from bucket {VIDEO_BUCKET} to {local_path}.")
thumbnail_path = None
thumbnail_key = '.'.join(s3_key.split('.')[:-1]) + '.png'
try:
local_thumbnail_fname = thumbnail_key.split('/')[-1]
local_thumbnail_path = app.static_folder + '/' + local_thumbnail_fname
s3_client.download_file(Bucket=VIDEO_BUCKET, Key=thumbnail_key, Filename=local_thumbnail_path)
app.logger.info(f"File {thumbnail_key} downloaded from bucket {VIDEO_BUCKET} to {local_thumbnail_path}.")
thumbnail_path = app.static_url_path + '/' + local_thumbnail_fname
except Exception as e:
app.logger.warning(f'No thumbnail available for {VIDEO_BUCKET}/{s3_key} as {VIDEO_BUCKET}/{thumbnail_key} - '
f'exception: {e}')
return local_path, thumbnail_path
def get_ffmpeg_stream_cmd(video_filepath, ingest_endpoint, stream_key, subtitle_format):
"""
Returns the command to start streaming a video using ffmpeg.
"""
return FFMPEG_STREAM_CMD.format(**locals())
def get_ffmpeg_subs_cmd(video_filepath, subtitle_path):
"""
Returns the ffmpeg command to rip subtitles (i.e. metadata) from a video file.
"""
return FFMPEG_SUBS_COMMAND.format(**locals())
def get_featured_products(video_filepath, channel_id):
"""
Extracts a list of product IDs from the metadata attached to a video file. The values are saved in the global
`stream_details` dict.
"""
subtitle_path = pathlib.Path(video_filepath).with_suffix('.srt')
get_subs_command = get_ffmpeg_subs_cmd(video_filepath, subtitle_path)
process = subprocess.run(
get_subs_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
with open(subtitle_path) as f:
subtitle_content = srt.parse(f)
for line in subtitle_content:
product_id = json.loads(line.content)['productId']
if 'products' not in stream_details[channel_id]:
stream_details[channel_id]['products'] = [product_id]
else:
if product_id not in stream_details[channel_id]['products']:
stream_details[channel_id]['products'].append(product_id)
def is_ssm_parameter_set(parameter_name):
"""
Returns whether an SSM parameter with a given name has been set (i.e. its value is not 'NONE')
"""
try:
response = ssm_client.get_parameter(Name=parameter_name)
return response['Parameter']['Value'] != 'NONE'
except ssm_client.exceptions.ParameterNotFound:
return False
def put_ivs_metadata(channel_arn, line):
"""
Sends metadata to a given IVS stream. Metadata can be any string, but the AWS Retail Demo Store UI expects
metadata of the format {"productId":"<product-id>"}
"""
try:
app.logger.info(f'Sending metadata to stream: {line}')
ivs_client.put_metadata(
channelArn=channel_arn,
metadata=line
)
except ivs_client.exceptions.ChannelNotBroadcasting:
app.logger.warning(f'Channel not broadcasting. Waiting for 5 seconds.')
app.logger.info('Running ffmpeg processes:')
app.logger.info(os.system("ps aux|grep 'PID\|ffmpeg'"))
time.sleep(5)
def get_stream_state(channel_arn):
"""
Returns the state of a stream given its ARN. One of 'LIVE', 'OFFLINE' (from API response)
or 'NOT_BROADCASTING' (inferred).
"""
try:
stream_response = ivs_client.get_stream(channelArn=channel_arn)['stream']
stream_state = stream_response['state']
except ivs_client.exceptions.ChannelNotBroadcasting:
stream_state = "NOT_BROADCASTING"
return stream_state
def start_streams():
"""
Initiates all IVS streams based on environment variables. If the SSM_VIDEO_CHANNEL_MAP_PARAM (map of videos in
S3 to IVS channels) is set and the user has not requested to use the default IVS streams
(USE_DEFAULT_IVS_STREAMS, defined by CloudFormation input) then one stream will be started per video described
in the video to IVS channel map. Each stream runs in a separate thread.
If streams are not started, then `stream_details` will be set to the details of a collection of existing streams
"""
if is_ssm_parameter_set(SSM_VIDEO_CHANNEL_MAP_PARAM) and not USE_DEFAULT_IVS_STREAMS:
video_channel_param_value = ssm_client.get_parameter(Name=SSM_VIDEO_CHANNEL_MAP_PARAM)['Parameter']['Value']
app.logger.info(f"Found IVS channel map: {video_channel_param_value}")
video_channel_map = json.loads(video_channel_param_value)
for idx, (s3_video_key, ivs_channel_arn) in enumerate(video_channel_map.items()):
threading.Thread(target=stream, args=(s3_video_key, ivs_channel_arn, idx)).start()
else:
global stream_details
stream_details = load_default_streams_config()
def stream(s3_video_key, ivs_channel_arn, channel_id):
"""
Starts the stream for a given video file and IVS channel. The video file is streamed on a loop using ffmpeg, and
any attached metadata (from the subtitles embedded in the video file) is sent to the channel's `put_metadata`
endpoint.
"""
video_filepath, thumb_url = download_video_file(s3_video_key)
if thumb_url is None:
thumb_url = app.static_url_path + '/' + DEFAULT_THUMB_FNAME
channel_response = ivs_client.get_channel(arn=ivs_channel_arn)['channel']
ingest_endpoint = channel_response['ingestEndpoint']
playback_endpoint = channel_response['playbackUrl']
stream_details[channel_id] = {'playback_url': playback_endpoint,
'thumb_url': thumb_url}
get_featured_products(video_filepath, channel_id)
stream_state = get_stream_state(ivs_channel_arn)
stream_arn = ivs_client.list_stream_keys(channelArn=ivs_channel_arn)['streamKeys'][0]['arn']
stream_key = ivs_client.get_stream_key(arn=stream_arn)['streamKey']['value']
app.logger.info(f"Stream details:\nIngest endpoint: {ingest_endpoint}\nStream state: {stream_state}")
if SUBTITLE_FORMAT == 'srt':
while True:
if stream_state != "NOT_BROADCASTING":
app.logger.info(f"Stream {stream_arn} is currently in state {stream_state}. Waiting for state NOT_BROADCASTING")
sleep_time = 20
app.logger.info(f"Waiting for {sleep_time} seconds")
time.sleep(sleep_time)
stream_state = get_stream_state(ivs_channel_arn)
continue
app.logger.info('Starting video stream')
ffmpeg_stream_cmd = get_ffmpeg_stream_cmd(video_filepath, ingest_endpoint, stream_key, SUBTITLE_FORMAT)
app.logger.info(f'ffmpeg command: {ffmpeg_stream_cmd}')
process = subprocess.Popen(
ffmpeg_stream_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True)
app.logger.info('Running ffmpeg processes:')
app.logger.info(os.system("ps aux|grep 'PID\|ffmpeg'"))
lines = iter(process.stdout)
app.logger.info('Starting event stream')
while True:
try:
int(next(lines).strip())
time_range = next(lines).strip()
if '-->' not in time_range:
raise ValueError(f'Expected a time range instead of {time_range}')
send_text = ''
while True:
text = next(lines).strip()
if len(text) == 0: break
if len(send_text)>0: send_text+='\n'
send_text += text
put_ivs_metadata(ivs_channel_arn, send_text)
except StopIteration:
app.logger.warning('Video iteration has stopped unexpectedly. Attempting restart in 10 seconds.')
time.sleep(10)
break
else:
raise NotImplementedError(f'{SUBTITLE_FORMAT} is not currently supported by this demo.')
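# For reference, a subtitle cue embedded in a video is expected to look roughly
# like this (the index, timestamps, and product ID value are illustrative):
#
#   1
#   00:00:05,000 --> 00:00:10,000
#   {"productId":"example-product-id"}
#
# Each cue's text block is forwarded verbatim to the IVS channel via put_ivs_metadata().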
# -- End Video streaming
# -- Logging
class LoggingMiddleware(object):
def __init__(self, app):
self._app = app
def __call__(self, environ, resp):
errorlog = environ['wsgi.errors']
pprint.pprint(('REQUEST', environ), stream=errorlog)
def log_response(status, headers, *args):
pprint.pprint(('RESPONSE', status, headers), stream=errorlog)
return resp(status, headers, *args)
return self._app(environ, log_response)
# -- End Logging
# -- Exceptions
class BadRequest(Exception):
status_code = 400
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
# -- Handlers
app = Flask(__name__,
static_folder=STATIC_FOLDER,
static_url_path=STATIC_URL_PATH)
cors = CORS(app)
@app.errorhandler(BadRequest)
def handle_bad_request(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/')
def index():
return 'Videos Service'
@app.route('/stream_details')
def streams():
response_data = []
for value in stream_details.values():
response_data.append(value)
response = {
"streams": response_data
}
return Response(json.dumps(response), content_type = 'application/json')
@app.route('/health')
def health():
return 'OK'
if __name__ == '__main__':
app.wsgi_app = LoggingMiddleware(app.wsgi_app)
app.logger.setLevel(level=logging.INFO)
app.logger.info(f"VIDEO_BUCKET: {VIDEO_BUCKET}")
app.logger.info(f"SSM_VIDEO_CHANNEL_MAP_PARAM: {SSM_VIDEO_CHANNEL_MAP_PARAM}")
app.logger.info(f"USE_DEFAULT_IVS_STREAMS: {USE_DEFAULT_IVS_STREAMS}")
app.logger.info("Starting video streams")
start_streams()
app.logger.info("Starting API")
app.run(debug=False, host='0.0.0.0', port=80)
|
test_c10d_common.py
|
# Owner(s): ["oncall: distributed"]
import copy
import os
import sys
import tempfile
import threading
import time
from datetime import timedelta
from itertools import product
from sys import platform
import torch
import torch.distributed as dist
if not dist.is_available():
print("distributed package not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.distributed.distributed_c10d as c10d
import torch.distributed.algorithms.ddp_comm_hooks.powerSGD_hook as powerSGD
import torch.nn.functional as F
import torch.testing._internal.common_utils as common
from torch import nn
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
TEST_WITH_DEV_DBG_ASAN,
)
if TEST_WITH_DEV_DBG_ASAN:
print("Multiprocessing spawn is not compatible with dev/dbg asan", file=sys.stderr)
sys.exit(0)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process : (rank + 1) * gpus_per_process]
)
return gpus_for_rank
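# For example, with 4 visible GPUs and world_size=2 this returns
# [[0, 1], [2, 3]]: rank 0 uses GPUs 0 and 1, rank 1 uses GPUs 2 and 3.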
class AbstractTimeoutTest(object):
def _test_store_timeout(self, backend, init_method, c2p):
try:
dist.init_process_group(
backend=backend,
init_method=init_method,
world_size=1,
rank=0,
timeout=timedelta(seconds=1),
)
default_store = c10d._get_default_store()
tik = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
default_store.get("nonexistent key")
tok = time.time()
dist.destroy_process_group()
c2p.append(float(tok - tik))
except RuntimeError as e:
# catch "Address already in use" error and report it to the main
# thread
c2p.append(e)
def _init_methods(self):
f = tempfile.NamedTemporaryFile(delete=False)
if sys.platform == "win32":
yield "file:///%s" % f.name.replace("\\", "/")
f.close()
else:
yield "file://%s" % f.name
f.close()
yield "tcp://127.0.0.1:%d" % common.find_free_port()
def _test_default_store_timeout(self, backend):
for init_method in self._init_methods():
c2p = []
t = threading.Thread(
target=self._test_store_timeout, args=(backend, init_method, c2p)
)
t.daemon = True
t.start()
t.join(5)
self.assertEqual(1, len(c2p))
if isinstance(c2p[0], float):
# waiting time should be 1s, use 3s to rule out false alarm
self.assertGreater(3, c2p[0])
elif isinstance(c2p[0], RuntimeError):
# let @retry_on_connect_failures handle the error
raise c2p[0]
else:
raise RuntimeError("Unexpected type {}".format(type(c2p[0])))
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False)
self.fc2 = nn.Linear(10, 50, bias=False)
self.fc3 = nn.Linear(50, 4, bias=False)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return F.softmax(x, dim=1)
class DoubleGpuNet(nn.Module):
def __init__(self, gpus):
super(DoubleGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[1])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.fc3(x)
return F.softmax(x, dim=1).to(dev0)
class QuadraGpuNet(nn.Module):
def __init__(self, gpus):
super(QuadraGpuNet, self).__init__()
self.fc1 = nn.Linear(2, 10, bias=False).to(gpus[0])
self.fc2 = nn.Linear(10, 50, bias=False).to(gpus[1])
self.fc3 = nn.Linear(50, 4, bias=False).to(gpus[2])
self.fc4 = nn.Linear(4, 4, bias=False).to(gpus[3])
self.relu = nn.ReLU()
self.no_grad_param = nn.Parameter(
torch.tensor([2, 2]).long(), requires_grad=False
).to(gpus[0])
def forward(self, x):
dev0 = self.fc1.weight.device
dev1 = self.fc2.weight.device
dev2 = self.fc3.weight.device
dev3 = self.fc4.weight.device
x = self.relu(self.fc1(x.to(dev0)))
x = self.relu(self.fc2(x.to(dev1)))
x = self.relu(self.fc3(x.to(dev2)))
x = self.fc4(x.to(dev3))
return F.softmax(x, dim=1).to(dev0)
class ConvNet(nn.Module):
def __init__(self, gpus, layouts, dtypes):
super(ConvNet, self).__init__()
self.dtypes = dtypes
if isinstance(gpus, list):
self.layer_gpus = gpus
else:
gpus = [gpus] * 4
self.conv0 = torch.nn.Conv2d(8, 16, (2, 2)).to(
device=gpus[0], memory_format=layouts[0], dtype=dtypes[0]
)
self.conv1 = torch.nn.Conv2d(16, 32, (2, 2)).to(
device=gpus[1], memory_format=layouts[1], dtype=dtypes[1]
)
self.conv2 = torch.nn.Conv2d(32, 16, (2, 2)).to(
device=gpus[2], memory_format=layouts[2], dtype=dtypes[2]
)
self.conv3 = torch.nn.Conv2d(16, 8, (2, 2)).to(
device=gpus[3], memory_format=layouts[3], dtype=dtypes[3]
)
def forward(self, x):
x = x.to(self.dtypes[0])
# Could say
# x = self.conv0(x).to(device=self.conv1.weight.device, dtype=self.dtypes[1])
# etc. But I don't want to appeal to the weights' devices directly, because part of this test's purpose
# is to verify weights are where expected if the model gets replicated.
gpus = self.layer_gpus if hasattr(self, "layer_gpus") else [x.device] * 4
x = self.conv0(x).to(device=gpus[1], dtype=self.dtypes[1])
x = self.conv1(x).to(device=gpus[2], dtype=self.dtypes[2])
x = self.conv2(x).to(device=gpus[3], dtype=self.dtypes[3])
return self.conv3(x)
class Task(nn.Module):
def __init__(self):
super().__init__()
self.p = nn.Parameter(torch.ones(2, 2))
def forward(self, x):
return self.p + x
class ModuleForDdpCommHook(nn.Module):
def __init__(self):
super().__init__()
self.t0 = Task()
def forward(self, x, rank):
return self.t0(x + rank)
class SparseGradientModule(nn.Module):
def __init__(self):
super(SparseGradientModule, self).__init__()
self.embedding = nn.EmbeddingBag(10, 10, sparse=True)
def forward(self, x):
return F.softmax(self.embedding(x), dim=1)
class AbstractDistributedDataParallelTest(object):
def tearDown(self):
# DistributedDataParallel test doesn't seem to call FileStore destructor
# TODO: investigate this; the test is known to have issues
# Use this hack to remove files for that test
try:
os.remove(self.file_name)
except OSError:
pass
@property
def world_size(self):
return 2
def _prepare_single_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
model = Net()
device = devices[0] if devices else torch.device("cuda:%d" % self.rank)
ddp_model = DistributedDataParallel(
copy.deepcopy(model).to(device),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
model.to(device)
input = torch.randn(global_batch_size, 2).to(device)
target = torch.randn(global_batch_size, 4).to(device)
return model, ddp_model, input, target
def _prepare_multi_device_module(
self,
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view=False,
):
self.assertTrue(
len(devices) == 2 or len(devices) == 4,
"unexpected devices for ddp tests {}".format(devices),
)
if len(devices) == 2:
model = DoubleGpuNet(devices)
elif len(devices) == 4:
model = QuadraGpuNet(devices)
ddp_model = DistributedDataParallel(
copy.deepcopy(model),
device_ids=device_ids,
process_group=process_group,
bucket_cap_mb=0.001,
gradient_as_bucket_view=gradient_as_bucket_view,
)
input = torch.randn(global_batch_size, 2).cuda(devices[0])
target = torch.randn(global_batch_size, 4)
return model, ddp_model, input, target
def _test_ddp_with_process_group(
self,
process_group,
devices,
device_ids,
multi_device=False,
gradient_as_bucket_view=False,
):
"""
Note: we pass down `device_ids` all the way to DistributedDataParallel
as part of the test. Below you find tests that either use a list of
integers, a list of `torch.device` instances, or an empty list.
The `devices` argument is used to control placement of the model and
must always be specified as a list of `torch.device` instances.
"""
local_batch_size = 1 if devices is None else len(devices)
global_batch_size = self.world_size * local_batch_size
if multi_device:
model, ddp_model, input, target = self._prepare_multi_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertTrue(ddp_logging_data.get("is_multi_device_module"))
else:
model, ddp_model, input, target = self._prepare_single_device_module(
process_group,
devices,
device_ids,
global_batch_size,
gradient_as_bucket_view,
)
ddp_logging_data = ddp_model._get_ddp_logging_data()
self.assertFalse(ddp_logging_data.get("is_multi_device_module"))
def step_model(model, input, target):
model.train()
output = model(input)
loss = F.mse_loss(output, target.to(output.device))
loss.backward()
def update_parameters(model):
for param in model.parameters():
with torch.no_grad():
param -= param.grad
param.grad = None
# check two model parameters over 2 iterations
for iteration in range(2):
# single cpu/gpu training
step_model(model, input, target)
# DDP training, DDP scatters subsets of the input to nodes/GPUs
step_model(
ddp_model,
input[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
target[
self.rank * local_batch_size : (self.rank + 1) * local_batch_size
],
)
# Update weights and run a second iteration to shake out errors
update_parameters(model)
update_parameters(ddp_model)
self.assertEqual(
len(list(model.parameters())), len(list(ddp_model.parameters()))
)
for i, j in zip(model.parameters(), ddp_model.parameters()):
self.assertEqual(i, j, rtol=1.3e-06, atol=5e-5)
# Shuffle the input so that DDP input is different
torch.manual_seed(1337 + iteration)
input = input[torch.randperm(global_batch_size)]
def _gpu_model_with_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False, state=None
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a DDP communication hook if any.
if hook is not None:
gpu_model.register_comm_hook(state, hook)
return gpu_model
def _gpu_model_with_builtin_ddp_comm_hook(
self, process_group, hook=None, gradient_as_bucket_view=False
):
device_id = gpus_for_rank(self.world_size)[self.rank][0]
gpu_model = DistributedDataParallel(
ModuleForDdpCommHook().to(device_id),
device_ids=[device_id],
process_group=process_group,
gradient_as_bucket_view=gradient_as_bucket_view,
)
# Register a built-in DDP communication hook if defined
if hook is not None:
gpu_model._register_builtin_comm_hook(hook)
return gpu_model
def _run_and_verify_hook(self, model, input, expected_grad):
# Run forward
output = model(input, self.rank)
# Run backward
output.mean().backward()
for p in model.parameters():
self.assertEqual(p.grad, expected_grad)
def _simple_hook(
self, state: object, bucket: dist.GradBucket
) -> torch.futures.Future[torch.Tensor]:
fut = torch.futures.Future()
fut.set_result(torch.ones_like(bucket.buffer()))
def fut_then(fut):
# Add ones to fut's result.
t = fut.value()
return t + torch.ones_like(t)
return fut.then(fut_then)
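# For reference, a typical averaging hook (illustrative sketch, not one of these
# tests) follows the same Future-based contract as _simple_hook above:
#     def allreduce_hook(process_group, bucket):
#         tensor = bucket.buffer()
#         tensor.div_(dist.get_world_size(process_group))
#         fut = dist.all_reduce(tensor, group=process_group, async_op=True).get_future()
#         return fut.then(lambda f: f.value()[0])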
class DistributedDataParallelTest(
AbstractDistributedDataParallelTest, MultiProcessTestCase
):
def setUp(self):
super(DistributedDataParallelTest, self).setUp()
self._spawn_processes()
def test_invalid_powerSGD_state(self):
for start_powerSGD_iter, use_error_feedback, warm_start in product(
[0, 1], [True, False], [True, False]
):
if not use_error_feedback and not warm_start:
continue
with self.assertRaisesRegex(
ValueError,
"Expect `start_powerSGD_iter` > 1 if `use_error_feedback` or `warm_start` is enabled, "
"because PowerSGD can only be applied after the first two iterations in DDP.",
):
state = powerSGD.PowerSGDState(
process_group=None,
matrix_approximation_rank=1,
start_powerSGD_iter=start_powerSGD_iter,
use_error_feedback=use_error_feedback,
warm_start=warm_start,
)
class ComputeBucketAssignmentTest(TestCase):
def test_single_limit_single_dtype(self):
tensors = [
torch.empty([100], dtype=torch.float),
torch.empty([200], dtype=torch.float),
torch.empty([100], dtype=torch.float),
torch.empty([50], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0], [1], [2], [3]], result)
def test_single_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [400]
)
self.assertTrue(all(size_lim == 400 for size_lim in per_bucket_size_limits))
self.assertEqual([[0, 2], [1, 3], [4], [5]], result)
def test_multi_limit_single_dtype(self):
tensors = [
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
torch.empty([10], dtype=torch.float),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [40, 80]
)
self.assertEqual(per_bucket_size_limits, [40, 80, 80])
self.assertEqual([[0], [1, 2], [3]], result)
def test_multi_limit_multi_dtype(self):
tensors = [
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
torch.empty([50], dtype=torch.float),
torch.empty([25], dtype=torch.double),
]
result, per_bucket_size_limits = dist._compute_bucket_assignment_by_size(
tensors, [200, 400]
)
self.assertEqual([[0], [1], [2, 4], [3, 5]], result)
self.assertEqual(per_bucket_size_limits, [200, 200, 400, 400])
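# Size arithmetic behind the cases above (illustrative): 50 float32 elements take
# 200 bytes and 25 float64 elements take 200 bytes, so a 400-byte limit packs two
# same-dtype tensors per bucket while a 200-byte limit holds only one.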
class AbstractCommTest(object):
@property
def op_timeout_sec(self):
return 1
@property
def world_size(self):
return 2
def _verify_sequence_number_across_pg(self, pg, verify_pg):
seq_num = pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
# We use a separate pg to verify the sequence numbers, otherwise these
# collectives will themselves increment the sequence number.
dist.all_gather_object(obj_list, seq_num, group=verify_pg)
self.assertEqual(len(set(obj_list)), 1)
return obj_list[0]
def _test_sequence_num_incremented(self, process_group, ranks):
# verify initial sequence numbers. Use a distinct process group for
# verification to keep counts as expected with respect to process_group.
verify_pg = dist.new_group(
ranks=ranks,
backend="gloo",
)
assert dist.get_world_size(process_group) == dist.get_world_size(verify_pg)
initial_num = (
self._verify_sequence_number_across_pg(
pg=process_group, verify_pg=verify_pg
)
if not c10d._rank_not_in_group(process_group)
else -1
)
# Verify sequence numbers are appropriately incremented
for i in range(10):
t = torch.ones(1, device=torch.cuda.current_device())
dist.all_reduce(t, group=process_group)
if not c10d._rank_not_in_group(process_group):
seq_num = self._verify_sequence_number_across_pg(
pg=process_group,
verify_pg=verify_pg,
)
self.assertEqual(initial_num + i + 1, seq_num)
if dist.get_world_size(process_group) > 2:
# Test when certain ranks don't call collectives
if dist.get_rank(process_group) not in [0, 2]:
dist.all_reduce(t, group=process_group, async_op=True)
# Now ranks 0 and 2 should be lagging by 1.
if not c10d._rank_not_in_group(process_group):
seq_num = process_group._get_sequence_number_for_group()
rank = dist.get_rank(process_group)
obj_list = [None for _ in range(dist.get_world_size(verify_pg))]
dist.all_gather_object(obj_list, (rank, seq_num), group=verify_pg)
rank_to_seq_num = {rank: num for (rank, num) in obj_list}
self.assertEqual(len(set(rank_to_seq_num.values())), 2)
self.assertEqual(rank_to_seq_num[0], rank_to_seq_num[2])
expected_same = {
rank_to_seq_num[i]
for i in rank_to_seq_num.keys()
if i not in [0, 2]
}
self.assertEqual(len(expected_same), 1)
self.assertEqual(rank_to_seq_num[0] + 1, rank_to_seq_num[1])
def _test_sequence_num_incremented_default_group(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
self._test_sequence_num_incremented(
c10d._get_default_group(),
ranks=list(range(dist.get_world_size())),
)
def _test_sequence_num_incremented_subgroup(self, backend_name):
torch.cuda.set_device(self.rank)
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend_name,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup_ranks = [0, 1, 2]
subgroup = dist.new_group(subgroup_ranks)
self._test_sequence_num_incremented(subgroup, subgroup_ranks)
def _test_sequence_num_set_default_pg(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
default_pg = c10d._get_default_group()
seq_num = default_pg._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size())]
dist.all_gather_object(obj_list, seq_num)
self.assertEqual(len(set(obj_list)), 1)
def _test_sequence_num_set_new_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
subgroup = dist.new_group([0, 1])
if not c10d._rank_not_in_group(subgroup):
subgroup_seq = subgroup._get_sequence_number_for_group()
obj_list = [None for _ in range(dist.get_world_size(subgroup))]
dist.all_gather_object(obj_list, subgroup_seq, group=subgroup)
self.assertEqual(len(set(obj_list)), 1)
def _test_warn_not_in_group(self, backend):
store = dist.FileStore(self.file_name, self.world_size)
dist.init_process_group(
backend,
world_size=self.world_size,
rank=self.rank,
store=store,
)
in_group_ranks = list(filter(lambda x: x % 2 == 0, range(self.world_size)))
group = dist.new_group(in_group_ranks)
x = torch.zeros(2, 2).cuda(self.rank)
xs = [torch.zeros(2, 2).cuda(self.rank) for _ in range(len(in_group_ranks))]
if self.rank not in in_group_ranks:
msg = ".*{}.*does not belong to.*"
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_gather")):
dist.all_gather(xs, x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("all_reduce")):
dist.all_reduce(x, group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("barrier")):
dist.barrier(group=group)
with self.assertWarnsOnceRegex(UserWarning, msg.format("broadcast")):
dist.broadcast(x, src=0, group=group)
else:
dist.all_gather(xs, x, group=group)
dist.all_reduce(x, group=group)
dist.barrier(group=group)
dist.broadcast(x, src=0, group=group)
class CommTest(AbstractCommTest, MultiProcessTestCase):
def setUp(self):
super(CommTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(CommTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_distributed_debug_mode(self):
# Default should be off
default_debug_mode = dist._get_debug_mode()
self.assertEqual(default_debug_mode, dist._DistributedDebugLevel.OFF)
mapping = {
"OFF": dist._DistributedDebugLevel.OFF,
"INFO": dist._DistributedDebugLevel.INFO,
"DETAIL": dist._DistributedDebugLevel.DETAIL,
}
invalid_debug_modes = ["foo", 0, 1, -1]
for mode in mapping.keys():
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
set_debug_mode = dist._get_debug_mode()
self.assertEqual(
set_debug_mode,
mapping[mode],
f"Expected {mode} to map to {mapping[mode]} but got {set_debug_mode}",
)
for mode in invalid_debug_modes:
os.environ["TORCH_DISTRIBUTED_DEBUG"] = str(mode)
with self.assertRaisesRegex(RuntimeError, "to be one of"):
dist._get_debug_mode()
class DummyWork(dist._Work):
def wait(self, timeout=5.0):
if torch.cuda.is_available():
torch.cuda.current_stream().synchronize()
return True
class DummyProcessGroup(dist.ProcessGroup):
def getBackendName(self):
return "Dummy"
def allgather(self, output_tensor_lists, input_tensor_list, opts=None):
for output_tensor_list, input_tensor in zip(output_tensor_lists, input_tensor_list):
for output_tensor in output_tensor_list:
output_tensor.copy_(input_tensor)
return DummyWork()
def allreduce(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
def barrier(self, opts=None):
store = c10d._get_default_store()
key = "TEST:DummyProcessGroup:barrier"
if self.rank() == 0:
worker_count = 0
# By default, TCPServer lives on rank 0. So rank 0 needs to make
# sure that it does not exit too early before other ranks finish
# using the store.
# Note that, _store_based_barrier does not solve this problem, as
# all ranks need to run at least one store.add(key, 0) before
# exiting, but there is no guarantee that rank 0 is still alive at
# that point.
while worker_count < self.size() - 1:
worker_count = store.add(key, 0)
else:
store.add(key, 1)
return DummyWork()
def broadcast(self, tensor_list, opts=None):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def reduce_scatter(self, output_tensor_list, input_tensor_lists, opts=None):
for output_tensor, input_tensor_list in zip(output_tensor_list, input_tensor_lists):
output_tensor.copy_(input_tensor_list[self.rank()])
return DummyWork()
def send(self, tensor_list, dst, tag=0):
for tensor in tensor_list:
tensor.add_(1)
return DummyWork()
def recv(self, tensor_list, src, tag=0):
for tensor in tensor_list:
tensor.add_(2)
return DummyWork()
class PythonProcessGroupTest(MultiProcessTestCase):
def setUp(self):
super(PythonProcessGroupTest, self).setUp()
self._spawn_processes()
def tearDown(self):
super(PythonProcessGroupTest, self).tearDown()
try:
os.remove(self.file_name)
except OSError:
pass
def test_get_backend_name(self):
dpg = DummyProcessGroup(0, 1)
self.assertEqual("Dummy", dpg.name())
def test_backend_class_attr(self):
dist.Backend.register_backend(
"dummy",
PythonProcessGroupTest.create_dummy
)
self.assertEqual(dist.Backend.DUMMY, "DUMMY")
self.assertEqual(
dist.Backend._plugins["DUMMY"],
PythonProcessGroupTest.create_dummy
)
@staticmethod
def create_dummy(store, rank, size, timeout):
return DummyProcessGroup(rank, size)
def test_collectives(self):
dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test all_gather
input_tensor = torch.ones(2, 2) * 7
output_tensor_list = [torch.zeros(2, 2) for _ in range(self.world_size)]
dist.all_gather(output_tensor_list, input_tensor)
for tensor in output_tensor_list:
self.assertEqual(tensor, input_tensor)
# test all_reduce
input_tensor = torch.ones(2, 2) * 7
dist.all_reduce(input_tensor)
self.assertEqual(input_tensor, torch.ones(2, 2) * 7 + 2)
# test broadcast
input_tensor = torch.zeros(2, 2)
dist.broadcast(input_tensor, 0, async_op=True).wait()
self.assertEqual(torch.ones(2, 2), input_tensor)
# test reduce_scatter
output_tensor = torch.zeros(2, 2)
input_tensor_list = [torch.ones(2, 2) for _ in range(self.world_size)]
dist.reduce_scatter(output_tensor, input_tensor_list)
self.assertEqual(output_tensor, torch.zeros(2, 2) + 1)
dist.barrier()
dist.destroy_process_group()
def test_send_recv(self):
dist.Backend.register_backend("dummy", PythonProcessGroupTest.create_dummy)
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '6789'
dist.init_process_group("dummy", rank=self.rank, world_size=self.world_size)
# test send
input_tensor = torch.zeros(2, 2)
dist.send(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 1)
# test recv
input_tensor = torch.zeros(2, 2)
dist.recv(input_tensor, (self.rank + 1) % self.world_size)
self.assertEqual(input_tensor, torch.zeros(2, 2) + 2)
dist.barrier()
# intentionally not calling into `destroy_process_group` as not all
# user applications would explicitly call it.
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
|
NetworkPacketLossCollector.py
|
#!/usr/bin/env python
import os
import queue
import socket
import time
import threading
from threading import Thread
import copy
import json
from datetime import datetime
import stomp
import tools
import siteMapping
TOPIC = "/topic/perfsonar.raw.packet-loss-rate"
INDEX_PREFIX = 'ps_packetloss-'
siteMapping.reload()
class MyListener(object):
def on_message(self, headers, message):
q.put(message)
def on_error(self, headers, message):
print('received an error %s' % message)
os._exit(1)
def on_heartbeat_timeout(self):
print('AMQ - lost heartbeat. Needs a reconnect!')
connect_to_MQ(reset=True)
def on_disconnected(self):
print('AMQ - no connection. Needs a reconnect!')
connect_to_MQ(reset=True)
def connect_to_MQ(reset=False):
if tools.connection is not None:
if reset and tools.connection.is_connected():
tools.connection.disconnect()
tools.connection = None
elif tools.connection.is_connected():
return
print("connecting to MQ")
tools.connection = None
addresses = socket.getaddrinfo('clever-turkey.rmq.cloudamqp.com', 61614)
ip = addresses[0][4][0]
host_and_ports = [(ip, 61614)]
print(host_and_ports)
tools.connection = stomp.Connection(
host_and_ports=host_and_ports,
use_ssl=True,
vhost=RMQ_parameters['RMQ_VHOST']
)
tools.connection.set_listener('MyConsumer', MyListener())
tools.connection.start()
tools.connection.connect(RMQ_parameters['RMQ_USER'], RMQ_parameters['RMQ_PASS'], wait=True)
tools.connection.subscribe(destination=TOPIC, ack='auto', id=RMQ_parameters['RMQ_ID'], headers={"durable": True, "auto-delete": False})
return
def eventCreator():
aLotOfData = []
es_conn = tools.get_es_connection()
while True:
d = q.get()
m = json.loads(d)
data = {
'_type': 'doc'
}
source = m['meta']['source']
destination = m['meta']['destination']
data['MA'] = m['meta']['measurement_agent']
data['src'] = source
data['dest'] = destination
data['src_host'] = m['meta']['input_source']
data['dest_host'] = m['meta']['input_destination']
data['ipv6'] = False
if ':' in source or ':' in destination:
data['ipv6'] = True
so = siteMapping.getPS(source)
de = siteMapping.getPS(destination)
if so is not None:
data['src_site'] = so[0]
data['src_VO'] = so[1]
if de is not None:
data['dest_site'] = de[0]
data['dest_VO'] = de[1]
data['src_production'] = siteMapping.isProductionLatency(source)
data['dest_production'] = siteMapping.isProductionLatency(destination)
if 'datapoints' not in m:
q.task_done()
print(threading.current_thread().name, "no datapoints found in the message")
continue
su = m['datapoints']
# print(su)
for ts, th in su.items():
dati = datetime.utcfromtimestamp(float(ts))
data['_index'] = INDEX_PREFIX + str(dati.year) + "." + str(dati.month) + "." + str(dati.day)
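# Note (illustrative): the index name is built without zero-padding, so a
# timestamp on 2021-03-05 would land in index "ps_packetloss-2021.3.5".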
data['timestamp'] = int(float(ts) * 1000)
data['_id'] = hash((m['meta']['org_metadata_key'], data['timestamp']))
data['packet_loss'] = th
# print(data)
aLotOfData.append(copy.copy(data))
q.task_done()
if len(aLotOfData) > 500:
succ = tools.bulk_index(aLotOfData, es_conn=es_conn, thread_name=threading.current_thread().name)
if succ is True:
aLotOfData = []
if len(aLotOfData) > 10000:
print('too many entries in memory. sleep for a minute.')
time.sleep(60)
RMQ_parameters = tools.get_RMQ_connection_parameters()
q = queue.Queue()
# start eventCreator threads
for i in range(1):
t = Thread(target=eventCreator)
t.daemon = True
t.start()
while True:
connect_to_MQ()
time.sleep(55)
print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "threads:", threading.active_count(), "qsize:", q.qsize())
|
KpmManager.py
|
from threading import Thread, Timer, Event
from queue import Queue
import sublime_plugin, sublime
from .SoftwareOffline import *
from .SoftwareUtil import *
from .SoftwareHttp import *
from .Constants import *
from .CommonUtil import *
from .TrackerManager import *
DEFAULT_DURATION = 60
# post the keystroke payload: persist it offline and emit codetime tracker events.
def post_json(json_data):
# save the data to the offline data file
processAndAggregateData(json.loads(json_data))
jwt = getJwt()
for filepath, payload in json.loads(json_data)['source'].items():
track_codetime_event(
jwt=jwt,
keystrokes=payload['keystrokes'],
lines_added=payload.get('document_change_info', {}).get('lines_added', 0),
lines_deleted=payload.get('document_change_info', {}).get('lines_deleted', 0),
characters_added=payload.get('document_change_info', {}).get('characters_added', 0),
characters_deleted=payload.get('document_change_info', {}).get('characters_deleted', 0),
single_deletes=payload.get('document_change_info', {}).get('single_deletes', 0),
multi_deletes=payload.get('document_change_info', {}).get('multi_deletes', 0),
single_adds=payload.get('document_change_info', {}).get('single_adds', 0),
multi_adds=payload.get('document_change_info', {}).get('multi_adds', 0),
auto_indents=payload.get('document_change_info', {}).get('auto_indents', 0),
replacements=payload.get('document_change_info', {}).get('replacements', 0),
is_net_change=payload.get('document_change_info', {}).get('is_net_change', False),
start_time=payload['local_start'],
end_time=payload['local_end'],
file_path=payload['file_path'],
file_name=payload['file_name'],
syntax=payload['syntax'],
line_count=payload['lines'],
character_count=payload['length'],
project_name=payload['project_name'],
project_directory=payload['project_directory'],
plugin_id=payload['plugin_id'],
plugin_version=payload['plugin_version'],
plugin_name=payload['plugin_name'],
repo_identifier=payload['repo_identifier'],
repo_name=payload['repo_name'],
owner_id=payload.get('repo_owner_id', None),
git_branch=payload['git_branch'],
git_tag=payload['git_tag']
)
PluginData.reset_source_data()
#
# Background thread used to send data every minute.
#
class BackgroundWorker():
def __init__(self, threads_count, target_func):
self.queue = Queue(maxsize=0)
self.target_func = target_func
self.threads = []
for i in range(threads_count):
thread = Thread(target=self.worker, daemon=True)
thread.start()
self.threads.append(thread)
def worker(self):
while True:
self.target_func(self.queue.get())
self.queue.task_done()
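# Illustrative usage (assumption, not taken from the plugin itself): any callable
# can serve as the target, e.g.
#     worker = BackgroundWorker(1, print)
#     worker.queue.put("payload")   # "payload" is printed from the worker thread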
#
# kpm payload data structure
#
class PluginData():
__slots__ = ('source', 'keystrokes', 'start', 'local_start', 'project', 'pluginId', 'version', 'os', 'timezone', 'elapsed_seconds')
background_worker = BackgroundWorker(1, post_json)
active_datas = {} # active projects, where each entry is a project directory
line_counts = {}
send_timer = None
def __init__(self, project):
self.source = {}
self.start = 0
self.local_start = 0
self.timezone = ''
self.keystrokes = 0
self.project = project
self.pluginId = PLUGIN_ID
self.version = getVersion()
self.timezone = getTimezone()
self.os = getOs()
self.elapsed_seconds = 0
def json(self):
# make sure all file end times are set
dict_data = {key: getattr(self, key, None)
for key in self.__slots__}
return json.dumps(dict_data)
# send the kpm info
def send(self):
# check if it has data
if PluginData.background_worker and self.hasData():
PluginData.endUnendedFileEndTimes()
PluginData.background_worker.queue.put(self.json())
# check if we have data
def hasData(self):
if (self.keystrokes > 0):
return True
for fileName in self.source:
fileInfo = self.source[fileName]
if (fileInfo['close'] > 0 or
fileInfo['open'] > 0 or
fileInfo['paste'] > 0 or
fileInfo['delete'] > 0 or
fileInfo['add'] > 0 or
fileInfo['netkeys'] > 0):
return True
return False
# Return True if a keystroke payload has keystrokes
@staticmethod
def hasKeystrokeData():
for dir in PluginData.active_datas:
keystrokeCountObj = PluginData.active_datas[dir]
if keystrokeCountObj is not None and keystrokeCountObj.keystrokes is not None:
if keystrokeCountObj.keystrokes > 0:
return True
return False
@staticmethod
def reset_source_data():
PluginData.send_timer = None
for dir in PluginData.active_datas:
keystrokeCountObj = PluginData.active_datas[dir]
# get the lines so we can add that back
for fileName in keystrokeCountObj.source:
fileInfo = keystrokeCountObj.source[fileName]
# remember the line count for this file so we can reuse it later
PluginData.line_counts[fileName] = fileInfo.get("lines", 0)
if keystrokeCountObj is not None:
keystrokeCountObj.source = {}
keystrokeCountObj.keystrokes = 0
keystrokeCountObj.project['identifier'] = None
keystrokeCountObj.timezone = getTimezone()
@staticmethod
def get_active_data(view):
return_data = None
if view is None or view.window() is None:
return return_data
fileName = view.file_name()
if (fileName is None):
fileName = UNTITLED
sublime_variables = view.window().extract_variables()
project = Project()
# set it to none as a default
projectFolder = NO_PROJ_NAME
# set the project folder
if 'folder' in sublime_variables:
projectFolder = sublime_variables['folder']
elif 'file_path' in sublime_variables:
projectFolder = sublime_variables['file_path']
# if we have a valid project folder, set the project name from it
if projectFolder != NO_PROJ_NAME:
project['directory'] = projectFolder
if 'project_name' in sublime_variables and sublime_variables['project_name']:
project['name'] = sublime_variables['project_name']
else:
# use the last segment of the folder path as the project name
projectNameIdx = projectFolder.rfind('/')
if projectNameIdx > -1:
projectName = projectFolder[projectNameIdx + 1:]
project['name'] = projectName
else:
project['directory'] = NO_PROJ_NAME
project['name'] = UNTITLED
old_active_data = None
if project['directory'] in PluginData.active_datas:
old_active_data = PluginData.active_datas[project['directory']]
if old_active_data is None:
new_active_data = PluginData(project)
PluginData.active_datas[project['directory']] = new_active_data
return_data = new_active_data
else:
return_data = old_active_data
fileInfoData = PluginData.get_file_info_and_initialize_if_none(return_data, fileName)
# This activates the 60 second timer. The callback
# in the Timer sends the data
if (PluginData.send_timer is None):
PluginData.send_timer = Timer(DEFAULT_DURATION, return_data.send)
PluginData.send_timer.start()
return return_data
# ...
@staticmethod
def get_existing_file_info(fileName):
fileInfoData = None
# Get the FileInfo object within the KeystrokesCount object
# based on the specified fileName.
for dir in PluginData.active_datas:
keystrokeCountObj = PluginData.active_datas[dir]
if keystrokeCountObj is not None:
hasExistingKeystrokeObj = True
# we have a keystroke count object, get the fileInfo
if keystrokeCountObj.source is not None and fileName in keystrokeCountObj.source:
# set the fileInfoData we'll return to the calling def
fileInfoData = keystrokeCountObj.source[fileName]
else:
# set end times on the other files
for fileName in keystrokeCountObj.source:
fileInfo = keystrokeCountObj.source[fileName]
nowTimes = getNowTimes()
fileInfo["end"] = nowTimes['nowInSec']
fileInfo["local_end"] = nowTimes['localNowInSec']
return fileInfoData
@staticmethod
def endUnendedFileEndTimes():
for dir in PluginData.active_datas:
keystrokeCountObj = PluginData.active_datas[dir]
if keystrokeCountObj is not None and keystrokeCountObj.source is not None:
for fileName in keystrokeCountObj.source:
fileInfo = keystrokeCountObj.source[fileName]
if (fileInfo.get("end", 0) == 0):
nowTimes = getNowTimes()
fileInfo["end"] = nowTimes['nowInSec']
fileInfo["local_end"] = nowTimes['localNowInSec']
@staticmethod
def send_all_datas():
for dir in PluginData.active_datas:
PluginData.active_datas[dir].send()
#.........
@staticmethod
def initialize_file_info(keystrokeCount, fileName):
if keystrokeCount is None:
return
if fileName is None or fileName == '':
fileName = UNTITLED
# create the new FileInfo, which will contain a dictionary
# of fileName and it's metrics
fileInfoData = PluginData.get_existing_file_info(fileName)
nowTimes = getNowTimes()
if keystrokeCount.start == 0:
keystrokeCount.start = nowTimes['nowInSec']
keystrokeCount.local_start = nowTimes['localNowInSec']
keystrokeCount.timezone = getTimezone()
# "add" = additive keystrokes
# "netkeys" = add - delete
# "keys" = add + delete
# "delete" = delete keystrokes
if fileInfoData is None:
fileInfoData = {}
fileInfoData['paste'] = 0
fileInfoData['open'] = 0
fileInfoData['close'] = 0
fileInfoData['length'] = 0
fileInfoData['delete'] = 0
fileInfoData['netkeys'] = 0
fileInfoData['keystrokes'] = 0
fileInfoData['add'] = 0
fileInfoData['lines'] = 0
fileInfoData['linesAdded'] = 0
fileInfoData['linesRemoved'] = 0
fileInfoData['syntax'] = ""
fileInfoData['start'] = nowTimes['nowInSec']
fileInfoData['local_start'] = nowTimes['localNowInSec']
fileInfoData['end'] = 0
fileInfoData['local_end'] = 0
fileInfoData['chars_pasted'] = 0
fileInfoData['project_name'] = NO_PROJ_NAME
fileInfoData['project_directory'] = ''
fileInfoData['file_name'] = ''
fileInfoData['file_path'] = ''
fileInfoData['plugin_id'] = getPluginId()
fileInfoData['plugin_version'] = getVersion()
fileInfoData['plugin_name'] = getPluginName()
fileInfoData['repo_identifier'] = ''
fileInfoData['repo_name'] = ''
fileInfoData['repo_owner_id'] = ''
fileInfoData['git_branch'] = ''
fileInfoData['git_tag'] = ''
fileInfoData['document_change_info'] = {}
fileInfoData['document_change_info']['lines_added'] = 0
fileInfoData['document_change_info']['lines_deleted'] = 0
fileInfoData['document_change_info']['characters_added'] = 0
fileInfoData['document_change_info']['characters_deleted'] = 0
fileInfoData['document_change_info']['single_deletes'] = 0
fileInfoData['document_change_info']['multi_deletes'] = 0
fileInfoData['document_change_info']['single_adds'] = 0
fileInfoData['document_change_info']['multi_adds'] = 0
fileInfoData['document_change_info']['auto_indents'] = 0
fileInfoData['document_change_info']['replacements'] = 0
fileInfoData['document_change_info']['is_net_change'] = False
keystrokeCount.source[fileName] = fileInfoData
else:
# update the end and local_end to zero since the file is still getting modified
fileInfoData['end'] = 0
fileInfoData['local_end'] = 0
@staticmethod
def get_file_info_and_initialize_if_none(keystrokeCount, fileName):
fileInfoData = PluginData.get_existing_file_info(fileName)
if fileInfoData is None:
PluginData.initialize_file_info(keystrokeCount, fileName)
fileInfoData = PluginData.get_existing_file_info(fileName)
return fileInfoData
|
lambda_function.py
|
import requests_unixsocket
import time
import json
import subprocess
import uuid
import multiprocessing
import queue
import threading
import signal
import os
# note: see https://aws.amazon.com/blogs/compute/parallel-processing-in-python-with-aws-lambda/
def _enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
class HsdsLogger:
def __init__(self):
# set log level based on LOG_LEVEL env
if "LOG_LEVEL" in os.environ:
log_level_cfg = os.environ["LOG_LEVEL"]
else:
log_level_cfg = "INFO"
if log_level_cfg not in ("DEBUG", "WARN", "INFO", "ERROR"):
print(f"unsupported log_level: {log_level_cfg}, using INFO instead")
log_level_cfg = "INFO"
self._log_level = log_level_cfg
def debug(self, msg):
if self._log_level == "DEBUG":
print(f"DEUBG: {msg}")
def info(self, msg):
if self._log_level in ("INFO", "DEBUG"):
print(f"INFO: {msg}")
def warn(self, msg):
if self._log_level in ("WARN", "INFO", "DEBUG"):
print(f"WARN: {msg}")
def error(self, msg):
print(f"ERROR: {msg}")
class HsdsApp:
"""
Class to initiate and manage sub-process HSDS service
"""
def __init__(self, username=None, password=None, dn_count=1, readonly=False, logfile=None):
"""
Initializer for class
"""
rand_name = uuid.uuid4().hex[:8]
tmp_dir = f"/tmp/hs{rand_name}/"
os.mkdir(tmp_dir)
self._dn_urls = []
self._socket_paths = []
self._processes = []
self._queues = []
self._threads = []
self._dn_count = dn_count
self._username = username
self._password = password
self._logfile = logfile
self._readonly = readonly
self.log = HsdsLogger()
# url-encode any slashes in the socket dir
socket_url = ""
for ch in tmp_dir:
if ch == '/':
socket_url += "%2F"
else:
socket_url += ch
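# For example (illustrative), a tmp_dir of "/tmp/hsab12cd34/" becomes
# "%2Ftmp%2Fhsab12cd34%2F", so a data-node URL looks like
# "http+unix://%2Ftmp%2Fhsab12cd34%2Fdn_1.sock".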
for i in range(dn_count):
socket_name = f"dn_{(i+1)}.sock"
dn_url = f"http+unix://{socket_url}{socket_name}"
self._dn_urls.append(dn_url)
socket_path = f"{tmp_dir}{socket_name}"
self._socket_paths.append(socket_path)
# sort the urls so that node_number can be determined based on dn_url
self._dn_urls.sort()
self._endpoint = f"http+unix://{socket_url}sn_1.sock"
self._socket_paths.append(f"{tmp_dir}sn_1.sock")
self._rangeget_url = f"http+unix://{socket_url}rangeget.sock"
self._socket_paths.append(f"{tmp_dir}rangeget.sock")
@property
def endpoint(self):
return self._endpoint
def print_process_output(self):
""" print any queue output from sub-processes
"""
#print("print_process_output")
while True:
got_output = False
for q in self._queues:
try:
line = q.get_nowait() # or q.get(timeout=.1)
except queue.Empty:
pass # no output on this queue yet
else:
if isinstance(line, bytes):
#self.log.debug(line.decode("utf-8").strip())
print(line.decode("utf-8").strip())
else:
print(line.strip())
got_output = True
if not got_output:
break # all queues empty for now
def check_processes(self):
#print("check processes")
self.print_process_output()
for p in self._processes:
if p.poll() is not None:
result = p.communicate()
msg = f"process {p.args[0]} ended, result: {result}"
self.log.warn(msg)
# TBD - restart failed process
def run(self):
""" startup hsds processes
"""
if self._processes:
# just check process state and restart if necessary
self.check_processes()
return
dn_urls_arg = ""
for dn_url in self._dn_urls:
if dn_urls_arg:
dn_urls_arg += ','
dn_urls_arg += dn_url
pout = subprocess.PIPE # will pipe to parent
# create processes for count dn nodes, sn node, and rangeget node
count = self._dn_count + 2 # plus 2 for rangeget proxy and sn
# set PYTHONUNBUFFERED so we can get any output immediately
os.environ["PYTHONUNBUFFERED"] = "1"
common_args = ["--standalone", ]
# print("setting log_level to:", args.loglevel)
# common_args.append(f"--log_level={args.loglevel}")
common_args.append(f"--dn_urls={dn_urls_arg}")
common_args.append(f"--rangeget_url={self._rangeget_url}")
common_args.append(f"--hsds_endpoint={self._endpoint}")
common_args.append("--password_file=")
common_args.append("--server_name=HSDS on AWS Lambda")
common_args.append("--use_socket")
if self._readonly:
common_args.append("--readonly")
for i in range(count):
if i == 0:
# args for service node
pargs = ["hsds-servicenode", "--log_prefix=sn "]
if self._username:
pargs.append(f"--hs_username={self._username}")
if self._password:
pargs.append(f"--hs_password={self._password}")
pargs.append(f"--sn_url={self._endpoint}")
pargs.append("--logfile=sn1.log")
elif i == 1:
# args for rangeget node
pargs = ["hsds-rangeget", "--log_prefix=rg "]
else:
node_number = i - 2 # start with 0
pargs = ["hsds-datanode", f"--log_prefix=dn{node_number+1} "]
pargs.append(f"--dn_urls={dn_urls_arg}")
pargs.append(f"--node_number={node_number}")
# self.log.info(f"starting {pargs[0]}")
pargs.extend(common_args)
p = subprocess.Popen(pargs, bufsize=1, universal_newlines=True, shell=False, stdout=pout)
self._processes.append(p)
if not self._logfile:
# setup queue so we can check on process output without blocking
q = queue.Queue()
t = threading.Thread(target=_enqueue_output, args=(p.stdout, q))
self._queues.append(q)
t.daemon = True # thread dies with the program
t.start()
self._threads.append(t)
# wait until the sockets are initialized
start_ts = time.time()
SLEEP_TIME = 0.1 # time to sleep between checking on socket connection
MAX_INIT_TIME = 10.0 # max time to wait for socket to be initialized
while True:
ready = 0
for socket_path in self._socket_paths:
if os.path.exists(socket_path):
ready += 1
if ready == count:
self.log.info("all processes ready!")
break
else:
self.log.debug(f"{ready}/{count} ready")
self.log.debug(f"sleeping for {SLEEP_TIME}")
time.sleep(SLEEP_TIME)
if time.time() > start_ts + MAX_INIT_TIME:
msg = f"failed to initialzie socket after {MAX_INIT_TIME} seconds"
self.log.error(msg)
break
self.log.info(f"Ready after: {(time.time()-start_ts):4.2f} s")
def stop(self):
""" terminate hsds processes
"""
if not self._processes:
return
now = time.time()
self.log.info(f"hsds app stop at {now}")
for p in self._processes:
self.log.info(f"sending SIGINT to {p.args[0]}")
p.send_signal(signal.SIGINT)
# wait for sub-processes to exit
SLEEP_TIME = 0.1 # time to sleep between checking on process state
MAX_WAIT_TIME = 10.0 # max time to wait for sub-process to terminate
start_ts = time.time()
while True:
is_alive = False
for p in self._processes:
if p.poll() is None:
is_alive = True
if is_alive:
self.log.debug(f"still alive, sleep {SLEEP_TIME}")
time.sleep(SLEEP_TIME)
else:
self.log.debug("all subprocesses exited")
break
if time.time() > start_ts + MAX_WAIT_TIME:
msg = f"failed to terminate after {MAX_WAIT_TIME} seconds"
self.log.error(msg)
break
# kill any processes reluctant to die (poll() is None means still running)
for p in self._processes:
if p.poll() is None:
self.log.info(f"terminating {p.args[0]}")
p.terminate()
self._processes = []
# drop references to the reader threads (daemon threads exit with the process)
self._threads = []
def invoke(self, method, path, params=None, headers=None, body=None):
# invoke given request
req = self.endpoint + path
print(f"make_request: {req}")
result = {}
with requests_unixsocket.Session() as s:
try:
if method == "GET":
rsp = s.get(req, params=params, headers=headers)
elif method == "POST":
rsp = s.post(req, params=params, headers=headers, data=body)
elif method == "PUT":
rsp = s.put(req, params=params, headers=headers, data=body)
elif method == "DELETE":
rsp = s.delete(req, params=params, headers=headers)
else:
msg = f"Unexpected request method: {method}"
print(msg)
raise ValueError(msg)
print(f"got status_code: {rsp.status_code} from req: {req}")
# TBD - return dataset data in base64
result["isBase64Encoded"] = False
result["statusCode"] = rsp.status_code
# convert case-insensitive headers to dict
result["headers"] = json.dumps(dict(rsp.headers))
#print_process_output(processes)
if rsp.status_code == 200:
print(f"rsp.text: {rsp.text}")
result["body"] = rsp.text
else:
result["body"] = "{}"
except Exception as e:
print(f"got exception: {e}, quitting")
except KeyboardInterrupt:
print("got KeyboardInterrupt, quitting")
finally:
print("request done")
return result
def __del__(self):
""" cleanup class resources """
self.stop()
#
# End HsdsApp class
#
def getEventMethod(event):
method = "GET" # default
if "method" in event:
method = event["method"]
else:
# scan for method in the api gateway 2.0 format
if "requestContext" in event:
reqContext = event["requestContext"]
if "http" in reqContext:
http = reqContext["http"]
if "method" in http:
method = http["method"]
return method
def getEventPath(event):
path = "/about" # default
if "path" in event:
path = event["path"]
else:
# scan for path in the api gateway 2.0 format
if "requestContext" in event:
reqContext = event["requestContext"]
if "http" in reqContext:
http = reqContext["http"]
if "path" in http:
path = http["path"]
return path
def getEventHeaders(event):
headers = {} # default
if "headers" in event:
headers = event["headers"]
return headers
def getEventParams(event):
params = {} # default
if "params" in event:
params = event["params"]
elif "queryStringParameters" in event:
params = event["queryStringParameters"]
return params
def getEventBody(event):
body = {} # default
if "body" in event:
body = event["body"]
return body
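# Illustrative event (assumed shape) exercised by the helpers above, in the
# API Gateway 2.0 style with query parameters and headers:
#     {"requestContext": {"http": {"method": "GET", "path": "/about"}},
#      "queryStringParameters": {"domain": "/shared/tall.h5"},
#      "headers": {"accept": "application/json"}}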
def lambda_handler(event, context):
# setup logging
# process event data
function_name = context.function_name
print(f"lambda_handler(event, context) for function {function_name}")
if "AWS_ROLE_ARN" in os.environ:
print(f"using AWS_ROLE_ARN: {os.environ['AWS_ROLE_ARN']}")
if "AWS_SESSION_TOKEN" in os.environ:
print(f"using AWS_SESSION_TOKEN: {os.environ['AWS_SESSION_TOKEN']}")
print(f"event: {event}")
method = getEventMethod(event)
if method not in ("GET", "POST", "PUT", "DELETE"):
err_msg = f"method: {method} is unsupported"
print(err_msg)
return {"status_code": 400, "error": err_msg}
headers = getEventHeaders(event)
params = getEventParams(event)
req = getEventPath(event)
print(f"got req path: {req}")
# determine if this method will modify storage
# if not, we'll pass readonly to the dn nodes so they
# will not run s3sync task
if method == "GET":
readonly = True
elif method == "PUT":
readonly = False
elif method == "DELETE":
readonly = False
elif method == "POST":
# post is write unless we are doing a point selection
if req.startswith("/datasets") and req.endswith("value"):
readonly = True
else:
readonly = False
else:
print(f"unexpected method: {method}")
readonly = False
if not isinstance(headers, dict):
err_msg = f"expected headers to be a dict, but got: {type(headers)}"
print(err_msg)
return {"status_code": 400, "error": err_msg}
if not isinstance(params, dict):
err_msg = f"expected params to be a dict, but got: {type(params)}"
print(err_msg)
return {"status_code": 400, "error": err_msg}
if "accept" in headers:
accept = headers["accept"]
print(f"request accept type: {accept}")
if accept == "application/octet-stream":
print("replacing binary accept with json")
headers["accept"] = "aplication/json"
body = getEventBody(event)
if body and method not in ("PUT", "POST"):
err_msg = "body only support with PUT and POST methods"
print(err_msg)
return {"status_code": 400, "error": err_msg}
cpu_count = multiprocessing.cpu_count()
print(f"got cpu_count of: {cpu_count}")
if "TARGET_DN_COUNT" in os.environ:
target_dn_count = int(os.environ["TARGET_DN_COUNT"])
print(f"get env override for target_dn_count of: {target_dn_count}")
else:
# base dn count on half the VCPUs (rounded up)
target_dn_count = - (-cpu_count // 2)
print(f"setting dn count to: {target_dn_count}")
# instantiate hsdsapp object
hsds = HsdsApp(username=function_name, password="lambda", dn_count=target_dn_count, readonly=readonly)
hsds.run()
result = hsds.invoke(method, req, params=params, headers=headers, body=body)
print(f"got result: {result}")
hsds.check_processes()
hsds.stop()
return result
### main
if __name__ == "__main__":
# export PYTHONUNBUFFERED=1
print("main")
#req = "/about"
req = "/datasets/d-d38053ea-3418fe27-22d9-478e7b-913279/value"
#params = {}
params = {"domain": "/shared/tall.h5", "bucket": "hdflab2"}
class Context:
@property
def function_name(self):
return "hslambda"
# simplified event format
# see: https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html
# for a description of the API Gateway 2.0 format which is also supported
event = {"method": "GET", "path": req, "params": params}
context = Context()
result = lambda_handler(event, context)
print(f"got result: {result}")
|
multi_process_runner.py
|
# Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-process runner for testing purpose."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import json
import os
import signal
import sys
import threading
import time
import unittest
from absl import logging
import six
from six.moves import queue as Queue
from tensorflow.python import tf2
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import multi_process_lib
from tensorflow.python.eager import context
multiprocessing = multi_process_lib.multiprocessing
# pylint: disable=g-import-not-at-top
try:
# `faulthandler` is not available in py2.
import faulthandler
except ImportError:
faulthandler = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import dill
except ImportError:
dill = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import tblib.pickling_support
# For pickling traceback objects.
tblib.pickling_support.install()
except ImportError:
pass
# _ProcessStatusInfo contains process status information. When is_successful
# attribute is True, the subprocess has ended successfully, or if False, the
# exception stack trace info is stored in exc_info to pass on to parent process
# to be re-raised.
_ProcessStatusInfo = collections.namedtuple(
'_ProcessStatusInfo', ['is_successful', 'exc_info', 'return_value'])
# Information returned from a successful MultiProcessRunner run.
MultiProcessRunnerResult = collections.namedtuple('MultiProcessRunnerResult',
['return_value', 'stdout'])
TestEnvironment = collections.namedtuple('TestEnvironment', [
'task_type', 'task_id', 'cluster_spec', 'rpc_layer', 'grpc_fail_fast',
'v2_enabled', 'executing_eagerly'
])
# Resources for communication between worker processes and the main process.
#
# `process_status_queue` is used by `multi_process_runner` internally for
# communication from subprocesses to the parent process for whether it's been
# successful, and if not what the error stack trace is.
# `parent_to_sub_queue` is used for communications from parent to subprocess.
# Currently this is only used to terminate subprocesses.
# TODO(rchao): Remove this once subprocess is terminated by SIGKILL.
# `streaming_pipe_w` is to stream stdout and stderr from subprocesses to parent
# process.
# `barrier` is a barrier for the party of all subprocesses.
Resources = collections.namedtuple('Resources', [
'process_status_queue', 'parent_to_sub_queue', 'streaming_pipe_w', 'barrier'
])
# The default timeout (in seconds) is selected so that it's handled before the
# default "medium" timeout of the test runs.
_DEFAULT_TIMEOUT_SEC = 200
class MultiProcessRunner(object):
"""A utility class to start multiple processes to simulate a cluster.
We need to use multiple processes to simulate a cluster in TF 2.0 tests
because TF 2.0 has some process-global data structures that have to be
separated by processes. We also need child processes to test out our fault
tolerance because shutting down a standard TensorFlow server within its
process is not supported.
Note: the main test program that uses this runner class must run main program
via `test_main` defined in this file. Using this runner in non-test binaries
is not supported yet.
This class is not thread-safe. Child processes will inherit the TF2 behavior flag.
"""
def __init__(self,
proc_func,
cluster_spec,
rpc_layer=None,
max_run_time=None,
grpc_fail_fast=None,
stream_stdout=True,
list_stdout=False,
use_dill_for_args=True,
daemon=False,
args=None,
kwargs=None):
"""Creates a multi-process runner.
Args:
proc_func: Function to be run on child processes. This will be run on
processes for all task types.
cluster_spec: Dict for cluster spec. The following is an example of
cluster with three workers and two ps's.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"],
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]}
rpc_layer: RPC layer to use. Default value is 'grpc+loas'.
max_run_time: If set, child processes are forced to exit at approximately
this many seconds after `start` is called. We achieve this through the
`signal.alarm()` api. Note that this is best effort at the Python level,
since a Python signal handler does not get executed while lower level
C/C++ code is running, so it can be delayed for an arbitrarily long time.
If any of the child processes are still running when `max_run_time` is up,
they will be force-terminated and a `UnexpectedSubprocessExitError`
may be raised at `join()`.
grpc_fail_fast: Whether GRPC connection between processes should fail
without retrying. Defaults to None, in which case the environment
variable is not explicitly set.
stream_stdout: True if the output/error from the subprocesses should be
streamed to be printed in parent process' log. Defaults to True.
list_stdout: True if the output/error from the subprocesses should be
collected to be attached to the resulting `MultiProcessRunnerResult`
returned from `MultiProcessRunner.join()`. If True, the list of stdout
can be retrieved via `MultiProcessRunnerResult.stdout` attribute.
Defaults to False.
use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill
can pickle more objects, but doesn't work with types in
`multiprocessing` library like `Mutex`.
daemon: Whether to start processes as daemons.
args: Positional arguments to be sent to functions run on processes.
kwargs: Keyword arguments to be sent to functions run on processes.
Raises:
RuntimeError: if `multi_process_runner.test_main()` is not called.
ValueError: if there are more than one chief in the `cluster_spec`.
"""
assert cluster_spec is not None
if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
raise ValueError('If chief exists in the cluster, there must be at most '
'one chief. Current `cluster_spec` has {} chiefs.'
.format(len(cluster_spec['chief'])))
if not multi_process_lib.initialized():
raise RuntimeError('`multi_process_runner` is not initialized. '
'Please call `multi_process_runner.test_main()` '
'within `if __name__ == \'__main__\':` block '
'in your python module to properly initialize '
'`multi_process_runner`.')
if not callable(proc_func):
raise ValueError('proc_func is not a callable')
self._proc_func = proc_func
self._cluster_spec = cluster_spec
self._rpc_layer = rpc_layer
self._max_run_time = max_run_time
self._grpc_fail_fast = grpc_fail_fast
self._stream_stdout = stream_stdout
# TODO(rchao): Revisit list_stdout argument to consider other solution.
self._list_stdout = list_stdout
self._dependence_on_chief = True
self._use_dill_for_args = use_dill_for_args
self._daemon = daemon
self._args = args or ()
self._kwargs = kwargs or {}
# Child processes should have the same v2 and eager behavior.
self._v2_enabled = tf2.enabled()
self._executing_eagerly = context.executing_eagerly()
self._joined = False
self._processes = {}
self._outstanding_subprocess_count = 0
self._reading_threads = []
self._manager = multiprocessing.Manager()
self._process_status_queue = self._manager.Queue()
self._parent_to_sub_queue = self._manager.Queue()
parties = sum(len(addresses) for addresses in self._cluster_spec.values())
self._barrier = self._manager.Barrier(parties)
# We use a queue to collect outputs from worker processes since it's thread
# safe.
self._streaming_queue = self._manager.Queue()
# This flag will be set to True once terminate_all() is called.
self._all_forced_terminated = False
def _continuously_readline_from_sub(self, pipe_r, task_type, task_id):
"""Function to continuously read lines from subprocesses."""
with os.fdopen(pipe_r.fileno(), 'r', closefd=False) as reader:
for line in reader:
task_string = '[{}-{}]:'.format(task_type, task_id)
formatted_line = '{} {}'.format(task_string.ljust(14), line)
if self._stream_stdout:
# TODO(rchao): Use a lock here to ensure the printed lines are not
# broken.
print(formatted_line, end='', flush=True)
if self._list_stdout:
self._streaming_queue.put(formatted_line)
def _start_subprocess_and_reading_thread(self,
task_type,
task_id,
cluster_spec=None,
proc_func=None,
args=None,
kwargs=None):
"""Start a subprocess and a thread the reads lines from the subprocess."""
if dill is None:
raise unittest.SkipTest(
'TODO(b/150264776): Resolve dependency issue in CI')
test_env = TestEnvironment(
task_type=task_type,
task_id=task_id,
cluster_spec=cluster_spec or self._cluster_spec,
rpc_layer=self._rpc_layer,
grpc_fail_fast=self._grpc_fail_fast,
v2_enabled=self._v2_enabled,
executing_eagerly=self._executing_eagerly,
)
pipe_r, pipe_w = multiprocessing.Pipe(duplex=False)
resources = Resources(
process_status_queue=self._process_status_queue,
parent_to_sub_queue=self._parent_to_sub_queue,
streaming_pipe_w=pipe_w,
barrier=self._barrier,
)
if proc_func is None:
proc_func, args, kwargs = self._proc_func, self._args, self._kwargs
# Always use dill to pickle proc_func so that we support more callable
# types, e.g. lambda.
proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
if self._use_dill_for_args:
args = dill.dumps(args, dill.HIGHEST_PROTOCOL)
kwargs = dill.dumps(kwargs, dill.HIGHEST_PROTOCOL)
p = _Process(
test_env=test_env,
target=_ProcFunc(),
args=(resources, test_env, proc_func, args, kwargs,
self._use_dill_for_args),
daemon=self._daemon)
p.start()
self._processes[(task_type, task_id)] = p
self._outstanding_subprocess_count += 1
    # For each subprocess, we dedicate a thread that continuously reads lines
    # from it.
thread = threading.Thread( # pylint: disable=unexpected-keyword-arg
target=self._continuously_readline_from_sub,
args=(pipe_r, task_type, task_id))
thread.start()
self._reading_threads.append(thread)
def start(self):
"""Starts processes, one for each task in `cluster_spec`.
    Note that this is best effort by the applicable multiprocessing library,
    and it may take several seconds for a subprocess to start successfully.
"""
if self._processes:
raise ValueError('MultiProcessRunner already started.')
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
self._start_subprocess_and_reading_thread(task_type, task_id)
# TODO(rchao): Remove the need of using SIGALRM if possible. At this time,
# without this the tests become very flaky.
if self._max_run_time is not None:
def handler(signum, frame):
del signum, frame
self.terminate_all()
signal.signal(signal.SIGALRM, handler)
signal.alarm(self._max_run_time)
def start_in_process_as(self, as_task_type, as_task_id):
"""Start the processes, with the specified task run in main process.
This is similar to `start()` except that the task with task_type
`as_task_type` and task_id `as_task_id` is run in the main process.
This method is particularly useful when debugging tool such as `pdb` is
needed in some specific task. Note that since this method is blocking until
that specific task exits, additional actions would need a thread to be
called:
```python
def proc_func():
# user code to be run
import pdb; pdb.set_trace()
def follow_ups():
time.sleep(5)
mpr.start_single_process(
task_type='evaluator',
task_id=0)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1))
threading.Thread(target=follow_ups).start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
mpr.join()
```
    Note that if `list_stdout=True`, the logs/stdout from the task
    run by the main process are not available in `result.stdout`.
Args:
as_task_type: The task type to be run in the main process.
as_task_id: The task id to be run in the main process.
"""
if self._processes:
raise ValueError('MultiProcessRunner already started.')
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
if not (task_type == as_task_type and task_id == as_task_id):
self._start_subprocess_and_reading_thread(task_type, task_id)
_set_tf_config(as_task_type, as_task_id, self._cluster_spec,
self._rpc_layer)
self._proc_func(*self._args, **self._kwargs)
def start_single_process(self,
task_type,
task_id,
cluster_spec=None,
proc_func=None,
args=None,
kwargs=None):
"""Starts a single process.
    This starts a process in the cluster with the given task type, task id, and
    process function (`proc_func`). If the process function is `None`, the
    function provided at `__init__` will be used. If `cluster_spec` is `None`,
    the cluster spec provided at `__init__` will be used.
    TODO(rchao): All subprocesses are meant to be updated with the new cluster
    spec, but this has yet to be implemented. At this time only the newly
    started subprocess picks up this updated cluster spec.
Args:
task_type: The task type.
task_id: The task id.
cluster_spec: The cluster spec to be used on the newly started
process. If `None`, the cluster spec provided at `__init__` will be
used.
proc_func: The process function to be run on the newly started
process. If specified, specify `args` and `kwargs` as well. If `None`,
the function provided at `__init__` will be used.
args: Optional positional arguments to be supplied in `proc_func`.
kwargs: Optional keyword arguments to be supplied in `proc_func`.
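    For example (a sketch, reusing the started `mpr` runner from the example in
    `start_in_process_as` above):
    ```python
    mpr.start_single_process(task_type='evaluator', task_id=0)
    ```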
"""
self._start_subprocess_and_reading_thread(
task_type,
task_id,
cluster_spec=cluster_spec,
proc_func=proc_func,
args=args or (),
kwargs=kwargs or {})
def _queue_to_list(self, queue_to_convert):
"""Convert `queue.Queue` to `list`."""
list_to_return = []
# Calling `queue.empty()` is not reliable.
while True:
try:
list_to_return.append(queue_to_convert.get(block=False))
except Queue.Empty:
break
return list_to_return
def get_process_id(self, task_type, task_id):
"""Returns the subprocess id given the task type and task id."""
p = self._processes.get((task_type, task_id), None)
return p.pid if p else None
def _join_or_terminate(self, task_type, task_id, process, timeout):
"""Joins a process. If it times out, terminate all procsses."""
logging.info('joining %s-%d', task_type, task_id)
process.join(timeout)
    # If exitcode is None, the process hasn't terminated and this is a
    # timeout.
if process.exitcode is None:
# Force termination to dump worker processes stack trace.
self.terminate_all(sig=signal.SIGTERM)
process_statuses = self._queue_to_list(self._process_status_queue)
raise SubprocessTimeoutError(
'%s-%d and possibly more subprocesses timed out.' %
(task_type, task_id), self._get_mpr_result(process_statuses))
def join(self, timeout=_DEFAULT_TIMEOUT_SEC):
"""Joins all the processes with timeout.
If any of the subprocesses does not exit approximately after `timeout`
seconds has passed after `join` call, this raises a
`SubprocessTimeoutError`.
Note: At timeout, it uses SIGTERM to terminate the subprocesses, in order to
log the stack traces of the subprocesses when they exit. However, this
results in timeout when the test runs with tsan (thread sanitizer); if tsan
is being run on the test targets that rely on timeout to assert information,
`MultiProcessRunner.terminate_all()` must be called after `join()`, before
the test exits, so the subprocesses are terminated with SIGKILL, and data
race is removed.
Args:
timeout: if set and not all processes report status within roughly
`timeout` seconds, a `SubprocessTimeoutError` exception will be raised.
Returns:
A MultiProcessRunnerResult object, which has two attributes,
`return_value` and `stdout`. `return_value` always contains the return
      values from the subprocesses. If the `list_stdout` argument is True at
      `__init__`, `stdout` contains a list of all messages from the
      subprocesses' stdout and stderr.
Raises:
SubprocessTimeoutError: if not all processes report status approximately
within `timeout` seconds. When this is raised, a
`MultiProcessRunnerResult` object can be retrieved by
`SubprocessTimeoutError`'s mpr_result attribute, which has the same
structure as above 'Returns' section describes.
UnexpectedSubprocessExitError: If any of the subprocesses did not exit
properly (for example, they exit on SIGTERM or SIGKILL signal). When
this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute, which has the
same structure as above 'Returns' section describes. If `max_run_time`
is not `None`, it is expected that some subprocesses may be
force-killed when `max_run_time` is up, and this is raised in those
cases.
Exception: if there is an Exception propagated from any subprocess.
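    For example (a minimal sketch; `create_cluster_spec` comes from
    `multi_worker_test_base`, as in the `start_in_process_as` example):
    ```python
    mpr = multi_process_runner.MultiProcessRunner(
        proc_func,
        multi_worker_test_base.create_cluster_spec(num_workers=2),
        list_stdout=True)
    mpr.start()
    result = mpr.join()
    # `result.return_value` is a list of values returned by `proc_func`, and
    # `result.stdout` is a list of log lines since `list_stdout=True`.
    ```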
"""
if self._joined:
raise ValueError("MultiProcessRunner can't be joined twice.")
self._joined = True
chief = self._processes.get(('chief', 0), None)
if self._dependence_on_chief and chief:
self._join_or_terminate('chief', 0, chief, timeout)
# Give other processes a chance to exit on their own.
for p in self._processes.values():
p.join(timeout=3)
self.terminate_all()
else:
for (task_type, task_id), p in self._processes.items():
self._join_or_terminate(task_type, task_id, p, timeout)
for (task_type, task_id), p in self._processes.items():
logging.info('%s-%d exit code: %s', task_type, task_id, p.exitcode)
process_statuses = self._queue_to_list(self._process_status_queue)
if not self._all_forced_terminated and len(
process_statuses) != self._outstanding_subprocess_count:
raise UnexpectedSubprocessExitError(
'Missing status(es) from %d subprocess(es). See logs for details.' %
(self._outstanding_subprocess_count - len(process_statuses)),
self._get_mpr_result(process_statuses))
for process_status in process_statuses:
assert isinstance(process_status, _ProcessStatusInfo)
if not process_status.is_successful:
six.reraise(*process_status.exc_info)
# Checking all the processes that are expected to exit properly.
for (task_type, task_id), p in self._processes.items():
if self._dependence_on_chief and task_type != 'chief':
# If _dependence_on_chief, other processes may have been
# forced-terminated, which is expected.
continue
      # A successfully exiting process has exit code 0.
if p.exitcode > 0:
raise UnexpectedSubprocessExitError(
'Subprocess %s-%d exited with exit code %d. See logs for details.' %
(task_type, task_id, p.exitcode),
self._get_mpr_result(process_statuses))
logging.info('Joining log reading threads.')
for thread in self._reading_threads:
thread.join()
logging.info('Joined log reading threads.')
# Clear the alarm.
signal.alarm(0)
return self._get_mpr_result(process_statuses)
def _get_mpr_result(self, process_statuses):
stdout = self._queue_to_list(self._streaming_queue)
return_values = []
for process_status in process_statuses:
if process_status.return_value is not None:
return_values.append(process_status.return_value)
return MultiProcessRunnerResult(stdout=stdout, return_value=return_values)
def terminate(self, task_type, task_id):
"""Terminates the process with `task_type` and `task_id`."""
p = self._processes.get((task_type, task_id), None)
if p is None:
raise ValueError('{}-{} does not exist'.format(task_type, task_id))
# TODO(crccw): change to use Process.terminate() as well.
self._parent_to_sub_queue.put('terminate {} {}'.format(task_type, task_id))
p.join()
def terminate_all(self, sig=None):
"""Terminates all subprocesses."""
# Use SIGKILL as default. In systems where that's unavailable such as
# windows, use SIGTERM.
sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)
for (task_type, task_id), p in self._processes.items():
try:
os.kill(p.pid, sig)
logging.info('%s-%d terminated with signal %r.', task_type, task_id,
sig)
except ProcessLookupError:
logging.info('Attempting to kill %s-%d but it does not exist.',
task_type, task_id)
self._all_forced_terminated = True
class _Process(multi_process_lib.Process):
"""A modified `multiprocessing.Process` that can set up environment variables."""
  # TODO(crccw): consider moving other logic in _ProcFunc to _Process.
def __init__(self, test_env, **kwargs):
super(_Process, self).__init__(**kwargs)
self._test_env = test_env
self._actual_run = getattr(self, 'run')
self.run = self._run_with_setenv
def _run_with_setenv(self):
# We need to set environment variables before doing anything because
# setenv() is not thread-safe.
test_env = self._test_env
if test_env.grpc_fail_fast is not None:
os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast)
_set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec,
test_env.rpc_layer)
return self._actual_run()
class _ProcFunc(object):
"""Represents a callable to run in a subprocess."""
@contextlib.contextmanager
def _runtime_mode(self, executing_eagerly):
if executing_eagerly:
with context.eager_mode():
yield
else:
with context.graph_mode():
yield
def _message_checking_func(self, task_type, task_id):
"""A function that regularly checks messages from parent process."""
# TODO(rchao): Remove this once parent uses SIGKILL to terminate subprocess.
while True:
try:
message = self._resources.parent_to_sub_queue.get(block=False)
# Currently the only possible message is termination.
if not message.startswith('terminate'):
raise ValueError('Unrecognized message: {}'.format(message))
if message == 'terminate {} {}'.format(task_type, task_id):
break
else:
# If the message is not targeting this process, put it back to the
# queue.
self._resources.parent_to_sub_queue.put(message)
time.sleep(1)
except Queue.Empty:
time.sleep(0.1)
self._resources.process_status_queue.put(
_ProcessStatusInfo(
is_successful=True,
exc_info=None,
return_value=None))
# `os._exit(0)` is used to more reliably terminate a subprocess.
os._exit(0) # pylint: disable=protected-access
def _close_streaming(self):
"""Close stdout, stderr and streaming pipe.
We need to explicitly close them since Tensorflow may take a while to exit,
so that the reading threads in the main process can exit more quickly.
"""
sys.stdout.flush()
sys.stderr.flush()
sys.stdout.close()
sys.stderr.close()
self._resources.streaming_pipe_w.close()
def __call__(self, resources, test_env, proc_func, args, kwargs,
use_dill_for_args):
"""The wrapper function that actually gets run in child process(es)."""
global _barrier
self._resources = resources
_barrier = self._resources.barrier
proc_func = dill.loads(proc_func)
if use_dill_for_args:
args = dill.loads(args)
kwargs = dill.loads(kwargs)
if faulthandler is not None:
faulthandler.enable()
faulthandler.register(signal.SIGTERM, chain=True)
# All logging should go to stderr to be streamed to the main process.
logging.set_stderrthreshold(logging.DEBUG)
# Assign sys.stdout and sys.stderr as duplicates of `streaming_pipe_w` so
# print() and logging.*() write directly to `streaming_pipe_w`.
    # Unfortunately since we cannot prepend task_type and task_id information
    # to the streamed logs, we need a thread per subprocess to distinguish
    # which subprocess each message came from.
os.dup2(resources.streaming_pipe_w.fileno(), sys.stdout.fileno())
os.dup2(resources.streaming_pipe_w.fileno(), sys.stderr.fileno())
pid = os.getpid()
logging.info('Subprocess with PID %d (%s, %d) is now being started.', pid,
test_env.task_type, test_env.task_id)
# The thread will be dedicated to checking messages from the parent process.
threading.Thread( # pylint: disable=unexpected-keyword-arg
target=self._message_checking_func,
args=(test_env.task_type, test_env.task_id),
daemon=True).start()
if test_env.v2_enabled:
v2_compat.enable_v2_behavior()
with self._runtime_mode(test_env.executing_eagerly):
info = _run_contained(proc_func, args, kwargs)
self._resources.process_status_queue.put(info)
      # Re-raise the exception in addition to reporting it to the parent
      # process, so that even if the `--test_timeout` flag is set and the
      # error doesn't make it to the parent process before bazel's timeout,
      # the log still shows what happened in this subprocess, instead of the
      # error being silently suppressed by the early bazel timeout. Raising an
      # error in the subprocess produces a stack trace in the log, but the
      # program continues running.
if not info.is_successful:
six.reraise(*info.exc_info)
self._close_streaming()
# Exit with code 0 as it's considered successful exit at this point.
sys.exit(0)
class MultiProcessPoolRunner(object):
"""A utility class to start a process pool to simulate a cluster.
  It's similar to MultiProcessRunner, but uses a pool of processes to avoid the
  expensive initialization cost of TensorFlow.
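  A typical use looks like the following (an illustrative sketch;
  `create_cluster_spec` comes from `multi_worker_test_base`, and `proc_func`
  is defined as in the `MultiProcessRunner` examples above):
  ```python
  pool_runner = MultiProcessPoolRunner(
      multi_worker_test_base.create_cluster_spec(num_workers=2))
  return_values = pool_runner.run(proc_func, args=(3, 4))
  ```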
"""
def __init__(self, cluster_spec, initializer=None):
"""Creates a multi-process pool runner.
Args:
cluster_spec: Dict for cluster spec. The following is an example of
cluster with three workers.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"]}
      initializer: a callable to be called at the startup of worker processes.
Raises:
RuntimeError: if `multi_process_runner.test_main()` is not called.
      ValueError: if there is more than one chief in the `cluster_spec`.
"""
self._cluster_spec = cluster_spec
self._initializer = initializer
self._conn = {}
self._runner = None
def __del__(self):
self.shutdown()
def shutdown(self):
"""Shuts down the worker pool."""
for conn in self._conn.values():
conn.close()
self._conn = {}
if self._runner is not None:
self._runner.join()
self._runner = None
def _start(self):
"""Starts the worker pool."""
    # We need different arguments for different processes, so we pass a
    # no-op proc_func here and use start_single_process instead.
    #
    # We also need to start the processes in the pool as daemons, so that they
    # don't block the program from exiting. Note that __del__ may not get
    # called when there's an exception. The user may also store a pool runner
    # in a global object to share across test cases.
if dill is None:
raise unittest.SkipTest(
'TODO(b/150264776): Resolve dependency issue in CI')
self._runner = MultiProcessRunner(
proc_func=lambda: None,
cluster_spec=self._cluster_spec,
use_dill_for_args=False,
daemon=True)
if self._initializer:
initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL)
else:
initializer = None
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
conn1, conn2 = multiprocessing.Pipe(duplex=True)
self._conn[(task_type, task_id)] = conn1
self._runner.start_single_process(
task_type,
task_id,
proc_func=_pool_runner_worker,
args=(initializer, conn2))
def run(self, proc_func, args=None, kwargs=None):
"""Runs `proc_func` with `args` and `kwargs` on all jobs.
Args:
proc_func: The function to be run.
args: Optional positional arguments to be supplied in `proc_func`.
kwargs: Optional keyword arguments to be supplied in `proc_func`.
Returns:
A list of return values.
"""
# TODO(b/150264776): skip in OSS until it's implemented.
multi_process_lib.Process()
if self._runner is None:
self._start()
proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
for conn in self._conn.values():
conn.send((proc_func, args or [], kwargs or {}))
process_statuses = []
for (task_type, task_id), conn in self._conn.items():
logging.info('Waiting for the result from %s-%d', task_type, task_id)
try:
process_statuses.append(conn.recv())
except EOFError:
        # This shouldn't happen as a result of exceptions in proc_func. It
        # usually means a bug in the runner.
self.shutdown()
raise RuntimeError('Unexpected EOF. Worker process may have died. '
'Please report a bug')
return_values = []
for process_status in process_statuses:
assert isinstance(process_status, _ProcessStatusInfo)
if not process_status.is_successful:
six.reraise(*process_status.exc_info)
if process_status.return_value is not None:
return_values.append(process_status.return_value)
return return_values
def _pool_runner_worker(initializer, conn):
"""Function that runs on the workers in a pool.
It listens for callables to run and returns the result until `conn` is closed.
It captures the exceptions during executing the callable and return it through
`conn`.
Args:
initializer: A callable to execute during startup.
conn: A multiprocessing.Connection object to listen for tasks and send
results.
"""
if initializer:
initializer = dill.loads(initializer)
initializer()
while True:
try:
proc_func, args, kwargs = conn.recv()
except EOFError:
break
proc_func = dill.loads(proc_func)
info = _run_contained(proc_func, args, kwargs)
sys.stdout.flush()
sys.stderr.flush()
conn.send(info)
def _run_contained(proc_func, args, kwargs):
"""Runs `proc_func` with `args` and `kwargs`.
The function returns _ProcessStatusInfo which captures the return value and
the exception.
Args:
proc_func: The function to be run.
args: Optional positional arguments to be supplied in `proc_func`.
kwargs: Optional keyword arguments to be supplied in `proc_func`.
Returns:
a _ProcessStatusInfo.
"""
try:
return_value = proc_func(*args, **kwargs)
is_successful = True
exc_info = None
except Exception: # pylint: disable=broad-except
return_value = None
is_successful = False
exc_info = sys.exc_info()
finally:
return _ProcessStatusInfo( # pylint: disable=lost-exception
is_successful=is_successful,
exc_info=exc_info,
return_value=return_value)
class SubprocessTimeoutError(RuntimeError):
"""An error that indicates there is at least one subprocess timing out.
When this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`SubprocessTimeoutError`'s mpr_result attribute. See
`MultiProcessRunner.join()` for more information.
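  For example (a sketch, assuming a started runner `mpr`):
  ```python
  try:
    result = mpr.join()
  except multi_process_runner.SubprocessTimeoutError as e:
    # Same structure as the value `join()` would have returned.
    partial_result = e.mpr_result
  ```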
"""
def __init__(self, msg, mpr_result):
super(SubprocessTimeoutError, self).__init__(msg)
self.mpr_result = mpr_result
class UnexpectedSubprocessExitError(RuntimeError):
"""An error indicating there is at least one subprocess with unexpected exit.
When this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute. See
`MultiProcessRunner.join()` for more information.
"""
def __init__(self, msg, mpr_result):
super(UnexpectedSubprocessExitError, self).__init__(msg)
self.mpr_result = mpr_result
def _set_tf_config(task_type, task_id, cluster_spec, rpc_layer=None):
"""Set TF_CONFIG environment variable."""
tf_config_dict = {
'cluster': cluster_spec,
'task': {
'type': task_type,
'index': task_id,
},
}
if rpc_layer is not None:
tf_config_dict['rpc_layer'] = rpc_layer
os.environ['TF_CONFIG'] = json.dumps(tf_config_dict)
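# For example, `_set_tf_config('worker', 1, {'worker': ['host0:2222',
# 'host1:2222']})` sets the TF_CONFIG environment variable to the JSON string
#   {"cluster": {"worker": ["host0:2222", "host1:2222"]},
#    "task": {"type": "worker", "index": 1}}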
def run(proc_func,
cluster_spec,
rpc_layer=None,
max_run_time=None,
grpc_fail_fast=None,
stream_stdout=True,
list_stdout=False,
timeout=_DEFAULT_TIMEOUT_SEC,
args=None,
kwargs=None): # pylint: disable=g-doc-args
"""Runs functions in local child processes.
It is a convenience method that creates a `MultiProcessRunner` object and
invokes `start` and `join` method. Please see these methods for detailed
documentations.
Returns:
A MultiProcessRunnerResult object returned from `MultiProcessRunner.join()`.
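  For example (a minimal sketch; `create_cluster_spec` comes from
  `multi_worker_test_base`):
  ```python
  result = multi_process_runner.run(
      proc_func,
      multi_worker_test_base.create_cluster_spec(num_workers=2))
  ```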
"""
runner = MultiProcessRunner(
proc_func,
cluster_spec,
rpc_layer,
max_run_time=max_run_time,
grpc_fail_fast=grpc_fail_fast,
stream_stdout=stream_stdout,
list_stdout=list_stdout,
args=args,
kwargs=kwargs)
runner.start()
return runner.join(timeout)
# This is set by MultiProcessRunner in worker processes.
_barrier = None
def barrier():
if _barrier is None:
raise ValueError(
        'barrier is not defined. It is likely because you are calling '
        'barrier() in the main process. barrier() can only be called in the '
        'subprocesses.'
)
return _barrier
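# Illustrative sketch (not part of the original module): code inside
# `proc_func` running in a subprocess can use `barrier()` to wait until every
# task in the cluster reaches the same point. The helper name is hypothetical.
def _example_barrier_usage():
  """Example only: blocks until all tasks in the cluster reach this point."""
  barrier().wait()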
def test_main():
"""Main function to be called within `__main__` of a test file."""
multi_process_lib.test_main()
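# Typical usage, per the error message raised in `MultiProcessRunner.__init__`
# (a sketch; the import path of this module may differ in your setup):
#
#   if __name__ == '__main__':
#     multi_process_runner.test_main()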
|
heatmap.py
|
import pygame
from library import *
import util
import math
import threading
def debugger(pot_mat):
    """Starts the heat map visualization on a background thread."""
    game = threading.Thread(target=heat_map, args=(pot_mat,))
    game.start()
def heat_map(pot_mat):
    """Draws a pygame heat map of `pot_mat`'s potential values until the window is closed."""
    EXPLORDING = False
pygame.init() #Start Pygame
SCALLER = 4
HEIGHT = pot_mat.height*SCALLER
screen = pygame.display.set_mode((pot_mat.width*SCALLER, HEIGHT)) #Start the screen
clock = pygame.time.Clock()
background_color = (0, 0, 0)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT: #The user closed the window!
running = False #Stop running
ground = []
potensial = []
intrest = []
explored = []
radius = 5
for y in range(pot_mat.height):
for x in range(pot_mat.width):
if pot_mat.is_valid(Point2DI(x,y)):
ground.append(pygame.Rect(x*SCALLER, HEIGHT - y*SCALLER,SCALLER,SCALLER))
if EXPLORDING and pot_mat.map_tools.is_explored(Point2DI(x,y)) and pot_mat.is_valid(Point2DI(x,y)):
explored.append(pygame.Rect(x*SCALLER, HEIGHT - y*SCALLER,SCALLER,SCALLER)) # map_tools.get_least_recently_seen_tile
if pot_mat.get_potensial(Point2DI(x,y)) > 0:
p = pot_mat.get_potensial(Point2DI(x,y))
                    intnes = min(max(int(math.sqrt(p)), 60), 255)  # clamp intensity to [60, 255]
potensial.append((pygame.Rect(x*SCALLER, HEIGHT - y*SCALLER,radius + SCALLER,radius + SCALLER), intnes))
screen.fill(background_color)
for rec in ground:
pygame.draw.rect(screen, (0,0, 255), rec)
if EXPLORDING:
for rec in explored:
pygame.draw.rect(screen, (0, 150, 150), rec)
for tup in potensial:
pygame.draw.rect(screen, (255, 255-tup[1], 255-tup[1]), tup[0])
for pos in pot_mat.debug_lst:
rec = pygame.Rect(pos.x*SCALLER, HEIGHT - pos.y*SCALLER,SCALLER,SCALLER)
pygame.draw.rect(screen, (0,255, 255), rec)
max_intrest_rec = pygame.Rect(pot_mat.intrest_point.x*SCALLER, HEIGHT - pot_mat.intrest_point.y*SCALLER,radius + SCALLER,radius + SCALLER)
pygame.draw.rect(screen, (255,255, 0), max_intrest_rec)
scout = pygame.Rect(pot_mat.scout_position.x*SCALLER, HEIGHT - pot_mat.scout_position.y*SCALLER,radius + SCALLER,radius + SCALLER)
pygame.draw.rect(screen, (0,255, 0), scout)
pygame.display.flip()
# Logic goes here
pygame.quit() #Close the window
|
test_client.py
|
import os
import socket
import threading
import time
import msgpack
import pytest
from pynats import NATSClient
from pynats.exceptions import NATSInvalidSchemeError, NATSReadSocketError
@pytest.fixture
def nats_plain_url():
return os.environ.get("NATS_PLAIN_URL", "nats://127.0.0.1:4222")
@pytest.fixture
def nats_tls_url():
return os.environ.get("NATS_TLS_URL", "tls://127.0.0.1:4224")
def test_connect_and_close(nats_plain_url):
client = NATSClient(nats_plain_url, socket_timeout=2)
client.connect()
client.ping()
client.close()
def test_connect_and_close_using_context_manager(nats_plain_url):
with NATSClient(nats_plain_url, socket_timeout=2) as client:
client.ping()
def test_connect_timeout():
client = NATSClient("nats://127.0.0.1:4223", socket_timeout=2)
with pytest.raises(socket.error):
client.connect()
def test_reconnect(nats_plain_url):
client = NATSClient(nats_plain_url, socket_timeout=2)
client.connect()
client.ping()
client.reconnect()
client.ping()
client.close()
def test_tls_connect(nats_tls_url):
client = NATSClient(nats_tls_url, socket_timeout=2)
client.connect()
client.ping()
client.close()
def test_invalid_scheme():
client = NATSClient("http://127.0.0.1:4224")
with pytest.raises(NATSInvalidSchemeError):
client.connect()
def test_subscribe_unsubscribe(nats_plain_url):
with NATSClient(nats_plain_url, socket_timeout=2) as client:
sub = client.subscribe(
"test-subject", callback=lambda x: x, queue="test-queue", max_messages=2
)
client.unsubscribe(sub)
def test_subscribe_timeout(nats_plain_url):
with NATSClient(nats_plain_url, socket_timeout=2) as client:
sub = client.subscribe(
"test-subject", callback=lambda x: x, queue="test-queue", max_messages=1
)
with pytest.raises(socket.timeout):
client.wait(count=1)
client.unsubscribe(sub)
def test_publish(nats_plain_url):
received = []
def worker():
with NATSClient(nats_plain_url, socket_timeout=2) as client:
def callback(message):
received.append(message)
client.subscribe(
"test-subject", callback=callback, queue="test-queue", max_messages=2
)
client.wait(count=2)
t = threading.Thread(target=worker)
t.start()
time.sleep(1)
with NATSClient(nats_plain_url, socket_timeout=2) as client:
# publish without payload
client.publish("test-subject")
# publish with payload
client.publish("test-subject", payload=b"test-payload")
t.join()
assert len(received) == 2
assert received[0].subject == "test-subject"
assert received[0].reply == ""
assert received[0].payload == b""
assert received[1].subject == "test-subject"
assert received[1].reply == ""
assert received[1].payload == b"test-payload"
def test_request(nats_plain_url):
def worker():
with NATSClient(nats_plain_url, socket_timeout=2) as client:
def callback(message):
client.publish(message.reply, payload=b"test-callback-payload")
client.subscribe(
"test-subject", callback=callback, queue="test-queue", max_messages=2
)
client.wait(count=2)
t = threading.Thread(target=worker)
t.start()
time.sleep(1)
with NATSClient(nats_plain_url, socket_timeout=2) as client:
# request without payload
resp = client.request("test-subject")
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert resp.payload == b"test-callback-payload"
# request with payload
resp = client.request("test-subject", payload=b"test-payload")
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert resp.payload == b"test-callback-payload"
t.join()
def test_request_msgpack(nats_plain_url):
def worker():
with NATSClient(nats_plain_url, socket_timeout=2) as client:
def callback(message):
client.publish(
message.reply,
payload=msgpack.packb(
{b"v": 3338} if message.payload else {b"v": 32}
),
)
client.subscribe(
"test-subject", callback=callback, queue="test-queue", max_messages=2
)
client.wait(count=2)
t = threading.Thread(target=worker)
t.start()
time.sleep(1)
with NATSClient(nats_plain_url, socket_timeout=2) as client:
# request without payload
resp = client.request("test-subject")
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert msgpack.unpackb(resp.payload) == {b"v": 32}
# request with payload
resp = client.request("test-subject", payload=msgpack.packb("test-payload"))
assert resp.subject.startswith("_INBOX.")
assert resp.reply == ""
assert msgpack.unpackb(resp.payload) == {b"v": 3338}
t.join()
def test_request_timeout(nats_plain_url):
with NATSClient(nats_plain_url, socket_timeout=2) as client:
with pytest.raises(socket.timeout):
client.request("test-subject")
def test_graceful_shutdown(nats_plain_url):
def worker(client, connected_event):
client.connect()
connected_event.set()
try:
client.wait()
except NATSReadSocketError:
assert True
except Exception:
raise AssertionError("unexpected Exception raised")
client = NATSClient(nats_plain_url)
connected_event = threading.Event()
thread = threading.Thread(target=worker, args=[client, connected_event])
thread.start()
assert connected_event.wait(5), "unable to connect"
client.close()
thread.join(5)
assert not thread.is_alive(), "thread did not finish"
|
test_client.py
|
import asyncio
import concurrent.futures
import copy
import datetime
import functools
import os
import re
import threading
import warnings
from base64 import b64decode, b64encode
from queue import Empty
from unittest.mock import MagicMock, Mock
import nbformat
import pytest
import xmltodict
from ipython_genutils.py3compat import string_types
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from nbconvert.filters import strip_ansi
from nbformat import NotebookNode
from testpath import modified_env
from traitlets import TraitError
from .. import NotebookClient, execute
from ..exceptions import CellExecutionError
from .base import NBClientTestsBase
addr_pat = re.compile(r'0x[0-9a-f]{7,9}')
ipython_input_pat = re.compile(r'<ipython-input-\d+-[0-9a-f]+>')
current_dir = os.path.dirname(__file__)
class AsyncMock(Mock):
pass
def make_async(mock_value):
async def _():
return mock_value
return _()
def normalize_base64(b64_text):
# if it's base64, pass it through b64 decode/encode to avoid
# equivalent values from being considered unequal
try:
return b64encode(b64decode(b64_text.encode('ascii'))).decode('ascii')
except (ValueError, TypeError):
return b64_text
def run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = executor.execute()
return input_nb, output_nb
def run_notebook_wrapper(args):
# since concurrent.futures.ProcessPoolExecutor doesn't have starmap,
# we need to unpack the arguments
return run_notebook(*args)
async def async_run_notebook(filename, opts, resources=None):
"""Loads and runs a notebook, returning both the version prior to
running it and the version after running it.
"""
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
if resources:
opts = {'resources': resources, **opts}
executor = NotebookClient(cleaned_input_nb, **opts)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
output_nb = await executor.async_execute()
return input_nb, output_nb
def prepare_cell_mocks(*messages, reply_msg=None):
"""
    This function prepares an executor object which has a fake kernel client
to mock the messages sent over zeromq. The mock kernel client will return
the messages passed into this wrapper back from ``preproc.kc.iopub_channel.get_msg``
callbacks. It also appends a kernel idle message to the end of messages.
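    For example (mirroring the tests below), a decorated test receives the
    executor, the mocked cell and the iopub message mock:
        @prepare_cell_mocks({
            'msg_type': 'stream',
            'header': {'msg_type': 'stream'},
            'content': {'name': 'stdout', 'text': 'foo'},
        })
        def test_stream_message(self, executor, cell_mock, message_mock):
            executor.execute_cell(cell_mock, 0)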
"""
parent_id = 'fake_id'
messages = list(messages)
# Always terminate messages with an idle to exit the loop
messages.append({'msg_type': 'status', 'content': {'execution_state': 'idle'}})
def shell_channel_message_mock():
# Return the message generator for
# self.kc.shell_channel.get_msg => {'parent_header': {'msg_id': parent_id}}
return AsyncMock(
return_value=make_async(
NBClientTestsBase.merge_dicts(
{
'parent_header': {'msg_id': parent_id},
'content': {'status': 'ok', 'execution_count': 1},
},
reply_msg or {},
)
)
)
def iopub_messages_mock():
# Return the message generator for
# self.kc.iopub_channel.get_msg => messages[i]
return AsyncMock(
side_effect=[
# Default the parent_header so mocks don't need to include this
make_async(
NBClientTestsBase.merge_dicts({'parent_header': {'msg_id': parent_id}}, msg)
)
for msg in messages
]
)
def prepared_wrapper(func):
@functools.wraps(func)
def test_mock_wrapper(self):
"""
This inner function wrapper populates the executor object with
the fake kernel client. This client has its iopub and shell
channels mocked so as to fake the setup handshake and return
the messages passed into prepare_cell_mocks as the execute_cell loop
processes them.
"""
cell_mock = NotebookNode(
source='"foo" = "bar"', metadata={}, cell_type='code', outputs=[]
)
executor = NotebookClient({})
executor.nb = {'cells': [cell_mock]}
# self.kc.iopub_channel.get_msg => message_mock.side_effect[i]
message_mock = iopub_messages_mock()
executor.kc = MagicMock(
iopub_channel=MagicMock(get_msg=message_mock),
shell_channel=MagicMock(get_msg=shell_channel_message_mock()),
execute=MagicMock(return_value=parent_id),
is_alive=MagicMock(return_value=make_async(True)),
)
executor.parent_id = parent_id
return func(self, executor, cell_mock, message_mock)
return test_mock_wrapper
return prepared_wrapper
def normalize_output(output):
"""
Normalizes outputs for comparison.
"""
output = dict(output)
if 'metadata' in output:
del output['metadata']
if 'text' in output:
output['text'] = re.sub(addr_pat, '<HEXADDR>', output['text'])
if 'text/plain' in output.get('data', {}):
output['data']['text/plain'] = re.sub(addr_pat, '<HEXADDR>', output['data']['text/plain'])
if 'application/vnd.jupyter.widget-view+json' in output.get('data', {}):
output['data']['application/vnd.jupyter.widget-view+json']['model_id'] = '<MODEL_ID>'
if 'image/svg+xml' in output.get('data', {}):
output['data']['image/svg+xml'] = xmltodict.parse(output['data']['image/svg+xml'])
for key, value in output.get('data', {}).items():
if isinstance(value, string_types):
output['data'][key] = normalize_base64(value)
if 'traceback' in output:
tb = [
re.sub(ipython_input_pat, '<IPY-INPUT>', strip_ansi(line))
for line in output['traceback']
]
output['traceback'] = tb
return output
def assert_notebooks_equal(expected, actual):
expected_cells = expected['cells']
actual_cells = actual['cells']
assert len(expected_cells) == len(actual_cells)
for expected_cell, actual_cell in zip(expected_cells, actual_cells):
# Uncomment these to help debug test failures better
# from pprint import pprint
# pprint(expected_cell)
# pprint(actual_cell)
expected_outputs = expected_cell.get('outputs', [])
actual_outputs = actual_cell.get('outputs', [])
normalized_expected_outputs = list(map(normalize_output, expected_outputs))
normalized_actual_outputs = list(map(normalize_output, actual_outputs))
assert normalized_expected_outputs == normalized_actual_outputs
expected_execution_count = expected_cell.get('execution_count', None)
actual_execution_count = actual_cell.get('execution_count', None)
assert expected_execution_count == actual_execution_count
def notebook_resources():
"""
Prepare a notebook resources dictionary for executing test
notebooks in the ``files`` folder.
"""
return {'metadata': {'path': os.path.join(current_dir, 'files')}}
@pytest.mark.parametrize(
["input_name", "opts"],
[
("Other Comms.ipynb", dict(kernel_name="python")),
("Clear Output.ipynb", dict(kernel_name="python")),
("Empty Cell.ipynb", dict(kernel_name="python")),
("Factorials.ipynb", dict(kernel_name="python")),
("HelloWorld.ipynb", dict(kernel_name="python")),
("Inline Image.ipynb", dict(kernel_name="python")),
(
"Interrupt.ipynb",
dict(kernel_name="python", timeout=1, interrupt_on_timeout=True, allow_errors=True),
),
("JupyterWidgets.ipynb", dict(kernel_name="python")),
("Skip Exceptions with Cell Tags.ipynb", dict(kernel_name="python")),
("Skip Exceptions.ipynb", dict(kernel_name="python", allow_errors=True)),
("Skip Execution with Cell Tag.ipynb", dict(kernel_name="python")),
("SVG.ipynb", dict(kernel_name="python")),
("Unicode.ipynb", dict(kernel_name="python")),
("UnicodePy3.ipynb", dict(kernel_name="python")),
("update-display-id.ipynb", dict(kernel_name="python")),
("Check History in Memory.ipynb", dict(kernel_name="python")),
],
)
def test_run_all_notebooks(input_name, opts):
"""Runs a series of test notebooks and compares them to their actual output"""
input_file = os.path.join(current_dir, 'files', input_name)
input_nb, output_nb = run_notebook(input_file, opts, notebook_resources())
assert_notebooks_equal(input_nb, output_nb)
def test_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
threads = [
threading.Thread(target=run_notebook, args=(input_file.format(label=label), opts, res))
for label in ("A", "B")
]
[t.start() for t in threads]
[t.join(timeout=2) for t in threads]
captured = capfd.readouterr()
assert captured.err == ""
def test_many_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
with warnings.catch_warnings():
# suppress warning from jupyter_client's deprecated cleanup()
warnings.simplefilter(action='ignore', category=FutureWarning)
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
with concurrent.futures.ProcessPoolExecutor(max_workers=2) as executor:
executor.map(run_notebook_wrapper, [(input_file, opts, res) for i in range(8)])
captured = capfd.readouterr()
assert captured.err == ""
def test_async_parallel_notebooks(capfd, tmpdir):
"""Two notebooks should be able to be run simultaneously without problems.
The two notebooks spawned here use the filesystem to check that the other notebook
wrote to the filesystem."""
opts = dict(kernel_name="python")
input_name = "Parallel Execute {label}.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
with modified_env({"NBEXECUTE_TEST_PARALLEL_TMPDIR": str(tmpdir)}):
tasks = [
async_run_notebook(input_file.format(label=label), opts, res) for label in ("A", "B")
]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
captured = capfd.readouterr()
assert captured.err == ""
def test_many_async_parallel_notebooks(capfd):
"""Ensure that when many IPython kernels are run in parallel, nothing awful happens.
Specifically, many IPython kernels when run simultaneously would encounter errors
due to using the same SQLite history database.
"""
opts = dict(kernel_name="python", timeout=5)
input_name = "HelloWorld.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = NBClientTestsBase().build_resources()
res["metadata"]["path"] = os.path.join(current_dir, "files")
# run once, to trigger creating the original context
run_notebook(input_file, opts, res)
tasks = [async_run_notebook(input_file, opts, res) for i in range(4)]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(*tasks))
captured = capfd.readouterr()
assert captured.err == ""
def test_execution_timing():
"""Compare the execution timing information stored in the cell with the
actual time it took to run the cell. Also check for the cell timing string
format."""
opts = dict(kernel_name="python")
input_name = "Sleep1s.ipynb"
input_file = os.path.join(current_dir, "files", input_name)
res = notebook_resources()
input_nb, output_nb = run_notebook(input_file, opts, res)
def get_time_from_str(s):
time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
return datetime.datetime.strptime(s, time_format)
execution_timing = output_nb['cells'][1]['metadata']['execution']
status_busy = get_time_from_str(execution_timing['iopub.status.busy'])
execute_input = get_time_from_str(execution_timing['iopub.execute_input'])
execute_reply = get_time_from_str(execution_timing['shell.execute_reply'])
status_idle = get_time_from_str(execution_timing['iopub.status.idle'])
cell_start = get_time_from_str(output_nb['cells'][2]['outputs'][0]['text'])
cell_end = get_time_from_str(output_nb['cells'][3]['outputs'][0]['text'])
delta = datetime.timedelta(milliseconds=100)
assert status_busy - cell_start < delta
assert execute_input - cell_start < delta
assert execute_reply - cell_end < delta
assert status_idle - cell_end < delta
def test_synchronous_setup_kernel():
nb = nbformat.v4.new_notebook()
executor = NotebookClient(nb)
with executor.setup_kernel():
        # Prove it initialized the client
assert executor.kc is not None
# Prove it removed the client (and hopefully cleaned up)
assert executor.kc is None
def test_startnewkernel_with_kernelmanager():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
executor.start_new_kernel()
kc = executor.start_new_kernel_client()
    # prove it initialized the client
assert kc is not None
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
def test_start_new_kernel_history_file_setting():
nb = nbformat.v4.new_notebook()
km = KernelManager()
executor = NotebookClient(nb, km=km)
kc = km.client()
# Should start empty
assert executor.extra_arguments == []
# Should assign memory setting for ipykernel
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# Should not add a second hist_file assignment
executor.start_new_kernel()
assert executor.extra_arguments == ['--HistoryManager.hist_file=:memory:']
# since we are not using the setup_kernel context manager,
# cleanup has to be done manually
kc.shutdown()
km.cleanup_resources()
kc.stop_channels()
class TestExecute(NBClientTestsBase):
"""Contains test functions for execute.py"""
maxDiff = None
def test_constructor(self):
NotebookClient({})
def test_populate_language_info(self):
nb = nbformat.v4.new_notebook() # Certainly has no language_info.
executor = NotebookClient(nb, kernel_name="python")
nb = executor.execute()
assert 'language_info' in nb.metadata
def test_empty_path(self):
"""Can the kernel be started when the path is empty?"""
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
res = self.build_resources()
res['metadata']['path'] = ''
input_nb, output_nb = run_notebook(filename, {}, res)
assert_notebooks_equal(input_nb, output_nb)
@pytest.mark.xfail(
"python3" not in KernelSpecManager().find_kernel_specs(),
reason="requires a python3 kernelspec",
)
def test_empty_kernel_name(self):
"""Can kernel in nb metadata be found when an empty string is passed?
Note: this pattern should be discouraged in practice.
Passing in no kernel_name to NotebookClient is recommended instead.
"""
filename = os.path.join(current_dir, 'files', 'UnicodePy3.ipynb')
res = self.build_resources()
input_nb, output_nb = run_notebook(filename, {"kernel_name": ""}, res)
assert_notebooks_equal(input_nb, output_nb)
with pytest.raises(TraitError):
input_nb, output_nb = run_notebook(filename, {"kernel_name": None}, res)
def test_disable_stdin(self):
"""Test disabling standard input"""
filename = os.path.join(current_dir, 'files', 'Disable Stdin.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
input_nb, output_nb = run_notebook(filename, dict(allow_errors=True), res)
# We need to special-case this particular notebook, because the
# traceback contains machine-specific stuff like where IPython
# is installed. It is sufficient here to just check that an error
# was thrown, and that it was a StdinNotImplementedError
self.assertEqual(len(output_nb['cells']), 1)
self.assertEqual(len(output_nb['cells'][0]['outputs']), 1)
output = output_nb['cells'][0]['outputs'][0]
self.assertEqual(output['output_type'], 'error')
self.assertEqual(output['ename'], 'StdinNotImplementedError')
self.assertEqual(
output['evalue'],
'raw_input was called, but this frontend does not support input requests.',
)
def test_timeout(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(TimeoutError) as err:
run_notebook(filename, dict(timeout=1), res)
self.assertEqual(
str(err.value.args[0]),
"""A cell timed out while it was being executed, after 1 seconds.
The message was: Cell execution timed out.
Here is a preview of the cell contents:
-------------------
while True: continue
-------------------
""",
)
def test_timeout_func(self):
"""Check that an error is raised when a computation times out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
def timeout_func(source):
return 10
with pytest.raises(TimeoutError):
run_notebook(filename, dict(timeout_func=timeout_func), res)
def test_kernel_death_after_timeout(self):
"""Check that an error is raised when the kernel is_alive is false after a cell timed out"""
filename = os.path.join(current_dir, 'files', 'Interrupt.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
executor = NotebookClient(input_nb, timeout=1)
with pytest.raises(TimeoutError):
executor.execute()
km = executor.create_kernel_manager()
async def is_alive():
return False
km.is_alive = is_alive
# Will be a RuntimeError or subclass DeadKernelError depending
# on if jupyter_client or nbconvert catches the dead client first
with pytest.raises(RuntimeError):
input_nb, output_nb = executor.execute()
def test_kernel_death_during_execution(self):
"""Check that an error is raised when the kernel is_alive is false during a cell
execution.
"""
filename = os.path.join(current_dir, 'files', 'Autokill.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(input_nb)
with pytest.raises(RuntimeError):
executor.execute()
def test_allow_errors(self):
"""
Check that conversion halts if ``allow_errors`` is False.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(allow_errors=False), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_force_raise_errors(self):
"""
Check that conversion halts if the ``force_raise_errors`` traitlet on
NotebookClient is set to True.
"""
filename = os.path.join(current_dir, 'files', 'Skip Exceptions with Cell Tags.ipynb')
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(filename)
with pytest.raises(CellExecutionError) as exc:
run_notebook(filename, dict(force_raise_errors=True), res)
self.assertIsInstance(str(exc.value), str)
assert "# üñîçø∂é" in str(exc.value)
def test_reset_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, a new one must have been created
kc = executor.kc
assert kc is not None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client, the previously created one must have been reused
assert kc == executor.kc
executor.execute(reset_kc=True, cleanup_kc=False)
# we asked to reset the kernel client, the previous one must have been cleaned up,
# a new one must have been created
assert kc != executor.kc
def test_cleanup_kernel_client(self):
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
executor = NotebookClient(
input_nb,
resources=self.build_resources(),
)
executor.execute()
# we asked to cleanup the kernel client (default is True)
assert executor.kc is None
executor.execute(cleanup_kc=False)
# we didn't ask to reset the kernel client
# a new one must have been created and should still be available
assert executor.kc is not None
def test_custom_kernel_manager(self):
from .fake_kernelmanager import FakeCustomKernelManager
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
cleaned_input_nb = copy.deepcopy(input_nb)
for cell in cleaned_input_nb.cells:
if 'execution_count' in cell:
del cell['execution_count']
cell['outputs'] = []
executor = NotebookClient(
cleaned_input_nb,
resources=self.build_resources(),
kernel_manager_class=FakeCustomKernelManager,
)
# Override terminal size to standardise traceback format
with modified_env({'COLUMNS': '80', 'LINES': '24'}):
executor.execute()
expected = FakeCustomKernelManager.expected_methods.items()
for method, call_count in expected:
            self.assertNotEqual(call_count, 0, f'{method} was not called')
def test_process_message_wrapper(self):
outputs = []
class WrappedPreProc(NotebookClient):
def process_message(self, msg, cell, cell_index):
result = super().process_message(msg, cell, cell_index)
if result:
outputs.append(result)
return result
current_dir = os.path.dirname(__file__)
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
wpp = WrappedPreProc(input_nb)
executed = wpp.execute()
assert outputs == [{'name': 'stdout', 'output_type': 'stream', 'text': 'Hello World\n'}]
assert_notebooks_equal(original, executed)
def test_execute_function(self):
# Test the execute() convenience API
filename = os.path.join(current_dir, 'files', 'HelloWorld.ipynb')
with open(filename) as f:
input_nb = nbformat.read(f, 4)
original = copy.deepcopy(input_nb)
executed = execute(original, os.path.dirname(filename))
assert_notebooks_equal(original, executed)
def test_widgets(self):
"""Runs a test notebook with widgets and checks the widget state is saved."""
input_file = os.path.join(current_dir, 'files', 'JupyterWidgets.ipynb')
opts = dict(kernel_name="python")
res = self.build_resources()
res['metadata']['path'] = os.path.dirname(input_file)
input_nb, output_nb = run_notebook(input_file, opts, res)
output_data = [
output.get('data', {}) for cell in output_nb['cells'] for output in cell['outputs']
]
model_ids = [
data['application/vnd.jupyter.widget-view+json']['model_id']
for data in output_data
if 'application/vnd.jupyter.widget-view+json' in data
]
wdata = output_nb['metadata']['widgets']['application/vnd.jupyter.widget-state+json']
for k in model_ids:
d = wdata['state'][k]
assert 'model_name' in d
assert 'model_module' in d
assert 'state' in d
assert 'version_major' in wdata
assert 'version_minor' in wdata
class TestRunCell(NBClientTestsBase):
"""Contains test functions for NotebookClient.execute_cell"""
@prepare_cell_mocks()
def test_idle_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# Just the exit message should be fetched
assert message_mock.call_count == 1
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'parent_header': {'msg_id': 'wrong_parent'},
'content': {'name': 'stdout', 'text': 'foo'},
}
)
def test_message_for_wrong_parent(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An ignored stream followed by an idle
assert message_mock.call_count == 2
# Ensure no output was written
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'status',
'header': {'msg_type': 'status'},
'content': {'execution_state': 'busy'},
}
)
def test_busy_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One busy message, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_deadline_exec_reply(self, executor, cell_mock, message_mock):
# exec_reply is never received, so we expect to hit the timeout.
async def get_msg(timeout):
await asyncio.sleep(timeout)
raise Empty
executor.kc.shell_channel.get_msg = get_msg
executor.timeout = 1
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks()
def test_deadline_iopub(self, executor, cell_mock, message_mock):
# The shell_channel will complete, so we expect only to hit the iopub timeout.
message_mock.side_effect = Empty()
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_eventual_deadline_iopub(self, executor, cell_mock, message_mock):
# Process a few messages before raising a timeout from iopub
def message_seq(messages):
yield from messages
while True:
yield Empty()
message_mock.side_effect = message_seq(list(message_mock.side_effect)[:-1])
executor.kc.shell_channel.get_msg = Mock(
return_value=make_async({'parent_header': {'msg_id': executor.parent_id}})
)
executor.raise_on_iopub_timeout = True
with pytest.raises(TimeoutError):
executor.execute_cell(cell_mock, 0)
assert message_mock.call_count >= 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{'msg_type': 'execute_input', 'header': {'msg_type': 'execute_input'}, 'content': {}}
)
def test_execute_input_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# One ignored execute_input, followed by an idle
assert message_mock.call_count == 2
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_stream_messages(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # A stdout then a stderr stream followed by an idle
assert message_mock.call_count == 3
# Ensure the output was captured
self.assertListEqual(
cell_mock.outputs,
[
{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'},
{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'},
],
)
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'execute_reply'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{'msg_type': 'clear_output', 'header': {'msg_type': 'clear_output'}, 'content': {}},
)
def test_clear_output_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Ensure the output was cleared
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
)
def test_clear_output_wait_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A stream, followed by a clear, and then an idle
assert message_mock.call_count == 3
# Should be true without another message to trigger the clear
self.assertTrue(executor.clear_before_next_output)
# Ensure the output wasn't cleared yet
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stderr', 'text': 'bar'},
},
)
def test_clear_output_wait_then_message_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # A stdout stream, followed by a wait clear, a stderr stream, and then an idle
assert message_mock.call_count == 4
# Should be false after the stderr message
assert not executor.clear_before_next_output
        # Ensure the first output was cleared before the stderr output was captured
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stderr', 'text': 'bar'}]
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'name': 'stdout', 'text': 'foo'},
},
{
'msg_type': 'clear_output',
'header': {'msg_type': 'clear_output'},
'content': {'wait': True},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
},
)
def test_clear_output_wait_then_update_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # A stdout stream, followed by a wait clear, an update_display_data, and then an idle
        assert message_mock.call_count == 4
        # Should still be True because update_display_data does not produce a new output
assert executor.clear_before_next_output
# Ensure the output wasn't cleared yet because update_display doesn't add outputs
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
'content': {'execution_count': 42},
}
)
def test_execution_count_message_ignored_on_override(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0, execution_count=21)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 21
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'stream',
'header': {'msg_type': 'stream'},
'content': {'execution_count': 42, 'name': 'stdout', 'text': 'foo'},
}
)
def test_execution_count_with_stream_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An execution count followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should also consume the message stream
assert cell_mock.outputs == [{'output_type': 'stream', 'name': 'stdout', 'text': 'foo'}]
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {'comm_id': 'foobar', 'data': {'state': {'foo': 'bar'}}},
}
)
def test_widget_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message without buffer info followed by an idle
assert message_mock.call_count == 2
self.assertEqual(executor.widget_state, {'foobar': {'foo': 'bar'}})
# Buffers should still be empty
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'buffers': [b'123'],
'content': {
'comm_id': 'foobar',
'data': {'state': {'foo': 'bar'}, 'buffer_paths': ['path']},
},
}
)
def test_widget_comm_buffer_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A comm message with buffer info followed by an idle
assert message_mock.call_count == 2
assert executor.widget_state == {'foobar': {'foo': 'bar'}}
assert executor.widget_buffers == {
'foobar': [{'data': 'MTIz', 'encoding': 'base64', 'path': 'path'}]
}
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'comm',
'header': {'msg_type': 'comm'},
'content': {
'comm_id': 'foobar',
# No 'state'
'data': {'foo': 'bar'},
},
}
)
def test_unknown_comm_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An unknown comm message followed by an idle
assert message_mock.call_count == 2
# Widget states should be empty as the message has the wrong shape
assert not executor.widget_state
assert not executor.widget_buffers
# Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # An execute_result followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'execute_result',
'header': {'msg_type': 'execute_result'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
},
}
)
def test_execute_result_with_display_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # An execute_result followed by an idle
assert message_mock.call_count == 2
assert cell_mock.execution_count == 42
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'execute_result',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
'execution_count': 42,
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
}
)
def test_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an idle
assert message_mock.call_count == 2
# Should generate an associated message
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar_other'},
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
},
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_display_data_same_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # Three displays followed by an idle
        assert message_mock.call_count == 4
        # The first and third outputs share a display id, so both should hold the latest data
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo_other': 'metabar_other'},
'data': {'foo': 'bar_other'},
},
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {'metadata': {'metafoo': 'metabar'}, 'data': {'foo': 'bar'}},
}
)
def test_update_display_data_without_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An update followed by an idle
assert message_mock.call_count == 2
# Display updates don't create any outputs
assert cell_mock.outputs == []
# No display id was provided
assert not executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar2'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_mismatch_id_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
        # A display, an update for a different display id, and then an idle
        assert message_mock.call_count == 3
        # The update targets an unknown display id, so the original display output is unchanged
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'display_data',
'header': {'msg_type': 'display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo': 'metabar'},
'data': {'foo': 'bar'},
},
},
{
'msg_type': 'update_display_data',
'header': {'msg_type': 'update_display_data'},
'content': {
'transient': {'display_id': 'foobar'},
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
},
},
)
def test_update_display_data_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# A display followed by an update then an idle
assert message_mock.call_count == 3
# Original output should be manipulated
assert cell_mock.outputs == [
{
'output_type': 'display_data',
'metadata': {'metafoo2': 'metabar2'},
'data': {'foo': 'bar2', 'baz': 'foobarbaz'},
}
]
assert 'foobar' in executor._display_id_map
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
}
)
def test_error_message(self, executor, cell_mock, message_mock):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
},
)
def test_error_and_error_status_messages(self, executor, cell_mock, message_mock):
with self.assertRaises(CellExecutionError):
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Cell outputs should still be copied
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
{
'msg_type': 'error',
'header': {'msg_type': 'error'},
'content': {'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']},
},
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# OK
'content': {'status': 'ok'},
},
)
def test_error_message_only(self, executor, cell_mock, message_mock):
# Should NOT raise
executor.execute_cell(cell_mock, 0)
# An error followed by an idle
assert message_mock.call_count == 2
# Should also consume the message stream
assert cell_mock.outputs == [
{'output_type': 'error', 'ename': 'foo', 'evalue': 'bar', 'traceback': ['Boom']}
]
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_allow_errors(self, executor, cell_mock, message_mock):
executor.allow_errors = True
# Should NOT raise
executor.execute_cell(cell_mock, 0)
        # Just the idle message should be fetched
        assert message_mock.call_count == 1
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error', 'ename': 'NotImplementedError'},
}
)
def test_allow_error_names(self, executor, cell_mock, message_mock):
executor.allow_error_names = ['NotImplementedError']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
        # Just the idle message should be fetched
        assert message_mock.call_count == 1
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_raises_exception_tag(self, executor, cell_mock, message_mock):
cell_mock.metadata['tags'] = ['raises-exception']
# Should NOT raise
executor.execute_cell(cell_mock, 0)
        # Just the idle message should be fetched
        assert message_mock.call_count == 1
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_non_code_cell(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(source='"foo" = "bar"', metadata={}, cell_type='raw', outputs=[])
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
        # Nothing should be fetched for a non-code cell
        assert message_mock.call_count == 0
        # Ensure no outputs were generated
assert cell_mock.outputs == []
@prepare_cell_mocks(
reply_msg={
'msg_type': 'execute_reply',
'header': {'msg_type': 'execute_reply'},
# ERROR
'content': {'status': 'error'},
}
)
def test_no_source(self, executor, cell_mock, message_mock):
cell_mock = NotebookNode(
# Stripped source is empty
source=' ',
metadata={},
cell_type='code',
outputs=[],
)
# Should NOT raise nor execute any code
executor.execute_cell(cell_mock, 0)
        # Nothing should be fetched for a cell with empty source
        assert message_mock.call_count == 0
        # Ensure no outputs were generated
assert cell_mock.outputs == []
|
VirtualSmartcard.py
|
#
# Copyright (C) 2011 Frank Morgner, Dominik Oepen
#
# This file is part of virtualsmartcard.
#
# virtualsmartcard is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# virtualsmartcard is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# virtualsmartcard. If not, see <http://www.gnu.org/licenses/>.
#
import atexit
import errno
import logging
import socket
import struct
import sys
from virtualsmartcard.ConstantDefinitions import MAX_EXTENDED_LE, MAX_SHORT_LE
from virtualsmartcard.SWutils import SwError, SW
from virtualsmartcard.SmartcardFilesystem import make_property
from virtualsmartcard.utils import C_APDU, R_APDU, hexdump, inttostring
from virtualsmartcard.CardGenerator import CardGenerator
# ADDED CODE SECTION IN ORDER TO INTEGRATE ESP32 TO GNUPG STARTS HERE
import SocketServer, time, threading
from socket import error as SocketError
# ADDED CODE SECTION ENDS HERE
class SmartcardOS(object):
"""Base class for a smart card OS"""
def getATR(self):
"""Returns the ATR of the card as string of characters"""
return ""
def powerUp(self):
"""Powers up the card"""
pass
def powerDown(self):
"""Powers down the card"""
pass
def reset(self):
"""Performs a warm reset of the card (no power down)"""
pass
def execute(self, msg):
"""Returns response to the given APDU as string of characters
:param msg: the APDU as string of characters
"""
return ""
class Iso7816OS(SmartcardOS):
mf = make_property("mf", "master file")
SAM = make_property("SAM", "secure access module")
def __init__(self, mf, sam, ins2handler=None, extended_length=False):
self.mf = mf
self.SAM = sam
if not ins2handler:
self.ins2handler = {
0x0c: self.mf.eraseRecord,
0x0e: self.mf.eraseBinaryPlain,
0x0f: self.mf.eraseBinaryEncapsulated,
0x2a: self.SAM.perform_security_operation,
0x20: self.SAM.verify,
0x22: self.SAM.manage_security_environment,
0x24: self.SAM.change_reference_data,
0x46: self.SAM.generate_public_key_pair,
0x82: self.SAM.external_authenticate,
0x84: self.SAM.get_challenge,
0x88: self.SAM.internal_authenticate,
0xa0: self.mf.searchBinaryPlain,
0xa1: self.mf.searchBinaryEncapsulated,
0xa4: self.mf.selectFile,
0xb0: self.mf.readBinaryPlain,
0xb1: self.mf.readBinaryEncapsulated,
0xb2: self.mf.readRecordPlain,
0xb3: self.mf.readRecordEncapsulated,
0xc0: self.getResponse,
0xca: self.mf.getDataPlain,
0xcb: self.mf.getDataEncapsulated,
0xd0: self.mf.writeBinaryPlain,
0xd1: self.mf.writeBinaryEncapsulated,
0xd2: self.mf.writeRecord,
0xd6: self.mf.updateBinaryPlain,
0xd7: self.mf.updateBinaryEncapsulated,
0xda: self.mf.putDataPlain,
0xdb: self.mf.putDataEncapsulated,
0xdc: self.mf.updateRecordPlain,
0xdd: self.mf.updateRecordEncapsulated,
0xe0: self.mf.createFile,
0xe2: self.mf.appendRecord,
0xe4: self.mf.deleteFile,
}
else:
self.ins2handler = ins2handler
if extended_length:
self.maxle = MAX_EXTENDED_LE
else:
self.maxle = MAX_SHORT_LE
self.lastCommandOffcut = ""
self.lastCommandSW = SW["NORMAL"]
el = extended_length # only needed to keep following line short
tsft = Iso7816OS.makeThirdSoftwareFunctionTable(extendedLe=el)
card_capabilities = self.mf.firstSFT + self.mf.secondSFT + tsft
self.atr = Iso7816OS.makeATR(T=1, directConvention=True, TA1=0x13,
histChars=chr(0x80) +
chr(0x70 + len(card_capabilities)) +
card_capabilities)
def getATR(self):
return self.atr
@staticmethod
def makeATR(**args):
"""Calculate Answer to Reset (ATR) and returns the bitstring.
- directConvention (bool): Whether to use direct convention or
inverse convention.
- TAi, TBi, TCi (optional): Value between 0 and 0xff. Interface
Characters (for meaning see ISO 7816-3). Note that
if no transmission protocol is given, it is
automatically selected with T=max{j-1|TAj in args
OR TBj in args OR TCj in args}.
- T (optional): Value between 0 and 15. Transmission Protocol.
Note that if T is set, TAi/TBi/TCi for i>T are
omitted.
- histChars (optional): Bitstring with 0 <= len(histChars) <= 15.
Historical Characters T1 to T15 (for
meaning see ISO 7816-4).
T0, TDi and TCK are automatically calculated.
"""
# first byte TS
if args["directConvention"]:
atr = "\x3b"
else:
atr = "\x3f"
if "T" in args:
T = args["T"]
else:
T = 0
# find maximum i of TAi/TBi/TCi in args
maxTD = 0
i = 15
while i > 0:
if ("TA" + str(i) in args or "TB" + str(i) in args or
"TC" + str(i) in args):
maxTD = i-1
break
i -= 1
if maxTD == 0 and T > 0:
maxTD = 2
# insert TDi into args (TD0 is actually T0)
for i in range(0, maxTD+1):
if i == 0 and "histChars" in args:
args["TD0"] = len(args["histChars"])
else:
args["TD"+str(i)] = T
if i < maxTD:
args["TD"+str(i)] |= 1 << 7
if "TA" + str(i+1) in args:
args["TD"+str(i)] |= 1 << 4
if "TB" + str(i+1) in args:
args["TD"+str(i)] |= 1 << 5
if "TC" + str(i+1) in args:
args["TD"+str(i)] |= 1 << 6
# initialize checksum
TCK = 0
# add TDi, TAi, TBi and TCi to ATR (TD0 is actually T0)
for i in range(0, maxTD+1):
atr = atr + "%c" % args["TD" + str(i)]
TCK ^= args["TD" + str(i)]
for j in ["A", "B", "C"]:
if "T" + j + str(i+1) in args:
atr += "%c" % args["T" + j + str(i+1)]
# calculate checksum for all bytes from T0 to the end
TCK ^= args["T" + j + str(i+1)]
# add historical characters
if "histChars" in args:
atr += args["histChars"]
for i in range(0, len(args["histChars"])):
TCK ^= ord(args["histChars"][i])
# checksum is omitted for T=0
if T > 0:
atr += "%c" % TCK
return atr
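    # Illustrative example (added; not part of the original source): with direct
    # convention, no interface characters and a single historical byte, makeATR
    # emits TS, T0 and the historical byte, and omits TCK because T=0:
    #
    #   Iso7816OS.makeATR(directConvention=True, histChars="\x80")
    #   # -> "\x3b\x01\x80"  (TS=0x3B, T0=0x01: no TA1..TD1, one historical byte)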
@staticmethod
def makeThirdSoftwareFunctionTable(commandChainging=False,
extendedLe=False,
assignLogicalChannel=0,
maximumChannels=0):
"""
Returns a byte according to the third software function table from the
historical bytes of the card capabilities.
"""
tsft = 0
if commandChainging:
tsft |= 1 << 7
if extendedLe:
tsft |= 1 << 6
if assignLogicalChannel:
if not (0 <= assignLogicalChannel and assignLogicalChannel <= 3):
raise ValueError
tsft |= assignLogicalChannel << 3
if maximumChannels:
if not (0 <= maximumChannels and maximumChannels <= 7):
raise ValueError
tsft |= maximumChannels
return inttostring(tsft)
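    # Illustrative example (added): advertising extended Le/Lc support ORs 0x40
    # into the byte, so makeThirdSoftwareFunctionTable(extendedLe=True) returns
    # inttostring(0x40), i.e. the single byte "\x40".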
def formatResult(self, seekable, le, data, sw, sm):
if not seekable:
self.lastCommandOffcut = data[le:]
l = len(self.lastCommandOffcut)
if l == 0:
self.lastCommandSW = SW["NORMAL"]
else:
self.lastCommandSW = sw
sw = SW["NORMAL_REST"] + min(0xff, l)
else:
if le > len(data):
sw = SW["WARN_EOFBEFORENEREAD"]
if le is not None:
result = data[:le]
else:
result = data[:0]
if sm:
sw, result = self.SAM.protect_result(sw, result)
return R_APDU(result, inttostring(sw)).render()
@staticmethod
def seekable(ins):
if ins in [0xb0, 0xb1, 0xd0, 0xd1, 0xd6, 0xd7, 0xa0, 0xa1, 0xb2, 0xb3,
0xdc, 0xdd]:
return True
else:
return False
def getResponse(self, p1, p2, data):
if not (p1 == 0 and p2 == 0):
raise SwError(SW["ERR_INCORRECTP1P2"])
return self.lastCommandSW, self.lastCommandOffcut
def execute(self, msg):
def notImplemented(*argz, **args):
"""
If an application tries to use a function which is not implemented
by the currently emulated smartcard we raise an exception which
should result in an appropriate response APDU being passed to the
application.
"""
raise SwError(SW["ERR_INSNOTSUPPORTED"])
try:
c = C_APDU(msg)
except ValueError as e:
logging.warning(str(e))
return self.formatResult(False, 0, "",
SW["ERR_INCORRECTPARAMETERS"], False)
logging.info("Parsed APDU:\n%s", str(c))
# Handle Class Byte
# {{{
class_byte = c.cla
SM_STATUS = None
logical_channel = 0
command_chaining = 0
header_authentication = 0
# Ugly Hack for OpenSC-explorer
if(class_byte == 0xb0):
logging.debug("Open SC APDU")
SM_STATUS = "No SM"
# If Bit 8,7,6 == 0 then first industry values are used
if (class_byte & 0xE0 == 0x00):
# Bit 1 and 2 specify the logical channel
logical_channel = class_byte & 0x03
# Bit 3 and 4 specify secure messaging
secure_messaging = class_byte >> 2
secure_messaging &= 0x03
if (secure_messaging == 0x00):
SM_STATUS = "No SM"
elif (secure_messaging == 0x01):
SM_STATUS = "Proprietary SM" # Not supported ?
elif (secure_messaging == 0x02):
SM_STATUS = "Standard SM"
elif (secure_messaging == 0x03):
SM_STATUS = "Standard SM"
header_authentication = 1
# If Bit 8,7 == 01 then further industry values are used
        elif (class_byte & 0xC0 == 0x40):
# Bit 1 to 4 specify logical channel. 4 is added, value range is
# from four to nineteen
logical_channel = class_byte & 0x0f
logical_channel += 4
# Bit 6 indicates secure messaging
secure_messaging = class_byte >> 6
if (secure_messaging == 0x00):
SM_STATUS = "No SM"
elif (secure_messaging == 0x01):
SM_STATUS = "Standard SM"
else:
# Bit 8 is set to 1, which is not specified by ISO 7816-4
SM_STATUS = "Proprietary SM"
# In both cases Bit 5 specifies command chaining
command_chaining = class_byte >> 5
command_chaining &= 0x01
# }}}
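        # Worked example (added for clarity): for CLA = 0x0C the
        # first-interindustry branch above yields
        #   logical_channel  = 0x0C & 0x03        -> 0
        #   secure messaging = (0x0C >> 2) & 0x03 -> 3, i.e. "Standard SM"
        #                      with header authentication
        #   command_chaining = (0x0C >> 5) & 0x01 -> 0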
sm = False
try:
if SM_STATUS == "Standard SM" or SM_STATUS == "Proprietary SM":
c = self.SAM.parse_SM_CAPDU(c, header_authentication)
logging.info("Decrypted APDU:\n%s", str(c))
sm = True
sw, result = self.ins2handler.get(c.ins, notImplemented)(c.p1,
c.p2,
c.data)
answer = self.formatResult(Iso7816OS.seekable(c.ins),
c.effective_Le, result, sw, sm)
except SwError as e:
logging.info(e.message)
import traceback
traceback.print_exception(*sys.exc_info())
sw = e.sw
result = ""
answer = self.formatResult(False, 0, result, sw, sm)
return answer
def powerUp(self):
self.mf.current = self.mf
def reset(self):
self.mf.current = self.mf
# sizeof(int) taken from asizof-package {{{
_Csizeof_short = len(struct.pack('h', 0))
# }}}
VPCD_CTRL_LEN = 1
VPCD_CTRL_OFF = 0
VPCD_CTRL_ON = 1
VPCD_CTRL_RESET = 2
VPCD_CTRL_ATR = 4
# ADDED CODE SECTION IN ORDER TO INTEGRATE ESP32 TO GNUPG STARTS HERE
class handleConnection(SocketServer.BaseRequestHandler):
def handle(self):
global command # The command APDU
global response # The response APDU
global condCommand # Condition to wait until a new APDU command arrives
global condResponse # Condition to wait until a response is available
global newCommand # Flag for the handler that there is a new command
global processing # Flag for the run function that the processing has finished
global err # Flag for the run function that an error happened
with condCommand:
while (newCommand == 0):
condCommand.wait()
with condResponse:
try:
self.request.sendall(command) # Send the command APDU to the ESP32
response = self.request.recv(257).strip() # Get the response APDU
except SocketError: # ESP32 probably disconnected
err = 1 # Set the error flag
processing = 0 # Processing finished, got the response
newCommand = 0 # Reset the newCommand flag
condResponse.notify()
# ADDED CODE SECTION ENDS HERE
class VirtualICC(object):
"""
This class is responsible for maintaining the communication of the virtual
PCD and the emulated smartcard. vpicc and vpcd communicate via a socket.
The vpcd sends command APDUs (which it receives from an application) to the
vicc. The vicc passes these CAPDUs on to an emulated smartcard, which
produces a response APDU. This RAPDU is then passed back by the vicc to
the vpcd, which forwards it to the application.
"""
def __init__(self, datasetfile, card_type, host, port, mode, localIP, # MODIFIED ARGUMENTS
readernum=None, ef_cardsecurity=None, ef_cardaccess=None,
ca_key=None, cvca=None, disable_checks=False, esign_key=None,
esign_ca_cert=None, esign_cert=None,
logginglevel=logging.INFO):
from os.path import exists
logging.basicConfig(level=logginglevel,
format="%(asctime)s [%(levelname)s] %(message)s",
datefmt="%d.%m.%Y %H:%M:%S")
self.cardGenerator = CardGenerator(card_type)
# If a dataset file is specified, read the card's data groups from disk
if datasetfile is not None:
if exists(datasetfile):
logging.info("Reading Data Groups from file %s.",
datasetfile)
self.cardGenerator.readDatagroups(datasetfile)
MF, SAM = self.cardGenerator.getCard()
# Generate an OS object of the correct card_type
if card_type == "iso7816" or card_type == "ePass":
self.os = Iso7816OS(MF, SAM)
elif card_type == "nPA":
from virtualsmartcard.cards.nPA import NPAOS
self.os = NPAOS(MF, SAM, ef_cardsecurity=ef_cardsecurity,
ef_cardaccess=ef_cardaccess, ca_key=ca_key,
cvca=cvca, disable_checks=disable_checks,
esign_key=esign_key, esign_ca_cert=esign_ca_cert,
esign_cert=esign_cert)
elif card_type == "cryptoflex":
from virtualsmartcard.cards.cryptoflex import CryptoflexOS
self.os = CryptoflexOS(MF, SAM)
elif card_type == "relay":
from virtualsmartcard.cards.Relay import RelayOS
self.os = RelayOS(readernum)
elif card_type == "handler_test":
from virtualsmartcard.cards.HandlerTest import HandlerTestOS
self.os = HandlerTestOS()
else:
logging.warning("Unknown cardtype %s. Will use standard card_type \
(ISO 7816)", card_type)
card_type = "iso7816"
self.os = Iso7816OS(MF, SAM)
self.type = card_type
# Connect to the VPCD
self.host = host
self.port = port
if host:
# use normal connection mode
try:
self.sock = self.connectToPort(host, port)
self.sock.settimeout(None)
self.server_sock = None
except socket.error as e:
logging.error("Failed to open socket: %s", str(e))
logging.error("Is pcscd running at %s? Is vpcd loaded? Is a \
firewall blocking port %u?", host, port)
sys.exit()
else:
# use reversed connection mode
try:
local_ip = [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]
custom_url = 'vicc://%s:%d' % (local_ip, port)
                print('VICC hostname: %s' % local_ip)
print('VICC port: %d' % port)
print('On your NFC phone with the Android Smart Card Emulator app scan this code:')
try:
import qrcode
qr = qrcode.QRCode()
qr.add_data(custom_url)
qr.print_ascii()
except ImportError:
print('https://api.qrserver.com/v1/create-qr-code/?data=%s' % custom_url)
(self.sock, self.server_sock, host) = self.openPort(port)
self.sock.settimeout(None)
except socket.error as e:
logging.error("Failed to open socket: %s", str(e))
logging.error("Is pcscd running? Is vpcd loaded and in \
reversed connection mode? Is a firewall \
blocking port %u?", port)
sys.exit()
logging.info("Connected to virtual PCD at %s:%u", host, port)
# ADDED CODE SECTION IN ORDER TO INTEGRATE ESP32 TO GNUPG STARTS HERE
if (mode == "esp"):
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.TCPServer((localIP, 5511), handleConnection)
srvThrd = threading.Thread(target=server.serve_forever)
srvThrd.daemon = True
srvThrd.start()
# ADDED CODE SECTION ENDS HERE
atexit.register(self.stop)
@staticmethod
def connectToPort(host, port):
"""
Open a connection to a given host on a given port.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
return sock
@staticmethod
def openPort(port):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(('', port))
server_socket.listen(0)
logging.info("Waiting for vpcd on port " + str(port))
(client_socket, address) = server_socket.accept()
return (client_socket, server_socket, address[0])
def __sendToVPICC(self, msg):
""" Send a message to the vpcd """
self.sock.sendall(struct.pack('!H', len(msg)) + msg)
def __recvFromVPICC(self):
""" Receive a message from the vpcd """
# receive message size
while True:
try:
sizestr = self.sock.recv(_Csizeof_short)
except socket.error as e:
if e.errno == errno.EINTR:
continue
break
if len(sizestr) == 0:
logging.info("Virtual PCD shut down")
raise socket.error
size = struct.unpack('!H', sizestr)[0]
# receive and return message
if size:
while True:
try:
msg = self.sock.recv(size)
except socket.error as e:
if e.errno == errno.EINTR:
continue
break
if len(msg) == 0:
logging.info("Virtual PCD shut down")
raise socket.error
else:
msg = None
return size, msg
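    # Framing note (added for clarity): vpicc and vpcd exchange messages with a
    # two-byte network-order length prefix, e.g. a 3-byte APDU "abc" goes over
    # the wire as "\x00\x03" + "abc" (struct.pack('!H', 3) == "\x00\x03").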
def run(self, mode): # MODIFIED ARGUMENTS
"""
Main loop of the vpicc. Receives command APDUs via a socket from the
vpcd, dispatches them to the emulated smartcard and sends the resulting
        response APDU back to the vpcd.
"""
# ADDED CODE SECTION IN ORDER TO INTEGRATE ESP32 TO GNUPG STARTS HERE
global command # The command APDU
global response # The response APDU
global condCommand # Condition to wait until a new APDU command arrives
global condResponse # Condition to wait until a response is available
global newCommand # Flag for the handler that there is a new command
global processing # Flag for the run function that the processing has finished
global err # Flag for the run function that an error happened
condCommand = threading.Condition()
condResponse = threading.Condition()
# ADDED CODE SECTION ENDS HERE
while True:
try:
(size, msg) = self.__recvFromVPICC()
except socket.error as e:
if not self.host:
logging.info("Waiting for vpcd on port " + str(self.port))
(self.sock, address) = self.server_sock.accept()
continue
else:
sys.exit()
# ADDED CODE SECTION IN ORDER TO INTEGRATE ESP32 TO GNUPG STARTS HERE
newCommand = 0
processing = 0
command = ""
response = ""
err = 0
# ADDED CODE SECTION ENDS HERE
if not size:
logging.warning("Error in communication protocol (missing \
size parameter)")
elif size == VPCD_CTRL_LEN:
if msg == chr(VPCD_CTRL_OFF):
logging.info("Power Down")
self.os.powerDown()
elif msg == chr(VPCD_CTRL_ON):
logging.info("Power Up")
self.os.powerUp()
elif msg == chr(VPCD_CTRL_RESET):
logging.info("Reset")
# ADDED CODE SECTION IN ORDER TO INTEGRATE ESP32 TO GNUPG STARTS HERE
if (mode == "esp"):
with condCommand:
command = '\x00\x55\x00\x00\x00' # Custom command INS to reset
newCommand = 1
processing = 1
condCommand.notify()
with condResponse:
while (processing == 1):
condResponse.wait()
# ADDED CODE SECTION ENDS HERE
self.os.reset()
elif msg == chr(VPCD_CTRL_ATR):
self.__sendToVPICC(self.os.getATR())
else:
logging.warning("unknown control command")
else:
if size != len(msg):
logging.warning("Expected %u bytes, but received only %u",
size, len(msg))
# ADDED CODE SECTION IN ORDER TO INTEGRATE ESP32 TO GNUPG STARTS HERE
if (mode == "esp"):
with condCommand:
command = msg
newCommand = 1
processing = 1
condCommand.notify()
with condResponse:
while (processing == 1):
condResponse.wait(0)
if (err == 0):
self.__sendToVPICC(response)
else: # ESP32 was probably disconnected
sys.exit() # Terminate execution
else:
# ADDED CODE SECTION ENDS HERE
answer = self.os.execute(msg)
logging.info("Response APDU (%d Bytes):\n%s\n", len(answer),
hexdump(answer))
self.__sendToVPICC(answer)
def stop(self):
self.sock.close()
if self.server_sock:
self.server_sock.close()
|
parallel_gc.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2019 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import gc
import os
import time
import json
import ctypes
import threading
import re
import sys
def singleton(cls, *args, **kw):
instances = {}
def _singleton():
if cls not in instances:
instances[cls] = cls(*args, **kw)
return instances[cls]
return _singleton
script_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
@singleton
class ParallelGC(object):
def __init__(self):
self.pipe_name = 'fifo_' + script_name #str(os.getpid())
self.t0, self.t1, self.t2 = gc.get_threshold()
self.use_threshold = False
        self.stop_pgc = False
self.check_intvl = 300 # collect per 5 minutes
def shutdown(self):
self.stop_pgc = True
    def restart(self, check_intvl=300):
        if self.stop_pgc is True:
            self.stop_pgc = False
            self.start_thread_collect(check_intvl)
def set_intvl(self, check_intvl):
self.check_intvl = check_intvl
def gc_collect(self):
# gc0 = gc.collect()
gc.collect()
list_grbg_id = []
for item in gc.garbage:
list_grbg_id.append(id(item))
return list_grbg_id
def set_threshold(self, t0=700, t1=10, t2=10):
gc.set_threshold(t0,t1,t2)
self.t0, self.t1, self.t2 = t0, t1, t2
self.use_threshold = True
def prcv(self):
while 1:
pipein = open(self.pipe_name, 'r')
line = pipein.read()
            if line != '[]' and line != '[][]':
line = re.search(r'(\[.*?\])+?$', line).group(1)
try:
line = json.loads(line)
except:
print "failed loads"
list_grbg = []
for grbg_id in line:
grbg = ctypes.cast(grbg_id, ctypes.py_object).value
list_grbg.append(grbg)
for index in list_grbg:
if isinstance(index, list):
del index[:]
elif isinstance(index, dict):
index.clear()
else:
print index
del list_grbg[:]
time.sleep(self.check_intvl)
pipein.close()
if self.stop_pgc is True:
break
os.remove(self.pipe_name)
os._exit(0)
def prcv_once(self):
pipein = open(self.pipe_name, 'r')
line = pipein.read()
        if line != '[]' and line != '[][]':
line = re.search(r'(\[.*?\])+?$', line).group(1)
try:
line = json.loads(line)
except:
print "failed loads"
list_grbg = []
for grbg_id in line:
grbg = ctypes.cast(grbg_id, ctypes.py_object).value
list_grbg.append(grbg)
for index in list_grbg:
if isinstance(index, list):
del index[:]
elif isinstance(index, dict):
index.clear()
else:
print index
del list_grbg[:]
pipein.close()
os.remove(self.pipe_name)
def find_send(self):
gc.set_debug(gc.DEBUG_SAVEALL)
list_grbg_id = self.gc_collect()
fd_out = os.open(self.pipe_name, os.O_WRONLY)
s = json.dumps(list_grbg_id)
os.write(fd_out, s)
os.close(fd_out)
os._exit(0)
def fork_child(self):
if self.use_threshold is True:
gc.collect()
while True:
if self.use_threshold is True:
c0, c1, c2 = gc.get_count()
if c0 >= self.t0:
pid = os.fork()
if pid == 0:
self.find_send()
else:
pid = os.fork()
if pid == 0:
self.find_send()
time.sleep(self.check_intvl)
if self.stop_pgc is True:
break
def fork_child_once(self):
pid = os.fork()
if pid == 0:
self.find_send()
def start_thread_collect(self, check_intvl):
gc.disable()
self.check_intvl = check_intvl
if os.path.exists(self.pipe_name):
os.unlink(self.pipe_name)
os.mkfifo(self.pipe_name)
rcv_thread = threading.Thread(target=self.prcv)
        rcv_thread.daemon = True
rcv_thread.start()
fork_thread = threading.Thread(target=self.fork_child)
        fork_thread.daemon = True
fork_thread.start()
def collect(self):
gc.disable()
if os.path.exists(self.pipe_name):
os.unlink(self.pipe_name)
os.mkfifo(self.pipe_name)
self.fork_child_once()
rcv_thread = threading.Thread(target=self.prcv_once)
        rcv_thread.daemon = True
rcv_thread.start()
pgc = ParallelGC()
def start(check_intvl):
if pgc.stop_pgc is False:
pgc.start_thread_collect(check_intvl)
else:
pgc.restart(check_intvl)
def collect():
if pgc.stop_pgc is False:
pgc.shutdown()
pgc.collect()
def set_intvl(check_intvl):
pgc.set_intvl(check_intvl)
def set_threshold(t0=700, t1=10, t2=10):
pgc.set_threshold(t0, t1, t2)
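# Illustrative usage (added; not part of the original module):
#
#   import parallel_gc
#   parallel_gc.start(60)    # fork a child every 60s to look for garbage
#   ...
#   parallel_gc.collect()    # one-shot: stop the loop and collect once
#
# The forked child runs gc.collect() with DEBUG_SAVEALL and writes the ids of
# the collected objects into the named pipe ('fifo_<script name>'); the
# receiver thread resolves those ids and clears the lists/dicts in place.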
|
modeling.py
|
'''
Main module for "modeling" endpoints
'''
__author__ = 'Elisha Yadgaran'
from quart import request, render_template, flash, redirect, url_for
import imagehash
from squirrel.database.models import ModelHistory, UserLabel, SquirrelDescription
from simpleml.utils.scoring.load_persistable import PersistableLoader
import base64
import pandas as pd
import asyncio
import threading
import tensorflow as tf
from PIL import Image
from io import BytesIO
class ModelWrapper(object):
'''
Lot of hackery to get the model to load in parallel when the service
starts up
Had trouble getting asyncio to actually execute in parallel so hacked the following:
1) Load in thread
2) Create new event loop for thread
3) Save graph from thread to use in main thread at predict time
'''
def __init__(self):
self._model = None
self._graph = None
# self.concurrent_load_model()
@property
def model(self):
if self._model is None:
self.load_model()
return self._model
@property
def graph(self):
if self._graph is None:
self.load_model()
return self._graph
def predict_proba(self, *args):
with self.graph.as_default():
return self.model.predict_proba(*args)
def load_model(self):
self._model = PersistableLoader.load_model('squirrel')
self._model.load(load_externals=True)
self._graph = tf.get_default_graph()
def async_load_model(self):
event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(event_loop)
self.load_model()
def concurrent_load_model(self):
t = threading.Thread(target=self.async_load_model)
t.daemon = True
t.start()
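# Note (added): the wrapper is lazy by default -- the first predict_proba() call
# triggers load_model(). To pay the load cost at service start-up instead, one
# could call the (currently unused) concurrent_load_model() right after
# constructing the singleton below, e.g.:
#
#   MODEL = ModelWrapper()
#   MODEL.concurrent_load_model()  # load in a background thread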
MODEL = ModelWrapper()
async def upload(feature):
files = await request.files
if request.method == 'POST' and 'photo' in files:
filename = files['photo'].filename
image_stream = files['photo'].stream.read()
if feature == 'squirrel_not_squirrel':
history = await predict(filename, image_stream)
negation = '' if history.prediction else 'NOT'
# .decode is necessary on python 3 for bytes to str conversion
return await render_template('pages/prediction.html', prediction=negation, image=base64.b64encode(image_stream).decode())
if feature == 'which_squirrel':
squirrel = get_hash(image_stream)
return await render_template('pages/matching.html', filename=squirrel.filename, description=squirrel.description)
return await render_template('forms/upload.html')
async def predict(filename, image_stream):
x = pd.Series([image_stream])
prediction_probability = float(MODEL.predict_proba(x)[:, 1])
prediction = int(round(prediction_probability, 0))
# DB
history = ModelHistory.create(
filename=filename,
prediction_probability=prediction_probability,
prediction=prediction
)
return history
def get_hash(image_stream):
image = Image.open(BytesIO(image_stream))
hash = imagehash.average_hash(image)
num_of_pics = len(SquirrelDescription.all())
pic_id = int(str(hash), 16) % num_of_pics + 1
return SquirrelDescription.find(pic_id)
async def model_feedback():
form = await request.form
user_label = form['user_label']
UserLabel.create(user_label=user_label)
await flash("Thank you for making squirrel-nado smarter!")
return redirect(url_for('squirrel_not_squirrel'))
|
mainMethods.py
|
from links import RemoteLinks
import file_management
import web_data
import threading
import sys
def downloadExamSection(currentPath, examClass):
'''
Used for multithreaded downloading.
Parameters:
        currentPath (string): The path to the class folder (accounting, comp sci).
        examClass (linkClass): The linkClass object for a section.
Returns:
Nothing.
'''
test = []
sectionPath = file_management.createFolder(currentPath, examClass.name)
seasonExams = web_data.getExamSeasons(examClass.url)
for seasonExam in seasonExams:
seasonPath = file_management.createFolder(sectionPath, seasonExam.name)
exams = web_data.getExams(seasonExam.url)
test.append(exams)
file_management.populateFolders(seasonPath, exams)
sys.exit()
def downloadCAIE(folderName, pattern, url, syllabusCode):
'''
Downloads a CAIE exam (no PRE-U).
Enter the syllabus code for the exam to download. Type ALL to download all the exams.
Parameters:
folderName (string): the name of the folder to store the files
pattern (string): pattern used to identify whether a link is valid
Returns:
Nothing.
'''
currentPath = file_management.createFolder('output', folderName)
examClasses = web_data.getExamClasses(url, pattern)
if syllabusCode.lower() == 'all':
for examClass in examClasses:
processThread = threading.Thread(
target=downloadExamSection, args=(currentPath, examClass))
processThread.start()
else:
examClass = list(filter(lambda x: syllabusCode in x.name, examClasses))
if len(examClass) > 0:
examClass = examClass[0]
print(examClass)
downloadExamSection(currentPath, examClass)
def printDivider():
bar = '-' * 50
print()
print(bar)
print('*' * 50)
print(bar)
print()
def downloadAICE(syllabusCode):
downloadCAIE('AS and A Levels', RemoteLinks.AICE_PATTERN.value,
RemoteLinks.AICE.value, syllabusCode)
def downloadIGCSE(syllabusCode):
downloadCAIE('IGCSEs', RemoteLinks.IGCSE_PATTERN.value,
RemoteLinks.IGCSE.value, syllabusCode)
def downloadO(syllabusCode):
downloadCAIE('O Levels', RemoteLinks.O_PATTERN.value,
RemoteLinks.O.value, syllabusCode)
def listClasses(value, pattern):
exams = web_data.getExamClasses(
value, pattern)
printDivider()
for exam in exams:
print(exam.name)
printDivider()
def listAICE():
listClasses(RemoteLinks.AICE.value, RemoteLinks.AICE_PATTERN.value)
def listIGCSE():
listClasses(RemoteLinks.IGCSE.value, RemoteLinks.IGCSE_PATTERN.value)
def listO():
listClasses(RemoteLinks.O.value, RemoteLinks.O_PATTERN.value)
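# Illustrative usage (added; not part of the original module):
#
#   listIGCSE()            # print the available IGCSE subject pages
#   downloadIGCSE('ALL')   # download every subject, one thread per subject
#   downloadIGCSE('0417')  # hypothetical code: filtered by substring match
#
# Anything other than 'ALL' filters examClasses by a substring match on the
# class name and downloads that single section synchronously (see downloadCAIE).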
|
test_tasks.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
from opentelemetry.instrumentation.celery import CeleryInstrumentor
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace import SpanKind
from .celery_test_tasks import app, task_add
class TestCeleryInstrumentation(TestBase):
def setUp(self):
super().setUp()
self._worker = app.Worker(app=app, pool="solo", concurrency=1)
self._thread = threading.Thread(target=self._worker.start)
self._thread.daemon = True
self._thread.start()
def tearDown(self):
super().tearDown()
self._worker.stop()
self._thread.join()
def test_task(self):
CeleryInstrumentor().instrument()
result = task_add.delay(1, 2)
while not result.ready():
time.sleep(0.05)
spans = self.sorted_spans(self.memory_exporter.get_finished_spans())
self.assertEqual(len(spans), 2)
consumer, producer = spans
self.assertEqual(consumer.name, "run/tests.celery_test_tasks.task_add")
self.assertEqual(consumer.kind, SpanKind.CONSUMER)
self.assertSpanHasAttributes(
consumer,
{
"celery.action": "run",
"celery.state": "SUCCESS",
SpanAttributes.MESSAGING_DESTINATION: "celery",
"celery.task_name": "tests.celery_test_tasks.task_add",
},
)
self.assertEqual(
producer.name, "apply_async/tests.celery_test_tasks.task_add"
)
self.assertEqual(producer.kind, SpanKind.PRODUCER)
self.assertSpanHasAttributes(
producer,
{
"celery.action": "apply_async",
"celery.task_name": "tests.celery_test_tasks.task_add",
SpanAttributes.MESSAGING_DESTINATION_KIND: "queue",
SpanAttributes.MESSAGING_DESTINATION: "celery",
},
)
self.assertNotEqual(consumer.parent, producer.context)
self.assertEqual(consumer.parent.span_id, producer.context.span_id)
self.assertEqual(consumer.context.trace_id, producer.context.trace_id)
|
portscan.py
|
#!/usr/bin/env python
"""Quickly scans a range of TCP/IP ports for a given address
This script is intended to ONLY interact with targets for which You are
expressly authorized to scan
"""
import socket, threading, itertools, json, re, os, datetime as dt
from optparse import OptionParser
__author__ = "Sean Walsh"
__copyright__ = "Copyright (c) 2018 SeanWalsh95"
__credits__ = ["Sean Walsh", "user:6552846@stackoverflow"]
__license__ = "MIT"
__version__ = "0.1.0"
__maintainer__ = "Sean Walsh"
def TCP_connect(ip, port_range, delay, output):
for port_number in port_range:
TCPsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TCPsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
TCPsock.settimeout(delay)
try:
TCPsock.connect((ip, port_number))
output[port_number] = 'Listening'
except:
output[port_number] = 'Scanned'
# Splits a port range into n separate, interleaved sub-ranges
def split_range(ports, n):
    return [ports[i::n] for i in range(n)]
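# Illustrative example (added): split_range(list(range(10)), 3)
# -> [[0, 3, 6, 9], [1, 4, 7], [2, 5, 8]]
# Ports are dealt out round-robin so each thread gets a similar-sized share.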
def scan_ports(host_ip, delay, port_range, thread_count):
port_ranges_list = split_range( port_range, thread_count )
threads = [] # To run TCP_connect concurrently
output = {} # For printing purposes
# Spawning threads to scan ports
for i in range(thread_count):
t = threading.Thread(target=TCP_connect, args=(host_ip, port_ranges_list[i], delay, output))
threads.append(t)
# Starting threads
for i in range(thread_count):
threads[i].start()
# Locking the script until all threads complete
for i in range(thread_count):
threads[i].join()
scan = {'Listening': [], 'Timeout': []}
for k,v in output.items():
if v == 'Listening':
scan['Listening'].append(k)
else:
scan['Timeout'].append(k)
return scan
def out(f,string):
print(string)
f.write(string+"\n")
def scan_ip(info_header, host_ip, delay, port_range, thread_count):
log_file = ".\logs\{}\{}[{}-{}]@{}.txt".format(host_ip, host_ip, port_range[0], port_range[-1], dt.datetime.utcnow().timestamp())
os.makedirs(os.path.dirname(log_file), exist_ok=True)
with open(log_file,"w+") as f:
out(f,info_header)
scan_result = scan_ports(host_ip, delay, port_range, thread_count)
dashes = "-"*59
out(f,dashes + "\n {:^8} | {:^45} |\n".format("Port", "Description") + dashes)
for listener in scan_result['Listening']:
out(f, port_info(listener) )
out(f,dashes)
if verbose:
out(f,"\n Timeout Ports:")
out(f,dashes + "\n {:^8} | {:^45} |\n".format("Port", "Description") + dashes)
for listener in scan_result['Timeout']:
out(f, port_info(listener) )
out(f,dashes)
def port_info( port ):
global port_list
port_desc = "Undefined"
meta = ""
if str(port) in port_list:
port_entry = port_list[str(port)]
if isinstance( port_entry, list ):
port_desc = port_entry[0]["description"]
meta = "1-of-{}".format(len(port_entry))
else:
port_desc = port_entry["description"]
if len(port_desc) > 35:
port_desc = port_desc[0:35] + "..."
return " {:8} | {:38} {:6} |".format(str(port), port_desc, meta)
def ip_range_gen(input_string):
octets = input_string.split('.')
chunks = [list(map(int, octet.split('-'))) for octet in octets]
ranges = [range(c[0], c[1] + 1) if len(c) == 2 else c for c in chunks]
for address in itertools.product(*ranges):
yield '.'.join(map(str, address))
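# Illustrative example (added): octets may contain 'low-high' ranges, e.g.
#   list(ip_range_gen('192.168.1.1-3'))
#   -> ['192.168.1.1', '192.168.1.2', '192.168.1.3']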
def main():
global verbose, port_ranges
## DEFAULTS ##
delay = 0.5
verbose = False
ip_range = ["127.0.0.1"]
thread_count = 100
range_selection = "common"
    port_ranges["reserved"] = range(1024)
    port_ranges["full"] = range(65536)
usage = "usage: python %prog [options] ip address to scan"
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose",action="store_true", dest="verbose",
help="prints extra information about scan")
parser.add_option("-t", action="store", type=float, dest="timeout",
help="Seconds the socket is going to wait until timeout : 0.5s")
parser.add_option("--threads", action="store", type=int, dest="threads", default=100,
help="number of threads to spawn : [100]")
parser.add_option(
"--common",
action="store_const",
const="common",
dest="range",
default="common",
help="Range of commonly used ports : [default]")
parser.add_option(
"--reserved",
action="store_const",
const="reserved",
dest="range",
help="Full port range (0-1023) scan")
parser.add_option(
"--full",
action="store_const",
const="full",
dest="range",
help="Full port range (0-65535) scan")
parser.add_option(
"-r",
action="store",
type='string',
dest="cust_range",
help="Specify name of custom range i.e. 'common_fast', 'simple'")
parser.add_option(
'-c',
action="store",
type='string',
dest='custom_ports',
help="Custom list of ports e.x. -c 80,443,25")
(options, args) = parser.parse_args()
if options.verbose != None:
verbose = options.verbose
if options.timeout != None:
delay = options.timeout
if options.threads != None:
thread_count = options.threads
if args[0] != None:
ip_arg = args[0]
if options.custom_ports != None:
port_range = list(map(int, options.custom_ports.split(',')))
else:
if options.cust_range != None:
range_selection = options.cust_range
elif options.range != None:
range_selection = options.range
port_range = port_ranges[range_selection]
if re.search(r'\d{1,3}\.[\d-]*\.[\d-]*\.[\d-]*', ip_arg):
ip_range = list(ip_range_gen(ip_arg))
if len(ip_range) > 1:
print( "\n"+"@"*45 )
print( "SCANNING {} IP ADDRESSES".format(len(ip_range)).center(45) )
total_ert_s = (len(ip_range) * ( (len(port_range)*delay) / thread_count ))
print( "TOTAL ESTIMATED RUNTIME: {}".format(str(dt.timedelta(seconds=total_ert_s))).center(45) )
print( "@"*45+"\n" )
else:
ip_range = [ip_arg]
print(port_range)
for ip in ip_range:
# ERT in seconds based on (scans_to_perform * socket_delay) / thread_count
ert_sec = ( len(port_range) * delay ) / thread_count
ert = dt.timedelta( seconds=ert_sec )
padding = 15 + max( len(ip), len(range_selection), len(str(delay))+1, len(str(ert)) )
info = "\n"+"-"*padding+"\n"
info += "{:>12}: {}\n".format("Scanning IP", ip )
info += "{:>12}: {}\n".format("Range", range_selection )
info += "{:>12}: {}\n".format("Timeout", str(delay)+"s" )
info += "{:>12}: {}".format("ERT", str(ert) )
scan_ip(info, ip, delay, port_range, thread_count)
if __name__ == "__main__":
    global port_list, port_ranges
with open('ports.json') as p:
json_dict = json.load(p)
port_list = json_dict["ports"]
port_ranges = json_dict["ranges"]
main()
|
client.py
|
import pickle
import socket
import random
import threading
import time
import ast
import sys
def println(s):
print(s)
def diff(a, b):
return set_dict(DictDiffer(a, b).new_or_changed(), a)
def set_dict(s, d):
return {k: d[k] for k in s}
class DictDiffer(object):
"""
Calculate the difference between two dictionaries as:
(1) items added
(2) items removed
(3) keys same in both but changed values
(4) keys same in both and unchanged values
"""
def __init__(self, current_dict, past_dict):
self.current_dict, self.past_dict = current_dict, past_dict
self.current_keys, self.past_keys = set(current_dict.keys()), set(past_dict.keys())
self.intersect = self.current_keys.intersection(self.past_keys)
def added(self):
""" Find keys that have been added """
return self.current_keys - self.intersect
def removed(self):
""" Find keys that have been removed """
return self.past_keys - self.intersect
def changed(self):
""" Find keys that have been changed """
return set(o for o in self.intersect
if self.past_dict[o] != self.current_dict[o])
def unchanged(self):
""" Find keys that are unchanged """
return set(o for o in self.intersect
if self.past_dict[o] == self.current_dict[o])
def new_or_changed(self):
""" Find keys that are new or changed """
# return set(k for k, v in self.current_dict.items()
# if k not in self.past_keys or v != self.past_dict[k])
return self.added().union(self.changed())
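# A minimal usage sketch of DictDiffer/diff (hypothetical values, not part of
# the original module):
#
#   old = {"a": 1, "b": 2}
#   new = {"a": 1, "b": 3, "c": 4}
#   DictDiffer(new, old).added()    # -> {'c'}
#   DictDiffer(new, old).changed()  # -> {'b'}
#   diff(new, old)                  # -> {'b': 3, 'c': 4}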
class Client():
    def __init__(self, serverip, serverport, workers=range(0, 2)):
        # discover the outward-facing local address by "connecting" a UDP
        # socket to a public host (no packet is actually sent for UDP connect)
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("gmail.com", 80))
        self.host = s.getsockname()[0]
        s.close()
        # self.host = "127.0.0.1"
        self.new_data = {}
        self.port = 0  # 0 lets the OS pick an ephemeral port
        self.server = (serverip, serverport)
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.s.bind((self.host, self.port))
        print(self.server)
        # one thread per worker id: 0 receives and prints, 1 reads stdin and sends
        for x in workers:
            threading.Thread(target=self.run, args=(x,)).start()
    def run(self, worker):
        print(worker)
        self.new_data = {}
        while True:
            if worker == 0:
                # receiver loop: print every entry that is new or changed
                # since the previous message, then remember the latest state
                data, _ = self.receive()
                self.new_data = diff(data, self.new_data)
                list(map(lambda i: println(str(i[0])), list(self.new_data.values())))
                self.new_data = data
            if worker == 1:
                # sender loop: read a line from stdin and send it with a timestamp
                # (input() replaces the Python 2-only raw_input())
                self.send(("{}".format(input()), time.time()))
def send(self, data):
d = data
self.s.sendto(pickle.dumps(d), self.server)
def receive(self):
data, addr = self.s.recvfrom(4096)
return pickle.loads(data), addr
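# Wire format: every message is a pickled (text, unix_timestamp) tuple sent over
# UDP (see send()/receive() above).  Unpickling data from an untrusted network
# peer is unsafe; a hardened variant would use json or a fixed binary framing
# instead -- noted here as a caveat, not part of the original design.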
if __name__ == "__main__":
    # the worker threads started in __init__ keep the process running
    c = Client("73.113.154.223", 7005)
|
test_enum.py
|
import enum
import inspect
import pydoc
import sys
import unittest
from collections import OrderedDict
from enum import Enum, IntEnum, EnumMeta, Flag, IntFlag, unique, auto
from io import StringIO
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
from test import support
from datetime import timedelta
try:
import threading
except ImportError:
threading = None
# for pickle tests
try:
class Stooges(Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
Stooges = exc
try:
class IntStooges(int, Enum):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
IntStooges = exc
try:
class FloatStooges(float, Enum):
LARRY = 1.39
CURLY = 2.72
MOE = 3.142596
except Exception as exc:
FloatStooges = exc
try:
class FlagStooges(Flag):
LARRY = 1
CURLY = 2
MOE = 3
except Exception as exc:
FlagStooges = exc
# for pickle test and subclass tests
try:
class StrEnum(str, Enum):
'accepts only string values'
class Name(StrEnum):
BDFL = 'Guido van Rossum'
FLUFL = 'Barry Warsaw'
except Exception as exc:
Name = exc
try:
Question = Enum('Question', 'who what when where why', module=__name__)
except Exception as exc:
Question = exc
try:
Answer = Enum('Answer', 'him this then there because')
except Exception as exc:
Answer = exc
try:
Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception as exc:
Theory = exc
# for doctests
try:
class Fruit(Enum):
TOMATO = 1
BANANA = 2
CHERRY = 3
except Exception:
pass
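# The two helpers below round-trip a value through every supported pickle
# protocol (0 .. HIGHEST_PROTOCOL) and apply the given assertion to each result.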
def test_pickle_dump_load(assertion, source, target=None):
if target is None:
target = source
for protocol in range(HIGHEST_PROTOCOL + 1):
assertion(loads(dumps(source, protocol=protocol)), target)
def test_pickle_exception(assertion, exception, obj):
for protocol in range(HIGHEST_PROTOCOL + 1):
with assertion(exception):
dumps(obj, protocol=protocol)
class TestHelpers(unittest.TestCase):
# _is_descriptor, _is_sunder, _is_dunder
def test_is_descriptor(self):
class foo:
pass
for attr in ('__get__','__set__','__delete__'):
obj = foo()
self.assertFalse(enum._is_descriptor(obj))
setattr(obj, attr, 1)
self.assertTrue(enum._is_descriptor(obj))
def test_is_sunder(self):
for s in ('_a_', '_aa_'):
self.assertTrue(enum._is_sunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_sunder(s))
def test_is_dunder(self):
for s in ('__a__', '__aa__'):
self.assertTrue(enum._is_dunder(s))
for s in ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__', '__a_', '_',
'__', '___', '____', '_____',):
self.assertFalse(enum._is_dunder(s))
# for subclassing tests
class classproperty:
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, instance, ownerclass):
return self.fget(ownerclass)
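# classproperty above is a minimal read-only descriptor that evaluates its
# getter against the owner class; the mixin tests use it for class-level
# attributes such as MAX and ALL.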
# tests
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_dir_on_class(self):
Season = self.Season
self.assertEqual(
set(dir(Season)),
set(['__class__', '__doc__', '__members__', '__module__',
'SPRING', 'SUMMER', 'AUTUMN', 'WINTER']),
)
def test_dir_on_item(self):
Season = self.Season
self.assertEqual(
set(dir(Season.WINTER)),
set(['__class__', '__doc__', '__module__', 'name', 'value']),
)
def test_dir_with_added_behavior(self):
class Test(Enum):
this = 'that'
these = 'those'
def wowser(self):
return ("Wowser! I'm %s!" % self.name)
self.assertEqual(
set(dir(Test)),
set(['__class__', '__doc__', '__members__', '__module__', 'this', 'these']),
)
self.assertEqual(
set(dir(Test.this)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'wowser']),
)
def test_dir_on_sub_with_behavior_on_super(self):
# see issue22506
class SuperEnum(Enum):
def invisible(self):
return "did you see me?"
class SubEnum(SuperEnum):
sample = 5
self.assertEqual(
set(dir(SubEnum.sample)),
set(['__class__', '__doc__', '__module__', 'name', 'value', 'invisible']),
)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertIs(Season(Season.WINTER), Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split(), 1):
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertIn(e, Season)
self.assertIs(type(e), Season)
self.assertIsInstance(e, Season)
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.{0}: {1}>'.format(season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
with self.assertRaises(AttributeError):
Season.SPRING.name = 'invierno'
with self.assertRaises(AttributeError):
Season.SPRING.value = 2
def test_changing_member(self):
Season = self.Season
with self.assertRaises(AttributeError):
Season.WINTER = 'really cold'
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
with self.assertRaises(AttributeError):
del Season.SPRING
with self.assertRaises(AttributeError):
del Season.DRY
with self.assertRaises(AttributeError):
del Season.SPRING.name
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
def test_invalid_names(self):
with self.assertRaises(ValueError):
class Wrong(Enum):
mro = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_create_= 11
with self.assertRaises(ValueError):
class Wrong(Enum):
_get_mixins_ = 9
with self.assertRaises(ValueError):
class Wrong(Enum):
_find_new_ = 1
with self.assertRaises(ValueError):
class Wrong(Enum):
_any_name_ = 9
def test_bool(self):
# plain Enum members are always True
class Logic(Enum):
true = True
false = False
self.assertTrue(Logic.true)
self.assertTrue(Logic.false)
# unless overridden
class RealLogic(Enum):
true = True
false = False
def __bool__(self):
return bool(self._value_)
self.assertTrue(RealLogic.true)
self.assertFalse(RealLogic.false)
# mixed Enums depend on mixed-in type
class IntLogic(int, Enum):
true = 1
false = 0
self.assertTrue(IntLogic.true)
self.assertFalse(IntLogic.false)
def test_contains(self):
Season = self.Season
self.assertIn(Season.AUTUMN, Season)
with self.assertRaises(TypeError):
3 in Season
with self.assertRaises(TypeError):
'AUTUMN' in Season
val = Season(3)
self.assertIn(val, Season)
class OtherEnum(Enum):
one = 1; two = 2
self.assertNotIn(OtherEnum.two, Season)
def test_comparisons(self):
Season = self.Season
with self.assertRaises(TypeError):
Season.SPRING < Season.WINTER
with self.assertRaises(TypeError):
Season.SPRING > 4
self.assertNotEqual(Season.SPRING, 1)
class Part(Enum):
SPRING = 1
CLIP = 2
BARREL = 3
self.assertNotEqual(Season.SPRING, Part.SPRING)
with self.assertRaises(TypeError):
Season.SPRING < Part.CLIP
def test_enum_duplicates(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertIs(Season.FALL, Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertIs(Season(3), Season.AUTUMN)
self.assertIs(Season(1), Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
[k for k,v in Season.__members__.items() if v.name != k],
['FALL', 'ANOTHER_SPRING'],
)
def test_duplicate_name(self):
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
with self.assertRaises(TypeError):
class Color(Enum):
red = 1
green = 2
blue = 3
def red(self):
return 'red'
with self.assertRaises(TypeError):
class Color(Enum):
@property
def red(self):
return 'redder'
red = 1
green = 2
blue = 3
def test_enum_with_value_name(self):
class Huh(Enum):
name = 1
value = 2
self.assertEqual(
list(Huh),
[Huh.name, Huh.value],
)
self.assertIs(type(Huh.name), Huh)
self.assertEqual(Huh.name.name, 'name')
self.assertEqual(Huh.name.value, 1)
def test_format_enum(self):
Season = self.Season
self.assertEqual('{}'.format(Season.SPRING),
'{}'.format(str(Season.SPRING)))
self.assertEqual( '{:}'.format(Season.SPRING),
'{:}'.format(str(Season.SPRING)))
self.assertEqual('{:20}'.format(Season.SPRING),
'{:20}'.format(str(Season.SPRING)))
self.assertEqual('{:^20}'.format(Season.SPRING),
'{:^20}'.format(str(Season.SPRING)))
self.assertEqual('{:>20}'.format(Season.SPRING),
'{:>20}'.format(str(Season.SPRING)))
self.assertEqual('{:<20}'.format(Season.SPRING),
'{:<20}'.format(str(Season.SPRING)))
def test_format_enum_custom(self):
class TestFloat(float, Enum):
one = 1.0
two = 2.0
def __format__(self, spec):
return 'TestFloat success!'
self.assertEqual('{}'.format(TestFloat.one), 'TestFloat success!')
def assertFormatIsValue(self, spec, member):
self.assertEqual(spec.format(member), spec.format(member.value))
def test_format_enum_date(self):
Holiday = self.Holiday
self.assertFormatIsValue('{}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:^20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:>20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:<20}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m}', Holiday.IDES_OF_MARCH)
self.assertFormatIsValue('{:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
def test_format_enum_float(self):
Konstants = self.Konstants
self.assertFormatIsValue('{}', Konstants.TAU)
self.assertFormatIsValue('{:}', Konstants.TAU)
self.assertFormatIsValue('{:20}', Konstants.TAU)
self.assertFormatIsValue('{:^20}', Konstants.TAU)
self.assertFormatIsValue('{:>20}', Konstants.TAU)
self.assertFormatIsValue('{:<20}', Konstants.TAU)
self.assertFormatIsValue('{:n}', Konstants.TAU)
self.assertFormatIsValue('{:5.2}', Konstants.TAU)
self.assertFormatIsValue('{:f}', Konstants.TAU)
def test_format_enum_int(self):
Grades = self.Grades
self.assertFormatIsValue('{}', Grades.C)
self.assertFormatIsValue('{:}', Grades.C)
self.assertFormatIsValue('{:20}', Grades.C)
self.assertFormatIsValue('{:^20}', Grades.C)
self.assertFormatIsValue('{:>20}', Grades.C)
self.assertFormatIsValue('{:<20}', Grades.C)
self.assertFormatIsValue('{:+}', Grades.C)
self.assertFormatIsValue('{:08X}', Grades.C)
self.assertFormatIsValue('{:b}', Grades.C)
def test_format_enum_str(self):
Directional = self.Directional
self.assertFormatIsValue('{}', Directional.WEST)
self.assertFormatIsValue('{:}', Directional.WEST)
self.assertFormatIsValue('{:20}', Directional.WEST)
self.assertFormatIsValue('{:^20}', Directional.WEST)
self.assertFormatIsValue('{:>20}', Directional.WEST)
self.assertFormatIsValue('{:<20}', Directional.WEST)
def test_hash(self):
Season = self.Season
dates = {}
dates[Season.WINTER] = '1225'
dates[Season.SPRING] = '0315'
dates[Season.SUMMER] = '0704'
dates[Season.AUTUMN] = '1031'
self.assertEqual(dates[Season.AUTUMN], '1031')
def test_intenum_from_scratch(self):
class phy(int, Enum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_intenum_inherited(self):
class IntEnum(int, Enum):
pass
class phy(IntEnum):
pi = 3
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_from_scratch(self):
class phy(float, Enum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_floatenum_inherited(self):
class FloatEnum(float, Enum):
pass
class phy(FloatEnum):
pi = 3.1415926
tau = 2 * pi
self.assertTrue(phy.pi < phy.tau)
def test_strenum_from_scratch(self):
class phy(str, Enum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_strenum_inherited(self):
class StrEnum(str, Enum):
pass
class phy(StrEnum):
pi = 'Pi'
tau = 'Tau'
self.assertTrue(phy.pi < phy.tau)
def test_intenum(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
lst = list(WeekDay)
self.assertEqual(len(lst), len(WeekDay))
self.assertEqual(len(WeekDay), 7)
target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
target = target.split()
for i, weekday in enumerate(target, 1):
e = WeekDay(i)
self.assertEqual(e, i)
self.assertEqual(int(e), i)
self.assertEqual(e.name, weekday)
self.assertIn(e, WeekDay)
self.assertEqual(lst.index(e)+1, i)
self.assertTrue(0 < e < 8)
self.assertIs(type(e), WeekDay)
self.assertIsInstance(e, int)
self.assertIsInstance(e, Enum)
def test_intenum_duplicates(self):
class WeekDay(IntEnum):
SUNDAY = 1
MONDAY = 2
TUESDAY = TEUSDAY = 3
WEDNESDAY = 4
THURSDAY = 5
FRIDAY = 6
SATURDAY = 7
self.assertIs(WeekDay.TEUSDAY, WeekDay.TUESDAY)
self.assertEqual(WeekDay(3).name, 'TUESDAY')
self.assertEqual([k for k,v in WeekDay.__members__.items()
if v.name != k], ['TEUSDAY', ])
def test_intenum_from_bytes(self):
self.assertIs(IntStooges.from_bytes(b'\x00\x03', 'big'), IntStooges.MOE)
with self.assertRaises(ValueError):
IntStooges.from_bytes(b'\x00\x05', 'big')
def test_floatenum_fromhex(self):
h = float.hex(FloatStooges.MOE.value)
self.assertIs(FloatStooges.fromhex(h), FloatStooges.MOE)
h = float.hex(FloatStooges.MOE.value + 0.01)
with self.assertRaises(ValueError):
FloatStooges.fromhex(h)
def test_pickle_enum(self):
if isinstance(Stooges, Exception):
raise Stooges
test_pickle_dump_load(self.assertIs, Stooges.CURLY)
test_pickle_dump_load(self.assertIs, Stooges)
def test_pickle_int(self):
if isinstance(IntStooges, Exception):
raise IntStooges
test_pickle_dump_load(self.assertIs, IntStooges.CURLY)
test_pickle_dump_load(self.assertIs, IntStooges)
def test_pickle_float(self):
if isinstance(FloatStooges, Exception):
raise FloatStooges
test_pickle_dump_load(self.assertIs, FloatStooges.CURLY)
test_pickle_dump_load(self.assertIs, FloatStooges)
def test_pickle_enum_function(self):
if isinstance(Answer, Exception):
raise Answer
test_pickle_dump_load(self.assertIs, Answer.him)
test_pickle_dump_load(self.assertIs, Answer)
def test_pickle_enum_function_with_module(self):
if isinstance(Question, Exception):
raise Question
test_pickle_dump_load(self.assertIs, Question.who)
test_pickle_dump_load(self.assertIs, Question)
def test_enum_function_with_qualname(self):
if isinstance(Theory, Exception):
raise Theory
self.assertEqual(Theory.__qualname__, 'spanish_inquisition')
def test_class_nested_enum_and_pickle_protocol_four(self):
# would normally just have this directly in the class namespace
class NestedEnum(Enum):
twigs = 'common'
shiny = 'rare'
self.__class__.NestedEnum = NestedEnum
self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
test_pickle_dump_load(self.assertIs, self.NestedEnum.twigs)
def test_pickle_by_name(self):
class ReplaceGlobalInt(IntEnum):
ONE = 1
TWO = 2
ReplaceGlobalInt.__reduce_ex__ = enum._reduce_ex_by_name
for proto in range(HIGHEST_PROTOCOL):
self.assertEqual(ReplaceGlobalInt.TWO.__reduce_ex__(proto), 'TWO')
def test_exploding_pickle(self):
BadPickle = Enum(
'BadPickle', 'dill sweet bread-n-butter', module=__name__)
globals()['BadPickle'] = BadPickle
# now break BadPickle to test exception raising
enum._make_class_unpicklable(BadPickle)
test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
def test_string_enum(self):
class SkillLevel(str, Enum):
master = 'what is the sound of one hand clapping?'
journeyman = 'why did the chicken cross the road?'
apprentice = 'knock, knock!'
self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
def test_getattr_getitem(self):
class Period(Enum):
morning = 1
noon = 2
evening = 3
night = 4
self.assertIs(Period(2), Period.noon)
self.assertIs(getattr(Period, 'night'), Period.night)
self.assertIs(Period['morning'], Period.morning)
def test_getattr_dunder(self):
Season = self.Season
self.assertTrue(getattr(Season, '__eq__'))
def test_iteration_order(self):
class Season(Enum):
SUMMER = 2
WINTER = 4
AUTUMN = 3
SPRING = 1
self.assertEqual(
list(Season),
[Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
)
def test_reversed_iteration_order(self):
self.assertEqual(
list(reversed(self.Season)),
[self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
self.Season.SPRING]
)
def test_programmatic_function_string(self):
SummerMonth = Enum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', start=10)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 10):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_string_list_with_start(self):
SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 20):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_iterable(self):
SummerMonth = Enum(
'SummerMonth',
(('june', 1), ('july', 2), ('august', 3))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_from_dict(self):
SummerMonth = Enum(
'SummerMonth',
OrderedDict((('june', 1), ('july', 2), ('august', 3)))
)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(int(e.value), i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_with_start(self):
SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 30):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass(self):
SummerMonth = IntEnum('SummerMonth', 'june july august')
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 1):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_programmatic_function_type_from_subclass_with_start(self):
SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
lst = list(SummerMonth)
self.assertEqual(len(lst), len(SummerMonth))
self.assertEqual(len(SummerMonth), 3, SummerMonth)
self.assertEqual(
[SummerMonth.june, SummerMonth.july, SummerMonth.august],
lst,
)
for i, month in enumerate('june july august'.split(), 40):
e = SummerMonth(i)
self.assertEqual(e, i)
self.assertEqual(e.name, month)
self.assertIn(e, SummerMonth)
self.assertIs(type(e), SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertIs(Name.BDFL, getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertIs, Name.BDFL)
def test_extending(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_exclude_methods(self):
class whatever(Enum):
this = 'that'
these = 'those'
def really(self):
return 'no, not %s' % self.value
self.assertIsNot(type(whatever.really), whatever)
self.assertEqual(whatever.this.really(), 'no, not that')
def test_wrong_inheritance_order(self):
with self.assertRaises(TypeError):
class Wrong(Enum, str):
NotHere = 'error before this point'
def test_intenum_transitivity(self):
class number(IntEnum):
one = 1
two = 2
three = 3
class numero(IntEnum):
uno = 1
dos = 2
tres = 3
self.assertEqual(number.one, numero.uno)
self.assertEqual(number.two, numero.dos)
self.assertEqual(number.three, numero.tres)
def test_wrong_enum_in_call(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_wrong_enum_in_mixed_call(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(Enum):
male = 0
female = 1
self.assertRaises(ValueError, Monochrome, Gender.male)
def test_mixed_enum_in_call_1(self):
class Monochrome(IntEnum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.female), Monochrome.white)
def test_mixed_enum_in_call_2(self):
class Monochrome(Enum):
black = 0
white = 1
class Gender(IntEnum):
male = 0
female = 1
self.assertIs(Monochrome(Gender.male), Monochrome.black)
def test_flufl_enum(self):
class Fluflnum(Enum):
def __int__(self):
return int(self.value)
class MailManOptions(Fluflnum):
option1 = 1
option2 = 2
option3 = 3
self.assertEqual(int(MailManOptions.option1), 1)
def test_introspection(self):
class Number(IntEnum):
one = 100
two = 200
self.assertIs(Number.one._member_type_, int)
self.assertIs(Number._member_type_, int)
class String(str, Enum):
yarn = 'soft'
rope = 'rough'
wire = 'hard'
self.assertIs(String.yarn._member_type_, str)
self.assertIs(String._member_type_, str)
class Plain(Enum):
vanilla = 'white'
one = 1
self.assertIs(Plain.vanilla._member_type_, object)
self.assertIs(Plain._member_type_, object)
def test_no_such_enum_member(self):
class Color(Enum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
Color(4)
with self.assertRaises(KeyError):
Color['chartreuse']
def test_new_repr(self):
class Color(Enum):
red = 1
green = 2
blue = 3
def __repr__(self):
return "don't you just love shades of %s?" % self.name
self.assertEqual(
repr(Color.blue),
"don't you just love shades of blue?",
)
def test_inherited_repr(self):
class MyEnum(Enum):
def __repr__(self):
return "My name is %s." % self.name
class MyIntEnum(int, MyEnum):
this = 1
that = 2
theother = 3
self.assertEqual(repr(MyIntEnum.that), "My name is that.")
def test_multiple_mixin_mro(self):
class auto_enum(type(Enum)):
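            # metaclass that replaces Ellipsis placeholders with sequential
            # integer values, resuming the count after any explicit value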
def __new__(metacls, cls, bases, classdict):
temp = type(classdict)()
names = set(classdict._member_names)
i = 0
for k in classdict._member_names:
v = classdict[k]
if v is Ellipsis:
v = i
else:
i = v
i += 1
temp[k] = v
for k, v in classdict.items():
if k not in names:
temp[k] = v
return super(auto_enum, metacls).__new__(
metacls, cls, bases, temp)
class AutoNumberedEnum(Enum, metaclass=auto_enum):
pass
class AutoIntEnum(IntEnum, metaclass=auto_enum):
pass
class TestAutoNumber(AutoNumberedEnum):
a = ...
b = 3
c = ...
class TestAutoInt(AutoIntEnum):
a = ...
b = 3
c = ...
def test_subclasses_with_getnewargs(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs__(self):
return self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_getnewargs_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __getnewargs_ex__(self):
return self._args, {}
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce__(self):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_with_reduce_ex(self):
class NamedInt(int):
__qualname__ = 'NamedInt' # needed for pickle protocol 4
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
def __reduce_ex__(self, proto):
return self.__class__, self._args
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI' # needed for pickle protocol 4
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
test_pickle_dump_load(self.assertEqual, NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
def test_subclasses_without_direct_pickle_support_using_name(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, *args = args
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "{}({!r}, {})".format(type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'({0} + {1})'.format(self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
def __reduce_ex__(self, proto):
return getattr, (self.__class__, self._name_)
self.assertIs(NEI.__new__, Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_dump_load(self.assertIs, NEI.y)
test_pickle_dump_load(self.assertIs, NEI)
def test_tuple_subclass(self):
class SomeTuple(tuple, Enum):
__qualname__ = 'SomeTuple' # needed for pickle protocol 4
first = (1, 'for the money')
second = (2, 'for the show')
third = (3, 'for the music')
self.assertIs(type(SomeTuple.first), SomeTuple)
self.assertIsInstance(SomeTuple.second, tuple)
self.assertEqual(SomeTuple.third, (3, 'for the music'))
globals()['SomeTuple'] = SomeTuple
test_pickle_dump_load(self.assertIs, SomeTuple.first)
def test_duplicate_values_give_unique_enum_items(self):
class AutoNumber(Enum):
first = ()
second = ()
third = ()
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
self.assertEqual(
list(AutoNumber),
[AutoNumber.first, AutoNumber.second, AutoNumber.third],
)
self.assertEqual(int(AutoNumber.second), 2)
self.assertEqual(AutoNumber.third.value, 3)
self.assertIs(AutoNumber(1), AutoNumber.first)
def test_inherited_new_from_enhanced_enum(self):
class AutoNumber(Enum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
def __int__(self):
return int(self._value_)
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_inherited_new_from_mixed_enum(self):
class AutoNumber(IntEnum):
def __new__(cls):
value = len(cls.__members__) + 1
obj = int.__new__(cls, value)
obj._value_ = value
return obj
class Color(AutoNumber):
red = ()
green = ()
blue = ()
self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
self.assertEqual(list(map(int, Color)), [1, 2, 3])
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class OrderedEnum(Enum):
def __ge__(self, other):
if self.__class__ is other.__class__:
return self._value_ >= other._value_
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self._value_ > other._value_
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self._value_ <= other._value_
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self._value_ < other._value_
return NotImplemented
class Grade(OrderedEnum):
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertGreater(Grade.A, Grade.B)
self.assertLessEqual(Grade.F, Grade.C)
self.assertLess(Grade.D, Grade.A)
self.assertGreaterEqual(Grade.B, Grade.B)
self.assertEqual(Grade.B, Grade.B)
self.assertNotEqual(Grade.C, Grade.D)
def test_extending2(self):
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
with self.assertRaises(TypeError):
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_subclass_duplicate_name(self):
class Base(Enum):
def test(self):
pass
class Test(Base):
test = 1
self.assertIs(type(Test.test), Test)
def test_subclass_duplicate_name_dynamic(self):
from types import DynamicClassAttribute
class Base(Enum):
@DynamicClassAttribute
def test(self):
return 'dynamic'
class Test(Base):
test = 1
self.assertEqual(Test.test.test, 'dynamic')
def test_no_duplicates(self):
class UniqueEnum(Enum):
def __init__(self, *args):
cls = self.__class__
if any(self.value == e.value for e in cls):
a = self.name
e = cls(self.value).name
raise ValueError(
"aliases not allowed in UniqueEnum: %r --> %r"
% (a, e)
)
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
with self.assertRaises(ValueError):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
def test_init(self):
class Planet(Enum):
MERCURY = (3.303e+23, 2.4397e6)
VENUS = (4.869e+24, 6.0518e6)
EARTH = (5.976e+24, 6.37814e6)
MARS = (6.421e+23, 3.3972e6)
JUPITER = (1.9e+27, 7.1492e7)
SATURN = (5.688e+26, 6.0268e7)
URANUS = (8.686e+25, 2.5559e7)
NEPTUNE = (1.024e+26, 2.4746e7)
def __init__(self, mass, radius):
self.mass = mass # in kilograms
self.radius = radius # in meters
@property
def surface_gravity(self):
# universal gravitational constant (m3 kg-1 s-2)
G = 6.67300E-11
return G * self.mass / (self.radius * self.radius)
self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_ignore(self):
class Period(timedelta, Enum):
'''
different lengths of time
'''
def __new__(cls, value, period):
obj = timedelta.__new__(cls, value)
obj._value_ = value
obj.period = period
return obj
_ignore_ = 'Period i'
Period = vars()
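            # _ignore_ drops the scaffolding names ('Period', 'i') from the
            # finished enum, while grabbing vars() lets the loops below inject
            # members directly into the class namespace being built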
for i in range(13):
Period['month_%d' % i] = i*30, 'month'
for i in range(53):
Period['week_%d' % i] = i*7, 'week'
for i in range(32):
Period['day_%d' % i] = i, 'day'
OneDay = day_1
OneWeek = week_1
OneMonth = month_1
self.assertFalse(hasattr(Period, '_ignore_'))
self.assertFalse(hasattr(Period, 'Period'))
self.assertFalse(hasattr(Period, 'i'))
self.assertTrue(isinstance(Period.day_1, timedelta))
self.assertTrue(Period.month_1 is Period.day_30)
self.assertTrue(Period.week_4 is Period.day_28)
def test_nonhash_value(self):
class AutoNumberInAList(Enum):
def __new__(cls):
value = [len(cls.__members__) + 1]
obj = object.__new__(cls)
obj._value_ = value
return obj
class ColorInAList(AutoNumberInAList):
red = ()
green = ()
blue = ()
self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
for enum, value in zip(ColorInAList, range(3)):
value += 1
self.assertEqual(enum.value, [value])
self.assertIs(ColorInAList([value]), enum)
def test_conflicting_types_resolved_in_new(self):
class LabelledIntEnum(int, Enum):
def __new__(cls, *args):
value, label = args
obj = int.__new__(cls, value)
obj.label = label
obj._value_ = value
return obj
class LabelledList(LabelledIntEnum):
unprocessed = (1, "Unprocessed")
payment_complete = (2, "Payment Complete")
self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
self.assertEqual(LabelledList.unprocessed, 1)
self.assertEqual(LabelledList(1), LabelledList.unprocessed)
def test_auto_number(self):
class Color(Enum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_auto_name(self):
class Color(Enum):
def _generate_next_value_(name, start, count, last):
return name
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_name_inherit(self):
class AutoNameEnum(Enum):
def _generate_next_value_(name, start, count, last):
return name
class Color(AutoNameEnum):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 'blue')
self.assertEqual(Color.green.value, 'green')
def test_auto_garbage(self):
class Color(Enum):
red = 'red'
blue = auto()
self.assertEqual(Color.blue.value, 1)
def test_auto_garbage_corrected(self):
class Color(Enum):
red = 'red'
blue = 2
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 'red')
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 3)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_missing(self):
class Color(Enum):
red = 1
green = 2
blue = 3
@classmethod
def _missing_(cls, item):
if item == 'three':
return cls.blue
elif item == 'bad return':
# trigger internal error
return 5
elif item == 'error out':
raise ZeroDivisionError
else:
# trigger not found
return None
self.assertIs(Color('three'), Color.blue)
self.assertRaises(ValueError, Color, 7)
try:
Color('bad return')
except TypeError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
try:
Color('error out')
except ZeroDivisionError as exc:
self.assertTrue(isinstance(exc.__context__, ValueError))
else:
raise Exception('Exception not raised.')
def test_multiple_mixin(self):
class MaxMixin:
@classproperty
def MAX(cls):
max = len(cls)
cls.MAX = max
return max
class StrMixin:
def __str__(self):
return self._name_.lower()
class SomeEnum(Enum):
def behavior(self):
return 'booyah'
class AnotherEnum(Enum):
def behavior(self):
return 'nuhuh!'
def social(self):
return "what's up?"
class Color(MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(MaxMixin, StrMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, MaxMixin, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 3)
self.assertEqual(Color.MAX, 3)
self.assertEqual(str(Color.BLUE), 'blue')
class CoolColor(StrMixin, SomeEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolColor.RED.value, 1)
self.assertEqual(CoolColor.GREEN.value, 2)
self.assertEqual(CoolColor.BLUE.value, 3)
self.assertEqual(str(CoolColor.BLUE), 'blue')
self.assertEqual(CoolColor.RED.behavior(), 'booyah')
class CoolerColor(StrMixin, AnotherEnum, Enum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolerColor.RED.value, 1)
self.assertEqual(CoolerColor.GREEN.value, 2)
self.assertEqual(CoolerColor.BLUE.value, 3)
self.assertEqual(str(CoolerColor.BLUE), 'blue')
self.assertEqual(CoolerColor.RED.behavior(), 'nuhuh!')
self.assertEqual(CoolerColor.RED.social(), "what's up?")
class CoolestColor(StrMixin, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(CoolestColor.RED.value, 1)
self.assertEqual(CoolestColor.GREEN.value, 2)
self.assertEqual(CoolestColor.BLUE.value, 3)
self.assertEqual(str(CoolestColor.BLUE), 'blue')
self.assertEqual(CoolestColor.RED.behavior(), 'booyah')
self.assertEqual(CoolestColor.RED.social(), "what's up?")
class ConfusedColor(StrMixin, AnotherEnum, SomeEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ConfusedColor.RED.value, 1)
self.assertEqual(ConfusedColor.GREEN.value, 2)
self.assertEqual(ConfusedColor.BLUE.value, 3)
self.assertEqual(str(ConfusedColor.BLUE), 'blue')
self.assertEqual(ConfusedColor.RED.behavior(), 'nuhuh!')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
class ReformedColor(StrMixin, IntEnum, SomeEnum, AnotherEnum):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(ReformedColor.RED.value, 1)
self.assertEqual(ReformedColor.GREEN.value, 2)
self.assertEqual(ReformedColor.BLUE.value, 3)
self.assertEqual(str(ReformedColor.BLUE), 'blue')
self.assertEqual(ReformedColor.RED.behavior(), 'booyah')
self.assertEqual(ConfusedColor.RED.social(), "what's up?")
self.assertTrue(issubclass(ReformedColor, int))
def test_multiple_inherited_mixin(self):
class StrEnum(str, Enum):
def __new__(cls, *args, **kwargs):
for a in args:
if not isinstance(a, str):
raise TypeError("Enumeration '%s' (%s) is not"
" a string" % (a, type(a).__name__))
return str.__new__(cls, *args, **kwargs)
@unique
class Decision1(StrEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class MyEnum(StrEnum):
pass
@unique
class Decision2(MyEnum):
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"
RETRY = "RETRY"
class TestOrder(unittest.TestCase):
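    # _order_ is a definition-order check: the names listed in the string must
    # match the order in which members are defined (aliases are ignored);
    # a mismatch raises TypeError.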
def test_same_members(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
def test_same_members_with_aliases(self):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
verde = green
def test_same_members_wrong_order(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
blue = 3
green = 2
def test_order_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
def test_order_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue purple'
red = 1
green = 2
blue = 3
verde = green
def test_enum_has_extra_members(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
def test_enum_has_extra_members_with_aliases(self):
with self.assertRaisesRegex(TypeError, 'member order does not match _order_'):
class Color(Enum):
_order_ = 'red green blue'
red = 1
green = 2
blue = 3
purple = 4
verde = green
class TestFlag(unittest.TestCase):
"""Tests of the Flags."""
class Perm(Flag):
R, W, X = 4, 2, 1
class Open(Flag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(Flag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
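        # note: RED and BLUE are plain ints while this class body executes,
        # so PURPLE is simply the member with value 5 (1|4)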
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.0')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: 3>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: 5>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: 6>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: 1>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: 7>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: 524291>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: 524290>')
self.assertEqual(repr(~Open.AC), '<Open.CE: 524288>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC: 3>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: 2>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i | j), Perm(i.value | j.value))
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for i in Perm:
self.assertIs(i | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual((i & j).value, i.value & j.value)
self.assertIs(type(i & j), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & RWX, i)
self.assertIs(RWX & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for i in Perm:
self.assertIs(i ^ Perm(0), i)
self.assertIs(Perm(0) ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_programatic_function_string(self):
Perm = Flag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = Flag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = Flag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = Flag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = Flag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_pickle(self):
if isinstance(FlagStooges, Exception):
raise FlagStooges
test_pickle_dump_load(self.assertIs, FlagStooges.CURLY|FlagStooges.MOE)
test_pickle_dump_load(self.assertIs, FlagStooges)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertFalse(Color.BLACK in Open)
self.assertFalse(Open.RO in Color)
with self.assertRaises(TypeError):
'BLACK' in Color
with self.assertRaises(TypeError):
'RO' in Open
with self.assertRaises(TypeError):
1 in Color
with self.assertRaises(TypeError):
1 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
def test_auto_number(self):
class Color(Flag):
red = auto()
blue = auto()
green = auto()
self.assertEqual(list(Color), [Color.red, Color.blue, Color.green])
self.assertEqual(Color.red.value, 1)
self.assertEqual(Color.blue.value, 2)
self.assertEqual(Color.green.value, 4)
def test_auto_number_garbage(self):
with self.assertRaisesRegex(TypeError, 'Invalid Flag value: .not an int.'):
class Color(Flag):
red = 'not an int'
blue = auto()
def test_cascading_failure(self):
class Bizarre(Flag):
c = 3
d = 4
f = 6
# Bizarre.c | Bizarre.d
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "5 is not a valid Bizarre", Bizarre, 5)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "2 is not a valid Bizarre", Bizarre, 2)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
self.assertRaisesRegex(ValueError, "1 is not a valid Bizarre", Bizarre, 1)
def test_duplicate_auto(self):
class Dupes(Enum):
first = primero = auto()
second = auto()
third = auto()
self.assertEqual([Dupes.first, Dupes.second, Dupes.third], list(Dupes))
def test_bizarre(self):
class Bizarre(Flag):
b = 3
c = 4
d = 6
self.assertEqual(repr(Bizarre(7)), '<Bizarre.d|c|b: 7>')
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, Flag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(Flag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that only 248 members were created
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestIntFlag(unittest.TestCase):
"""Tests of the IntFlags."""
class Perm(IntFlag):
X = 1 << 0
W = 1 << 1
R = 1 << 2
class Open(IntFlag):
RO = 0
WO = 1
RW = 2
AC = 3
CE = 1<<19
class Color(IntFlag):
BLACK = 0
RED = 1
GREEN = 2
BLUE = 4
PURPLE = RED|BLUE
def test_type(self):
Perm = self.Perm
Open = self.Open
for f in Perm:
self.assertTrue(isinstance(f, Perm))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Perm.W | Perm.X, Perm))
self.assertEqual(Perm.W | Perm.X, 3)
for f in Open:
self.assertTrue(isinstance(f, Open))
self.assertEqual(f, f.value)
self.assertTrue(isinstance(Open.WO | Open.RW, Open))
self.assertEqual(Open.WO | Open.RW, 3)
def test_str(self):
Perm = self.Perm
self.assertEqual(str(Perm.R), 'Perm.R')
self.assertEqual(str(Perm.W), 'Perm.W')
self.assertEqual(str(Perm.X), 'Perm.X')
self.assertEqual(str(Perm.R | Perm.W), 'Perm.R|W')
self.assertEqual(str(Perm.R | Perm.W | Perm.X), 'Perm.R|W|X')
self.assertEqual(str(Perm.R | 8), 'Perm.8|R')
self.assertEqual(str(Perm(0)), 'Perm.0')
self.assertEqual(str(Perm(8)), 'Perm.8')
self.assertEqual(str(~Perm.R), 'Perm.W|X')
self.assertEqual(str(~Perm.W), 'Perm.R|X')
self.assertEqual(str(~Perm.X), 'Perm.R|W')
self.assertEqual(str(~(Perm.R | Perm.W)), 'Perm.X')
self.assertEqual(str(~(Perm.R | Perm.W | Perm.X)), 'Perm.-8')
self.assertEqual(str(~(Perm.R | 8)), 'Perm.W|X')
self.assertEqual(str(Perm(~0)), 'Perm.R|W|X')
self.assertEqual(str(Perm(~8)), 'Perm.R|W|X')
Open = self.Open
self.assertEqual(str(Open.RO), 'Open.RO')
self.assertEqual(str(Open.WO), 'Open.WO')
self.assertEqual(str(Open.AC), 'Open.AC')
self.assertEqual(str(Open.RO | Open.CE), 'Open.CE')
self.assertEqual(str(Open.WO | Open.CE), 'Open.CE|WO')
self.assertEqual(str(Open(4)), 'Open.4')
self.assertEqual(str(~Open.RO), 'Open.CE|AC|RW|WO')
self.assertEqual(str(~Open.WO), 'Open.CE|RW')
self.assertEqual(str(~Open.AC), 'Open.CE')
self.assertEqual(str(~(Open.RO | Open.CE)), 'Open.AC|RW|WO')
self.assertEqual(str(~(Open.WO | Open.CE)), 'Open.RW')
self.assertEqual(str(Open(~4)), 'Open.CE|AC|RW|WO')
def test_repr(self):
Perm = self.Perm
self.assertEqual(repr(Perm.R), '<Perm.R: 4>')
self.assertEqual(repr(Perm.W), '<Perm.W: 2>')
self.assertEqual(repr(Perm.X), '<Perm.X: 1>')
self.assertEqual(repr(Perm.R | Perm.W), '<Perm.R|W: 6>')
self.assertEqual(repr(Perm.R | Perm.W | Perm.X), '<Perm.R|W|X: 7>')
self.assertEqual(repr(Perm.R | 8), '<Perm.8|R: 12>')
self.assertEqual(repr(Perm(0)), '<Perm.0: 0>')
self.assertEqual(repr(Perm(8)), '<Perm.8: 8>')
self.assertEqual(repr(~Perm.R), '<Perm.W|X: -5>')
self.assertEqual(repr(~Perm.W), '<Perm.R|X: -3>')
self.assertEqual(repr(~Perm.X), '<Perm.R|W: -2>')
self.assertEqual(repr(~(Perm.R | Perm.W)), '<Perm.X: -7>')
self.assertEqual(repr(~(Perm.R | Perm.W | Perm.X)), '<Perm.-8: -8>')
self.assertEqual(repr(~(Perm.R | 8)), '<Perm.W|X: -13>')
self.assertEqual(repr(Perm(~0)), '<Perm.R|W|X: -1>')
self.assertEqual(repr(Perm(~8)), '<Perm.R|W|X: -9>')
Open = self.Open
self.assertEqual(repr(Open.RO), '<Open.RO: 0>')
self.assertEqual(repr(Open.WO), '<Open.WO: 1>')
self.assertEqual(repr(Open.AC), '<Open.AC: 3>')
self.assertEqual(repr(Open.RO | Open.CE), '<Open.CE: 524288>')
self.assertEqual(repr(Open.WO | Open.CE), '<Open.CE|WO: 524289>')
self.assertEqual(repr(Open(4)), '<Open.4: 4>')
self.assertEqual(repr(~Open.RO), '<Open.CE|AC|RW|WO: -1>')
self.assertEqual(repr(~Open.WO), '<Open.CE|RW: -2>')
self.assertEqual(repr(~Open.AC), '<Open.CE: -4>')
self.assertEqual(repr(~(Open.RO | Open.CE)), '<Open.AC|RW|WO: -524289>')
self.assertEqual(repr(~(Open.WO | Open.CE)), '<Open.RW: -524290>')
self.assertEqual(repr(Open(~4)), '<Open.CE|AC|RW|WO: -5>')
def test_or(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i | j, i.value | j.value)
self.assertEqual((i | j).value, i.value | j.value)
self.assertIs(type(i | j), Perm)
for j in range(8):
self.assertEqual(i | j, i.value | j)
self.assertEqual((i | j).value, i.value | j)
self.assertIs(type(i | j), Perm)
self.assertEqual(j | i, j | i.value)
self.assertEqual((j | i).value, j | i.value)
self.assertIs(type(j | i), Perm)
for i in Perm:
self.assertIs(i | i, i)
self.assertIs(i | 0, i)
self.assertIs(0 | i, i)
Open = self.Open
self.assertIs(Open.RO | Open.CE, Open.CE)
def test_and(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
for j in values:
self.assertEqual(i & j, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertEqual((i & j).value, i.value & j.value, 'i is %r, j is %r' % (i, j))
self.assertIs(type(i & j), Perm, 'i is %r, j is %r' % (i, j))
for j in range(8):
self.assertEqual(i & j, i.value & j)
self.assertEqual((i & j).value, i.value & j)
self.assertIs(type(i & j), Perm)
self.assertEqual(j & i, j & i.value)
self.assertEqual((j & i).value, j & i.value)
self.assertIs(type(j & i), Perm)
for i in Perm:
self.assertIs(i & i, i)
self.assertIs(i & 7, i)
self.assertIs(7 & i, i)
Open = self.Open
self.assertIs(Open.RO & Open.CE, Open.RO)
def test_xor(self):
Perm = self.Perm
for i in Perm:
for j in Perm:
self.assertEqual(i ^ j, i.value ^ j.value)
self.assertEqual((i ^ j).value, i.value ^ j.value)
self.assertIs(type(i ^ j), Perm)
for j in range(8):
self.assertEqual(i ^ j, i.value ^ j)
self.assertEqual((i ^ j).value, i.value ^ j)
self.assertIs(type(i ^ j), Perm)
self.assertEqual(j ^ i, j ^ i.value)
self.assertEqual((j ^ i).value, j ^ i.value)
self.assertIs(type(j ^ i), Perm)
for i in Perm:
self.assertIs(i ^ 0, i)
self.assertIs(0 ^ i, i)
Open = self.Open
self.assertIs(Open.RO ^ Open.CE, Open.CE)
self.assertIs(Open.CE ^ Open.CE, Open.RO)
def test_invert(self):
Perm = self.Perm
RW = Perm.R | Perm.W
RX = Perm.R | Perm.X
WX = Perm.W | Perm.X
RWX = Perm.R | Perm.W | Perm.X
values = list(Perm) + [RW, RX, WX, RWX, Perm(0)]
for i in values:
self.assertEqual(~i, ~i.value)
self.assertEqual((~i).value, ~i.value)
self.assertIs(type(~i), Perm)
self.assertEqual(~~i, i)
for i in Perm:
self.assertIs(~~i, i)
Open = self.Open
self.assertIs(Open.WO & ~Open.WO, Open.RO)
self.assertIs((Open.WO|Open.CE) & ~Open.WO, Open.CE)
def test_programatic_function_string(self):
Perm = IntFlag('Perm', 'R W X')
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_with_start(self):
Perm = IntFlag('Perm', 'R W X', start=8)
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 8<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_string_list(self):
Perm = IntFlag('Perm', ['R', 'W', 'X'])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<i
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_iterable(self):
Perm = IntFlag('Perm', (('R', 2), ('W', 8), ('X', 32)))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_dict(self):
Perm = IntFlag('Perm', OrderedDict((('R', 2), ('W', 8), ('X', 32))))
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 3, Perm)
self.assertEqual(lst, [Perm.R, Perm.W, Perm.X])
for i, n in enumerate('R W X'.split()):
v = 1<<(2*i+1)
e = Perm(v)
self.assertEqual(e.value, v)
self.assertEqual(type(e.value), int)
self.assertEqual(e, v)
self.assertEqual(e.name, n)
self.assertIn(e, Perm)
self.assertIs(type(e), Perm)
def test_programatic_function_from_empty_list(self):
Perm = enum.IntFlag('Perm', [])
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', [])
lst = list(Thing)
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_programatic_function_from_empty_tuple(self):
Perm = enum.IntFlag('Perm', ())
lst = list(Perm)
self.assertEqual(len(lst), len(Perm))
self.assertEqual(len(Perm), 0, Perm)
Thing = enum.Enum('Thing', ())
self.assertEqual(len(lst), len(Thing))
self.assertEqual(len(Thing), 0, Thing)
def test_contains(self):
Open = self.Open
Color = self.Color
self.assertTrue(Color.GREEN in Color)
self.assertTrue(Open.RW in Open)
self.assertFalse(Color.GREEN in Open)
self.assertFalse(Open.RW in Color)
with self.assertRaises(TypeError):
'GREEN' in Color
with self.assertRaises(TypeError):
'RW' in Open
with self.assertRaises(TypeError):
2 in Color
with self.assertRaises(TypeError):
2 in Open
def test_member_contains(self):
Perm = self.Perm
R, W, X = Perm
RW = R | W
RX = R | X
WX = W | X
RWX = R | W | X
self.assertTrue(R in RW)
self.assertTrue(R in RX)
self.assertTrue(R in RWX)
self.assertTrue(W in RW)
self.assertTrue(W in WX)
self.assertTrue(W in RWX)
self.assertTrue(X in RX)
self.assertTrue(X in WX)
self.assertTrue(X in RWX)
self.assertFalse(R in WX)
self.assertFalse(W in RX)
self.assertFalse(X in RW)
with self.assertRaises(TypeError):
self.assertFalse('test' in RW)
def test_bool(self):
Perm = self.Perm
for f in Perm:
self.assertTrue(f)
Open = self.Open
for f in Open:
self.assertEqual(bool(f.value), bool(f))
def test_multiple_mixin(self):
class AllMixin:
@classproperty
def ALL(cls):
members = list(cls)
all_value = None
if members:
all_value = members[0]
for member in members[1:]:
all_value |= member
cls.ALL = all_value
return all_value
class StrMixin:
def __str__(self):
return self._name_.lower()
class Color(AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'Color.BLUE')
class Color(AllMixin, StrMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
class Color(StrMixin, AllMixin, IntFlag):
RED = auto()
GREEN = auto()
BLUE = auto()
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.GREEN.value, 2)
self.assertEqual(Color.BLUE.value, 4)
self.assertEqual(Color.ALL.value, 7)
self.assertEqual(str(Color.BLUE), 'blue')
@support.reap_threads
def test_unique_composite(self):
# override __eq__ to be identity only
class TestFlag(IntFlag):
one = auto()
two = auto()
three = auto()
four = auto()
five = auto()
six = auto()
seven = auto()
eight = auto()
def __eq__(self, other):
return self is other
def __hash__(self):
return hash(self._value_)
# have multiple threads competing to complete the composite members
seen = set()
failed = False
def cycle_enum():
nonlocal failed
try:
for i in range(256):
seen.add(TestFlag(i))
except Exception:
failed = True
threads = [
threading.Thread(target=cycle_enum)
for _ in range(8)
]
with support.start_threads(threads):
pass
# check that only 248 members were created
self.assertFalse(
failed,
'at least one thread failed while creating composite members')
self.assertEqual(256, len(seen), 'too many composite members created')
class TestUnique(unittest.TestCase):
def test_unique_clean(self):
@unique
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
@unique
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
def test_unique_dirty(self):
with self.assertRaisesRegex(ValueError, 'tres.*one'):
@unique
class Dirty(Enum):
one = 1
two = 'dos'
tres = 1
with self.assertRaisesRegex(
ValueError,
'double.*single.*turkey.*triple',
):
@unique
class Dirtier(IntEnum):
single = 1
double = 1
triple = 3
turkey = 3
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
expected_help_output_with_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| An enumeration.
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
| The name of the Enum member.
|\x20\x20
| value
| The value of the Enum member.
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__
| Returns a mapping of member name->value.
|\x20\x20\x20\x20\x20\x20
| This mapping lists all enum members, including aliases. Note that this
| is a read-only view of the internal mapping."""
expected_help_output_without_docs = """\
Help on class Color in module %s:
class Color(enum.Enum)
| Color(value, names=None, *, module=None, qualname=None, type=None, start=1)
|\x20\x20
| Method resolution order:
| Color
| enum.Enum
| builtins.object
|\x20\x20
| Data and other attributes defined here:
|\x20\x20
| blue = <Color.blue: 3>
|\x20\x20
| green = <Color.green: 2>
|\x20\x20
| red = <Color.red: 1>
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.Enum:
|\x20\x20
| name
|\x20\x20
| value
|\x20\x20
| ----------------------------------------------------------------------
| Data descriptors inherited from enum.EnumMeta:
|\x20\x20
| __members__"""
class TestStdLib(unittest.TestCase):
maxDiff = None
class Color(Enum):
red = 1
green = 2
blue = 3
def test_pydoc(self):
# indirectly test __objclass__
if StrEnum.__doc__ is None:
expected_text = expected_help_output_without_docs % __name__
else:
expected_text = expected_help_output_with_docs % __name__
output = StringIO()
helper = pydoc.Helper(output=output)
helper(self.Color)
result = output.getvalue().strip()
self.assertEqual(result, expected_text)
def test_inspect_getmembers(self):
values = dict((
('__class__', EnumMeta),
('__doc__', 'An enumeration.'),
('__members__', self.Color.__members__),
('__module__', __name__),
('blue', self.Color.blue),
('green', self.Color.green),
('name', Enum.__dict__['name']),
('red', self.Color.red),
('value', Enum.__dict__['value']),
))
result = dict(inspect.getmembers(self.Color))
self.assertEqual(values.keys(), result.keys())
failed = False
for k in values.keys():
if result[k] != values[k]:
print()
print('\n%s\n key: %s\n result: %s\nexpected: %s\n%s\n' %
('=' * 75, k, result[k], values[k], '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
def test_inspect_classify_class_attrs(self):
# indirectly test __objclass__
from inspect import Attribute
values = [
Attribute(name='__class__', kind='data',
defining_class=object, object=EnumMeta),
Attribute(name='__doc__', kind='data',
defining_class=self.Color, object='An enumeration.'),
Attribute(name='__members__', kind='property',
defining_class=EnumMeta, object=EnumMeta.__members__),
Attribute(name='__module__', kind='data',
defining_class=self.Color, object=__name__),
Attribute(name='blue', kind='data',
defining_class=self.Color, object=self.Color.blue),
Attribute(name='green', kind='data',
defining_class=self.Color, object=self.Color.green),
Attribute(name='red', kind='data',
defining_class=self.Color, object=self.Color.red),
Attribute(name='name', kind='data',
defining_class=Enum, object=Enum.__dict__['name']),
Attribute(name='value', kind='data',
defining_class=Enum, object=Enum.__dict__['value']),
]
values.sort(key=lambda item: item.name)
result = list(inspect.classify_class_attrs(self.Color))
result.sort(key=lambda item: item.name)
failed = False
for v, r in zip(values, result):
if r != v:
print('\n%s\n%s\n%s\n%s\n' % ('=' * 75, r, v, '=' * 75), sep='')
failed = True
if failed:
self.fail("result does not equal expected, see print above")
class MiscTestCase(unittest.TestCase):
def test__all__(self):
support.check__all__(self, enum)
# These are unordered here on purpose to ensure that declaration order
# makes no difference.
CONVERT_TEST_NAME_D = 5
CONVERT_TEST_NAME_C = 5
CONVERT_TEST_NAME_B = 5
CONVERT_TEST_NAME_A = 5 # This one should sort first.
CONVERT_TEST_NAME_E = 5
CONVERT_TEST_NAME_F = 5
class TestIntEnumConvert(unittest.TestCase):
def test_convert_value_lookup_priority(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# We don't want the reverse lookup value to vary when there are
# multiple possible names for a given value. It should always
        # report the first lexicographical name in that case.
self.assertEqual(test_type(5).name, 'CONVERT_TEST_NAME_A')
def test_convert(self):
test_type = enum.IntEnum._convert_(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
# Ensure that test_type has all of the desired names and values.
self.assertEqual(test_type.CONVERT_TEST_NAME_F,
test_type.CONVERT_TEST_NAME_A)
self.assertEqual(test_type.CONVERT_TEST_NAME_B, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_C, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_D, 5)
self.assertEqual(test_type.CONVERT_TEST_NAME_E, 5)
# Ensure that test_type only picked up names matching the filter.
self.assertEqual([name for name in dir(test_type)
if name[0:2] not in ('CO', '__')],
[], msg='Names other than CONVERT_TEST_* found.')
@unittest.skipUnless(sys.version_info[:2] == (3, 8),
'_convert was deprecated in 3.8')
def test_convert_warn(self):
with self.assertWarns(DeprecationWarning):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
@unittest.skipUnless(sys.version_info >= (3, 9),
'_convert was removed in 3.9')
def test_convert_raise(self):
with self.assertRaises(AttributeError):
enum.IntEnum._convert(
'UnittestConvert',
('test.test_enum', '__main__')[__name__=='__main__'],
filter=lambda x: x.startswith('CONVERT_TEST_'))
if __name__ == '__main__':
unittest.main()
|
misc_utils.py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Miscellaneous utility functions
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from future.utils import iteritems
try: # Python 2
from itertools import imap as map # pylint: disable=redefined-builtin
except ImportError:
pass
# try: # Python 3
# from io import StringIO
# except ImportError: # Python 2
# from StringIO import StringIO
try: # Python 3
from urllib.parse import quote, urlencode
except ImportError: # Python 2
from urllib import urlencode
from urllib2 import quote
from resources.lib.globals import g
from .logging import error
def find(value_to_find, attribute, search_space):
"""Find a video with matching id in a dict or list"""
for video in search_space:
if video[attribute] == value_to_find:
return video
raise KeyError('Metadata for {} does not exist'.format(value_to_find))
def find_episode_metadata(videoid, metadata):
"""Find metadata for a specific episode within a show metadata dict"""
season = find(int(videoid.seasonid), 'id', metadata['seasons'])
return (find(int(videoid.episodeid), 'id', season.get('episodes', {})),
season)
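# Illustrative sketch of find() (made-up values): given
#   find(123, 'id', [{'id': 1}, {'id': 123, 'title': 'x'}])
# the second dict is returned; if nothing matches, KeyError is raised.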
def get_class_methods(class_item=None):
"""
    Returns the class methods of a given class object
:param class_item: Class item to introspect
:type class_item: object
:returns: list -- Class methods
"""
from types import FunctionType
_type = FunctionType
return [x for x, y in iteritems(class_item.__dict__)
if isinstance(y, _type)]
def build_url(pathitems=None, videoid=None, params=None, mode=None):
"""Build a plugin URL from pathitems and query parameters. Add videoid to the path if it's present."""
if not (pathitems or videoid):
raise ValueError('Either pathitems or videoid must be set.')
path = '{netloc}/{path}/{qs}'.format(
netloc=g.BASE_URL,
path=_encode_path(mode, pathitems, videoid),
qs=_encode_params(params))
return path
def _expand_mode(mode):
return [mode] if mode else []
def _expand_videoid(videoid):
return videoid.to_path() if videoid else []
def _encode_path(mode, pathitems, videoid):
return quote(
'/'.join(_expand_mode(mode) +
(pathitems or []) +
_expand_videoid(videoid)).encode('utf-8'))
def _encode_params(params):
return ('?' + urlencode(params)) if params else ''
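# Rough sketch of what the helpers above produce (illustrative values only):
#   _encode_path('play', ['video', '12345'], None)  -> 'play/video/12345'
#   _encode_params({'term': 'some show'})           -> '?term=some+show'
# so build_url(['video', '12345'], mode='play', params={'term': 'some show'})
# would yield '<g.BASE_URL>/play/video/12345/?term=some+show'.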
def is_numeric(string):
"""Return true if string represents an integer, else false"""
try:
int(string)
except ValueError:
return False
return True
def strp(value, form):
"""
Helper function to safely create datetime objects from strings
:return: datetime - parsed datetime object
"""
# pylint: disable=broad-except
from datetime import datetime
def_value = datetime.utcfromtimestamp(0)
try:
return datetime.strptime(value, form)
except TypeError:
# Python bug https://bugs.python.org/issue27400
try:
from time import strptime
return datetime(*(strptime(value, form)[0:6]))
except ValueError:
return def_value
except Exception:
return def_value
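# Example of the fallback behaviour (illustrative values):
#   strp('2020-01-31 10:15:00', '%Y-%m-%d %H:%M:%S') -> datetime(2020, 1, 31, 10, 15)
#   strp('not a date', '%Y-%m-%d')                   -> datetime.utcfromtimestamp(0)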
# def compress_data(data):
# """GZIP and b64 encode data"""
# out = StringIO()
# with gzip.GzipFile(fileobj=out, mode='w') as outh:
# outh.write(data)
# return base64.standard_b64encode(out.getvalue())
def merge_dicts(dict_to_merge, merged_dict):
"""Recursively merge the contents of dict_to_merge into merged_dict.
Values that are already present in merged_dict will be overwritten if they are also present in dict_to_merge"""
for key, value in iteritems(dict_to_merge):
if isinstance(merged_dict.get(key), dict):
merge_dicts(value, merged_dict[key])
else:
merged_dict[key] = value
return merged_dict
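# Minimal illustration of the recursive merge (hypothetical dicts):
#   merge_dicts({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}})
#   -> {'a': {'y': 3, 'x': 1}, 'b': 2}   # nested dicts merged, scalars overwritten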
def compare_dicts(dict_a, dict_b, excluded_keys=None):
    """Compare two dicts with the same keys, with optional keys to exclude from the comparison"""
if excluded_keys is None:
excluded_keys = []
return all(dict_a[k] == dict_b[k] for k in dict_a if k not in excluded_keys)
def chunked_list(seq, chunk_len):
for start in range(0, len(seq), chunk_len):
yield seq[start:start+chunk_len]
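# e.g. (illustrative) list(chunked_list([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]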
def any_value_except(mapping, excluded_keys):
    """Return a value from the mapping whose key is not in excluded_keys.
    Raises StopIteration if there is no key other than the excluded ones"""
return next(mapping[key] for key in mapping if key not in excluded_keys)
def enclose_quotes(content):
return '"' + content + '"'
def is_edge_esn(esn):
"""Return True if the esn is an EDGE esn"""
return esn.startswith('NFCDIE-02-')
def is_minimum_version(version, min_version):
    """Return True if version is equal to or greater than min_version"""
return list(map(int, version.split('.'))) >= list(map(int, min_version.split('.')))
def is_less_version(version, max_version):
    """Return True if version is less than max_version"""
return list(map(int, version.split('.'))) < list(map(int, max_version.split('.')))
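# Both comparisons work on lists of ints, so multi-digit components compare
# correctly, e.g. (illustrative) is_minimum_version('1.10.0', '1.9.2') -> True
# and is_less_version('1.9.2', '1.10.0') -> True.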
def make_list(arg):
"""Return a list with arg as its member or arg if arg is already a list. Returns an empty list if arg is None"""
return (arg
if isinstance(arg, list)
else ([arg]
if arg is not None
else []))
def convert_seconds_to_hms_str(time):
h = int(time // 3600)
time %= 3600
m = int(time // 60)
s = int(time % 60)
return '{:02d}:{:02d}:{:02d}'.format(h, m, s)
def remove_html_tags(raw_html):
import re
h = re.compile('<.*?>')
text = re.sub(h, '', raw_html)
return text
def censure(value, length=3):
"""Censor part of the string with asterisks"""
if not value:
return value
return value[:-length] + '*' * length
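# e.g. (illustrative) censure('secrettoken') -> 'secretto***'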
def run_threaded(non_blocking, target_func, *args, **kwargs):
"""Call a function in a thread, when specified"""
if not non_blocking:
target_func(*args, **kwargs)
return
from threading import Thread
thread = Thread(target=target_func, args=args, kwargs=kwargs)
thread.start()
def update_cache_videoid_runtime(window_cls):
    """Try to update the bookmarkPosition value in the cached data in order to get an updated watched status/resume time"""
# Other details in:
# progress_manager.py method: _save_resume_time()
# infolabels.py method: _set_progress_status()
runtime = window_cls.getProperty('nf_playback_resume_time')
if runtime and runtime.isdigit():
from resources.lib.api.data_types import VideoList, VideoListSorted, EpisodeList, SearchVideoList
from resources.lib.cache import CacheMiss
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.common import VideoId
cache_last_dir_call = g.LOCAL_DB.get_value('cache_last_directory_call', {}, table=TABLE_SESSION)
if not cache_last_dir_call:
return
videoid = VideoId.from_dict(g.LOCAL_DB.get_value('last_videoid_played', {}, table=TABLE_SESSION))
try:
data_object = g.CACHE.get(cache_last_dir_call['bucket'], cache_last_dir_call['identifier'])
if isinstance(data_object, (VideoList, VideoListSorted, SearchVideoList)):
data_object.videos[str(videoid.value)]['bookmarkPosition'] = int(runtime)
elif isinstance(data_object, EpisodeList):
data_object.episodes[str(videoid.value)]['bookmarkPosition'] = int(runtime)
else:
error('update_cache_videoid_runtime: cache object not mapped, bookmarkPosition not updated')
g.CACHE.update(cache_last_dir_call['bucket'], cache_last_dir_call['identifier'], data_object,
cache_last_dir_call['to_disk'])
except CacheMiss:
# No more valid cache, manual update not needed
pass
window_cls.setProperty('nf_playback_resume_time', '')
|
multprocess_crawl.py
|
import time
from multiprocessing import Process
import pandas as pd
import requests
from scrapy.selector import Selector
def main(url,icon):
    print("Download task {}".format(icon))
r = requests.get(url,headers = {"user-agent":"Mozilla/5.0"})
r.encoding = r.apparent_encoding
selector = Selector(text=r.text)
css_for_title = 'body > div.bang_wrapper > div.bang_content > div.bang_list_box > ul li > div.name > a::text'
css_for_author = 'body > div.bang_wrapper > div.bang_content > div.bang_list_box > ul li > div:nth-child(5) > a:nth-child(1)::text'
css_for_price = 'body > div.bang_wrapper > div.bang_content > div.bang_list_box > ul li > div.price > p:nth-child(1) > span.price_n::text'
titles = selector.css(css_for_title).extract()
authors = selector.css(css_for_author).extract()
prices = selector.css(css_for_price).extract()
try:
        data = {
            "Title": titles,
            "Author": authors,
            "Price": prices,
        }
df = pd.DataFrame(data, index = [i for i in range(1,21)])
df.to_excel("D:/process_version_popular_book{}.xlsx".format(icon),encoding=r.apparent_encoding)
        print("Task {} downloaded successfully".format(icon))
    except Exception:
        print("Task {} failed to download".format(icon))
if __name__ == "__main__":
url_list = ["http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-recent7-0-0-1-{}".format(i) for i in range(1,26)]
start_time = time.time()
for i in range(len(url_list)):
pro = Process(target = main, args = (url_list[i],i,))
pro.start()
|
scanbackup_20210224142513.py
|
"""
1. Files arrive here:
   one copy goes to ES, one copy is kept for ourselves.
Added: feeding data into the old ES index as well.
On top of the existing functionality, the backup program was refactored to
group data by country - current date (year-month-day).
Grouping by the timestamp inside the data would run into the same problems
as backing up by country and port, so there is no longer any need to split by port.
create by judy 20201217
"""
from pathlib import Path
import threading
import json
from queue import Queue
import traceback
import datetime
import time
from shutil import copyfile
import zipfile
import shutil
class ScanBackUP(object):
def __init__(self) -> None:
# super().__init__()
        # All data arrives here first
self._input = None
        # A copy of every file goes here; this folder is left for ES and not managed by this program
self._esinput = None
        # Data to be backed up goes here; everything that needs processing lives in this folder
self._dbu_input = None
self._databack = None
self._zipdata: Path = None
self._zip_size = None
        # One backup thread by default; can be changed in the config and takes effect after a restart
self.backup_thread = 1
self.zip_thread = 1
        # Added a switch that controls whether files are also copied to the ES input
self.copy_esinput_enable = True
self._tmp = Path('./tmp')
self._tmp.mkdir(exist_ok=True)
        # Whether a copy of the file should also go to the old ES index
self._old_esinput = None
self.config_path = Path(r'./config_path.json')
try:
self._init_cpinfo()
except:
raise Exception(
                f"Failed to initialize configuration parameters, please check the config file\nerror:{traceback.format_exc()}")
        # Parameters used below
        # File lock: only one thread may operate on files at a time
self.__file_locker = threading.Lock()
self.__scan_file_locker = threading.Lock()
self._zipfile_locker = threading.Lock()
        # Compression can take quite a while, so keep a dict of folders that are currently being zipped
self._zip_dealing = {}
        # Pending queues assigned by file suffix; currently only iscan
self.iscan_task_queue = Queue()
self._zip_queue = Queue()
self.iscan_suffix = '.iscan_search'
# try:
# self._restore_existdata()
# except:
# raise Exception(
# "There's something wrong with restoring the environment")
def _init_cpinfo(self):
"""
        Initialize the paths and parameters from the config file
:return:
"""
conf_str = self.config_path.read_text(encoding='utf-8')
conf_dict = json.loads(conf_str)
_input = conf_dict.get('data_input')
if not isinstance(_input, str):
raise Exception("Unknown data_input path")
self._input = Path(_input)
self._input.mkdir(exist_ok=True)
print(
f"Start scan data file, input_file_path:{self._input.as_posix()}")
_esinput = conf_dict.get('es_input')
if not isinstance(_esinput, str):
raise Exception("Unknown es_input path")
self._esinput = Path(_esinput)
self._esinput.mkdir(exist_ok=True)
print(f"Save data to ES, es_path:{self._esinput.as_posix()}")
_dbuinput = conf_dict.get('backup_input')
if not isinstance(_dbuinput, str):
            raise Exception("Unknown backup_input path")
self._dbu_input = Path(_dbuinput)
self._dbu_input.mkdir(exist_ok=True)
print(f"Data backup process path:{self._dbu_input.as_posix()}")
_databack = conf_dict.get('databackup')
if not isinstance(_databack, str):
raise Exception("Unknown databackup path")
self._databack = Path(_databack)
self._databack.mkdir(exist_ok=True)
print(f"Data save backup path:{self._databack.as_posix()}")
_zipdata = conf_dict.get('zipdata')
if not isinstance(_zipdata, str):
            raise Exception("Unknown zipdata path")
self._zipdata = Path(_zipdata)
self._zipdata.mkdir(exist_ok=True)
print(f"Zipdata save path:{self._zipdata.as_posix()}")
_zip_size = conf_dict.get('zip_size')
if not isinstance(_zip_size, int):
raise Exception("Unknown zip_size type")
        # Convert the unit to bytes
self._zip_size = _zip_size * 1024 * 1024
print(f"Zip data size:{_zip_size}MB")
backupthread = conf_dict.get('backup_thread')
if not isinstance(backupthread, int):
raise Exception("Unknown backupthread type")
self.backup_thread = backupthread
zipthread = conf_dict.get('zipdata_thread')
if not isinstance(zipthread, int):
raise Exception("Unknown zipthread type")
self.zip_thread = zipthread
time_limit = conf_dict.get('time_limit')
if not isinstance(time_limit, int):
raise Exception("Unknown time_limit type")
self._backup_interval_time = time_limit * 24 * 60 * 60
print(f"Zip data time expired after {time_limit} days")
        # Copying to the ES input is enabled by default
copy_esinput_enable = conf_dict.get('copy_to_esinput', True)
self.copy_esinput_enable = copy_esinput_enable
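    # A config_path.json that satisfies the checks above might look like this
    # (all paths and numbers are purely illustrative):
    # {
    #     "data_input": "./input",
    #     "es_input": "./esinput",
    #     "backup_input": "./backup_input",
    #     "databackup": "./databack",
    #     "zipdata": "./zipdata",
    #     "zip_size": 100,
    #     "backup_thread": 2,
    #     "zipdata_thread": 1,
    #     "time_limit": 7,
    #     "copy_to_esinput": true
    # }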
def scan_file(self):
"""
        Scan the input directory,
        classify files by suffix and put them into the pending queues
:return:
"""
while True:
try:
for file in self._input.iterdir():
name = file.name
                    # Move everything into the tmp directory
tmpname = self._tmp / name
# file.replace(tmpname)
with self.__scan_file_locker:
                        # Move the file into tmp as soon as possible; otherwise the next scan would pick it up again and cause problems
shutil.move(file.as_posix(), tmpname.as_posix())
try:
if tmpname.suffix == self.iscan_suffix:
                            # Only a copy is performed here
# source: Path = self._input / name
target: Path = self._dbu_input / name
copyfile(tmpname.as_posix(), target.as_posix())
self.iscan_task_queue.put(target)
print(
f"Backup iscan_search data, filename:{file.as_posix()}")
except:
print(
f'Scan list file error, err:{traceback.format_exc()}')
finally:
                        # In any case the file must finally be forwarded to esinput
if self.copy_esinput_enable:
outname = self._esinput / name
tmpname.replace(outname)
                        # Normally no file should be left behind, but accidents happen, so check and delete it if it still exists
if tmpname.exists():
tmpname.unlink()
except:
print(f'Scan task file error, err:{traceback.format_exc()}')
continue
finally:
print("There is no scan data to back up")
time.sleep(0.5)
def _process_file(self, tmpfile: Path):
"""
        Open the file, read out the information we need, then close it again
"""
with tmpfile.open('r', encoding='utf-8') as fp:
j_text = fp.read()
d_text = json.loads(j_text)
# scan_time = d_text.get('time')
# if scan_time is None:
# scan_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
country = d_text.get('geoinfo').get('country').get('code')
except:
country = 'UNKNOWN'
return country
def back_file(self):
"""
        Start backing up data: save it into a folder first,
        then compress and store the folder once it reaches a certain size
:return:
"""
got = False
while True:
got = False
if self.iscan_task_queue.empty():
time.sleep(0.5)
continue
try:
bfile: Path = self.iscan_task_queue.get()
got = True
name = bfile.name
                # Now read the country and date directly from the file
country = self._process_file(bfile)
                # Before each save, check whether the file name needs changing and a compressed backup is due
date_now_str = datetime.datetime.now().strftime("%Y-%m-%d")
                # Lock only while creating the folder; plain moves do not need it
with self.__file_locker:
                    # Move the file over first
dirname: Path = self._databack / country / date_now_str
dirname.mkdir(exist_ok=True, parents=True)
                    # Name of the file after the move
filename = dirname / name
                    # Move it into the target folder
bfile.replace(filename)
print(
f"Backup file, country:{country}, filename:{name}, date:{date_now_str}")
except:
print(f'Backup file error:\n{traceback.format_exc()}')
finally:
if got:
self.iscan_task_queue.task_done()
def scan_zip_file(self):
"""
        Thread that compresses files: scans once a day
        and compresses yesterday's folders into the zip data folder
"""
while True:
try:
date_now = datetime.datetime.now().date()
for country in self._databack.iterdir():
if not country.exists():
continue
country_name = country.name
for d_file in country.iterdir():
                        if d_file in self._zip_dealing:
continue
d_name = d_file.name
d_date = datetime.datetime.strptime(
d_name, "%Y-%m-%d").date()
                        # If the data is from before today, compress it
if date_now > d_date:
self._zip_queue.put((d_file, country_name))
with self._zipfile_locker:
                                # Add it to the in-progress dict
self._zip_dealing[d_file] = 1
print(
f"A file wait to zip, filename:{d_file.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
print("There is no scan data to zip")
time.sleep(3600)
def process_zip_file(self):
"""
        Compress folders dated before today
"""
got = False
zipfile_path = None
while True:
got = False
if self._zip_queue.empty():
time.sleep(1)
continue
try:
zipfile_path, country = self._zip_queue.get()
got = True
zip_store_file = self._zipdata / country
zip_store_file.mkdir(exist_ok=True)
zipname = zip_store_file/f"{zipfile_path.name}.zip"
print(
f"Start zipfile, filename:{zipname.as_posix()}")
                # Add a write restriction
with zipfile.ZipFile(zipname.as_posix(), 'a', zipfile.ZIP_DEFLATED) as write:
for file in zipfile_path.iterdir():
write.write(file.as_posix())
                        # Delete the file once it has been written
file.unlink()
write.close()
                # Finally remove the folder that has just been compressed
zipfile_path.rmdir()
print(
f"Store zipfile success, filename:{zipname.as_posix()}")
except:
print(f"Zip file error:\n{traceback.format_exc()}")
finally:
if got:
self._zip_queue.task_done()
with self._zipfile_locker:
self._zip_dealing.pop(zipfile_path, None)
def start(self):
"""
        Start the service
:return:
"""
thread1 = threading.Thread(target=self.scan_file, name="scanfile")
thread1.start()
for i in range(self.backup_thread):
t = threading.Thread(target=self.back_file, name=f"backfile{i}")
t.start()
thread2 = threading.Thread(
target=self.scan_zip_file, name=f"scan_zipfile")
thread2.start()
for j in range(self.zip_thread):
tz = threading.Thread(
target=self.process_zip_file, name=f"zipfile{j}")
tz.start()
if __name__ == "__main__":
scup = ScanBackUP()
scup.start()
|
custom_gevent_pool_executor.py
|
# -*- coding: utf-8 -*-
# @Author : ydf
# @Time : 2019/7/2 14:11
import atexit
import time
import warnings
from collections.abc import Callable
import threading
import gevent
from gevent import pool as gevent_pool
from gevent import monkey
from gevent.queue import JoinableQueue
from nb_log import LoggerMixin, nb_print, LogManager
def check_gevent_monkey_patch(raise_exc=True):
    if not monkey.is_module_patched('socket'):  # pick an arbitrary module as the check flag
if raise_exc:
            warnings.warn('It looks like the gevent monkey patch has not been applied yet; please put "import gevent.monkey;gevent.monkey.patch_all()" on the very first line of your entry script.')
            raise Exception('It looks like the gevent monkey patch has not been applied yet; please put "import gevent.monkey;gevent.monkey.patch_all()" on the very first line of your entry script.')
else:
return 1
logger_gevent_timeout_deco = LogManager('gevent_timeout_deco').get_logger_and_add_handlers()
def gevent_timeout_deco(timeout_t):
def _gevent_timeout_deco(f):
def __gevent_timeout_deceo(*args, **kwargs):
timeout = gevent.Timeout(timeout_t, )
timeout.start()
result = None
try:
result = f(*args, **kwargs)
except gevent.Timeout as t:
                logger_gevent_timeout_deco.error(f'Function {f} ran longer than {timeout_t} seconds')
if t is not timeout:
nb_print(t)
# raise # not my timeout
finally:
timeout.close()
return result
return __gevent_timeout_deceo
return _gevent_timeout_deco
class GeventPoolExecutor(gevent_pool.Pool):
def __init__(self, size=None, greenlet_class=None):
        check_gevent_monkey_patch()  # checked in basecomer.py as well.
super().__init__(size, greenlet_class)
atexit.register(self.shutdown)
def submit(self, *args, **kwargs):
self.spawn(*args, **kwargs)
def shutdown(self):
self.join()
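# Minimal usage sketch for GeventPoolExecutor (assumes gevent.monkey.patch_all()
# was applied first; some_task is a hypothetical callable):
#   pool = GeventPoolExecutor(100)
#   pool.submit(some_task, arg1)   # forwarded to gevent Pool.spawn
#   pool.shutdown()                # joins the pool before exit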
class GeventPoolExecutor2(LoggerMixin):
def __init__(self, max_works, ):
self._q = JoinableQueue(maxsize=max_works)
# self._q = Queue(maxsize=max_works)
for _ in range(max_works):
gevent.spawn(self.__worker)
# atexit.register(self.__atexit)
self._q.join(timeout=100)
def __worker(self):
while True:
fn, args, kwargs = self._q.get()
try:
fn(*args, **kwargs)
except Exception as exc:
                self.logger.exception(f'Error in function {fn.__name__}, cause: {type(exc)} {exc} ')
finally:
pass
self._q.task_done()
def submit(self, fn: Callable, *args, **kwargs):
self._q.put((fn, args, kwargs))
def __atexit(self):
        self.logger.critical('About to exit the program.')
self._q.join()
class GeventPoolExecutor3(LoggerMixin):
def __init__(self, max_works, ):
self._q = gevent.queue.Queue(max_works)
self.g_list = []
for _ in range(max_works):
self.g_list.append(gevent.spawn(self.__worker))
atexit.register(self.__atexit)
def __worker(self):
while True:
fn, args, kwargs = self._q.get()
try:
fn(*args, **kwargs)
except Exception as exc:
                self.logger.exception(f'Error in function {fn.__name__}, cause: {type(exc)} {exc} ')
def submit(self, fn: Callable, *args, **kwargs):
self._q.put((fn, args, kwargs))
def joinall(self):
gevent.joinall(self.g_list)
def joinall_in_new_thread(self):
        threading.Thread(target=self.joinall).start()
def __atexit(self):
        self.logger.critical('About to exit the program.')
self.joinall()
if __name__ == '__main__':
monkey.patch_all(thread=False)
def f2(x):
time.sleep(3)
nb_print(x * 10)
pool = GeventPoolExecutor3(40)
for i in range(20):
time.sleep(0.1)
        nb_print(f'submitting {i}')
pool.submit(gevent_timeout_deco(8)(f2), i)
# pool.joinall_in_new_thread()
nb_print(66666666)
|
fetch_refseq.py
|
#!/usr/bin/env python
from __future__ import division, print_function
import argparse
import functools
import gzip
import json
import os
import os.path
import sys
from datetime import date
from multiprocessing import Process, Queue
import requests
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
# Refseq structure
# - Release number
# - Divisions
# 1. archea
# 2. bacteria
# 3. fungi
# 4. invertebrate
# 5. mitochondrion
# 6. other
# 7. plant
# 8. plasmid
# 9. plastid
# 10. protozoa
# 11. vertebrate mammalian
# 12. vertebrate other
# 13. viral
# within each division
# DIVNAME.\d+(.\d+)?.(genomic|protein|rna).(fna|gbff|faa|gpff).gz
# where fna and faa are FASTA, gbff and gpff are Genbank
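# e.g. a listing entry such as viral.1.1.genomic.fna.gz matches this pattern
# (example name only; real names depend on the current release number)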
def _add_data_table_entry(data_manager_dict, data_table_entry, data_table_name):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get('all_fasta', [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def unzip_to(conn, out_dir, output_filename, chunk_size=4096, debug=False, compress=False):
input_filename = conn.get()
if compress:
open_output = gzip.open
else:
open_output = open
with open_output(os.path.join(out_dir, output_filename), 'wb') as output_file:
while input_filename != 'STOP':
if debug:
print('Reading', input_filename, file=sys.stderr)
with gzip.open(input_filename, 'rb') as input_file:
read_chunk = functools.partial(input_file.read, (chunk_size))
for data in iter(read_chunk, b''): # use b'' as a sentinel to stop the loop. note '' != b'' in Python 3
output_file.write(data)
os.unlink(input_filename)
input_filename = conn.get()
def get_refseq_division(division_name, mol_types, output_directory, debug=False, compress=False):
base_url = 'https://ftp.ncbi.nlm.nih.gov/refseq/release/'
valid_divisions = set(['archea', 'bacteria', 'complete', 'fungi', 'invertebrate', 'mitochondrion', 'other',
'plant', 'plasmid', 'plastid', 'protozoa', 'vertebrate_mammalian', 'vertebrate_other', 'viral'])
ending_mappings = {
'genomic': '.genomic.fna.gz',
'protein': '.protein.faa.gz',
'rna': 'rna.fna.gz'
}
assert division_name in valid_divisions, "Unknown division name ({})".format(division_name)
for mol_type in mol_types:
assert mol_type in ending_mappings, "Unknown molecule type ({})".format(mol_type)
if not os.path.exists(output_directory):
os.mkdir(output_directory)
release_num_file = base_url + 'RELEASE_NUMBER'
r = requests.get(release_num_file)
release_num = r.text.strip()
division_base_url = base_url + division_name
if debug:
print('Retrieving {}'.format(division_base_url), file=sys.stderr)
r = requests.get(division_base_url)
listing_text = r.text
unzip_queues = {}
unzip_processes = []
final_output_filenames = []
for mol_type in mol_types:
q = unzip_queues[mol_type] = Queue()
output_filename = division_name + '.' + release_num + '.' + mol_type + '.fasta'
if compress:
output_filename += '.gz'
final_output_filenames.append(output_filename)
unzip_processes.append(Process(target=unzip_to, args=(q, output_directory, output_filename),
kwargs=dict(debug=debug, compress=compress)))
unzip_processes[-1].start()
# sample line: <a href="vertebrate_other.86.genomic.gbff.gz">vertebrate_other.86.genomic.gbff.gz</a> 2018-07-13 00:59 10M
for line in StringIO(listing_text):
if '.gz' not in line:
continue
parts = line.split('"')
assert len(parts) == 3, "Unexpected line format: {}".format(line.rstrip())
filename = parts[1]
for mol_type in mol_types:
ending = ending_mappings[mol_type]
if filename.endswith(ending):
if debug:
print('Downloading:', filename, ending, mol_type, file=sys.stderr)
output_filename = os.path.join(output_directory, filename)
with open(output_filename, 'wb') as output_file:
r = requests.get(division_base_url + '/' + filename)
for chunk in r.iter_content(chunk_size=4096):
output_file.write(chunk)
conn = unzip_queues[mol_type]
conn.put(output_filename)
for mol_type in mol_types:
conn = unzip_queues[mol_type]
conn.put('STOP')
return [release_num, final_output_filenames]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download RefSeq databases')
parser.add_argument('--debug', default=False, action='store_true', help='Print debugging output to stderr (verbose)')
parser.add_argument('--compress', default=False, action='store_true', help='Compress output files')
parser.add_argument('--output_directory', default='tmp', help='Directory to write output to')
parser.add_argument('--galaxy_datamanager_filename', help='Galaxy JSON format file describing data manager inputs')
parser.add_argument('--division_names', help='RefSeq divisions to download')
parser.add_argument('--mol_types', help='Molecule types (genomic, rna, protein) to fetch')
parser.add_argument('--pin_date', help='Force download date to this version string')
args = parser.parse_args()
division_names = args.division_names.split(',')
mol_types = args.mol_types.split(',')
if args.galaxy_datamanager_filename is not None:
with open(args.galaxy_datamanager_filename) as fh:
dm_opts = json.load(fh)
output_directory = dm_opts['output_data'][0]['extra_files_path'] # take the extra_files_path of the first output parameter
data_manager_dict = {}
else:
output_directory = args.output_directory
for division_name in division_names:
if args.pin_date is not None:
today_str = args.pin_date
else:
today_str = date.today().strftime('%Y-%m-%d') # ISO 8601 date format
[release_num, fasta_files] = get_refseq_division(division_name, mol_types, output_directory, args.debug, args.compress)
if args.galaxy_datamanager_filename is not None:
for i, mol_type in enumerate(mol_types):
assert mol_type in fasta_files[i], "Filename does not contain expected mol_type ({}, {})".format(mol_type, fasta_files[i])
unique_key = 'refseq_' + division_name + '.' + release_num + '.' + mol_type # note: this is now same as dbkey
dbkey = unique_key
desc = 'RefSeq ' + division_name + ' Release ' + release_num + ' ' + mol_type + ' (' + today_str + ')'
path = os.path.join(output_directory, fasta_files[i])
_add_data_table_entry(data_manager_dict=data_manager_dict,
data_table_entry=dict(value=unique_key, dbkey=dbkey, name=desc, path=path),
data_table_name='all_fasta')
with open(args.galaxy_datamanager_filename, 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 27339
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
rpc.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import json
import logging
import sys
import threading
import time
import grpc
from graphscope.config import GSConfig as gs_config
from graphscope.framework.errors import ConnectionError
from graphscope.framework.errors import GRPCError
from graphscope.framework.errors import check_grpc_response
from graphscope.proto import coordinator_service_pb2_grpc
from graphscope.proto import error_codes_pb2
from graphscope.proto import message_pb2
logger = logging.getLogger("graphscope")
def catch_grpc_error(fn):
"""Print error info from a :class:`grpc.RpcError`."""
@functools.wraps(fn)
def with_grpc_catch(*args, **kwargs):
try:
return fn(*args, **kwargs)
except grpc.RpcError as exc:
if isinstance(exc, grpc.Call):
# pylint: disable=no-member
raise GRPCError(
"rpc %s: failed with error code %s, details: %s"
% (fn.__name__, exc.code(), exc.details())
) from exc
else:
raise GRPCError(
"rpc %s failed: status %s" % (str(fn.__name__), exc)
) from exc
return with_grpc_catch
def suppress_grpc_error(fn):
"""Suppress the GRPC error."""
@functools.wraps(fn)
def with_grpc_catch(*args, **kwargs):
try:
return fn(*args, **kwargs)
except grpc.RpcError as exc:
if isinstance(exc, grpc.Call):
logger.warning(
"Grpc call '%s' failed: %s: %s",
fn.__name__,
exc.code(),
exc.details(),
)
except Exception as exc: # noqa: F841
logger.warning("RPC call failed: %s", exc)
return with_grpc_catch
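# Minimal, illustrative sketch (not used anywhere in this module): the two decorators differ only
# in failure handling -- catch_grpc_error re-raises grpc.RpcError as GRPCError, while
# suppress_grpc_error only logs it and returns None. `stub` is assumed to be a CoordinatorServiceStub.
@catch_grpc_error
def _heartbeat_or_raise(stub):
    request = message_pb2.HeartBeatRequest()
    return stub.HeartBeat(request)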
class GRPCClient(object):
def __init__(self, endpoint):
"""Connect to GRAPE engine at the given :code:`endpoint`."""
# create the gRPC stub
options = [
("grpc.max_send_message_length", 2147483647),
("grpc.max_receive_message_length", 2147483647),
]
self._channel = grpc.insecure_channel(endpoint, options=options)
self._stub = coordinator_service_pb2_grpc.CoordinatorServiceStub(self._channel)
self._session_id = None
self._logs_fetching_thread = None
def waiting_service_ready(self, timeout_seconds=60):
begin_time = time.time()
request = message_pb2.HeartBeatRequest()
# Do not drop this line, which is for handling KeyboardInterrupt.
response = None
while True:
try:
response = self._stub.HeartBeat(request)
except Exception:
# a grpc disconnect is expected here
response = None
finally:
if response is not None:
if response.status.code == error_codes_pb2.OK:
logger.info("GraphScope coordinator service connected.")
break
time.sleep(1)
if time.time() - begin_time >= timeout_seconds:
if response is None:
msg = "grpc connnect failed."
else:
msg = response.status.error_msg
raise ConnectionError("Connect coordinator timeout, {}".format(msg))
def connect(self):
return self._connect_session_impl()
@property
def session_id(self):
return self._session_id
def __str__(self):
return "%s" % self._session_id
def __repr__(self):
return str(self)
def run(self, dag_def):
return self._run_step_impl(dag_def)
def fetch_logs(self):
if self._logs_fetching_thread is None:
self._logs_fetching_thread = threading.Thread(
target=self._fetch_logs_impl, args=()
)
self._logs_fetching_thread.daemon = True
self._logs_fetching_thread.start()
@catch_grpc_error
def send_heartbeat(self):
request = message_pb2.HeartBeatRequest()
return self._stub.HeartBeat(request)
@catch_grpc_error
def create_interactive_engine(
self,
object_id,
schema_path,
gremlin_server_cpu,
gremlin_server_mem,
engine_params,
):
request = message_pb2.CreateInteractiveRequest(
object_id=object_id,
schema_path=schema_path,
gremlin_server_cpu=gremlin_server_cpu,
gremlin_server_mem=gremlin_server_mem,
engine_params=engine_params,
)
response = self._stub.CreateInteractiveInstance(request)
return check_grpc_response(response)
@catch_grpc_error
def close_interactive_engine(self, object_id):
request = message_pb2.CloseInteractiveRequest(object_id=object_id)
response = self._stub.CloseInteractiveInstance(request)
return check_grpc_response(response)
@catch_grpc_error
def create_learning_engine(self, object_id, handle, config):
request = message_pb2.CreateLearningInstanceRequest(
object_id=object_id,
handle=handle,
config=config,
)
response = self._stub.CreateLearningInstance(request)
response = check_grpc_response(response)
return response.endpoints
@catch_grpc_error
def close_learning_engine(self, object_id):
request = message_pb2.CloseLearningInstanceRequest(object_id=object_id)
response = self._stub.CloseLearningInstance(request)
return check_grpc_response(response)
def close(self, stop_instance=True):
"""
Args:
stop_instance (bool, optional): If true,
also delete graphscope instance (such as pod) in closing process.
"""
if self._session_id:
self._close_session_impl(stop_instance=stop_instance)
self._session_id = None
if self._logs_fetching_thread:
self._logs_fetching_thread.join(timeout=5)
@catch_grpc_error
def _connect_session_impl(self):
request = message_pb2.ConnectSessionRequest()
response = self._stub.ConnectSession(request)
response = check_grpc_response(response)
self._session_id = response.session_id
return (
response.session_id,
response.session_type,
json.loads(response.engine_config),
response.pod_name_list,
response.num_workers,
response.namespace,
)
@suppress_grpc_error
def _fetch_logs_impl(self):
request = message_pb2.FetchLogsRequest(session_id=self._session_id)
responses = self._stub.FetchLogs(request)
for resp in responses:
resp = check_grpc_response(resp)
message = resp.message.rstrip()
if message:
logger.info(message, extra={"simple": True})
@catch_grpc_error
def _close_session_impl(self, stop_instance=True):
request = message_pb2.CloseSessionRequest(
session_id=self._session_id, stop_instance=stop_instance
)
response = self._stub.CloseSession(request)
return check_grpc_response(response)
@catch_grpc_error
def _run_step_impl(self, dag_def):
request = message_pb2.RunStepRequest(
session_id=self._session_id, dag_def=dag_def
)
response = self._stub.RunStep(request)
return check_grpc_response(response)
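# Minimal, illustrative lifecycle sketch (not used anywhere in this module): the endpoint is a
# placeholder and `dag_def` must be a DagDef message built by the caller.
def _grpc_client_lifecycle_example(endpoint="localhost:63800"):
    client = GRPCClient(endpoint)
    client.waiting_service_ready(timeout_seconds=60)
    session_id, session_type, engine_config, pod_names, num_workers, namespace = client.connect()
    client.fetch_logs()  # stream coordinator logs in a background thread
    # response = client.run(dag_def)  # run a dag_def built elsewhere
    client.close(stop_instance=True)
    return session_id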
|
datasets.py
|
# -*- coding: utf-8 -*-
"""
© Michael Widrich, Markus Hofmarcher, 2017
Template and parent classes for creating reader/loader classes for datasets
"""
import glob
import time
import numpy as np
import multiprocessing
import pandas as pd
from os import path
from PIL import Image
from TeLL.utility.misc import load_files_in_dir
from abc import ABCMeta, abstractmethod
def load_image(image_path, image_scaling_factor, resampling_method):
"""Load an image from image_path, resize according to self.image_scaling_factor with resampling_method
and apply optional function 'preprocessing'
"""
# Load image
with Image.open(image_path) as im:
image = im.copy()
# Resize if necessary
if image_scaling_factor != 1:
width, height = image.size
resolution = (int(width / image_scaling_factor), int(height / image_scaling_factor))
image = image.resize(resolution, resample=resampling_method)
return image
def add_color_jittering(image, jitter):
image = np.asarray(image, dtype=np.float32)
r = np.clip((image[:, :, 0] + jitter[0])[:, :, None], 0, 255)
g = np.clip((image[:, :, 1] + jitter[1])[:, :, None], 0, 255)
b = np.clip((image[:, :, 2] + jitter[2])[:, :, None], 0, 255)
image = np.concatenate((r, g, b), axis=2)
return image
def add_luminance(image):
"""Calculate luminance and add it as channel to the input image"""
image = np.asarray(image, dtype=np.float32)
image = np.concatenate((image, (0.2126 * image[:, :, 0]
+ 0.7152 * image[:, :, 1]
+ 0.0722 * image[:, :, 2])[:, :, None]), axis=2)
return image
def stretch_values(image):
"""Stretch values in an image/array to be within [0,1]"""
image = np.asarray(image, dtype=np.float32)
# Stretch pixel values from 0-1 (and make sure division is not by 0)
image -= np.min(image)
image /= (np.max(image) or 1)
return image
def zoom_into_image(image: Image.Image, zoom_factor: float, left_lower_corner: tuple = (0, 0), resample: int = Image.NEAREST):
"""Zoom into area of image (i.e. crop to area at position left_lower_corner and rescale area to original image size)
Parameters
-------
image : PIL.Image
PIL image
zoom_factor : float
Zoom into image with a factor zoom_factor >= 1.
left_lower_corner: tuple
Tuple with position of left lower corner of area to be zoomed into as (horizontal_pos, vertical_pos)
resample: int
Resampling filter to be used by PIL resize
"""
if zoom_factor < 1.:
raise ValueError("zoom_factor has to be >= 1. but is {}".format(zoom_factor))
elif zoom_factor == 1.:
return image
full_size = image.size
zoom_area_shape = [np.round(size / zoom_factor).astype(np.int) for size in full_size]
crop_box = (left_lower_corner[0], left_lower_corner[1],
left_lower_corner[0] + zoom_area_shape[0], left_lower_corner[1] + zoom_area_shape[1])
zoom_area = image.crop(crop_box) # Error in PIL documentation: crop_box is actually (left, lower, right, upper)!!!
zoom_area = zoom_area.resize(full_size, resample=resample)
return zoom_area
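# Minimal, illustrative sketch (not used anywhere in this module; assumes a NumPy version where
# np.int is still available): zoom_into_image crops an area and scales it back up, so the output
# keeps the input resolution.
def _zoom_into_image_example():
    demo = Image.new("RGB", (100, 80))
    zoomed = zoom_into_image(demo, zoom_factor=2.0, left_lower_corner=(10, 5))
    assert zoomed.size == demo.size  # the 50x40 crop is rescaled back to 100x80
    return zoomed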
def ising_dropout(shape, generate: bool = False, directory: str = None, keep_prob: float = 0.5,
beta: float = 0.5, beta_step: float = 1.1, num_steps: int = 400, **kwargs):
"""Apply Ising dropout
Masks are either generated on the fly (requires TensorFlow) or sampled from precomputed images in 'directory'
Parameters
-------
shape : vector
Contains width and height of the image
generate : bool
If false, pictures from path are going to be sampled
directory: string
The directory where precomputed Ising images can be found
"""
import os
if generate:
from TeLL.scripts.dropoutmask import make_ising_mask
from PIL import Image
import tensorflow as tf
shape = np.asarray(shape[0:2])
shape = np.insert(shape, 0, 1)
shape = np.append(shape, 1)
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
ising_img_tf = make_ising_mask(shape=shape, keep_prob=keep_prob, num_steps=num_steps, beta=beta,
beta_step=beta_step)
sess.run(tf.global_variables_initializer())
samples_np = sess.run(ising_img_tf)
ising_img = Image.fromarray(samples_np[0, :, :, 0])
sess.close()
else:
import random
from PIL import Image
random.seed(12345)
ising_files = glob.glob(os.path.join(directory, '*.png'))
ising_file = ising_files[random.randint(0, len(ising_files) - 1)]
with Image.open(ising_file) as im:
ising_img = im.copy()
ising_img = ising_img.resize((shape[1], shape[0]), Image.ANTIALIAS)
return ising_img
def ising_image_overlay_input(input_img, ising_img):
input_img = np.asarray(input_img)
ising_img = np.asarray(ising_img)
# Sample and label image have different shapes (label has dim of 2)
if input_img.shape[-1] == 3:
return input_img * np.repeat(ising_img, 3).reshape((ising_img.shape[0], ising_img.shape[1], 3))
else:
return input_img * ising_img
def ising_image_overlay_label(input_img, ising_img, void_class=255):
input_img = np.asarray(input_img)
ising_img = np.asarray(ising_img)
image_out = np.where(ising_img == 1, input_img, void_class)
return image_out
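# Minimal, illustrative sketch (not used anywhere in this module): a synthetic 0/1 mask stands in
# for an Ising mask to show how the two overlay helpers are combined.
def _ising_overlay_example():
    rng = np.random.RandomState(0)
    mask = rng.randint(0, 2, size=(8, 8)).astype(np.float32)  # placeholder for an Ising mask
    rgb = np.ones((8, 8, 3), dtype=np.float32)                # dummy input image
    label = np.zeros((8, 8), dtype=np.uint8)                  # dummy label image
    masked_rgb = ising_image_overlay_input(rgb, mask)         # dropped pixels become 0
    masked_label = ising_image_overlay_label(label, mask, void_class=255)  # dropped pixels -> void
    return masked_rgb, masked_label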
class DatareaderSimpleFiles(object):
def __init__(self, sample_directory: str, label_directory: str, batchsize: int, sample_suffix: str = '',
label_suffix: str = '', verbose: bool = True):
"""Dataset reader template
Template for a dataset reader with background workers for loading the samples in sample_directory and the
labels in label_directory into minibatches of size batchsize; Parts that need to be adapted to the specific
tasks are indicated with "TODO"; Derive from this class to create new datareaders and overwrite __init__ and
other functions as required for your task;
Parameters
-------
sample_directory : str
Path to input files
label_directory : str
Path to label files
batchsize : int
Batchsize
sample_suffix : str
Optional suffix to filter sample files by (e.g. '.png')
label_suffix : str
Optional suffix to filter label files by (e.g. '.png')
"""
#
# Search for and store the filenames
#
self.log("Collecting sample data...")
samples = load_files_in_dir(sample_directory, sample_suffix)
self.log("Found {} sample files".format(len(samples)))
self.log("Collecting label data...")
labels = load_files_in_dir(label_directory, label_suffix)
self.log("Found {} label files".format(len(labels)))
#
# Calculate number of minibatches
#
n_mbs = np.int(np.ceil(len(labels) / batchsize))
#
# Determine shape of samples and labels
#
# TODO: Since this is a template, the shape determination has to modified to fit the specific task
X_shape = (batchsize, 5)
y_shape = (batchsize, 1)
#
# Set attributes of reader (these are required for the class to work properly)
#
self.verbose = verbose
self.processes = list()
self.samples = samples
self.n_samples = len(samples)
self.labels = labels
self.n_labels = len(labels)
self.batchsize = batchsize
self.n_mbs = n_mbs
self.X_shape = X_shape
self.y_shape = y_shape
def create_minibatch(self, mb_indices, mb_id):
"""This function shall load the data at label index mb_indices into one minibatch and has to be adapted to the
specific task; It should return an object that will then automatically be returned for each iteration of
batch_loader;
Parameters
-------
mb_indices : int
(Shuffled) index for self.labels to use in this minibatch
mb_id : int
Current minibatch number
Returns
------
: object
Yields a new minibatch for each iteration over batch_loader
"""
self.log("Fetching batch {}...".format(mb_id))
# TODO: In this function you specify how you want to load or preprocess your data for the label-index mb_indices
def some_loader_function(something_to_load):
"""Load and preprocess your data"""
some_input, some_target, some_id = np.zeros((50, 7)), np.zeros((50, 1)), 0
return some_input, some_target, some_id
# Reset minibatch values
X, y, ID = some_loader_function(self.labels[mb_indices])
return dict(X=X, y=y, ID=ID)
def batch_loader(self, num_cached: int = 5, num_threads: int = 3, rnd_gen=None, shuffle=True):
"""Function to use for loading minibatches
This function will start num_threads background workers to load the minibatches via the create_minibatch
function; At most num_cached minibatches will be held in memory at once per thread; The returned object is
the minibatch that is yielded (i.e. this function can be iterated over);
Parameters
-------
num_cached : int
Maximum number of minibatches to be stored in memory at once per thread
num_threads : int
Number of background workers to use for loading the minibatches
rnd_gen : numpy random generator or None
Random generator to use for shuffling of samples; If None, a new numpy random generator will be created and
used;
shuffle : bool
True: Shuffle the samples
Yields
------
: object
Yields a new minibatch for each iteration over batch_loader
Example
------
>>> trainingset = DatareaderSimpleFiles(...)
>>> mb_loader = trainingset.batch_loader(...)
>>> for mb_i, mb in enumerate(mb_loader):
>>> print("Minibatch number {} has the contents {}".format(mb_i, mb))
"""
#
# Create queues and workers
#
mb_ind_queues = [multiprocessing.Queue(0) for _ in range(num_threads)]
mb_queues = [multiprocessing.Queue(num_cached) for _ in range(num_threads)]
self.log("Starting background loaders...", end=" ")
for thread in range(num_threads):
proc = multiprocessing.Process(target=self.__load_mb__, args=(mb_ind_queues[thread], mb_queues[thread]))
proc.daemon = False
proc.start()
self.processes.append(proc)
self.log("DONE")
#
# Get indices of valid samples to load
#
indices = np.arange(self.n_labels)
# shuffle batches across minibatches
self.log(" Shuffling samples...", end=" ")
if shuffle:
if rnd_gen is None:
np.random.shuffle(indices)
else:
rnd_gen.shuffle(indices)
self.log("DONE")
minibatch_slices = [slice(i * self.batchsize, (i + 1) * self.batchsize) for i in np.arange(self.n_mbs)]
# Put indices to be processed into queue
self.log(" Filling input queue...", end=" ")
thread = 0
for mb_sl_i, mb_sl in enumerate(minibatch_slices):
mb_ind_queues[thread].put([indices[mb_sl], mb_sl_i])
thread += 1
if thread >= num_threads:
thread = 0
# Put None at end of queue to signal end
for thread in range(num_threads):
mb_ind_queues[thread].put(None)
self.log("DONE")
# Get results from background workers, loop through different worker queues to keep order
thread = 0
for _ in minibatch_slices:
# each subprocess returns a minibatch and its index in the procs list
mb = mb_queues[thread].get()
yield mb
thread += 1
if thread >= num_threads:
thread = 0
# Check if each worker has reached its end
for thread in range(num_threads):
if mb_queues[thread].get() is not None:
raise ValueError("Error in queues!")
# Close queue and processes
for thread in range(num_threads):
mb_ind_queues[thread].close()
mb_queues[thread].close()
self.close()
def __load_mb__(self, in_queue, out_queue):
"""
Load sample ids from in_queue and write loaded samples into out queue
:param in_queue:
:param out_queue:
:return:
"""
while True:
input = in_queue.get()
if input is None:
self.log("Putting sentinel", end=" ")
out_queue.put(None)
self.log("Terminated")
return 0
mb_indices, mb_id = input
minibatch = self.create_minibatch(mb_indices, mb_id)
out_queue.put(minibatch)
self.log("Fetched batch {}!".format(mb_id))
def log(self, message, end="\n"):
if self.verbose:
print(message, end=end)
def close(self):
timeout = 10
for proc in self.processes:
try:
start = time.time()
proc.join(timeout)
if time.time() - start >= timeout:
proc.terminate()
except:
self.log("Error when closing background worker")
del proc
self.processes.clear()
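# Minimal, illustrative sketch (not part of the original module): the intended way to use the
# template above is to subclass it and override create_minibatch. Shapes and values below are
# placeholders; __init__ still expects real sample/label directories.
class _ToyDatareader(DatareaderSimpleFiles):
    def create_minibatch(self, mb_indices, mb_id):
        # Return dummy arrays instead of loading real files.
        X = np.zeros(self.X_shape, dtype=np.float32)
        y = np.zeros(self.y_shape, dtype=np.float32)
        return dict(X=X, y=y, ID=mb_id)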
class DatareaderAdvancedImageFiles(object):
def __init__(self,
sample_directory: str,
label_directory: str,
sample_suffix: str = None,
label_suffix: str = None,
batchsize: int = 128,
subset: bool = False,
preceding_frames: int = 0,
frame_steps: int = 1,
stretch_values: bool = True,
image_scaling_factor: float = 1,
add_luminance: bool = False,
add_color_jittering: bool = False,
add_zoom: bool = False,
add_flip: bool = False,
apply_ising: bool = False,
ising_params: str = None,
void_class=None,
id2label=None,
num_classes: int = None,
verbose: bool = True):
"""Dataset reader template, works with cityscapes Img8bit_sequence data
Each sample consists of sample_len frames, with the last frame being labeled
Parameters
-------
sample_directory : str
Path to PNG input files
label_directory : str
Path to PNG label files
label_suffix: str (optional)
Suffix of the label file names (e.g. "_label.png")
batchsize : int
Batchsize
num_classes : int
Number of classes, not used if id2label dict is passed
id2label : dict
Dictionary mapping label ids to training ids, if passed number of samples will be set to length of this dict
void_class : int
Training id of void class (no error signal for pixels of this class)
subset:
False or fraction of dataset to load
add_zoom : None, float, int, or list
If not False: Add zoomed input augmentation with factor add_zoom or loop through factors in add_zoom, if
add_zoom is an array
add_flip : bool
If True: Add flipped input augmentation; add_zoom also applies to flipped images
apply_ising : bool
If True: An Ising filter is going to be deployed
ising_params:
Additional parameters for Ising
void_class: int or None
If int: Use class void_class as void class, i.e. pixels of this class do not contribute to the loss
"""
self.label_interpolation = Image.NEAREST
self.input_interpolation = Image.BICUBIC
self.preceding_frames = preceding_frames
self.frame_steps = frame_steps
self.resolution = None
self.stretch_values = stretch_values
self.image_scaling_factor = image_scaling_factor
self.add_luminance = add_luminance
if add_zoom and not isinstance(add_zoom, list):
add_zoom = [add_zoom]
self.add_zoom = add_zoom
self.add_flip = add_flip
self.add_color_jittering = add_color_jittering
self.apply_ising = apply_ising
self.ising_params = ising_params
self.id2label = id2label
#
# Load list of labels and samples, store everything in dictionaries and prepare some stuff
#
# prepare list for background worker
self.verbose = verbose
self.batchsize = batchsize
self.processes = list()
if id2label is not None:
self.num_classes = len(set(id2label.values()))
if void_class is not None:
self.num_classes -= 1
elif num_classes is not None:
self.num_classes = num_classes
# Load label filenames
self.log("Collecting label data...")
self.labels, self.n_labels = self.load_file_dataframe(label_directory, label_suffix, subset=subset)
if self.add_zoom or self.add_flip or self.add_color_jittering:
self.labels, self.n_labels = self.prepare_augmented_dataframe(self.labels, flip=self.add_flip,
jittering=self.add_color_jittering,
zoom=self.add_zoom)
# Load a single label to determine shape
label_image = self.load_label_image(path=self.labels[0][0])
self.label_image_shape = label_image.shape
# Get coordinates for potential zoom
if self.add_zoom:
left_zoom_border = np.round(self.label_image_shape[-1] / 10).astype(np.int)
right_zoom_border = np.round(self.label_image_shape[-1] / max(self.add_zoom) -
left_zoom_border).astype(np.int)
self.label_zoom_area = (np.linspace(left_zoom_border, right_zoom_border, num=5, dtype=np.int),
[np.round(self.label_image_shape[-2] / 2).astype(np.int)])
self.log("Found {} label images with shape {} (including augmentations)".format(self.n_labels,
self.label_image_shape))
# Calculate number of minibatches
self.n_mbs = np.int(np.ceil(self.n_labels / batchsize))
if (self.n_labels % batchsize) != 0:
raise AttributeError("Number of samples not dividable by minibatch-size! Please tell Michael to fiiiinally "
"allow for this... ;)")
# Load input image filenames
self.log("Collecting input data...")
self.samples, self.n_samples = self.load_file_dataframe(sample_directory, sample_suffix)
# Load a single input image to determine shape
input_image = self.load_input_image(path=self.samples[0][0])
self.input_image_shape = input_image.shape
# Get coordinates for potential zoom
if self.add_zoom:
left_zoom_border = np.round(self.input_image_shape[-2] / 10).astype(np.int)
right_zoom_border = np.round(self.input_image_shape[-2] / max(self.add_zoom) -
left_zoom_border).astype(np.int)
self.input_zoom_area = (np.linspace(left_zoom_border, right_zoom_border, num=5, dtype=np.int),
[np.round(self.input_image_shape[-3] / 2).astype(np.int)])
if self.add_color_jittering:
lower_bound = -20
upper_bound = 20
self.jitters = (np.roll(np.linspace(lower_bound, upper_bound, num=6, dtype=np.int), 2),
np.roll(np.linspace(upper_bound, lower_bound, num=6, dtype=np.int), 3),
np.concatenate((np.linspace(lower_bound, upper_bound / 2, num=3, dtype=np.int),
np.linspace(upper_bound / 2, lower_bound, num=3, dtype=np.int)), axis=0))
self.log("Found {} input images with shape {}".format(self.n_samples, self.input_image_shape))
# structure of inputs will be (samples, frames, x_axis, y_axis, channels)
self.X_shape = (batchsize, preceding_frames + 1) + self.input_image_shape
# structure of targets will be (samples, x_axis, y_axis, channels)
self.y_shape = (batchsize,) + self.label_image_shape
# structure of optional pixel-weights would be same as y_shape
self.pixel_weights_shape = self.y_shape
self.void_class = void_class
def load_file_dataframe(self, directory: str, suffix: str = "", subset=False):
"""Load all filenames and file paths into pandas dataframe"""
pattern = "**/*{}".format(suffix)
# Collect files in path, sort them by name, and store them into dictionary
file_paths = glob.glob(path.join(directory, pattern))
file_paths.sort()
# If subset is specified load only fraction of file_paths
if subset:
file_paths = file_paths[:int(len(file_paths) * subset)]
# Extract keys that correspond to file base name without path and suffix
keys = [path.basename(file)[:-len(suffix)] if suffix else path.basename(file) for file in file_paths]
# Store in data frame for easy indexing and key->value resolution
file_dataframe = pd.DataFrame(index=keys, data=file_paths)
return file_dataframe, len(file_dataframe)
def load_input_image(self, path, flip=False, zoom_factor=1, left_lower_corner=(0, 0), jitter=None):
"""Load a single input image"""
image = load_image(image_path=path, image_scaling_factor=self.image_scaling_factor,
resampling_method=self.input_interpolation)
if flip:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
if zoom_factor != 1:
image = zoom_into_image(image=image, zoom_factor=zoom_factor, left_lower_corner=left_lower_corner,
resample=self.input_interpolation)
if jitter:
image = add_color_jittering(image, jitter)
if self.add_luminance:
image = add_luminance(image)
if self.stretch_values:
image = stretch_values(image)
imarr = np.asarray(image, dtype=np.float32)
if len(imarr.shape) == 2:
imarr = np.reshape(imarr, imarr.shape + (1,))
return imarr
def load_label_image(self, path, flip=False, zoom_factor=1, left_lower_corner=(0, 0)):
"""Load a single label image"""
image = load_image(image_path=path, image_scaling_factor=self.image_scaling_factor,
resampling_method=self.label_interpolation)
if flip:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
if zoom_factor != 1:
image = zoom_into_image(image=image, zoom_factor=zoom_factor, left_lower_corner=left_lower_corner,
resample=self.label_interpolation)
label = np.asarray(image, dtype=np.uint8)
if self.id2label is not None:
temp = np.array(image)
for k, v in self.id2label.items():
temp[label == k] = v
label = temp
return label
def load_sample(self, input_image_index: int, augmentation_params=None):
"""Load multiple input images as frames for a sample"""
if augmentation_params is None:
augmentation_params = dict()
# Create slice for found input image and include prec_frames, suc_frames, and the frame step size
sample_slice = slice(input_image_index - self.preceding_frames * self.frame_steps,
input_image_index + 1, self.frame_steps)
# Use slice to get array of input image names, corresponding to the desired subsequence
frames = self.samples.iloc[sample_slice, 0].values
sample = np.empty((self.preceding_frames + 1,) + self.input_image_shape, dtype=np.float32)
# Loop through subsequence filenames and load images into matrix X
for frame_idx, frame_path in enumerate(frames):
# Load image in read-only mode
self.log("Loading Frame {}".format(frame_path))
frame = self.load_input_image(path=frame_path, **augmentation_params)
sample[frame_idx, :] = frame
return sample
def prepare_augmented_dataframe(self, dataframe, flip=True, zoom=True, jittering=True):
"""Duplicate dataframe and add columns for augmentations"""
if flip:
dataframe['flip'] = False
flipped = dataframe.copy()
flipped['flip'] = True
dataframe = dataframe.append(flipped)
if zoom:
dataframe['zoom'] = False
zoomed = dataframe.copy()
zoomed['zoom'] = True
dataframe = dataframe.append(zoomed)
if jittering:
dataframe['jittering'] = False
jittered = dataframe.copy()
jittered['jittering'] = True
dataframe = dataframe.append(jittered)
return dataframe, len(dataframe)
def get_class_occurrences(self, dset='train'):
""" Return occurrences of classes in training, validation, and test set"""
if dset == 'train':
class_occ = np.array([718432047, 2036049361, 336031674, 1259776091, 36211223,
48487162, 67768934, 11509943, 30521298, 878734354,
63965201, 221459496, 67202363, 7444910, 386502819,
14775009, 12995807, 12863940, 5445904, 22849664])
elif dset == 'val':
class_occ = np.array([131939476, 345222010, 49559045, 200895273, 6720678,
7527026, 13564731, 1813749, 6110454, 158682893,
7625936, 30708074, 11890229, 1970543, 59759319,
2760469, 3564221, 1032100, 728922, 6500852])
elif dset == 'test':
# occurrences for the testset are unknown
class_occ = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
else:
raise AttributeError("Occurrences of classes in dataset {} unknown!".format(dset))
return class_occ
def get_class_weights(self, dset='train', max_w=1.):
""" Return factors for normalizing weights
Example: loss_normed = loss_per_class * get_class_weights()
"""
# get occurrences of classes
class_occ = self.get_class_occurrences(dset)
# get ratio of classes wrt largest class
ratios = class_occ / class_occ.sum()
# turn ratios into factors for normalization to a max factor of max_w
return max_w * ratios.min() / ratios
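# Illustrative arithmetic for get_class_weights (numbers are made up): with occurrences
# [100, 50, 10] the ratios are [0.625, 0.3125, 0.0625], so with max_w=1. the returned weights are
# [0.1, 0.2, 1.0] -- the rarest class gets weight max_w, frequent classes are scaled down.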
def log(self, message, end="\n"):
if self.verbose:
print(message, end=end)
def __load_mb__(self, in_queue, out_queue):
"""
Load sample ids from in_queue and write loaded samples into out queue
:param in_queue:
:param out_queue:
:return:
"""
while True:
input = in_queue.get()
if input is None:
self.log("Putting sentinel", end=" ")
out_queue.put(None)
self.log("Terminated")
return 0
mb_samples, mb_nr = input
# X will be [batchsize, frames, x_axis, y_axis, channels]
batchsize = mb_samples.size
X_shape = self.X_shape
y_shape = self.y_shape
self.log("Fetching batch {}...".format(mb_nr))
# Reset minibatch values
X = np.empty(X_shape, np.float32)
y = np.empty(y_shape, np.uint8)
ID = np.empty((batchsize,), np.object_)
# Loop through all label indices in the current minibatch
for i, label_ind in enumerate(mb_samples):
# Find index of input image in self.samples dataframe corresponding to label key
label_key = self.labels.index[label_ind]
input_image_index = self.samples.index.get_loc(label_key)
# Get augmentation specifications from dataframe
sample_dataframe = self.labels.iloc[label_ind]
label_path = sample_dataframe[0]
flip = sample_dataframe.get('flip', False)
zoom = sample_dataframe.get('zoom', False)
jittering = sample_dataframe.get('jittering', False)
input_augmentation_dict = dict(flip=flip)
label_augmentation_dict = input_augmentation_dict.copy()
if zoom:
input_augmentation_dict['zoom_factor'] = self.add_zoom[mb_nr % len(self.add_zoom)]
label_augmentation_dict['zoom_factor'] = self.add_zoom[mb_nr % len(self.add_zoom)]
hori_pos = self.label_zoom_area[0][mb_nr % len(self.label_zoom_area[0])]
vert_pos = self.label_zoom_area[1][mb_nr % len(self.label_zoom_area[1])]
vert_pos -= np.round(vert_pos / label_augmentation_dict['zoom_factor']).astype(np.int)
label_augmentation_dict['left_lower_corner'] = (hori_pos, vert_pos)
hori_pos = self.input_zoom_area[0][mb_nr % len(self.input_zoom_area[0])]
vert_pos = self.input_zoom_area[1][mb_nr % len(self.input_zoom_area[1])]
vert_pos -= np.round(vert_pos / input_augmentation_dict['zoom_factor']).astype(np.int)
input_augmentation_dict['left_lower_corner'] = (hori_pos, vert_pos)
if jittering:
jitter_r = self.jitters[0][label_ind % len(self.jitters[0])]
jitter_g = self.jitters[1][label_ind % len(self.jitters[1])]
jitter_b = self.jitters[2][label_ind % len(self.jitters[2])]
input_augmentation_dict['jitter'] = (jitter_r, jitter_g, jitter_b)
if self.apply_ising:
# Load/generate Ising image
ising_img = ising_dropout(shape=self.input_image_shape, **self.ising_params)
# Load sample into minibatch
sample_img = self.load_sample(input_image_index=input_image_index,
augmentation_params=input_augmentation_dict)
X[i, :] = ising_image_overlay_input(sample_img, ising_img)
# Load labeled image into correct sample position at y
if self.ising_params.get('apply_on_label', False):
label_img = self.load_label_image(path=label_path, **label_augmentation_dict)
y[i, :] = ising_image_overlay_label(label_img, ising_img, self.void_class)
else:
y[i, :] = self.load_label_image(path=label_path, **label_augmentation_dict)
else:
# Load sample into minibatch
X[i, :] = self.load_sample(input_image_index=input_image_index,
augmentation_params=input_augmentation_dict)
# Load labeled image into correct sample position at y
y[i, :] = self.load_label_image(path=label_path, **label_augmentation_dict)
self.log("Key: {} Label: {} Sample: {}".format(label_key, label_path,
self.samples.iloc[input_image_index][0]))
# Store label key into sample position at ID
ID[i] = label_key
if self.void_class is not None:
pixel_weights = np.array(y != self.void_class, dtype=np.float32)
y[y == self.void_class] = 0
minibatch = dict(X=X, y=y, pixel_weights=pixel_weights, ID=ID)
else:
minibatch = dict(X=X, y=y, ID=ID)
out_queue.put(minibatch)
self.log("Fetched batch {}!".format(mb_nr))
def batch_loader(self, num_cached=3, num_threads=3, rnd_gen=None, shuffle=True):
"""Function to use for loading minibatches
This function will start num_threads background workers to load the minibatches; At most num_cached minibatches
will be held in memory at once per thread; The returned object is a dictionary with the minibatch that is
yielded (i.e. this function can be iterated over);
Parameters
-------
num_cached : int
Maximum number of minibatches to be stored in memory at once
num_threads : int
Number of background workers to use for loading the minibatches
rnd_gen : numpy random generator or None
Random generator to use for shuffling of samples; If None, a new numpy random generator will be created and
used;
shuffle : bool
True: Shuffle the samples
Yields
------
: object
Yields a new minibatch for each iteration over batch_loader
Example
------
>>> trainingset = DatareaderAdvancedImageFiles(...)
>>> mb_loader = trainingset.batch_loader(...)
>>> for mb_i, mb in enumerate(mb_loader):
>>> print("Minibatch number {} has the contents {}".format(mb_i, mb))
"""
#
# Create queues and workers
#
mb_ind_queues = [multiprocessing.Queue(0) for _ in range(num_threads)]
mb_queues = [multiprocessing.Queue(num_cached) for _ in range(num_threads)]
self.log("Starting background loaders...", end=" ")
for thread in range(num_threads):
proc = multiprocessing.Process(target=self.__load_mb__, args=(mb_ind_queues[thread], mb_queues[thread]))
proc.daemon = False
proc.start()
self.processes.append(proc)
self.log("DONE")
#
# Put indices in input queue
#
label_inds = np.arange(self.n_labels)
# shuffle batches across minibatches
self.log(" Shuffling samples...", end=" ")
if shuffle:
if rnd_gen is None:
np.random.shuffle(label_inds)
else:
rnd_gen.shuffle(label_inds)
self.log("DONE")
minibatch_slices = [slice(i * self.batchsize, (i + 1) * self.batchsize) for i in np.arange(self.n_mbs)]
# Put indices to be processed into queues, distribute them among workers to keep order
self.log(" Filling input queue...", end=" ")
thread = 0
for mb_sl_i, mb_sl in enumerate(minibatch_slices):
mb_ind_queues[thread].put([label_inds[mb_sl], mb_sl_i])
thread += 1
if thread >= num_threads:
thread = 0
# Put None at end of queue to signal end
for thread in range(num_threads):
mb_ind_queues[thread].put(None)
self.log("DONE")
# Get results from background workers, loop through different worker queues to keep order
thread = 0
for _ in minibatch_slices:
# each subprocess returns a minibatch and its index in the procs list
mb = mb_queues[thread].get()
yield mb
thread += 1
if thread >= num_threads:
thread = 0
# Check if each worker has reached its end
for thread in range(num_threads):
if mb_queues[thread].get() is not None:
raise ValueError("Error in queues!")
# Close queue and processes
for thread in range(num_threads):
mb_ind_queues[thread].close()
mb_queues[thread].close()
self.close()
def close(self):
timeout = 10
for proc in self.processes:
try:
start = time.time()
proc.join(timeout)
if time.time() - start >= timeout:
proc.terminate()
except:
self.log("Error when closing background worker")
del proc
self.processes.clear()
|
eventbus.py
|
import logging
import time
from threading import Thread, Lock
from queue import Queue
from .events import Event, TickEvent, isEventMatching
from .reflection import publishesHint
class ExitEvent(Event):
"""
A local event that instructs the main event loop to exit
"""
class EventBusSubscriber:
"""
The base class that every event bus subscriber should implement
"""
def __init__(self, eventbus):
self.eventbus = eventbus
class EventBus:
"""
The event bus handles delivery of in-system messages
"""
def __init__(self, clockFrequency=30, threadCount=8):
self.logger = logging.getLogger('EventBus')
self.subscribers = []
self.queue = Queue()
self.threadCount = threadCount
self.threads = []
self.activeBlockedSyncs = []
self.activeBlockedSyncLock = Lock()
self.active = False
self.clockThread = None
self.clockTicks = 0
if clockFrequency == 0:
self.clockInterval = 0
else:
self.clockInterval = float(1) / clockFrequency
self.lastTickMs = 0
def subscribe(self, callback, order=5, events=None, args=[], kwargs={}):
"""
Subscribe a callback to the bus
"""
self.subscribers.append((order, callback, events, args, kwargs))
self.subscribers = sorted(self.subscribers, key=lambda x: x[0])
def unsubscribe(self, callback):
"""
Remove a callback from the bus
"""
for subscriber in self.subscribers:
if subscriber[1] == callback:
self.subscribers.remove(subscriber)
def publish(self, event: Event):
"""
Publish an event to all subscribers
"""
if not type(event) is TickEvent:
self.logger.debug('Publishing \'{}\''.format(str(event)))
self.queue.put(event)
def start(self):
"""
Start the event bus thread loop
"""
self.logger.debug('Starting event bus')
# Start thread pool
self.logger.debug(
'Starting thread pool of {} threads'.format(self.threadCount))
for i in range(0, self.threadCount):
t = Thread(target=self._loopthread, name='eventbus-{}'.format(i + 1))
t.start()
self.threads.append(t)
# Start clock thread
self.active = True
self.lastTickMs = time.time()
if self.clockInterval:
self.clockThread = Thread(target=self._clockthread, name="eventbus-clock")
self.clockThread.start()
def stop(self):
"""
Gracefully stop the event bus thread loop
"""
self.logger.debug('Stopping event bus')
self.logger.debug('Cancelling next tick event')
self.active = False
if self.clockThread:
self.clockThread.join()
self.logger.debug('Waiting for queue to drain')
self.queue.join()
self.logger.debug('Posting the ExitEvent')
for i in range(0, self.threadCount):
self.queue.put(ExitEvent())
self.logger.debug('Waiting for thread pool to exit')
for thread in self.threads:
thread.join()
self.threads = []
def flush(self):
"""
Wait until the queue is drained
"""
if not self.queue.empty():
self.queue.join()
@publishesHint(TickEvent)
def _clockthread(self):
"""
Helper thread that dispatches a clock tick every second
"""
while self.active:
# Calculate actual time drift & publish event
ts = time.time()
self.clockTicks += 1
self.publish(TickEvent(self.clockTicks, ts - self.lastTickMs))
self.lastTickMs = ts
# Sleep interval time
time.sleep(self.clockInterval)
def _loopthread(self):
"""
Main event bus thread that dispatches all events from a single thread
"""
self.logger.debug('Event bus thread started')
while True:
event = self.queue.get()
if type(event) is ExitEvent:
self.queue.task_done()
break
for order, sub, events, args, kwargs in self.subscribers:
try:
start_ts = time.time()
if events is None or any(
map(lambda cls: isEventMatching(event, cls), events)):
sub(event, *args, **kwargs)
delta = time.time() - start_ts
if delta > 0.1:
self.logger.warning('Slow consumer ({:.2f}s) {} for event {}'.
format(delta, sub, type(event).__name__))
except Exception as e:
self.logger.error(
'Exception while dispatching event {}'.format(event))
self.logger.exception(e)
# Mark task as done
self.queue.task_done()
self.logger.debug('Event bus thread exited')
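# Minimal, illustrative round trip (not used anywhere in this module); it assumes Event subclasses
# can be constructed without arguments -- see events.py for the real signatures.
def _eventbus_example():
    bus = EventBus(clockFrequency=0)  # clockFrequency=0 disables the tick thread
    def on_event(event):
        logging.getLogger('EventBus').info('got %s', type(event).__name__)
    bus.subscribe(on_event)
    bus.start()
    bus.publish(Event())  # placeholder event; real code publishes concrete Event subclasses
    bus.flush()
    bus.stop()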
|
WikiExtractor.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# =============================================================================
# Version: 2.39 (September 29, 2015)
# Author: Giuseppe Attardi (attardi@di.unipi.it), University of Pisa
#
# Contributors:
# Antonio Fuschetto (fuschett@aol.com)
# Leonardo Souza (lsouza@amtera.com.br)
# Juan Manuel Caicedo (juan@cavorite.com)
# Humberto Pereira (begini@gmail.com)
# Siegfried-A. Gevatter (siegfried@gevatter.com)
# Pedro Assis (pedroh2306@gmail.com)
# Wim Muskee (wimmuskee@gmail.com)
# Radics Geza (radicsge@gmail.com)
#
# =============================================================================
# Copyright (c) 2009-2015. Giuseppe Attardi (attardi@di.unipi.it).
# =============================================================================
# This file is part of Tanl.
#
# Tanl is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License, version 3,
# as published by the Free Software Foundation.
#
# Tanl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
"""Wikipedia Extractor:
Extracts and cleans text from a Wikipedia database dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" url="" title="">
...
</doc>
This version performs template expansion by preprocessing the whole dump and
collecting template definitions.
"""
import sys, os.path, time
import re # TODO use regex when it will be standard
import argparse, random
from itertools import izip, izip_longest
import logging
import urllib
import bz2
import codecs
from htmlentitydefs import name2codepoint
from multiprocessing import Queue, JoinableQueue, Process, Manager, cpu_count
from cStringIO import StringIO
import fileinput
from timeit import default_timer
#===========================================================================
# Program version
version = '2.39'
### PARAMS ####################################################################
##
# Defined in <siteinfo>
# We include as default Template, when loading external template file.
knownNamespaces = set(['Template'])
##
# The namespace used for template definitions
# It is the name associated with namespace key=10 in the siteinfo header.
templateNamespace = ''
##
# The namespace used for module definitions
# It is the name associated with namespace key=828 in the siteinfo header.
moduleNamespace = ''
##
# Recognize only these namespaces
# w: Internal links to the Wikipedia
# wiktionary: Wiki dictionary
# wikt: shortcut for Wiktionary
#
acceptedNamespaces = ['w', 'wiktionary', 'wikt']
##
# Drop these elements from article text
#
discardElements = [
'gallery', 'timeline', 'noinclude', 'pre',
'table', 'tr', 'td', 'th', 'caption', 'div',
'form', 'input', 'select', 'option', 'textarea',
'ul', 'li', 'ol', 'dl', 'dt', 'dd', 'menu', 'dir',
'ref', 'references', 'img', 'imagemap', 'source', 'small'
]
# This is obtained from <siteinfo>
urlbase = None
def get_url(id):
global urlbase
return "%s?curid=%s" % (urlbase, id)
#=========================================================================
#
# MediaWiki Markup Grammar
# https://www.mediawiki.org/wiki/Preprocessor_ABNF
# xml-char = %x9 / %xA / %xD / %x20-D7FF / %xE000-FFFD / %x10000-10FFFF
# sptab = SP / HTAB
# ; everything except ">" (%x3E)
# attr-char = %x9 / %xA / %xD / %x20-3D / %x3F-D7FF / %xE000-FFFD / %x10000-10FFFF
# literal = *xml-char
# title = wikitext-L3
# part-name = wikitext-L3
# part-value = wikitext-L3
# part = ( part-name "=" part-value ) / ( part-value )
# parts = [ title *( "|" part ) ]
# tplarg = "{{{" parts "}}}"
# template = "{{" parts "}}"
# link = "[[" wikitext-L3 "]]"
# comment = "<!--" literal "-->"
# unclosed-comment = "<!--" literal END
# ; the + in the line-eating-comment rule was absent between MW 1.12 and MW 1.22
# line-eating-comment = LF LINE-START *SP +( comment *SP ) LINE-END
# attr = *attr-char
# nowiki-element = "<nowiki" attr ( "/>" / ( ">" literal ( "</nowiki>" / END ) ) )
# wikitext-L2 = heading / wikitext-L3 / *wikitext-L2
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment / xmlish-element /
# *wikitext-L3
#------------------------------------------------------------------------------
selfClosingTags = [ 'br', 'hr', 'nobr', 'ref', 'references', 'nowiki' ]
# These tags are dropped, keeping their content.
# handle 'a' separately, depending on keepLinks
ignoredTags = [
'abbr', 'b', 'big', 'blockquote', 'center', 'cite', 'div', 'em',
'font', 'h1', 'h2', 'h3', 'h4', 'hiero', 'i', 'kbd', 'nowiki',
'p', 'plaintext', 's', 'span', 'strike', 'strong',
'sub', 'sup', 'tt', 'u', 'var'
]
placeholder_tags = {'math':'formula', 'code':'codice'}
def normalizeTitle(title):
"""Normalize title"""
# remove leading/trailing whitespace and underscores
title = title.strip(' _')
# replace sequences of whitespace and underscore chars with a single space
title = re.sub(r'[\s_]+', ' ', title)
m = re.match(r'([^:]*):(\s*)(\S(?:.*))', title)
if m:
prefix = m.group(1)
if m.group(2):
optionalWhitespace = ' '
else:
optionalWhitespace = ''
rest = m.group(3)
ns = normalizeNamespace(prefix)
if ns in knownNamespaces:
# If the prefix designates a known namespace, then it might be
# followed by optional whitespace that should be removed to get
# the canonical page name
# (e.g., "Category: Births" should become "Category:Births").
title = ns + ":" + ucfirst(rest)
else:
# No namespace, just capitalize first letter.
# If the part before the colon is not a known namespace, then we
# must not remove the space after the colon (if any), e.g.,
# "3001: The_Final_Odyssey" != "3001:The_Final_Odyssey".
# However, to get the canonical page name we must contract multiple
# spaces into one, because
# "3001: The_Final_Odyssey" != "3001: The_Final_Odyssey".
title = ucfirst(prefix) + ":" + optionalWhitespace + ucfirst(rest)
else:
# no namespace, just capitalize first letter
title = ucfirst(title)
return title
##
# Removes HTML or XML character references and entities from a text string.
#
# @param text The HTML (or XML) source text.
# @return The plain text, as a Unicode string, if necessary.
def unescape(text):
def fixup(m):
text = m.group(0)
code = m.group(1)
try:
if text[1] == "#": # character reference
if text[2] == "x":
return unichr(int(code[1:], 16))
else:
return unichr(int(code))
else: # named entity
return unichr(name2codepoint[code])
except:
return text # leave as is
return re.sub("&#?(\w+);", fixup, text)
# Match HTML comments
# The buggy template {{Template:T}} has a comment terminating with just "->"
comment = re.compile(r'<!--.*?-->', re.DOTALL)
# Match ignored tags
ignored_tag_patterns = []
def ignoreTag(tag):
left = re.compile(r'<%s\b.*?>' % tag, re.IGNORECASE | re.DOTALL) # both <ref> and <reference>
right = re.compile(r'</\s*%s>' % tag, re.IGNORECASE)
ignored_tag_patterns.append((left, right))
for tag in ignoredTags:
ignoreTag(tag)
# Match selfClosing HTML tags
selfClosing_tag_patterns = [
re.compile(r'<\s*%s\b[^>]*/\s*>' % tag, re.DOTALL | re.IGNORECASE) for tag in selfClosingTags
]
# Match HTML placeholder tags
placeholder_tag_patterns = [
(re.compile(r'<\s*%s(\s*| [^>]+?)>.*?<\s*/\s*%s\s*>' % (tag, tag), re.DOTALL | re.IGNORECASE),
repl) for tag, repl in placeholder_tags.items()
]
# Match preformatted lines
preformatted = re.compile(r'^ .*?$')
# Match external links (space separates second optional parameter)
externalLink = re.compile(r'\[\w+[^ ]*? (.*?)]')
externalLinkNoAnchor = re.compile(r'\[\w+[&\]]*\]')
# Matches bold/italic
bold_italic = re.compile(r"'''''(.*?)'''''")
bold = re.compile(r"'''(.*?)'''")
italic_quote = re.compile(r"''\"([^\"]*?)\"''")
italic = re.compile(r"''(.*?)''")
quote_quote = re.compile(r'""([^"]*?)""')
# Matches space
spaces = re.compile(r' {2,}')
# Matches dots
dots = re.compile(r'\.{4,}')
#======================================================================
class Template(list):
"""
A Template is a list of TemplateText or TemplateArgs
"""
@classmethod
def parse(cls, body):
tpl = Template()
# we must handle nesting, s.a.
# {{{1|{{PAGENAME}}}
# {{{italics|{{{italic|}}}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|
#
start = 0
for s,e in findMatchingBraces(body, 3):
tpl.append(TemplateText(body[start:s]))
tpl.append(TemplateArg(body[s+3:e-3]))
start = e
tpl.append(TemplateText(body[start:])) # leftover
return tpl
def subst(self, params, extractor, depth=0):
# We perform parameter substitutions recursively.
# We also limit the maximum number of iterations to avoid too long or
# even endless loops (in case of malformed input).
# :see: http://meta.wikimedia.org/wiki/Help:Expansion#Distinction_between_variables.2C_parser_functions.2C_and_templates
#
# Parameter values are assigned to parameters in two (?) passes.
# Therefore a parameter name in a template can depend on the value of
# another parameter of the same template, regardless of the order in
# which they are specified in the template call, for example, using
# Template:ppp containing "{{{{{{p}}}}}}", {{ppp|p=q|q=r}} and even
# {{ppp|q=r|p=q}} gives r, but using Template:tvvv containing
# "{{{{{{{{{p}}}}}}}}}", {{tvvv|p=q|q=r|r=s}} gives s.
#logging.debug('subst tpl (%d, %d) %s', len(extractor.frame), depth, self)
if depth > extractor.maxParameterRecursionLevels:
extractor.recursion_exceeded_3_errs += 1
return ''
return ''.join([tpl.subst(params, extractor, depth) for tpl in self])
def __str__(self):
return ''.join([unicode(x) for x in self])
class TemplateText(unicode):
"""Fixed text of template"""
def subst(self, params, extractor, depth):
return self
class TemplateArg(object):
"""
parameter to a template.
Has a name and a default value, both of which are Templates.
"""
def __init__(self, parameter):
"""
:param parameter: the parts of a tplarg.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
# 4|{{{{{subst|}}}CURRENTYEAR}}
# any parts in a tplarg after the first (the parameter default) are
# ignored, and an equals sign in the first part is treated as plain text.
#logging.debug('TemplateArg %s', parameter)
parts = splitParts(parameter)
self.name = Template.parse(parts[0])
if len(parts) > 1:
# This parameter has a default value
self.default = Template.parse(parts[1])
else:
self.default = None
def __str__(self):
if self.default:
return '{{{%s|%s}}}' % (self.name, self.default)
else:
return '{{{%s}}}' % self.name
def subst(self, params, extractor, depth):
"""
Substitute value for this argument from dict :param params:
Use :param extractor: to evaluate expressions for name and default.
Limit substitution to the maximum :param depth:.
"""
# the parameter name itself might contain templates, e.g.:
# appointe{{#if:{{{appointer14|}}}|r|d}}14|
paramName = self.name.subst(params, extractor, depth+1)
paramName = extractor.expandTemplates(paramName)
res = ''
if paramName in params:
res = params[paramName] # use parameter value specified in template invocation
elif self.default: # use the default value
defaultValue = self.default.subst(params, extractor, depth+1)
res = extractor.expandTemplates(defaultValue)
#logging.debug('subst arg %d %s -> %s' % (depth, paramName, res))
return res
#======================================================================
substWords = 'subst:|safesubst:'
class Extractor(object):
"""
An extraction task on an article.
"""
##
# Whether to preserve links in output
keepLinks = False
##
# Whether to transform sections into HTML
keepSections = False
##
# Whether to output HTML instead of text
toHTML = False
def __init__(self, id, title, page):
"""
:param page: a list of lines.
"""
self.id = id
self.title = title
self.page = page
self.magicWords = MagicWords()
self.frame = []
self.recursion_exceeded_1_errs = 0 # template recursion within expandTemplates()
self.recursion_exceeded_2_errs = 0 # template recursion within expandTemplate()
self.recursion_exceeded_3_errs = 0 # parameter recursion
self.template_title_errs = 0
def extract(self, out):
"""
:param out: a memory file.
"""
logging.debug("%s\t%s", self.id, self.title)
text = ''.join(self.page)
url = get_url(self.id)
header = '<doc id="%s" url="%s" title="%s">\n' % (self.id, url, self.title)
# Separate header from text with a newline.
header += self.title + '\n\n'
header = header.encode('utf-8')
self.magicWords['pagename'] = self.title
self.magicWords['fullpagename'] = self.title
self.magicWords['currentyear'] = time.strftime('%Y')
self.magicWords['currentmonth'] = time.strftime('%m')
self.magicWords['currentday'] = time.strftime('%d')
self.magicWords['currenthour'] = time.strftime('%H')
self.magicWords['currenttime'] = time.strftime('%H:%M:%S')
text = clean(self, text)
footer = "\n</doc>\n"
out.write(header)
for line in compact(text):
out.write(line.encode('utf-8'))
out.write('\n')
out.write(footer)
errs = (self.template_title_errs,
self.recursion_exceeded_1_errs,
self.recursion_exceeded_2_errs,
self.recursion_exceeded_3_errs)
if any(errs):
logging.warn("Template errors in article '%s' (%s): title(%d) recursion(%d, %d, %d)",
self.title, self.id, *errs)
#----------------------------------------------------------------------
# Expand templates
maxTemplateRecursionLevels = 30
maxParameterRecursionLevels = 10
# check for template beginning
reOpen = re.compile('(?<!{){{(?!{)', re.DOTALL)
def expandTemplates(self, wikitext):
"""
:param wikitext: the text to be expanded.
Templates are frequently nested. Occasionally, parsing mistakes may
cause template insertion to enter an infinite loop, for instance when
trying to instantiate Template:Country
{{country_{{{1}}}|{{{2}}}|{{{2}}}|size={{{size|}}}|name={{{name|}}}}}
which is repeatedly trying to insert template 'country_', which is
again resolved to Template:Country. The straightforward solution of
keeping track of templates that were already inserted for the current
article would not work, because the same template may legally be used
more than once, with different parameters in different parts of the
article. Therefore, we limit the number of iterations of nested
template inclusion.
"""
# Test template expansion at:
# https://en.wikipedia.org/wiki/Special:ExpandTemplates
res = ''
if len(self.frame) >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_1_errs += 1
return res
#logging.debug('<expandTemplates ' + str(len(self.frame)))
cur = 0
# look for matching {{...}}
for s,e in findMatchingBraces(wikitext, 2):
res += wikitext[cur:s] + self.expandTemplate(wikitext[s+2:e-2])
cur = e
# leftover
res += wikitext[cur:]
#logging.debug(' expandTemplates> %d %s', len(self.frame), res)
return res
def templateParams(self, parameters):
"""
Build a dictionary with positional or name key to expanded parameters.
:param parameters: the parts[1:] of a template, i.e. all except the title.
:param depth: recursion depth.
"""
templateParams = {}
if not parameters:
return templateParams
logging.debug('<templateParams: %s', '|'.join(parameters))
# Parameters can be either named or unnamed. In the latter case, their
# name is defined by their ordinal position (1, 2, 3, ...).
unnamedParameterCounter = 0
# It's legal for unnamed parameters to be skipped, in which case they
# will get default values (if available) during actual instantiation.
# That is {{template_name|a||c}} means parameter 1 gets
# the value 'a', parameter 2 value is not defined, and parameter 3 gets
# the value 'c'. This case is correctly handled by function 'split',
# and does not require any special handling.
for param in parameters:
# Spaces before or after a parameter value are normally ignored,
# UNLESS the parameter contains a link (to prevent possible gluing
# the link to the following text after template substitution)
# Parameter values may contain "=" symbols, hence the parameter
# name extends up to the first such symbol.
# It is legal for a parameter to be specified several times, in
# which case the last assignment takes precedence. Example:
# "{{t|a|b|c|2=B}}" is equivalent to "{{t|a|B|c}}".
# Therefore, we don't check if the parameter has been assigned a
# value before, because anyway the last assignment should override
# any previous ones.
# FIXME: Don't use DOTALL here since parameters may be tags with
# attributes, e.g. <div class="templatequotecite">
# Parameters may span several lines, like:
# {{Reflist|colwidth=30em|refs=
# <ref name="Goode">Title</ref>
# The '=' might occur within an HTML attribute:
# "<ref name=value"
# but we stop at first.
m = re.match(' *([^=]*?) *=(.*)', param, re.DOTALL)
if m:
# This is a named parameter. This case also handles parameter
# assignments like "2=xxx", where the number of an unnamed
# parameter ("2") is specified explicitly - this is handled
# transparently.
parameterName = m.group(1).strip()
parameterValue = m.group(2)
if ']]' not in parameterValue: # if the value does not contain a link, trim whitespace
parameterValue = parameterValue.strip()
templateParams[parameterName] = parameterValue
else:
# this is an unnamed parameter
unnamedParameterCounter += 1
if ']]' not in param: # if the value does not contain a link, trim whitespace
param = param.strip()
templateParams[str(unnamedParameterCounter)] = param
logging.debug(' templateParams> %s', '|'.join(templateParams.values()))
return templateParams
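# Worked example mirroring the docstring above: for parts[1:] equal to ['a', 'b', 'c', '2=B'] this
# returns {'1': 'a', '2': 'B', '3': 'c'} -- the named assignment '2=B' overrides the positional
# value 'b', which is why "{{t|a|b|c|2=B}}" is equivalent to "{{t|a|B|c}}".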
def expandTemplate(self, body):
"""Expands template invocation.
:param body: the parts of a template.
:see http://meta.wikimedia.org/wiki/Help:Expansion for an explanation
of the process.
See in particular: Expansion of names and values
http://meta.wikimedia.org/wiki/Help:Expansion#Expansion_of_names_and_values
For most parser functions all names and values are expanded,
regardless of what is relevant for the result. The branching functions
(#if, #ifeq, #iferror, #ifexist, #ifexpr, #switch) are exceptions.
All names in a template call are expanded, and the titles of the
tplargs in the template body, after which it is determined which
values must be expanded, and for which tplargs in the template body
the first part (default).
In the case of a tplarg, any parts beyond the first are never
expanded. The possible name and the value of the first part is
expanded if the title does not match a name in the template call.
:see code for braceSubstitution at
https://doc.wikimedia.org/mediawiki-core/master/php/html/Parser_8php_source.html#3397:
"""
# template = "{{" parts "}}"
# Templates and tplargs are decomposed in the same way, with pipes as
# separator, even though eventually any parts in a tplarg after the first
# (the parameter default) are ignored, and an equals sign in the first
# part is treated as plain text.
# Pipes inside inner templates and tplargs, or inside double rectangular
# brackets within the template or tplargs are not taken into account in
# this decomposition.
# The first part is called title, the other parts are simply called parts.
# If a part has one or more equals signs in it, the first equals sign
# determines the division into name = value. Equals signs inside inner
# templates and tplargs, or inside double rectangular brackets within the
# part are not taken into account in this decomposition. Parts without
# equals sign are indexed 1, 2, .., given as attribute in the <name> tag.
if len(self.frame) >= self.maxTemplateRecursionLevels:
self.recursion_exceeded_2_errs += 1
#logging.debug(' INVOCATION> %d %s', len(self.frame), body)
return ''
logging.debug('INVOCATION %d %s', len(self.frame), body)
parts = splitParts(body)
# title is the portion before the first |
logging.debug('TITLE %s', parts[0].strip())
title = self.expandTemplates(parts[0].strip())
# SUBST
# Apply the template tag to parameters without
# substituting into them, e.g.
# {{subst:t|a{{{p|q}}}b}} gives the wikitext start-a{{{p|q}}}b-end
# @see https://www.mediawiki.org/wiki/Manual:Substitution#Partial_substitution
subst = False
if re.match(substWords, title, re.IGNORECASE):
title = re.sub(substWords, '', title, 1, re.IGNORECASE)
subst = True
if title.lower() in self.magicWords.values:
return self.magicWords[title.lower()]
# Parser functions
# The first argument is everything after the first colon.
# It has been evaluated above.
colon = title.find(':')
if colon > 1:
funct = title[:colon]
parts[0] = title[colon+1:].strip() # side-effect (parts[0] not used later)
# arguments after first are not evaluated
ret = callParserFunction(funct, parts, self.frame)
return self.expandTemplates(ret)
title = fullyQualifiedTemplateTitle(title)
if not title:
self.template_title_errs += 1
return ''
redirected = redirects.get(title)
if redirected:
title = redirected
# get the template
if title in templateCache:
template = templateCache[title]
elif title in templates:
template = Template.parse(templates[title])
# add it to cache
templateCache[title] = template
del templates[title]
else:
# The page being included could not be identified
return ''
#logging.debug('TEMPLATE %s: %s', title, template)
# tplarg = "{{{" parts "}}}"
# parts = [ title *( "|" part ) ]
# part = ( part-name "=" part-value ) / ( part-value )
# part-name = wikitext-L3
# part-value = wikitext-L3
# wikitext-L3 = literal / template / tplarg / link / comment /
# line-eating-comment / unclosed-comment /
# xmlish-element / *wikitext-L3
# A tplarg may contain other parameters as well as templates, e.g.:
# {{{text|{{{quote|{{{1|{{error|Error: No text given}}}}}}}}}}}
# hence no simple RE like this would work:
# '{{{((?:(?!{{{).)*?)}}}'
# We must use full CF parsing.
# the parameter name itself might be computed, e.g.:
# {{{appointe{{#if:{{{appointer14|}}}|r|d}}14|}}}
# Because of the multiple uses of double-brace and triple-brace
# syntax, expressions can sometimes be ambiguous.
# Precedence rules specified here:
# http://www.mediawiki.org/wiki/Preprocessor_ABNF#Ideal_precedence
# resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
#
# :see: https://en.wikipedia.org/wiki/Help:Template#Handling_parameters
params = parts[1:]
if not subst:
# Evaluate parameters, since they may contain templates, including
# the symbol "=".
# {{#ifexpr: {{{1}}} = 1 }}
params = [self.expandTemplates(p) for p in params]
# build a dict of name-values for the parameter values
params = self.templateParams(params)
# Perform parameter substitution
# extend frame before subst, since there may be recursion in default
# parameter value, e.g. {{OTRS|celebrative|date=April 2015}} in article
# 21637542 in enwiki.
self.frame.append((title, params))
instantiated = template.subst(params, self)
#logging.debug('instantiated %d %s', len(self.frame), instantiated)
value = self.expandTemplates(instantiated)
self.frame.pop()
#logging.debug(' INVOCATION> %s %d %s', title, len(self.frame), value)
return value
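# Illustrative sketch (editor's addition): assuming templatePrefix is
# 'Template:' and a template has been registered with
# define_template('Template:Hello', ['Hi {{{1}}}!']), an invocation body such
# as 'Hello|world' would be expected to expand to 'Hi world!' -- the title is
# resolved, the positional parameter is bound to '1', and the substituted text
# is expanded again.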
# ----------------------------------------------------------------------
# parameter handling
def splitParts(paramsList):
"""
:param paramList: the parts of a template or tplarg.
Split template parameters at the separator "|".
separator "=".
Template parameters often contain URLs, internal links, text or even
template expressions, since we evaluate templates outside in.
This is required for cases like:
{{#if: {{{1}}} | {{lc:{{{1}}} | "parameter missing"}}
Parameters are separated by "|" symbols. However, we
cannot simply split the string on "|" symbols, since these
also appear inside templates and internal links, e.g.
{{if:|
|{{#if:the president|
|{{#if:|
[[Category:Hatnote templates|A{{PAGENAME}}]]
}}
}}
}}
We split parts at the "|" symbols that are not inside any pair
{{{...}}}, {{...}}, [[...]], {|...|}.
"""
# Must consider '[' as normal in expansion of Template:EMedicine2:
# #ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}
# as part of:
# {{#ifeq: ped|article|[http://emedicine.medscape.com/article/180-overview|[http://www.emedicine.com/ped/topic180.htm#{{#if: |section~}}}} ped/180{{#if: |~}}]
# should handle both tpl arg like:
# 4|{{{{{subst|}}}CURRENTYEAR}}
# and tpl parameters like:
# ||[[Category:People|{{#if:A|A|{{PAGENAME}}}}]]
sep = '|'
parameters = []
cur = 0
for s,e in findMatchingBraces(paramsList):
par = paramsList[cur:s].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
elif not parameters:
parameters = [''] # create first param
# add span to last previous parameter
parameters[-1] += paramsList[s:e]
cur = e
# leftover
par = paramsList[cur:].split(sep)
if par:
if parameters:
# portion before | belongs to previous parameter
parameters[-1] += par[0]
if len(par) > 1:
# rest are new parameters
parameters.extend(par[1:])
else:
parameters = par
#logging.debug('splitParts %s %s\nparams: %s', sep, paramsList, str(parameters))
return parameters
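# Illustrative sketch (editor's addition): splitting happens only at top-level
# "|" symbols, so nested templates stay intact. For the body of
# "{{t|a|b|{{c|d}}|e}}":
#
#   >>> splitParts('t|a|b|{{c|d}}|e')
#   ['t', 'a', 'b', '{{c|d}}', 'e']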
def findMatchingBraces(text, ldelim=0):
"""
:param ldelim: number of braces to match. 0 means match [[]], {{}} and {{{}}}.
"""
# Parsing is done with respect to pairs of double braces {{..}} delimiting
# a template, and pairs of triple braces {{{..}}} delimiting a tplarg.
# If double opening braces are followed by triple closing braces or
# conversely, this is taken as delimiting a template, with one left-over
# brace outside it, taken as plain text. For any pattern of braces this
# defines a set of templates and tplargs such that any two are either
# separate or nested (not overlapping).
# Unmatched double rectangular closing brackets can be in a template or
# tplarg, but unmatched double rectangular opening brackets cannot.
# Unmatched double or triple closing braces inside a pair of
# double rectangular brackets are treated as plain text.
# Other formulation: in ambiguity between template or tplarg on one hand,
# and a link on the other hand, the structure with the rightmost opening
# takes precedence, even if this is the opening of a link without any
# closing, so not producing an actual link.
# In the case of more than three opening braces the last three are assumed
# to belong to a tplarg, unless there is no matching triple of closing
# braces, in which case the last two opening braces are assumed to
# belong to a template.
# We must skip individual { like in:
# {{#ifeq: {{padleft:|1|}} | { | | }}
# We must resolve ambiguities like this:
# {{{{ }}}} -> { {{{ }}} }
# {{{{{ }}}}} -> {{ {{{ }}} }}
# {{#if:{{{{{#if:{{{nominee|}}}|nominee|candidate}}|}}}|...}}
# Handle:
# {{{{{|safesubst:}}}#Invoke:String|replace|{{{1|{{{{{|safesubst:}}}PAGENAME}}}}}|%s+%([^%(]-%)$||plain=false}}
# as well as expressions with stray }:
# {{{link|{{ucfirst:{{{1}}}}}} interchange}}}
if ldelim: # 2-3
reOpen = re.compile('[{]{%d,}' % ldelim) # at least ldelim
reNext = re.compile('[{]{2,}|}{2,}') # at least 2
else:
reOpen = re.compile('{{2,}|\[{2,}')
reNext = re.compile('{{2,}|}{2,}|\[{2,}|]{2,}') # at least 2
cur = 0
while True:
m1 = reOpen.search(text, cur)
if not m1:
return
lmatch = m1.end()-m1.start()
if m1.group()[0] == '{':
stack = [lmatch] # stack of opening braces lengths
else:
stack = [-lmatch] # negative means [
end = m1.end()
while True:
m2 = reNext.search(text, end)
if not m2:
return # unbalanced
end = m2.end()
brac = m2.group()[0]
lmatch = m2.end()-m2.start()
if brac == '{':
stack.append(lmatch)
elif brac == '}':
while stack:
openCount = stack.pop() # opening span
if openCount == 0: # illegal unmatched [[
continue
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray }
break
else:
# put back unmatched
stack.append(openCount - lmatch)
break
if not stack:
yield m1.start(), end-lmatch
cur = end
break
elif len(stack) == 1 and 0 < stack[0] < ldelim:
# ambiguous {{{{{ }}} }}
yield m1.start() + stack[0], end
cur = end
break
elif brac == '[': # [[
stack.append(-lmatch)
else: # ]]
while stack and stack[-1] < 0: # matching [[
openCount = -stack.pop()
if lmatch >= openCount:
lmatch -= openCount
if lmatch <= 1: # either close or stray ]
break
else:
# put back unmatched (negative)
stack.append(lmatch - openCount)
break
if not stack:
yield m1.start(), end-lmatch
cur = end
break
# unmatched ]] are discarded
cur = end
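# Illustrative sketch (editor's addition): with the default ldelim=0 the
# generator yields the spans of top-level {{..}}, {{{..}}} and [[..]] groups:
#
#   >>> t = '{{a}} [[b]] {{{c}}}'
#   >>> [t[s:e] for s, e in findMatchingBraces(t)]
#   ['{{a}}', '[[b]]', '{{{c}}}']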
def findBalanced(text, openDelim, closeDelim):
"""
Assuming that text contains a properly balanced expression using
:param openDelim: as opening delimiters and
:param closeDelim: as closing delimiters.
:return: an iterator producing pairs (start, end) of start and end
positions in text containing a balanced expression.
"""
openPat = '|'.join([re.escape(x) for x in openDelim])
# pattern for delimiters expected after each opening delimiter
afterPat = { o:re.compile(openPat+'|'+c, re.DOTALL) for o,c in izip(openDelim, closeDelim)}
stack = []
start = 0
cur = 0
end = len(text)
startSet = False
startPat = re.compile(openPat)
nextPat = startPat
while True:
next = nextPat.search(text, cur)
if not next:
return
if not startSet:
start = next.start()
startSet = True
delim = next.group(0)
if delim in openDelim:
stack.append(delim)
nextPat = afterPat[delim]
else:
opening = stack.pop()
# assert opening == openDelim[closeDelim.index(next.group(0))]
if stack:
nextPat = afterPat[stack[-1]]
else:
yield start, next.end()
nextPat = startPat
start = next.end()
startSet = False
cur = next.end()
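# Illustrative sketch (editor's addition): findBalanced() yields the spans of
# balanced delimiter pairs, keeping nested pairs inside their enclosing span:
#
#   >>> t = '[[a|[[b]]]] x [[c]]'
#   >>> [t[s:e] for s, e in findBalanced(t, ['[['], [']]'])]
#   ['[[a|[[b]]]]', '[[c]]']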
# ----------------------------------------------------------------------
# Modules
# Only minimal support
# FIXME: import Lua modules.
modules = {
'convert' : {
'convert': lambda x, u, *rest: x+' '+u, # no conversion
}
}
# ----------------------------------------------------------------------
# variables
class MagicWords(object):
"""
One copy in each Extractor.
@see https://doc.wikimedia.org/mediawiki-core/master/php/MagicWord_8php_source.html
"""
names = [
'!',
'currentmonth',
'currentmonth1',
'currentmonthname',
'currentmonthnamegen',
'currentmonthabbrev',
'currentday',
'currentday2',
'currentdayname',
'currentyear',
'currenttime',
'currenthour',
'localmonth',
'localmonth1',
'localmonthname',
'localmonthnamegen',
'localmonthabbrev',
'localday',
'localday2',
'localdayname',
'localyear',
'localtime',
'localhour',
'numberofarticles',
'numberoffiles',
'numberofedits',
'articlepath',
'pageid',
'sitename',
'server',
'servername',
'scriptpath',
'stylepath',
'pagename',
'pagenamee',
'fullpagename',
'fullpagenamee',
'namespace',
'namespacee',
'namespacenumber',
'currentweek',
'currentdow',
'localweek',
'localdow',
'revisionid',
'revisionday',
'revisionday2',
'revisionmonth',
'revisionmonth1',
'revisionyear',
'revisiontimestamp',
'revisionuser',
'revisionsize',
'subpagename',
'subpagenamee',
'talkspace',
'talkspacee',
'subjectspace',
'subjectspacee',
'talkpagename',
'talkpagenamee',
'subjectpagename',
'subjectpagenamee',
'numberofusers',
'numberofactiveusers',
'numberofpages',
'currentversion',
'rootpagename',
'rootpagenamee',
'basepagename',
'basepagenamee',
'currenttimestamp',
'localtimestamp',
'directionmark',
'contentlanguage',
'numberofadmins',
'cascadingsources',
]
def __init__(self):
self.values = {}
self.values['!'] = '|'
def __getitem__(self, name):
return self.values.get(name)
def __setitem__(self, name, value):
self.values[name] = value
switches = [
'__NOTOC__',
'__FORCETOC__',
'__TOC__',
'__TOC__',
'__NEWSECTIONLINK__',
'__NONEWSECTIONLINK__',
'__NOGALLERY__',
'__HIDDENCAT__',
'__NOCONTENTCONVERT__',
'__NOCC__',
'__NOTITLECONVERT__',
'__NOTC__',
'__START__',
'__END__',
'__INDEX__',
'__NOINDEX__',
'__STATICREDIRECT__',
'__DISAMBIG__'
]
magicWordsRE = re.compile('|'.join(MagicWords.switches))
# ----------------------------------------------------------------------
# parser functions utilities
def ucfirst(string):
""":return: a string with just its first character uppercase
We can't use title() since it converts all words.
"""
if string:
if len(string) > 1:
return string[0].upper() + string[1:]
else:
return string.upper()
else:
return ''
def lcfirst(string):
""":return: a string with its first character lowercase"""
if string:
if len(string) > 1:
return string[0].lower() + string[1:]
else:
return string.lower()
else:
return ''
def fullyQualifiedTemplateTitle(templateTitle):
"""
Determine the namespace of the page being included through the template
mechanism
"""
global templatePrefix
if templateTitle.startswith(':'):
# Leading colon by itself implies main namespace, so strip this colon
return ucfirst(templateTitle[1:])
else:
m = re.match('([^:]*)(:.*)', templateTitle)
if m:
# colon found but not in the first position - check if it
# designates a known namespace
prefix = normalizeNamespace(m.group(1))
if prefix in knownNamespaces:
return prefix + ucfirst(m.group(2))
# The title of the page being included is NOT in the main namespace and
# lacks any other explicit designation of the namespace - therefore, it
# is resolved to the Template namespace (that's the default for the
# template inclusion mechanism).
# This is a defense against pages whose title only contains UTF-8 chars
# that are reduced to an empty string. Right now I can think of one such
# case - <C2><A0> which represents the non-breaking space.
# In this particular case, this page is a redirect to [[Non-breaking
# space]], but having in the system a redirect page with an empty title
# causes numerous problems, so we'll live happier without it.
if templateTitle:
return templatePrefix + ucfirst(templateTitle)
else:
return '' # caller may log as error
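# Illustrative sketch (editor's addition): a bare title resolves to the
# Template namespace, while a leading ':' forces the main namespace (assuming
# templatePrefix == 'Template:'):
#
#   >>> fullyQualifiedTemplateTitle('foo')
#   'Template:Foo'
#   >>> fullyQualifiedTemplateTitle(':foo')
#   'Foo'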
def normalizeNamespace(ns):
return ucfirst(ns)
# ----------------------------------------------------------------------
# Parser functions
# see http://www.mediawiki.org/wiki/Help:Extension:ParserFunctions
# https://github.com/Wikia/app/blob/dev/extensions/ParserFunctions/ParserFunctions_body.php
class Infix:
"""Infix operators.
The calling sequence for the infix is:
x |op| y
"""
def __init__(self, function):
self.function = function
def __ror__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __or__(self, other):
return self.function(other)
def __rlshift__(self, other):
return Infix(lambda x, self=self, other=other: self.function(other, x))
def __rshift__(self, other):
return self.function(other)
def __call__(self, value1, value2):
return self.function(value1, value2)
ROUND = Infix(lambda x,y: round(x, y))
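# Illustrative sketch (editor's addition): the Infix wrapper turns a binary
# function into an |op| style operator, which sharp_expr() relies on after
# rewriting "round" into "|ROUND|":
#
#   >>> 3.14159 |ROUND| 2
#   3.14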
def sharp_expr(expr):
try:
expr = re.sub('=', '==', expr)
expr = re.sub('mod', '%', expr)
expr = re.sub(r'\bdiv\b', '/', expr)    # raw strings: \b is a word boundary, not a backspace
expr = re.sub(r'\bround\b', '|ROUND|', expr)
return unicode(eval(expr))
except:
return '<span class="error"></span>'
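# Illustrative sketch (editor's addition): the substitutions map wikitext
# operators onto Python ones before eval, so under Python 2 (where unicode()
# exists):
#
#   >>> sharp_expr('1 + 2 * 3')
#   u'7'
#   >>> sharp_expr('7 mod 3')
#   u'1'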
def sharp_if(testValue, valueIfTrue, valueIfFalse=None, *args):
# In theory, we should evaluate the first argument here,
# but it was evaluated while evaluating part[0] in expandTemplate().
if testValue.strip():
# The {{#if:}} function is an if-then-else construct.
# The applied condition is: "The condition string is non-empty".
valueIfTrue = valueIfTrue.strip()
if valueIfTrue:
return valueIfTrue
elif valueIfFalse:
return valueIfFalse.strip()
return ""
def sharp_ifeq(lvalue, rvalue, valueIfTrue, valueIfFalse=None, *args):
rvalue = rvalue.strip()
if rvalue:
# lvalue is always defined
if lvalue.strip() == rvalue:
# The {{#ifeq:}} function is an if-then-else construct. The
# applied condition is "is rvalue equal to lvalue". Note that this
# does only string comparison while MediaWiki implementation also
# supports numerical comparisons.
if valueIfTrue:
return valueIfTrue.strip()
else:
if valueIfFalse:
return valueIfFalse.strip()
return ""
def sharp_iferror(test, then='', Else=None, *args):
if re.match('<(?:strong|span|p|div)\s(?:[^\s>]*\s+)*?class="(?:[^"\s>]*\s+)*?error(?:\s[^">]*)?"', test):
return then
elif Else is None:
return test.strip()
else:
return Else.strip()
def sharp_switch(primary, *params):
# FIXME: we don't support numeric expressions in primary
# {{#switch: comparison string
# | case1 = result1
# | case2
# | case4 = result2
# | 1 | case5 = result3
# | #default = result4
# }}
primary = primary.strip()
found = False # for fall through cases
default = None
rvalue = None
lvalue = ''
for param in params:
# handle cases like:
# #default = [http://www.perseus.tufts.edu/hopper/text?doc=Perseus...]
pair = param.split('=', 1)
lvalue = pair[0].strip()
rvalue = None
if len(pair) > 1:
# got "="
rvalue = pair[1].strip()
# check for any of multiple values pipe separated
if found or primary in [v.strip() for v in lvalue.split('|')]:
# Found a match, return now
return rvalue
elif lvalue == '#default':
default = rvalue
rvalue = None # avoid defaulting to last case
elif lvalue == primary:
# If the value matches, set a flag and continue
found = True
# Default case
# Check if the last item had no = sign, thus specifying the default case
if rvalue is not None:
return lvalue
elif default is not None:
return default
return ''
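# Illustrative sketch (editor's addition): the comparison string is matched
# against the case labels, and "#default" is used when nothing matches:
#
#   >>> sharp_switch('b', 'a = 1', 'b = 2', '#default = 0')
#   '2'
#   >>> sharp_switch('z', 'a = 1', 'b = 2', '#default = 0')
#   '0'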
# Extension Scribunto
def sharp_invoke(module, function, frame):
functions = modules.get(module)
if functions:
funct = functions.get(function)
if funct:
# find parameters in frame whose title is the one of the original
# template invocation
templateTitle = fullyQualifiedTemplateTitle(function)
if not templateTitle:
logging.warn("Template with empty title")
pair = next((x for x in frame if x[0] == templateTitle), None)
if pair:
params = pair[1]
# extract positional args
params = [params.get(str(i+1)) for i in range(len(params))]
return funct(*params)
else:
return funct()
return ''
parserFunctions = {
'#expr': sharp_expr,
'#if': sharp_if,
'#ifeq': sharp_ifeq,
'#iferror': sharp_iferror,
'#ifexpr': lambda *args: '', # not supported
'#ifexist': lambda *args: '', # not supported
'#rel2abs': lambda *args: '', # not supported
'#switch': sharp_switch,
'#language': lambda *args: '', # not supported
'#time': lambda *args: '', # not supported
'#timel': lambda *args: '', # not supported
'#titleparts': lambda *args: '', # not supported
# This function is used in some pages to construct links
# http://meta.wikimedia.org/wiki/Help:URL
'urlencode': lambda string, *rest: urllib.quote(string.encode('utf-8')),
'lc': lambda string, *rest: string.lower() if string else '',
'lcfirst': lambda string, *rest: lcfirst(string),
'uc': lambda string, *rest: string.upper() if string else '',
'ucfirst': lambda string, *rest: ucfirst(string),
'int': lambda string, *rest: str(int(string)),
}
def callParserFunction(functionName, args, frame):
"""
Parser functions have similar syntax as templates, except that
the first argument is everything after the first colon.
:return: the result of the invocation, None in case of failure.
http://meta.wikimedia.org/wiki/Help:ParserFunctions
"""
try:
if functionName == '#invoke':
# special handling of frame
ret = sharp_invoke(args[0].strip(), args[1].strip(), frame)
#logging.debug('parserFunction> %s %s', functionName, ret)
return ret
if functionName in parserFunctions:
ret = parserFunctions[functionName](*args)
#logging.debug('parserFunction> %s %s', functionName, ret)
return ret
except:
return "" # FIXME: fix errors
return ""
# ----------------------------------------------------------------------
# Expand using WikiMedia API
# import json
# def expandTemplates(text):
# """Expand templates invoking MediaWiki API"""
# text = urllib.quote(text.encode('utf-8'))
# base = urlbase[:urlbase.rfind('/')]
# url = base + "/w/api.php?action=expandtemplates&format=json&text=" + text
# exp = json.load(urllib.urlopen(url))
# return exp['expandtemplates']['*']
# ----------------------------------------------------------------------
# Extract Template definition
reNoinclude = re.compile(r'<noinclude>(?:.*?)</noinclude>', re.DOTALL)
reIncludeonly = re.compile(r'<includeonly>|</includeonly>', re.DOTALL)
# These are built before spawning processes, hence they are shared.
templates = {}
redirects = {}
# cache of parser templates
# FIXME: sharing this with a Manager slows down.
templateCache = {}
def define_template(title, page):
"""
Adds a template defined in the :param page:.
@see https://en.wikipedia.org/wiki/Help:Template#Noinclude.2C_includeonly.2C_and_onlyinclude
"""
global templates
global redirects
#title = normalizeTitle(title)
# check for redirects
m = re.match('#REDIRECT.*?\[\[([^\]]*)]]', page[0], re.IGNORECASE)
if m:
redirects[title] = m.group(1) #normalizeTitle(m.group(1))
return
text = unescape(''.join(page))
# We're storing template text for future inclusion, therefore,
# remove all <noinclude> text and keep all <includeonly> text
# (but eliminate <includeonly> tags per se).
# However, if <onlyinclude> ... </onlyinclude> parts are present,
# then only keep them and discard the rest of the template body.
# This is because using <onlyinclude> on a text fragment is
# equivalent to enclosing it in <includeonly> tags **AND**
# enclosing all the rest of the template body in <noinclude> tags.
# remove comments
text = comment.sub('', text)
# eliminate <noinclude> fragments
text = reNoinclude.sub('', text)
# eliminate unterminated <noinclude> elements
text = re.sub(r'<noinclude\s*>.*$', '', text, flags=re.DOTALL)
text = re.sub(r'<noinclude/>', '', text)
onlyincludeAccumulator = ''
for m in re.finditer('<onlyinclude>(.*?)</onlyinclude>', text, re.DOTALL):
onlyincludeAccumulator += m.group(1)
if onlyincludeAccumulator:
text = onlyincludeAccumulator
else:
text = reIncludeonly.sub('', text)
if text:
if title in templates:
logging.warn('Redefining: %s', title)
templates[title] = text
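# Illustrative sketch (editor's addition): only the transcluded part of a
# template page survives definition; <noinclude> fragments are stripped and
# <includeonly> tags are removed while their content is kept:
#
#   >>> define_template('Template:X',
#   ...     ['shared <noinclude>documentation only</noinclude>',
#   ...      '<includeonly>transcluded only</includeonly>'])
#   >>> templates['Template:X']
#   'shared transcluded only'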
# ----------------------------------------------------------------------
def dropNested(text, openDelim, closeDelim):
"""
Drop from text nested expressions, e.g. namespaces and tables, delimited by openDelim and closeDelim.
"""
openRE = re.compile(openDelim, re.IGNORECASE)
closeRE = re.compile(closeDelim, re.IGNORECASE)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -=1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
def dropSpans(spans, text):
"""
Drop from text the blocks identified in :param spans:, possibly nested.
"""
spans.sort()
res = ''
offset = 0
for s, e in spans:
if offset <= s: # handle nesting
if offset < s:
res += text[offset:s]
offset = e
res += text[offset:]
return res
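# Illustrative sketch (editor's addition): dropNested() locates the outermost
# matching pairs (absorbing nested ones) and dropSpans() removes them, e.g. for
# a wiki table containing a nested table:
#
#   >>> dropNested('a {| x {| y |} z |} b', r'{\|', r'\|}')
#   'a  b'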
# ----------------------------------------------------------------------
# WikiLinks
# See https://www.mediawiki.org/wiki/Help:Links#Internal_links
# Can be nested [[File:..|..[[..]]..|..]], [[Category:...]], etc.
# Also: [[Help:IPA for Catalan|[andora]]]
def replaceInternalLinks(text):
"""
Replaces internal links of the form:
[[title |...|label]]trail
with the label (or the title, when no label is given) concatenated with trail, when present, e.g. 's' for plural.
"""
# call this after removal of external links, so we need not worry about
# triple closing ]]].
cur = 0
res = ''
for s,e in findBalanced(text, ['[['], [']]']):
m = tailRE.match(text, e)
if m:
trail = m.group(0)
end = m.end()
else:
trail = ''
end = e
inner = text[s+2:e-2]
# find first |
pipe = inner.find('|')
if pipe < 0:
title = inner
label = title
else:
title = inner[:pipe].rstrip()
# find last |
curp = pipe+1
for s1,e1 in findBalanced(inner, ['[['], [']]']):
last = inner.rfind('|', curp, s1)
if last >= 0:
pipe = last # advance
curp = e1
label = inner[pipe+1:].strip()
res += text[cur:s] + makeInternalLink(title, label) + trail
cur = end
return res + text[cur:]
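# Illustrative sketch (editor's addition): with Extractor.keepLinks == False
# the link is replaced by its label (or its title when there is no label), and
# a plain-word tail is glued on:
#
#   >>> replaceInternalLinks('the [[Dog|dog]]s bark')
#   'the dogs bark'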
# the official version is a method in class Parser, similar to this:
# def replaceInternalLinks2(text):
# global wgExtraInterlanguageLinkPrefixes
# # the % is needed to support urlencoded titles as well
# tc = Title::legalChars() + '#%'
# # Match a link having the form [[namespace:link|alternate]]trail
# e1 = re.compile("([%s]+)(?:\\|(.+?))?]](.*)" % tc, re.S | re.D)
# # Match cases where there is no "]]", which might still be images
# e1_img = re.compile("([%s]+)\\|(.*)" % tc, re.S | re.D)
# holders = LinkHolderArray(self)
# # split the entire text string on occurrences of [[
# iterBrackets = re.compile('[[').finditer(text)
# m in iterBrackets.next()
# # get the first element (all text up to first [[)
# s = text[:m.start()]
# cur = m.end()
# line = s
# useLinkPrefixExtension = self.getTargetLanguage().linkPrefixExtension()
# e2 = None
# if useLinkPrefixExtension:
# # Match the end of a line for a word that is not followed by whitespace,
# # e.g. in the case of "The Arab al[[Razi]]", "al" will be matched
# global wgContLang
# charset = wgContLang.linkPrefixCharset()
# e2 = re.compile("((?>.*[^charset]|))(.+)", re.S | re.D | re.U)
# if self.mTitle is None:
# raise MWException(__METHOD__ + ": \self.mTitle is null\n")
# nottalk = not self.mTitle.isTalkPage()
# if useLinkPrefixExtension:
# m = e2.match(s)
# if m:
# first_prefix = m.group(2)
# else:
# first_prefix = false
# else:
# prefix = ''
# useSubpages = self.areSubpagesAllowed()
# for m in iterBrackets:
# line = text[cur:m.start()]
# cur = m.end()
# # TODO: Check for excessive memory usage
# if useLinkPrefixExtension:
# m = e2.match(e2)
# if m:
# prefix = m.group(2)
# s = m.group(1)
# else:
# prefix = ''
# # first link
# if first_prefix:
# prefix = first_prefix
# first_prefix = False
# might_be_img = False
# m = e1.match(line)
# if m: # page with normal label or alt
# label = m.group(2)
# # If we get a ] at the beginning of m.group(3) that means we have a link that is something like:
# # [[Image:Foo.jpg|[http://example.com desc]]] <- having three ] in a row fucks up,
# # the real problem is with the e1 regex
# # See bug 1300.
# #
# # Still some problems for cases where the ] is meant to be outside punctuation,
# # and no image is in sight. See bug 2095.
# #
# if label and m.group(3)[0] == ']' and '[' in label:
# label += ']' # so that replaceExternalLinks(label) works later
# m.group(3) = m.group(3)[1:]
# # fix up urlencoded title texts
# if '%' in m.group(1):
# # Should anchors '#' also be rejected?
# m.group(1) = str_replace(array('<', '>'), array('<', '>'), rawurldecode(m.group(1)))
# trail = m.group(3)
# else:
# m = e1_img.match(line):
# if m:
# # Invalid, but might be an image with a link in its caption
# might_be_img = true
# label = m.group(2)
# if '%' in m.group(1):
# m.group(1) = rawurldecode(m.group(1))
# trail = ""
# else: # Invalid form; output directly
# s += prefix + '[[' + line
# continue
# origLink = m.group(1)
# # Dont allow internal links to pages containing
# # PROTO: where PROTO is a valid URL protocol these
# # should be external links.
# if (preg_match('/^(?i:' + self.mUrlProtocols + ')/', origLink)) {
# s += prefix + '[[' + line
# continue
# }
# # Make subpage if necessary
# if useSubpages:
# link = self.maybeDoSubpageLink(origLink, label)
# else:
# link = origLink
# noforce = origLink[0] != ':'
# if not noforce:
# # Strip off leading ':'
# link = link[1:]
# nt = Title::newFromText(self.mStripState.unstripNoWiki(link))
# if nt is None:
# s += prefix + '[[' + line
# continue
# ns = nt.getNamespace()
# iw = nt.getInterwiki()
# if might_be_img { # if this is actually an invalid link
# if (ns == NS_FILE and noforce) { # but might be an image
# found = False
# while True:
# # look at the next 'line' to see if we can close it there
# next_line = iterBrakets.next()
# if not next_line:
# break
# m = explode(']]', next_line, 3)
# if m.lastindex == 3:
# # the first ]] closes the inner link, the second the image
# found = True
# label += "[[%s]]%s" % (m.group(0), m.group(1))
# trail = m.group(2)
# break
# elif m.lastindex == 2:
# # if there is exactly one ]] that is fine, we will keep looking
# label += "[[{m[0]}]]{m.group(1)}"
# else:
# # if next_line is invalid too, we need look no further
# label += '[[' + next_line
# break
# if not found:
# # we couldnt find the end of this imageLink, so output it raw
# # but dont ignore what might be perfectly normal links in the text we ve examined
# holders.merge(self.replaceInternalLinks2(label))
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# } else: # it is not an image, so output it raw
# s += "{prefix}[[%s|%s" % (link, text)
# # note: no trail, because without an end, there *is* no trail
# continue
# }
# wasblank = (text == '')
# if wasblank:
# text = link
# else:
# # Bug 4598 madness. Handle the quotes only if they come from the alternate part
# # [[Lista d''e paise d''o munno]] . <a href="...">Lista d''e paise d''o munno</a>
# # [[Criticism of Harry Potter|Criticism of ''Harry Potter'']]
# # . <a href="Criticism of Harry Potter">Criticism of <i>Harry Potter</i></a>
# text = self.doQuotes(text)
# # Link not escaped by : , create the various objects
# if noforce and not nt.wasLocalInterwiki():
# # Interwikis
# if iw and mOptions.getInterwikiMagic() and nottalk and (
# Language::fetchLanguageName(iw, None, 'mw') or
# in_array(iw, wgExtraInterlanguageLinkPrefixes)):
# # Bug 24502: filter duplicates
# if iw not in mLangLinkLanguages:
# self.mLangLinkLanguages[iw] = True
# self.mOutput.addLanguageLink(nt.getFullText())
# s = rstrip(s + prefix)
# s += strip(trail, "\n") == '' ? '': prefix + trail
# continue
# if ns == NS_FILE:
# if not wfIsBadImage(nt.getDBkey(), self.mTitle):
# if wasblank:
# # if no parameters were passed, text
# # becomes something like "File:Foo.png",
# # which we dont want to pass on to the
# # image generator
# text = ''
# else:
# # recursively parse links inside the image caption
# # actually, this will parse them in any other parameters, too,
# # but it might be hard to fix that, and it doesnt matter ATM
# text = self.replaceExternalLinks(text)
# holders.merge(self.replaceInternalLinks2(text))
# # cloak any absolute URLs inside the image markup, so replaceExternalLinks() wont touch them
# s += prefix + self.armorLinks(
# self.makeImage(nt, text, holders)) + trail
# else:
# s += prefix + trail
# continue
# if ns == NS_CATEGORY:
# s = rstrip(s + "\n") # bug 87
# if wasblank:
# sortkey = self.getDefaultSort()
# else:
# sortkey = text
# sortkey = Sanitizer::decodeCharReferences(sortkey)
# sortkey = str_replace("\n", '', sortkey)
# sortkey = self.getConverterLanguage().convertCategoryKey(sortkey)
# self.mOutput.addCategory(nt.getDBkey(), sortkey)
# s += strip(prefix + trail, "\n") == '' ? '' : prefix + trail
# continue
# }
# }
# # Self-link checking. For some languages, variants of the title are checked in
# # LinkHolderArray::doVariants() to allow batching the existence checks necessary
# # for linking to a different variant.
# if ns != NS_SPECIAL and nt.equals(self.mTitle) and !nt.hasFragment():
# s += prefix + Linker::makeSelfLinkObj(nt, text, '', trail)
# continue
# # NS_MEDIA is a pseudo-namespace for linking directly to a file
# # @todo FIXME: Should do batch file existence checks, see comment below
# if ns == NS_MEDIA:
# # Give extensions a chance to select the file revision for us
# options = []
# descQuery = False
# Hooks::run('BeforeParserFetchFileAndTitle',
# [this, nt, &options, &descQuery])
# # Fetch and register the file (file title may be different via hooks)
# file, nt = self.fetchFileAndTitle(nt, options)
# # Cloak with NOPARSE to avoid replacement in replaceExternalLinks
# s += prefix + self.armorLinks(
# Linker::makeMediaLinkFile(nt, file, text)) + trail
# continue
# # Some titles, such as valid special pages or files in foreign repos, should
# # be shown as bluelinks even though they are not included in the page table
# #
# # @todo FIXME: isAlwaysKnown() can be expensive for file links; we should really do
# # batch file existence checks for NS_FILE and NS_MEDIA
# if iw == '' and nt.isAlwaysKnown():
# self.mOutput.addLink(nt)
# s += self.makeKnownLinkHolder(nt, text, array(), trail, prefix)
# else:
# # Links will be added to the output link list after checking
# s += holders.makeHolder(nt, text, array(), trail, prefix)
# }
# return holders
def makeInternalLink(title, label):
colon = title.find(':')
if colon > 0 and title[:colon] not in acceptedNamespaces:
return ''
if colon == 0:
# drop also :File:
colon2 = title.find(':', colon+1)
if colon2 > 1 and title[colon+1:colon2] not in acceptedNamespaces:
return ''
if Extractor.keepLinks:
return '<a href="%s">%s</a>' % (urllib.quote(title.encode('utf-8')), label)
else:
return label
# ----------------------------------------------------------------------
# External links
# from: https://doc.wikimedia.org/mediawiki-core/master/php/DefaultSettings_8php_source.html
wgUrlProtocols = [
'bitcoin:', 'ftp://', 'ftps://', 'geo:', 'git://', 'gopher://', 'http://',
'https://', 'irc://', 'ircs://', 'magnet:', 'mailto:', 'mms://', 'news:',
'nntp://', 'redis://', 'sftp://', 'sip:', 'sips:', 'sms:', 'ssh://',
'svn://', 'tel:', 'telnet://', 'urn:', 'worldwind://', 'xmpp:', '//'
]
# from: https://doc.wikimedia.org/mediawiki-core/master/php/Parser_8php_source.html
# Constants needed for external link processing
# Everything except bracket, space, or control characters
# \p{Zs} is unicode 'separator, space' category. It covers the space 0x20
# as well as U+3000 (IDEOGRAPHIC SPACE), for bug 19052
EXT_LINK_URL_CLASS = r'[^][<>"\x00-\x20\x7F\s]'
ExtLinkBracketedRegex = re.compile('\[(((?i)' + '|'.join(wgUrlProtocols) + ')' + EXT_LINK_URL_CLASS + r'+)\s*([^\]\x00-\x08\x0a-\x1F]*?)\]', re.S | re.U)
EXT_IMAGE_REGEX = re.compile(
r"""^(http://|https://)([^][<>"\x00-\x20\x7F\s]+)
/([A-Za-z0-9_.,~%\-+&;#*?!=()@\x80-\xFF]+)\.((?i)gif|png|jpg|jpeg)$""",
re.X | re.S | re.U)
def replaceExternalLinks(text):
s = ''
cur = 0
for m in ExtLinkBracketedRegex.finditer(text):
s += text[cur:m.start()]
cur = m.end()
url = m.group(1)
label = m.group(3)
# # The characters '<' and '>' (which were escaped by
# # removeHTMLtags()) should not be included in
# # URLs, per RFC 2396.
# m2 = re.search('&(lt|gt);', url)
# if m2:
# link = url[m2.end():] + ' ' + link
# url = url[0:m2.end()]
# If the link text is an image URL, replace it with an <img> tag
# This happened by accident in the original parser, but some people used it extensively
m = EXT_IMAGE_REGEX.match(label)
if m:
label = makeExternalImage(label)
# Use the encoded URL
# This means that users can paste URLs directly into the text
# Funny characters like ö aren't valid in URLs anyway
# This was changed in August 2004
s += makeExternalLink(url, label) #+ trail
return s + text[cur:]
# Function applied to wikiLinks
def makeExternalLink(title, anchor):
colon = title.find(':')
if colon > 0 and title[:colon] not in acceptedNamespaces:
return ''
if colon == 0:
# drop also :File:
colon2 = title.find(':', colon+1)
if colon2 > 1 and title[colon+1:colon2] not in acceptedNamespaces:
return ''
if Extractor.keepLinks:
return '<a href="%s">%s</a>' % (urllib.quote(title.encode('utf-8')), anchor)
else:
return anchor
def makeExternalImage(url, alt=''):
if Extractor.keepLinks:
return '<img src="%s" alt="%s">' % (url, alt)
else:
return alt
# ----------------------------------------------------------------------
# match tail after wikilink
tailRE = re.compile('\w+')
syntaxhighlight = re.compile('<syntaxhighlight .*?>(.*?)</syntaxhighlight>', re.DOTALL)
expand_templates = True
def clean(extractor, text):
"""
Transforms wiki markup.
@see https://www.mediawiki.org/wiki/Help:Formatting
"""
if (expand_templates):
# expand templates
# See: http://www.mediawiki.org/wiki/Help:Templates
text = extractor.expandTemplates(text)
else:
# Drop transclusions (template, parser functions)
text = dropNested(text, r'{{', r'}}')
# Drop tables
text = dropNested(text, r'{\|', r'\|}')
# replace external links
text = replaceExternalLinks(text)
# replace internal links
text = replaceInternalLinks(text)
# drop MagicWords behavioral switches
text = magicWordsRE.sub('', text)
################ Process HTML ###############
# turn into HTML, except for the content of <syntaxhighlight>
res = ''
cur = 0
for m in syntaxhighlight.finditer(text):
end = m.end()
res += unescape(text[cur:m.start()]) + m.group(1)
cur = end
text = res + unescape(text[cur:])
# Handle bold/italic/quote
if extractor.toHTML:
text = bold_italic.sub(r'<b>\1</b>', text)
text = bold.sub(r'<b>\1</b>', text)
text = italic.sub(r'<i>\1</i>', text)
else:
text = bold_italic.sub(r'\1', text)
text = bold.sub(r'\1', text)
text = italic_quote.sub(r'"\1"', text)
text = italic.sub(r'"\1"', text)
text = quote_quote.sub(r'"\1"', text)
# residuals of unbalanced quotes
text = text.replace("'''", '').replace("''", '"')
# Collect spans
spans = []
# Drop HTML comments
for m in comment.finditer(text):
spans.append((m.start(), m.end()))
# Drop self-closing tags
for pattern in selfClosing_tag_patterns:
for m in pattern.finditer(text):
spans.append((m.start(), m.end()))
# Drop ignored tags
for left, right in ignored_tag_patterns:
for m in left.finditer(text):
spans.append((m.start(), m.end()))
for m in right.finditer(text):
spans.append((m.start(), m.end()))
# Bulk remove all spans
text = dropSpans(spans, text)
# Drop discarded elements
for tag in discardElements:
text = dropNested(text, r'<\s*%s\b[^>/]*>' % tag, r'<\s*/\s*%s>' % tag)
if not extractor.toHTML:
# Turn into text what is left (&nbsp;) and <syntaxhighlight>
text = unescape(text)
# Expand placeholders
for pattern, placeholder in placeholder_tag_patterns:
index = 1
for match in pattern.finditer(text):
text = text.replace(match.group(), '%s_%d' % (placeholder, index))
index += 1
text = text.replace('<<', u'«').replace('>>', u'»')
#############################################
# Cleanup text
text = text.replace('\t', ' ')
text = spaces.sub(' ', text)
text = dots.sub('...', text)
text = re.sub(u' ([,:.\)\]»])', r'\1', text)  # drop space before closing punctuation
text = re.sub(u'([\[\(«]) ', r'\1', text)     # drop space after opening punctuation
text = re.sub(r'\n\W+?\n', '\n', text, flags=re.U) # lines with only punctuations
text = text.replace(',,', ',').replace(',.', '.')
return text
# skip level 1, it is page name level
section = re.compile(r'(==+)\s*(.*?)\s*\1')
listOpen = { '*': '<ul>', '#': '<ol>', ';': '<dl>', ':': '<dl>' }
listClose = { '*': '</ul>', '#': '</ol>', ';': '</dl>', ':': '</dl>' }
listItem = { '*': '<li>%s</li>', '#': '<li>%s</li>', ';': '<dt>%s</dt>',
':': '<dd>%s</dd>' }
def compact(text):
"""Deal with headers, lists, empty sections, residuals of tables.
:param toHTML: convert to HTML
"""
page = [] # list of paragraph
headers = {} # Headers for unfilled sections
emptySection = False # empty sections are discarded
listLevel = '' # nesting of lists
for line in text.split('\n'):
if not line:
continue
# Handle section titles
m = section.match(line)
if m:
title = m.group(2)
lev = len(m.group(1))
if Extractor.toHTML:
page.append("<h%d>%s</h%d>" % (lev, title, lev))
if title and title[-1] not in '!?':
title += '.'
headers[lev] = title
# drop previous headers
for i in headers.keys():
if i > lev:
del headers[i]
emptySection = True
continue
# Handle page title
if line.startswith('++'):
title = line[2:-2]
if title:
if title[-1] not in '!?':
title += '.'
page.append(title)
# handle indents
elif line[0] == ':':
#page.append(line.lstrip(':*#;'))
continue
# handle lists
elif line[0] in '*#;:':
if Extractor.toHTML:
i = 0
for c,n in izip_longest(listLevel, line, fillvalue=''):
if not n or n not in '*#;:':
if c:
page.append(listClose[c])
listLevel = listLevel[:-1]
continue
else:
break
# n != ''
if c != n and (not c or (c not in ';:' and n not in ';:')):
if c:
# close level
page.append(listClose[c])
listLevel = listLevel[:-1]
listLevel += n
page.append(listOpen[n])
i += 1
n = line[i-1] # last list char
line = line[i:].strip()
if line:
page.append(listItem.get(n, '<li>%s</li>') % line)
else:
continue
elif len(listLevel):
for c in reversed(listLevel):
page.append(listClose[c])
listLevel = []
# Drop residuals of lists
elif line[0] in '{|' or line[-1] == '}':
continue
# Drop irrelevant lines
elif (line[0] == '(' and line[-1] == ')') or line.strip('.-') == '':
continue
elif len(headers):
if not Extractor.keepSections:
items = headers.items()
items.sort()
for (i, v) in items:
page.append(v)
headers.clear()
page.append(line) # first line
emptySection = False
elif not emptySection:
page.append(line)
# dangerous
# # Drop preformatted
# elif line[0] == ' ':
# continue
return page
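# Illustrative sketch (editor's addition): with Extractor.toHTML and
# Extractor.keepSections both False, a header is only emitted once its section
# receives content, and trailing empty sections are dropped:
#
#   >>> compact('== Intro ==\nSome text.\n== Empty ==\n')
#   ['Intro.', 'Some text.']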
def handle_unicode(entity):
numeric_code = int(entity[2:-1])
if numeric_code >= 0x10000: return ''
return unichr(numeric_code)
#------------------------------------------------------------------------------
# Output
class NextFile(object):
"""
Synchronous generation of next available file name.
"""
filesPerDir = 100
def __init__(self, path_name):
self.path_name = path_name
self.dir_index = -1
self.file_index = -1
def next(self):
self.file_index = (self.file_index + 1) % NextFile.filesPerDir
if self.file_index == 0:
self.dir_index += 1
dirname = self._dirname()
if not os.path.isdir(dirname):
os.makedirs(dirname)
return self._filepath()
def _dirname(self):
char1 = self.dir_index % 26
char2 = self.dir_index / 26 % 26
return os.path.join(self.path_name, '%c%c' % (ord('A') + char2, ord('A') + char1))
def _filepath(self):
return '%s/wiki_%02d' % (self._dirname(), self.file_index)
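# Illustrative sketch (editor's addition): NextFile hands out paths AA/wiki_00,
# AA/wiki_01, ..., AA/wiki_99, AB/wiki_00, ..., creating each directory on
# demand:
#
#   >>> nf = NextFile('extracted')
#   >>> nf.next()
#   'extracted/AA/wiki_00'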
class OutputSplitter(object):
"""
File-like object that splits output across multiple files of a given max size.
"""
def __init__(self, nextFile, max_file_size=0, compress=True):
"""
:param nextFile: a NextFile object from which to obtain filenames
to use.
:param max_file_size: the maximum size of each file.
:param compress: whether to write data with bzip compression.
"""
self.nextFile = nextFile
self.compress = compress
self.max_file_size = max_file_size
self.file = self.open(self.nextFile.next())
def reserve(self, size):
if self.file.tell() + size > self.max_file_size:
self.close()
self.file = self.open(self.nextFile.next())
def write(self, data):
self.reserve(len(data))
self.file.write(data)
def close(self):
self.file.close()
def open(self, filename):
if self.compress:
return bz2.BZ2File(filename + '.bz2', 'w')
else:
return open(filename, 'w')
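# Illustrative sketch (editor's addition): OutputSplitter is used like a plain
# file object; reserve() rolls over to the next NextFile path whenever the
# current file would exceed max_file_size (here without compression):
#
#   >>> out = OutputSplitter(NextFile('extracted'), max_file_size=1 << 20, compress=False)
#   >>> out.write('<doc>...</doc>\n')
#   >>> out.close()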
# ----------------------------------------------------------------------
# READER
tagRE = re.compile(r'(.*?)<(/?\w+)[^>]*>(?:([^<]*)(<.*?>)?)?')
# groups: 1 = text before the tag, 2 = tag name, 3 = enclosed text, 4 = following tag
def load_templates(file, output_file=None):
"""
Load templates from :param file:.
:param output_file: file where to save templates and modules.
"""
global templateNamespace, templatePrefix
templatePrefix = templateNamespace + ':'
global moduleNamespace, modulePrefix
modulePrefix = moduleNamespace + ':'
articles = 0
page = []
inText = False
if output_file:
output = codecs.open(output_file, 'wb', 'utf-8')
for line in file:
line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
elif tag == 'title':
title = m.group(3)
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
if not output_file and not templateNamespace: # do not know it yet
# we reconstruct it from the first title
colon = title.find(':')
if colon > 1:
templateNamespace = title[:colon]
templatePrefix = title[:colon+1]
# FIXME: should reconstruct also moduleNamespace
if title.startswith(templatePrefix):
define_template(title, page)
# save templates and modules to file
if output_file and (title.startswith(templatePrefix) or
title.startswith(modulePrefix)):
output.write('<page>\n')
output.write(' <title>%s</title>\n' % title)
output.write(' <ns>10</ns>\n')
output.write(' <text>')
for line in page:
output.write(line)
output.write(' </text>\n')
output.write('</page>\n')
page = []
articles += 1
if articles % 100000 == 0:
logging.info("Preprocessed %d pages", articles)
if output_file:
output.close()
logging.info("Saved %d templates to '%s'", len(templates), output_file)
def process_dump(input_file, template_file, out_file, file_size, file_compress,
process_count):
"""
:param input_file: name of the wikipedia dump file; '-' to read from stdin
:param template_file: optional file with template definitions.
:param out_file: directory where to store extracted data, or '-' for stdout
:param file_size: max size of each extracted file, or None for no max (one file)
:param file_compress: whether to compress files with bzip.
:param process_count: number of extraction processes to spawn.
"""
global urlbase
global knownNamespaces
global templateNamespace, templatePrefix
global moduleNamespace, modulePrefix
global expand_templates
if input_file == '-':
input = sys.stdin
else:
input = fileinput.FileInput(input_file, openhook=fileinput.hook_compressed)
# collect siteinfo
for line in input:
line = line.decode('utf-8')
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'base':
# discover urlbase from the xml dump file
# /mediawiki/siteinfo/base
base = m.group(3)
urlbase = base[:base.rfind("/")]
elif tag == 'namespace':
knownNamespaces.add(m.group(3))
if re.search('key="10"', line):
templateNamespace = m.group(3)
templatePrefix = templateNamespace + ':'
elif re.search('key="828"', line):
moduleNamespace = m.group(3)
modulePrefix = moduleNamespace + ':'
elif tag == '/siteinfo':
break
if expand_templates:
# preprocess
template_load_start = default_timer()
if template_file and os.path.exists(template_file):
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", template_file)
file = fileinput.FileInput(template_file, openhook=fileinput.hook_compressed)
load_templates(file)
file.close()
else:
if input_file == '-':
# stdin cannot be scanned twice; require an explicit template file
raise ValueError("to use templates with stdin dump, must supply explicit template-file")
logging.info("Preprocessing '%s' to collect template definitions: this may take some time.", input_file)
load_templates(input, template_file)
input.close()
input = fileinput.FileInput(input_file,openhook=fileinput.hook_compressed)
template_load_elapsed = default_timer() - template_load_start
logging.info("Loaded %d templates in %.1fs", len(templates), template_load_elapsed)
if out_file == '-':
output = sys.stdout
if file_compress:
logging.warn("writing to stdout, so no output compression (use external tool)")
else:
nextFile = NextFile(out_file)
output = OutputSplitter(nextFile, file_size, file_compress)
# process pages
logging.info("Starting page extraction from %s.", input_file)
extract_start = default_timer()
# Parallel Map/Reduce:
# - pages to be processed are dispatched to workers
# - a reduce process collects the results, sort them and print them.
maxsize = 10 * process_count
# output queue
output_queue = Queue(maxsize=maxsize)
# Reduce job that sorts and prints output
reduce = Process(target=reduce_process, args=(output_queue, output))
reduce.start()
# initialize jobs queue
jobs_queue = Queue(maxsize=maxsize)
# start worker processes
logging.info("Using %d extract processes.", process_count)
workers = []
for _ in xrange(max(1, process_count)):
extractor = Process(target=extract_process,
args=(jobs_queue, output_queue))
extractor.daemon = True # only live while parent process lives
extractor.start()
workers.append(extractor)
# Mapper process
# we collect individual lines, since str.join() is significantly faster
# than concatenation
page = []
id = None
last_id = None
ordinal = 0 # page count
inText = False
redirect = False
for line in input:
line = line.decode('utf-8')
if '<' not in line: # faster than doing re.search()
if inText:
page.append(line)
continue
m = tagRE.search(line)
if not m:
continue
tag = m.group(2)
if tag == 'page':
page = []
redirect = False
elif tag == 'id' and not id:
id = m.group(3)
elif tag == 'title':
title = m.group(3)
elif tag == 'redirect':
redirect = True
elif tag == 'text':
inText = True
line = line[m.start(3):m.end(3)]
page.append(line)
if m.lastindex == 4: # open-close
inText = False
elif tag == '/text':
if m.group(1):
page.append(m.group(1))
inText = False
elif inText:
page.append(line)
elif tag == '/page':
colon = title.find(':')
if (colon < 0 or title[:colon] in acceptedNamespaces) and id != last_id and \
not redirect and not title.startswith(templateNamespace):
job = (id, title, page, ordinal)
jobs_queue.put(job) # goes to any available extract_process
last_id = id
ordinal += 1
id = None
page = []
input.close()
# signal termination
for w in workers:
jobs_queue.put(None)
# wait for workers to terminate
for w in workers:
w.join()
# signal end of work to reduce process
output_queue.put(None)
# wait for it to finish
reduce.join()
if output != sys.stdout:
output.close()
extract_duration = default_timer() - extract_start
extract_rate = ordinal / extract_duration
logging.info("Finished %d-process extraction of %d articles in %.1fs (%.1f art/s)",
process_count, ordinal, extract_duration, extract_rate)
#----------------------------------------------------------------------
# Multiprocess support
def extract_process(jobs_queue, output_queue):
"""Pull tuples of raw page content, do CPU/regex-heavy fixup, push finished text
:param job_queue: where to get jobs.
:param output_queue: where to queue extracted text for output.
"""
while True:
job = jobs_queue.get() # job is (id, title, page, ordinal)
if job:
out = StringIO() # memory buffer
Extractor(*job[:3]).extract(out) # (id, title, page)
text = out.getvalue()
output_queue.put((job[3], text)) # (ordinal, extracted_text)
out.close()
else:
break
def reduce_process(output_queue, output):
"""Pull finished article text, write series of files (or stdout)
:param output_queue: text to be output.
:param output: file object where to print.
"""
interval_start = default_timer()
period = 100000
# FIXME: use a heap
ordering_buffer = {} # collected pages
next_ordinal = 0 # sequence number of pages
while True:
if next_ordinal in ordering_buffer:
output.write(ordering_buffer.pop(next_ordinal))
next_ordinal += 1
# progress report
if next_ordinal % period == 0:
interval_rate = period / (default_timer() - interval_start)
logging.info("Extracted %d articles (%.1f art/s)",
next_ordinal, interval_rate)
interval_start = default_timer()
else:
# mapper puts None to signal finish
pair = output_queue.get()
if not pair:
break
ordinal, text = pair
ordering_buffer[ordinal] = text
# ----------------------------------------------------------------------
# Minimum size of output files
minFileSize = 200 * 1024
def main():
global urlbase, acceptedNamespaces
global expand_templates, templateCache
parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
parser.add_argument("input",
help="XML wiki dump file")
groupO = parser.add_argument_group('Output')
groupO.add_argument("-o", "--output", default="text",
help="directory for extracted files (or '-' for dumping to stdin)")
groupO.add_argument("-b", "--bytes", default="1M",
help="maximum bytes per output file (default %(default)s)",
metavar="n[KMG]")
groupO.add_argument("-c", "--compress", action="store_true",
help="compress output files using bzip")
groupP = parser.add_argument_group('Processing')
groupP.add_argument("--html", action="store_true",
help="produce HTML output, subsumes --links and --sections")
groupP.add_argument("-l", "--links", action="store_true",
help="preserve links")
groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
help="accepted namespaces")
groupP.add_argument("-s", "--sections", action="store_true",
help="preserve sections")
groupP.add_argument("--templates",
help="use or create file containing templates")
groupP.add_argument("--no-templates", action="store_false",
help="Do not expand templates")
default_process_count = cpu_count() - 1
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of extract processes (default %(default)s)")
groupS = parser.add_argument_group('Special')
groupS.add_argument("-q", "--quiet", action="store_true",
help="suppress reporting progress info")
groupS.add_argument("--debug", action="store_true",
help="print debug info")
groupS.add_argument("-a", "--article", action="store_true",
help="analyze a file containing a single article (debug option)")
groupS.add_argument("-v", "--version", action="version",
version='%(prog)s ' + version,
help="print program version")
args = parser.parse_args()
Extractor.keepLinks = args.links
Extractor.keepSections = args.sections
Extractor.toHTML = args.html
if args.html:
Extractor.keepLinks = True
Extractor.keepSections = True
expand_templates = args.no_templates
try:
power = 'kmg'.find(args.bytes[-1].lower()) + 1
file_size = int(args.bytes[:-1]) * 1024 ** power
if file_size < minFileSize:
raise ValueError()
except ValueError:
logging.error('Insufficient or invalid size: %s', args.bytes)
return
if args.namespaces:
acceptedNamespaces = set(args.namespaces.split(','))
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger()
if not args.quiet:
logger.setLevel(logging.INFO)
if args.debug:
logger.setLevel(logging.DEBUG)
input_file = args.input
if not Extractor.keepLinks:
ignoreTag('a')
# sharing cache of parser templates is too slow:
#manager = Manager()
#templateCache = manager.dict()
if args.article:
if args.templates:
if os.path.exists(args.templates):
with open(args.templates) as file:
load_templates(file)
with open(input_file) as file:
page = file.read().decode('utf-8')
m = re.search(r'<id>(.*)</id>', page)
id = m.group(1) if m else 0
m = re.search(r'<title>(.*)</title>', page)
if m:
title = m.group(1)
else:
logging.error('Missing title element')
return
Extractor(id, title, [page]).extract(sys.stdout)
return
output_path = args.output
if output_path != '-' and not os.path.isdir(output_path):
try:
os.makedirs(output_path)
except:
logging.error('Could not create: %s', output_path)
return
process_dump(input_file, args.templates, output_path, file_size,
args.compress, args.processes)
if __name__ == '__main__':
main()
|
screen.py
|
import numpy as np
from mss import mss
from logger import Logger
from typing import Tuple
from utils.misc import WindowSpec, find_d2r_window, wait
from config import Config
import threading
sct = mss()
monitor_roi = sct.monitors[0]
found_offsets = False
monitor_x_range = None
monitor_y_range = None
detect_window = True
detect_window_thread = None
FIND_WINDOW = WindowSpec(
title_regex=Config().advanced_options["hwnd_window_title"],
process_name_regex=Config().advanced_options["hwnd_window_process"],
)
def start_detecting_window():
global detect_window, detect_window_thread
detect_window = True
if detect_window_thread is None:
detect_window_thread = threading.Thread(target=detect_window_position)
detect_window_thread.start()
def detect_window_position():
global detect_window
Logger.debug('Detect window thread started')
while detect_window:
find_and_set_window_position()
Logger.debug('Detect window thread stopped')
def find_and_set_window_position():
    position = find_d2r_window(
        FIND_WINDOW, offset=Config().advanced_options["window_client_area_offset"])
if position is not None:
set_window_position(*position)
wait(0.5)
def set_window_position(offset_x: int, offset_y: int):
global monitor_roi, monitor_x_range, monitor_y_range, found_offsets
if found_offsets and monitor_roi["top"] == offset_y and monitor_roi["left"] == offset_x:
return
Logger.debug(f"Set offsets: left {offset_x}px, top {offset_y}px")
monitor_roi["top"] = offset_y
monitor_roi["left"] = offset_x
monitor_roi["width"] = Config().ui_pos["screen_width"]
monitor_roi["height"] = Config().ui_pos["screen_height"]
monitor_x_range = (
monitor_roi["left"] + 10, monitor_roi["left"] + monitor_roi["width"] - 10)
monitor_y_range = (
monitor_roi["top"] + 10, monitor_roi["top"] + monitor_roi["height"] - 10)
found_offsets = True
def stop_detecting_window():
    global detect_window, detect_window_thread
    detect_window = False
    if detect_window_thread is not None:
        detect_window_thread.join()
        detect_window_thread = None
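# grab() returns the current frame of the configured ROI as a BGR ndarray
# (mss delivers BGRA, so the alpha channel is dropped).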
def grab() -> np.ndarray:
global monitor_roi
img = np.array(sct.grab(monitor_roi))
return img[:, :, :3]
# TODO: Move the below funcs to utils(?)
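# Coordinate systems used by the converters below:
#   monitor: absolute desktop pixels (what mss and the OS cursor use)
#   screen:  pixels relative to the top-left corner of the game window
#   abs:     pixels relative to the window center, i.e. the player character
# e.g. convert_abs_to_monitor((0, 0)) yields the monitor-space pixel of the character.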
def convert_monitor_to_screen(screen_coord: Tuple[float, float]) -> Tuple[float, float]:
global monitor_roi
if screen_coord is None:
Logger.error("convert_monitor_to_screen: empty coordinates passed")
return None
return (screen_coord[0] - monitor_roi["left"], screen_coord[1] - monitor_roi["top"])
def convert_screen_to_monitor(screen_coord: Tuple[float, float]) -> Tuple[float, float]:
global monitor_roi
if screen_coord is None:
Logger.error("convert_screen_to_monitor: empty coordinates passed")
return None
x = screen_coord[0] + monitor_roi["left"]
y = screen_coord[1] + monitor_roi["top"]
return (np.clip(x, *monitor_x_range), np.clip(y, *monitor_y_range))
def convert_abs_to_screen(abs_coord: Tuple[float, float]) -> Tuple[float, float]:
global monitor_roi
if abs_coord is None:
Logger.error("convert_screen_to_monitor: empty coordinates passed")
return None
# abs has it's center on char which is the center of the screen
return ((monitor_roi["width"] // 2) + abs_coord[0], (monitor_roi["height"] // 2) + abs_coord[1])
def convert_screen_to_abs(screen_coord: Tuple[float, float]) -> Tuple[float, float]:
global monitor_roi
if screen_coord is None:
Logger.error("convert_screen_to_abs: empty coordinates passed")
return None
return (screen_coord[0] - (monitor_roi["width"] // 2), screen_coord[1] - (monitor_roi["height"] // 2))
def convert_abs_to_monitor(abs_coord: Tuple[float, float]) -> Tuple[float, float]:
if abs_coord is None:
Logger.error("convert_abs_to_monitor: empty coordinates passed")
return None
screen_coord = convert_abs_to_screen(abs_coord)
monitor_coord = convert_screen_to_monitor(screen_coord)
return monitor_coord
Logger.debug(f"Using WinAPI to search for window: {FIND_WINDOW}")
|
test_client.py
|
import asyncio
from collections import deque
from concurrent.futures import CancelledError
import gc
import logging
from operator import add
import os
import pickle
import psutil
import random
import subprocess
import sys
import threading
from threading import Semaphore
from time import sleep
import traceback
import warnings
import weakref
import zipfile
import pytest
from toolz import identity, isdistinct, concat, pluck, valmap, partial, first, merge
from tornado import gen
import dask
from dask import delayed
from dask.optimization import SubgraphCallable
import dask.bag as db
from distributed import (
Worker,
Nanny,
fire_and_forget,
LocalCluster,
get_client,
secede,
get_worker,
Executor,
profile,
TimeoutError,
)
from distributed.comm import CommClosedError
from distributed.client import (
Client,
Future,
wait,
as_completed,
tokenize,
_get_global_client,
default_client,
futures_of,
temp_default_client,
)
from distributed.compatibility import WINDOWS
from distributed.metrics import time
from distributed.scheduler import Scheduler, KilledWorker
from distributed.sizeof import sizeof
from distributed.utils import (
ignoring,
mp_context,
sync,
tmp_text,
tokey,
tmpfile,
is_valid_xml,
)
from distributed.utils_test import (
cluster,
slowinc,
slowadd,
slowdec,
randominc,
inc,
dec,
div,
throws,
geninc,
asyncinc,
gen_cluster,
gen_test,
double,
popen,
captured_logger,
varying,
map_varying,
wait_for,
async_wait_for,
pristine_loop,
save_sys_modules,
)
from distributed.utils_test import ( # noqa: F401
client as c,
client_secondary as c2,
cluster_fixture,
loop,
loop_in_thread,
nodebug,
s,
a,
b,
)
@gen_cluster(client=True, timeout=None)
def test_submit(c, s, a, b):
x = c.submit(inc, 10)
assert not x.done()
assert isinstance(x, Future)
assert x.client is c
result = yield x
assert result == 11
assert x.done()
y = c.submit(inc, 20)
z = c.submit(add, x, y)
result = yield z
assert result == 11 + 21
s.validate_state()
@gen_cluster(client=True)
def test_map(c, s, a, b):
L1 = c.map(inc, range(5))
assert len(L1) == 5
assert isdistinct(x.key for x in L1)
assert all(isinstance(x, Future) for x in L1)
result = yield L1[0]
assert result == inc(0)
assert len(s.tasks) == 5
L2 = c.map(inc, L1)
result = yield L2[1]
assert result == inc(inc(1))
assert len(s.tasks) == 10
# assert L1[0].key in s.tasks[L2[0].key]
total = c.submit(sum, L2)
result = yield total
assert result == sum(map(inc, map(inc, range(5))))
L3 = c.map(add, L1, L2)
result = yield L3[1]
assert result == inc(1) + inc(inc(1))
L4 = c.map(add, range(3), range(4))
results = yield c.gather(L4)
if sys.version_info[0] >= 3:
assert results == list(map(add, range(3), range(4)))
def f(x, y=10):
return x + y
L5 = c.map(f, range(5), y=5)
results = yield c.gather(L5)
assert results == list(range(5, 10))
y = c.submit(f, 10)
L6 = c.map(f, range(5), y=y)
results = yield c.gather(L6)
assert results == list(range(20, 25))
s.validate_state()
@gen_cluster(client=True)
def test_map_empty(c, s, a, b):
L1 = c.map(inc, [], pure=False)
assert len(L1) == 0
results = yield c.gather(L1)
assert results == []
@gen_cluster(client=True)
def test_map_keynames(c, s, a, b):
futures = c.map(inc, range(4), key="INC")
assert all(f.key.startswith("INC") for f in futures)
assert isdistinct(f.key for f in futures)
futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
assert [f.key for f in futures] != [f.key for f in futures2]
keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
futures = c.map(inc, range(4), key=keys)
assert [f.key for f in futures] == keys
@gen_cluster(client=True)
def test_map_retries(c, s, a, b):
args = [
[ZeroDivisionError("one"), 2, 3],
[4, 5, 6],
[ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
]
x, y, z = c.map(*map_varying(args), retries=2)
assert (yield x) == 2
assert (yield y) == 4
assert (yield z) == 9
x, y, z = c.map(*map_varying(args), retries=1, pure=False)
assert (yield x) == 2
assert (yield y) == 4
with pytest.raises(ZeroDivisionError, match="eight"):
yield z
x, y, z = c.map(*map_varying(args), retries=0, pure=False)
with pytest.raises(ZeroDivisionError, match="one"):
yield x
assert (yield y) == 4
with pytest.raises(ZeroDivisionError, match="seven"):
yield z
@gen_cluster(client=True)
def test_compute_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check for varying() use
x = c.compute(delayed(varying(args))())
with pytest.raises(ZeroDivisionError, match="one"):
yield x
# Same retries for all
x = c.compute(delayed(varying(args))(), retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
yield x
x = c.compute(delayed(varying(args))(), retries=2)
assert (yield x) == 3
args.append(4)
x = c.compute(delayed(varying(args))(), retries=2)
assert (yield x) == 3
# Per-future retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y = [delayed(varying(args))() for args in (xargs, yargs)]
x, y = c.compute([x, y], retries={x: 2})
gc.collect()
assert (yield x) == 30
with pytest.raises(ZeroDivisionError, match="five"):
yield y
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.compute([x, y, z], retries={(y, z): 2})
with pytest.raises(ZeroDivisionError, match="one"):
yield x
assert (yield y) == 70
assert (yield z) == 80
def test_retries_get(c):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
assert x.compute(retries=5) == 3
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = delayed(varying(args))()
with pytest.raises(ZeroDivisionError):
x.compute()
@gen_cluster(client=True)
def test_compute_persisted_retries(c, s, a, b):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
# Sanity check
x = c.persist(delayed(varying(args))())
fut = c.compute(x)
with pytest.raises(ZeroDivisionError, match="one"):
yield fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=1)
with pytest.raises(ZeroDivisionError, match="two"):
yield fut
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=2)
assert (yield fut) == 3
args.append(4)
x = c.persist(delayed(varying(args))())
fut = c.compute(x, retries=3)
assert (yield fut) == 3
@gen_cluster(client=True)
def test_persist_retries(c, s, a, b):
# Same retries for all
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
x = c.persist(delayed(varying(args))(), retries=1)
x = c.compute(x)
with pytest.raises(ZeroDivisionError, match="two"):
yield x
x = c.persist(delayed(varying(args))(), retries=2)
x = c.compute(x)
assert (yield x) == 3
# Per-key retries
xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
zargs = [80, 90, 100]
x, y, z = [delayed(varying(args))() for args in (xargs, yargs, zargs)]
x, y, z = c.persist([x, y, z], retries={(y, z): 2})
x, y, z = c.compute([x, y, z])
with pytest.raises(ZeroDivisionError, match="one"):
yield x
assert (yield y) == 70
assert (yield z) == 80
@gen_cluster(client=True)
def test_retries_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(3, 3))
future = c.compute(x.sum(), retries=2)
y = yield future
assert y == 100
@gen_cluster(client=True)
def test_future_repr(c, s, a, b):
x = c.submit(inc, 10)
for func in [repr, lambda x: x._repr_html_()]:
assert str(x.key) in func(x)
assert str(x.status) in func(x)
assert str(x.status) in repr(c.futures[x.key])
@gen_cluster(client=True)
def test_future_tuple_repr(c, s, a, b):
da = pytest.importorskip("dask.array")
y = da.arange(10, chunks=(5,)).persist()
f = futures_of(y)[0]
for func in [repr, lambda x: x._repr_html_()]:
for k in f.key:
assert str(k) in func(f)
@gen_cluster(client=True)
def test_Future_exception(c, s, a, b):
x = c.submit(div, 1, 0)
result = yield x.exception()
assert isinstance(result, ZeroDivisionError)
x = c.submit(div, 1, 1)
result = yield x.exception()
assert result is None
def test_Future_exception_sync(c):
x = c.submit(div, 1, 0)
assert isinstance(x.exception(), ZeroDivisionError)
x = c.submit(div, 1, 1)
assert x.exception() is None
@gen_cluster(client=True)
def test_Future_release(c, s, a, b):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
yield x
x.release()
yield gen.moment
assert not c.futures
x = c.submit(slowinc, 1, delay=0.5)
x.release()
yield gen.moment
assert not c.futures
x = c.submit(div, 1, 0)
yield x.exception()
x.release()
yield gen.moment
assert not c.futures
def test_Future_release_sync(c):
# Released Futures should be removed timely from the Client
x = c.submit(div, 1, 1)
x.result()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(slowinc, 1, delay=0.8)
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
x = c.submit(div, 1, 0)
x.exception()
x.release()
wait_for(lambda: not c.futures, timeout=0.3)
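# Remote exceptions should come back with short tracebacks: fewer than five frames
# once round-tripped through tblib.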
def test_short_tracebacks(loop, c):
tblib = pytest.importorskip("tblib")
future = c.submit(div, 1, 0)
try:
future.result()
except Exception:
_, _, tb = sys.exc_info()
tb = tblib.Traceback(tb).to_dict()
n = 0
while tb is not None:
n += 1
tb = tb["tb_next"]
assert n < 5
@gen_cluster(client=True)
def test_map_naming(c, s, a, b):
L1 = c.map(inc, range(5))
L2 = c.map(inc, range(5))
assert [x.key for x in L1] == [x.key for x in L2]
L3 = c.map(inc, [1, 1, 1, 1])
assert len({x._state for x in L3}) == 1
L4 = c.map(inc, [1, 1, 1, 1], pure=False)
assert len({x._state for x in L4}) == 4
@gen_cluster(client=True)
def test_submit_naming(c, s, a, b):
a = c.submit(inc, 1)
b = c.submit(inc, 1)
assert a._state is b._state
c = c.submit(inc, 1, pure=False)
assert c.key != a.key
@gen_cluster(client=True)
def test_exceptions(c, s, a, b):
x = c.submit(div, 1, 2)
result = yield x
assert result == 1 / 2
x = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
result = yield x
x = c.submit(div, 10, 2) # continues to operate
result = yield x
assert result == 10 / 2
@gen_cluster()
def test_gc(s, a, b):
c = yield Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
yield x
assert s.tasks[x.key].who_has
x.__del__()
yield async_wait_for(
lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
)
yield c.close()
def test_thread(c):
x = c.submit(inc, 1)
assert x.result() == 2
x = c.submit(slowinc, 1, delay=0.3)
with pytest.raises(gen.TimeoutError):
x.result(timeout=0.01)
assert x.result() == 2
def test_sync_exceptions(c):
x = c.submit(div, 10, 2)
assert x.result() == 5
y = c.submit(div, 10, 0)
try:
y.result()
assert False
except ZeroDivisionError:
pass
z = c.submit(div, 10, 5)
assert z.result() == 2
@gen_cluster(client=True)
def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = yield c.gather(x)
assert result == 11
result = yield c.gather([x])
assert result == [11]
result = yield c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
def test_gather_lost(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
yield a.close()
with pytest.raises(Exception):
res = yield c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
yield c.gather([x, y])
[xx] = yield c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = yield c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
results = yield futures
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True, timeout=None)
def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = yield future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = yield futures
assert result == [2]
result = yield c.get({}, [], sync=False)
assert result == []
result = yield c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 2
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
yield c.gather(future_f)
with pytest.raises(AttributeError):
yield c.gather(future_g)
yield a.close()
@gen_cluster(client=True)
def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = yield wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = yield wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True, timeout=2)
def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(gen.TimeoutError):
yield wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(gen.TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
try:
wait(x, y)
except Exception as e:
assert "timeout" in str(e)
assert "list" in str(e)
@gen_cluster(client=True)
def test_garbage_collection(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
assert c.refcount[x.key] == 2
x.__del__()
yield gen.moment
assert c.refcount[x.key] == 1
z = c.submit(inc, y)
y.__del__()
yield gen.moment
result = yield z
assert result == 3
ykey = y.key
y.__del__()
yield gen.moment
assert ykey not in c.futures
@gen_cluster(client=True)
def test_garbage_collection_with_scatter(c, s, a, b):
[future] = yield c.scatter([1])
assert future.key in c.futures
assert future.status == "finished"
assert s.who_wants[future.key] == {c.id}
key = future.key
assert c.refcount[key] == 1
future.__del__()
yield gen.moment
assert c.refcount[key] == 0
start = time()
while True:
if key not in s.tasks or not s.tasks[key].who_has:
break
else:
assert time() < start + 3
yield gen.sleep(0.1)
@gen_cluster(timeout=1000, client=True)
def test_recompute_released_key(c, s, a, b):
x = c.submit(inc, 100)
result1 = yield x
xkey = x.key
del x
import gc
gc.collect()
yield gen.moment
assert c.refcount[xkey] == 0
# 1 second batching needs a second action to trigger
while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
yield gen.sleep(0.1)
x = c.submit(inc, 100)
assert x.key in c.futures
result2 = yield x
assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
def test_long_tasks_dont_trigger_timeout(c, s, a, b):
from time import sleep
x = c.submit(sleep, 3)
yield x
@pytest.mark.skip
@gen_cluster(client=True)
def test_missing_data_heals(c, s, a, b):
a.validate = False
b.validate = False
x = c.submit(inc, 1)
y = c.submit(inc, x)
z = c.submit(inc, y)
yield wait([x, y, z])
# Secretly delete y's key
if y.key in a.data:
del a.data[y.key]
a.release_key(y.key)
if y.key in b.data:
del b.data[y.key]
b.release_key(y.key)
yield gen.moment
w = c.submit(add, y, z)
result = yield w
assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
def test_gather_robust_to_missing_data(c, s, a, b):
a.validate = False
b.validate = False
x, y, z = c.map(inc, range(3))
yield wait([x, y, z]) # everything computed
for f in [x, y]:
for w in [a, b]:
if f.key in w.data:
del w.data[f.key]
yield gen.moment
w.release_key(f.key)
xx, yy, zz = yield c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
def test_gather_robust_to_nested_missing_data(c, s, a, b):
a.validate = False
b.validate = False
w = c.submit(inc, 1)
x = c.submit(inc, w)
y = c.submit(inc, x)
z = c.submit(inc, y)
yield wait([z])
for worker in [a, b]:
for datum in [y, z]:
if datum.key in worker.data:
del worker.data[datum.key]
yield gen.moment
worker.release_key(datum.key)
result = yield c.gather([z])
assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
def test_tokenize_on_futures(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
tok = tokenize(x)
assert tokenize(x) == tokenize(x)
assert tokenize(x) == tokenize(y)
c.futures[x.key].finish()
assert tok == tokenize(y)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_restrictions_submit(c, s, a, b):
x = c.submit(inc, 1, workers={a.ip})
y = c.submit(inc, x, workers={b.ip})
yield wait([x, y])
assert s.host_restrictions[x.key] == {a.ip}
assert x.key in a.data
assert s.host_restrictions[y.key] == {b.ip}
assert y.key in b.data
@gen_cluster(client=True)
def test_restrictions_ip_port(c, s, a, b):
x = c.submit(inc, 1, workers={a.address})
y = c.submit(inc, x, workers={b.address})
yield wait([x, y])
assert s.worker_restrictions[x.key] == {a.address}
assert x.key in a.data
assert s.worker_restrictions[y.key] == {b.address}
assert y.key in b.data
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_restrictions_map(c, s, a, b):
L = c.map(inc, range(5), workers={a.ip})
yield wait(L)
assert set(a.data) == {x.key for x in L}
assert not b.data
for x in L:
assert s.host_restrictions[x.key] == {a.ip}
L = c.map(inc, [10, 11, 12], workers=[{a.ip}, {a.ip, b.ip}, {b.ip}])
yield wait(L)
assert s.host_restrictions[L[0].key] == {a.ip}
assert s.host_restrictions[L[1].key] == {a.ip, b.ip}
assert s.host_restrictions[L[2].key] == {b.ip}
with pytest.raises(ValueError):
c.map(inc, [10, 11, 12], workers=[{a.ip}])
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_restrictions_get(c, s, a, b):
dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
restrictions = {"y": {a.ip}, "z": {b.ip}}
futures = c.get(dsk, ["y", "z"], restrictions, sync=False)
result = yield futures
assert result == [2, 3]
assert "y" in a.data
assert "z" in b.data
@gen_cluster(client=True)
def dont_test_bad_restrictions_raise_exception(c, s, a, b):
z = c.submit(inc, 2, workers={"bad-address"})
try:
yield z
assert False
except ValueError as e:
assert "bad-address" in str(e)
assert z.key in str(e)
@gen_cluster(client=True, timeout=None)
def test_remove_worker(c, s, a, b):
L = c.map(inc, range(20))
yield wait(L)
yield b.close()
assert b.address not in s.workers
result = yield c.gather(L)
assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
def test_errors_dont_block(c, s, w):
L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
start = time()
while not (L[0].status == L[2].status == "finished"):
assert time() < start + 5
yield gen.sleep(0.01)
result = yield c.gather([L[0], L[2]])
assert result == [2, 3]
@gen_cluster(client=True)
def test_submit_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
x = c.submit(assert_list, [1, 2, 3])
result = yield x
assert result
x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
result = yield x
assert result
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(assert_list, [x, y])
result = yield z
assert result
@gen_cluster(client=True)
def test_map_quotes(c, s, a, b):
def assert_list(x, z=[]):
return isinstance(x, list) and isinstance(z, list)
L = c.map(assert_list, [[1, 2, 3], [4]])
result = yield c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
result = yield c.gather(L)
assert all(result)
L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
result = yield c.gather(L)
assert all(result)
@gen_cluster()
def test_two_consecutive_clients_share_results(s, a, b):
c = yield Client(s.address, asynchronous=True)
x = c.submit(random.randint, 0, 1000, pure=True)
xx = yield x
f = yield Client(s.address, asynchronous=True)
y = f.submit(random.randint, 0, 1000, pure=True)
yy = yield y
assert xx == yy
yield c.close()
yield f.close()
@gen_cluster(client=True)
def test_submit_then_get_with_Future(c, s, a, b):
x = c.submit(slowinc, 1)
dsk = {"y": (inc, x)}
result = yield c.get(dsk, "y", sync=False)
assert result == 3
@gen_cluster(client=True)
def test_aliases(c, s, a, b):
x = c.submit(inc, 1)
dsk = {"y": x}
result = yield c.get(dsk, "y", sync=False)
assert result == 2
@gen_cluster(client=True)
def test_aliases_2(c, s, a, b):
dsk_keys = [
({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
({"x": "y", "y": 1}, ["x"]),
({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
]
for dsk, keys in dsk_keys:
result = yield c.get(dsk, keys, sync=False)
assert list(result) == list(dask.get(dsk, keys))
yield gen.moment
@gen_cluster(client=True)
def test__scatter(c, s, a, b):
d = yield c.scatter({"y": 20})
assert isinstance(d["y"], Future)
assert a.data.get("y") == 20 or b.data.get("y") == 20
y_who_has = s.get_who_has(keys=["y"])["y"]
assert a.address in y_who_has or b.address in y_who_has
assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
yy = yield c.gather([d["y"]])
assert yy == [20]
[x] = yield c.scatter([10])
assert isinstance(x, Future)
assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
xx = yield c.gather([x])
x_who_has = s.get_who_has(keys=[x.key])[x.key]
assert s.tasks[x.key].who_has
assert (
s.workers[a.address] in s.tasks[x.key].who_has
or s.workers[b.address] in s.tasks[x.key].who_has
)
assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
assert xx == [10]
z = c.submit(add, x, d["y"]) # submit works on Future
result = yield z
assert result == 10 + 20
result = yield c.gather([z, x])
assert result == [30, 10]
@gen_cluster(client=True)
def test__scatter_types(c, s, a, b):
d = yield c.scatter({"x": 1})
assert isinstance(d, dict)
assert list(d) == ["x"]
for seq in [[1], (1,), {1}, frozenset([1])]:
L = yield c.scatter(seq)
assert isinstance(L, type(seq))
assert len(L) == 1
s.validate_state()
seq = yield c.scatter(range(5))
assert isinstance(seq, list)
assert len(seq) == 5
s.validate_state()
@gen_cluster(client=True)
def test__scatter_non_list(c, s, a, b):
x = yield c.scatter(1)
assert isinstance(x, Future)
result = yield x
assert result == 1
@gen_cluster(client=True)
def test_scatter_hash(c, s, a, b):
[a] = yield c.scatter([1])
[b] = yield c.scatter([1])
assert a.key == b.key
s.validate_state()
@gen_cluster(client=True)
def test_scatter_tokenize_local(c, s, a, b):
from dask.base import normalize_token
class MyObj(object):
pass
L = []
@normalize_token.register(MyObj)
def f(x):
L.append(x)
return "x"
obj = MyObj()
future = yield c.scatter(obj)
assert L and L[0] is obj
@gen_cluster(client=True)
def test_scatter_singletons(c, s, a, b):
np = pytest.importorskip("numpy")
pd = pytest.importorskip("pandas")
for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
future = yield c.scatter(x)
result = yield future
assert str(result) == str(x)
@gen_cluster(client=True)
def test_scatter_typename(c, s, a, b):
future = yield c.scatter(123)
assert future.key.startswith("int")
@gen_cluster(client=True)
def test_scatter_hash_2(c, s, a, b):  # renamed to avoid shadowing test_scatter_hash above
x = yield c.scatter(123)
y = yield c.scatter(123)
assert x.key == y.key
z = yield c.scatter(123, hash=False)
assert z.key != y.key
@gen_cluster(client=True)
def test_get_releases_data(c, s, a, b):
[x] = yield c.get({"x": (inc, 1)}, ["x"], sync=False)
import gc
gc.collect()
start = time()
while c.refcount["x"]:
yield gen.sleep(0.01)
assert time() < start + 2
def test_Current(s, a, b):
with Client(s["address"]) as c:
assert Client.current() is c
with pytest.raises(ValueError):
Client.current()
with Client(s["address"]) as c:
assert Client.current() is c
def test_global_clients(loop):
assert _get_global_client() is None
with pytest.raises(ValueError):
default_client()
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert _get_global_client() is c
assert default_client() is c
with Client(s["address"], loop=loop) as f:
assert _get_global_client() is f
assert default_client() is f
assert default_client(c) is c
assert default_client(f) is f
assert _get_global_client() is None
@gen_cluster(client=True)
def test_exception_on_exception(c, s, a, b):
x = c.submit(lambda: 1 / 0)
y = c.submit(inc, x)
with pytest.raises(ZeroDivisionError):
yield y
z = c.submit(inc, y)
with pytest.raises(ZeroDivisionError):
yield z
@gen_cluster(client=True)
def test_get_nbytes(c, s, a, b):
[x] = yield c.scatter([1])
assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
y = c.submit(inc, x)
yield y
assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_nbytes_determines_worker(c, s, a, b):
x = c.submit(identity, 1, workers=[a.ip])
y = c.submit(identity, tuple(range(100)), workers=[b.ip])
yield c.gather([x, y])
z = c.submit(lambda x, y: None, x, y)
yield z
assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
def test_if_intermediates_clear_on_error(c, s, a, b):
x = delayed(div, pure=True)(1, 0)
y = delayed(div, pure=True)(1, 2)
z = delayed(add, pure=True)(x, y)
f = c.compute(z)
with pytest.raises(ZeroDivisionError):
yield f
s.validate_state()
assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(client=True)
def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
np = pytest.importorskip("numpy")
lists = c.map(np.ones, [10000] * 10, pure=False)
sums = c.map(np.sum, lists)
total = c.submit(sum, sums)
def f(x, y):
return None
s.task_duration["f"] = 0.001
results = c.map(f, lists, [total] * 10)
yield wait([total])
yield wait(results)
assert (
sum(
s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
for l, r in zip(lists, results)
)
>= 9
)
@gen_cluster(client=True)
def test_get_with_non_list_key(c, s, a, b):
dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
x = yield c.get(dsk, ("x", 0), sync=False)
y = yield c.get(dsk, 5, sync=False)
assert x == 2
assert y == 3
@gen_cluster(client=True)
def test_get_with_error(c, s, a, b):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
yield c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
dsk = {"x": (div, 1, 0), "y": (inc, "x")}
with pytest.raises(ZeroDivisionError):
c.get(dsk, "y")
@gen_cluster(client=True)
def test_directed_scatter(c, s, a, b):
yield c.scatter([1, 2, 3], workers=[a.address])
assert len(a.data) == 3
assert not b.data
yield c.scatter([4, 5], workers=[b.name])
assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
futures = c.scatter([1, 2, 3], workers=[b["address"]])
has_what = sync(loop, c.scheduler.has_what)
assert len(has_what[b["address"]]) == len(futures)
assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
def test_scatter_direct(c, s, a, b):
future = yield c.scatter(123, direct=True)
assert future.key in a.data or future.key in b.data
assert s.tasks[future.key].who_has
assert future.status == "finished"
result = yield future
assert result == 123
assert not s.counters["op"].components[0]["scatter"]
result = yield future
assert not s.counters["op"].components[0]["gather"]
result = yield c.gather(future)
assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
def test_scatter_direct_numpy(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.ones(5)
future = yield c.scatter(x, direct=True)
result = yield future
assert np.allclose(x, result)
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
def test_scatter_direct_broadcast(c, s, a, b):
future2 = yield c.scatter(456, direct=True, broadcast=True)
assert future2.key in a.data
assert future2.key in b.data
assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
result = yield future2
assert result == 456
assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_scatter_direct_balanced(c, s, *workers):
futures = yield c.scatter([1, 2, 3], direct=True)
assert sorted([len(w.data) for w in workers]) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_scatter_direct_broadcast_target(c, s, *workers):
futures = yield c.scatter([123, 456], direct=True, workers=workers[0].address)
assert futures[0].key in workers[0].data
assert futures[1].key in workers[0].data
futures = yield c.scatter(
[123, 456],
direct=True,
broadcast=True,
workers=[w.address for w in workers[:3]],
)
assert (
f.key in w.data and w.address in s.tasks[f.key].who_has
for f in futures
for w in workers[:3]
)
@gen_cluster(client=True, nthreads=[])
def test_scatter_direct_empty(c, s):
with pytest.raises((ValueError, gen.TimeoutError)):
yield c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, timeout=None, nthreads=[("127.0.0.1", 1)] * 5)
def test_scatter_direct_spread_evenly(c, s, *workers):
futures = []
for i in range(10):
future = yield c.scatter(i, direct=True)
futures.append(future)
assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
results = c.gather(futures, direct=direct)
assert results == [1, 2, 3]
delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
def test_gather_direct(c, s, a, b):
futures = yield c.scatter([1, 2, 3])
data = yield c.gather(futures, direct=True)
assert data == [1, 2, 3]
@gen_cluster(client=True)
def test_many_submits_spread_evenly(c, s, a, b):
L = [c.submit(inc, i) for i in range(10)]
yield wait(L)
assert a.data and b.data
@gen_cluster(client=True)
def test_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
tb = yield x.traceback()
if sys.version_info[0] >= 3:
assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
def test_get_traceback(c, s, a, b):
try:
yield c.get({"x": (div, 1, 0)}, "x", sync=False)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
@gen_cluster(client=True)
def test_gather_traceback(c, s, a, b):
x = c.submit(div, 1, 0)
try:
yield c.gather(x)
except ZeroDivisionError:
exc_type, exc_value, exc_traceback = sys.exc_info()
L = traceback.format_tb(exc_traceback)
assert any("x / y" in line for line in L)
def test_traceback_sync(c):
x = c.submit(div, 1, 0)
tb = x.traceback()
if sys.version_info[0] >= 3:
assert any(
"x / y" in line
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
y = c.submit(inc, x)
tb2 = y.traceback()
assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
set(pluck(3, traceback.extract_tb(tb)))
)
z = c.submit(div, 1, 2)
tb = z.traceback()
assert tb is None
@gen_cluster(client=True)
def test_upload_file(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
for value in [123, 456]:
with tmp_text("myfile.py", "def f():\n return {}".format(value)) as fn:
yield c.upload_file(fn)
x = c.submit(g, pure=False)
result = yield x
assert result == value
@gen_cluster(client=True)
def test_upload_file_no_extension(c, s, a, b):
with tmp_text("myfile", "") as fn:
yield c.upload_file(fn)
@gen_cluster(client=True)
def test_upload_file_zip(c, s, a, b):
def g():
import myfile
return myfile.f()
with save_sys_modules():
try:
for value in [123, 456]:
with tmp_text(
"myfile.py", "def f():\n return {}".format(value)
) as fn_my_file:
with zipfile.ZipFile("myfile.zip", "w") as z:
z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
yield c.upload_file("myfile.zip")
x = c.submit(g, pure=False)
result = yield x
assert result == value
finally:
if os.path.exists("myfile.zip"):
os.remove("myfile.zip")
@gen_cluster(client=True)
def test_upload_file_egg(c, s, a, b):
def g():
import package_1, package_2
return package_1.a, package_2.b
# c.upload_file tells each worker to
# - put this file in their local_directory
# - modify their sys.path to include it
# we don't care about the local_directory
# but we do care about restoring the path
with save_sys_modules():
for value in [123, 456]:
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "setup.py"), "w") as f:
f.write("from setuptools import setup, find_packages\n")
f.write(
'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
value
)
)
# test a package with an underscore in the name
package_1 = os.path.join(dirname, "package_1")
os.mkdir(package_1)
with open(os.path.join(package_1, "__init__.py"), "w") as f:
f.write("a = {}\n".format(value))
# test multiple top-level packages
package_2 = os.path.join(dirname, "package_2")
os.mkdir(package_2)
with open(os.path.join(package_2, "__init__.py"), "w") as f:
f.write("b = {}\n".format(value))
# compile these into an egg
subprocess.check_call(
[sys.executable, "setup.py", "bdist_egg"], cwd=dirname
)
egg_root = os.path.join(dirname, "dist")
# first file ending with '.egg'
egg_name = [
fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
][0]
egg_path = os.path.join(egg_root, egg_name)
yield c.upload_file(egg_path)
os.remove(egg_path)
x = c.submit(g, pure=False)
result = yield x
assert result == (value, value)
@gen_cluster(client=True)
def test_upload_large_file(c, s, a, b):
assert a.local_directory
assert b.local_directory
with tmp_text("myfile", "abc") as fn:
with tmp_text("myfile2", "def") as fn2:
yield c._upload_large_file(fn, remote_filename="x")
yield c._upload_large_file(fn2)
for w in [a, b]:
assert os.path.exists(os.path.join(w.local_directory, "x"))
assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
with open(os.path.join(w.local_directory, "x")) as f:
assert f.read() == "abc"
with open(os.path.join(w.local_directory, "myfile2")) as f:
assert f.read() == "def"
def test_upload_file_sync(c):
def g():
import myfile
return myfile.x
with tmp_text("myfile.py", "x = 123") as fn:
c.upload_file(fn)
x = c.submit(g)
assert x.result() == 123
@gen_cluster(client=True)
def test_upload_file_exception(c, s, a, b):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
yield c.upload_file(fn)
def test_upload_file_exception_sync(c):
with tmp_text("myfile.py", "syntax-error!") as fn:
with pytest.raises(SyntaxError):
c.upload_file(fn)
@pytest.mark.skip
@gen_cluster()
def test_multiple_clients(s, a, b):
a = yield Client(s.address, asynchronous=True)
b = yield Client(s.address, asynchronous=True)
x = a.submit(inc, 1)
y = b.submit(inc, 2)
assert x.client is a
assert y.client is b
xx = yield x
yy = yield y
assert xx == 2
assert yy == 3
z = a.submit(add, x, y)
assert z.client is a
zz = yield z
assert zz == 5
yield a.close()
yield b.close()
@gen_cluster(client=True)
def test_async_compute(c, s, a, b):
from dask.delayed import delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
[yy, zz, aa] = c.compute([y, z, 3], sync=False)
assert isinstance(yy, Future)
assert isinstance(zz, Future)
assert aa == 3
result = yield c.gather([yy, zz])
assert result == [2, 0]
assert isinstance(c.compute(y), Future)
assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
def test_async_compute_with_scatter(c, s, a, b):
d = yield c.scatter({("x", 1): 1, ("y", 1): 2})
x, y = d[("x", 1)], d[("y", 1)]
from dask.delayed import delayed
z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
zz = c.compute(z)
[result] = yield c.gather([zz])
assert result == 2 + 3
def test_sync_compute(c):
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
yy, zz = c.compute([y, z], sync=True)
assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
def test_remote_scatter_gather(c, s, a, b):
x, y, z = yield c.scatter([1, 2, 3])
assert x.key in a.data or x.key in b.data
assert y.key in a.data or y.key in b.data
assert z.key in a.data or z.key in b.data
xx, yy, zz = yield c.gather([x, y, z])
assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(timeout=1000, client=True)
def test_remote_submit_on_Future(c, s, a, b):
x = c.submit(lambda x: x + 1, 1)
y = c.submit(lambda x: x + 1, x)
result = yield y
assert result == 3
def test_start_is_idempotent(c):
c.start()
c.start()
c.start()
x = c.submit(inc, 1)
assert x.result() == 2
@gen_cluster(client=True)
def test_client_with_scheduler(c, s, a, b):
assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y)
result = yield x
assert result == 1 + 1
result = yield z
assert result == 1 + 1 + 1 + 2
A, B, C = yield c.scatter([1, 2, 3])
AA, BB, xx = yield c.gather([A, B, x])
assert (AA, BB, xx) == (1, 2, 2)
result = yield c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
assert result == 12
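# workers= restrictions are strict by default; allow_other_workers=True records the
# key as a loose restriction so the scheduler may run it elsewhere if needed.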
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_allow_restrictions(c, s, a, b):
aws = s.workers[a.address]
    bws = s.workers[b.address]
x = c.submit(inc, 1, workers=a.ip)
yield x
assert s.tasks[x.key].who_has == {aws}
assert not s.loose_restrictions
x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
yield x
assert s.tasks[x.key].who_has == {aws}
assert x.key in s.loose_restrictions
L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
yield wait(L)
assert all(s.tasks[f.key].who_has == {aws} for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
yield x
assert s.tasks[x.key].who_has
assert x.key in s.loose_restrictions
L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
yield wait(L)
assert all(s.tasks[f.key].who_has for f in L)
assert {f.key for f in L}.issubset(s.loose_restrictions)
with pytest.raises(ValueError):
c.submit(inc, 1, allow_other_workers=True)
with pytest.raises(ValueError):
c.map(inc, [1], allow_other_workers=True)
with pytest.raises(TypeError):
c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
with pytest.raises(TypeError):
c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
@pytest.mark.skipif("True", reason="because")
def test_bad_address():
try:
Client("123.123.123.123:1234", timeout=0.1)
except (IOError, gen.TimeoutError) as e:
assert "connect" in str(e).lower()
try:
Client("127.0.0.1:1234", timeout=0.1)
except (IOError, gen.TimeoutError) as e:
assert "connect" in str(e).lower()
@gen_cluster(client=True)
def test_long_error(c, s, a, b):
def bad(x):
raise ValueError("a" * 100000)
x = c.submit(bad, 10)
try:
yield x
except ValueError as e:
assert len(str(e)) < 100000
tb = yield x.traceback()
assert all(
len(line) < 100000
for line in concat(traceback.extract_tb(tb))
if isinstance(line, str)
)
@gen_cluster(client=True)
def test_map_on_futures_with_kwargs(c, s, a, b):
def f(x, y=10):
return x + y
futures = c.map(inc, range(10))
futures2 = c.map(f, futures, y=20)
results = yield c.gather(futures2)
assert results == [i + 1 + 20 for i in range(10)]
future = c.submit(inc, 100)
future2 = c.submit(f, future, y=200)
result = yield future2
assert result == 100 + 1 + 200
class BadlySerializedObject(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
raise TypeError("hello!")
class FatallySerializedObject(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
print("This should never have been deserialized, closing")
import sys
sys.exit(0)
@gen_cluster(client=True)
def test_badly_serialized_input(c, s, a, b):
o = BadlySerializedObject()
future = c.submit(inc, o)
futures = c.map(inc, range(10))
L = yield c.gather(futures)
assert list(L) == list(map(inc, range(10)))
assert future.status == "error"
@pytest.mark.skipif("True", reason="")
def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
start = time()
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert time() - start < 20
assert future.status == "error"
def test_repr(loop):
funcs = [str, repr, lambda x: x._repr_html_()]
with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GB"}) as (s, [a, b, c]):
with Client(s["address"], loop=loop) as c:
for func in funcs:
text = func(c)
assert c.scheduler.address in text
assert "3" in text
assert "6" in text
assert "GB" in text
if "<table" not in text:
assert len(text) < 80
for func in funcs:
text = func(c)
assert "not connected" in text
@gen_cluster(client=True)
def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
def test_repr_localcluster():
cluster = yield LocalCluster(
processes=False, dashboard_address=None, asynchronous=True
)
client = yield Client(cluster, asynchronous=True)
try:
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
finally:
yield client.close()
yield cluster.close()
@gen_cluster(client=True)
def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
yield wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
def test_forget_complex(e, s, A, B):
a, b, c, d = yield e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
yield wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
start = time()
while b.key in A.data or b.key in B.data:
yield gen.sleep(0.01)
assert time() < start + 10
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = [delayed2(slowinc)(i) for i in range(4)]
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for i in range(5):
yield gen.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
yield wait([y])
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key in s.exceptions_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.exceptions
assert x.key in s.exceptions_blame
assert y.key in s.exceptions_blame
assert z.key not in s.exceptions_blame
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.exceptions
assert x.key not in s.exceptions_blame
assert y.key not in s.exceptions_blame
assert z.key not in s.exceptions_blame
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster(client=True)
def test_waiting_data(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
yield wait([x, y, z])
assert not s.waiting_data.get(x.key)
assert not s.waiting_data.get(y.key)
@gen_cluster()
def test_multi_client(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
yield wait([x, y])
assert s.wants_what == {
c.id: {x.key, y.key},
f.id: {y.key},
"fire-and-forget": set(),
}
assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}
yield c.close()
start = time()
while c.id in s.wants_what:
yield gen.sleep(0.01)
assert time() < start + 5
assert c.id not in s.wants_what
assert c.id not in s.who_wants[y.key]
assert x.key not in s.who_wants
yield f.close()
start = time()
while s.tasks:
yield gen.sleep(0.01)
assert time() < start + 2, s.tasks
def long_running_client_connection(address):
with pristine_loop():
c = Client(address)
x = c.submit(lambda x: x + 1, 10)
x.result()
sleep(100)
@gen_cluster()
def test_cleanup_after_broken_client_connection(s, a, b):
proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
proc.daemon = True
proc.start()
start = time()
while not s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
proc.terminate()
start = time()
while s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
@gen_cluster()
def test_multi_garbage_collection(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
yield wait([x, y])
x.__del__()
start = time()
while x.key in a.data or x.key in b.data:
yield gen.sleep(0.01)
assert time() < start + 5
assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id, f.id}}
y.__del__()
start = time()
while x.key in s.wants_what[f.id]:
yield gen.sleep(0.01)
assert time() < start + 5
yield gen.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
assert s.who_wants == {y.key: {c.id}}
y2.__del__()
start = time()
while y.key in a.data or y.key in b.data:
yield gen.sleep(0.01)
assert time() < start + 5
assert not any(v for v in s.wants_what.values())
assert not s.who_wants
yield c.close()
yield f.close()
@gen_cluster(client=True)
def test__broadcast(c, s, a, b):
x, y = yield c.scatter([1, 2], broadcast=True)
assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test__broadcast_integer(c, s, *workers):
x, y = yield c.scatter([1, 2], broadcast=2)
assert len(s.tasks[x.key].who_has) == 2
assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
def test__broadcast_dict(c, s, a, b):
d = yield c.scatter({"x": 1}, broadcast=True)
assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
x, y = c.scatter([1, 2], broadcast=True)
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key},
b["address"]: {x.key, y.key},
}
[z] = c.scatter([3], broadcast=True, workers=[a["address"]])
has_what = sync(c.loop, c.scheduler.has_what)
assert {k: set(v) for k, v in has_what.items()} == {
a["address"]: {x.key, y.key, z.key},
b["address"]: {x.key, y.key},
}
@gen_cluster(client=True)
def test_proxy(c, s, a, b):
msg = yield c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
def test__cancel(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, x)
while y.key not in s.tasks:
yield gen.sleep(0.01)
yield c.cancel([x])
assert x.cancelled()
assert "cancel" in str(x)
s.validate_state()
start = time()
while not y.cancelled():
yield gen.sleep(0.01)
assert time() < start + 5
assert not s.tasks
s.validate_state()
@gen_cluster(client=True)
def test__cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
result = yield x
yield c.cancel(x)
with pytest.raises(CancelledError):
yield x
@gen_cluster()
def test__cancel_multi_client(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
yield c.cancel([x])
assert x.cancelled()
assert not y.cancelled()
start = time()
while y.key not in s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
out = yield y
assert out == 2
with pytest.raises(CancelledError):
yield x
yield c.close()
yield f.close()
@gen_cluster(client=True)
def test__cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
yield c.cancel(x)
yield c.cancel([x])
assert all(f.cancelled() for f in L)
assert not s.tasks
def test_cancel(c):
x = c.submit(slowinc, 1, key="x")
y = c.submit(slowinc, x, key="y")
z = c.submit(slowinc, y, key="z")
c.cancel([y])
start = time()
while not z.cancelled():
sleep(0.01)
assert time() < start + 5
assert x.result() == 2
z.cancel()
assert z.cancelled()
@gen_cluster(client=True)
def test_future_type(c, s, a, b):
x = c.submit(inc, 1)
yield wait([x])
assert x.type == int
assert "int" in str(x)
@gen_cluster(client=True)
def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
yield x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
def test_map_different_lengths(c, s, a, b):
assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
def test_async_persist(c, s, a, b):
from dask.delayed import delayed, Delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
yield gen.sleep(0.01)
assert s.who_wants[y.key] == {c.id}
assert s.who_wants[w.key] == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = yield c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
def test__persist(c, s, a, b):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = yield c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("dask.array")
import dask.array as da
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
yield wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
yield wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
sg = SubgraphCallable(
{"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
"out",
("in",),
)
assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
da = pytest.importorskip("dask.array")
assert futures_of([da.Array]) == []
@gen_cluster(client=True)
def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
yield c.cancel([x])
with pytest.raises(CancelledError):
yield x
with pytest.raises(CancelledError):
yield c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
with pytest.raises(CancelledError):
c.submit(inc, x)
with pytest.raises(CancelledError):
c.submit(add, 1, y=x)
with pytest.raises(CancelledError):
c.map(add, [1], y=x)
assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
def test_dont_delete_recomputed_results(c, s, w):
x = c.submit(inc, 1) # compute first time
yield wait([x])
x.__del__() # trigger garbage collection
yield gen.moment
xx = c.submit(inc, 1) # compute second time
start = time()
while xx.key not in w.data: # data shows up
yield gen.sleep(0.01)
assert time() < start + 1
while time() < start + (s.delete_interval + 100) / 1000: # and stays
assert xx.key in w.data
yield gen.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
def test_fatally_serialized_input(c, s):
o = FatallySerializedObject()
future = c.submit(inc, o)
while not s.tasks:
yield gen.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
yield wait(x)
y = c.submit(inc, 2)
yield wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
def test_run(c, s, a, b):
results = yield c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = yield c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = yield c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
yield wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = yield c.run_on_scheduler(func)
assert results == func()
results = yield c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
def test_run_coroutine(c, s, a, b):
results = yield c.run(geninc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = yield c.run(geninc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = yield c.run(geninc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
yield c.run(throws, 1)
if sys.version_info >= (3, 5):
results = yield c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(geninc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(geninc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(geninc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
def test_run_exception(c):
def raise_exception(exc_type, exc_msg):
raise exc_type(exc_msg)
for exc_type in [ValueError, RuntimeError]:
with pytest.raises(exc_type, match="informative message"):
c.run(raise_exception, exc_type, "informative message")
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
yield wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_test()
def test_worker_aliases():
s = yield Scheduler(validate=True, port=0)
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
yield [a, b, w]
c = yield Client(s.address, asynchronous=True)
L = c.map(inc, range(10), workers="alice")
future = yield c.scatter(123, workers=3)
yield wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = yield c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
yield c.close()
yield [a.close(), b.close(), w.close()]
yield s.close()
def test_persist_get_sync(c):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
def test_persist_get(c, s, a, b):
dadd = delayed(add)
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
yield gen.sleep(0.5)
result = yield c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False)
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = yield c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = yield c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = yield c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
psutil = pytest.importorskip("psutil")
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for i in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 4
@gen_cluster()
def test_startup_close_startup(s, a, b):
c = yield Client(s.address, asynchronous=True)
yield c.close()
c = yield Client(s.address, asynchronous=True)
yield c.close()
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"]) as c:
pass
with Client(s["address"]) as c:
pass
sleep(0.1)
with Client(s["address"]) as c:
pass
@gen_cluster(client=True)
def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
try:
result = yield x
except Exception as e:
assert "hello world" in str(e)
else:
assert False
@gen_cluster(client=True)
def test_rebalance(c, s, a, b):
aws = s.workers[a.address]
bws = s.workers[b.address]
x, y = yield c.scatter([1, 2], workers=[a.address])
assert len(a.data) == 2
assert len(b.data) == 0
s.validate_state()
yield c.rebalance()
s.validate_state()
assert len(b.data) == 1
assert {ts.key for ts in bws.has_what} == set(b.data)
assert bws in s.tasks[x.key].who_has or bws in s.tasks[y.key].who_has
assert len(a.data) == 1
assert {ts.key for ts in aws.has_what} == set(a.data)
assert aws not in s.tasks[x.key].who_has or aws not in s.tasks[y.key].who_has
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 4, client=True)
def test_rebalance_workers(e, s, a, b, c, d):
w, x, y, z = yield e.scatter([1, 2, 3, 4], workers=[a.address])
assert len(a.data) == 4
assert len(b.data) == 0
assert len(c.data) == 0
assert len(d.data) == 0
yield e.rebalance([x, y], workers=[a.address, c.address])
assert len(a.data) == 3
assert len(b.data) == 0
assert len(c.data) == 1
assert len(d.data) == 0
assert c.data == {x.key: 2} or c.data == {y.key: 3}
yield e.rebalance()
assert len(a.data) == 1
assert len(b.data) == 1
assert len(c.data) == 1
assert len(d.data) == 1
s.validate_state()
@gen_cluster(client=True)
def test_rebalance_execution(c, s, a, b):
futures = c.map(inc, range(10), workers=a.address)
yield c.rebalance(futures)
assert len(a.data) == len(b.data) == 5
s.validate_state()
def test_rebalance_sync(c, s, a, b):
futures = c.map(inc, range(10), workers=[a["address"]])
c.rebalance(futures)
has_what = c.has_what()
assert len(has_what) == 2
assert list(valmap(len, has_what).values()) == [5, 5]
@gen_cluster(client=True)
def test_rebalance_unprepared(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
yield gen.sleep(0.1)
yield c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
result = yield x
yield a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
yield gen.sleep(0.01)
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
result = yield x
yield a.close()
start = time()
while x.status == "finished":
assert time() < start + 5
yield gen.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
w = yield Worker(s.address, loop=s.loop)
start = time()
while x.status != "finished":
assert time() < start + 2
yield gen.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = yield x
assert result == 2
yield w.close()
@gen_cluster(client=True, nthreads=[])
def test_add_worker_after_tasks(c, s):
futures = c.map(inc, range(10))
n = yield Nanny(s.address, nthreads=2, loop=s.loop, port=0)
result = yield c.gather(futures)
yield n.close()
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
def test_workers_register_indirect_data(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
yield y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
def test_submit_on_cancelled_future(c, s, a, b):
x = c.submit(inc, 1)
yield x
yield c.cancel(x)
with pytest.raises(CancelledError):
y = c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_replicate(c, s, *workers):
[a, b] = yield c.scatter([1, 2])
yield s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
yield c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
yield c.rebalance(f)
s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_replicate_workers(c, s, *workers):
[a, b] = yield c.scatter([1, 2], workers=[workers[0].address])
yield s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
yield s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
yield s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
yield s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
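# Helper for the replication test below: every pickle round-trip bumps ``n``
# (``__setstate__`` stores ``n + 1``), so after broadcasting we can tell whether
# copies branched out from already-replicated workers rather than all being
# serialized directly from the original holder.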
class CountSerialization(object):
def __init__(self):
self.n = 0
def __setstate__(self, n):
self.n = n + 1
def __getstate__(self):
return self.n
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_replicate_tree_branching(c, s, *workers):
obj = CountSerialization()
[future] = yield c.scatter([obj])
yield s.replicate(keys=[future.key], n=10)
max_count = max(w.data[future.key].n for w in workers)
assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_client_replicate(c, s, *workers):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
yield c.replicate([x, y], n=5)
assert len(s.tasks[x.key].who_has) == 5
assert len(s.tasks[y.key].who_has) == 5
yield c.replicate([x, y], n=3)
assert len(s.tasks[x.key].who_has) == 3
assert len(s.tasks[y.key].who_has) == 3
yield c.replicate([x, y])
s.validate_state()
assert len(s.tasks[x.key].who_has) == 10
assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
timeout=None,
)
def test_client_replicate_host(client, s, a, b, c):
aws = s.workers[a.address]
bws = s.workers[b.address]
cws = s.workers[c.address]
x = client.submit(inc, 1, workers="127.0.0.2")
yield wait([x])
assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}
yield client.replicate([x], workers=["127.0.0.2"])
assert s.tasks[x.key].who_has == {bws, cws}
yield client.replicate([x], workers=["127.0.0.1"])
assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
c.replicate([x, y], n=2)
who_has = c.who_has()
assert len(who_has[x.key]) == len(who_has[y.key]) == 2
with pytest.raises(ValueError):
c.replicate([x], n=0)
assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
def test_task_load_adapts_quickly(c, s, a):
future = c.submit(slowinc, 1, delay=0.2) # slow
yield wait(future)
assert 0.15 < s.task_duration["slowinc"] < 0.4
futures = c.map(slowinc, range(10), delay=0) # very fast
yield wait(futures)
assert 0 < s.task_duration["slowinc"] < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
def test_even_load_after_fast_functions(c, s, a, b):
x = c.submit(inc, 1, workers=a.address) # very fast
y = c.submit(inc, 2, workers=b.address) # very fast
yield wait([x, y])
futures = c.map(inc, range(2, 11))
yield wait(futures)
assert any(f.key in a.data for f in futures)
assert any(f.key in b.data for f in futures)
# assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
def test_even_load_on_startup(c, s, a, b):
x, y = c.map(inc, [1, 2])
yield wait([x, y])
assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
def test_contiguous_load(c, s, a, b):
w, x, y, z = c.map(inc, [1, 2, 3, 4])
yield wait([w, x, y, z])
groups = [set(a.data), set(b.data)]
assert {w.key, x.key} in groups
assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_balanced_with_submit(c, s, *workers):
L = [c.submit(slowinc, i) for i in range(4)]
yield wait(L)
for w in workers:
assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_balanced_with_submit_and_resident_data(c, s, *workers):
[x] = yield c.scatter([10], broadcast=True)
L = [c.submit(slowinc, x, pure=False) for i in range(4)]
yield wait(L)
for w in workers:
assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
def test_scheduler_saturates_cores(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(slowinc, range(100), delay=delay)
futures = c.map(slowinc, futures, delay=delay / 10)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
yield gen.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
def test_scheduler_saturates_cores_random(c, s, a, b):
for delay in [0, 0.01, 0.1]:
futures = c.map(randominc, range(100), scale=0.1)
while not s.tasks:
if s.tasks:
assert all(
len(p) >= 20
for w in s.workers.values()
for p in w.processing.values()
)
yield gen.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
def test_cancel_clears_processing(c, s, *workers):
da = pytest.importorskip("dask.array")
x = c.submit(slowinc, 1, delay=0.2)
while not s.tasks:
yield gen.sleep(0.01)
yield c.cancel(x)
start = time()
while any(v for w in s.workers.values() for v in w.processing):
assert time() < start + 0.2
yield gen.sleep(0.01)
s.validate_state()
def test_default_get():
with cluster() as (s, [a, b]):
pre_get = dask.base.get_scheduler()
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"], set_as_default=True) as c:
assert dask.base.get_scheduler() == c.get
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c = Client(s["address"], set_as_default=False)
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
c.close()
c = Client(s["address"], set_as_default=True)
assert dask.config.get("shuffle") == "tasks"
assert dask.base.get_scheduler() == c.get
c.close()
assert dask.base.get_scheduler() == pre_get
pytest.raises(KeyError, dask.config.get, "shuffle")
with Client(s["address"]) as c:
assert dask.base.get_scheduler() == c.get
with Client(s["address"], set_as_default=False) as c:
assert dask.base.get_scheduler() != c.get
assert dask.base.get_scheduler() != c.get
with Client(s["address"], set_as_default=True) as c1:
assert dask.base.get_scheduler() == c1.get
with Client(s["address"], set_as_default=True) as c2:
assert dask.base.get_scheduler() == c2.get
assert dask.base.get_scheduler() == c1.get
assert dask.base.get_scheduler() == pre_get
@gen_cluster(client=True)
def test_get_processing(c, s, a, b):
processing = yield c.processing()
assert processing == valmap(tuple, s.processing)
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
)
yield gen.sleep(0.2)
x = yield c.processing()
assert set(x) == {a.address, b.address}
x = yield c.processing(workers=[a.address])
assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
def test_get_foo(c, s, a, b):
futures = c.map(inc, range(10))
yield wait(futures)
x = yield c.scheduler.ncores()
assert x == s.nthreads
x = yield c.scheduler.ncores(workers=[a.address])
assert x == {a.address: s.nthreads[a.address]}
x = yield c.scheduler.has_what()
assert valmap(sorted, x) == valmap(sorted, s.has_what)
x = yield c.scheduler.has_what(workers=[a.address])
assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}
x = yield c.scheduler.nbytes(summary=False)
assert x == s.get_nbytes(summary=False)
x = yield c.scheduler.nbytes(keys=[futures[0].key], summary=False)
assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}
x = yield c.scheduler.who_has()
assert valmap(sorted, x) == valmap(sorted, s.who_has)
x = yield c.scheduler.who_has(keys=[futures[0].key])
assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
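# Small helper: assert two mappings have the same keys and that each pair of
# values compares equal element-wise once materialized as lists.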
def assert_dict_key_equal(expected, actual):
assert set(expected.keys()) == set(actual.keys())
for k in actual.keys():
ev = expected[k]
av = actual[k]
assert list(ev) == list(av)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
def test_get_foo_lost_keys(c, s, u, v, w):
x = c.submit(inc, 1, workers=[u.address])
y = yield c.scatter(3, workers=[v.address])
yield wait([x, y])
ua, va, wa = u.address, v.address, w.address
d = yield c.scheduler.has_what()
assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
d = yield c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
d = yield c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
d = yield c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
yield u.close()
yield v.close()
d = yield c.scheduler.has_what()
assert_dict_key_equal(d, {wa: []})
d = yield c.scheduler.has_what(workers=[ua, va])
assert_dict_key_equal(d, {ua: [], va: []})
# The scattered key cannot be recomputed so it is forgotten
d = yield c.scheduler.who_has()
assert_dict_key_equal(d, {x.key: []})
# ... but when passed explicitly, it is included in the result
d = yield c.scheduler.who_has(keys=[x.key, y.key])
assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
client=True,
Worker=Nanny,
worker_kwargs={"death_timeout": "500ms"},
clean_kwargs={"threads": False, "processes": False},
)
def test_bad_tasks_fail(c, s, a, b):
f = c.submit(sys.exit, 0)
with pytest.raises(KilledWorker) as info:
yield f
assert info.value.last_worker.nanny in {a.address, b.address}
yield [a.close(), b.close()]
def test_get_processing_sync(c, s, a, b):
processing = c.processing()
assert not any(v for v in processing.values())
futures = c.map(
slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
)
sleep(0.2)
aa = a["address"]
bb = b["address"]
processing = c.processing()
assert set(c.processing(aa)) == {aa}
assert set(c.processing([aa])) == {aa}
c.cancel(futures)
def test_close_idempotent(c):
c.close()
c.close()
c.close()
@nodebug
def test_get_returns_early(c):
start = time()
with ignoring(RuntimeError):
result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
assert time() < start + 0.5
# Futures should be released and forgotten
wait_for(lambda: not c.futures, timeout=0.1)
wait_for(lambda: not any(c.processing().values()), timeout=3)
x = c.submit(inc, 1)
x.result()
with ignoring(RuntimeError):
result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True)
def test_Client_clears_references_after_restart(c, s, a, b):
x = c.submit(inc, 1)
assert x.key in c.refcount
yield c.restart()
assert x.key not in c.refcount
key = x.key
del x
import gc
gc.collect()
yield gen.moment
assert key not in c.refcount
def test_get_stops_work_after_error(c):
with pytest.raises(RuntimeError):
c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])
start = time()
while any(c.processing().values()):
sleep(0.01)
assert time() < start + 0.5
def test_as_completed_list(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq))
assert set(c.gather(seq2)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
seq = c.map(inc, range(5))
seq2 = list(as_completed(seq, with_results=True))
assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
n = 50
futures = c.map(slowinc, range(n), delay=0.01)
out = []
for batch in as_completed(futures, with_results=with_results).batches():
assert isinstance(batch, (tuple, list))
sleep(0.05)
out.extend(batch)
assert len(out) == n
if with_results:
assert set(pluck(1, out)) == set(range(1, n + 1))
else:
assert set(out) == set(futures)
def test_as_completed_next_batch(c):
futures = c.map(slowinc, range(2), delay=0.1)
ac = as_completed(futures)
assert not ac.is_empty()
assert ac.next_batch(block=False) == []
assert set(ac.next_batch(block=True)).issubset(futures)
while not ac.is_empty():
assert set(ac.next_batch(block=True)).issubset(futures)
assert ac.is_empty()
assert not ac.has_ready()
@gen_test()
def test_status():
s = yield Scheduler(port=0)
c = yield Client(s.address, asynchronous=True)
assert c.status == "running"
x = c.submit(inc, 1)
yield c.close()
assert c.status == "closed"
yield s.close()
@gen_cluster(client=True)
def test_persist_optimize_graph(c, s, a, b):
i = 10
for method in [c.persist, c.compute]:
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=False)
yield wait(b4)
assert set(map(tokey, b3.__dask_keys__())).issubset(s.tasks)
b = db.range(i, npartitions=2)
i += 1
b2 = b.map(inc)
b3 = b2.map(inc)
b4 = method(b3, optimize_graph=True)
yield wait(b4)
assert not any(tokey(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
def test_scatter_raises_if_no_workers(c, s):
with pytest.raises(gen.TimeoutError):
yield c.scatter(1, timeout=0.5)
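# Start a scheduler as a subprocess, kill it, and bring it back on the same
# port: the client should notice the outage, cancel in-flight futures, and
# reconnect once the new scheduler process is up again.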
@pytest.mark.slow
def test_reconnect(loop):
w = Worker("127.0.0.1", 9393, loop=loop)
loop.add_callback(w.start)
scheduler_cli = [
"dask-scheduler",
"--host",
"127.0.0.1",
"--port",
"9393",
"--no-dashboard",
]
with popen(scheduler_cli) as s:
c = Client("127.0.0.1:9393", loop=loop)
start = time()
while len(c.nthreads()) != 1:
sleep(0.1)
assert time() < start + 3
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while c.status != "connecting":
assert time() < start + 5
sleep(0.01)
with pytest.raises(Exception):
c.nthreads()
assert x.status == "cancelled"
with pytest.raises(CancelledError):
x.result()
with popen(scheduler_cli) as s:
start = time()
while c.status != "running":
sleep(0.1)
assert time() < start + 5
start = time()
while len(c.nthreads()) != 1:
sleep(0.05)
assert time() < start + 15
x = c.submit(inc, 1)
assert x.result() == 2
start = time()
while True:
try:
x.result()
assert False
except CommClosedError:
continue
except CancelledError:
break
assert time() < start + 5
sleep(0.1)
sync(loop, w.close)
c.close()
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
def test_reconnect_timeout(c, s):
with captured_logger(logging.getLogger("distributed.client")) as logger:
yield s.close()
start = time()
while c.status != "closed":
yield c._update_scheduler_info()
yield gen.sleep(0.05)
assert time() < start + 5, "Timeout waiting for reconnect to fail"
text = logger.getvalue()
assert "Failed to reconnect" in text
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.skipif(
sys.version_info[0] == 2, reason="Semaphore.acquire doesn't support timeout option"
)
# @pytest.mark.xfail(reason="TODO: intermittent failures")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
psutil = pytest.importorskip("psutil")
proc = psutil.Process()
with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
gc.collect()
before = proc.num_fds()
done = Semaphore(0)
running = weakref.WeakKeyDictionary()
workers = set()
status = True
@gen.coroutine
def start_worker(sleep, duration, repeat=1):
for i in range(repeat):
yield gen.sleep(sleep)
if not status:
return
w = worker(s["address"], loop=loop)
running[w] = None
workers.add(w)
yield w
addr = w.worker_address
running[w] = addr
yield gen.sleep(duration)
yield w.close()
del w
yield gen.moment
done.release()
for i in range(count):
loop.add_callback(
start_worker, random.random() / 5, random.random() / 5, repeat=repeat
)
with Client(s["address"], loop=loop) as c:
sleep(1)
for i in range(count):
done.acquire(timeout=5)
gc.collect()
if not running:
break
start = time()
while c.nthreads():
sleep(0.2)
assert time() < start + 10
status = False
[c.sync(w.close) for w in list(workers)]
for w in workers:
assert w.status == "closed"
start = time()
while proc.num_fds() > before:
print("fds:", before, proc.num_fds())
sleep(0.1)
assert time() < start + 10
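# Two clients submitting the same keys should not generate extra scheduler
# work: the transition log must be unchanged after the duplicate submissions.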
@gen_cluster(client=False, timeout=None)
def test_idempotence(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
# Submit
x = c.submit(inc, 1)
yield x
log = list(s.transition_log)
len_single_submit = len(log) # see last assert
y = f.submit(inc, 1)
assert x.key == y.key
yield y
yield gen.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
# Error
a = c.submit(div, 1, 0)
yield wait(a)
assert a.status == "error"
log = list(s.transition_log)
b = f.submit(div, 1, 0)
assert a.key == b.key
yield wait(b)
yield gen.sleep(0.1)
log2 = list(s.transition_log)
assert log == log2
s.transition_log.clear()
# Simultaneous Submit
d = c.submit(inc, 2)
e = c.submit(inc, 2)
yield wait([d, e])
assert len(s.transition_log) == len_single_submit
yield c.close()
yield f.close()
def test_scheduler_info(c):
info = c.scheduler_info()
assert isinstance(info, dict)
assert len(info["workers"]) == 2
def test_write_scheduler_file(c):
info = c.scheduler_info()
with tmpfile("json") as scheduler_file:
c.write_scheduler_file(scheduler_file)
with Client(scheduler_file=scheduler_file) as c2:
info2 = c2.scheduler_info()
assert c.scheduler.address == c2.scheduler.address
# test that a ValueError is raised if the scheduler_file
# attribute is already set
with pytest.raises(ValueError):
c.write_scheduler_file(scheduler_file)
def test_get_versions(c):
requests = pytest.importorskip("requests")
v = c.get_versions()
assert v["scheduler"] is not None
assert v["client"] is not None
assert len(v["workers"]) == 2
for k, v in v["workers"].items():
assert v is not None
c.get_versions(check=True)
# smoke test for versions
# that this does not raise
v = c.get_versions(packages=["requests"])
assert dict(v["client"]["packages"]["optional"])["requests"] == requests.__version__
def test_threaded_get_within_distributed(c):
import dask.multiprocessing
for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:
def f():
return get({"x": (lambda: 1,)}, "x")
future = c.submit(f)
assert future.result() == 1
@gen_cluster(client=True)
def test_lose_scattered_data(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
yield a.close()
yield gen.sleep(0.1)
assert x.status == "cancelled"
assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
def test_partially_lose_scattered_data(e, s, a, b, c):
x = yield e.scatter(1, workers=a.address)
yield e.replicate(x, n=2)
yield a.close()
yield gen.sleep(0.1)
assert x.status == "finished"
assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
def test_scatter_compute_lose(c, s, a, b):
[x] = yield c.scatter([[1, 2, 3, 4]], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
z = c.submit(slowadd, x, y, delay=0.2)
yield gen.sleep(0.1)
yield a.close()
with pytest.raises(CancelledError):
yield wait(z)
assert x.status == "cancelled"
assert y.status == "finished"
assert z.status == "cancelled"
@gen_cluster(client=True)
def test_scatter_compute_store_lose(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
x = yield c.scatter(1, workers=a.address)
xx = c.submit(inc, x, workers=a.address)
y = c.submit(inc, 1)
z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
yield wait(z)
yield a.close()
start = time()
while x.status == "finished":
yield gen.sleep(0.01)
assert time() < start + 2
# assert xx.status == 'finished'
assert y.status == "finished"
assert z.status == "finished"
zz = c.submit(inc, z)
yield wait(zz)
zkey = z.key
del z
start = time()
while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
yield gen.sleep(0.01)
assert time() < start + 2
xxkey = xx.key
del xx
start = time()
while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
yield gen.sleep(0.01)
assert time() < start + 2
@gen_cluster(client=True)
def test_scatter_compute_store_lose_processing(c, s, a, b):
"""
Create irreplaceable data on one machine,
cause a dependent computation to occur on another and complete
Kill the machine with the irreplaceable data. What happens to the complete
result? How about after it GCs and tries to come back?
"""
[x] = yield c.scatter([1], workers=a.address)
y = c.submit(slowinc, x, delay=0.2)
z = c.submit(inc, y)
yield gen.sleep(0.1)
yield a.close()
start = time()
while x.status == "finished":
yield gen.sleep(0.01)
assert time() < start + 2
assert y.status == "cancelled"
assert z.status == "cancelled"
@gen_cluster(client=False)
def test_serialize_future(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
future = c.submit(lambda: 1)
result = yield future
with temp_default_client(f):
future2 = pickle.loads(pickle.dumps(future))
assert future2.client is f
assert tokey(future2.key) in f.futures
result2 = yield future2
assert result == result2
yield c.close()
yield f.close()
@gen_cluster(client=False)
def test_temp_client(s, a, b):
c = yield Client(s.address, asynchronous=True)
f = yield Client(s.address, asynchronous=True)
with temp_default_client(c):
assert default_client() is c
assert default_client(f) is f
with temp_default_client(f):
assert default_client() is f
assert default_client(c) is c
yield c.close()
yield f.close()
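# Pin different parts of a small delayed graph to specific workers via the
# ``workers=`` mapping and verify both the resulting placement and the loose
# restrictions recorded for the keys listed in ``allow_other_workers``.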
@nodebug # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
def test_persist_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
total2 = delayed(sum)(L2)
out = e.persist(
L1 + L2 + [total, total2],
workers={
tuple(L1): a.address,
total: b.address,
tuple(L2): [c.address],
total2: b.address,
},
allow_other_workers=L2 + [total2],
)
yield wait(out)
assert all(v.key in a.data for v in L1)
assert total.key in b.data
assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
def test_compute_workers(e, s, a, b, c):
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
out = e.compute(
L1 + L2 + [total],
workers={tuple(L1): a.address, total: b.address, tuple(L2): [c.address]},
allow_other_workers=L1 + [total],
)
yield wait(out)
for v in L1:
assert s.worker_restrictions[v.key] == {a.address}
for v in L2:
assert s.worker_restrictions[v.key] == {c.address}
assert s.worker_restrictions[total.key] == {b.address}
assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(client=True)
def test_compute_nested_containers(c, s, a, b):
da = pytest.importorskip("dask.array")
np = pytest.importorskip("numpy")
x = da.ones(10, chunks=(5,)) + 1
future = c.compute({"x": [x], "y": 123})
result = yield future
assert isinstance(result, dict)
assert (result["x"][0] == np.ones(10) + 1).all()
assert result["y"] == 123
def test_get_restrictions():
L1 = [delayed(inc)(i) for i in range(4)]
total = delayed(sum)(L1)
L2 = [delayed(add)(i, total) for i in L1]
r1, loose = Client.get_restrictions(L2, "127.0.0.1", False)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert not loose
r1, loose = Client.get_restrictions(L2, ["127.0.0.1"], True)
assert r1 == {d.key: ["127.0.0.1"] for d in L2}
assert set(loose) == {d.key for d in L2}
r1, loose = Client.get_restrictions(L2, {total: "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
r1, loose = Client.get_restrictions(L2, {(total,): "127.0.0.1"}, True)
assert r1 == {total.key: ["127.0.0.1"]}
assert loose == [total.key]
@gen_cluster(client=True)
def test_scatter_type(c, s, a, b):
[future] = yield c.scatter([1])
assert future.type == int
d = yield c.scatter({"x": 1.0})
assert d["x"].type == float
@gen_cluster(client=True)
def test_retire_workers_2(c, s, a, b):
[x] = yield c.scatter([1], workers=a.address)
yield s.retire_workers(workers=[a.address])
assert b.data == {x.key: 1}
assert s.who_has == {x.key: {b.address}}
assert s.has_what == {b.address: {x.key}}
assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
def test_retire_many_workers(c, s, *workers):
futures = yield c.scatter(list(range(100)))
yield s.retire_workers(workers=[w.address for w in workers[:7]])
results = yield c.gather(futures)
assert results == list(range(100))
assert len(s.has_what) == len(s.nthreads) == 3
assert all(future.done() for future in futures)
assert all(s.tasks[future.key].state == "memory" for future in futures)
for w, keys in s.has_what.items():
assert 15 < len(keys) < 50
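# With work stealing effectively disabled, cheap tasks that depend on a large
# scattered list should mostly be scheduled on the worker that already holds
# that list, rather than forcing the data to move.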
@gen_cluster(client=True, nthreads=[("127.0.0.1", 3)] * 2)
def test_weight_occupancy_against_data_movement(c, s, a, b):
s.extensions["stealing"]._pc.callback_time = 1000000
s.task_duration["f"] = 0.01
def f(x, y=0, z=0):
sleep(0.01)
return x
y = yield c.scatter([[1, 2, 3, 4]], workers=[a.address])
z = yield c.scatter([1], workers=[b.address])
futures = c.map(f, [1, 2, 3, 4], y=y, z=z)
yield wait(futures)
assert sum(f.key in a.data for f in futures) >= 2
assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)])
def test_distribute_tasks_by_nthreads(c, s, a, b):
s.task_duration["f"] = 0.01
s.extensions["stealing"]._pc.callback_time = 1000000
def f(x, y=0):
sleep(0.01)
return x
y = yield c.scatter([1], broadcast=True)
futures = c.map(f, range(20), y=y)
yield wait(futures)
assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
def test_add_done_callback(c, s, a, b):
S = set()
def f(future):
future.add_done_callback(g)
def g(future):
S.add((future.key, future.status))
u = c.submit(inc, 1, key="u")
v = c.submit(throws, "hello", key="v")
w = c.submit(slowinc, 2, delay=0.3, key="w")
x = c.submit(inc, 3, key="x")
u.add_done_callback(f)
v.add_done_callback(f)
w.add_done_callback(f)
yield wait((u, v, w, x))
x.add_done_callback(f)
t = time()
while len(S) < 4 and time() - t < 2.0:
yield gen.sleep(0.01)
assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
def test_normalize_collection(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(x)
z = delayed(inc)(y)
yy = c.persist(y)
zz = c.normalize_collection(z)
assert len(z.dask) == len(y.dask) + 1
assert isinstance(zz.dask[y.key], Future)
assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
def test_normalize_collection_dask_array(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=(5,))
y = x + 1
yy = c.persist(y)
z = y.sum()
zdsk = dict(z.dask)
zz = c.normalize_collection(z)
assert z.dask == zdsk # do not mutate input
assert len(z.dask) > len(zz.dask)
assert any(isinstance(v, Future) for v in zz.dask.values())
for k, v in yy.dask.items():
assert zz.dask[k].key == v.key
result1 = yield c.compute(z)
result2 = yield c.compute(zz)
assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
da = pytest.importorskip("dask.array")
x = da.arange(2 ** 20, chunks=2 ** 10)
y = x.persist()
wait(y)
sol = y.sum().compute()
# Start releasing futures
del y
# Try to reuse futures. Previously this was a race condition,
# and the call to `.compute()` would error out due to missing
# futures on the scheduler at compute time.
normalized = c.normalize_collection(x)
res = normalized.sum().compute()
assert res == sol
@gen_cluster(client=True)
def test_auto_normalize_collection(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
assert len(x.dask) == 2
with dask.config.set(optimizations=[c._optimize_insert_futures]):
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
yield wait(yy)
start = time()
future = c.compute(y.sum())
yield future
end = time()
assert end - start < 1
start = time()
z = c.persist(y + 1)
yield wait(z)
end = time()
assert end - start < 1
def test_auto_normalize_collection_sync(c):
da = pytest.importorskip("dask.array")
x = da.ones(10, chunks=5)
y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
yy = c.persist(y)
wait(yy)
with dask.config.set(optimizations=[c._optimize_insert_futures]):
start = time()
y.sum().compute()
end = time()
assert end - start < 1
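# Scan the scheduler's transition log for a key that went from memory to
# released while also being recommended back to waiting -- that pattern would
# mean in-memory data was dropped and had to be recomputed.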
def assert_no_data_loss(scheduler):
for key, start, finish, recommendations, _ in scheduler.transition_log:
if start == "memory" and finish == "released":
for k, v in recommendations.items():
assert not (k == key and v == "waiting")
@gen_cluster(client=True, timeout=None)
def test_interleave_computations(c, s, a, b):
import distributed
distributed.g = s
xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]
total = delayed(sum)(zs)
future = c.compute(total)
done = ("memory", "released")
yield gen.sleep(0.1)
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
yield gen.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True, timeout=None)
def test_interleave_computations_map(c, s, a, b):
xs = c.map(slowinc, range(30), delay=0.02)
ys = c.map(slowdec, xs, delay=0.02)
zs = c.map(slowadd, xs, ys, delay=0.02)
done = ("memory", "released")
x_keys = [x.key for x in xs]
y_keys = [y.key for y in ys]
z_keys = [z.key for z in zs]
while not s.tasks or any(w.processing for w in s.workers.values()):
yield gen.sleep(0.05)
x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
assert x_done >= y_done >= z_done
assert x_done < y_done + 10
assert y_done < z_done + 10
@gen_cluster(client=True)
def test_scatter_dict_workers(c, s, a, b):
yield c.scatter({"a": 10}, workers=[a.address, b.address])
assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
def test_client_timeout():
c = Client("127.0.0.1:57484", asynchronous=True)
s = Scheduler(loop=c.loop, port=57484)
yield gen.sleep(4)
try:
yield s
except EnvironmentError: # port in use
yield c.close()
return
start = time()
yield c
try:
assert time() < start + 2
finally:
yield c.close()
yield s.close()
@gen_cluster(client=True)
def test_submit_list_kwargs(c, s, a, b):
futures = yield c.scatter([1, 2, 3])
def f(L=None):
return sum(L)
future = c.submit(f, L=futures)
result = yield future
assert result == 1 + 2 + 3
@gen_cluster(client=True)
def test_map_list_kwargs(c, s, a, b):
futures = yield c.scatter([1, 2, 3])
def f(i, L=None):
return i + sum(L)
futures = c.map(f, range(10), L=futures)
results = yield c.gather(futures)
assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
def test_dont_clear_waiting_data(c, s, a, b):
start = time()
x = yield c.scatter(1)
y = c.submit(slowinc, x, delay=0.5)
while y.key not in s.tasks:
yield gen.sleep(0.01)
key = x.key
del x
for i in range(5):
assert s.waiting_data[key]
yield gen.moment
@gen_cluster(client=True)
def test_get_future_error_simple(c, s, a, b):
f = c.submit(div, 1, 0)
yield wait(f)
assert f.status == "error"
function, args, kwargs, deps = yield c._get_futures_error(f)
# args contains only solid values, not keys
assert function.__name__ == "div"
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_get_futures_error(c, s, a, b):
x0 = delayed(dec)(2, dask_key_name="x0")
y0 = delayed(dec)(1, dask_key_name="y0")
x = delayed(div)(1, x0, dask_key_name="x")
y = delayed(div)(1, y0, dask_key_name="y")
tot = delayed(sum)(x, y, dask_key_name="tot")
f = c.compute(tot)
yield wait(f)
assert f.status == "error"
function, args, kwargs, deps = yield c._get_futures_error(f)
assert function.__name__ == "div"
assert args == (1, y0.key)
@gen_cluster(client=True)
def test_recreate_error_delayed(c, s, a, b):
x0 = delayed(dec)(2)
y0 = delayed(dec)(1)
x = delayed(div)(1, x0)
y = delayed(div)(1, y0)
tot = delayed(sum)(x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = yield c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_recreate_error_futures(c, s, a, b):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
assert f.status == "pending"
function, args, kwargs = yield c._recreate_error_locally(f)
assert f.status == "error"
assert function.__name__ == "div"
assert args == (1, 0)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_recreate_error_collection(c, s, a, b):
b = db.range(10, npartitions=4)
b = b.map(lambda x: 1 / x)
b = b.persist()
f = c.compute(b)
function, args, kwargs = yield c._recreate_error_locally(f)
with pytest.raises(ZeroDivisionError):
function(*args, **kwargs)
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)
def make_err(x):
# because pandas would happily work with NaN
if x == 0:
raise ValueError
return x
df2 = df.a.map(make_err)
f = c.compute(df2)
function, args, kwargs = yield c._recreate_error_locally(f)
with pytest.raises(ValueError):
function(*args, **kwargs)
# with persist
df3 = c.persist(df2)
function, args, kwargs = yield c._recreate_error_locally(df3)
with pytest.raises(ValueError):
function(*args, **kwargs)
@gen_cluster(client=True)
def test_recreate_error_array(c, s, a, b):
da = pytest.importorskip("dask.array")
pytest.importorskip("scipy")
z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
zz = z.persist()
func, args, kwargs = yield c._recreate_error_locally(zz)
assert "0.,0.,0." in str(args).replace(" ", "") # args contain actual arrays
def test_recreate_error_sync(c):
x0 = c.submit(dec, 2)
y0 = c.submit(dec, 1)
x = c.submit(div, 1, x0)
y = c.submit(div, 1, y0)
tot = c.submit(sum, x, y)
f = c.compute(tot)
with pytest.raises(ZeroDivisionError):
c.recreate_error_locally(f)
assert f.status == "error"
def test_recreate_error_not_error(c):
f = c.submit(dec, 2)
with pytest.raises(ValueError, match="No errored futures passed"):
c.recreate_error_locally(f)
@gen_cluster(client=True)
def test_retire_workers(c, s, a, b):
assert set(s.workers) == {a.address, b.address}
yield c.retire_workers(workers=[a.address], close_workers=True)
assert set(s.workers) == {b.address}
start = time()
while a.status != "closed":
yield gen.sleep(0.01)
assert time() < start + 5
class MyException(Exception):
pass
@gen_cluster(client=True)
def test_robust_unserializable(c, s, a, b):
class Foo(object):
def __getstate__(self):
raise MyException()
with pytest.raises(MyException):
future = c.submit(identity, Foo())
futures = c.map(inc, range(10))
results = yield c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
def test_robust_undeserializable(c, s, a, b):
class Foo(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
future = c.submit(identity, Foo())
with pytest.raises(MyException):
yield future
futures = c.map(inc, range(10))
results = yield c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
def test_robust_undeserializable_function(c, s, a, b):
class Foo(object):
def __getstate__(self):
return 1
def __setstate__(self, state):
raise MyException("hello")
def __call__(self, *args):
return 1
future = c.submit(Foo(), 1)
with pytest.raises(MyException):
yield future
futures = c.map(inc, range(10))
results = yield c.gather(futures)
assert results == list(map(inc, range(10)))
assert a.data and b.data
@gen_cluster(client=True)
def test_fire_and_forget(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.1)
import distributed
def f(x):
distributed.foo = 123
try:
fire_and_forget(c.submit(f, future))
start = time()
while not hasattr(distributed, "foo"):
yield gen.sleep(0.01)
assert time() < start + 2
assert distributed.foo == 123
finally:
del distributed.foo
start = time()
while len(s.tasks) > 1:
yield gen.sleep(0.01)
assert time() < start + 2
assert set(s.who_wants) == {future.key}
assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
def test_fire_and_forget_err(c, s, a, b):
fire_and_forget(c.submit(div, 1, 0))
yield gen.sleep(0.1)
# erred task should clear out quickly
start = time()
while s.tasks:
yield gen.sleep(0.01)
assert time() < start + 1
def test_quiet_client_close(loop):
with captured_logger(logging.getLogger("distributed")) as logger:
with Client(loop=loop, processes=False, threads_per_worker=4) as c:
futures = c.map(slowinc, range(1000), delay=0.01)
sleep(0.200) # stop part-way
sleep(0.1) # let things settle
out = logger.getvalue()
lines = out.strip().split("\n")
assert len(lines) <= 2
for line in lines:
assert (
not line
or "Reconnecting" in line
or "garbage" in line
or set(line) == {"-"}
), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
with captured_logger(logging.getLogger("tornado.application")) as logger:
cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
client = Client(cluster, loop=loop)
cluster.close()
client.close()
out = logger.getvalue()
assert "CancelledError" not in out
@gen_cluster()
def test_close(s, a, b):
c = yield Client(s.address, asynchronous=True)
future = c.submit(inc, 1)
yield wait(future)
assert c.id in s.wants_what
yield c.close()
start = time()
while c.id in s.wants_what or s.tasks:
yield gen.sleep(0.01)
assert time() < start + 5
def test_threadsafe(c):
def f(_):
d = deque(maxlen=50)
for i in range(100):
future = c.submit(inc, random.randint(0, 100))
d.append(future)
sleep(0.001)
c.gather(list(d))
total = c.submit(sum, list(d))
return total.result()
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(20) as e:
results = list(e.map(f, range(20)))
assert results and all(results)
del results
@pytest.mark.slow
def test_threadsafe_get(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
total += (x + random.randint(0, 20)).sum().compute()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
with ThreadPoolExecutor(30) as e:
results = list(e.map(f, range(30)))
assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
da = pytest.importorskip("dask.array")
x = da.arange(100, chunks=(10,))
def f(_):
total = 0
for i in range(20):
future = c.compute((x + random.randint(0, 20)).sum())
total += future.result()
sleep(0.001)
return total
from concurrent.futures import ThreadPoolExecutor
e = ThreadPoolExecutor(30)
results = list(e.map(f, range(30)))
assert results and all(results)
@gen_cluster(client=True)
def test_identity(c, s, a, b):
assert c.id.lower().startswith("client")
assert a.id.lower().startswith("worker")
assert b.id.lower().startswith("worker")
assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
def test_get_client(c, s, a, b):
assert get_client() is c
assert c.asynchronous
def f(x):
client = get_client()
future = client.submit(inc, x)
import distributed
assert not client.asynchronous
assert client is distributed.tmp_client
return future.result()
import distributed
distributed.tmp_client = c
try:
futures = c.map(f, range(5))
results = yield c.gather(futures)
assert results == list(map(inc, range(5)))
finally:
del distributed.tmp_client
def test_get_client_no_cluster():
# Clean up any global workers added by other tests. This test requires that
# there are no global workers.
Worker._instances.clear()
msg = "No global client found and no address provided"
with pytest.raises(ValueError, match=r"^{}$".format(msg)):
get_client()
@gen_cluster(client=True)
def test_serialize_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.arange(10, chunks=(5,)).persist()
def f(x):
assert isinstance(x, da.Array)
return x.sum().compute()
future = c.submit(f, x)
result = yield future
assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1, timeout=100)
def test_secede_simple(c, s, a):
def f():
client = get_client()
secede()
return client.submit(inc, 1).result()
result = yield c.submit(f)
assert result == 2
@pytest.mark.slow
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2, timeout=60)
def test_secede_balances(c, s, a, b):
count = threading.active_count()
def f(x):
client = get_client()
sleep(0.01) # do some work
secede()
futures = client.map(slowinc, range(10), pure=False, delay=0.01)
total = client.submit(sum, futures).result()
return total
futures = c.map(f, range(100))
start = time()
while not all(f.status == "finished" for f in futures):
yield gen.sleep(0.01)
assert threading.active_count() < count + 50
# assert 0.005 < s.task_duration['f'] < 0.1
assert len(a.log) < 2 * len(b.log)
assert len(b.log) < 2 * len(a.log)
results = yield c.gather(futures)
assert results == [sum(map(inc, range(10)))] * 100
@gen_cluster(client=True)
def test_sub_submit_priority(c, s, a, b):
def f():
client = get_client()
client.submit(slowinc, 1, delay=0.2, key="slowinc")
future = c.submit(f, key="f")
yield gen.sleep(0.1)
if len(s.tasks) == 2:
assert (
s.priorities["f"] > s.priorities["slowinc"]
) # lower values schedule first
def test_get_client_sync(c, s, a, b):
results = c.run(lambda: get_worker().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
results = c.run(lambda: get_client().scheduler.address)
assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
def test_serialize_collections_of_futures(c, s, a, b):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = yield c.scatter(ddf)
ddf2 = yield future
df2 = yield c.compute(ddf2)
assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
pd = pytest.importorskip("pandas")
dd = pytest.importorskip("dask.dataframe")
from dask.dataframe.utils import assert_eq
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2).persist()
future = c.scatter(ddf)
result = future.result()
assert_eq(result.compute(), df)
assert future.type == dd.DataFrame
assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
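# Recursive workload used by the tests below: each task secedes from its
# worker thread, submits two children via get_client(), and sums their
# results; the recursion bottoms out once x > 4.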
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def _test_dynamic_workloads_sync(c, delay):
future = c.submit(_dynamic_workload, 0, delay=delay)
assert future.result(timeout=40) == 52
def test_dynamic_workloads_sync(c):
_test_dynamic_workloads_sync(c, delay=0.02)
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
_test_dynamic_workloads_sync(c, delay="random")
@gen_cluster(client=True)
def test_bytes_keys(c, s, a, b):
key = b"inc-123"
future = c.submit(inc, 1, key=key)
result = yield future
assert type(future.key) is bytes
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
def test_unicode_ascii_keys(c, s, a, b):
uni_type = type(u"")
key = u"inc-123"
future = c.submit(inc, 1, key=key)
result = yield future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
@gen_cluster(client=True)
def test_unicode_keys(c, s, a, b):
uni_type = type(u"")
key = u"inc-123\u03bc"
future = c.submit(inc, 1, key=key)
result = yield future
assert type(future.key) is uni_type
assert set(s.tasks) == {key}
assert key in a.data or key in b.data
assert result == 2
future2 = c.submit(inc, future)
result2 = yield future2
assert result2 == 3
future3 = yield c.scatter({u"data-123": 123})
result3 = yield future3[u"data-123"]
assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
@gen.coroutine
def f():
x = yield c.scatter(123)
y = c.submit(inc, x)
z = yield c.gather(y)
raise gen.Return(z)
z = sync(loop, f)
assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
loop = loop_in_thread
with LocalCluster(
loop=loop, scheduler_port=0, dashboard_address=None, silence_logs=False
) as cluster:
with captured_logger("distributed.comm") as sio:
with Client(cluster, loop=loop) as client:
futures = client.map(lambda x: x + 1, range(10))
sleep(0.05)
cluster.close()
sleep(0.05)
text = sio.getvalue()
assert not text
def test_warn_executor(loop, s, a, b):
with warnings.catch_warnings(record=True) as record:
with Executor(s["address"], loop=loop) as c:
pass
assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_future(c, s, a, b):
x = c.submit(slowdec, 1, delay=0.5)
future = c.submit(slowinc, 1, delay=0.5)
yield gen.sleep(0.1)
results = yield [c.call_stack(future), c.call_stack(keys=[future.key])]
assert all(list(first(result.values())) == [future.key] for result in results)
assert results[0] == results[1]
result = results[0]
w = a if future.key in a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_all(c, s, a, b):
future = c.submit(slowinc, 1, delay=0.8)
while not a.executing and not b.executing:
yield gen.sleep(0.01)
result = yield c.call_stack()
w = a if a.executing else b
assert list(result) == [w.address]
assert list(result[w.address]) == [future.key]
assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_collections(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
yield gen.sleep(0.001)
result = yield c.call_stack(x)
assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
def test_call_stack_collections_all(c, s, a, b):
da = pytest.importorskip("dask.array")
x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
while not a.executing and not b.executing:
yield gen.sleep(0.001)
result = yield c.call_stack()
assert result
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": 100})
def test_profile(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
yield wait(futures)
x = yield c.profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = yield c.profile(start=0, stop=time())
assert (
x["count"]
== sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
)
y = yield c.profile(start=time() - 0.300, stop=time())
assert 0 < y["count"] < x["count"]
assert not any(p["count"] for _, p in b.profile_history)
result = yield c.profile(workers=b.address)
assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": 100})
def test_profile_keys(c, s, a, b):
x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
yield wait(x + y)
xp = yield c.profile("slowinc")
yp = yield c.profile("slowdec")
p = yield c.profile()
assert p["count"] == xp["count"] + yp["count"]
with captured_logger(logging.getLogger("distributed")) as logger:
prof = yield c.profile("does-not-exist")
assert prof == profile.create()
out = logger.getvalue()
assert not out
@gen_cluster()
def test_client_with_name(s, a, b):
with captured_logger("distributed.scheduler") as sio:
client = yield Client(
s.address, asynchronous=True, name="foo", silence_logs=False
)
assert "foo" in client.id
yield client.close()
text = sio.getvalue()
assert "foo" in text
@gen_cluster(client=True)
def test_future_defaults_to_default_client(c, s, a, b):
x = c.submit(inc, 1)
yield wait(x)
future = Future(x.key)
assert future.client is c
@gen_cluster(client=True)
def test_future_auto_inform(c, s, a, b):
x = c.submit(inc, 1)
yield wait(x)
client = yield Client(s.address, asynchronous=True)
future = Future(x.key, client)
start = time()
while future.status != "finished":
yield gen.sleep(0.01)
assert time() < start + 1
yield client.close()
def test_client_async_before_loop_starts():
with pristine_loop() as loop:
client = Client(asynchronous=True, loop=loop)
assert client.asynchronous
client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
def test_nested_compute(c, s, a, b):
def fib(x):
assert get_worker().get_current_task()
if x < 2:
return x
a = delayed(fib)(x - 1)
b = delayed(fib)(x - 2)
c = a + b
return c.compute()
future = c.submit(fib, 8)
result = yield future
assert result == 21
assert len(s.transition_log) > 50
@gen_cluster(client=True)
def test_task_metadata(c, s, a, b):
yield c.set_metadata("x", 1)
result = yield c.get_metadata("x")
assert result == 1
future = c.submit(inc, 1)
key = future.key
yield wait(future)
yield c.set_metadata(key, 123)
result = yield c.get_metadata(key)
assert result == 123
del future
while key in s.tasks:
yield gen.sleep(0.01)
with pytest.raises(KeyError):
yield c.get_metadata(key)
result = yield c.get_metadata(key, None)
assert result is None
yield c.set_metadata(["x", "a"], 1)
result = yield c.get_metadata("x")
assert result == {"a": 1}
yield c.set_metadata(["x", "b"], 2)
result = yield c.get_metadata("x")
assert result == {"a": 1, "b": 2}
result = yield c.get_metadata(["x", "a"])
assert result == 1
yield c.set_metadata(["x", "a", "c", "d"], 1)
result = yield c.get_metadata("x")
assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
def test_logs(c, s, a, b):
yield wait(c.map(inc, range(5)))
logs = yield c.get_scheduler_logs(n=5)
assert logs
for _, msg in logs:
assert "distributed.scheduler" in msg
w_logs = yield c.get_worker_logs(n=5)
assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
for log in w_logs.values():
for _, msg in log:
assert "distributed.worker" in msg
n_logs = yield c.get_worker_logs(nanny=True)
assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
n_logs = yield c.get_worker_logs(nanny=True, workers=[a.worker_address])
assert set(n_logs.keys()) == {a.worker_address}
for log in n_logs.values():
for _, msg in log:
assert "distributed.nanny" in msg
@gen_cluster(client=True)
def test_avoid_delayed_finalize(c, s, a, b):
x = delayed(inc)(1)
future = c.compute(x)
result = yield future
assert result == 2
assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
def test_config_scheduler_address(s, a, b):
with dask.config.set({"scheduler-address": s.address}):
with captured_logger("distributed.client") as sio:
c = yield Client(asynchronous=True)
assert c.scheduler.address == s.address
text = sio.getvalue()
assert s.address in text
yield c.close()
@gen_cluster(client=True)
def test_warn_when_submitting_large_values(c, s, a, b):
with warnings.catch_warnings(record=True) as record:
future = c.submit(lambda x: x + 1, b"0" * 2000000)
text = str(record[0].message)
assert "2.00 MB" in text
assert "large" in text
assert "..." in text
assert "'000" in text
assert "000'" in text
assert len(text) < 2000
with warnings.catch_warnings(record=True) as record:
data = b"0" * 2000000
for i in range(10):
future = c.submit(lambda x, y: x, data, i)
assert len(record) < 2
@gen_cluster()
def test_scatter_direct(s, a, b):
c = yield Client(s.address, asynchronous=True, heartbeat_interval=10)
last = s.clients[c.id].last_seen
start = time()
while s.clients[c.id].last_seen == last:
yield gen.sleep(0.10)
assert time() < start + 5
yield c.close()
@pytest.mark.skipif(sys.version_info[0] < 3, reason="cloudpickle Py27 issue")
@gen_cluster(client=True)
def test_unhashable_function(c, s, a, b):
d = {"a": 1}
result = yield c.submit(d.get, "a")
assert result == 1
@gen_cluster()
def test_client_name(s, a, b):
with dask.config.set({"client-name": "hello-world"}):
c = yield Client(s.address, asynchronous=True)
assert any("hello-world" in name for name in list(s.clients))
yield c.close()
def test_client_doesnt_close_given_loop(loop, s, a, b):
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 1).result() == 2
with Client(s["address"], loop=loop) as c:
assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
def test_quiet_scheduler_loss(c, s):
c._periodic_callbacks["scheduler-info"].interval = 10
with captured_logger(logging.getLogger("distributed.client")) as logger:
yield s.close()
yield c._update_scheduler_info()
text = logger.getvalue()
assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
pytest.importorskip("bokeh")
from distributed.dashboard import BokehScheduler
monkeypatch.setenv("USER", "myusername")
with cluster(
scheduler_kwargs={"services": {("dashboard", 12355): BokehScheduler}}
) as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
with dask.config.set(
{"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
):
text = c._repr_html_()
link = "http://foo-myusername:12355/status"
assert link in text
@gen_test()
def test_client_timeout_2():
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
start = time()
c = Client("127.0.0.1:3755", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
yield c
stop = time()
assert c.status == "closed"
yield c.close()
assert stop - start < 1
@gen_test()
def test_client_active_bad_port():
import tornado.web
import tornado.httpserver
application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(8080)
with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
c = Client("127.0.0.1:8080", asynchronous=True)
with pytest.raises((TimeoutError, IOError)):
yield c
yield c._close(fast=True)
http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
def test_turn_off_pickle(direct):
@gen_cluster()
def test(s, a, b):
import numpy as np
c = yield Client(s.address, asynchronous=True, serializers=["dask", "msgpack"])
try:
assert (yield c.submit(inc, 1)) == 2
yield c.submit(np.ones, 5)
yield c.scatter(1)
# Can't send complex data
with pytest.raises(TypeError):
future = yield c.scatter(inc)
# can send complex tasks (this uses pickle regardless)
future = c.submit(lambda x: x, inc)
yield wait(future)
# but can't receive complex results
with pytest.raises(TypeError):
yield c.gather(future, direct=direct)
# Run works
result = yield c.run(lambda: 1)
assert list(result.values()) == [1, 1]
result = yield c.run_on_scheduler(lambda: 1)
assert result == 1
# But not with complex return values
with pytest.raises(TypeError):
yield c.run(lambda: inc)
with pytest.raises(TypeError):
yield c.run_on_scheduler(lambda: inc)
finally:
yield c.close()
test()
@gen_cluster()
def test_de_serialization(s, a, b):
import numpy as np
c = yield Client(
s.address,
asynchronous=True,
serializers=["msgpack", "pickle"],
deserializers=["msgpack"],
)
try:
# Can send complex data
future = yield c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = yield future
finally:
yield c.close()
@gen_cluster()
def test_de_serialization_none(s, a, b):
import numpy as np
c = yield Client(s.address, asynchronous=True, deserializers=["msgpack"])
try:
# Can send complex data
future = yield c.scatter(np.ones(5))
# But can not retrieve it
with pytest.raises(TypeError):
result = yield future
finally:
yield c.close()
@gen_cluster()
def test_client_repr_closed(s, a, b):
c = yield Client(s.address, asynchronous=True, dashboard_address=None)
yield c.close()
c._repr_html_()
def test_client_repr_closed_sync(loop):
with Client(loop=loop, processes=False, dashboard_address=None) as c:
c.close()
c._repr_html_()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
def test_nested_prioritization(c, s, w):
x = delayed(inc)(1, dask_key_name=("a", 2))
y = delayed(inc)(2, dask_key_name=("a", 10))
o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))
fx, fy = c.compute([x, y])
yield wait([fx, fy])
assert (o[x.key] < o[y.key]) == (
s.tasks[tokey(fx.key)].priority < s.tasks[tokey(fy.key)].priority
)
@gen_cluster(client=True)
def test_scatter_error_cancel(c, s, a, b):
# https://github.com/dask/distributed/issues/2038
def bad_fn(x):
raise Exception("lol")
x = yield c.scatter(1)
y = c.submit(bad_fn, x)
del x
yield wait(y)
assert y.status == "error"
yield gen.sleep(0.1)
assert y.status == "error" # not cancelled
def test_no_threads_lingering():
active = dict(threading._active)
assert threading.active_count() < 40, list(active.values())
@gen_cluster()
def test_direct_async(s, a, b):
c = yield Client(s.address, asynchronous=True, direct_to_workers=True)
assert c.direct_to_workers
yield c.close()
c = yield Client(s.address, asynchronous=True, direct_to_workers=False)
assert not c.direct_to_workers
yield c.close()
def test_direct_sync(c):
assert not c.direct_to_workers
def f():
return get_client().direct_to_workers
assert c.submit(f).result()
@gen_cluster()
def test_mixing_clients(s, a, b):
c1 = yield Client(s.address, asynchronous=True)
c2 = yield Client(s.address, asynchronous=True)
future = c1.submit(inc, 1)
with pytest.raises(ValueError):
c2.submit(inc, future)
yield c1.close()
yield c2.close()
@gen_cluster(client=True)
def test_tuple_keys(c, s, a, b):
x = dask.delayed(inc)(1, dask_key_name=("x", 1))
y = dask.delayed(inc)(x, dask_key_name=("y", 1))
future = c.compute(y)
assert (yield future) == 3
@gen_cluster(client=True)
def test_map_large_kwargs_in_graph(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(100000)
futures = c.map(lambda a, b: a + b, range(100), b=x)
while not s.tasks:
yield gen.sleep(0.01)
assert len(s.tasks) == 101
assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
def test_retry(c, s, a, b):
def f():
assert dask.config.get("foo")
with dask.config.set(foo=False):
future = c.submit(f)
with pytest.raises(AssertionError):
yield future
with dask.config.set(foo=True):
yield future.retry()
yield future
@gen_cluster(client=True)
def test_retry_dependencies(c, s, a, b):
def f():
return dask.config.get("foo")
x = c.submit(f)
y = c.submit(inc, x)
with pytest.raises(KeyError):
yield y
with dask.config.set(foo=100):
yield y.retry()
result = yield y
assert result == 101
yield y.retry()
yield x.retry()
result = yield y
assert result == 101
@gen_cluster(client=True)
def test_released_dependencies(c, s, a, b):
def f(x):
return dask.config.get("foo") + 1
x = c.submit(inc, 1, key="x")
y = c.submit(f, x, key="y")
del x
with pytest.raises(KeyError):
yield y
with dask.config.set(foo=100):
yield y.retry()
result = yield y
assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
def test_profile_bokeh(c, s, a, b):
pytest.importorskip("bokeh.plotting")
from bokeh.model import Model
yield c.map(slowinc, range(10), delay=0.2)
state, figure = yield c.profile(plot=True)
assert isinstance(figure, Model)
with tmpfile("html") as fn:
try:
yield c.profile(filename=fn)
except PermissionError:
if WINDOWS:
pytest.xfail()
assert os.path.exists(fn)
@gen_cluster(client=True)
def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
future = c.submit(add, 1, 2)
subgraph = SubgraphCallable(
{"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
)
dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}
future2 = c.get(dsk, "d", sync=False)
result = yield future2
assert result == 11
# Nested subgraphs
subgraph2 = SubgraphCallable(
{
"_2": (subgraph, "_0", "_1"),
"_3": (subgraph, "_2", "_1"),
"_4": (add, "_3", future2),
},
"_4",
("_0", "_1"),
)
dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}
result = yield c.get(dsk2, "g", sync=False)
assert result == 22
@gen_cluster(client=True)
def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
dd = pytest.importorskip("dask.dataframe")
import pandas as pd
df = pd.DataFrame({"x": range(1, 11)})
ddf = dd.from_pandas(df, npartitions=2).persist()
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
ddf = ddf.map_partitions(lambda x: x)
ddf["x"] = ddf["x"].astype("f8")
result = yield c.compute(ddf)
assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
with Client(s["address"], loop=loop, direct_to_workers=True) as client:
future = client.scatter(1)
future.result()
resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
assert "gather" not in str(resp)
@gen_cluster(client=True)
def test_instances(c, s, a, b):
assert list(Client._instances) == [c]
assert list(Scheduler._instances) == [s]
assert set(Worker._instances) == {a, b}
@gen_cluster(client=True)
def test_wait_for_workers(c, s, a, b):
future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
yield gen.sleep(0.22) # 2 chances
assert not future.done()
w = yield Worker(s.address)
start = time()
yield future
assert time() < start + 1
yield w.close()
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
pytest.importorskip("pandas")
df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})
proc = psutil.Process()
start = proc.num_fds()
async with Scheduler(port=0, dashboard_address=":0") as s:
async with Worker(s.address, nthreads=2) as a, Worker(
s.address, nthreads=2
) as b:
async with Client(s.address, asynchronous=True) as c:
await df.sum().persist()
begin = time()
while proc.num_fds() > begin:
await asyncio.sleep(0.01)
assert time() < begin + 5, (start, proc.num_fds())
if sys.version_info >= (3, 5):
from distributed.tests.py3_test_client import * # noqa F401
|
parser.py
|
import argparse
import subprocess
import sys
from .conf import PAGES, QUIT_KEY, NginxConfig
from .picasso import Picasso
from .store import Store
from blessed import Terminal
from threading import Thread, Lock
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', type=str, required=True, help='The path to the Nginx log file')
parser.add_argument('-d', '--delay', type=int, default=1, help='Seconds to wait between updates')
parser.add_argument('-n', type=str, default='1000', help='Number of lines to start tailing from')
parser.add_argument('-c', '--config', type=str, help='The path to your YAML configuration file')
args = parser.parse_args()
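# Example invocation (hypothetical; the module name and paths are illustrative, and
# the relative imports mean this is normally run via its package entry point):
#   python -m <package>.parser -f /var/log/nginx/access.log -d 2 -n 5000 -c config.yaml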
class Parser(object):
def __init__(self):
nginx_config = NginxConfig(args.config)
self.lock = Lock()
self.store = Store(nginx_config)
self.terminal = Terminal()
self.picasso = Picasso(
args.file,
self.lock,
self.store,
self.terminal,
nginx_config.get_extra_variables()
)
def start(self):
thread = Thread(target=self.tail)
        thread.daemon = True
thread.start()
with self.terminal.fullscreen(), \
self.terminal.hidden_cursor(), \
self.terminal.keypad(), \
self.terminal.cbreak():
while True:
self.picasso.paint()
key = self.terminal.inkey(timeout=args.delay)
if key in PAGES.keys():
self.picasso.set_active_page(PAGES[key])
if key == QUIT_KEY:
sys.exit()
def tail(self):
f = subprocess.Popen(
['tail', '-n', args.n, '-F', args.file],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
while True:
line = f.stdout.readline()
# Grab the lock before we update our store as we don't want the data to change as we are painting.
self.lock.acquire()
self.store.aggregate(line)
self.lock.release()
def main():
Parser().start()
if __name__ == '__main__':
main()
|
vpp_transport_socket.py
|
#
# VPP Unix Domain Socket Transport.
#
import socket
import struct
import threading
import select
import multiprocessing
try:
import queue as queue
except ImportError:
import Queue as queue
import logging
from . import vpp_papi
class VppTransportSocketIOError(IOError):
# TODO: Document different values of error number (first numeric argument).
pass
class VppTransport(object):
VppTransportSocketIOError = VppTransportSocketIOError
def __init__(self, parent, read_timeout, server_address):
self.connected = False
self.read_timeout = read_timeout if read_timeout > 0 else 1
self.parent = parent
self.server_address = server_address
self.header = struct.Struct('>QII')
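        # 16-byte framing header ('>QII'): an 8-byte field followed by two 4-byte
        # fields. Only the middle field (payload length) is used by write()/_read().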
self.message_table = {}
# The following fields are set in connect().
self.sque = None
self.q = None
self.message_thread = None
self.socket = None
def msg_thread_func(self):
while True:
try:
rlist, _, _ = select.select([self.socket,
self.sque._reader], [], [])
except socket.error:
# Terminate thread
logging.error('select failed')
self.q.put(None)
return
for r in rlist:
if r == self.sque._reader:
# Terminate
self.q.put(None)
return
elif r == self.socket:
try:
msg = self._read()
if not msg:
self.q.put(None)
return
except socket.error:
self.q.put(None)
return
# Put either to local queue or if context == 0
# callback queue
if self.parent.has_context(msg):
self.q.put(msg)
else:
self.parent.msg_handler_async(msg)
else:
raise VppTransportSocketIOError(
2, 'Unknown response from select')
def connect(self, name, pfx, msg_handler, rx_qlen):
# TODO: Reorder the actions and add "roll-backs",
        # to restore a clean disconnect state when a failure happens during connect.
if self.message_thread is not None:
raise VppTransportSocketIOError(
1, "PAPI socket transport connect: Need to disconnect first.")
# Create a UDS socket
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.settimeout(self.read_timeout)
# Connect the socket to the port where the server is listening
try:
self.socket.connect(self.server_address)
except socket.error as msg:
logging.error("{} on socket {}".format(msg, self.server_address))
raise
self.connected = True
# TODO: Can this block be moved even later?
self.sque = multiprocessing.Queue()
self.q = multiprocessing.Queue()
self.message_thread = threading.Thread(target=self.msg_thread_func)
# Initialise sockclnt_create
sockclnt_create = self.parent.messages['sockclnt_create']
sockclnt_create_reply = self.parent.messages['sockclnt_create_reply']
args = {'_vl_msg_id': 15,
'name': name,
'context': 124}
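        # Bootstrap exchange: sockclnt_create is sent with the hard-coded message id
        # 15 and its reply is expected with id 16 (checked below); the context value
        # here is arbitrary.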
b = sockclnt_create.pack(args)
self.write(b)
msg = self._read()
hdr, length = self.parent.header.unpack(msg, 0)
if hdr.msgid != 16:
# TODO: Add first numeric argument.
raise VppTransportSocketIOError('Invalid reply message')
r, length = sockclnt_create_reply.unpack(msg)
self.socket_index = r.index
for m in r.message_table:
n = m.name.rstrip(b'\x00\x13')
self.message_table[n] = m.index
self.message_thread.daemon = True
self.message_thread.start()
return 0
def disconnect(self):
# TODO: Support repeated disconnect calls, recommend users to call
# disconnect when they are not sure what the state is after failures.
# TODO: Any volunteer for comprehensive docstrings?
rv = 0
try:
# Might fail, if VPP closes socket before packet makes it out,
# or if there was a failure during connect().
rv = self.parent.api.sockclnt_delete(index=self.socket_index)
except (IOError, vpp_papi.VPPApiError):
pass
self.connected = False
if self.socket is not None:
self.socket.close()
if self.sque is not None:
self.sque.put(True) # Terminate listening thread
if self.message_thread is not None:
# Allow additional connect() calls.
self.message_thread.join()
# Collect garbage.
self.sque = None
self.q = None
self.message_thread = None
self.socket = None
return rv
def suspend(self):
pass
def resume(self):
pass
def callback(self):
raise NotImplementedError
def get_callback(self, do_async):
return self.callback
def get_msg_index(self, name):
try:
return self.message_table[name]
except KeyError:
return 0
def msg_table_max_index(self):
return len(self.message_table)
def write(self, buf):
"""Send a binary-packed message to VPP."""
if not self.connected:
raise VppTransportSocketIOError(1, 'Not connected')
# Send header
header = self.header.pack(0, len(buf), 0)
n = self.socket.send(header)
n = self.socket.send(buf)
def _read(self):
hdr = self.socket.recv(16)
if not hdr:
return
(_, l, _) = self.header.unpack(hdr) # If at head of message
# Read rest of message
msg = self.socket.recv(l)
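        # recv() may return fewer than l bytes; if so, keep reading into a
        # preallocated buffer below until the whole payload has arrived.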
if l > len(msg):
nbytes = len(msg)
buf = bytearray(l)
view = memoryview(buf)
view[:nbytes] = msg
view = view[nbytes:]
left = l - nbytes
while left:
nbytes = self.socket.recv_into(view, left)
view = view[nbytes:]
left -= nbytes
return buf
if l == len(msg):
return msg
raise VppTransportSocketIOError(1, 'Unknown socket read error')
def read(self):
if not self.connected:
raise VppTransportSocketIOError(1, 'Not connected')
try:
return self.q.get(True, self.read_timeout)
except queue.Empty:
return None
|
client.py
|
import tkinter as tk
from tkinter import *
from PIL import Image, ImageTk
import socket
import threading
import activeWindows
from tkinter import messagebox as tkMessageBox
def client(ip_address, port, username, password):
def validate_client():
# print("attempting to validate client...")
IP = ip_address
PORT = int(port)
PASSWORD = password
USERNAME = username
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
server.connect((IP, PORT))
server.send("0")
confirmed = server.recv(2000)
if bool(int(confirmed)):
pass
else:
tkMessageBox.showwarning("Warning", "Unable to connect to server")
return False
except:
return False
try:
server.send(PASSWORD)
validated = server.recv(2000)
if bool(int(validated)):
pass
else:
tkMessageBox.showwarning("Warning", "Wrong password")
return False
except:
return False
try:
server.send(USERNAME)
validated = server.recv(2000)
if bool(int(validated)):
pass
else:
tkMessageBox.showwarning("Warning", "Username is already in use. Please select another one")
return False
except:
return False
server.close()
return True
def client_thread():
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
server.connect((ip_address, int(port)))
server.send("1")
server.recv(2000)
server.send(username)
server.recv(2000)
except:
exit()
room_name = server.recv(2000)
client_title_label.config(text=room_name)
def send_message(event):
message = client_chat_bar.get()
if not message == "":
try:
server.send(message)
except:
pass
client_chat_bar.delete(0, END)
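        # Server control messages arrive prefixed with "***"; the remainder is a
        # command code: "rnc <name>" renames the room, k/d/sd mean kicked /
        # disconnected / server shutdown, m/um mute/unmute, df/udf deafen/undeafen.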
def action_handler(action):
if " " in action:
action_split = action.split(" ", 1)
command = action_split[0]
parameter = action_split[1]
if command == "rnc":
client_title_label.config(text=parameter)
else:
command = action
if command == "k":
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, "You have been kicked from the chatroom")
client_chat_window.config(state=DISABLED)
server.close()
elif command == "d":
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, "You have disconnected from the chatroom")
client_chat_window.config(state=DISABLED)
server.close()
elif command == "sd":
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, "The server has been shut down")
client_chat_window.config(state=DISABLED)
server.close()
elif command == "m":
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, "You have been muted. Messages you send will no longer be processed \n")
client_chat_window.config(state=DISABLED)
elif command == "um":
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, "You have been unmuted. Messages you send will now be seen \n")
client_chat_window.config(state=DISABLED)
elif command == "df":
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, "You have been deafened. You will no longer receive messages \n")
client_chat_window.config(state=DISABLED)
elif command == "udf":
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, "You have been undeafened. You can now receive messeges \n")
client_chat_window.config(state=DISABLED)
else:
pass
def disconnect():
try:
server.send("***d " + username)
except:
pass
activeWindows.client_window_isactive = False
client_window.destroy()
client_send_button.bind("<Button-1>", send_message)
client_window.bind("<Return>", send_message)
client_window.protocol("WM_DELETE_WINDOW", disconnect)
while True:
try:
message = server.recv(2000)
if message[0: 3] == "***":
action_handler(message[3:])
if message == "***k" or message == "***d" or message == "***sd":
break
else:
client_chat_window.config(state=NORMAL)
client_chat_window.insert(END, message)
client_chat_window.config(state=DISABLED)
except:
continue
if port == "" or username == "" or ip_address == "":
tkMessageBox.showerror("Error", "One or more your fields are blank. Please fill them.")
activeWindows.client_window_isactive = False
return
try:
socket.inet_aton(ip_address)
except:
tkMessageBox.showerror("Error", "Invalid IP address")
activeWindows.client_window_isactive = False
return
try:
int(port)
except:
tkMessageBox.showerror("Error", "Your port number is invalid")
activeWindows.client_window_isactive = False
return
if not validate_client():
tkMessageBox.showerror("Error", "Unable to validate client")
activeWindows.client_window_isactive = False
return
### Window and Toolbar
client_window = tk.Toplevel()
client_window.iconbitmap("images//logo.ico")
client_window.geometry("300x500")
client_window.geometry("300x500")
client_window.resizable(False, False)
toolbar = Menu(client_window)
client_window.config(menu=toolbar)
toolbar.add_command(label="Exit")
toolbar.add_command(label="Help")
main_logo = ImageTk.PhotoImage(Image.open("images//logo.png").resize((100, 100)))
client_title_label = Label(client_window, text="Sample Chatroom!", font=("Helvetica", 15))
client_chat_window = Text(client_window, width=40, height=17, borderwidth=5, wrap=WORD)
client_chat_bar = Entry(client_window, width=33, borderwidth=5)
client_send_button = Button(client_window, text="Send", width=10, borderwidth=3)
client_logo_label = Label(client_window, image=main_logo)
client_chat_window.config(state=DISABLED)
##### Grids #####
client_title_label.grid(row=0, column=0, pady=3)
client_chat_window.grid(row=1, column=0, columnspan=2, pady=3)
client_chat_bar.grid(row=2, column=0, pady=3)
client_send_button.grid(row=2, column=1, pady=3)
client_logo_label.grid(row=4, pady=5, columnspan=2)
### starting client thread
ct = threading.Thread(target=client_thread)
ct.daemon = True
ct.start()
client_window.mainloop()
|
test_threads.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import sys
import time
from tests import debug
@pytest.mark.parametrize("count", [1, 3])
def test_thread_count(pyfile, target, run, count):
@pyfile
def code_to_debug():
import debug_me # noqa
import threading
import time
import sys
stop = False
def worker(tid, offset):
i = 0
global stop
while not stop:
time.sleep(0.01)
i += 1
threads = []
if sys.argv[1] != "1":
for i in [111, 222]:
thread = threading.Thread(target=worker, args=(i, len(threads)))
threads.append(thread)
thread.start()
print("check here") # @bp
stop = True
with debug.Session() as session:
with run(session, target(code_to_debug, args=[str(count)])):
session.set_breakpoints(code_to_debug, all)
session.wait_for_stop()
threads = session.request("threads")
assert len(threads["threads"]) == count
session.request_continue()
@pytest.mark.parametrize("resume", ["default", "resume_all", "resume_one"])
def test_step_multi_threads(pyfile, target, run, resume):
@pyfile
def code_to_debug():
        # After breaking on thread 1, thread 2 should pause waiting for event1 to be
        # set, so when we step out on thread 1 the program should finish if all
        # threads are resumed, or keep waiting for thread 2 to run if only thread 1
        # is resumed.
import debug_me # noqa
import threading
event0 = threading.Event()
event1 = threading.Event()
event2 = threading.Event()
event3 = threading.Event()
def _thread1():
while not event0.is_set():
event0.wait(timeout=0.001)
event1.set() # @break_thread_1
while not event2.is_set():
event2.wait(timeout=0.001)
# Note: we can only get here if thread 2 is also released.
event3.set()
def _thread2():
event0.set()
while not event1.is_set():
event1.wait(timeout=0.001)
event2.set()
while not event3.is_set():
event3.wait(timeout=0.001)
threads = [
threading.Thread(target=_thread1, name="thread1"),
threading.Thread(target=_thread2, name="thread2"),
]
for t in threads:
t.start()
for t in threads:
t.join()
with debug.Session() as session:
if resume == "resume_all":
session.config["steppingResumesAllThreads"] = True
elif resume == "resume_one":
session.config["steppingResumesAllThreads"] = False
with run(session, target(code_to_debug)):
session.set_breakpoints(code_to_debug, all)
stop = session.wait_for_stop()
threads = session.request("threads")
assert len(threads["threads"]) == 3
thread_name_to_id = {t["name"]: t["id"] for t in threads["threads"]}
assert stop.thread_id == thread_name_to_id["thread1"]
if resume == "resume_one":
session.request("stepOut", {"threadId": stop.thread_id})
# Wait a second and check that threads are still there.
time.sleep(1)
stack_trace = session.request(
"stackTrace", {"threadId": thread_name_to_id["thread1"]}
)
assert "_thread1" in [frame["name"] for frame in stack_trace["stackFrames"]]
stack_trace = session.request(
"stackTrace", {"threadId": thread_name_to_id["thread2"]}
)
assert "_thread2" in [frame["name"] for frame in stack_trace["stackFrames"]]
session.request_continue()
else:
session.request("stepOut", {"threadId": stop.thread_id}, freeze=False)
@pytest.mark.skipif(
sys.platform not in ["win32", "darwin"] and not sys.platform.startswith("linux"),
reason="Test not implemented for sys.platform=" + repr(sys.platform),
)
def test_debug_this_thread(pyfile, target, run):
@pyfile
def code_to_debug():
from debug_me import ptvsd
import sys
import threading
def foo(x):
ptvsd.debug_this_thread()
event.set() # @bp
return 0
event = threading.Event()
if sys.platform == "win32":
from ctypes import CFUNCTYPE, c_void_p, c_size_t, c_uint32, windll
thread_func_p = CFUNCTYPE(c_uint32, c_void_p)
thread_func = thread_func_p(
foo
) # must hold a reference to wrapper during the call
assert windll.kernel32.CreateThread(
c_void_p(0),
c_size_t(0),
thread_func,
c_void_p(0),
c_uint32(0),
c_void_p(0),
)
elif sys.platform == "darwin" or sys.platform.startswith("linux"):
from ctypes import CDLL, CFUNCTYPE, byref, c_void_p, c_ulong
from ctypes.util import find_library
libpthread = CDLL(find_library("libpthread"))
thread_func_p = CFUNCTYPE(c_void_p, c_void_p)
thread_func = thread_func_p(
foo
) # must hold a reference to wrapper during the call
assert not libpthread.pthread_create(
byref(c_ulong(0)), c_void_p(0), thread_func, c_void_p(0)
)
else:
pytest.fail(sys.platform)
event.wait()
with debug.Session() as session:
with run(session, target(code_to_debug)):
session.set_breakpoints(code_to_debug, [code_to_debug.lines["bp"]])
session.wait_for_stop()
session.request_continue()
|
train_sampling_multi_gpu.py
|
import dgl
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
import dgl.function as fn
import dgl.nn.pytorch as dglnn
import time
import argparse
from _thread import start_new_thread
from functools import wraps
from dgl.data import RedditDataset
from torch.nn.parallel import DistributedDataParallel
import tqdm
import traceback
#### Neighbor sampler
class NeighborSampler(object):
def __init__(self, g, fanouts):
self.g = g
self.fanouts = fanouts
def sample_blocks(self, seeds):
seeds = th.LongTensor(np.asarray(seeds))
blocks = []
for fanout in self.fanouts:
# For each seed node, sample ``fanout`` neighbors.
frontier = dgl.sampling.sample_neighbors(self.g, seeds, fanout, replace=True)
# Then we compact the frontier into a bipartite graph for message passing.
block = dgl.to_block(frontier, seeds)
# Obtain the seed nodes for next layer.
seeds = block.srcdata[dgl.NID]
blocks.insert(0, block)
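            # Prepend so blocks[0] is the outermost (input) block and blocks[-1] is
            # the block closest to the seed nodes; the training loop below relies on
            # this ordering when reading srcdata/dstdata.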
return blocks
class SAGE(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super().__init__()
self.n_layers = n_layers
self.n_hidden = n_hidden
self.n_classes = n_classes
self.layers = nn.ModuleList()
self.layers.append(dglnn.SAGEConv(in_feats, n_hidden, 'mean'))
for i in range(1, n_layers - 1):
self.layers.append(dglnn.SAGEConv(n_hidden, n_hidden, 'mean'))
self.layers.append(dglnn.SAGEConv(n_hidden, n_classes, 'mean'))
self.dropout = nn.Dropout(dropout)
self.activation = activation
def forward(self, blocks, x):
h = x
for l, (layer, block) in enumerate(zip(self.layers, blocks)):
# We need to first copy the representation of nodes on the RHS from the
# appropriate nodes on the LHS.
# Note that the shape of h is (num_nodes_LHS, D) and the shape of h_dst
# would be (num_nodes_RHS, D)
h_dst = h[:block.number_of_dst_nodes()]
# Then we compute the updated representation on the RHS.
# The shape of h now becomes (num_nodes_RHS, D)
h = layer(block, (h, h_dst))
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
return h
def inference(self, g, x, batch_size, device):
"""
Inference with the GraphSAGE model on full neighbors (i.e. without neighbor sampling).
g : the entire graph.
x : the input of entire node set.
        The inference code is written so that it can handle any number of nodes and
        layers.
"""
# During inference with sampling, multi-layer blocks are very inefficient because
# lots of computations in the first few layers are repeated.
# Therefore, we compute the representation of all nodes layer by layer. The nodes
        # on each layer are of course split into batches.
# TODO: can we standardize this?
nodes = th.arange(g.number_of_nodes())
for l, layer in enumerate(self.layers):
y = th.zeros(g.number_of_nodes(), self.n_hidden if l != len(self.layers) - 1 else self.n_classes)
for start in tqdm.trange(0, len(nodes), batch_size):
end = start + batch_size
batch_nodes = nodes[start:end]
block = dgl.to_block(dgl.in_subgraph(g, batch_nodes), batch_nodes)
input_nodes = block.srcdata[dgl.NID]
h = x[input_nodes].to(device)
h_dst = h[:block.number_of_dst_nodes()]
h = layer(block, (h, h_dst))
if l != len(self.layers) - 1:
h = self.activation(h)
h = self.dropout(h)
y[start:end] = h.cpu()
x = y
return y
#### Miscellaneous functions
# According to https://github.com/pytorch/pytorch/issues/17199, this decorator
# is necessary to make fork() and openmp work together.
#
# TODO: confirm if this is necessary for MXNet and Tensorflow. If so, we need
# to standardize worker process creation since our operators are implemented with
# OpenMP.
def thread_wrapped_func(func):
"""
Wraps a process entry point to make it work with OpenMP.
"""
@wraps(func)
def decorated_function(*args, **kwargs):
queue = mp.Queue()
def _queue_result():
exception, trace, res = None, None, None
try:
res = func(*args, **kwargs)
except Exception as e:
exception = e
trace = traceback.format_exc()
queue.put((res, exception, trace))
start_new_thread(_queue_result, ())
result, exception, trace = queue.get()
if exception is None:
return result
else:
assert isinstance(exception, Exception)
raise exception.__class__(trace)
return decorated_function
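# Used further below when spawning one worker process per GPU:
#   mp.Process(target=thread_wrapped_func(run), args=(proc_id, n_gpus, args, devices, data))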
def prepare_mp(g):
"""
    Explicitly materialize the CSR, CSC and COO representations of the given graph
    so that they can be shared via copy-on-write with sampler workers and GPU
    trainers.
This is a workaround before full shared memory support on heterogeneous graphs.
"""
g.in_degree(0)
g.out_degree(0)
g.find_edges([0])
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (th.argmax(pred, dim=1) == labels).float().sum() / len(pred)
def evaluate(model, g, inputs, labels, val_mask, batch_size, device):
"""
Evaluate the model on the validation set specified by ``val_mask``.
g : The entire graph.
inputs : The features of all the nodes.
labels : The labels of all the nodes.
    val_mask : A 0-1 mask indicating which nodes we actually compute the accuracy for.
batch_size : Number of nodes to compute at the same time.
device : The GPU device to evaluate on.
"""
model.eval()
with th.no_grad():
pred = model.inference(g, inputs, batch_size, device)
model.train()
return compute_acc(pred[val_mask], labels[val_mask])
def load_subtensor(g, labels, seeds, input_nodes, dev_id):
"""
    Copies features and labels of a set of nodes onto the GPU.
"""
batch_inputs = g.ndata['features'][input_nodes].to(dev_id)
batch_labels = labels[seeds].to(dev_id)
return batch_inputs, batch_labels
#### Entry point
def run(proc_id, n_gpus, args, devices, data):
# Start up distributed training, if enabled.
dev_id = devices[proc_id]
if n_gpus > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = n_gpus
th.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=proc_id)
th.cuda.set_device(dev_id)
# Unpack data
train_mask, val_mask, in_feats, labels, n_classes, g = data
train_nid = th.LongTensor(np.nonzero(train_mask)[0])
val_nid = th.LongTensor(np.nonzero(val_mask)[0])
train_mask = th.BoolTensor(train_mask)
val_mask = th.BoolTensor(val_mask)
# Split train_nid
train_nid = th.split(train_nid, len(train_nid) // n_gpus)[proc_id]
# Create sampler
sampler = NeighborSampler(g, [int(fanout) for fanout in args.fan_out.split(',')])
# Create PyTorch DataLoader for constructing blocks
dataloader = DataLoader(
dataset=train_nid.numpy(),
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
shuffle=True,
drop_last=False,
num_workers=args.num_workers)
# Define model and optimizer
model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu, args.dropout)
model = model.to(dev_id)
if n_gpus > 1:
model = DistributedDataParallel(model, device_ids=[dev_id], output_device=dev_id)
loss_fcn = nn.CrossEntropyLoss()
loss_fcn = loss_fcn.to(dev_id)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Training loop
avg = 0
iter_tput = []
for epoch in range(args.num_epochs):
tic = time.time()
# Loop over the dataloader to sample the computation dependency graph as a list of
# blocks.
for step, blocks in enumerate(dataloader):
if proc_id == 0:
tic_step = time.time()
            # The input nodes lie on the LHS of the first block.
            # The output nodes lie on the RHS of the last block.
input_nodes = blocks[0].srcdata[dgl.NID]
seeds = blocks[-1].dstdata[dgl.NID]
# Load the input features as well as output labels
batch_inputs, batch_labels = load_subtensor(g, labels, seeds, input_nodes, dev_id)
# Compute loss and prediction
batch_pred = model(blocks, batch_inputs)
loss = loss_fcn(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
if n_gpus > 1:
for param in model.parameters():
if param.requires_grad and param.grad is not None:
th.distributed.all_reduce(param.grad.data,
op=th.distributed.ReduceOp.SUM)
param.grad.data /= n_gpus
optimizer.step()
if proc_id == 0:
iter_tput.append(len(seeds) * n_gpus / (time.time() - tic_step))
if step % args.log_every == 0 and proc_id == 0:
acc = compute_acc(batch_pred, batch_labels)
print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'.format(
epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), th.cuda.max_memory_allocated() / 1000000))
if n_gpus > 1:
th.distributed.barrier()
toc = time.time()
if proc_id == 0:
print('Epoch Time(s): {:.4f}'.format(toc - tic))
if epoch >= 5:
avg += toc - tic
if epoch % args.eval_every == 0 and epoch != 0:
if n_gpus == 1:
eval_acc = evaluate(model, g, g.ndata['features'], labels, val_mask, args.batch_size, devices[0])
else:
eval_acc = evaluate(model.module, g, g.ndata['features'], labels, val_mask, args.batch_size, devices[0])
print('Eval Acc {:.4f}'.format(eval_acc))
if n_gpus > 1:
th.distributed.barrier()
if proc_id == 0:
print('Avg epoch time: {}'.format(avg / (epoch - 4)))
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument('--gpu', type=str, default='0',
help="Comma separated list of GPU device IDs.")
argparser.add_argument('--num-epochs', type=int, default=20)
argparser.add_argument('--num-hidden', type=int, default=16)
argparser.add_argument('--num-layers', type=int, default=2)
argparser.add_argument('--fan-out', type=str, default='10,25')
argparser.add_argument('--batch-size', type=int, default=1000)
argparser.add_argument('--log-every', type=int, default=20)
argparser.add_argument('--eval-every', type=int, default=5)
argparser.add_argument('--lr', type=float, default=0.003)
argparser.add_argument('--dropout', type=float, default=0.5)
argparser.add_argument('--num-workers', type=int, default=0,
help="Number of sampling processes. Use 0 for no extra process.")
args = argparser.parse_args()
devices = list(map(int, args.gpu.split(',')))
n_gpus = len(devices)
# load reddit data
data = RedditDataset(self_loop=True)
train_mask = data.train_mask
val_mask = data.val_mask
features = th.Tensor(data.features)
in_feats = features.shape[1]
labels = th.LongTensor(data.labels)
n_classes = data.num_labels
# Construct graph
g = dgl.graph(data.graph.all_edges())
g.ndata['features'] = features
prepare_mp(g)
# Pack data
data = train_mask, val_mask, in_feats, labels, n_classes, g
if n_gpus == 1:
run(0, n_gpus, args, devices, data)
else:
procs = []
for proc_id in range(n_gpus):
p = mp.Process(target=thread_wrapped_func(run),
args=(proc_id, n_gpus, args, devices, data))
p.start()
procs.append(p)
for p in procs:
p.join()
|