##############################################################################
#
# Copyright (c) 2001, 2002, 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""The StorageServer class and the exception that it may raise.
This server acts as a front-end for one or more real storages, like
file storage or Berkeley storage.
TODO: Need some basic access control-- a declaration of the methods
exported for invocation by the server.
"""
import asyncore
import codecs
import itertools
import logging
import os
import sys
import tempfile
import threading
import time
import warnings
from .zrpc.error import DisconnectedError
import ZODB.blob
import ZODB.event
import ZODB.serialize
import ZODB.TimeStamp
import zope.interface
import six
from ZEO._compat import Pickler, Unpickler, PY3, BytesIO
from ZEO.Exceptions import AuthError
from .monitor import StorageStats, StatsServer
from .zrpc.connection import ManagedServerConnection, Delay, MTDelay, Result
from .zrpc.server import Dispatcher
from ZODB.Connection import TransactionMetaData
from ZODB.loglevels import BLATHER
from ZODB.POSException import StorageError, StorageTransactionError
from ZODB.POSException import TransactionError, ReadOnlyError, ConflictError
from ZODB.serialize import referencesf
from ZODB.utils import oid_repr, p64, u64, z64
# Marker serial returned by a storage to indicate that a conflict was
# resolved (rather than the data being stored verbatim).
ResolvedSerial = b'rs'

# Module-level logger used by the ``log`` helper below.
logger = logging.getLogger('ZEO.StorageServer')
def log(message, level=logging.INFO, label='', exc_info=False):
    """Write *message* to the module logger, optionally tagged with *label*."""
    prefix = "(%s) " % label if label else ""
    logger.log(level, prefix + message, exc_info=exc_info)
class StorageServerError(StorageError):
    """Error reported when an unpicklable exception is raised.

    Used by ZEOStorage._marshal_error as a picklable stand-in so the RPC
    reply can still be delivered to the client.
    """
class ZEOStorage(object):
    """Proxy to underlying storage for a single remote client."""

    # A list of extension methods.  A subclass with extra methods
    # should override.
    extensions = []
    def __init__(self, server, read_only=0, auth_realm=None):
        """One ZEOStorage is created per connected client.

        server -- the owning StorageServer
        read_only -- true if this connection may not write
        auth_realm -- authentication realm name, or None when auth is off
        """
        self.server = server
        # timeout and stats will be initialized in register()
        self.stats = None
        self.connection = None
        self.client = None
        self.storage = None
        self.storage_id = "uninitialized"
        self.transaction = None
        self.read_only = read_only
        self.log_label = 'unconnected'
        self.locked = False  # Don't have storage lock
        self.verifying = 0
        self.store_failed = 0
        self.authenticated = 0
        self.auth_realm = auth_realm
        self.blob_tempfile = None
        # The authentication protocol may define extra methods.
        self._extensions = {}
        for func in self.extensions:
            self._extensions[func.__name__] = None
        self._iterators = {}
        self._iterator_ids = itertools.count()
        # Stores the last item that was handed out for a
        # transaction iterator.
        self._txn_iterators_last = {}
def _finish_auth(self, authenticated):
if not self.auth_realm:
return 1
self.authenticated = authenticated
return authenticated
def set_database(self, database):
self.database = database
    def notifyConnected(self, conn):
        """Record the zrpc connection and choose a client stub for its protocol."""
        self.connection = conn
        assert conn.peer_protocol_version is not None
        if conn.peer_protocol_version < b'Z309':
            # Pre-3.09 clients use an older message set: talk to them via
            # the 308 stub and expose ourselves through a 308 adapter.
            self.client = ClientStub308(conn)
            conn.register_object(ZEOStorage308Adapter(self))
        else:
            self.client = ClientStub(conn)
        self.log_label = _addr_label(conn.addr)
    def notifyDisconnected(self):
        """Client connection dropped: abort any transaction still open."""
        # When this storage closes, we must ensure that it aborts
        # any pending transaction.
        if self.transaction is not None:
            self.log("disconnected during %s transaction"
                     % (self.locked and 'locked' or 'unlocked'))
            self.tpc_abort(self.transaction.id)
        else:
            self.log("disconnected")
        self.connection = None
def __repr__(self):
tid = self.transaction and repr(self.transaction.id)
if self.storage:
stid = (self.tpc_transaction() and
repr(self.tpc_transaction().id))
else:
stid = None
name = self.__class__.__name__
return "<%s %X trans=%s s_trans=%s>" % (name, id(self), tid, stid)
    def log(self, msg, level=logging.INFO, exc_info=False):
        # Delegate to the module-level helper, tagging each line with this
        # connection's address label.
        log(msg, level=level, label=self.log_label, exc_info=exc_info)
    def setup_delegation(self):
        """Delegate several methods to the storage
        """
        # Called from register
        storage = self.storage

        info = self.get_info()

        if not info['supportsUndo']:
            # Disable undo browsing entirely for storages without undo.
            self.undoLog = self.undoInfo = lambda *a, **k: ()

        # Bind frequently-used read methods directly to the storage to
        # skip one attribute hop per call.
        self.getTid = storage.getTid
        self.load = storage.load
        self.loadSerial = storage.loadSerial
        record_iternext = getattr(storage, 'record_iternext', None)
        if record_iternext is not None:
            self.record_iternext = record_iternext

        try:
            fn = storage.getExtensionMethods
        except AttributeError:
            pass  # no extension methods
        else:
            d = fn()
            self._extensions.update(d)
            for name in d:
                assert not hasattr(self, name)
                setattr(self, name, getattr(storage, name))
        self.lastTransaction = storage.lastTransaction

        try:
            self.tpc_transaction = storage.tpc_transaction
        except AttributeError:
            if hasattr(storage, '_transaction'):
                log("Storage %r doesn't have a tpc_transaction method.\n"
                    "See ZEO.interfaces.IServeable."
                    "Falling back to using _transaction attribute, which\n."
                    "is icky.",
                    logging.ERROR)
                self.tpc_transaction = lambda : storage._transaction
            else:
                raise
def history(self,tid,size=1):
# This caters for storages which still accept
# a version parameter.
return self.storage.history(tid,size=size)
def _check_tid(self, tid, exc=None):
if self.read_only:
raise ReadOnlyError()
if self.transaction is None:
caller = sys._getframe().f_back.f_code.co_name
self.log("no current transaction: %s()" % caller,
level=logging.WARNING)
if exc is not None:
raise exc(None, tid)
else:
return 0
if self.transaction.id != tid:
caller = sys._getframe().f_back.f_code.co_name
self.log("%s(%s) invalid; current transaction = %s" %
(caller, repr(tid), repr(self.transaction.id)),
logging.WARNING)
if exc is not None:
raise exc(self.transaction.id, tid)
else:
return 0
return 1
def getAuthProtocol(self):
"""Return string specifying name of authentication module to use.
The module name should be auth_%s where %s is auth_protocol."""
protocol = self.server.auth_protocol
if not protocol or protocol == 'none':
return None
return protocol
    def register(self, storage_id, read_only):
        """Select the storage that this client will use

        This method must be the first one called by the client.
        For authenticated storages this method will be called by the client
        immediately after authentication is finished.

        Raises AuthError, ValueError (duplicate/unknown storage) or
        ReadOnlyError (write access requested on a read-only storage).
        """
        if self.auth_realm and not self.authenticated:
            raise AuthError("Client was never authenticated with server!")

        if self.storage is not None:
            self.log("duplicate register() call")
            raise ValueError("duplicate register() call")

        storage = self.server.storages.get(storage_id)
        if storage is None:
            self.log("unknown storage_id: %s" % storage_id)
            raise ValueError("unknown storage: %s" % storage_id)

        # A client asking for write access must find both the connection
        # and the storage writable.
        if not read_only and (self.read_only or storage.isReadOnly()):
            raise ReadOnlyError()

        self.read_only = self.read_only or read_only
        self.storage_id = storage_id
        self.storage = storage
        self.setup_delegation()
        self.stats = self.server.register_connection(storage_id, self)
    def get_info(self):
        """Return a dict of storage metadata handed to the client at startup."""
        storage = self.storage

        # Undo is advertised only when the storage supports it *and* the
        # client speaks at least protocol Z310.
        supportsUndo = (getattr(storage, 'supportsUndo', lambda : False)()
                        and self.connection.peer_protocol_version >= b'Z310')

        # Communicate the backend storage interfaces to the client
        storage_provides = zope.interface.providedBy(storage)
        interfaces = []
        for candidate in storage_provides.__iro__:
            interfaces.append((candidate.__module__, candidate.__name__))

        return {'length': len(storage),
                'size': storage.getSize(),
                'name': storage.getName(),
                'supportsUndo': supportsUndo,
                'extensionMethods': self.getExtensionMethods(),
                'supports_record_iternext': hasattr(self, 'record_iternext'),
                'interfaces': tuple(interfaces),
                }
def get_size_info(self):
return {'length': len(self.storage),
'size': self.storage.getSize(),
}
    def getExtensionMethods(self):
        # Mapping of extension-method name -> None.  Only the names matter
        # to the client; the callables live as attributes of this object.
        return self._extensions
def loadEx(self, oid):
self.stats.loads += 1
return self.storage.load(oid, '')
def loadBefore(self, oid, tid):
self.stats.loads += 1
return self.storage.loadBefore(oid, tid)
    def getInvalidations(self, tid):
        """Return (tid, oids) invalidated since *tid*, or None when unknown.

        A None result tells the client it must do full cache verification.
        """
        invtid, invlist = self.server.get_invalidations(self.storage_id, tid)
        if invtid is None:
            return None
        self.log("Return %d invalidations up to tid %s"
                 % (len(invlist), u64(invtid)))
        return invtid, invlist
def verify(self, oid, tid):
try:
t = self.getTid(oid)
except KeyError:
self.client.invalidateVerify(oid)
else:
if tid != t:
self.client.invalidateVerify(oid)
def zeoVerify(self, oid, s):
if not self.verifying:
self.verifying = 1
self.stats.verifying_clients += 1
try:
os = self.getTid(oid)
except KeyError:
self.client.invalidateVerify((oid, ''))
# It's not clear what we should do now. The KeyError
# could be caused by an object uncreation, in which case
# invalidation is right. It could be an application bug
# that left a dangling reference, in which case it's bad.
else:
if s != os:
self.client.invalidateVerify((oid, ''))
def endZeoVerify(self):
if self.verifying:
self.stats.verifying_clients -= 1
self.verifying = 0
self.client.endVerify()
    def pack(self, time, wait=1):
        """Pack the storage to *time*, running the pack in a thread.

        Yes, you can pack a read-only server or storage!
        """
        if wait:
            # Delayed reply: the client blocks until the pack completes.
            return run_in_thread(self._pack_impl, time)
        else:
            # If the client isn't waiting for a reply, start a thread
            # and forget about it.
            t = threading.Thread(target=self._pack_impl, args=(time,))
            t.setName("zeo storage packing thread")
            t.start()
            return None
    def _pack_impl(self, time):
        """Worker body for pack(); runs in a separate thread."""
        self.log("pack(time=%s) started..." % repr(time))
        self.storage.pack(time, referencesf)
        self.log("pack(time=%s) complete" % repr(time))
        # Broadcast new size statistics
        self.server.invalidate(0, self.storage_id, None,
                               (), self.get_size_info())
def new_oids(self, n=100):
"""Return a sequence of n new oids, where n defaults to 100"""
n = min(n, 100)
if self.read_only:
raise ReadOnlyError()
if n <= 0:
n = 1
return [self.storage.new_oid() for i in range(n)]
    # undoLog and undoInfo are potentially slow methods
    def undoInfo(self, first, last, spec):
        # Run in a worker thread so the server's async loop isn't blocked.
        return run_in_thread(self.storage.undoInfo, first, last, spec)
    def undoLog(self, first, last):
        # Run in a worker thread so the server's async loop isn't blocked.
        return run_in_thread(self.storage.undoLog, first, last)
    def tpc_begin(self, id, user, description, ext, tid=None, status=" "):
        """Start two-phase commit *id* for this client.

        tid/status are only supplied when the client is replaying a
        previously committed transaction (e.g. copying a storage).
        """
        if self.read_only:
            raise ReadOnlyError()
        if self.transaction is not None:
            if self.transaction.id == id:
                self.log("duplicate tpc_begin(%s)" % repr(id))
                return
            else:
                raise StorageTransactionError("Multiple simultaneous tpc_begin"
                                              " requests from one client.")

        t = TransactionMetaData(user, description, ext)
        t.id = id

        self.serials = []
        self.invalidated = []
        self.txnlog = CommitLog()
        self.blob_log = []
        self.tid = tid
        self.status = status
        self.store_failed = 0
        self.stats.active_txns += 1

        # Assign the transaction attribute last. This is so we don't
        # think we've entered TPC until everything is set.  Why?
        # Because if we have an error after this, the server will
        # think it is in TPC and the client will think it isn't.  At
        # that point, the client will keep trying to enter TPC and
        # server won't let it.  Errors *after* the tpc_begin call will
        # cause the client to abort the transaction.
        # (Also see https://bugs.launchpad.net/zodb/+bug/374737.)
        self.transaction = t
    def tpc_finish(self, id):
        """Second phase: finish the commit and report the new tid."""
        if not self._check_tid(id):
            return
        assert self.locked, "finished called wo lock"

        self.stats.commits += 1
        self.storage.tpc_finish(self.transaction, self._invalidate)

        # Note that the tid is still current because we still hold the
        # commit lock. We'll relinquish it in _clear_transaction.
        tid = self.storage.lastTransaction()
        # Return the tid, for cache invalidation optimization
        return Result(tid, self._clear_transaction)
def _invalidate(self, tid):
if self.invalidated:
self.server.invalidate(self, self.storage_id, tid,
self.invalidated, self.get_size_info())
    def tpc_abort(self, tid):
        """Abort the two-phase commit identified by *tid*."""
        if not self._check_tid(tid):
            return
        self.stats.aborts += 1
        self.storage.tpc_abort(self.transaction)
        self._clear_transaction()
    def _clear_transaction(self):
        # Common code at end of tpc_finish() and tpc_abort()
        if self.locked:
            # Release the commit lock so the next waiting client can vote.
            self.server.unlock_storage(self)
            self.locked = 0
        if self.transaction is not None:
            self.server.stop_waiting(self)
            self.transaction = None
            self.stats.active_txns -= 1
        if self.txnlog is not None:
            self.txnlog.close()
            self.txnlog = None
            # Discard any blob data that was uploaded but never committed.
            for oid, oldserial, data, blobfilename in self.blob_log:
                ZODB.blob.remove_committed(blobfilename)
            del self.blob_log
    def vote(self, tid):
        """First-phase vote: attempt to acquire the commit lock and vote.

        Raises StorageTransactionError when this client is already voting.
        """
        self._check_tid(tid, exc=StorageTransactionError)
        if self.locked or self.server.already_waiting(self):
            raise StorageTransactionError(
                'Already voting (%s)' % (self.locked and 'locked' or 'waiting')
                )
        return self._try_to_vote()
    def _try_to_vote(self, delay=None):
        """Acquire the commit lock and run the buffered stores + tpc_vote.

        May be called directly from vote() or re-entered later via
        _unlock_callback with the Delay that was handed to the client.
        """
        if self.connection is None:
            return  # We're disconnected
        if delay is not None and delay.sent:
            # as a consequence of the unlocking strategy, _try_to_vote
            # may be called multiple times for delayed
            # transactions. The first call will mark the delay as
            # sent. We should skip if the delay was already sent.
            return
        self.locked, delay = self.server.lock_storage(self, delay)
        if self.locked:
            try:
                self.log(
                    "Preparing to commit transaction: %d objects, %d bytes"
                    % (self.txnlog.stores, self.txnlog.size()),
                    level=BLATHER)

                if (self.tid is not None) or (self.status != ' '):
                    # Replaying an existing transaction (e.g. storage copy).
                    self.storage.tpc_begin(self.transaction,
                                           self.tid, self.status)
                else:
                    self.storage.tpc_begin(self.transaction)

                # Replay the buffered operations; stop at the first failure.
                for op, args in self.txnlog:
                    if not getattr(self, op)(*args):
                        break

                # Blob support
                while self.blob_log and not self.store_failed:
                    oid, oldserial, data, blobfilename = self.blob_log.pop()
                    self._store(oid, oldserial, data, blobfilename)

                if not self.store_failed:
                    # Only call tpc_vote of no store call failed,
                    # otherwise the serialnos() call will deliver an
                    # exception that will be handled by the client in
                    # its tpc_vote() method.
                    serials = self.storage.tpc_vote(self.transaction)
                    if serials:
                        self.serials.extend(serials)

                self.client.serialnos(self.serials)

            except Exception:
                self.storage.tpc_abort(self.transaction)
                self._clear_transaction()
                if delay is not None:
                    delay.error(sys.exc_info())
                else:
                    raise
            else:
                if delay is not None:
                    delay.reply(None)
                else:
                    return None

        else:
            # Lock not available: the client waits on the returned delay.
            return delay
def _unlock_callback(self, delay):
connection = self.connection
if connection is None:
self.server.stop_waiting(self)
else:
connection.call_from_thread(self._try_to_vote, delay)
# The public methods of the ZEO client API do not do the real work.
# They defer work until after the storage lock has been acquired.
# Most of the real implementations are in methods beginning with
# an _.
def deleteObject(self, oid, serial, id):
self._check_tid(id, exc=StorageTransactionError)
self.stats.stores += 1
self.txnlog.delete(oid, serial)
def storea(self, oid, serial, data, id):
self._check_tid(id, exc=StorageTransactionError)
self.stats.stores += 1
self.txnlog.store(oid, serial, data)
def checkCurrentSerialInTransaction(self, oid, serial, id):
self._check_tid(id, exc=StorageTransactionError)
self.txnlog.checkread(oid, serial)
def restorea(self, oid, serial, data, prev_txn, id):
self._check_tid(id, exc=StorageTransactionError)
self.stats.stores += 1
self.txnlog.restore(oid, serial, data, prev_txn)
    def storeBlobStart(self):
        """Open a temp file to receive an uploaded blob's chunks."""
        assert self.blob_tempfile is None
        # Created in the storage's temp dir so the final rename is cheap.
        self.blob_tempfile = tempfile.mkstemp(
            dir=self.storage.temporaryDirectory())
    def storeBlobChunk(self, chunk):
        # Append one uploaded chunk to the temp file opened by
        # storeBlobStart().
        os.write(self.blob_tempfile[0], chunk)
    def storeBlobEnd(self, oid, serial, data, id):
        """Finish a chunked blob upload and queue it for commit."""
        self._check_tid(id, exc=StorageTransactionError)
        assert self.txnlog is not None  # effectively not allowed after undo
        fd, tempname = self.blob_tempfile
        self.blob_tempfile = None
        os.close(fd)
        self.blob_log.append((oid, serial, data, tempname))
    def storeBlobShared(self, oid, serial, data, filename, id):
        """Queue a blob that the client placed directly in the shared
        blob directory (only a bare filename is accepted)."""
        self._check_tid(id, exc=StorageTransactionError)
        assert self.txnlog is not None  # effectively not allowed after undo

        # Reconstruct the full path from the filename in the OID directory.
        # Reject anything containing a path separator or not ending in
        # '.tmp' (optionally followed by a single character) -- a
        # malicious filename could otherwise escape the blob directory.
        # NOTE(review): only os.path.sep is checked; os.path.altsep
        # ('/' on Windows) is not -- confirm whether that matters here.
        if (os.path.sep in filename
            or not (filename.endswith('.tmp')
                    or filename[:-1].endswith('.tmp')
                    )
            ):
            logger.critical(
                "We're under attack! (bad filename to storeBlobShared, %r)",
                filename)
            raise ValueError(filename)

        filename = os.path.join(self.storage.fshelper.getPathForOID(oid),
                                filename)
        self.blob_log.append((oid, serial, data, filename))
    def sendBlob(self, oid, serial):
        # Push the blob file for (oid, serial) to the client.
        self.client.storeBlob(oid, serial, self.storage.loadBlob(oid, serial))
def undo(*a, **k):
raise NotImplementedError
def undoa(self, trans_id, tid):
self._check_tid(tid, exc=StorageTransactionError)
self.txnlog.undo(trans_id)
    def _op_error(self, oid, err, op):
        """Record a failed store-phase operation *op* on *oid*.

        The (possibly marshalled) exception is appended to self.serials so
        the client receives it through its serialnos()/tpc_vote handling.
        """
        self.store_failed = 1
        if isinstance(err, ConflictError):
            self.stats.conflicts += 1
            self.log("conflict error oid=%s msg=%s" %
                     (oid_repr(oid), str(err)), BLATHER)
        if not isinstance(err, TransactionError):
            # Unexpected errors are logged and passed to the client
            self.log("%s error: %s, %s" % ((op,)+ sys.exc_info()[:2]),
                     logging.ERROR, exc_info=True)
            # Replace unpicklable exceptions so the RPC reply can be sent.
            err = self._marshal_error(err)
        # The exception is reported back as newserial for this oid
        self.serials.append((oid, err))
def _delete(self, oid, serial):
err = None
try:
self.storage.deleteObject(oid, serial, self.transaction)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
err = e
self._op_error(oid, err, 'delete')
return err is None
def _checkread(self, oid, serial):
err = None
try:
self.storage.checkCurrentSerialInTransaction(
oid, serial, self.transaction)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
err = e
self._op_error(oid, err, 'checkCurrentSerialInTransaction')
return err is None
    def _store(self, oid, serial, data, blobfile=None):
        """Apply a queued store (or blob store) against the storage.

        Returns True when the store succeeded; on failure the error is
        recorded via _op_error (for delivery through serialnos()) and
        False is returned.
        """
        err = None
        try:
            if blobfile is None:
                newserial = self.storage.store(
                    oid, serial, data, '', self.transaction)
            else:
                newserial = self.storage.storeBlob(
                    oid, serial, data, blobfile, '', self.transaction)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as error:
            self._op_error(oid, error, 'store')
            err = error
        else:
            if serial != b"\0\0\0\0\0\0\0\0":
                # Not a brand-new object: clients must invalidate it.
                self.invalidated.append(oid)

            if isinstance(newserial, bytes):
                # Old storage API returned a single serial; normalize to
                # the list-of-(oid, serial) form used below.
                newserial = [(oid, newserial)]

            for oid, s in newserial or ():
                if s == ResolvedSerial:
                    self.stats.conflicts_resolved += 1
                    self.log("conflict resolved oid=%s"
                             % oid_repr(oid), BLATHER)
                self.serials.append((oid, s))

        return err is None
def _restore(self, oid, serial, data, prev_txn):
err = None
try:
self.storage.restore(oid, serial, data, '', prev_txn,
self.transaction)
except (SystemExit, KeyboardInterrupt):
raise
except Exception as err:
self._op_error(oid, err, 'restore')
return err is None
    def _undo(self, trans_id):
        """Apply a queued undo of *trans_id* (commit lock held).

        Returns True on success; failures are routed through _op_error
        (keyed on the null oid z64) and False is returned.
        """
        err = None
        try:
            tid, oids = self.storage.undo(trans_id, self.transaction)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            err = e
            self._op_error(z64, err, 'undo')
        else:
            self.invalidated.extend(oids)
            # ResolvedSerial markers tell the client to drop its cached
            # copies of the undone objects.
            self.serials.extend((oid, ResolvedSerial) for oid in oids)

        return err is None
    def _marshal_error(self, error):
        # Try to pickle the exception.  If it can't be pickled,
        # the RPC response would fail, so use something that can be pickled.
        if PY3:
            pickler = Pickler(BytesIO(), 3)
        else:
            # The pure-python version requires at least one argument (PyPy)
            pickler = Pickler(0)
        pickler.fast = 1
        try:
            pickler.dump(error)
        except:
            # NOTE(review): bare except is deliberate here -- any failure
            # to pickle, whatever its type, must be downgraded to a
            # picklable StorageServerError so the reply can be delivered.
            msg = "Couldn't pickle storage exception: %s" % repr(error)
            self.log(msg, logging.ERROR)
            error = StorageServerError(msg)
        return error
# IStorageIteration support
def iterator_start(self, start, stop):
iid = next(self._iterator_ids)
self._iterators[iid] = iter(self.storage.iterator(start, stop))
return iid
def iterator_next(self, iid):
iterator = self._iterators[iid]
try:
info = next(iterator)
except StopIteration:
del self._iterators[iid]
item = None
if iid in self._txn_iterators_last:
del self._txn_iterators_last[iid]
else:
item = (info.tid,
info.status,
info.user,
info.description,
info.extension)
# Keep a reference to the last iterator result to allow starting a
# record iterator off it.
self._txn_iterators_last[iid] = info
return item
def iterator_record_start(self, txn_iid, tid):
record_iid = next(self._iterator_ids)
txn_info = self._txn_iterators_last[txn_iid]
if txn_info.tid != tid:
raise Exception(
'Out-of-order request for record iterator for transaction %r'
% tid)
self._iterators[record_iid] = iter(txn_info)
return record_iid
def iterator_record_next(self, iid):
iterator = self._iterators[iid]
try:
info = next(iterator)
except StopIteration:
del self._iterators[iid]
item = None
else:
item = (info.oid,
info.tid,
info.data,
info.data_txn)
return item
def iterator_gc(self, iids):
for iid in iids:
self._iterators.pop(iid, None)
def server_status(self):
return self.server.server_status(self.storage_id)
    def set_client_label(self, label):
        # Prefix subsequent log lines with a client-supplied label plus
        # the connection's address.
        self.log_label = str(label)+' '+_addr_label(self.connection.addr)
class StorageServerDB(object):
    """Adapter registered with each underlying storage via registerDB().

    It forwards storage-generated invalidations to the StorageServer so
    they fan out to all connected ZEO clients.
    """

    def __init__(self, server, storage_id):
        self.server = server
        self.storage_id = storage_id
        self.references = ZODB.serialize.referencesf

    def invalidate(self, tid, oids, version=''):
        """Forward an invalidation from the storage to every client."""
        if version:
            raise StorageServerError("Versions aren't supported.")
        self.server.invalidate(None, self.storage_id, tid, oids)

    def invalidateCache(self):
        """Ask the server to force all clients to revalidate their caches."""
        self.server._invalidateCache(self.storage_id)

    # The server stores record data verbatim; no transformation needed.
    transform_record_data = untransform_record_data = lambda self, data: data
class StorageServer(object):
    """The server side implementation of ZEO.

    The StorageServer is the 'manager' for incoming connections.  Each
    connection is associated with its own ZEOStorage instance (defined
    below).  The StorageServer may handle multiple storages; each
    ZEOStorage instance only handles a single storage.
    """

    # Classes we instantiate.  A subclass might override.
    from .zrpc.server import Dispatcher as DispatcherClass
    ZEOStorageClass = ZEOStorage
    ManagedServerConnectionClass = ManagedServerConnection
    def __init__(self, addr, storages,
                 read_only=0,
                 invalidation_queue_size=100,
                 invalidation_age=None,
                 transaction_timeout=None,
                 monitor_address=None,
                 auth_protocol=None,
                 auth_database=None,
                 auth_realm=None,
                 ):
        """StorageServer constructor.

        This is typically invoked from the start.py script.

        Arguments (the first two are required and positional):

        addr -- the address at which the server should listen.  This
            can be a tuple (host, port) to signify a TCP/IP connection
            or a pathname string to signify a Unix domain socket
            connection.  A hostname may be a DNS name or a dotted IP
            address.

        storages -- a dictionary giving the storage(s) to handle.  The
            keys are the storage names, the values are the storage
            instances, typically FileStorage or Berkeley storage
            instances.  By convention, storage names are typically
            strings representing small integers starting at '1'.

        read_only -- an optional flag saying whether the server should
            operate in read-only mode.  Defaults to false.  Note that
            even if the server is operating in writable mode,
            individual storages may still be read-only.  But if the
            server is in read-only mode, no write operations are
            allowed, even if the storages are writable.  Note that
            pack() is considered a read-only operation.

        invalidation_queue_size -- The storage server keeps a queue
            of the objects modified by the last N transactions, where
            N == invalidation_queue_size.  This queue is used to
            speed client cache verification when a client disconnects
            for a short period of time.

        invalidation_age --
            If the invalidation queue isn't big enough to support a
            quick verification, but the last transaction seen by a
            client is younger than the invalidation age, then
            invalidations will be computed by iterating over
            transactions later than the given transaction.

        transaction_timeout -- The maximum amount of time to wait for
            a transaction to commit after acquiring the storage lock.
            If the transaction takes too long, the client connection
            will be closed and the transaction aborted.

        monitor_address -- The address at which the monitor server
            should listen.  If specified, a monitor server is started.
            The monitor server provides server statistics in a simple
            text format.

        auth_protocol -- The name of the authentication protocol to use.
            Examples are "digest" and "srp".

        auth_database -- The name of the password database filename.
            It should be in a format compatible with the authentication
            protocol used; for instance, "sha" and "srp" require different
            formats.

            Note that to implement an authentication protocol, a server
            and client authentication mechanism must be implemented in a
            auth_* module, which should be stored inside the "auth"
            subdirectory.  This module may also define a DatabaseClass
            variable that should indicate what database should be used
            by the authenticator.
        """

        self.addr = addr
        self.storages = storages
        msg = ", ".join(
            ["%s:%s:%s" % (name, storage.isReadOnly() and "RO" or "RW",
                           storage.getName())
             for name, storage in storages.items()])
        log("%s created %s with storages: %s" %
            (self.__class__.__name__, read_only and "RO" or "RW", msg))

        self._lock = threading.Lock()
        # Per-storage commit lock bookkeeping and vote wait-queues.
        self._commit_locks = {}
        self._waiting = dict((name, []) for name in storages)

        self.read_only = read_only
        self.auth_protocol = auth_protocol
        self.auth_database = auth_database
        self.auth_realm = auth_realm
        self.database = None
        if auth_protocol:
            self._setup_auth(auth_protocol)

        # A list, by server, of at most invalidation_queue_size invalidations.
        # The list is kept in sorted order with the most recent
        # invalidation at the front.  The list never has more than
        # self.invq_bound elements.
        self.invq_bound = invalidation_queue_size
        self.invq = {}
        for name, storage in storages.items():
            self._setup_invq(name, storage)
            storage.registerDB(StorageServerDB(self, name))
        self.invalidation_age = invalidation_age
        self.connections = {}
        self.socket_map = {}
        self.dispatcher = self.DispatcherClass(
            addr, factory=self.new_connection, map=self.socket_map)
        if len(self.addr) == 2 and self.addr[1] == 0 and self.addr[0]:
            # Port 0 means "pick one": record the actual bound address.
            self.addr = self.dispatcher.socket.getsockname()
        ZODB.event.notify(
            Serving(self, address=self.dispatcher.socket.getsockname()))
        self.stats = {}
        self.timeouts = {}
        for name in self.storages.keys():
            self.connections[name] = []
            self.stats[name] = StorageStats(self.connections[name])
            if transaction_timeout is None:
                # An object with no-op methods
                timeout = StubTimeoutThread()
            else:
                timeout = TimeoutThread(transaction_timeout)
                timeout.setName("TimeoutThread for %s" % name)
                timeout.start()
            self.timeouts[name] = timeout
        if monitor_address:
            warnings.warn(
                "The monitor server is deprecated. Use the server_status\n"
                "ZEO method instead.",
                DeprecationWarning)
            self.monitor = StatsServer(monitor_address, self.stats)
        else:
            self.monitor = None
    def _setup_invq(self, name, storage):
        """(Re)build the invalidation queue for storage *name*."""
        lastInvalidations = getattr(storage, 'lastInvalidations', None)
        if lastInvalidations is None:
            # Using None below doesn't look right, but the first
            # element in invq is never used.  See get_invalidations.
            # (If it was used, it would generate an error, which would
            # be good. :) Doing this allows clients that were up to
            # date when a server was restarted to pick up transactions
            # it subsequently missed.
            self.invq[name] = [(storage.lastTransaction() or z64, None)]
        else:
            self.invq[name] = list(lastInvalidations(self.invq_bound))
            # Most recent invalidation first.
            self.invq[name].reverse()
    def _setup_auth(self, protocol):
        """Load the auth_<protocol> module and configure authentication."""
        # Can't be done in global scope, because of cyclic references
        from .auth import get_module

        name = self.__class__.__name__

        module = get_module(protocol)
        if not module:
            log("%s: no such an auth protocol: %s" % (name, protocol))
            return

        storage_class, client, db_class = module

        if not storage_class or not issubclass(storage_class, ZEOStorage):
            log(("%s: %s isn't a valid protocol, must have a StorageClass" %
                 (name, protocol)))
            self.auth_protocol = None
            return
        self.ZEOStorageClass = storage_class

        log("%s: using auth protocol: %s" % (name, protocol))

        # We create a Database instance here for use with the authenticator
        # modules.  Having one instance allows it to be shared between
        # multiple storages, avoiding the need to bloat each with a new
        # authenticator Database that would contain the same info, and
        # also avoiding any possibly synchronization issues between them.
        self.database = db_class(self.auth_database)
        if self.database.realm != self.auth_realm:
            raise ValueError("password database realm %r "
                             "does not match storage realm %r"
                             % (self.database.realm, self.auth_realm))
    def new_connection(self, sock, addr):
        """Internal: factory to create a new connection.

        This is called by the Dispatcher class in ZEO.zrpc.server
        whenever accept() returns a socket for a new incoming
        connection.
        """
        if self.auth_protocol and self.database:
            zstorage = self.ZEOStorageClass(self, self.read_only,
                                            auth_realm=self.auth_realm)
            zstorage.set_database(self.database)
        else:
            zstorage = self.ZEOStorageClass(self, self.read_only)

        c = self.ManagedServerConnectionClass(sock, addr, zstorage, self)
        log("new connection %s: %s" % (addr, repr(c)), logging.DEBUG)
        return c
    def register_connection(self, storage_id, conn):
        """Internal: register a connection with a particular storage.

        This is called by ZEOStorage.register().

        The dictionary self.connections maps each storage name to a
        list of current connections for that storage; this information
        is needed to handle invalidation.  This function updates this
        dictionary.

        Returns the stats object for the appropriate storage.
        """
        self.connections[storage_id].append(conn)
        return self.stats[storage_id]
    def _invalidateCache(self, storage_id):
        """We need to invalidate any caches we have.

        This basically means telling our clients to
        invalidate/revalidate their caches.  We do this by closing them
        and making them reconnect.
        """

        # This method can be called from foreign threads.  We have to
        # worry about interaction with the main thread.

        # 1. We modify self.invq which is read by get_invalidations
        #    below.  This is why get_invalidations makes a copy of
        #    self.invq.

        # 2. We access connections.  There are two dangers:
        #
        # a. We miss a new connection.  This is not a problem because
        #    if a client connects after we get the list of connections,
        #    then it will have to read the invalidation queue, which
        #    has already been reset.
        #
        # b. A connection is closed while we are iterating.  This
        #    doesn't matter, because we can call should_close on a closed
        #    connection.

        # Rebuild invq
        self._setup_invq(storage_id, self.storages[storage_id])

        # Make a copy since we are going to be mutating the
        # connections indirectly by closing them.  We don't care about
        # later transactions since they will have to validate their
        # caches anyway.
        for p in self.connections[storage_id][:]:
            try:
                p.connection.should_close()
                p.connection.trigger.pull_trigger()
            except DisconnectedError:
                pass
    def invalidate(self, conn, storage_id, tid, invalidated=(), info=None):
        """Internal: broadcast info and invalidations to clients.

        This is called from several ZEOStorage methods.

        invalidated is a sequence of oids.

        This can do three different things:

        - If the invalidated argument is non-empty, it broadcasts
          invalidateTransaction() messages to all clients of the given
          storage except the current client (the conn argument).

        - If the invalidated argument is empty and the info argument
          is a non-empty dictionary, it broadcasts info() messages to
          all clients of the given storage, including the current
          client.

        - If both the invalidated argument and the info argument are
          non-empty, it broadcasts invalidateTransaction() messages to all
          clients except the current, and sends an info() message to
          the current client.
        """

        # This method can be called from foreign threads.  We have to
        # worry about interaction with the main thread.

        # 1. We modify self.invq which is read by get_invalidations
        #    below.  This is why get_invalidations makes a copy of
        #    self.invq.

        # 2. We access connections.  There are two dangers:
        #
        # a. We miss a new connection.  This is not a problem because
        #    we are called while the storage lock is held.  A new
        #    connection that tries to read data won't read committed
        #    data without first receiving an invalidation.  Also, if a
        #    client connects after getting the list of connections,
        #    then it will have to read the invalidation queue, which
        #    has been updated to reflect the invalidations.
        #
        # b. A connection closes while we are iterating.  We'll need
        #    to catch and ignore Disconnected errors.

        if invalidated:
            # Record in the bounded queue (most recent first).
            invq = self.invq[storage_id]
            if len(invq) >= self.invq_bound:
                invq.pop()
            invq.insert(0, (tid, invalidated))

        for p in self.connections[storage_id]:
            try:
                if invalidated and p is not conn:
                    p.client.invalidateTransaction(tid, invalidated)
                elif info is not None:
                    p.client.info(info)
            except DisconnectedError:
                pass
def get_invalidations(self, storage_id, tid):
"""Return a tid and list of all objects invalidation since tid.
The tid is the most recent transaction id seen by the client.
Returns None if it is unable to provide a complete list
of invalidations for tid. In this case, client should
do full cache verification.
"""
# We make a copy of invq because it might be modified by a
# foreign (other than main thread) calling invalidate above.
invq = self.invq[storage_id][:]
oids = set()
latest_tid = None
if invq and invq[-1][0] <= tid:
# We have needed data in the queue
for _tid, L in invq:
if _tid <= tid:
break
oids.update(L)
latest_tid = invq[0][0]
elif (self.invalidation_age and
(self.invalidation_age >
(time.time()-ZODB.TimeStamp.TimeStamp(tid).timeTime())
)
):
for t in self.storages[storage_id].iterator(p64(u64(tid)+1)):
for r in t:
oids.add(r.oid)
latest_tid = t.tid
elif not invq:
log("invq empty")
else:
log("tid to old for invq %s < %s" % (u64(tid), u64(invq[-1][0])))
return latest_tid, list(oids)
def loop(self, timeout=30):
try:
asyncore.loop(timeout, map=self.socket_map)
except Exception:
if not self.__closed:
raise # Unexpected exc
__thread = None
def start_thread(self, daemon=True):
self.__thread = thread = threading.Thread(target=self.loop)
thread.setName("StorageServer(%s)" % _addr_label(self.addr))
thread.setDaemon(daemon)
thread.start()
__closed = False
def close(self, join_timeout=1):
"""Close the dispatcher so that there are no new connections.
This is only called from the test suite, AFAICT.
"""
if self.__closed:
return
self.__closed = True
# Stop accepting connections
self.dispatcher.close()
if self.monitor is not None:
self.monitor.close()
ZODB.event.notify(Closed(self))
# Close open client connections
for sid, connections in self.connections.items():
for conn in connections[:]:
try:
conn.connection.close()
except:
pass
for name, storage in six.iteritems(self.storages):
logger.info("closing storage %r", name)
storage.close()
if self.__thread is not None:
self.__thread.join(join_timeout)
def close_conn(self, conn):
"""Internal: remove the given connection from self.connections.
This is the inverse of register_connection().
"""
for cl in self.connections.values():
if conn.obj in cl:
cl.remove(conn.obj)
    def lock_storage(self, zeostore, delay):
        """Try to take the commit lock for ``zeostore``'s storage.

        Returns ``(True, delay)`` when the lock was acquired, or
        ``(False, delay)`` when the caller must wait on the returned Delay.
        ``delay`` is None for a brand-new request and the previously
        returned Delay when retrying a queued request.
        """
        storage_id = zeostore.storage_id
        waiting = self._waiting[storage_id]
        with self._lock:
            if storage_id in self._commit_locks:
                # The lock is held by another zeostore
                locked = self._commit_locks[storage_id]

                assert locked is not zeostore, (storage_id, delay)

                if locked.connection is None:
                    # The holder disconnected without releasing: abort its
                    # transaction, drop the lock and retry from scratch.
                    locked.log("Still locked after disconnected. Unlocking.",
                               logging.CRITICAL)
                    if locked.transaction:
                        locked.storage.tpc_abort(locked.transaction)
                    del self._commit_locks[storage_id]
                    # yuck: have to manipulate lock to appease with :(
                    self._lock.release()
                    try:
                        return self.lock_storage(zeostore, delay)
                    finally:
                        self._lock.acquire()

                if delay is None:
                    # New request, queue it
                    assert not [i for i in waiting if i[0] is zeostore
                                ], "already waiting"
                    delay = Delay()
                    waiting.append((zeostore, delay))
                    zeostore.log("(%r) queue lock: transactions waiting: %s"
                                 % (storage_id, len(waiting)),
                                 _level_for_waiting(waiting)
                                 )

                return False, delay
            else:
                # Lock is free: take it, arm the timeout and record stats.
                self._commit_locks[storage_id] = zeostore
                self.timeouts[storage_id].begin(zeostore)
                self.stats[storage_id].lock_time = time.time()
                if delay is not None:
                    # we were waiting, stop
                    waiting[:] = [i for i in waiting if i[0] is not zeostore]
                zeostore.log("(%r) lock: transactions waiting: %s"
                             % (storage_id, len(waiting)),
                             _level_for_waiting(waiting)
                             )
                return True, delay
def unlock_storage(self, zeostore):
storage_id = zeostore.storage_id
waiting = self._waiting[storage_id]
with self._lock:
assert self._commit_locks[storage_id] is zeostore
del self._commit_locks[storage_id]
self.timeouts[storage_id].end(zeostore)
self.stats[storage_id].lock_time = None
callbacks = waiting[:]
if callbacks:
assert not [i for i in waiting if i[0] is zeostore
], "waiting while unlocking"
zeostore.log("(%r) unlock: transactions waiting: %s"
% (storage_id, len(callbacks)),
_level_for_waiting(callbacks)
)
for zeostore, delay in callbacks:
try:
zeostore._unlock_callback(delay)
except (SystemExit, KeyboardInterrupt):
raise
except Exception:
logger.exception("Calling unlock callback")
def stop_waiting(self, zeostore):
storage_id = zeostore.storage_id
waiting = self._waiting[storage_id]
with self._lock:
new_waiting = [i for i in waiting if i[0] is not zeostore]
if len(new_waiting) == len(waiting):
return
waiting[:] = new_waiting
zeostore.log("(%r) dequeue lock: transactions waiting: %s"
% (storage_id, len(waiting)),
_level_for_waiting(waiting)
)
def already_waiting(self, zeostore):
storage_id = zeostore.storage_id
waiting = self._waiting[storage_id]
with self._lock:
return bool([i for i in waiting if i[0] is zeostore])
    def server_status(self, storage_id):
        """Return a status dict for one storage: stats, queue depth,
        timeout-thread liveness and the last committed tid (hex)."""
        status = self.stats[storage_id].__dict__.copy()
        status['connections'] = len(status['connections'])
        status['waiting'] = len(self._waiting[storage_id])
        # NOTE(review): Thread.isAlive() was removed in Python 3.9;
        # StubTimeoutThread only defines isAlive -- confirm the range of
        # Python versions this module must support before changing it.
        status['timeout-thread-is-alive'] = self.timeouts[storage_id].isAlive()
        last_transaction = self.storages[storage_id].lastTransaction()
        last_transaction_hex = codecs.encode(last_transaction, 'hex_codec')
        if PY3:
            # doctests and maybe clients expect a str, not bytes
            last_transaction_hex = str(last_transaction_hex, 'ascii')
        status['last-transaction'] = last_transaction_hex
        return status
def ruok(self):
return dict((storage_id, self.server_status(storage_id))
for storage_id in self.storages)
def _level_for_waiting(waiting):
    """Map the number of queued transactions to a log severity.

    More than 9 waiters is critical, more than 3 a warning, else debug.
    """
    count = len(waiting)
    if count > 9:
        return logging.CRITICAL
    elif count > 3:
        return logging.WARNING
    return logging.DEBUG
class StubTimeoutThread(object):
    """No-op stand-in for TimeoutThread when transaction timeouts are
    disabled; implements the same begin/end/isAlive surface."""

    def begin(self, client):
        pass

    def end(self, client):
        pass

    def isAlive(self):
        # Mirrors threading.Thread.isAlive just enough for server_status().
        return 'stub'
class TimeoutThread(threading.Thread):
    """Monitors transaction progress and generates timeouts."""

    # There is one TimeoutThread per storage, because there's one
    # transaction lock per storage.

    def __init__(self, timeout):
        # timeout: seconds a client may hold the commit lock before the
        # server forcibly closes its connection.
        threading.Thread.__init__(self)
        self.setName("TimeoutThread")
        self.setDaemon(1)
        self._timeout = timeout
        self._client = None
        self._deadline = None
        self._cond = threading.Condition() # Protects _client and _deadline

    def begin(self, client):
        # Called from the restart code the "main" thread, whenever the
        # storage lock is being acquired.  (Serialized by asyncore.)
        with self._cond:
            assert self._client is None
            self._client = client
            self._deadline = time.time() + self._timeout
            # Wake run() so it starts counting down.
            self._cond.notify()

    def end(self, client):
        # Called from the "main" thread whenever the storage lock is
        # being released.  (Serialized by asyncore.)
        with self._cond:
            assert self._client is not None
            assert self._client is client
            self._client = None
            self._deadline = None

    def run(self):
        # Code running in the thread.
        while 1:
            with self._cond:
                while self._deadline is None:
                    # Block until begin() arms a deadline.
                    self._cond.wait()
                howlong = self._deadline - time.time()
                if howlong <= 0:
                    # Prevent reporting timeout more than once
                    self._deadline = None
                client = self._client # For the howlong <= 0 branch below

            if howlong <= 0:
                # Deadline passed: log and close the offender's connection
                # from its network thread.
                client.log("Transaction timeout after %s seconds" %
                           self._timeout, logging.CRITICAL)
                try:
                    client.connection.call_from_thread(client.connection.close)
                except:
                    # NOTE(review): bare except predates this review; it also
                    # releases the lock state when the close itself fails.
                    client.log("Timeout failure", logging.CRITICAL,
                               exc_info=sys.exc_info())
                    self.end(client)
            else:
                # Sleep until the current deadline and re-check.
                time.sleep(howlong)
def run_in_thread(method, *args):
    """Run ``method(*args)`` on a SlowMethodThread and return its MTDelay.

    The returned delay is what zrpc uses to send the reply once the
    background thread finishes.
    """
    t = SlowMethodThread(method, args)
    t.start()
    return t.delay
class SlowMethodThread(threading.Thread):
    """Thread to run potentially slow storage methods.

    Clients can use the delay attribute to access the MTDelay object
    used to send a zrpc response at the right time.
    """

    # Some storage methods can take a long time to complete.  If we
    # run these methods via a standard asyncore read handler, they
    # will block all other server activity until they complete.  To
    # avoid blocking, we spawn a separate thread, return an MTDelay()
    # object, and have the thread reply() when it finishes.

    def __init__(self, method, args):
        threading.Thread.__init__(self)
        # Fix: Thread.setName() is deprecated; assign the ``name``
        # property instead (available since Python 2.6).
        self.name = "SlowMethodThread for %s" % method.__name__
        self._method = method
        self._args = args
        self.delay = MTDelay()

    def run(self):
        # Forward either the result or the exception info through the
        # delay so the network thread can answer the waiting client.
        try:
            result = self._method(*self._args)
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception:
            self.delay.error(sys.exc_info())
        else:
            self.delay.reply(result)
class ClientStub(object):
    """Server-side proxy for invoking callbacks on a connected ZEO client."""

    def __init__(self, rpc):
        self.rpc = rpc

    def beginVerify(self):
        self.rpc.callAsync('beginVerify')

    def invalidateVerify(self, args):
        self.rpc.callAsync('invalidateVerify', args)

    def endVerify(self):
        self.rpc.callAsync('endVerify')

    def invalidateTransaction(self, tid, args):
        # Note that this method is *always* called from a different
        # thread than self.rpc's async thread. It is the only method
        # for which this is true and requires special consideration!

        # callAsyncNoSend is important here because:
        # - callAsyncNoPoll isn't appropriate because
        #   the network thread may not wake up for a long time,
        #   delaying invalidations for too long. (This is demonstrated
        #   by a test failure.)
        # - callAsync isn't appropriate because (on the server) it tries
        #   to write to the socket.  If self.rpc's network thread also
        #   tries to write at the same time, we can run into problems
        #   because handle_write isn't thread safe.
        self.rpc.callAsyncNoSend('invalidateTransaction', tid, args)

    def serialnos(self, arg):
        self.rpc.callAsyncNoPoll('serialnos', arg)

    def info(self, arg):
        self.rpc.callAsyncNoPoll('info', arg)

    def storeBlob(self, oid, serial, blobfilename):
        """Stream the blob file to the client in ~59KB chunks."""

        def store():
            yield ('receiveBlobStart', (oid, serial))
            # Fix: use a context manager so the file is closed even when
            # the iterator is abandoned before exhaustion.
            with open(blobfilename, 'rb') as f:
                while 1:
                    chunk = f.read(59000)
                    if not chunk:
                        break
                    yield ('receiveBlobChunk', (oid, serial, chunk, ))
            yield ('receiveBlobStop', (oid, serial))

        self.rpc.callAsyncIterator(store())
class ClientStub308(ClientStub):
    """Client stub for the ZEO 3.08 protocol, whose invalidation messages
    carry (oid, version) pairs rather than bare oids."""

    def invalidateTransaction(self, tid, args):
        # Versions no longer exist; pair each oid with an empty version
        # string so old clients can unpack the tuples.
        paired = [(oid, '') for oid in args]
        ClientStub.invalidateTransaction(self, tid, paired)

    def invalidateVerify(self, oid):
        ClientStub.invalidateVerify(self, (oid, ''))
class ZEOStorage308Adapter(object):
    """Adapter letting ZEO 3.08 clients (versions-era protocol) talk to a
    modern ZEOStorage.  Version arguments are accepted but must be empty.
    """

    # NOTE(review): StorageServerError is not imported in the visible file
    # header -- confirm it is defined or imported elsewhere in this module.

    def __init__(self, storage):
        self.storage = storage

    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__ makes instances unhashable
        # on Python 3 -- confirm instances are never used as dict/set keys.
        return self is other or self.storage is other

    def getSerial(self, oid):
        return self.storage.loadEx(oid)[1] # Z200

    def history(self, oid, version, size=1):
        if version:
            raise ValueError("Versions aren't supported.")
        return self.storage.history(oid, size=size)

    def getInvalidations(self, tid):
        result = self.storage.getInvalidations(tid)
        if result is not None:
            # Old clients expect (oid, version) pairs.
            result = result[0], [(oid, '') for oid in result[1]]
        return result

    def verify(self, oid, version, tid):
        if version:
            raise StorageServerError("Versions aren't supported.")
        return self.storage.verify(oid, tid)

    def loadEx(self, oid, version=''):
        if version:
            raise StorageServerError("Versions aren't supported.")
        data, serial = self.storage.loadEx(oid)
        return data, serial, ''

    def storea(self, oid, serial, data, version, id):
        if version:
            raise StorageServerError("Versions aren't supported.")
        self.storage.storea(oid, serial, data, id)

    def storeBlobEnd(self, oid, serial, data, version, id):
        if version:
            raise StorageServerError("Versions aren't supported.")
        self.storage.storeBlobEnd(oid, serial, data, id)

    def storeBlobShared(self, oid, serial, data, filename, version, id):
        if version:
            raise StorageServerError("Versions aren't supported.")
        self.storage.storeBlobShared(oid, serial, data, filename, id)

    def getInfo(self):
        result = self.storage.getInfo()
        # Tell old clients that versions are unavailable.
        result['supportsVersions'] = False
        return result

    def zeoVerify(self, oid, s, sv=None):
        if sv:
            raise StorageServerError("Versions aren't supported.")
        self.storage.zeoVerify(oid, s)

    def modifiedInVersion(self, oid):
        return ''

    def versions(self):
        return ()

    def versionEmpty(self, version):
        return True

    def commitVersion(self, *a, **k):
        raise NotImplementedError

    abortVersion = commitVersion

    def zeoLoad(self, oid): # Z200
        p, s = self.storage.loadEx(oid)
        return p, s, '', None, None

    def __getattr__(self, name):
        # Everything not overridden passes through to the real storage.
        return getattr(self.storage, name)
def _addr_label(addr):
    """Render a server address as a display string.

    Accepts a bytes/str path (e.g. a unix socket) or a (host, port) pair.
    """
    if isinstance(addr, six.binary_type):
        return addr.decode('ascii')
    if isinstance(addr, six.string_types):
        return addr
    host, port = addr
    return "%s:%s" % (host, port)
class CommitLog(object):
    """Buffer of a transaction's store/undo calls, pickled to a temp file
    and replayed in order via iteration."""

    def __init__(self):
        # Fix: suffix typo ".comit-log" -> ".commit-log".
        self.file = tempfile.TemporaryFile(suffix=".commit-log")
        self.pickler = Pickler(self.file, 1)
        self.pickler.fast = 1
        self.stores = 0

    def size(self):
        # The pickler only appends, so the current position is the size.
        return self.file.tell()

    def delete(self, oid, serial):
        self.pickler.dump(('_delete', (oid, serial)))
        self.stores += 1

    def checkread(self, oid, serial):
        self.pickler.dump(('_checkread', (oid, serial)))
        self.stores += 1

    def store(self, oid, serial, data):
        self.pickler.dump(('_store', (oid, serial, data)))
        self.stores += 1

    def restore(self, oid, serial, data, prev_txn):
        self.pickler.dump(('_restore', (oid, serial, data, prev_txn)))
        self.stores += 1

    def undo(self, transaction_id):
        self.pickler.dump(('_undo', (transaction_id, )))
        self.stores += 1

    def __iter__(self):
        """Yield the logged (method-name, args) records in commit order."""
        self.file.seek(0)
        unpickler = Unpickler(self.file)
        for i in range(self.stores):
            yield unpickler.load()

    def close(self):
        if self.file:
            self.file.close()
            self.file = None
class ServerEvent(object):
def __init__(self, server, **kw):
self.__dict__.update(kw)
self.server = server
class Serving(ServerEvent):
    """Event: the storage server has started serving connections."""
    pass
class Closed(ServerEvent):
    """Event: the storage server has been shut down."""
    pass
|
version.py | # Adapted from https://github.com/snap-stanford/ogb/blob/master/ogb/version.py
import os
import logging
from threading import Thread
__version__ = '1.2.2'
try:
os.environ['OUTDATED_IGNORE'] = '1'
from outdated import check_outdated # noqa
except ImportError:
check_outdated = None
def check():
    """Warn via logging when the installed WILDS version is outdated.

    Entirely best-effort: any failure (no network, missing dependency)
    is silently ignored so imports are never disturbed.
    """
    try:
        is_outdated, latest = check_outdated('wilds', __version__)
        if is_outdated:
            message = (
                f'The WILDS package is out of date. Your version is '
                f'{__version__}, while the latest version is {latest}.')
            logging.warning(message)
    except Exception:
        pass
# Kick off the version check in a background thread so importing the
# package never blocks on the network; skipped entirely when the optional
# ``outdated`` dependency is unavailable.
if check_outdated is not None:
    thread = Thread(target=check)
    thread.start()
|
test_xsorted.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# std
import os
import random
import threading
import time
import collections
# 3rd party
import pygal
from pygal.style import CleanStyle as memory_profile_chart_style
import psutil
import pytest
from mock import Mock
from hypothesis import given, example, strategies as st
from toolz.itertoolz import partition_all, sliding_window
# local
from xsorted import xsorter, xsorted, _split, _merge, _dump, _load
from . fixtures import xsorted_custom_serializer_fixture, benchmark_items_fixture
from . util import random_strings
def assert_property_xsorted_is_the_same_as_sorted(_xsorted, things, reverse):
    """
    Assert the property that given a list of things, when the list of things is sorted using
    ``xsorted``, the result should be the same as when the list of things is sorted using the
    builtin ``sorted``.

    :param _xsorted: Xsorted function under test.
    :param things: Iterable containing the list of things to sort.
    :param reverse: True if things should be sorted in reverse order.
    """
    expected = sorted(things, reverse=reverse)
    actual = list(_xsorted(things, reverse=reverse))
    assert actual == expected
def assert_property_xsorted_produces_ordered_iterable(_xsorted, things, reverse):
    """
    Assert the property that xsorted should produce an ordered iterable.

    :param _xsorted: Xsorted function under test.
    :param things: Iterable containing the list of things to sort.
    :param reverse: True if things should be sorted in reverse order.
    """
    actual = list(_xsorted(things, reverse=reverse))
    if reverse:
        actual = list(reversed(actual))
    # Pairwise comparison with plain zip; no need for toolz.sliding_window.
    assert all(a <= b for a, b in zip(actual, actual[1:]))
@given(things=st.lists(st.integers()), reverse=st.booleans())
def test_properties_xsorted(things, reverse):
    """
    Property: for any list of integers (and either sort order),
    ``xsorted`` produces exactly the same result as the builtin ``sorted``.
    """
    assert_property_xsorted_is_the_same_as_sorted(xsorted, things, reverse)
@given(things=st.lists(st.integers()), reverse=st.booleans())
def test_property_xsorted_produces_ordered_iterable(things, reverse):
    """
    Property: for any list of integers (and either sort order),
    ``xsorted`` yields a monotonically ordered iterable.
    """
    assert_property_xsorted_produces_ordered_iterable(xsorted, things, reverse)
@given(things=st.lists(st.integers()), reverse=st.booleans())
def test_property_xsorted_custom_serializer_is_the_same_as_sorted(xsorted_custom_serializer_fixture,
                                                                  things, reverse):
    """
    Verify that xsorted still matches the builtin ``sorted`` when a custom
    serialization dump/load pair is supplied via the fixture.
    """
    assert_property_xsorted_is_the_same_as_sorted(xsorted_custom_serializer_fixture, things, reverse)
@given(things=st.lists(st.integers()), reverse=st.booleans())
def test_property_xsorted_custom_serializer_produces_ordered_iterable(xsorted_custom_serializer_fixture,
                                                                      things, reverse):
    """
    Verify that xsorted still yields an ordered iterable when a custom
    serialization dump/load pair is supplied via the fixture.
    """
    assert_property_xsorted_produces_ordered_iterable(xsorted_custom_serializer_fixture, things, reverse)
@given(lists_of_things=st.lists(st.lists(st.integers())))
def test_serializer_dump_load(lists_of_things):
    """
    Verify that the default serializer round-trips: loading each dumped id
    yields the original item unchanged.
    """
    ids = [_dump(thing) for thing in lists_of_things]
    actual = [list(_load(id)) for id in ids]
    assert lists_of_things == actual
def test_default_serializer_cleanup():
    """
    Verify that the default serializer cleans up after itself.
    """
    path = _dump([0])
    assert os.path.exists(path)
    # Consuming the load iterator is what triggers removal of the temp file.
    list(_load(path))
    assert not os.path.exists(path)
@given(st.integers(min_value=1, max_value=1000), st.integers(min_value=1, max_value=1000))
def test_split(range_size, partition_size):
    """
    Verify that the default _split correctly splits the iterable into sorted batches.
    """
    dump = Mock()
    iterable = list(range(range_size))
    list(_split(partition_size=partition_size, dump=dump, iterable=iterable))
    # One dump per batch: ceil(range_size / partition_size).
    expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))
    assert dump.call_count == expected_call_count
@given(
    partition_size=st.integers(min_value=1, max_value=100),
    num_items=st.integers(min_value=0, max_value=100),
)
def test_merge(partition_size, num_items):
    """
    Verify that _merge correctly merges batches into one sorted iterable.
    """
    items = range(num_items)
    partitions = list(partition_all(partition_size, items))
    partition_ids = range(len(partitions))
    # Shuffling the partition order must not matter: the merged output is
    # still the full sorted sequence.
    random.shuffle(partitions)
    merged = _merge(lambda x: partitions[x], partition_ids)
    assert list(merged) == list(items)
def do_benchmark(items, function_to_test, benchmark=None):
    """
    Generic benchmarking helper that iterates through ``items`` sorted with
    ``function_to_test``.

    :param items: The items to sort.
    :param function_to_test: The sort function to use.
    :param benchmark: Benchmark fixture if this should be run as part of
        pytest-benchmark; when None the iteration runs once, directly.
    """
    def consume():
        # Exhaust the (possibly lazy) sorted iterable.
        for _ in function_to_test(items):
            pass

    if benchmark is not None:
        benchmark(consume)
    else:
        consume()
def export_memory_profile_chart(memory_usage_samples, num_strings, strings_length):
    """
    Export an svg chart of a memory profile run.

    :param memory_usage_samples: List of tuples (elapsed time, memory usage) sampled during profiling.
    :param num_strings: The number of strings that were generated.
    :param strings_length: The length of the strings generated.
    :return: Path to the generated svg file
    """
    strings_size_kb = int(strings_length / 1000)
    # TODO : Would be nice to some more info about the environment used for the run, version, processor, memory etc.
    chart = pygal.XY(
        # The title is interpolated from local variables via **locals().
        title='Memory Used to Generate and Sort {num_strings} Random {strings_size_kb}KB Strings'.format(**locals()),
        fill=True,
        range=(0, num_strings * 1.1),
        show_dots=False,
        x_title='Time (seconds)',
        y_title='Memory Usage (KB)',
        style=memory_profile_chart_style,
    )
    # One series per function under test (e.g. "sorted" vs "xsorted").
    for function_under_test, memory_usage in memory_usage_samples.items():
        chart.add(function_under_test, memory_usage)
    path = os.path.join(os.path.dirname(__file__), '..', 'docs', 'test_profile_memory.svg')
    chart.render_to_file(path)
    return path
def test_profile_memory():
    """
    Profile the memory used when sorting a large memory object using ``xsorted`` compared to ``sorted``.

    The sorting is performed in a separate thread, and in the main thread the memory usage is sampled. The difference
    between the process memory usage at the start of the test and the memory usage at the sample point are recorded.

    We expect sorted to use more memory than xsorted. If this is not the case then xsorted is most likely not a
    correct external sort.
    """
    process = psutil.Process()
    num_strings, strings_length = int(1e5 / 2), 1000
    memory_usage_samples = collections.defaultdict(list)

    for function_under_test in (sorted, xsorted):
        strings = random_strings(length=strings_length, num=num_strings)
        thread = threading.Thread(target=do_benchmark, args=(strings, function_under_test))
        # Fix: psutil's memory_info_ex() is deprecated; memory_info()
        # exposes the same ``rss`` field.
        start = process.memory_info().rss
        thread.start()
        # Fix: time.clock() was removed in Python 3.8; perf_counter() is
        # the documented replacement for elapsed-time measurement.
        start_time = time.perf_counter()
        while thread.is_alive():
            thread.join(0.001)
            value = (process.memory_info().rss - start) / 1e3
            point = time.perf_counter() - start_time, value
            memory_usage_samples[function_under_test.__name__].append(point)

    export_memory_profile_chart(memory_usage_samples, num_strings, strings_length)

    # extract only the memory usage from the sorted dict for determining the peak usage for each function under test.
    values_only = (
        (sample[1] for sample in samples[1])
        for samples in sorted(memory_usage_samples.items())
    )
    peak_sorted, peak_xsorted = map(max, values_only)
    assert peak_sorted / peak_xsorted >= 15
@pytest.mark.parametrize('partition_size', [
    1024,
    2048,
    4096,
    8192,
])
def test_benchmark_xsorted(partition_size, benchmark, benchmark_items_fixture):
    """
    Benchmark the performance of the ``xsorted`` function at several
    partition sizes.
    """
    xsorted_ = xsorter(partition_size=partition_size)
    do_benchmark(benchmark_items_fixture, xsorted_, benchmark)
def test_benchmark_sorted(benchmark, benchmark_items_fixture):
    """
    Benchmark the performance of the ``sorted`` function (for comparison)
    """
    do_benchmark(benchmark_items_fixture, sorted, benchmark)
@pytest.mark.skip()
def test_benchmark_xsorted_debug(benchmark_items_fixture):
    """
    Debug the benchmark test of the performance of the ``xsorted`` function since both benchmarking and debugging use
    settrace. Just comment out the skip mark when debugging.
    """
    do_benchmark(benchmark_items_fixture, xsorted)
|
gcsio_test.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for Google Cloud Storage client."""
import errno
import logging
import multiprocessing
import os
import random
import threading
import time
import unittest
import httplib2
import mock
from apache_beam.io import gcsio
from apache_beam.io.gcp.internal.clients import storage
# Protect against environments where apitools library is not available.
# pylint: disable=wrong-import-order, wrong-import-position
try:
from apitools.base.py.exceptions import HttpError
except ImportError:
HttpError = None
# pylint: enable=wrong-import-order, wrong-import-position
class FakeGcsClient(object):
  """In-memory fake of the GCS storage client."""
  # Fake storage client. Usage in gcsio.py is client.objects.Get(...) and
  # client.objects.Insert(...).

  def __init__(self):
    self.objects = FakeGcsObjects()
    # Referenced in GcsIO.batch_copy() and GcsIO.batch_delete().
    self._http = object()
class FakeFile(object):
  """In-memory representation of a single GCS object and its metadata."""

  def __init__(self, bucket, obj, contents, generation):
    self.bucket = bucket
    self.object = obj
    self.contents = contents
    self.generation = generation

  def get_metadata(self):
    # Only the fields gcsio reads are populated.
    return storage.Object(
        bucket=self.bucket,
        name=self.object,
        generation=self.generation,
        size=len(self.contents))
class FakeGcsObjects(object):
  """Fake of the GCS ``objects`` API surface used by gcsio: Get, Insert,
  Copy, Delete and paginated List."""

  def __init__(self):
    self.files = {}
    # Store the last generation used for a given object name. Note that this
    # has to persist even past the deletion of the object.
    self.last_generation = {}
    self.list_page_tokens = {}

  def add_file(self, f):
    self.files[(f.bucket, f.object)] = f
    self.last_generation[(f.bucket, f.object)] = f.generation

  def get_file(self, bucket, obj):
    return self.files.get((bucket, obj), None)

  def delete_file(self, bucket, obj):
    del self.files[(bucket, obj)]

  def get_last_generation(self, bucket, obj):
    return self.last_generation.get((bucket, obj), 0)

  def Get(self, get_request, download=None):  # pylint: disable=invalid-name
    f = self.get_file(get_request.bucket, get_request.object)
    if f is None:
      # Failing with a HTTP 404 if file does not exist.
      raise HttpError({'status': 404}, None, None)
    if download is None:
      return f.get_metadata()
    else:
      stream = download.stream

      def get_range_callback(start, end):
        # Inclusive byte range, mirroring the GCS download API.
        assert start >= 0 and end >= start and end < len(f.contents)
        stream.write(f.contents[start:end + 1])

      download.GetRange = get_range_callback

  def Insert(self, insert_request, upload=None):  # pylint: disable=invalid-name
    assert upload is not None
    # Each (re)insert of an object name bumps its generation.
    generation = self.get_last_generation(insert_request.bucket,
                                          insert_request.name) + 1
    f = FakeFile(insert_request.bucket, insert_request.name, '', generation)

    # Stream data into file.
    stream = upload.stream
    data_list = []
    while True:
      data = stream.read(1024 * 1024)
      if not data:
        break
      data_list.append(data)
    f.contents = ''.join(data_list)

    self.add_file(f)

  def Copy(self, copy_request):  # pylint: disable=invalid-name
    src_file = self.get_file(copy_request.sourceBucket,
                             copy_request.sourceObject)
    if not src_file:
      raise HttpError(
          httplib2.Response({'status': '404'}), '404 Not Found',
          'https://fake/url')
    generation = self.get_last_generation(copy_request.destinationBucket,
                                          copy_request.destinationObject) + 1
    dest_file = FakeFile(copy_request.destinationBucket,
                         copy_request.destinationObject, src_file.contents,
                         generation)
    self.add_file(dest_file)

  def Delete(self, delete_request):  # pylint: disable=invalid-name
    # Here, we emulate the behavior of the GCS service in raising a 404
    # error if this object does not exist.
    if self.get_file(delete_request.bucket, delete_request.object):
      self.delete_file(delete_request.bucket, delete_request.object)
    else:
      raise HttpError(
          httplib2.Response({'status': '404'}), '404 Not Found',
          'https://fake/url')

  def List(self, list_request):  # pylint: disable=invalid-name
    bucket = list_request.bucket
    prefix = list_request.prefix or ''
    matching_files = []

    for file_bucket, file_name in sorted(iter(self.files)):
      if bucket == file_bucket and file_name.startswith(prefix):
        file_object = self.files[(file_bucket, file_name)].get_metadata()
        matching_files.append(file_object)

    # Handle pagination.
    items_per_page = 5
    if not list_request.pageToken:
      range_start = 0
    else:
      # Page tokens are single-use: consume and delete them.
      if list_request.pageToken not in self.list_page_tokens:
        raise ValueError('Invalid page token.')
      range_start = self.list_page_tokens[list_request.pageToken]
      del self.list_page_tokens[list_request.pageToken]

    result = storage.Objects(
        items=matching_files[range_start:range_start + items_per_page])
    if range_start + items_per_page < len(matching_files):
      next_range_start = range_start + items_per_page
      next_page_token = '_page_token_%s_%s_%d' % (bucket, prefix,
                                                  next_range_start)
      self.list_page_tokens[next_page_token] = next_range_start
      result.nextPageToken = next_page_token
    return result
class FakeApiCall(object):
  """Result record for one operation executed by FakeBatchApiRequest."""

  def __init__(self, exception):
    # ``exception`` is None on success, or the exception the call raised.
    self.exception = exception
    self.is_error = self.exception is not None
class FakeBatchApiRequest(object):
  """Fake of apitools' BatchApiRequest: queues (service, method, request)
  operations and executes them synchronously, capturing any exceptions."""

  def __init__(self, **unused_kwargs):
    self.operations = []

  def Add(self, service, method, request):  # pylint: disable=invalid-name
    self.operations.append((service, method, request))

  def Execute(self, unused_http, **unused_kwargs):  # pylint: disable=invalid-name
    api_calls = []
    for service, method, request in self.operations:
      exception = None
      try:
        getattr(service, method)(request)
      except Exception as e:  # pylint: disable=broad-except
        # Mirror the real batch API: failures become per-call results.
        exception = e
      api_calls.append(FakeApiCall(exception))
    return api_calls
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestGCSPathParser(unittest.TestCase):
  """Unit tests for gcsio.parse_gcs_path."""

  def test_gcs_path(self):
    self.assertEqual(
        gcsio.parse_gcs_path('gs://bucket/name'), ('bucket', 'name'))
    self.assertEqual(
        gcsio.parse_gcs_path('gs://bucket/name/sub'), ('bucket', 'name/sub'))

  def test_bad_gcs_path(self):
    # Anything lacking both a bucket and an object name must be rejected.
    self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs://')
    self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs://bucket')
    self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs://bucket/')
    self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs:///name')
    self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs:///')
    self.assertRaises(ValueError, gcsio.parse_gcs_path, 'gs:/blah/bucket/name')
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestGCSIO(unittest.TestCase):
  def _insert_random_file(self, client, path, size, generation=1):
    # Create a FakeFile of ``size`` random bytes at the given gs:// path,
    # register it with the fake client, and return it for inspection.
    bucket, name = gcsio.parse_gcs_path(path)
    f = FakeFile(bucket, name, os.urandom(size), generation)
    client.objects.add_file(f)
    return f
  def setUp(self):
    # Each test gets a fresh fake client wrapped by the GcsIO under test.
    self.client = FakeGcsClient()
    self.gcs = gcsio.GcsIO(self.client)
  def test_exists(self):
    # exists() is True only for objects actually present in the fake store.
    file_name = 'gs://gcsio-test/dummy_file'
    file_size = 1234
    self._insert_random_file(self.client, file_name, file_size)
    self.assertFalse(self.gcs.exists(file_name + 'xyz'))
    self.assertTrue(self.gcs.exists(file_name))
@mock.patch.object(FakeGcsObjects, 'Get')
def test_exists_failure(self, mock_get):
# Raising an error other than 404. Raising 404 is a valid failure for
# exists() call.
mock_get.side_effect = HttpError({'status': 400}, None, None)
file_name = 'gs://gcsio-test/dummy_file'
file_size = 1234
self._insert_random_file(self.client, file_name, file_size)
with self.assertRaises(HttpError) as cm:
self.gcs.exists(file_name)
self.assertEquals(400, cm.exception.status_code)
  def test_size(self):
    # size() must report the byte length of the stored object.
    file_name = 'gs://gcsio-test/dummy_file'
    file_size = 1234
    self._insert_random_file(self.client, file_name, file_size)
    self.assertTrue(self.gcs.exists(file_name))
    self.assertEqual(1234, self.gcs.size(file_name))
  def test_file_mode(self):
    # Opened files expose the mode they were opened with.
    file_name = 'gs://gcsio-test/dummy_mode_file'
    with self.gcs.open(file_name, 'wb') as f:
      assert f.mode == 'wb'
    with self.gcs.open(file_name, 'rb') as f:
      assert f.mode == 'rb'
  def test_bad_file_modes(self):
    # Only plain read/write binary modes are supported; mixed modes fail.
    file_name = 'gs://gcsio-test/dummy_mode_file'
    with self.assertRaises(ValueError):
      self.gcs.open(file_name, 'w+')
    with self.assertRaises(ValueError):
      self.gcs.open(file_name, 'r+b')
  def test_empty_batches(self):
    # Batch operations on empty input return empty results, not errors.
    self.assertEqual([], self.gcs.copy_batch([]))
    self.assertEqual([], self.gcs.delete_batch([]))
def test_delete(self):
file_name = 'gs://gcsio-test/delete_me'
file_size = 1024
# Test deletion of non-existent file.
self.gcs.delete(file_name)
self._insert_random_file(self.client, file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(file_name) in self.client.objects.files)
self.gcs.delete(file_name)
self.assertFalse(
gcsio.parse_gcs_path(file_name) in self.client.objects.files)
  @mock.patch('apache_beam.io.gcsio.BatchApiRequest')
  def test_delete_batch(self, *unused_args):
    # Route batch requests through the synchronous fake.
    gcsio.BatchApiRequest = FakeBatchApiRequest
    file_name_pattern = 'gs://gcsio-test/delete_me_%d'
    file_size = 1024
    num_files = 10

    # Test deletion of non-existent files.
    result = self.gcs.delete_batch(
        [file_name_pattern % i for i in range(num_files)])
    self.assertTrue(result)
    for i, (file_name, exception) in enumerate(result):
      self.assertEqual(file_name, file_name_pattern % i)
      # Deleting a missing file is not an error for delete_batch.
      self.assertEqual(exception, None)
      self.assertFalse(self.gcs.exists(file_name_pattern % i))

    # Insert some files.
    for i in range(num_files):
      self._insert_random_file(self.client, file_name_pattern % i, file_size)

    # Check files inserted properly.
    for i in range(num_files):
      self.assertTrue(self.gcs.exists(file_name_pattern % i))

    # Execute batch delete.
    self.gcs.delete_batch([file_name_pattern % i for i in range(num_files)])

    # Check files deleted properly.
    for i in range(num_files):
      self.assertFalse(self.gcs.exists(file_name_pattern % i))
def test_copy(self):
src_file_name = 'gs://gcsio-test/source'
dest_file_name = 'gs://gcsio-test/dest'
file_size = 1024
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.copy(src_file_name, dest_file_name)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.assertRaises(IOError, self.gcs.copy, 'gs://gcsio-test/non-existent',
'gs://gcsio-test/non-existent-destination')
@mock.patch('apache_beam.io.gcsio.BatchApiRequest')
def test_copy_batch(self, *unused_args):
gcsio.BatchApiRequest = FakeBatchApiRequest
from_name_pattern = 'gs://gcsio-test/copy_me_%d'
to_name_pattern = 'gs://gcsio-test/destination_%d'
file_size = 1024
num_files = 10
# Test copy of non-existent files.
result = self.gcs.copy_batch(
[(from_name_pattern % i, to_name_pattern % i)
for i in range(num_files)])
self.assertTrue(result)
for i, (src, dest, exception) in enumerate(result):
self.assertEqual(src, from_name_pattern % i)
self.assertEqual(dest, to_name_pattern % i)
self.assertTrue(isinstance(exception, IOError))
self.assertEqual(exception.errno, errno.ENOENT)
self.assertFalse(self.gcs.exists(from_name_pattern % i))
self.assertFalse(self.gcs.exists(to_name_pattern % i))
# Insert some files.
for i in range(num_files):
self._insert_random_file(self.client, from_name_pattern % i, file_size)
# Check files inserted properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(from_name_pattern % i))
# Execute batch copy.
self.gcs.copy_batch([(from_name_pattern % i, to_name_pattern % i)
for i in range(num_files)])
# Check files copied properly.
for i in range(num_files):
self.assertTrue(self.gcs.exists(from_name_pattern % i))
self.assertTrue(self.gcs.exists(to_name_pattern % i))
def test_copytree(self):
src_dir_name = 'gs://gcsio-test/source/'
dest_dir_name = 'gs://gcsio-test/dest/'
file_size = 1024
paths = ['a', 'b/c', 'b/d']
for path in paths:
src_file_name = src_dir_name + path
dest_file_name = dest_dir_name + path
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.copytree(src_dir_name, dest_dir_name)
for path in paths:
src_file_name = src_dir_name + path
dest_file_name = dest_dir_name + path
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
def test_rename(self):
src_file_name = 'gs://gcsio-test/source'
dest_file_name = 'gs://gcsio-test/dest'
file_size = 1024
self._insert_random_file(self.client, src_file_name, file_size)
self.assertTrue(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertFalse(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
self.gcs.rename(src_file_name, dest_file_name)
self.assertFalse(
gcsio.parse_gcs_path(src_file_name) in self.client.objects.files)
self.assertTrue(
gcsio.parse_gcs_path(dest_file_name) in self.client.objects.files)
def test_full_file_read(self):
file_name = 'gs://gcsio-test/full_file'
file_size = 5 * 1024 * 1024 + 100
random_file = self._insert_random_file(self.client, file_name, file_size)
f = self.gcs.open(file_name)
self.assertEqual(f.mode, 'r')
f.seek(0, os.SEEK_END)
self.assertEqual(f.tell(), file_size)
self.assertEqual(f.read(), '')
f.seek(0)
self.assertEqual(f.read(), random_file.contents)
  def test_flaky_file_read(self):
    """Reads survive hung segment fetches, propagate errors, and give up
    after the retry limit."""
    file_name = 'gs://gcsio-test/flaky_file'
    file_size = 5 * 1024 * 1024 + 100
    random_file = self._insert_random_file(self.client, file_name, file_size)
    f = self.gcs.open(file_name)
    random.seed(0)
    # Small buffer and tiny segment timeout so many fetches (and thus
    # timeout-driven retries) happen quickly.
    f.buffer_size = 1024 * 1024
    f.segment_timeout = 0.01
    self.assertEqual(f.mode, 'r')
    # Keep a handle on the real fetch so the flaky wrapper can delegate.
    f._real_get_segment = f._get_segment
    def flaky_get_segment(start, size):
      # Roughly one in four fetches "hangs" (sleeps far past the timeout).
      if random.randint(0, 3) == 1:
        time.sleep(600)
      return f._real_get_segment(start, size)
    f._get_segment = flaky_get_segment
    self.assertEqual(f.read(), random_file.contents)
    # Test exception handling in file read.
    def failing_get_segment(unused_start, unused_size):
      raise IOError('Could not read.')
    f._get_segment = failing_get_segment
    f.seek(0)
    with self.assertRaises(IOError):
      f.read()
    # Test retry limit in hanging file read.
    def hanging_get_segment(unused_start, unused_size):
      time.sleep(600)
    f._get_segment = hanging_get_segment
    f.seek(0)
    with self.assertRaises(gcsio.GcsIOError):
      f.read()
def test_file_random_seek(self):
file_name = 'gs://gcsio-test/seek_file'
file_size = 5 * 1024 * 1024 - 100
random_file = self._insert_random_file(self.client, file_name, file_size)
f = self.gcs.open(file_name)
random.seed(0)
for _ in range(0, 10):
a = random.randint(0, file_size - 1)
b = random.randint(0, file_size - 1)
start, end = min(a, b), max(a, b)
f.seek(start)
self.assertEqual(f.tell(), start)
self.assertEqual(
f.read(end - start + 1), random_file.contents[start:end + 1])
self.assertEqual(f.tell(), end + 1)
def test_file_iterator(self):
file_name = 'gs://gcsio-test/iterating_file'
lines = []
line_count = 10
for _ in range(line_count):
line_length = random.randint(100, 500)
line = os.urandom(line_length).replace('\n', ' ') + '\n'
lines.append(line)
contents = ''.join(lines)
bucket, name = gcsio.parse_gcs_path(file_name)
self.client.objects.add_file(FakeFile(bucket, name, contents, 1))
f = self.gcs.open(file_name)
read_lines = 0
for line in f:
read_lines += 1
self.assertEqual(read_lines, line_count)
  def test_file_read_line(self):
    """readline() across buffer refills, line boundaries, EOF and random
    mid-line seeks."""
    file_name = 'gs://gcsio-test/read_line_file'
    lines = []
    # Set a small buffer size to exercise refilling the buffer.
    # First line is carefully crafted so the newline falls as the last character
    # of the buffer to exercise this code path.
    read_buffer_size = 1024
    lines.append('x' * 1023 + '\n')
    for _ in range(1, 1000):
      line_length = random.randint(100, 500)
      line = os.urandom(line_length).replace('\n', ' ') + '\n'
      lines.append(line)
    contents = ''.join(lines)
    file_size = len(contents)
    bucket, name = gcsio.parse_gcs_path(file_name)
    self.client.objects.add_file(FakeFile(bucket, name, contents, 1))
    f = self.gcs.open(file_name, read_buffer_size=read_buffer_size)
    # Test read of first two lines.
    f.seek(0)
    self.assertEqual(f.readline(), lines[0])
    self.assertEqual(f.tell(), len(lines[0]))
    self.assertEqual(f.readline(), lines[1])
    # Test read at line boundary.
    f.seek(file_size - len(lines[-1]) - 1)
    self.assertEqual(f.readline(), '\n')
    # Test read at end of file.
    f.seek(file_size)
    self.assertEqual(f.readline(), '')
    # Test reads at random positions.
    random.seed(0)
    for _ in range(0, 10):
      start = random.randint(0, file_size - 1)
      line_index = 0
      # Find line corresponding to start index.
      chars_left = start
      while True:
        next_line_length = len(lines[line_index])
        if chars_left - next_line_length < 0:
          break
        chars_left -= next_line_length
        line_index += 1
      f.seek(start)
      # readline() from mid-line must return the remainder of that line.
      self.assertEqual(f.readline(), lines[line_index][chars_left:])
def test_file_write(self):
file_name = 'gs://gcsio-test/write_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents[0:1000])
f.write(contents[1000:1024 * 1024])
f.write(contents[1024 * 1024:])
f.close()
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_file_close(self):
file_name = 'gs://gcsio-test/close_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents)
f.close()
f.close() # This should not crash.
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_file_flush(self):
file_name = 'gs://gcsio-test/flush_file'
file_size = 5 * 1024 * 1024 + 2000
contents = os.urandom(file_size)
bucket, name = gcsio.parse_gcs_path(file_name)
f = self.gcs.open(file_name, 'w')
self.assertEqual(f.mode, 'w')
f.write(contents[0:1000])
f.flush()
f.write(contents[1000:1024 * 1024])
f.flush()
f.flush() # Should be a NOOP.
f.write(contents[1024 * 1024:])
f.close() # This should already call the equivalent of flush() in its body.
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
def test_context_manager(self):
# Test writing with a context manager.
file_name = 'gs://gcsio-test/context_manager_file'
file_size = 1024
contents = os.urandom(file_size)
with self.gcs.open(file_name, 'w') as f:
f.write(contents)
bucket, name = gcsio.parse_gcs_path(file_name)
self.assertEqual(
self.client.objects.get_file(bucket, name).contents, contents)
# Test reading with a context manager.
with self.gcs.open(file_name) as f:
self.assertEqual(f.read(), contents)
# Test that exceptions are not swallowed by the context manager.
with self.assertRaises(ZeroDivisionError):
with self.gcs.open(file_name) as f:
f.read(0 / 0)
  def test_glob(self):
    """glob() matches *, ? and [] patterns and honors the limit argument."""
    bucket_name = 'gcsio-test'
    object_names = [
        'cow/cat/fish',
        'cow/cat/blubber',
        'cow/dog/blubber',
        'apple/dog/blubber',
        'apple/fish/blubber',
        'apple/fish/blowfish',
        'apple/fish/bambi',
        'apple/fish/balloon',
        'apple/fish/cat',
        'apple/fish/cart',
        'apple/fish/carl',
        'apple/dish/bat',
        'apple/dish/cat',
        'apple/dish/carl',
    ]
    for object_name in object_names:
      file_name = 'gs://%s/%s' % (bucket_name, object_name)
      self._insert_random_file(self.client, file_name, 0)
    # (pattern, expected matching object names) pairs.
    test_cases = [
        ('gs://gcsio-test/*', [
            'cow/cat/fish',
            'cow/cat/blubber',
            'cow/dog/blubber',
            'apple/dog/blubber',
            'apple/fish/blubber',
            'apple/fish/blowfish',
            'apple/fish/bambi',
            'apple/fish/balloon',
            'apple/fish/cat',
            'apple/fish/cart',
            'apple/fish/carl',
            'apple/dish/bat',
            'apple/dish/cat',
            'apple/dish/carl',
        ]),
        ('gs://gcsio-test/cow/*', [
            'cow/cat/fish',
            'cow/cat/blubber',
            'cow/dog/blubber',
        ]),
        ('gs://gcsio-test/cow/ca*', [
            'cow/cat/fish',
            'cow/cat/blubber',
        ]),
        ('gs://gcsio-test/apple/[df]ish/ca*', [
            'apple/fish/cat',
            'apple/fish/cart',
            'apple/fish/carl',
            'apple/dish/cat',
            'apple/dish/carl',
        ]),
        ('gs://gcsio-test/apple/fish/car?', [
            'apple/fish/cart',
            'apple/fish/carl',
        ]),
        ('gs://gcsio-test/apple/fish/b*', [
            'apple/fish/blubber',
            'apple/fish/blowfish',
            'apple/fish/bambi',
            'apple/fish/balloon',
        ]),
        ('gs://gcsio-test/apple/dish/[cb]at', [
            'apple/dish/bat',
            'apple/dish/cat',
        ]),
    ]
    for file_pattern, expected_object_names in test_cases:
      expected_file_names = ['gs://%s/%s' % (bucket_name, o)
                             for o in expected_object_names]
      self.assertEqual(
          set(self.gcs.glob(file_pattern)), set(expected_file_names))
    # Check if limits are followed correctly
    limit = 3
    for file_pattern, expected_object_names in test_cases:
      expected_num_items = min(len(expected_object_names), limit)
      self.assertEqual(
          len(self.gcs.glob(file_pattern, limit)), expected_num_items)
  def test_size_of_files_in_glob(self):
    """size_of_files_in_glob() maps each matched file name to its size."""
    bucket_name = 'gcsio-test'
    # (object name, size in bytes) pairs inserted into the fake bucket.
    object_names = [
        ('cow/cat/fish', 2),
        ('cow/cat/blubber', 3),
        ('cow/dog/blubber', 4),
        ('apple/dog/blubber', 5),
        ('apple/fish/blubber', 6),
        ('apple/fish/blowfish', 7),
        ('apple/fish/bambi', 8),
        ('apple/fish/balloon', 9),
        ('apple/fish/cat', 10),
        ('apple/fish/cart', 11),
        ('apple/fish/carl', 12),
        ('apple/dish/bat', 13),
        ('apple/dish/cat', 14),
        ('apple/dish/carl', 15),
    ]
    for (object_name, size) in object_names:
      file_name = 'gs://%s/%s' % (bucket_name, object_name)
      self._insert_random_file(self.client, file_name, size)
    # (pattern, expected (name, size) matches) pairs.
    test_cases = [
        ('gs://gcsio-test/cow/*', [
            ('cow/cat/fish', 2),
            ('cow/cat/blubber', 3),
            ('cow/dog/blubber', 4),
        ]),
        ('gs://gcsio-test/apple/fish/car?', [
            ('apple/fish/cart', 11),
            ('apple/fish/carl', 12),
        ])
    ]
    for file_pattern, expected_object_names in test_cases:
      expected_file_sizes = {'gs://%s/%s' % (bucket_name, o): s
                             for (o, s) in expected_object_names}
      self.assertEqual(
          self.gcs.size_of_files_in_glob(file_pattern), expected_file_sizes)
@unittest.skipIf(HttpError is None, 'GCP dependencies are not installed')
class TestPipeStream(unittest.TestCase):
  """Tests for the pipe-backed stream used by GcsBufferedWriter."""

  def _read_and_verify(self, stream, expected, buffer_size):
    # Drain `stream` in buffer_size chunks, checking tell() bookkeeping and
    # that a short read only ever happens once, at end-of-stream.
    data_list = []
    bytes_read = 0
    seen_last_block = False
    while True:
      data = stream.read(buffer_size)
      self.assertLessEqual(len(data), buffer_size)
      if len(data) < buffer_size:
        # Test the constraint that the pipe stream returns less than the buffer
        # size only when at the end of the stream.
        if data:
          self.assertFalse(seen_last_block)
        seen_last_block = True
      if not data:
        break
      data_list.append(data)
      bytes_read += len(data)
      self.assertEqual(stream.tell(), bytes_read)
    self.assertEqual(''.join(data_list), expected)

  def test_pipe_stream(self):
    # Exponentially growing blocks exercise reads smaller than, equal to,
    # and larger than each buffer size.
    block_sizes = list(4**i for i in range(0, 12))
    data_blocks = list(os.urandom(size) for size in block_sizes)
    expected = ''.join(data_blocks)
    buffer_sizes = [100001, 512 * 1024, 1024 * 1024]
    for buffer_size in buffer_sizes:
      parent_conn, child_conn = multiprocessing.Pipe()
      stream = gcsio.GcsBufferedWriter.PipeStream(child_conn)
      # The reader runs in a thread while this thread feeds the pipe.
      child_thread = threading.Thread(
          target=self._read_and_verify, args=(stream, expected, buffer_size))
      child_thread.start()
      for data in data_blocks:
        parent_conn.send_bytes(data)
      parent_conn.close()
      child_thread.join()
# Standard test entry point: raise log verbosity and defer to unittest.
if __name__ == '__main__':
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
|
installwizard.py |
from functools import partial
import threading
import os
from typing import TYPE_CHECKING
from kivy.app import App
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.properties import ObjectProperty, StringProperty, OptionProperty
from kivy.core.window import Window
from kivy.uix.button import Button
from kivy.uix.togglebutton import ToggleButton
from kivy.utils import platform
from kivy.uix.widget import Widget
from kivy.core.window import Window
from kivy.clock import Clock
from kivy.utils import platform
from actilectrum.base_wizard import BaseWizard
from actilectrum.util import is_valid_email
from . import EventsDialog
from ...i18n import _
from .password_dialog import PasswordDialog
if TYPE_CHECKING:
from actilectrum.gui.kivy.main_window import ElectrumWindow
# global Variables
# Desktop ("linux") runs are treated as test mode so wizard fields are
# pre-filled with known values.
is_test = (platform == "linux")
# NOTE(review): an earlier test_seed assignment was immediately overwritten
# by this one; the dead first assignment has been removed.
test_seed = "time taxi field recycle tiny license olive virus report rare steel portion achieve"
test_xpub = "xpub661MyMwAqRbcEbvVtRRSjqxVnaWVUMewVzMiURAKyYratih4TtBpMypzzefmv8zUNebmNVzB3PojdC5sV2P9bDgMoo9B3SARw1MXUUfU1GL"
Builder.load_string('''
#:import Window kivy.core.window.Window
#:import _ actilectrum.gui.kivy.i18n._
<WizardTextInput@TextInput>
border: 4, 4, 4, 4
font_size: '15sp'
padding: '15dp', '15dp'
background_color: (1, 1, 1, 1) if self.focus else (0.454, 0.698, 0.909, 1)
foreground_color: (0.31, 0.31, 0.31, 1) if self.focus else (0.835, 0.909, 0.972, 1)
hint_text_color: self.foreground_color
background_active: 'atlas://actilectrum/gui/kivy/theming/light/create_act_text_active'
background_normal: 'atlas://actilectrum/gui/kivy/theming/light/create_act_text_active'
size_hint_y: None
height: '48sp'
<WizardButton@Button>:
root: None
size_hint: 1, None
height: '48sp'
on_press: if self.root: self.root.dispatch('on_press', self)
on_release: if self.root: self.root.dispatch('on_release', self)
<BigLabel@Label>
color: .854, .925, .984, 1
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
bold: True
<-WizardDialog>
text_color: .854, .925, .984, 1
value: ''
#auto_dismiss: False
size_hint: None, None
canvas.before:
Color:
rgba: .239, .588, .882, 1
Rectangle:
size: Window.size
crcontent: crcontent
# add actilectrum icon
BoxLayout:
orientation: 'vertical' if self.width < self.height else 'horizontal'
padding:
min(dp(27), self.width/32), min(dp(27), self.height/32),\
min(dp(27), self.width/32), min(dp(27), self.height/32)
spacing: '10dp'
GridLayout:
id: grid_logo
cols: 1
pos_hint: {'center_y': .5}
size_hint: 1, None
height: self.minimum_height
Label:
color: root.text_color
text: 'ACTILECTRUM'
size_hint: 1, None
height: self.texture_size[1] if self.opacity else 0
font_size: '33sp'
font_name: 'actilectrum/gui/kivy/data/fonts/tron/Tr2n.ttf'
GridLayout:
cols: 1
id: crcontent
spacing: '1dp'
Widget:
size_hint: 1, 0.3
GridLayout:
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
id: back
text: _('Back')
root: root
WizardButton:
id: next
text: _('Next')
root: root
disabled: root.value == ''
<WizardMultisigDialog>
value: 'next'
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: _("Choose the number of signatures needed to unlock funds in your wallet")
Widget
size_hint: 1, 1
GridLayout:
orientation: 'vertical'
cols: 2
spacing: '14dp'
size_hint: 1, 1
height: self.minimum_height
Label:
color: root.text_color
text: _('From {} cosigners').format(n.value)
Slider:
id: n
range: 2, 5
step: 1
value: 2
Label:
color: root.text_color
text: _('Require {} signatures').format(m.value)
Slider:
id: m
range: 1, n.value
step: 1
value: 2
Widget
size_hint: 1, 1
Label:
id: backup_warning_label
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
opacity: int(m.value != n.value)
text: _("Warning: to be able to restore a multisig wallet, " \
"you should include the master public key for each cosigner " \
"in all of your backups.")
<WizardChoiceDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
<WizardConfirmDialog>
message : ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
<WizardTOSDialog>
message : ''
size_hint: 1, 1
ScrollView:
size_hint: 1, 1
TextInput:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.minimum_height
text: root.message
disabled: True
<WizardEmailDialog>
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: 'Please enter your email address'
WizardTextInput:
id: email
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<WizardKnownOTPDialog>
message : ''
message2: ''
Widget:
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
Widget
size_hint: 1, 1
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
Widget
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
Widget
size_hint: 1, 1
height: '48sp'
BoxLayout:
orientation: 'horizontal'
WizardButton:
id: cb
text: _('Request new secret')
on_release: root.request_new_secret()
size_hint: 1, None
WizardButton:
id: abort
text: _('Abort creation')
on_release: root.abort_wallet_creation()
size_hint: 1, None
<WizardNewOTPDialog>
message : ''
message2 : ''
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message
QRCodeWidget:
id: qr
size_hint: 1, 1
Label:
color: root.text_color
size_hint: 1, None
text_size: self.width, None
height: self.texture_size[1]
text: root.message2
WizardTextInput:
id: otp
on_text: Clock.schedule_once(root.on_text)
multiline: False
on_text_validate: Clock.schedule_once(root.on_enter)
<MButton@Button>:
size_hint: 1, None
height: '33dp'
on_release:
self.parent.update_amount(self.text)
<WordButton@Button>:
size_hint: None, None
padding: '5dp', '5dp'
text_size: None, self.height
width: self.texture_size[0]
height: '30dp'
on_release:
self.parent.new_word(self.text)
<SeedButton@Button>:
height: dp(100)
border: 4, 4, 4, 4
halign: 'justify'
valign: 'top'
font_size: '18dp'
text_size: self.width - dp(24), self.height - dp(12)
color: .1, .1, .1, 1
background_normal: 'atlas://actilectrum/gui/kivy/theming/light/white_bg_round_top'
background_down: self.background_normal
size_hint_y: None
<SeedLabel@Label>:
font_size: '12sp'
text_size: self.width, None
size_hint: 1, None
height: self.texture_size[1]
halign: 'justify'
valign: 'middle'
border: 4, 4, 4, 4
<SeedDialogHeader@GridLayout>
text: ''
options_dialog: None
rows: 1
orientation: 'horizontal'
size_hint: 1, None
height: self.minimum_height
BigLabel:
size_hint: 9, None
text: root.text
IconButton:
id: options_button
height: '30dp'
width: '30dp'
size_hint: 1, None
icon: 'atlas://actilectrum/gui/kivy/theming/light/gear'
on_release:
root.options_dialog() if root.options_dialog else None
<RestoreSeedDialog>
message: ''
word: ''
SeedDialogHeader:
id: seed_dialog_header
text: 'ENTER YOUR SEED PHRASE'
options_dialog: root.options_dialog
GridLayout:
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input_seed
text: ''
on_text: Clock.schedule_once(root.on_text)
SeedLabel:
text: root.message
BoxLayout:
id: suggestions
height: '35dp'
size_hint: 1, None
new_word: root.on_word
BoxLayout:
id: line1
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
MButton:
text: 'Q'
MButton:
text: 'W'
MButton:
text: 'E'
MButton:
text: 'R'
MButton:
text: 'T'
MButton:
text: 'Y'
MButton:
text: 'U'
MButton:
text: 'I'
MButton:
text: 'O'
MButton:
text: 'P'
BoxLayout:
id: line2
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 0.5, None
height: '33dp'
MButton:
text: 'A'
MButton:
text: 'S'
MButton:
text: 'D'
MButton:
text: 'F'
MButton:
text: 'G'
MButton:
text: 'H'
MButton:
text: 'J'
MButton:
text: 'K'
MButton:
text: 'L'
Widget:
size_hint: 0.5, None
height: '33dp'
BoxLayout:
id: line3
update_amount: root.update_text
size_hint: 1, None
height: '30dp'
Widget:
size_hint: 1, None
MButton:
text: 'Z'
MButton:
text: 'X'
MButton:
text: 'C'
MButton:
text: 'V'
MButton:
text: 'B'
MButton:
text: 'N'
MButton:
text: 'M'
MButton:
text: ' '
MButton:
text: '<'
<AddXpubDialog>
title: ''
message: ''
BigLabel:
text: root.title
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: ''
on_text: Clock.schedule_once(root.check_text)
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
IconButton:
id: scan
height: '48sp'
on_release: root.scan_xpub()
icon: 'atlas://actilectrum/gui/kivy/theming/light/camera'
size_hint: 1, None
WizardButton:
text: _('Paste')
on_release: root.do_paste()
WizardButton:
text: _('Clear')
on_release: root.do_clear()
<ShowXpubDialog>
xpub: ''
message: _('Here is your master public key. Share it with your cosigners.')
BigLabel:
text: "MASTER PUBLIC KEY"
GridLayout
cols: 1
padding: 0, '12dp'
orientation: 'vertical'
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
SeedButton:
id: text_input
text: root.xpub
SeedLabel:
text: root.message
GridLayout
rows: 1
spacing: '12dp'
size_hint: 1, None
height: self.minimum_height
WizardButton:
text: _('QR code')
on_release: root.do_qr()
WizardButton:
text: _('Copy')
on_release: root.do_copy()
WizardButton:
text: _('Share')
on_release: root.do_share()
<ShowSeedDialog>
spacing: '12dp'
value: 'next'
SeedDialogHeader:
text: "PLEASE WRITE DOWN YOUR SEED PHRASE"
options_dialog: root.options_dialog
GridLayout:
id: grid
cols: 1
pos_hint: {'center_y': .5}
size_hint_y: None
height: self.minimum_height
orientation: 'vertical'
spacing: '12dp'
SeedButton:
text: root.seed_text
SeedLabel:
text: root.message
<LineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message
TextInput:
id: passphrase_input
multiline: False
size_hint: 1, None
height: '48dp'
SeedLabel:
text: root.warning
<ChoiceLineDialog>
BigLabel:
text: root.title
SeedLabel:
text: root.message1
GridLayout:
row_default_height: '48dp'
orientation: 'vertical'
id: choices
cols: 1
spacing: '14dp'
size_hint: 1, None
SeedLabel:
text: root.message2
TextInput:
id: text_input
multiline: False
size_hint: 1, None
height: '48dp'
''')
class WizardDialog(EventsDialog):
    ''' Abstract dialog to be used as the base for all Create Account Dialogs
    '''
    # Content container defined in the kv rule; children are routed into it.
    crcontent = ObjectProperty(None)

    def __init__(self, wizard, **kwargs):
        """Wire this dialog to its wizard and bind window events.

        `wizard` drives navigation (can_go_back/go_back); kwargs['run_next']
        is called with get_params() when the user advances.
        """
        self.auto_dismiss = False
        super(WizardDialog, self).__init__()
        self.wizard = wizard
        self.ids.back.disabled = not wizard.can_go_back()
        self.app = App.get_running_app()
        self.run_next = kwargs['run_next']
        self._trigger_size_dialog = Clock.create_trigger(self._size_dialog)
        # note: everything bound here needs to be unbound as otherwise the
        # objects will be kept around and keep receiving the callbacks
        Window.bind(size=self._trigger_size_dialog,
                    rotation=self._trigger_size_dialog,
                    on_keyboard=self.on_keyboard)
        self._trigger_size_dialog()
        # True once the dialog was closed via a button press (vs. dismissed).
        self._on_release = False

    def _size_dialog(self, dt):
        # Phones fill the window; tablets get a size based on orientation.
        app = App.get_running_app()
        if app.ui_mode[0] == 'p':
            self.size = Window.size
        else:
            #tablet
            if app.orientation[0] == 'p':
                #portrait
                self.size = Window.size[0]/1.67, Window.size[1]/1.4
            else:
                self.size = Window.size[0]/2.5, Window.size[1]

    def add_widget(self, widget, index=0):
        # Route children into the content area once it exists.
        if not self.crcontent:
            super(WizardDialog, self).add_widget(widget)
        else:
            self.crcontent.add_widget(widget, index=index)

    def on_keyboard(self, instance, key, keycode, codepoint, modifier):
        # Back/ESC (key 27): step back in the wizard, or require a second
        # press to exit the app when there is nowhere to go back to.
        if key == 27:
            if self.wizard.can_go_back():
                self.wizard.go_back()
            else:
                app = App.get_running_app()
                if not app.is_exit:
                    app.is_exit = True
                    app.show_info(_('Press again to exit'))
                else:
                    self._on_release = False
                    self.dismiss()
            return True

    def on_dismiss(self):
        # Unbind everything bound in __init__ (see note there).
        Window.unbind(size=self._trigger_size_dialog,
                      rotation=self._trigger_size_dialog,
                      on_keyboard=self.on_keyboard)
        app = App.get_running_app()
        # Dismissing with no wallet and no button press means the user
        # backed out of wallet creation entirely: stop the app.
        if app.wallet is None and not self._on_release:
            app.stop()

    def get_params(self, button):
        """Return the argument tuple passed on to run_next; overridden by
        subclasses."""
        return (None,)

    def on_release(self, button):
        # Central button handler: Back navigates, anything else advances.
        self._on_release = True
        self.close()
        if not button:
            # Closed without a choice: report wizard completion with no result.
            self.parent.dispatch('on_wizard_complete', None, None)
            return
        if button is self.ids.back:
            self.wizard.go_back()
            return
        params = self.get_params(button)
        self.run_next(*params)
class WizardMultisigDialog(WizardDialog):

    def get_params(self, button):
        """Return (m, n): required signatures and total cosigners, as set
        by the two sliders."""
        sliders = self.ids
        return sliders.m.value, sliders.n.value
class WizardOTPDialogBase(WizardDialog):
    """Shared logic for the TrustedCoin one-time-password dialogs."""

    def get_otp(self):
        """Return the entered OTP as an int, or None if it is not a
        well-formed 6-digit code."""
        otp = self.ids.otp.text
        if len(otp) != 6:
            return
        try:
            return int(otp)
        except ValueError:
            # Narrowed from a bare `except:`: only non-numeric input is an
            # expected failure here; anything else should propagate.
            return

    def on_text(self, dt):
        # Enable Next only once a well-formed OTP has been typed.
        self.ids.next.disabled = self.get_otp() is None

    def on_enter(self, dt):
        # Enter key acts like pressing the (enabled) Next button.
        # (local renamed from `next`, which shadowed the builtin)
        next_button = self.ids.next
        if not next_button.disabled:
            next_button.dispatch('on_release')
class WizardKnownOTPDialog(WizardOTPDialogBase):
    """OTP prompt for a wallet already registered with TrustedCoin."""

    def __init__(self, wizard, **kwargs):
        WizardOTPDialogBase.__init__(self, wizard, **kwargs)
        self.message = _("This wallet is already registered with TrustedCoin. To finalize wallet creation, please enter your Google Authenticator Code.")
        self.message2 =_("If you have lost your Google Authenticator account, you can request a new secret. You will need to retype your seed.")
        # Set to True when the user asks for a fresh OTP secret instead.
        self.request_new = False

    def get_params(self, button):
        return (self.get_otp(), self.request_new)

    def request_new_secret(self):
        # Flag the request and advance as if Next had been pressed.
        self.request_new = True
        self.on_release(True)

    def abort_wallet_creation(self):
        # Mark as a deliberate close so on_dismiss does not stop the app.
        self._on_release = True
        self.wizard.terminate(aborted=True)
        self.dismiss()
class WizardNewOTPDialog(WizardOTPDialogBase):
    """OTP prompt showing a freshly issued TrustedCoin secret as a QR code."""

    def __init__(self, wizard, **kwargs):
        WizardOTPDialogBase.__init__(self, wizard, **kwargs)
        otp_secret = kwargs['otp_secret']
        # otpauth:// URI understood by Google Authenticator and compatibles.
        uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
        self.message = "Please scan the following QR code in Google Authenticator. You may also use the secret key: %s"%otp_secret
        self.message2 = _('Then, enter your Google Authenticator code:')
        self.ids.qr.set_data(uri)

    def get_params(self, button):
        # A new secret was just issued, so never request another one here.
        return (self.get_otp(), False)
class WizardTOSDialog(WizardDialog):
    """Terms-of-service acceptance screen (TrustedCoin flow)."""

    def __init__(self, wizard, **kwargs):
        WizardDialog.__init__(self, wizard, **kwargs)
        # Nothing to validate: the only possible action is to accept.
        self.ids.next.text = 'Accept'
        self.ids.next.disabled = False
        self.message = kwargs['tos']
        self.message2 = _('Enter your email address:')
class WizardEmailDialog(WizardDialog):
    """Prompt for the email address used by the TrustedCoin service."""

    def get_params(self, button):
        return (self.ids.email.text,)

    def on_text(self, dt):
        # Next stays disabled until the address looks valid.
        self.ids.next.disabled = not is_valid_email(self.ids.email.text)

    def on_enter(self, dt):
        # Enter key acts like pressing the (enabled) Next button.
        # (local renamed from `next`, which shadowed the builtin)
        next_button = self.ids.next
        if not next_button.disabled:
            next_button.dispatch('on_release')
class WizardConfirmDialog(WizardDialog):
    """Simple message + confirmation step; always confirms with True."""

    def __init__(self, wizard, **kwargs):
        super(WizardConfirmDialog, self).__init__(wizard, **kwargs)
        self.message = kwargs.get('message', '')
        self.value = 'ok'

    def on_parent(self, instance, value):
        if value:
            app = App.get_running_app()
            # NOTE(review): the local `_back` is never read afterwards;
            # presumably only self._back matters — confirm before cleanup.
            self._back = _back = partial(app.dispatch, 'on_back')

    def get_params(self, button):
        return (True,)
class WizardChoiceDialog(WizardDialog):
    """Present a vertical list of buttons, one per available choice."""

    def __init__(self, wizard, **kwargs):
        super(WizardChoiceDialog, self).__init__(wizard, **kwargs)
        # NOTE(review): title is populated from the 'message' kwarg (not a
        # 'title' kwarg) — verify against callers before changing.
        self.title = kwargs.get('message', '')
        self.message = kwargs.get('message', '')
        # choices: list of (action, label) pairs; each becomes a button.
        choices = kwargs.get('choices', [])
        self.init_choices(choices)

    def init_choices(self, choices):
        layout = self.ids.choices
        # Grow the container as buttons are added.
        layout.bind(minimum_height=layout.setter('height'))
        for action, text in choices:
            l = WizardButton(text=text)
            l.action = action
            l.height = '48dp'
            l.root = self
            layout.add_widget(l)

    def on_parent(self, instance, value):
        if value:
            app = App.get_running_app()
            self._back = _back = partial(app.dispatch, 'on_back')

    def get_params(self, button):
        # The pressed button carries the action chosen by the user.
        return (button.action,)
class LineDialog(WizardDialog):
    """Single-line free-text input step (e.g. passphrase entry)."""
    title = StringProperty('')
    message = StringProperty('')
    warning = StringProperty('')

    def __init__(self, wizard, **kwargs):
        WizardDialog.__init__(self, wizard, **kwargs)
        self.title = kwargs.get('title', '')
        self.message = kwargs.get('message', '')
        # Empty input is acceptable, so Next is always available.
        self.ids.next.disabled = False

    def get_params(self, b):
        return (self.ids.passphrase_input.text,)
class CLButton(ToggleButton):
    # Choice button used by ChoiceLineDialog: selecting it records the
    # associated script type and fills the text input with its value.
    def on_release(self):
        self.root.script_type = self.script_type
        self.root.set_text(self.value)
class ChoiceLineDialog(WizardChoiceDialog):
    """Choice buttons combined with an editable text line (e.g. choosing a
    script type plus a derivation path)."""
    title = StringProperty('')
    message1 = StringProperty('')
    message2 = StringProperty('')

    def __init__(self, wizard, **kwargs):
        # Deliberately calls WizardDialog.__init__ (not the parent class):
        # choices here are CLButtons built below, not WizardButtons.
        WizardDialog.__init__(self, wizard, **kwargs)
        self.title = kwargs.get('title', '')
        self.message1 = kwargs.get('message1', '')
        self.message2 = kwargs.get('message2', '')
        # choices: list of (script_type, button_title, input_text) triples.
        self.choices = kwargs.get('choices', [])
        default_choice_idx = kwargs.get('default_choice_idx', 0)
        self.ids.next.disabled = False
        layout = self.ids.choices
        layout.bind(minimum_height=layout.setter('height'))
        for idx, (script_type, title, text) in enumerate(self.choices):
            b = CLButton(text=title, height='30dp', group=self.title, allow_no_selection=False)
            b.script_type = script_type
            b.root = self
            b.value = text
            layout.add_widget(b)
            if idx == default_choice_idx:
                # Simulate a press so the default choice is pre-selected.
                b.trigger_action(duration=0)

    def set_text(self, value):
        # Called by CLButton.on_release with the button's associated text.
        self.ids.text_input.text = value

    def get_params(self, b):
        return (self.ids.text_input.text, self.script_type)
class ShowSeedDialog(WizardDialog):
    """Display the newly generated seed for the user to write down."""
    seed_text = StringProperty('')
    message = _("If you forget your PIN or lose your device, your seed phrase will be the only way to recover your funds.")
    # Whether the seed is extended with a custom passphrase.
    ext = False

    def __init__(self, wizard, **kwargs):
        super(ShowSeedDialog, self).__init__(wizard, **kwargs)
        self.seed_text = kwargs['seed_text']

    def on_parent(self, instance, value):
        if value:
            app = App.get_running_app()
            # Hardware back should behave like the dialog's Back button.
            self._back = _back = partial(self.ids.back.dispatch, 'on_release')

    def options_dialog(self):
        # Open the seed options popup (extend-with-passphrase toggle).
        from .seed_options import SeedOptionsDialog
        def callback(ext, _):
            self.ext = ext
        d = SeedOptionsDialog(self.ext, None, callback)
        d.open()

    def get_params(self, b):
        return (self.ext,)
class WordButton(Button):
    # Suggestion button: tapping it completes the current seed word.
    pass
class WizardButton(Button):
    # Standard full-width wizard action button; styling comes from the
    # <WizardButton@Button> kv rule.
    pass
class RestoreSeedDialog(WizardDialog):

    def __init__(self, wizard, **kwargs):
        """Seed-entry dialog with word suggestions and a virtual keyboard.

        kwargs['test'] is a predicate judging whether the entered text is a
        valid seed phrase.
        """
        super(RestoreSeedDialog, self).__init__(wizard, **kwargs)
        self._test = kwargs['test']
        from actilectrum.mnemonic import Mnemonic
        from actilectrum.old_mnemonic import wordlist as old_wordlist
        # Accept words from both the current and the legacy wordlists.
        self.words = set(Mnemonic('en').wordlist).union(set(old_wordlist))
        # Pre-fill with a known seed when running in desktop test mode.
        self.ids.text_input_seed.text = test_seed if is_test else ''
        self.message = _('Please type your seed phrase using the virtual keyboard.')
        self.title = _('Enter Seed')
        self.ext = False
        self.bip39 = False
def options_dialog(self):
from .seed_options import SeedOptionsDialog
def callback(ext, bip39):
self.ext = ext
self.bip39 = bip39
self.update_next_button()
d = SeedOptionsDialog(self.ext, self.bip39, callback)
d.open()
def get_suggestions(self, prefix):
for w in self.words:
if w.startswith(prefix):
yield w
def update_next_button(self):
self.ids.next.disabled = False if self.bip39 else not bool(self._test(self.get_text()))
def on_text(self, dt):
self.update_next_button()
text = self.ids.text_input_seed.text
if not text:
last_word = ''
elif text[-1] == ' ':
last_word = ''
else:
last_word = text.split(' ')[-1]
enable_space = False
self.ids.suggestions.clear_widgets()
suggestions = [x for x in self.get_suggestions(last_word)]
if last_word in suggestions:
b = WordButton(text=last_word)
self.ids.suggestions.add_widget(b)
enable_space = True
for w in suggestions:
if w != last_word and len(suggestions) < 10:
b = WordButton(text=w)
self.ids.suggestions.add_widget(b)
i = len(last_word)
p = set()
for x in suggestions:
if len(x)>i: p.add(x[i])
for line in [self.ids.line1, self.ids.line2, self.ids.line3]:
for c in line.children:
if isinstance(c, Button):
if c.text in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ':
c.disabled = (c.text.lower() not in p) and bool(last_word)
elif c.text == ' ':
c.disabled = not enable_space
def on_word(self, w):
text = self.get_text()
words = text.split(' ')
words[-1] = w
text = ' '.join(words)
self.ids.text_input_seed.text = text + ' '
self.ids.suggestions.clear_widgets()
def get_text(self):
ti = self.ids.text_input_seed
return ' '.join(ti.text.strip().split())
def update_text(self, c):
c = c.lower()
text = self.ids.text_input_seed.text
if c == '<':
text = text[:-1]
else:
text += c
self.ids.text_input_seed.text = text
def on_parent(self, instance, value):
if value:
tis = self.ids.text_input_seed
tis.focus = True
#tis._keyboard.bind(on_key_down=self.on_key_down)
self._back = _back = partial(self.ids.back.dispatch,
'on_release')
app = App.get_running_app()
def on_key_down(self, keyboard, keycode, key, modifiers):
if keycode[0] in (13, 271):
self.on_enter()
return True
def on_enter(self):
#self._remove_keyboard()
# press next
next = self.ids.next
if not next.disabled:
next.dispatch('on_release')
def _remove_keyboard(self):
tis = self.ids.text_input_seed
if tis._keyboard:
tis._keyboard.unbind(on_key_down=self.on_key_down)
tis.focus = False
def get_params(self, b):
return (self.get_text(), self.bip39, self.ext)
class ConfirmSeedDialog(RestoreSeedDialog):
    """Seed re-entry step; seed options were already chosen, so they are locked."""
    def __init__(self, *args, **kwargs):
        RestoreSeedDialog.__init__(self, *args, **kwargs)
        self.ids.seed_dialog_header.ids.options_button.disabled = True

    def get_params(self, b):
        # Only the retyped text matters for confirmation.
        return (self.get_text(),)

    def options_dialog(self):
        # No seed options while confirming.
        pass
class ShowXpubDialog(WizardDialog):
    """Wizard step displaying a master public key with copy/share/QR actions."""
    def __init__(self, wizard, **kwargs):
        WizardDialog.__init__(self, wizard, **kwargs)
        self.xpub = kwargs['xpub']
        # nothing to validate here; the user may always continue
        self.ids.next.disabled = False

    def do_copy(self):
        self.app._clipboard.copy(self.xpub)

    def do_share(self):
        self.app.do_share(self.xpub, _("Master Public Key"))

    def do_qr(self):
        from .qr_dialog import QRDialog
        popup = QRDialog(_("Master Public Key"), self.xpub, True)
        popup.open()
class AddXpubDialog(WizardDialog):
    """Wizard step asking the user for a master public key (xpub).

    The key can be typed, pasted, or scanned from a QR code; the caller's
    `is_valid` predicate gates the Next button.
    """
    def __init__(self, wizard, **kwargs):
        WizardDialog.__init__(self, wizard, **kwargs)
        def is_valid(x):
            # The caller-supplied validator may raise on malformed input;
            # treat any failure as "invalid" rather than crashing the UI.
            # Bug fix: `except Exception` instead of a bare `except`, so
            # KeyboardInterrupt/SystemExit are not swallowed.
            try:
                return kwargs['is_valid'](x)
            except Exception:
                return False
        self.is_valid = is_valid
        self.title = kwargs['title']
        self.message = kwargs['message']
        # allow_multi: accept several keys, one per line (cosigner setup)
        self.allow_multi = kwargs.get('allow_multi', False)

    def check_text(self, dt):
        self.ids.next.disabled = not bool(self.is_valid(self.get_text()))

    def get_text(self):
        ti = self.ids.text_input
        return ti.text.strip()

    def get_params(self, button):
        return (self.get_text(),)

    def scan_xpub(self):
        def on_complete(text):
            if self.allow_multi:
                self.ids.text_input.text += text + '\n'
            else:
                self.ids.text_input.text = text
        self.app.scan_qr(on_complete)

    def do_paste(self):
        # NOTE(review): `test_xpub`/`is_test` are module globals not visible
        # in this chunk -- confirm they are always defined.
        self.ids.text_input.text = test_xpub if is_test else self.app._clipboard.paste()

    def do_clear(self):
        self.ids.text_input.text = ''
class InstallWizard(BaseWizard, Widget):
    '''
    events::
        `on_wizard_complete`
            Fired when the wizard is done creating/ restoring
            wallet/s.
    '''
    __events__ = ('on_wizard_complete', )

    def on_wizard_complete(self, storage, db):
        """overriden by main_window"""
        pass

    def waiting_dialog(self, task, msg, on_finished=None):
        '''Perform a blocking task in the background by running the passed
        method in a thread.
        '''
        def target():
            # run your threaded function
            try:
                task()
            except Exception as err:
                self.show_error(str(err))
            # on completion hide message
            Clock.schedule_once(lambda dt: app.info_bubble.hide(now=True), -1)
            if on_finished:
                def protected_on_finished():
                    try:
                        on_finished()
                    except Exception as e:
                        self.show_error(str(e))
                # marshal the callback back onto the UI thread
                Clock.schedule_once(lambda dt: protected_on_finished(), -1)
        app = App.get_running_app()
        app.show_info_bubble(
            text=msg, icon='atlas://actilectrum/gui/kivy/theming/light/important',
            pos=Window.center, width='200sp', arrow_pos=None, modal=True)
        t = threading.Thread(target = target)
        t.start()

    def terminate(self, *, storage=None, db=None, aborted=False):
        # Create the wallet storage now unless the wizard was aborted or the
        # caller already supplied one.
        if storage is None and not aborted:
            storage, db = self.create_storage(self.path)
        self.dispatch('on_wizard_complete', storage, db)

    def choice_dialog(self, **kwargs):
        choices = kwargs['choices']
        if len(choices) > 1:
            WizardChoiceDialog(self, **kwargs).open()
        else:
            # single choice: skip the dialog entirely
            f = kwargs['run_next']
            f(choices[0][0])

    # Thin wrappers: each opens the dialog for the matching wizard step.
    def multisig_dialog(self, **kwargs): WizardMultisigDialog(self, **kwargs).open()
    def show_seed_dialog(self, **kwargs): ShowSeedDialog(self, **kwargs).open()
    def line_dialog(self, **kwargs): LineDialog(self, **kwargs).open()
    def choice_and_line_dialog(self, **kwargs): ChoiceLineDialog(self, **kwargs).open()

    def confirm_seed_dialog(self, **kwargs):
        kwargs['title'] = _('Confirm Seed')
        kwargs['message'] = _('Please retype your seed phrase, to confirm that you properly saved it')
        ConfirmSeedDialog(self, **kwargs).open()

    def restore_seed_dialog(self, **kwargs):
        RestoreSeedDialog(self, **kwargs).open()

    def confirm_dialog(self, **kwargs):
        WizardConfirmDialog(self, **kwargs).open()

    def tos_dialog(self, **kwargs):
        WizardTOSDialog(self, **kwargs).open()

    def email_dialog(self, **kwargs):
        WizardEmailDialog(self, **kwargs).open()

    def otp_dialog(self, **kwargs):
        # A known secret means the OTP is being newly set up; otherwise ask
        # for the existing one.
        if kwargs['otp_secret']:
            WizardNewOTPDialog(self, **kwargs).open()
        else:
            WizardKnownOTPDialog(self, **kwargs).open()

    def add_xpub_dialog(self, **kwargs):
        kwargs['message'] += ' ' + _('Use the camera button to scan a QR code.')
        AddXpubDialog(self, **kwargs).open()

    def add_cosigner_dialog(self, **kwargs):
        kwargs['title'] = _("Add Cosigner") + " %d"%kwargs['index']
        kwargs['message'] = _('Please paste your cosigners master public key, or scan it using the camera button.')
        AddXpubDialog(self, **kwargs).open()

    def show_xpub_dialog(self, **kwargs): ShowXpubDialog(self, **kwargs).open()

    def show_message(self, msg): self.show_error(msg)

    def show_error(self, msg):
        # May be called from worker threads; marshal onto the UI thread.
        app = App.get_running_app()  # type: ElectrumWindow
        Clock.schedule_once(lambda dt: app.show_error(msg))

    def request_password(self, run_next, force_disable_encrypt_cb=False):
        """Ask the user to choose a PIN; calls run_next(password, encrypt)."""
        if force_disable_encrypt_cb:
            # do not request PIN for watching-only wallets
            run_next(None, False)
            return
        def on_success(old_pw, pw):
            assert old_pw is None
            run_next(pw, True)
        def on_failure():
            # mismatch: report and restart this wizard step
            self.show_error(_('Password mismatch'))
            self.run('request_password', run_next)
        popup = PasswordDialog()
        app = App.get_running_app()
        popup.init(
            app,
            check_password=lambda x:True,
            on_success=on_success,
            on_failure=on_failure,
            is_change=True,
            is_password=True,
            message=_('Choose a password'))
        popup.open()

    def action_dialog(self, action, run_next):
        # Run a named wizard action directly (no dialog involved).
        f = getattr(self, action)
        f()
|
fine_grained_tune.py | from neural_net import *
from threading import *
from data_utils import *
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.

    Returns (X_train, y_train, X_val, y_val, X_test, y_test); the X arrays
    are mean-centered and flattened to one row per image.
    """
    # Load the raw CIFAR-10 data.
    # Bug fix: raw string -- "\U" in a non-raw literal is an (invalid)
    # unicode escape and a SyntaxError on Python 3; the value is unchanged.
    cifar10_dir = r"C:\Users\Pomodori\workspace\cifar-10-batches-py"
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # Subsample the data
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]
    # Normalize the data: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    # Reshape data to rows
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)
    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
# Sanity-check the split sizes (Python 2 print statements).
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
def tryArgs(hidden_size,learning_rate,reg):
net = TwoLayerNet(3072, i, 10)
# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
num_iters=1000, batch_size=200,
learning_rate=learning_rate, learning_rate_decay=0.95,
reg=reg, verbose=False)
# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
f=open("fine_grained_nn.csv","a")
f.write(str(hidden_size)+','+str(learning_rate)+','+str(reg)+','+str(val_acc)+'\n')
f.close()
print hidden_size,learning_rate,reg, val_acc
# Hyper-parameter grid for the fine-grained search.
hidden_size = range(300,450,10)
reg=[0.05*2**i for i in range(-2,8)]
# NOTE(review): this handle is opened but never written or closed here;
# results actually go to fine_grained_nn.csv inside tryArgs -- confirm.
f=open("naive_nn.csv","w")
for i in hidden_size:
    for k in reg:
        # NOTE(review): the commented-out threaded variant references an
        # undefined `j`; it would need fixing before being re-enabled.
        # t=Thread(target=tryArgs,args=(i,j,k))
        # t.daemon=True
        # t.start()
        tryArgs(i,0.001,k)
|
test.py | # Copyright 2012 Mozilla Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json, platform, os, shutil, sys, subprocess, tempfile, threading
import time, urllib, urllib2, hashlib, re, base64, uuid, socket, errno
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn
from optparse import OptionParser
from urlparse import urlparse, parse_qs
from threading import Lock
USAGE_EXAMPLE = "%prog"
# The local web server uses the git repo as the document root.
DOC_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__),".."))
GIT_CLONE_CHECK = True  # refuse to run outside a pdf.js clone (see setUp)
DEFAULT_MANIFEST_FILE = 'test_manifest.json'
EQLOG_FILE = 'eq.log'            # reftest-analyzer compatible failure log
BROWSERLOG_FILE = 'browser.log'  # stdout/stderr of the launched browser
REFDIR = 'ref'                   # master reference snapshots
TEST_SNAPSHOTS = 'test_snapshots'
TMPDIR = 'tmp'                   # candidate references produced in master mode
VERBOSE = False
# seconds; presumably a browser liveness timeout -- used outside this chunk
BROWSER_TIMEOUT = 60
SERVER_HOST = "localhost"
lock = Lock()  # serializes POST-result bookkeeping across server threads
class TestOptions(OptionParser):
    """Command-line options for the pdf.js regression-test harness."""
    def __init__(self, **kwargs):
        OptionParser.__init__(self, **kwargs)
        self.add_option("-m", "--masterMode", action="store_true", dest="masterMode",
                        help="Run the script in master mode.", default=False)
        self.add_option("--noPrompts", action="store_true", dest="noPrompts",
                        help="Uses default answers (intended for CLOUD TESTS only!).", default=False)
        self.add_option("--manifestFile", action="store", type="string", dest="manifestFile",
                        help="A JSON file in the form of test_manifest.json (the default).")
        self.add_option("-b", "--browser", action="store", type="string", dest="browser",
                        help="The path to a single browser (right now, only Firefox is supported).")
        self.add_option("--browserManifestFile", action="store", type="string",
                        dest="browserManifestFile",
                        help="A JSON file in the form of those found in resources/browser_manifests")
        self.add_option("--reftest", action="store_true", dest="reftest",
                        help="Automatically start reftest showing comparison test failures, if there are any.",
                        default=False)
        self.add_option("--port", action="store", dest="port", type="int",
                        help="The port the HTTP server should listen on.", default=8080)
        self.add_option("--unitTest", action="store_true", dest="unitTest",
                        help="Run the unit tests.", default=False)
        self.add_option("--fontTest", action="store_true", dest="fontTest",
                        help="Run the font tests.", default=False)
        self.add_option("--noDownload", action="store_true", dest="noDownload",
                        help="Skips test PDFs downloading.", default=False)
        self.add_option("--ignoreDownloadErrors", action="store_true", dest="ignoreDownloadErrors",
                        help="Ignores errors during test PDFs downloading.", default=False)
        self.add_option("--statsFile", action="store", dest="statsFile", type="string",
                        help="The file where to store stats.", default=None)
        self.add_option("--statsDelay", action="store", dest="statsDelay", type="int",
                        help="The amount of time in milliseconds the browser should wait before starting stats.", default=10000)
        self.set_usage(USAGE_EXAMPLE)

    def verifyOptions(self, options):
        """Cross-validate parsed options, fill in defaults; returns options."""
        if options.reftest and (options.unitTest or options.fontTest):
            self.error("--reftest and --unitTest/--fontTest must not be specified at the same time.")
        if options.masterMode and options.manifestFile:
            self.error("--masterMode and --manifestFile must not be specified at the same time.")
        if not options.manifestFile:
            options.manifestFile = DEFAULT_MANIFEST_FILE
        if options.browser and options.browserManifestFile:
            print "Warning: ignoring browser argument since manifest file was also supplied"
        if not options.browser and not options.browserManifestFile:
            # server-only mode: no browser will be launched
            print "Starting server on port %s." % options.port
        if not options.statsFile:
            # no stats collection means no need to delay the browser
            options.statsDelay = 0
        return options
def prompt(question):
    '''Return True iff the user answered "yes" to |question|.'''
    answer = raw_input(question + ' [yes/no] > ')
    # Anything other than the literal string 'yes' counts as a refusal.
    return answer == 'yes'
# File-extension -> Content-Type map; only these extensions are ever served
# (do_GET returns 404 for anything else).
MIMEs = {
    '.css': 'text/css',
    '.html': 'text/html',
    '.js': 'application/javascript',
    '.json': 'application/json',
    '.svg': 'image/svg+xml',
    '.pdf': 'application/pdf',
    '.xhtml': 'application/xhtml+xml',
    '.gif': 'image/gif',
    '.ico': 'image/x-icon',
    '.png': 'image/png',
    '.log': 'text/plain',
    '.tif': 'image/tiff',
    '.tiff': 'image/tiff',
    '.properties': 'text/plain'
}
class State:
    # Shared mutable state for the reftest run; handlers mutate it under `lock`.
    browsers = [ ]      # browser command objects under test
    manifest = { }      # task id -> manifest entry
    taskResults = { }   # browser -> task id -> list (per round) of Result lists
    remaining = { }     # browser -> number of unfinished tasks
    results = { }
    done = False        # set by checkIfDone() once every browser finishes
    # failure counters, summarized in processResults()
    numErrors = 0
    numEqFailures = 0
    numEqNoSnapshot = 0
    numFBFFailures = 0
    numLoadFailures = 0
    eqLog = None        # lazily opened reftest-analyzer log file
    saveStats = False
    stats = [ ]
    lastPost = { }      # browser -> unix time of its last POST (liveness)
class UnitTestState:
    # Shared mutable state for the unit/font test run.
    browsers = [ ]
    browsersRunning = 0  # decremented as each browser reports /tellMeToQuit
    lastPost = { }       # browser -> unix time of its last POST (liveness)
    numErrors = 0
    numRun = 0
class Result:
    """Per-page outcome reported by a browser for one rendering round."""
    def __init__(self, snapshot, failure, page):
        # Stored verbatim; consumers sort on `page` and compare `snapshot`s.
        self.page = page
        self.failure = failure
        self.snapshot = snapshot
class TestServer(ThreadingMixIn, HTTPServer):
    # HTTPServer that handles each request in its own thread.
    pass
class TestHandlerBase(BaseHTTPRequestHandler):
# Disable annoying noise by default
def log_request(code=0, size=0):
if VERBOSE:
BaseHTTPRequestHandler.log_request(code, size)
def handle_one_request(self):
try:
BaseHTTPRequestHandler.handle_one_request(self)
except socket.error, v:
if v[0] == errno.ECONNRESET:
# Ignoring connection reset by peer exceptions
print 'Detected connection reset'
elif v[0] == errno.EPIPE:
print 'Detected remote peer disconnected'
elif v[0] == 10053:
print 'An established connection was aborted by the' \
' software in your host machine'
else:
raise
def finish(self,*args,**kw):
# From http://stackoverflow.com/a/14355079/1834797
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def sendFile(self, path, ext):
self.send_response(200)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Content-Type", MIMEs[ext])
self.send_header("Content-Length", os.path.getsize(path))
self.end_headers()
with open(path, "rb") as f:
self.wfile.write(f.read())
def sendFileRange(self, path, ext, start, end):
file_len = os.path.getsize(path)
if (end is None) or (file_len < end):
end = file_len
if (file_len < start) or (end <= start):
self.send_error(416)
return
chunk_len = end - start
time.sleep(chunk_len / 1000000.0)
self.send_response(206)
self.send_header("Accept-Ranges", "bytes")
self.send_header("Content-Type", MIMEs[ext])
self.send_header("Content-Length", chunk_len)
self.send_header("Content-Range", 'bytes ' + str(start) + '-' + str(end - 1) + '/' + str(file_len))
self.end_headers()
with open(path, "rb") as f:
f.seek(start)
self.wfile.write(f.read(chunk_len))
def do_GET(self):
url = urlparse(self.path)
# Ignore query string
path, _ = urllib.unquote_plus(url.path), url.query
path = os.path.abspath(os.path.realpath(DOC_ROOT + os.sep + path))
prefix = os.path.commonprefix(( path, DOC_ROOT ))
_, ext = os.path.splitext(path.lower())
if url.path == "/favicon.ico":
self.sendFile(os.path.join(DOC_ROOT, "test", "resources", "favicon.ico"), ext)
return
if os.path.isdir(path):
self.sendIndex(url.path, url.query)
return
if not (prefix == DOC_ROOT
and os.path.isfile(path)
and ext in MIMEs):
print path
self.send_error(404)
return
if 'Range' in self.headers:
range_re = re.compile(r"^bytes=(\d+)\-(\d+)?")
parsed_range = range_re.search(self.headers.getheader("Range"))
if parsed_range is None:
self.send_error(501)
return
if VERBOSE:
print 'Range requested %s - %s: %s' % (
parsed_range.group(1), parsed_range.group(2))
start = int(parsed_range.group(1))
if parsed_range.group(2) is None:
self.sendFileRange(path, ext, start, None)
else:
end = int(parsed_range.group(2)) + 1
self.sendFileRange(path, ext, start, end)
return
self.sendFile(path, ext)
class UnitTestHandler(TestHandlerBase):
    """Request handler for the unit/font test run."""

    def sendIndex(self, path, query):
        # Directory listings are not needed for unit tests.
        print "send index"

    def translateFont(self, base64Data):
        """Run a base64-encoded font through fontTools' ttx; reply with the XML."""
        self.send_response(200)
        self.send_header("Content-Type", "text/xml")
        self.end_headers()
        data = base64.b64decode(base64Data)
        # unique file names so concurrent requests cannot collide
        taskId = str(uuid.uuid4())
        fontPath = 'ttx/' + taskId + '.otf'
        resultPath = 'ttx/' + taskId + '.ttx'
        with open(fontPath, "wb") as f:
            f.write(data)
        # When fontTools used directly, we need to sniff the ttx script
        # to check what version of python is used
        ttxPath = ''
        for path in os.environ["PATH"].split(os.pathsep):
            if os.path.isfile(path + os.sep + "ttx"):
                ttxPath = path + os.sep + "ttx"
                break
        if ttxPath == '':
            self.wfile.write("<error>TTX was not found</error>")
            return
        ttxRunner = ''
        with open(ttxPath, "r") as f:
            # honor the ttx script's shebang interpreter if it names python
            firstLine = f.readline()
            if firstLine[:2] == '#!' and firstLine.find('python') > -1:
                ttxRunner = firstLine[2:].strip()
        with open(os.devnull, "w") as fnull:
            if ttxRunner != '':
                result = subprocess.call([ttxRunner, ttxPath, fontPath], stdout = fnull)
            else:
                result = subprocess.call([ttxPath, fontPath], stdout = fnull)
        os.remove(fontPath)
        if not os.path.isfile(resultPath):
            self.wfile.write("<error>Output was not generated</error>")
            return
        with open(resultPath, "rb") as f:
            self.wfile.write(f.read())
        os.remove(resultPath)
        return

    def do_POST(self):
        # `lock` serializes all result bookkeeping across handler threads.
        with lock:
            url = urlparse(self.path)
            numBytes = int(self.headers['Content-Length'])
            content = self.rfile.read(numBytes)
            # Process special utility requests
            if url.path == '/ttx':
                self.translateFont(content)
                return
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            result = json.loads(content)
            browser = result['browser']
            # liveness timestamp for the hang watchdog
            UnitTestState.lastPost[browser] = int(time.time())
            if url.path == "/tellMeToQuit":
                tellAppToQuit(url.path, url.query)
                UnitTestState.browsersRunning -= 1
                UnitTestState.lastPost[browser] = None
                return
            elif url.path == '/info':
                print result['message']
            elif url.path == '/submit_task_results':
                status, description = result['status'], result['description']
                UnitTestState.numRun += 1
                if status == 'TEST-UNEXPECTED-FAIL':
                    UnitTestState.numErrors += 1
                message = status + ' | ' + description + ' | in ' + browser
                if 'error' in result:
                    message += ' | ' + result['error']
                print message
            else:
                # NOTE(review): 'uknown' is a typo and a space is missing
                # before url.path -- log-message only; behavior unaffected.
                print 'Error: uknown action' + url.path
class PDFTestHandler(TestHandlerBase):
    """Request handler for the reftest run: serves listings, collects results."""

    def sendIndex(self, path, query):
        """Render a simple HTML listing of the TIFFs in the requested directory."""
        if not path.endswith("/"):
            # we need trailing slash
            self.send_response(301)
            redirectLocation = path + "/"
            if query:
                redirectLocation += "?" + query
            self.send_header("Location", redirectLocation)
            self.end_headers()
            return
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        if query == "frame":
            # wrap the listing in a frameset with a viewer target frame
            self.wfile.write("<html><frameset cols=*,200><frame name=pdf>" +
                             "<frame src='" + path + "'></frameset></html>")
            return
        location = os.path.abspath(os.path.realpath(DOC_ROOT + os.sep + path))
        self.wfile.write("<html><body><h1>TIFFs of " + path + "</h1>\n")
        for filename in os.listdir(location):
            if filename.lower().endswith('.tif') or filename.lower().endswith('.tiff'):
                self.wfile.write("<a href='/web/viewer.html?file=" +
                                 urllib.quote_plus(path + filename, '/') + "' target=pdf>" +
                                 filename + "</a><br>\n")
        self.wfile.write("</body></html>")

    def do_POST(self):
        # `lock` serializes result bookkeeping across handler threads.
        with lock:
            numBytes = int(self.headers['Content-Length'])
            self.send_response(200)
            self.send_header('Content-Type', 'text/plain')
            self.end_headers()
            url = urlparse(self.path)
            if url.path == "/tellMeToQuit":
                tellAppToQuit(url.path, url.query)
                return
            result = json.loads(self.rfile.read(numBytes))
            browser = result['browser']
            # liveness timestamp for the hang watchdog
            State.lastPost[browser] = int(time.time())
            if url.path == "/info":
                print result['message']
                return
            id = result['id']
            failure = result['failure']
            round = result['round']
            page = result['page']
            snapshot = result['snapshot']
            taskResults = State.taskResults[browser][id]
            taskResults[round].append(Result(snapshot, failure, page))
            if State.saveStats:
                stat = {
                    'browser': browser,
                    'pdf': id,
                    'page': page,
                    'round': round,
                    'stats': result['stats']
                }
                State.stats.append(stat)

            def isTaskDone():
                # Done when every round has reported up to the last page.
                last_page_num = result['lastPageNum']
                rounds = State.manifest[id]['rounds']
                for round in range(0,rounds):
                    if not taskResults[round]:
                        return False
                    latest_page = taskResults[round][-1]
                    if not latest_page.page == last_page_num:
                        return False
                return True

            if isTaskDone():
                # sort the results since they sometimes come in out of order
                for results in taskResults:
                    results.sort(key=lambda result: result.page)
                check(State.manifest[id], taskResults, browser,
                      self.server.masterMode)
                # Please oh please GC this ...
                del State.taskResults[browser][id]
                State.remaining[browser] -= 1
            checkIfDone()
def checkIfDone():
    # The run is done once no browser has any remaining tasks.
    State.done = all(count == 0 for count in State.remaining.values())
# Applescript hack to quit Chrome on Mac
def tellAppToQuit(path, query):
    """Ask OS X (via osascript) to quit the app named in `query`; no-op elsewhere."""
    if platform.system() != "Darwin":
        return
    d = parse_qs(query)
    # NOTE(review): the app name is interpolated into a shell heredoc without
    # escaping; fine for a local test harness, but never expose to untrusted input.
    path = d['path'][0]
    cmd = """osascript<<END
tell application "%s"
quit
end tell
END""" % path
    os.system(cmd)
class BaseBrowserCommand(object):
def __init__(self, browserRecord):
self.name = browserRecord["name"]
self.path = browserRecord["path"]
self.tempDir = None
self.process = None
if platform.system() == "Darwin" and (self.path.endswith(".app") or self.path.endswith(".app/")):
self._fixupMacPath()
if not os.path.exists(self.path):
raise Exception("Path to browser '%s' does not exist." % self.path)
def setup(self):
self.tempDir = tempfile.mkdtemp()
self.profileDir = os.path.join(self.tempDir, "profile")
self.browserLog = open(BROWSERLOG_FILE, "w")
def teardown(self):
self.process.terminate()
# If the browser is still running, wait up to ten seconds for it to quit
if self.process and self.process.poll() is None:
checks = 0
while self.process.poll() is None and checks < 20:
checks += 1
time.sleep(.5)
# If it's still not dead, try to kill it
if self.process.poll() is None:
print "Process %s is still running. Killing." % self.name
self.process.kill()
self.process.wait()
if self.tempDir is not None and os.path.exists(self.tempDir):
shutil.rmtree(self.tempDir)
self.browserLog.close()
def start(self, url):
raise Exception("Can't start BaseBrowserCommand")
class FirefoxBrowserCommand(BaseBrowserCommand):
    """Launches Firefox with the checked-in test profile."""
    def _fixupMacPath(self):
        # Resolve the .app bundle to the actual executable.
        self.path = os.path.join(self.path, "Contents", "MacOS", "firefox-bin")

    def setup(self):
        super(FirefoxBrowserCommand, self).setup()
        # Seed the temp profile with the repository's test profile.
        shutil.copytree(os.path.join(DOC_ROOT, "test", "resources", "firefox"),
                        self.profileDir)

    def start(self, url):
        cmds = [self.path]
        if platform.system() == "Darwin":
            cmds.append("-foreground")
        cmds.extend(["-no-remote", "-profile", self.profileDir, url])
        self.process = subprocess.Popen(cmds, stdout = self.browserLog, stderr = self.browserLog)
class ChromeBrowserCommand(BaseBrowserCommand):
    """Launches Google Chrome with an isolated user-data dir."""
    def _fixupMacPath(self):
        # Resolve the .app bundle to the actual executable.
        self.path = os.path.join(self.path, "Contents", "MacOS", "Google Chrome")

    def start(self, url):
        cmds = [self.path]
        cmds.extend(["--user-data-dir=%s" % self.profileDir,
                     "--no-first-run", "--disable-sync", url])
        self.process = subprocess.Popen(cmds, stdout = self.browserLog, stderr = self.browserLog)
def makeBrowserCommand(browser):
    """Instantiate the browser-command class matching the record's name or path."""
    lowered_path = browser["path"].lower()
    lowered_name = browser["name"]
    if lowered_name is not None:
        lowered_name = lowered_name.lower()
    known = {"firefox": FirefoxBrowserCommand,
             "chrome": ChromeBrowserCommand }
    command = None
    for key, cls in known.items():
        if (lowered_name and key in lowered_name) or key in lowered_path:
            command = cls(browser)
            # fall back to the matched key when the record has no name
            command.name = command.name or key
            break
    if command is None:
        raise Exception("Unrecognized browser: %s" % browser)
    return command
def makeBrowserCommands(browserManifestFile):
    """Build a browser command for every record in the JSON manifest file."""
    with open(browserManifestFile) as manifest:
        return [makeBrowserCommand(record) for record in json.load(manifest)]
def downloadLinkedPDF(f):
linkFile = open(f +'.link')
link = linkFile.read()
linkFile.close()
sys.stdout.write('Downloading '+ link +' to '+ f +' ...')
sys.stdout.flush()
response = urllib2.urlopen(link)
with open(f, 'wb') as out:
out.write(response.read())
print 'done'
def downloadLinkedPDFs(manifestList, ignoreDownloadErrors):
for item in manifestList:
f, isLink = item['file'], item.get('link', False)
if isLink and not os.access(f, os.R_OK):
try:
downloadLinkedPDF(f)
except:
print 'ERROR: Unable to download file "' + f + '".'
if ignoreDownloadErrors:
open(f, 'wb').close()
else:
raise
def verifyPDFs(manifestList):
error = False
for item in manifestList:
f = item['file']
if os.access(f, os.R_OK):
fileMd5 = hashlib.md5(open(f, 'rb').read()).hexdigest()
if 'md5' not in item:
print 'WARNING: Missing md5 for file "' + f + '".',
print 'Hash for current file is "' + fileMd5 + '"'
error = True
continue
md5 = item['md5']
if fileMd5 != md5:
print 'WARNING: MD5 of file "' + f + '" does not match file.',
print 'Expected "' + md5 + '" computed "' + fileMd5 + '"'
error = True
continue
else:
print 'WARNING: Unable to open file for reading "' + f + '".'
error = True
return not error
def getTestBrowsers(options):
    """Build the list of browsers to test, from a manifest or a single path."""
    browsers = []
    if options.browserManifestFile:
        browsers = makeBrowserCommands(options.browserManifestFile)
    elif options.browser:
        record = {"path": options.browser, "name": None}
        browsers = [makeBrowserCommand(record)]
    if options.browserManifestFile or options.browser:
        # a browser was explicitly requested, so resolution must succeed
        assert len(browsers) > 0
    return browsers
def setUp(options):
    """Prepare reftest state (downloads, manifest, counters); returns browsers."""
    # Only serve files from a pdf.js clone
    assert not GIT_CLONE_CHECK or os.path.isfile('../src/pdf.js') and os.path.isdir('../.git')
    if options.masterMode and os.path.isdir(TMPDIR):
        print 'Temporary snapshot dir tmp/ is still around.'
        print 'tmp/ can be removed if it has nothing you need.'
        if options.noPrompts or prompt('SHOULD THIS SCRIPT REMOVE tmp/? THINK CAREFULLY'):
            subprocess.call(( 'rm', '-rf', 'tmp' ))
    assert not os.path.isdir(TMPDIR)
    testBrowsers = getTestBrowsers(options)
    with open(options.manifestFile) as mf:
        manifestList = json.load(mf)
    if not options.noDownload:
        downloadLinkedPDFs(manifestList, options.ignoreDownloadErrors)
    if not verifyPDFs(manifestList):
        print 'Unable to verify the checksum for the files that are used for testing.'
        print 'Please re-download the files, or adjust the MD5 checksum in the manifest for the files listed above.\n'
    # Per-browser bookkeeping: empty result slots for every task and round.
    for b in testBrowsers:
        State.taskResults[b.name] = { }
        State.remaining[b.name] = len(manifestList)
        State.lastPost[b.name] = int(time.time())
        for item in manifestList:
            id, rounds = item['id'], int(item['rounds'])
            State.manifest[id] = item
            taskResults = [ ]
            for r in xrange(rounds):
                taskResults.append([ ])
            State.taskResults[b.name][id] = taskResults
    if options.statsFile != None:
        State.saveStats = True
    return testBrowsers
def setUpUnitTests(options):
    """Prepare unit-test bookkeeping; returns the browsers to launch."""
    # Only serve files from a pdf.js clone
    assert not GIT_CLONE_CHECK or os.path.isfile('../src/pdf.js') and os.path.isdir('../.git')
    browsers = getTestBrowsers(options)
    UnitTestState.browsersRunning = len(browsers)
    for browser in browsers:
        # seed the liveness timestamp so the watchdog has a baseline
        UnitTestState.lastPost[browser.name] = int(time.time())
    return browsers
def startBrowsers(browsers, options, path):
    """Launch every browser at the test page with run parameters in the query."""
    for b in browsers:
        b.setup()
        print 'Launching', b.name
        host = 'http://%s:%s' % (SERVER_HOST, options.port)
        qs = '?browser='+ urllib.quote(b.name) +'&manifestFile='+ urllib.quote(options.manifestFile)
        # NOTE(review): b.path and the remaining params are not URL-quoted --
        # works for typical paths, but confirm for paths containing '&' or spaces.
        qs += '&path=' + b.path
        qs += '&delay=' + str(options.statsDelay)
        qs += '&masterMode=' + str(options.masterMode)
        b.start(host + path + qs)
def teardownBrowsers(browsers):
for b in browsers:
try:
b.teardown()
except:
print "Error cleaning up after browser at ", b.path
print "Temp dir was ", b.tempDir
print "Error:", sys.exc_info()[0]
def check(task, results, browser, masterMode):
failed = False
for r in xrange(len(results)):
pageResults = results[r]
for p in xrange(len(pageResults)):
pageResult = pageResults[p]
if pageResult is None:
continue
failure = pageResult.failure
if failure:
failed = True
State.numErrors += 1
print 'TEST-UNEXPECTED-FAIL | test failed', task['id'], '| in', browser, '| page', p + 1, 'round', r, '|', failure
if failed:
return
kind = task['type']
if 'eq' == kind or 'text' == kind:
checkEq(task, results, browser, masterMode)
elif 'fbf' == kind:
checkFBF(task, results, browser)
elif 'load' == kind:
checkLoad(task, results, browser)
else:
assert 0 and 'Unknown test type'
def createDir(dir):
try:
os.makedirs(dir)
except OSError, e:
if e.errno != 17: # file exists
print >>sys.stderr, 'Creating', dir, 'failed!'
def readDataUri(data):
    """Base64-decode the payload of a data URI (everything after the last comma)."""
    prefix, payload = data.rsplit(",", 1)
    return base64.b64decode(payload)
def checkEq(task, results, browser, masterMode):
    """Compare each page snapshot byte-for-byte with the stored reference.

    On mismatch, both images are written to TEST_SNAPSHOTS and logged in the
    Mozilla reftest-analyzer format. In master mode, new/differing snapshots
    are also staged in TMPDIR as candidate references.
    """
    pfx = os.path.join(REFDIR, sys.platform, browser, task['id'])
    testSnapshotDir = os.path.join(TEST_SNAPSHOTS, sys.platform, browser, task['id'])
    # eq tests only use the first rendering round
    results = results[0]
    taskId = task['id']
    taskType = task['type']
    passed = True
    for result in results:
        page = result.page
        snapshot = readDataUri(result.snapshot)
        ref = None
        eq = True
        path = os.path.join(pfx, str(page) + '.png')
        if not os.access(path, os.R_OK):
            State.numEqNoSnapshot += 1
            if not masterMode:
                print 'WARNING: no reference snapshot', path
        else:
            f = open(path, 'rb')
            ref = f.read()
            f.close()
            # exact byte comparison with the reference image
            eq = (ref == snapshot)
            if not eq:
                print 'TEST-UNEXPECTED-FAIL |', taskType, taskId, '| in', browser, '| rendering of page', page, '!= reference rendering'
                if not State.eqLog:
                    State.eqLog = open(EQLOG_FILE, 'w')
                eqLog = State.eqLog
                # dump both images for offline inspection
                createDir(testSnapshotDir)
                testSnapshotPath = os.path.join(testSnapshotDir, str(page) + '.png')
                handle = open(testSnapshotPath, 'wb')
                handle.write(snapshot)
                handle.close()
                refSnapshotPath = os.path.join(testSnapshotDir, str(page) + '_ref.png')
                handle = open(refSnapshotPath, 'wb')
                handle.write(ref)
                handle.close()
                # NB: this follows the format of Mozilla reftest
                # output so that we can reuse its reftest-analyzer
                # script
                eqLog.write('REFTEST TEST-UNEXPECTED-FAIL | ' + browser +'-'+ taskId +'-page'+ str(page) + ' | image comparison (==)\n')
                eqLog.write('REFTEST IMAGE 1 (TEST): ' + testSnapshotPath + '\n')
                eqLog.write('REFTEST IMAGE 2 (REFERENCE): ' + refSnapshotPath + '\n')
                passed = False
                State.numEqFailures += 1
        if masterMode and (ref is None or not eq):
            # master mode: stage the new snapshot as a candidate reference
            tmpTaskDir = os.path.join(TMPDIR, sys.platform, browser, task['id'])
            createDir(tmpTaskDir)
            handle = open(os.path.join(tmpTaskDir, str(page)) + '.png', 'wb')
            handle.write(snapshot)
            handle.close()
    if passed:
        print 'TEST-PASS |', taskType, 'test', task['id'], '| in', browser
def checkFBF(task, results, browser):
    '''Forward-back-forward check: the two rendering rounds of each page
    must produce byte-identical snapshots.

    A page whose first-round entry is None ends the comparison early.
    '''
    round0, round1 = results[0], results[1]
    assert len(round0) == len(round1)
    passed = True
    for page in xrange(len(round1)):
        r0Page, r1Page = round0[page], round1[page]
        if r0Page is None:
            break
        if r0Page.snapshot != r1Page.snapshot:
            print 'TEST-UNEXPECTED-FAIL | forward-back-forward test', task['id'], '| in', browser, '| first rendering of page', page + 1, '!= second'
            passed = False
            State.numFBFFailures += 1
    if passed:
        print 'TEST-PASS | forward-back-forward test', task['id'], '| in', browser
def checkLoad(task, results, browser):
    '''Report success for a load test; reaching this point means it loaded.'''
    # Load just checks for absence of failure, so if we got here the
    # test has passed
    print 'TEST-PASS | load test', task['id'], '| in', browser
def processResults(options):
    '''Print a summary of all failure counters and optionally dump the
    collected State.stats to options.statsFile as JSON.
    '''
    print ''
    numFatalFailures = (State.numErrors + State.numFBFFailures)
    if 0 == State.numEqFailures and 0 == numFatalFailures:
        print 'All regression tests passed.'
    else:
        print 'OHNOES! Some tests failed!'
        if 0 < State.numErrors:
            print ' errors:', State.numErrors
        if 0 < State.numEqFailures:
            print ' different ref/snapshot:', State.numEqFailures
        if 0 < State.numFBFFailures:
            print ' different first/second rendering:', State.numFBFFailures
    if options.statsFile != None:
        with open(options.statsFile, 'w') as sf:
            sf.write(json.dumps(State.stats, sort_keys=True, indent=4))
        print 'Wrote stats file: ' + options.statsFile
def maybeUpdateRefImages(options, browser):
    '''In master mode, offer to sync the snapshots staged in tmp/ into ref/.

    Refuses when any non-eq failure occurred (errors or FBF mismatches).
    May first start the reftest analyzer so the differences can be
    inspected before confirming.
    '''
    if options.masterMode and (0 < State.numEqFailures or 0 < State.numEqNoSnapshot):
        print "Some eq tests failed or didn't have snapshots."
        print 'Checking to see if master references can be updated...'
        numFatalFailures = (State.numErrors + State.numFBFFailures)
        if 0 < numFatalFailures:
            print ' No. Some non-eq tests failed.'
        else:
            print ' Yes! The references in tmp/ can be synced with ref/.'
            if options.reftest:
                startReftest(browser, options)
            if options.noPrompts or prompt('Would you like to update the master copy in ref/?'):
                sys.stdout.write(' Updating ref/ ... ')
                if not os.path.exists('ref'):
                    subprocess.check_call('mkdir ref', shell = True)
                subprocess.check_call('cp -Rf tmp/* ref/', shell = True)
                print 'done'
            else:
                print ' OK, not updating.'
def startReftest(browser, options):
    '''Open the reftest-analyzer page (pointing at eq.log) in the given
    browser and block until the browser process exits.
    '''
    url = "http://%s:%s" % (SERVER_HOST, options.port)
    url += "/test/resources/reftest-analyzer.xhtml"
    url += "#web=/test/eq.log"
    try:
        browser.setup()
        browser.start(url)
        print "Waiting for browser..."
        browser.process.wait()
    finally:
        teardownBrowsers([browser])
    print "Completed reftest usage."
def runTests(options, browsers):
try:
shutil.rmtree(TEST_SNAPSHOTS);
except OSError, e:
if e.errno != 2: # folder doesn't exist
print >>sys.stderr, 'Deleting', dir, 'failed!'
t1 = time.time()
try:
startBrowsers(browsers, options, '/test/test_slave.html')
while not State.done:
for b in State.lastPost:
if State.remaining[b] > 0 and int(time.time()) - State.lastPost[b] > BROWSER_TIMEOUT:
print 'TEST-UNEXPECTED-FAIL | test failed', b, "has not responded in", BROWSER_TIMEOUT, "s"
State.numErrors += State.remaining[b]
State.remaining[b] = 0
checkIfDone()
time.sleep(1)
processResults(options)
finally:
teardownBrowsers(browsers)
t2 = time.time()
print "Runtime was", int(t2 - t1), "seconds"
if State.eqLog:
State.eqLog.close();
if options.masterMode:
maybeUpdateRefImages(options, browsers[0])
elif options.reftest and State.numEqFailures > 0:
print "\nStarting reftest harness to examine %d eq test failures." % State.numEqFailures
startReftest(browsers[0], options)
def runUnitTests(options, browsers, url, name):
    '''Run the unit/font test page in every browser and summarize results.

    Polls until every browser has finished; a browser silent for longer
    than BROWSER_TIMEOUT is marked failed and no longer waited on.
    '''
    t1 = time.time()
    try:
        startBrowsers(browsers, options, url)
        while UnitTestState.browsersRunning > 0:
            for b in UnitTestState.lastPost:
                if UnitTestState.lastPost[b] != None and int(time.time()) - UnitTestState.lastPost[b] > BROWSER_TIMEOUT:
                    print 'TEST-UNEXPECTED-FAIL | test failed', b, "has not responded in", BROWSER_TIMEOUT, "s"
                    # None marks this browser as already timed out.
                    UnitTestState.lastPost[b] = None
                    UnitTestState.browsersRunning -= 1
                    UnitTestState.numErrors += 1
            time.sleep(1)
        print ''
        print 'Ran', UnitTestState.numRun, 'tests'
        if UnitTestState.numErrors > 0:
            print 'OHNOES! Some', name, 'tests failed!'
            print ' ', UnitTestState.numErrors, 'of', UnitTestState.numRun, 'failed'
        else:
            print 'All', name, 'tests passed.'
    finally:
        teardownBrowsers(browsers)
    t2 = time.time()
    print '', name, 'tests runtime was', int(t2 - t1), 'seconds'
def main():
    '''Entry point: parse options, start the HTTP test server on a daemon
    thread, then run unit/font tests or the regression suite. With no
    browsers configured, just serve HTTP until Ctrl-C.
    '''
    optionParser = TestOptions()
    options, args = optionParser.parse_args()
    options = optionParser.verifyOptions(options)
    if options == None:
        sys.exit(1)
    if options.unitTest or options.fontTest:
        httpd = TestServer((SERVER_HOST, options.port), UnitTestHandler)
        httpd_thread = threading.Thread(target=httpd.serve_forever)
        httpd_thread.setDaemon(True)
        httpd_thread.start()
        browsers = setUpUnitTests(options)
        if len(browsers) > 0:
            if options.unitTest:
                runUnitTests(options, browsers, '/test/unit/unit_test.html', 'unit')
            if options.fontTest:
                runUnitTests(options, browsers, '/test/font/font_test.html', 'font')
    else:
        httpd = TestServer((SERVER_HOST, options.port), PDFTestHandler)
        httpd.masterMode = options.masterMode
        httpd_thread = threading.Thread(target=httpd.serve_forever)
        httpd_thread.setDaemon(True)
        httpd_thread.start()
        browsers = setUp(options)
        if len(browsers) > 0:
            runTests(options, browsers)
        else:
            # just run the server
            print "Running HTTP server. Press Ctrl-C to quit."
            try:
                while True:
                    time.sleep(1)
            except (KeyboardInterrupt):
                print "\nExiting."

if __name__ == '__main__':
    main()
|
ix.py | import os, configparser, argparse
import re, threading, json, hashlib
import pathlib
from datetime import datetime
# Global verbosity check
# Gets changed by the command line flag '-v'
verbose = False

# ANSI escape sequences used to colorize terminal output (see the
# out/info/error/warn/success/log helpers below).
RED = '\x1B[31;1m'
CYAN = '\x1B[36m'
GREEN = '\x1B[32;1m'
YELLOW = '\x1B[33;1m'
RESET = '\x1B[0m'
WHITE = '\x1B[37;1m'
MAGENTA = '\x1B[35;1m'
# _
# ___| | __ _ ___ ___ ___ ___
# / __| |/ _` / __/ __|/ _ \/ __|
# | (__| | (_| \__ \__ \ __/\__ \
# \___|_|\__,_|___/___/\___||___/
# -------------------------------------------------------------------------
class Parser:
@staticmethod
def get_config_key(key):
'''
Given a key of the format 'key.value', find out what the
value for the variable of that format is within the ix config
'''
try:
k, v = key.strip().split('.', 1)
return config[k][v]
except:
return None
    @staticmethod
    def get_secondary_key_value(key):
        '''
        Unwrap whether or not a configuration value exists
        for the given key.

        Parameters:
            key (str): The key to look for

        Returns:
            str: The value, or null
        '''
        value = Parser.get_config_key(key)
        if not value:
            return None
        # Configured values may embed $VARS; expand them against the
        # process environment before handing them back.
        return os.path.expandvars(value)
    @staticmethod
    def get_main_key_value(key):
        '''
        Unwrap whether or not a configuration value exists
        for the given key, as well as making sure to unravel
        any helpers within the provided key.

        Keys with no spaces are plain config lookups. Otherwise the first
        word is a Helpers method name, followed by its arguments in the
        form 'main; name: value; name: value'.

        Parameters:
            key (str): The key to look for

        Returns:
            str: The value, or null
        '''
        stripped = key.strip()
        value = None
        if len(stripped.split(' ', 1)) == 1:
            value = Parser.get_config_key(key)
            if not value: return None
            return os.path.expandvars(value)
        # Check for helpers
        helper, parameters = stripped.split(' ', 1)
        parameters = [ param.strip() for param in parameters.split(';') ]
        # First argument doesn't have a name
        main = parameters.pop(0)
        # The main argument may itself be a config key; fall back to the
        # literal text when it isn't.
        main = Parser.get_config_key(main) or main
        modifier_keys = list()
        modifier_values = list()
        for param in parameters:
            name, value = param.split(':')
            name = name.strip()
            value = value.strip()
            modifier_keys.append(name)
            # Modifier values may also reference config keys.
            modifier_values.append(Parser.get_config_key(value) or value)
        modifiers = dict(zip(modifier_keys, modifier_values))
        value = Helpers.call(helper, main, modifiers)
        return os.path.expandvars(value)
    @staticmethod
    def parse_secondary_keys(string, prefix):
        '''
        Find secondary variables within a file ( these are variables within main variables ),
        denoted by '[]', and look whether or not they have a defined value inside the configuration
        file.

        If they do, replace the variable with the value from the configuration.

        Parameters:
            string (str): The data we want to look through for variables
            prefix (str): What prefix the parent variables are denoted by

        Returns:
            tuple: (contents with replacements applied, list of unmatched
            '[key]' strings or None when everything matched)
        '''
        # Capture only the '[...]' part nested inside a '<prefix>{{ ... }}'.
        pattern = re.compile('%s{{.+\\[(.+?)\\].+}}' % re.escape(prefix), re.MULTILINE)
        items = set(re.findall(pattern, string))
        unmatched = None
        contents = string
        for key in items:
            value = Parser.get_secondary_key_value(key)
            if not value:
                if not unmatched: unmatched = []
                unmatched.append(f'[{key}]')
                continue
            contents = contents.replace(f'[{ key }]', value)
        return ( contents, unmatched )
    @staticmethod
    def parse_main_keys(string, prefix):
        '''
        Find main variables within a file ( something like ${{}} ) and look
        whether or not they have a defined value inside the configuration file.

        If they do, repalce the variable with the value from the configuration.

        Parameters:
            string (str): The data we want to look through for variables
            prefix (str): What prefix the variables are denoted by

        Returns:
            tuple: (contents with replacements applied, list of unmatched
            full variable strings or None when everything matched)
        '''
        pattern = re.compile('%s{{(.+?)}}' % re.escape(prefix), re.MULTILINE)
        items = set(re.findall(pattern, string))
        unmatched = None
        contents = string
        for key in items:
            # NOTE(review): the rebuilt key uses the module-level 'sequence'
            # delimiters while the regex above hardcodes '{{'/'}}' - these
            # are assumed to stay in sync.
            full_key = '{}{}{}{}'.format(prefix, sequence[0], key, sequence[1])
            value = Parser.get_main_key_value(key)
            if not value:
                if not unmatched: unmatched = []
                unmatched.append(full_key)
                continue
            contents = contents.replace(full_key, value)
        return (contents, unmatched)
@staticmethod
def expand_ix_vars(string, prefix):
'''
Look through a given string of data in a file and find every
variable starting with the prefix defined for that specific file.
Replace all thos variables with their related values inside the
configuration file.
Parameters:
string (str): The string contents in which to look for variables
prefix (str): The prefix used for including the variables in the given string
Returns:
contents (str): The original content with all the variables replaced
unmatched (list): The keys for all the variables that couldn't be matched within the string
'''
contents, unmatched_secondary = Parser.parse_secondary_keys(string, prefix)
contents, unmatched_main = Parser.parse_main_keys(contents, prefix)
if not unmatched_secondary: unmatched_secondary = []
if not unmatched_main: unmatched_main = []
unmatched = unmatched_main + unmatched_secondary
return (contents, unmatched)
    @staticmethod
    def wrap_file(file_path):
        '''
        Wrap a file and its contents in the custom File class
        to allow for easier handling.

        This finds whether or not a file is ix compatible, what
        comment type it uses, and makes sure to setup all the ix
        configuration found within the file.

        Parameters:
            file_path (str): The path to the file we want to wrap

        Returns:
            File: The wrapped file, or None when the file is unreadable
            or does not carry the ix trigger.
        '''
        root, name = file_path.rsplit('/', 1)
        file = get_file_lines(file_path)
        if not file:
            return None
        lines = list(file)
        found = False
        current = None
        # Check the first few lines of the file for the trigger.
        # If the trigger isn't found, assume this file shouldn't
        # be processed.
        for idx, line in enumerate(lines):
            for entry in entries:
                # e.g. '//' + ':' -> '//:' marks an ix comment line.
                start = '{}{}'.format(entry, notation)
                if line.startswith(start):
                    if trigger in line:
                        found = True
                        current = File(root, name, start)
                        continue
                    if not found:
                        continue
                    # Lines after the trigger may carry config fields
                    # such as 'to: ...' or 'prefix: ...'.
                    clean = line.replace(start, '').strip()
                    if clean.startswith(tuple(current.fields)):
                        field, data = clean.split(':', 1)
                        current.load_field((field, data))
                        continue
            # Give up if the trigger did not appear in the first 21 lines.
            if idx == 20 and not found:
                return None
        return current
    @staticmethod
    def find_ix(root):
        '''
        Find all files that contain the 'ix' trigger so we know what
        needs parsing.

        Parameters:
            root (str): The directory to look into for files

        Returns:
            list: All the files in the directory that contain the trigger
        '''
        ix_files = []
        for root, _, files in os.walk(root):
            for name in files:
                # Skip files that ix itself produced (saved with an '.ix'
                # suffix) so output is never re-processed.
                if name.endswith('.ix'): continue
                full_path = root + '/' + name
                file = Parser.wrap_file(full_path)
                if file:
                    ix_files.append(file)
        return ix_files
    @staticmethod
    def process_file(file):
        '''
        Go through the given file's contents and make sure to replace
        all the variables that have matches within the 'ixrc' configuration
        as well as making sure to remove every trace of 'ix' itself from
        the processed file, leaving it nice and clean, as well as making sure
        to add the processed file, to the lock file so we don't have to process
        it again unless it's contents change.

        Parameters:
            file (File): The file object to parse
        '''
        processed = file.parse()
        if not file.rules:
            # Strip every ix comment line (the trigger/config header) from
            # the generated output.
            regex = re.compile('^{}.+[\\s\\S]$'.format(file.notation), re.MULTILINE)
            for line in re.findall(regex, processed):
                processed = processed.replace(line, '')
        try:
            with open(file.get_output_path(), 'w') as f:
                f.write(processed)
            if file.has_custom_access:
                os.chmod(file.get_output_path(), file.access)
            # Record the result so unchanged files are skipped next run.
            lock_file[file.original_path] = file.to_dict()
        except FileNotFoundError:
            error('Could not find output path: {}.\n\tUsed in file: {}'.format(file.get_output_path(), file.original_path), True)
            return
        success('Saved: {1}{2}{0} to {1}{3}'.format(WHITE, RESET, file.original_path, file.get_output_path()), True)
class Helpers:
'''
List of all the helpers that can be used within files when
including variables and/or templating
Helpers can only be used within main variables, aka. '${{ thing.thing }}'
Parameters:
helper (str): The name of the helper function to run
value (str/int): The value to perform the function on
modifiers (dict): Extra parameters passed to the helper to further tweak the value
'''
    @staticmethod
    def call(helper, value, modifiers):
        '''
        Call a specific helper, if defined

        Parameters:
            helper (str): Name of a Helpers static method
            value (str): The primary argument for the helper
            modifiers (dict): Keyword arguments for the helper

        Returns:
            str: The helper's result, or '' when the helper is missing
            or raised an error (logged via error()).
        '''
        try:
            method = getattr(Helpers, helper)
            return method(value, **modifiers)
        except Exception as e:
            error(f'{e!r} ---- helper: {helper}')
            return ''
@staticmethod
def rgb(value, alpha = None):
'''
Take a hex string ( #181b21 ) and convert it to 'rgb'.
If an rgb or rgba string is provided, if the opacity isn't
overwritten, it'll just return the string that was passed in.
If the opacity is overwritten, however, it'll replace the alpha
field within the given string.
Optionally, pass in opacity to override or add the alpha channel.
'''
# We got an rgb value
if not value.startswith('#'):
# Give it back as it is if no overrides are specified
if not alpha: return value
values = [ x.strip() for x in value.split('(', 1).pop().rstrip(')').split(',') ]
r = values[0]
g = values[1]
b = values[2]
a = alpha
return f'rgba({r}, {g}, {b}, {a})'
string = value.lstrip('#')
r, g, b = tuple(int(string[i:i+2], 16) for i in (0, 2, 4))
a = ''
if len(string) == 8:
a = round(int(string[6:6+2], 16) / 255, 2)
if alpha:
a = alpha
if a != '': tag = f'rgba({r}, {g}, {b}, {a})'
else: tag = f'rgb({r}, {g}, {b})'
return tag
@staticmethod
def hex(value, alpha = None, argb = None):
'''
Take an rgb/rgba string and convert it to a hex representation
of the same color. If a hex string is provided, it'll return the exact
same hex string unless the opacity is overwritten. If it is, it'll
replace the alpha field within the given string.
Optionally pass in opacity to override or add the alpha channel.
'''
if alpha:
alpha = hex(round(float(alpha) * 255))[2:]
# We got a hex string
if value.startswith('#'):
# Give it back as it is if no overrides are specified
if not alpha: return value
value = value[1:7]
if argb:
return f'#{alpha}{value}'
return f'#{value}{alpha}'
a = value.startswith('rgba')
value = value.split('(', 1).pop().rstrip(')').split(',')
r = hex(int(value[0]))[2:]
g = hex(int(value[1]))[2:]
b = hex(int(value[2]))[2:]
a = hex(round(float(value[3]) * 255))[2:] if a else ''
if alpha: a = alpha
if argb:
return f'#{a}{r}{g}{b}'
return f'#{r}{g}{b}{a}'
@staticmethod
def include(path):
'''
Include a given file directly into the current file.
This allows you to import/merge multiple files into one.
If the file you're importing is an ix compatible file,
it will be parsed, otherwise the plain text will be included.
Environment variables work, as well as ix variables.
'''
path = os.path.expandvars(path)
file = Parser.wrap_file(path)
# If it's not an ix file just read the contents
if not file:
with open(path) as f:
return f.read()
contents, _ = Parser.expand_ix_vars(file)
return contents
@staticmethod
def uppercase(string):
'''
Turn a given string to uppercase.
Environment variables work, as well as ix variables.
'''
return string.upper()
@staticmethod
def lowercase(string):
'''
Turn a given string to lowercase.
Environment variables work, as well as ix variables.
'''
return string.lower()
class File:
'''
Structured class to keep track of everything about each
file that needs parsing. Such as the comment type,
the paths, the ix-configuration, and so on.
'''
    def __init__(self, root, name, notation = '#', rules = None) -> None:
        '''
        Wrap one source file together with its ix configuration.

        Parameters:
            root (str): Directory containing the file
            name (str): The file's name
            notation (str): The comment marker (+ notation) that introduces
                ix lines in this file
            rules (dict): Pre-parsed rules entry when run with --rules;
                None when the config comes from the file's own header
        '''
        self.original_path = root + '/' + name
        self.name = name
        self.notation = notation
        self.hash = ''  # filled lazily by hash_contents()
        self.rules = rules
        # Flags
        self.has_custom_dir = False
        self.has_custom_name = False
        self.has_custom_access = False
        # Config fields
        self.to = root
        self.prefix = '#'
        self.access = ''
        # Maps header field names to their setter; 'to'/'out' and
        # 'as'/'name' are aliases.
        self.fields = {
            'to': self.__set_to,
            'out': self.__set_to,
            'as': self.__set_as,
            'name': self.__set_as,
            'prefix': self.__set_prefix,
            'access': self.__set_access
        }
def get_output_path(self) -> str:
'''
Get the full (directory + filename) path for the current file.
Making sure to account for the location, and add an '.ix' extension
to the filename if the directory is the same as the original file.
We do not want to overwrite the original file.
Parameters:
self (File): The current file object
'''
extension = ''
# If no custom directory was defined
# and no custom filename was defined
# we add '.ix' to the original file name
# when saving so we don't overwrite the original
if not self.has_custom_dir:
if not self.has_custom_name:
extension = '.ix'
# If we have a custom directory
# we write to that directory, with whatever the current
# name is.
return self.to + '/' + self.name + extension
def load_field(self, field_tuple):
'''
Parse a given 'ix' configuration field. Usually comes in the following
format `out: /path/to/whatever`. Find out what item this configuration
field refers to and run the expected actions for said item.
Parameters:
self (File): The current file object
field (str): The field line directly from a file, with the comment stripped
'''
field, data = field_tuple
parse = self.fields.get(field, lambda x: 'No such field: ' + field)
if isinstance(data, str):
parse(data.strip())
else:
parse(data)
    def __set_to(self, data):
        '''
        Update the directory that the processed file should be saved
        to once done, making sure to create said directory if it
        doesn't exist already and to expand any environment variables
        or 'ix' variables within it.

        This is used to parse a specific field from the ix configuration.

        Parameters:
            self (File): The current file object
            data (str): The new output directory
        '''
        expanded = os.path.expandvars(data)
        expanded = self.__unwrap_parse(Parser.expand_ix_vars(expanded, self.prefix))
        # If the given directory does not exist
        # we want to create it.
        if not os.path.isdir(expanded):
            info('{} does not exist, creating it for the following file: {}'.format(expanded, self.name), True)
            os.makedirs(expanded)
        self.has_custom_dir = True
        self.to = expanded
    def __set_as(self, data):
        '''
        Update the name that the processed file should have when it
        gets saved to the file system. The name itself may contain ix
        variables, which are expanded here.

        This is used to parse a specific field from the ix configuration.

        Parameters:
            self (File): The current file object
            data (str): The new file name + extension (if any)
        '''
        self.has_custom_name = True
        self.name = self.__unwrap_parse(Parser.expand_ix_vars(data, self.prefix))
    def __set_prefix(self, data):
        '''
        Replace the default prefix for this specific file.

        This is used to parse a specific field from the ix configuration.

        Parameters:
            self (File): The current file object
            data (str): The new prefix
        '''
        # Note: expansion still uses the *old* prefix; the new one only
        # applies to variables encountered afterwards.
        expanded = self.__unwrap_parse(Parser.expand_ix_vars(data, self.prefix))
        self.prefix = expanded
    def __set_access(self, data):
        '''
        Take in a decimal string of permissions in 'chmod' format
        and turn them into an octal value instead since that is the
        only format the python implementation of chmod will accept.

        This is used to parse a specific field from the ix configuration.

        Parameters:
            self (File): The current file object
            data (str): The permissions in 'chmod' format (e.g. '755')
        '''
        self.has_custom_access = True
        # Turn the perms to octal since chmod only accepts that
        expanded = self.__unwrap_parse(Parser.expand_ix_vars(data, self.prefix))
        self.access = int(expanded, 8)
    def __unwrap_parse(self, parsed):
        '''
        Spread the tuple returned from an expansion of ix variables and making
        sure to display a message if some variables were not found.

        Parameters:
            self (File): The current instance
            parsed (tuple): (parsed contents, unmatched variables)

        Returns:
            str: Just the parsed contents
        '''
        contents, unmatched = parsed
        if unmatched:
            variables = '\n\t'.join(unmatched)
            warn(f'Could not find\n\t{ variables }\n in { self.original_path }\n', True)
        return contents
    def to_dict(self):
        '''
        Put everything about this file that we want to store in
        the lock file within a dictionary

        Parameters:
            self (File): The current file object

        Returns:
            dict: hash of the source contents, output path, and the
            (local-time) creation timestamp
        '''
        return {
            'hash': self.hash_contents(),
            'output': self.get_output_path(),
            'created_at': str(datetime.now())
        }
def hash_contents(self):
'''
Hash the entire file contents, not all at once of course,
do it in chunks in case we hit some massive files we don't want to
eat up all the RAM.
The hash is later used to create unique identifiers for different purposes.
One of which is to store the hash in the lock file and later compare when
checking whether or not a file should be parsed again.
The hashing is done in md5 since it's fast and we really don't have to
worry about colisions. The chances of the same file colliding are extremely
small.
Parameters:
self (File): The current file object
'''
if self.hash != '':
return self.hash
md5 = hashlib.md5()
with open(self.original_path, 'rb') as bytes:
while True:
data = bytes.read(65536)
if not data:
break
md5.update(data)
digest = md5.hexdigest()
self.hash = digest
return digest
    def parse(self):
        '''
        Parse the contents of the file, replacing
        all variables with their defined values.

        Parameters:
            self (File): The current file obejct

        Returns:
            str: The expanded contents (unmatched variables are reported
            via __unwrap_parse and left in place)
        '''
        with open(self.original_path, 'r') as f:
            contents = self.__unwrap_parse(Parser.expand_ix_vars(f.read(), self.prefix))
        return contents
# __ _ _
# / _|_ _ _ __ ___| |_(_) ___ _ __ ___
# | |_| | | | '_ \ / __| __| |/ _ \| '_ \/ __|
# | _| |_| | | | | (__| |_| | (_) | | | \__ \
# |_| \__,_|_| |_|\___|\__|_|\___/|_| |_|___/
# -------------------------------------------------------------------------
def out(message, forced = False):
    # Print only when explicitly forced or when global verbosity is on.
    if forced or verbose:
        print(message)

# Colored, single-glyph log helpers. 'f' forces output even in quiet mode.
def info(message, f = False): out(CYAN + 'ℹ ' + WHITE + message + RESET, f)
def error(message, f = False): out(RED + '✖ ' + message + RESET, f)
def warn(message, f = False): out(YELLOW + '⚠ ' + WHITE + message + RESET, f)
def success(message, f = False): out(GREEN + '✔ ' + WHITE + message + RESET, f)
def log(message, f = False): out(MAGENTA + '~ ' + WHITE + message + RESET, f)
def get_file_lines(file_path):
    '''
    Try and open a file as a normal text file.
    If succeeded, return an array of all the lines
    inside that file.

    Parameters:
        file_path (str): Path of the file to read

    Returns:
        list: The file's lines, or None when the file can't be read as text
    '''
    try:
        # 'with' guarantees the handle is closed even when reading raises;
        # the old version leaked the handle on a decode error.
        with open(file_path, 'r') as file:
            return list(file)
    except PermissionError:
        info('No permission to access file, ignoring: ' + file_path)
        return None
    except Exception:
        # Typically a UnicodeDecodeError for binary files; narrowed from a
        # bare 'except:' so SystemExit/KeyboardInterrupt pass through.
        info('Found non-text file, ignoring: ' + file_path)
        return None
def read_config(at):
    '''
    Read the 'ix' configuration from it's specific path.
    Either user defined, or the default one. Use config parser
    to load and resolve all the magic that the .ini format provides.

    Parameters:
        at (str): The exact path to the config file

    Returns:
        configparser.ConfigParser: The loaded configuration
    '''
    # Pass the interpolation through the public constructor argument
    # instead of poking the private '_interpolation' attribute.
    config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
    config.read(at)
    return config
def read_lock_file(path):
    '''
    Read a JSON file into a dictionary allowing us to do
    quick lookups for specific files whenever we need to check
    if one was already parsed or not, allowing us to skip part of the
    process.

    Parameters:
        path (str): The directory of the lock file

    Returns:
        dict: The parsed lock data, or an empty dict when no lock exists
    '''
    try:
        # 'with' closes the handle even if the JSON parse raises; the old
        # version leaked the handle in that case.
        with open(path + '/ix.lock') as file:
            return json.loads(file.read())
    except FileNotFoundError:
        # Start fresh if the file doesn't exist
        return {}
def save_lock_file(path, data):
    '''
    Save a dictionary full of all parsed files to a file.

    This will be used later on when 'ix' runs again in order
    to check which files have changed and only re-process those files.
    Giving a bit of a performance boost in very large directories.

    Parameters:
        path (str): The directory of the lock file
        data (dict): Dictionary full of all the file data that we care about saving
    '''
    # exist_ok avoids the race between an isdir() check and makedirs().
    os.makedirs(path, exist_ok=True)
    with open(path + '/ix.lock', 'w') as lock:
        lock.write(json.dumps(data))
def cleanup():
    '''
    Attempt to remove all the files that were previously
    processed and stored in the cache, making sure to
    clear the cache when done so we're starting fresh.
    '''
    lock = read_lock_file(lock_path)
    info('Purging all previous builds...', True)
    if lock == {}:
        # NOTE(review): despite the message, this does not return; the
        # (empty) loop below is skipped and an empty lock is re-saved.
        log('Found no items in cache, exiting...', True)
    for _, entry in lock.items():
        file = entry['output']
        try:
            os.remove(file)
            log(f'\tRemoved: {file}')
        except Exception as e:
            error(f"Couldn't remove: {file} - {e!r}")
    # Reset the cache so the next run starts from scratch.
    save_lock_file(lock_path, {})
    success('Done', True)
def main(rules = None):
    '''
    The main entrypoint for the program.
    Initializes everything that needs to happen.
    From finding all the 'ix' files to creating new Threads for
    parsing each of the available files, as well as saving and updating
    the lock file once everything has been processed.

    Args:
        rules (dict): Pre-parsed rules (from --rules) describing exactly
            which files to process; when None, the root directory is
            scanned for files carrying the ix trigger.
    '''
    threads = list()
    if rules:
        files = list()
        for f in rules['parse']:
            root, name = f['file'].rsplit('/', 1)
            if not os.path.isfile(f['file']):
                error('Could not find file: ' + f['file'])
                continue
            file = File(root, name, rules = f)
            # Every key of the rules entry is fed through the same field
            # dispatcher as in-file ix headers; unknown keys are ignored.
            for field in f.items():
                file.load_field(field)
            files.append(file)
    else:
        files = Parser.find_ix(root_path)
    unchanged = 0
    saved = 0
    if len(files) > 0:
        info('Found {} ix compatible files'.format(len(files)))
    else:
        log('Found no ix compatible files in: {}.'.format(root_path))
        log('Exiting.')
        return
    for file in files:
        if file.original_path in lock_file:
            hash = file.hash_contents()
            lock = lock_file[file.original_path]
            # Don't run for files that haven't changed
            if lock and hash == lock['hash']:
                unchanged += 1
                continue
        # One worker thread per file to be (re)processed.
        thread = threading.Thread(target=Parser.process_file, args=(file,))
        threads.append(thread)
        thread.start()
        saved += 1
    for thread in threads:
        thread.join()
    # Logging
    if saved > 0:
        success('Saved {} files'.format(saved), True)
    if unchanged > 0:
        log('Skipped {} files because they were unchanged'.format(unchanged))
    # Cache all the parsed files
    save_lock_file(lock_path, lock_file)
# __ _ _ _
# ___ ___ _ __ / _(_) __ _ _ _ _ __ __ _| |_(_) ___ _ __
# / __/ _ \| '_ \| |_| |/ _` | | | | '__/ _` | __| |/ _ \| '_ \
# | (_| (_) | | | | _| | (_| | |_| | | | (_| | |_| | (_) | | | |
# \___\___/|_| |_|_| |_|\__, |\__,_|_| \__,_|\__|_|\___/|_| |_|
# |___/
# -------------------------------------------------------------------------
# Symbol configurations
notation = ':'                                   # separates comment marker from the trigger, e.g. '#:'
trigger = 'ix-config'                            # header text that marks a file as ix-managed
entries = [ '//', '#', '--', '--[', '/*', '*' ]  # recognised comment markers
sequence = [ '{{', '}}' ]                        # delimiters around main variables
# Directory configurations
root_path = os.path.expandvars('$HOME/dots')
config_path = os.path.expandvars('$HOME/.config/ix/ixrc')
lock_path = os.path.expandvars('$HOME/.cache/ix')
lock_file = None
config = None
# Commandline arguments
parser = argparse.ArgumentParser(description='Find and replace variables in files within a given directory')
parser.add_argument('-c', '--config', help='The path where the .ix configuration is located. Default $HOME/.config/ix/ixrc')
parser.add_argument('-r', '--rules', help='File that contains a list of all files to be parsed and included. Used instead of the #ix-config header in each individual file')
parser.add_argument('-d', '--directory', help='The directory to parse. Default $HOME/dots')
parser.add_argument('-f', '--field', help='Get a specific field value from the config')
# NOTE(review): '--full' uses store_false, so passing it sets args.full to
# False, which below *skips* the cache - confirm the flag name is intended.
parser.add_argument('--full', help='Skip looking at the cache and parse everything', action='store_false')
parser.add_argument('--reverse', help='Remove all the parsed files (everything defined in the cache)', action='store_true')
parser.add_argument('-v', '--verbose', help='Output extra information about what is happening', action='store_true')
args = parser.parse_args()
json_rules = None
if args.rules:
    with open(args.rules) as file:
        json_rules = json.load(file)
if args.verbose:
    verbose = True;
if args.config:
    # When rules are given, the variables file named there wins over -c.
    if args.rules:
        config_path = json_rules['vars_file']
    else:
        config_path = args.config
if args.field:
    config = read_config(config_path)
    contents = Parser.get_main_key_value(args.field)
    print(contents)
    # The whole thing doesn't need to run
    # if only one field is needed
    exit()
if args.directory:
    if args.rules:
        root_path = pathlib.Path(os.path.expandvars(json_rules['root'])).absolute()
    else:
        root_path = pathlib.Path(os.path.expandvars(args.directory)).absolute()
# Load in the cache if not specified
# otherwise.
if not args.full:
    lock_file = {}
else:
    lock_file = read_lock_file(lock_path)
# Load in the config
config = read_config(config_path)
# Run
if __name__ == '__main__':
    # Windows handles colors weirdly by default
    if os.name == 'nt':
        os.system('color')
    if not args.full:
        info('Skipping cache, doing a full parse...', True)
        cleanup()
    if args.reverse:
        cleanup()
    main(rules = json_rules)
|
clientPerf2Nossl.py | #!/usr/bin/env python2.7
from multiprocessing import Process, Value
import time
import sys
import xmlrpclib
def call_rpc(errors, i, num):
    '''Issue `num` sequential XML-RPC calls against localhost:8000.

    A fresh ServerProxy is built per call, so connection setup cost is
    included in the measurement.
    '''
    try:
        for j in range(0, num):
            s = xmlrpclib.ServerProxy('http://localhost:8000')
            s.test(i)
    except Exception as Ex:
        # NOTE(review): one failure aborts this worker's remaining calls
        # and is counted once in the shared counter.
        errors.value += 1
def jobs_process(errors, process_n, num):
    '''Spawn `process_n` worker processes, each making `num` RPC calls.'''
    for i in range(process_n):
        p = Process(target=call_rpc, args=(errors, i, num))
        p.start()
    # NOTE(review): workers are not join()ed here; this relies on the
    # parent's implicit wait for its children - confirm intended.
if __name__ == '__main__':
    # Usage: clientPerf2Nossl.py <num_processes> <calls_per_process>
    process_n, num_n = sys.argv[1:]
    # Shared error counter, incremented by the worker processes.
    errors = Value('i', 0)
    intprocess = int(process_n)
    num = int(num_n)
    start = time.time()
    jobs = Process(target=jobs_process, args=(errors, intprocess, num))
    jobs.start()
    jobs.join()
    took = time.time() - start
    print("Total jobs: %s" % (process_n))
    print("RPC Errors: %s" % (errors.value))
    print("Elapsed time: %s" % (took))
    sys.exit(0)
|
help_window.py | import threading
import webbrowser
from tkinter import *
from tkinter import ttk
from tkinter.font import *
# Module-level open/closed flag for the help window:
# 1 = no help window open, 2 = one is currently open (see Help.__init__).
count = 1

def reset(win):
    '''Mark the help window as closed again and destroy its Toplevel.'''
    global count
    count = 1
    win.destroy()
class Help:
    def __init__(self, version):
        '''Open the help window (at most one at a time, guarded by `count`).

        Parameters:
            version (str): App version shown in the window title
        '''
        self.version = version
        global count
        if count == 1:
            self.help_win = Toplevel()
            self.help_win.title('Help | YoutubeDL GUI | v{}'.format(self.version))
            # NOTE(review): icon path literally contains '#' - confirm the
            # file 'images/#app.ico' actually exists.
            self.help_win.iconbitmap('images/#app.ico')
            self.help_win.resizable(False, False)
            self.help_win.configure(bg='#cbdbfc', bd=5)
            self.help_win.geometry("500x300")
            # Closing the window resets the open/closed flag.
            self.help_win.protocol('WM_DELETE_WINDOW', lambda: reset(self.help_win))
            self.f = Font(family='TkDefaultFont', size=13, weight=BOLD)
            exit_btn = ttk.Button(self.help_win, text="Exit", style="some.TButton", command=lambda: reset(self.help_win))
            exit_btn.place(x=410, y=2)
            count = 2
    # threading
    # Each *_thread method runs its help-page builder on a worker thread.
    # NOTE(review): the workers create Tk widgets off the main thread, which
    # Tkinter does not guarantee to be safe - confirm this behaves reliably.
    def ffmpeg_thread(self):
        thread = threading.Thread(target=self.ffmpeg_help)
        thread.start()

    def detect_urls_thread(self):
        thread = threading.Thread(target=self.detect_urls_help)
        thread.start()

    def downloading_videos_thread(self):
        thread = threading.Thread(target=self.downloading_videos_help)
        thread.start()

    def other_issues_thread(self):
        thread = threading.Thread(target=self.other_issues)
        thread.start()

    def about_gui_thread(self):
        thread = threading.Thread(target=self.about_gui)
        thread.start()
    def add_label(self, win, text, bg="#ffffff", fg="black", x=1, y=1, font=None, bind=(False, None, None), bind1=(False, None), bind2=(False, None)):
        '''Create and place a Label on `win` at pixel position (x, y).

        bind:  (enabled, event-sequence, callback) - arbitrary event binding
        bind1: (enabled, event-sequence) - switch to the highlight background
        bind2: (enabled, event-sequence) - switch back to the window background
        '''
        label_adder = Label(win, text=text, fg=fg, bg=bg, font=font if font is not None else "TkDefaultFont")
        label_adder.place(x=x, y=y)
        if bind[0]:
            label_adder.bind(bind[1], bind[2])
        if bind1[0]:
            label_adder.bind(bind1[1], lambda event: label_adder.config(bg="#859fd4"))
        if bind2[0]:
            label_adder.bind(bind2[1], lambda event: label_adder.config(bg="#cbdbfc"))
def ffmpeg_help(self):
self.add_label(self.help_win, "FFmpeg - Help", '#cbdbfc', x=190, y=3, font=self.f)
self.add_label(self.help_win, "If you have not already read the How-To on github, then it's all explained here:", '#cbdbfc', x=5, y=40)
self.add_label(self.help_win, "If you've tried downloading a video, and got an error based on files that couldnt merge"
"\nor too high quality then that means you have not installed FFmpeg.", '#cbdbfc', x=5, y=80)
self.add_label(self.help_win, "To install FFmpeg, simply go to the 'Tools' tab of this GUI and click 'Install FFmpeg'.", '#cbdbfc', x=5, y=127)
self.add_label(self.help_win, "After that, I recommend choosing the latest version and your architecture"
"\n and then choose 'Static' for linking which should be automatic.", '#cbdbfc', x=5, y=160)
self.add_label(self.help_win, "Once installed, find the folder that contains 'ffmpeg.exe', 'ffplay.exe' or 'ffprobe.exe'"
"\nand then set that folder as your System Environmental Variable.", '#cbdbfc', x=5, y=210)
f = Font(family="TkDefaultFont", size=8, weight=BOLD)
self.add_label(self.help_win, "(Control Panel>System>Advanced System Settings>Environmental Variables>Path>Edit)", '#cbdbfc', x=-3, y=245, font=f)
self.add_label(self.help_win, "Then you should be done! If you have any issues, Google may have a better answer.", '#cbdbfc', x=5, y=270)
def detect_urls_help(self):
self.add_label(self.help_win, "Selenium - Help", '#cbdbfc', x=190, y=3, font=self.f)
self.add_label(self.help_win, "The option that is next to the download button looks very intimidating, doesn't it?", '#cbdbfc', x=5, y=27)
self.add_label(self.help_win, "It uses the well-known python module 'Selenium' for webbrowser automation.", '#cbdbfc', x=5, y=47)
self.add_label(self.help_win, "You can use it as a normal browser, and clicking 'Execute' to catch all downloadable URLs.", '#cbdbfc', x=5, y=67)
self.add_label(self.help_win, "But how do you use it?", '#cbdbfc', x=5, y=90)
f = Font(family="TkDefaultFont", size=8, weight=BOLD)
self.add_label(self.help_win, "- Install a WebDriver for the browser you want to use.", '#cbdbfc', x=5, y=110, font=f)
self.add_label(self.help_win, "- You can do this by going to the 'Tools' tab and choosing 'Install WebDriver'.", '#cbdbfc', x=5, y=140, font=f)
self.add_label(self.help_win, "- Next, find that .exe file that you installed and place it anywhere on your machine.", '#cbdbfc', x=5, y=170, font=f)
self.add_label(self.help_win, "- Then go to 'Settings' in the 'File' tab and in Selenium Settings set the PATH to the .exe!", '#cbdbfc', x=5, y=200, font=f)
self.add_label(self.help_win, "- If your using the Firefox browser you can choose to link your Firefox profile (optional).", '#cbdbfc', x=5, y=230, font=f)
self.add_label(self.help_win, "- When you open selenium, a command-line for the .exe will come up. Do not close it!", '#cbdbfc', x=5, y=260, font=f)
def downloading_videos_help(self):
self.add_label(self.help_win, "Downloading Videos - Help", '#cbdbfc', x=140, y=3, font=self.f)
self.add_label(self.help_win, "Looks like you need help with Downloading Videos. I got you covered!", '#cbdbfc', x=5, y=30)
self.add_label(self.help_win, "Here are some common issues:", '#cbdbfc', x=5, y=50)
f = Font(family="TkDefaultFont", size=8, weight=BOLD)
self.add_label(self.help_win, "- Blocked Websites", '#cbdbfc', x=5, y=70, font=f)
self.add_label(self.help_win, "- Bad Network", '#cbdbfc', x=5, y=90, font=f)
self.add_label(self.help_win, "- Checked downloading.log or not?", '#cbdbfc', x=5, y=110, font=f)
self.add_label(self.help_win, "- Has FFmpeg or something simallur been installed?", '#cbdbfc', x=5, y=130, font=f)
self.add_label(self.help_win, "- Is FFmpeg a System Environmental Variable?", '#cbdbfc', x=5, y=150, font=f)
self.add_label(self.help_win, "- Is the video available in your country?", '#cbdbfc', x=5, y=170, font=f)
self.add_label(self.help_win, "If none of them options help, consider searching up the problem.", '#cbdbfc', x=5, y=200)
self.add_label(self.help_win, "If still nothing, go to Other Options and check 'Print various debugging info' and then", '#cbdbfc', x=5, y=240)
self.add_label(self.help_win, "download the video again and screenshot it and make an issue on ", '#cbdbfc', x=5, y=260)
self.add_label(self.help_win, "GitHub.", '#cbdbfc', "blue", x=356, y=260,
bind=(True, "<Button-1>", lambda event: webbrowser.open('https://github.com/Gloryness/YoutubeDL-GUI/issues')), bind1=(True, "<Enter>"), bind2=(True, "<Leave>"))
def other_issues(self):
f = Font(family="TkDefaultFont", size=10, weight=BOLD)
self.add_label(self.help_win, "Other Issues - Help", '#cbdbfc', x=180, y=3, font=self.f)
self.add_label(self.help_win, "If you need any other help, then feel free to create an Issue on github.", '#cbdbfc', x=25, y=120, font=f)
self.add_label(self.help_win, "A response usually comes within the day.", '#cbdbfc', x=100, y=140, font=f)
self.add_label(self.help_win, ">> Github Link <<", '#cbdbfc', "blue", x=170, y=200, font=f,
bind=(True, "<Button-1>", lambda event: webbrowser.open('https://github.com/Gloryness/YoutubeDL-GUI/issues')), bind1=(True, "<Enter>"), bind2=(True, "<Leave>"))
def about_gui(self):
self.add_label(self.help_win, "About this GUI", "#cbdbfc", x=185, y=5, font=self.f)
self.add_label(self.help_win, "This GUI was made to make Downloading Videos easier, but in a bit of a stylish way too :)", "#cbdbfc", x=5, y=40)
self.add_label(self.help_win, "It took 2 months to make, on & off and it was all made by one person! (see credits)", "#cbdbfc", x=5, y=60)
self.add_label(self.help_win, "If you would like to look at the source code, it's all on Github.", "#cbdbfc", x=5, y=100)
self.add_label(self.help_win, "If you would like to request a feature, make an Issue on Github.", "#cbdbfc", x=5, y=120)
self.add_label(self.help_win, "If you encounter any bugs, be sure to report them on Github.", "#cbdbfc", x=5, y=140) |
carema2.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2022/3/8 5:32 下午
# @File : carema2.py
# @author : Akaya
# @Software: PyCharm
# carema2 :
import cv2
import queue
import os
import numpy as np
from threading import Thread
import datetime, _thread
import subprocess as sp
import time
# Thread lock (unused below, but kept available for frame access control).
mutex = _thread.allocate_lock()
# Queue holding frames captured from the camera, consumed by the push thread.
frame_queue = queue.Queue()
# RTMP push address; the frontend pulls the stream from this URL.
# 2019 is the port ffmpeg/nginx was configured with on the host.
rtmpUrl = "rtmp://192.168.40.145:2019/live/1"
# ffmpeg command line used for streaming: raw BGR frames are piped in on
# stdin and encoded to an x264 FLV stream pushed to rtmpUrl.
command = ['ffmpeg',
'-y',
'-f', 'rawvideo',
'-vcodec', 'rawvideo',
'-pix_fmt', 'bgr24',
'-s', "{}x{}".format(640, 480),  # frame resolution
'-r', str(25.0),  # video frame rate
'-i', '-',
'-c:v', 'libx264',
'-pix_fmt', 'yuv420p',
'-preset', 'ultrafast',
'-f', 'flv',
rtmpUrl]
def Video():
    """Capture frames from the default camera and push them onto frame_queue.

    Runs until the device closes or a frame read fails.

    Raises:
        IOError: if the capture device cannot be opened.
    """
    vid = cv2.VideoCapture(0)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    try:
        while vid.isOpened():
            return_value, frame = vid.read()
            # A failed read returns (False, None); pushing None downstream
            # would crash the streaming thread, so stop cleanly instead.
            if not return_value:
                break
            frame_queue.put(frame)
    finally:
        # Release the camera handle so other processes can reopen it.
        vid.release()
def push_frame():
    """Pop frames from frame_queue and pipe them to ffmpeg for RTMP streaming.

    Blocks forever; intended to run on its own thread (see run()).
    """
    # Guard against another thread still building `command` before opening
    # the pipe to ffmpeg.
    while True:
        if len(command) > 0:
            pipe = sp.Popen(command, stdin=sp.PIPE)
            break
    while True:
        if not frame_queue.empty():
            frame = frame_queue.get()
            # Skip sentinel/failed frames rather than crashing the encoder.
            if frame is None:
                continue
            # numpy's tostring() is deprecated; tobytes() yields the same
            # raw buffer. ffmpeg reads it as one bgr24 frame.
            pipe.stdin.write(frame.tobytes())
def run():
    """Launch the camera-capture worker and the RTMP push worker threads."""
    capture_worker = Thread(target=Video)
    stream_worker = Thread(target=push_frame)
    capture_worker.start()
    stream_worker.start()


if __name__ == '__main__':
    run()
|
db_import_multiplexer.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""A loading-only EventMultiplexer that actually populates a SQLite DB."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import os
import threading
import time
import six
from six.moves import queue, xrange # pylint: disable=redefined-builtin
from tensorboard import data_compat
from tensorboard.backend.event_processing import directory_watcher
from tensorboard.backend.event_processing import event_file_loader
from tensorboard.backend.event_processing import io_wrapper
from tensorboard.backend.event_processing import sqlite_writer
from tensorboard.compat import tf
from tensorboard.compat.proto import event_pb2
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
class DbImportMultiplexer(object):
  """A loading-only `EventMultiplexer` that populates a SQLite DB.

  This EventMultiplexer only loads data; it provides no read APIs.
  """

  def __init__(self,
               db_connection_provider,
               purge_orphaned_data,
               max_reload_threads,
               use_import_op):
    """Constructor for `DbImportMultiplexer`.

    Args:
      db_connection_provider: Provider function for creating a DB connection.
      purge_orphaned_data: Whether to discard any events that were "orphaned" by
        a TensorFlow restart.
      max_reload_threads: The max number of threads that TensorBoard can use
        to reload runs. Each thread reloads one run at a time. If not provided,
        reloads runs serially (one after another).
      use_import_op: If True, use TensorFlow's import_event() op for imports,
        otherwise use TensorBoard's own sqlite ingestion logic.
    """
    logger.info('DbImportMultiplexer initializing')
    self._db_connection_provider = db_connection_provider
    self._purge_orphaned_data = purge_orphaned_data
    self._max_reload_threads = max_reload_threads
    self._use_import_op = use_import_op
    self._event_sink = None
    self._run_loaders = {}
    if self._purge_orphaned_data:
      # Logger.warn() is deprecated; warning() is the supported spelling.
      logger.warning(
          '--db_import does not yet support purging orphaned data')
    conn = self._db_connection_provider()
    # Extract the file path of the DB from the DB connection.
    rows = conn.execute('PRAGMA database_list').fetchall()
    db_name_to_path = {row[1]: row[2] for row in rows}
    self._db_path = db_name_to_path['main']
    logger.info('DbImportMultiplexer using db_path %s', self._db_path)
    # Set the DB in WAL mode so reads don't block writes.
    conn.execute('PRAGMA journal_mode=wal')
    conn.execute('PRAGMA synchronous=normal')  # Recommended for WAL mode
    sqlite_writer.initialize_schema(conn)
    logger.info('DbImportMultiplexer done initializing')

  def _CreateEventSink(self):
    """Creates the event sink implementation chosen by `use_import_op`."""
    if self._use_import_op:
      return _ImportOpEventSink(self._db_path)
    else:
      return _SqliteWriterEventSink(self._db_connection_provider)

  def AddRunsFromDirectory(self, path, name=None):
    """Load runs from a directory; recursively walks subdirectories.

    If path doesn't exist, no-op. This ensures that it is safe to call
    `AddRunsFromDirectory` multiple times, even before the directory is made.

    Args:
      path: A string path to a directory to load runs from.
      name: Optional, specifies a name for the experiment under which the
        runs from this directory hierarchy will be imported. If omitted, the
        path will be used as the name.
    Raises:
      ValueError: If the path exists and isn't a directory.
    """
    logger.info('Starting AddRunsFromDirectory: %s (as %s)', path, name)
    for subdir in io_wrapper.GetLogdirSubdirectories(path):
      logger.info('Processing directory %s', subdir)
      if subdir not in self._run_loaders:
        logger.info('Creating DB loader for directory %s', subdir)
        experiment_name, run_name = self._get_exp_and_run_names(
            path, subdir, name)
        self._run_loaders[subdir] = _RunLoader(
            subdir=subdir,
            experiment_name=experiment_name,
            run_name=run_name)
    logger.info('Done with AddRunsFromDirectory: %s', path)

  def Reload(self):
    """Load events from every detected run."""
    logger.info('Beginning DbImportMultiplexer.Reload()')
    # Defer event sink creation until needed; this ensures it will only exist
    # in the thread that calls Reload(), since DB connections must be
    # thread-local.
    if not self._event_sink:
      self._event_sink = self._CreateEventSink()
    # Use collections.deque() for speed when we don't need blocking since it
    # also has thread-safe appends/pops.
    loader_queue = collections.deque(six.itervalues(self._run_loaders))
    loader_delete_queue = collections.deque()

    def batch_generator():
      # Drain loader_queue, yielding event batches; loaders whose directory
      # vanished are queued for removal after the reload pass.
      while True:
        try:
          loader = loader_queue.popleft()
        except IndexError:
          return
        try:
          for batch in loader.load_batches():
            yield batch
        except directory_watcher.DirectoryDeletedError:
          loader_delete_queue.append(loader)
        except (OSError, IOError) as e:
          logger.error('Unable to load run %r: %s', loader.subdir, e)

    num_threads = min(self._max_reload_threads, len(self._run_loaders))
    if num_threads <= 1:
      logger.info('Importing runs serially on a single thread')
      for batch in batch_generator():
        self._event_sink.write_batch(batch)
    else:
      output_queue = queue.Queue()
      sentinel = object()

      def producer():
        try:
          for batch in batch_generator():
            output_queue.put(batch)
        finally:
          output_queue.put(sentinel)

      logger.info('Starting %d threads to import runs', num_threads)
      for i in xrange(num_threads):
        thread = threading.Thread(target=producer, name='Loader %d' % i)
        thread.daemon = True
        thread.start()
      num_live_threads = num_threads
      while num_live_threads > 0:
        output = output_queue.get()
        # BUG FIX: compare identity, not equality -- `==` could invoke an
        # arbitrary __eq__ on a batch object; the sentinel is only
        # meaningful by identity.
        if output is sentinel:
          num_live_threads -= 1
          continue
        self._event_sink.write_batch(output)
    for loader in loader_delete_queue:
      # Logger.warn() is deprecated; warning() is the supported spelling.
      logger.warning('Deleting loader %r', loader.subdir)
      del self._run_loaders[loader.subdir]
    logger.info('Finished with DbImportMultiplexer.Reload()')

  def _get_exp_and_run_names(self, path, subdir, experiment_name_override=None):
    """Derives (experiment_name, run_name) for a run subdirectory of path."""
    if experiment_name_override is not None:
      return (experiment_name_override, os.path.relpath(subdir, path))
    sep = io_wrapper.PathSeparator(path)
    path_parts = os.path.relpath(subdir, path).split(sep, 1)
    experiment_name = path_parts[0]
    run_name = path_parts[1] if len(path_parts) == 2 else '.'
    return (experiment_name, run_name)
# Struct holding a list of tf.Event serialized protos along with metadata about
# the associated experiment and run.
_EventBatch = collections.namedtuple('EventBatch',
['events', 'experiment_name', 'run_name'])
class _RunLoader(object):
"""Loads a single run directory in batches."""
# Flush a batch after this many events...
_BATCH_COUNT = 5000
# ...or after this many serialized bytes, whichever comes first.
_BATCH_BYTES = 2**20 # 1 MiB
def __init__(self, subdir, experiment_name, run_name):
"""Constructs a `_RunLoader`.
Args:
subdir: string, filesystem path of the run directory
experiment_name: string, name of the run's experiment
run_name: string, name of the run
"""
self._subdir = subdir
self._experiment_name = experiment_name
self._run_name = run_name
self._directory_watcher = directory_watcher.DirectoryWatcher(
subdir,
event_file_loader.RawEventFileLoader,
io_wrapper.IsTensorFlowEventsFile)
@property
def subdir(self):
return self._subdir
def load_batches(self):
"""Returns a batched event iterator over the run directory event files."""
event_iterator = self._directory_watcher.Load()
while True:
events = []
event_bytes = 0
start = time.time()
# Accumulate serialized events until either batch limit is hit; the
# iterator is resumed on the next outer-loop pass.
for event_proto in event_iterator:
events.append(event_proto)
event_bytes += len(event_proto)
if len(events) >= self._BATCH_COUNT or event_bytes >= self._BATCH_BYTES:
break
elapsed = time.time() - start
logger.debug('RunLoader.load_batch() yielded in %0.3f sec for %s',
elapsed, self._subdir)
# Empty batch means the iterator is exhausted: stop generating.
if not events:
return
yield _EventBatch(
events=events,
experiment_name=self._experiment_name,
run_name=self._run_name)
# Abstract base for the two import backends (_ImportOpEventSink and
# _SqliteWriterEventSink).
@six.add_metaclass(abc.ABCMeta)
class _EventSink(object):
"""Abstract sink for batches of serialized tf.Event data."""
@abc.abstractmethod
def write_batch(self, event_batch):
"""Writes the given event batch to the sink.
Args:
event_batch: an _EventBatch of event data.
"""
raise NotImplementedError()
class _ImportOpEventSink(_EventSink):
"""Implementation of EventSink using TF's import_event() op."""
def __init__(self, db_path):
"""Constructs an ImportOpEventSink.
Args:
db_path: string, filesystem path of the DB file to open
"""
self._db_path = db_path
# Cache of (experiment_name, run_name) -> writer_fn so the graph/session
# is built only once per run.
self._writer_fn_cache = {}
def _get_writer_fn(self, event_batch):
# Returns a closure that imports one serialized event for this batch's
# (experiment, run); builds the TF graph/session on first use.
key = (event_batch.experiment_name, event_batch.run_name)
if key in self._writer_fn_cache:
return self._writer_fn_cache[key]
with tf.Graph().as_default():
placeholder = tf.compat.v1.placeholder(shape=[], dtype=tf.string)
writer = tf.contrib.summary.create_db_writer(
self._db_path,
experiment_name=event_batch.experiment_name,
run_name=event_batch.run_name)
with writer.as_default():
# TODO(nickfelt): running import_event() one record at a time is very
# slow; we should add an op that accepts a vector of records.
import_op = tf.contrib.summary.import_event(placeholder)
session = tf.compat.v1.Session()
session.run(writer.init())
def writer_fn(event_proto):
session.run(import_op, feed_dict={placeholder: event_proto})
self._writer_fn_cache[key] = writer_fn
return writer_fn
def write_batch(self, event_batch):
# Imports every serialized event in the batch, timing the whole batch.
start = time.time()
writer_fn = self._get_writer_fn(event_batch)
for event_proto in event_batch.events:
writer_fn(event_proto)
elapsed = time.time() - start
logger.debug(
'ImportOpEventSink.WriteBatch() took %0.3f sec for %s events', elapsed,
len(event_batch.events))
class _SqliteWriterEventSink(_EventSink):
"""Implementation of EventSink using SqliteWriter."""
def __init__(self, db_connection_provider):
"""Constructs a SqliteWriterEventSink.
Args:
db_connection_provider: Provider function for creating a DB connection.
"""
self._writer = sqlite_writer.SqliteWriter(db_connection_provider)
def write_batch(self, event_batch):
# Parses each serialized event, groups summary values by tag, then
# writes them all in one SqliteWriter call.
start = time.time()
tagged_data = {}
for event_proto in event_batch.events:
event = event_pb2.Event.FromString(event_proto)
self._process_event(event, tagged_data)
if tagged_data:
self._writer.write_summaries(
tagged_data,
experiment_name=event_batch.experiment_name,
run_name=event_batch.run_name)
elapsed = time.time() - start
logger.debug(
'SqliteWriterEventSink.WriteBatch() took %0.3f sec for %s events',
elapsed, len(event_batch.events))
def _process_event(self, event, tagged_data):
"""Processes a single tf.Event and records it in tagged_data."""
event_type = event.WhichOneof('what')
# Handle the most common case first.
if event_type == 'summary':
for value in event.summary.value:
value = data_compat.migrate_value(value)
# The `values` list fetched here is mutated in place, so existing tag
# entries accumulate without re-assignment.
# NOTE(review): `metadata` from the unpack below is never used; the
# stored TagData takes value.metadata from the first event instead.
tag, metadata, values = tagged_data.get(value.tag, (None, None, []))
values.append((event.step, event.wall_time, value.tensor))
if tag is None:
# Store metadata only from the first event.
tagged_data[value.tag] = sqlite_writer.TagData(
value.tag, value.metadata, values)
elif event_type == 'file_version':
pass # TODO: reject file version < 2 (at loader level)
elif event_type == 'session_log':
if event.session_log.status == event_pb2.SessionLog.START:
pass # TODO: implement purging via sqlite writer truncation method
elif event_type in ('graph_def', 'meta_graph_def'):
pass # TODO: support graphs
elif event_type == 'tagged_run_metadata':
pass # TODO: support run metadata
|
extech-ea15.py | #!/usr/bin/env python
# Copyright 2020 Kent A. Vander Velden <kent.vandervelden@gmail.com>
#
# If you use this software, please consider contacting me. I'd like to hear
# about your work.
#
# This file is part of Extech-EA15, a decoder for the Extech EA15 thermocouple
# datalogging thermometer.
#
# Please see LICENSE for limitations on use.
#
# If you see a permission problem with accessing serial ports, the following may help.
# Add yourself to the dialout group and remove modemmanager.
# $ adduser kent dialout
# $ apt remove modemmanager
import datetime
import multiprocessing as mp
import time
import serial
class Temperature:
    """A temperature value held internally in degrees Celsius.

    `set()` accepts Celsius ('C'), Fahrenheit ('F') or Kelvin ('K') input;
    an unknown unit marks the value invalid and leaves it unchanged.
    """
    v_ = 0          # stored value, always Celsius
    valid_ = False  # False until a value with a known unit has been set

    def __init__(self, v=None, u='C'):
        if v is not None:
            self.set(v, u)

    def __str__(self):
        return '{:.02f}C'.format(self.v_)

    def set(self, v, u='C'):
        """Store *v* expressed in unit *u* ('C', 'F' or 'K')."""
        converters = {'C': lambda x: x, 'F': self.f2c, 'K': self.k2c}
        convert = converters.get(u)
        if convert is None:
            self.valid_ = False
        else:
            self.valid_ = True
            self.v_ = convert(v)

    def C(self):
        """Value in Celsius."""
        return self.v_

    def F(self):
        """Value in Fahrenheit."""
        return self.c2f(self.v_)

    def K(self):
        """Value in Kelvin."""
        return self.c2k(self.v_)

    @staticmethod
    def f2c(v):
        return (v - 32) * (5 / 9.)

    @staticmethod
    def k2c(v):
        return v - 273.15

    @staticmethod
    def c2f(v):
        return v * (9 / 5.) + 32

    @staticmethod
    def c2k(v):
        return v + 273.15
class ExtechEA15Serial:
    """Serial-port decoder for the Extech EA15 thermocouple thermometer.

    Handles the live-reading packets the meter streams continuously, plus
    the request/length/data handshake used to download the datalog memory.
    """
    ser = None                # open serial handle, set by open()
    download_datalog_ = False # set via download_datalog() to start a dump

    def __init__(self, dev_fn=''):
        self.open(dev_fn)

    def __del__(self):
        if self.ser is not None:
            self.ser.close()

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        pass

    def open(self, dev_fn):
        """Open the meter's serial port.

        Timeout must be less than the interval between consecutive packets,
        ~1.5s, and not so long that recording the timestamp is delayed.
        0.1s seems fine.
        """
        self.ser = serial.Serial(dev_fn, 9600, timeout=.1)

    def decode(self, buf, dt=None):
        """Decode one 9-byte live-reading packet.

        Args:
            buf: bytes framed as 0x02 ... 0x03, 9 bytes total.
            dt: timestamp for the reading; defaults to now.
        Returns:
            dict with keys dt, t1, t2 (Temperature), type (sensor letter),
            valid (False when framing checks fail).
        """
        when = datetime.datetime.now() if dt is None else dt
        if not (len(buf) == 9 and buf[0] == 0x02 and buf[-1] == 0x03):
            return {'dt': when, 't1': Temperature(), 't2': Temperature(),
                    'type': '', 'valid': False}
        temp_units = {0: 'C', 2: 'K', 3: 'F'}
        sensor_types = {0: 'K', 1: 'J', 2: 'E', 3: 'T', 4: 'R', 5: 'S', 6: 'N'}
        # BUG FIX: the high byte is worth 256 (0x100), not 0xff (255); the
        # old math under-read any value of 25.6 units or more.  The datalog
        # length math in decode_one already used * 256.
        t1 = (buf[2] * 0x100 + buf[3]) / 10.
        t2 = (buf[5] * 0x100 + buf[6]) / 10.
        return {'dt': when,
                't1': Temperature(t1, temp_units[buf[1]]),
                't2': Temperature(t2, temp_units[buf[4]]),
                'type': sensor_types[buf[7]],
                'valid': True}

    def decode2(self, buf, start_dt):
        """Decode a datalog download packet.

        Each set begins with marker 00 55 aa 00 followed by the sample
        period in seconds, then 7-byte records.

        Returns:
            list of (seconds_per_sample, [reading dict, ...]) tuples.
        """
        if not (buf[0] == 0x02 and buf[-1] == 0x03):
            return []
        all_lst = []
        i = 1
        s = 0     # 0 = scanning for first marker, 1 = reading records
        sps = 0
        lst = []
        marker = b'\x00\x55\xaa\x00'
        while True:
            if s == 0:
                if len(buf) <= i + 5:
                    break
                if buf[i:i + 4] == marker:
                    s = 1
                    sps = buf[i + 4]
                    i += 5
                else:
                    i += 1
            else:
                if len(buf) <= i + 7:
                    break
                if buf[i:i + 4] == marker:
                    # New set: flush the current one and read its rate.
                    all_lst += [(sps, lst)]
                    lst = []
                    sps = buf[i + 4]
                    i += 5
                else:
                    # Re-frame the 7-byte record as a live packet for decode().
                    bb = b'\x02' + buf[i:i + 7] + b'\x03'
                    # BUG FIX: timestamp by record index within the set
                    # (len(lst)), not the raw byte offset i.
                    lst += [self.decode(
                        bb, start_dt + datetime.timedelta(seconds=len(lst) * sps))]
                    i += 7
        if i + 1 != len(buf):
            print(f'Truncated download: {i + 1} {len(buf)}')
        if lst:
            all_lst += [(sps, lst)]
        return all_lst

    # Datalog handshake state: 0 = idle, 1 = awaiting length packet,
    # 2 = awaiting data packet.
    datalog_download_state_ = 0
    # Payload byte count announced by the datalog length packet.
    datalog_expected_ = 0

    def decode_one(self):
        """Read and decode the next packet from the meter.

        Returns a reading dict (live packet), a list of datalog sets
        (completed download), or None on abort.
        """
        while True:
            if self.download_datalog_ and self.datalog_download_state_ == 0:
                self.datalog_download_state_ = 1
                self.download_datalog_ = False
            packet_type = 0
            buf = b''
            st0 = time.time()
            while True:
                c = self.ser.read()
                et = time.time()
                # There is a small delay, ~1.5s, between packets. Use the
                # delay to tokenize the serial stream: when it exceeds the
                # serial timeout, c comes back empty and buf is examined.
                if buf and not c:
                    if buf[0] == 0x02 and buf[-1] == 0x03:
                        if buf.startswith(b'\x02\x00\x55\xaa\x00'):
                            if len(buf) == self.datalog_expected_ + 2:
                                packet_type = 3
                        else:
                            if len(buf) == 9:
                                packet_type = 1
                            elif len(buf) == 5:
                                packet_type = 2
                        break
                    # BUG FIX: the original compared st0 - et, which is always
                    # negative, so neither recovery branch could ever fire.
                    # NOTE(review): nesting reconstructed from flattened
                    # source; these checks fire on a timeout with garbage buf.
                    if et - st0 > 5:
                        # Don't wait forever.
                        print('Aborting')
                        return None
                    elif et - st0 > .5:
                        # Badly framed data: discard and start over.
                        print('Restarting')
                        buf = b''
                        continue
                buf += c
            if packet_type == 0:
                print('Unable to decode:', buf)
            else:
                # Drive the handshake: 0x41 requests the datalog length
                # packet, 0x55 requests the datalog data itself.
                if packet_type == 1:
                    if self.datalog_download_state_ == 1:
                        self.ser.write(b'\x41')
                        self.ser.flush()
                    elif self.datalog_download_state_ == 2:
                        self.ser.write(b'\x55')
                        self.ser.flush()
                if packet_type == 1:
                    return self.decode(buf)
                elif packet_type == 2:
                    # Datalog length packet examples (observed):
                    # 02 00 8c 80 03 <= empty datalog           35968
                    # 02 00 8c 8c 03 <= 1 entry   12 = 1*5 + 1*7
                    # 02 00 8c 93 03 <= 2 entries 19 = 1*5 + 2*7
                    # 02 00 8c a1 03 <= 4 entries 33 = 1*5 + 4*7
                    # 02 00 8c c9 03 <= 2 sets (1+8 records) 73 = 2*5 + 9*7
                    # 02 00 8d 57 03 <= 30 entries 215 = 1*5 + 30*7
                    self.datalog_expected_ = buf[2] * 256 + buf[3] - 0x8c80
                    if self.datalog_expected_ == 0:
                        print('Datalog is empty')
                        self.datalog_download_state_ = 0
                    else:
                        print(f'Expecting {self.datalog_expected_} bytes from datalog')
                        self.datalog_download_state_ = 2
                elif packet_type == 3:
                    self.datalog_download_state_ = 0
                    self.datalog_expected_ = 0
                    return self.decode2(buf, datetime.datetime.now())

    def decode_loop(self):
        """Decode and print packets forever."""
        while True:
            v = self.decode_one()
            if v is None:
                continue
            print(v)

    def download_datalog(self):
        """Request a datalog download unless one is already in progress."""
        if self.datalog_download_state_ == 0:
            self.download_datalog_ = True
# Wraps ExtechEA15Serial in a child process; readings arrive on q, datalog
# dumps on q2, and q3 carries control messages into the worker.
class ExtechEA15Threaded:
q = None
q2 = None
q3 = None
ea15 = None
download_datalog_ = False
dev_fn_ = ''
def __init__(self, dev_fn=''):
self.q = mp.Queue()
self.q2 = mp.Queue()
self.q3 = mp.Queue()
self.dev_fn_ = dev_fn
self.ea15 = ExtechEA15Serial(dev_fn)
def __del__(self):
pass
def __enter__(self):
self.run()
return self
def __exit__(self, type, value, tb):
pass
def open(self, dev_fn):
self.ea15.open(dev_fn)
def run(self):
# Start the worker loop in a separate process.
p = mp.Process(target=self.main, args=(self,))
p.start()
# NOTE(review): the target is the *bound* method self.main, and self is
# also passed via args, so `self_` and `self` refer to the same object --
# an odd but working signature.
def main(self_, self):
# self.ea15 = ExtechEA15(self.dev_fn_)
while True:
# Handle any pending control messages first.
if not self.q3.empty():
s = self.q3.get()
if s == 'Datalog':
self.ea15.download_datalog()
# Route decoded output: dict = live reading, list = datalog dump.
v = self.ea15.decode_one()
if v is None:
pass
elif isinstance(v, dict):
self.q.put(v)
elif isinstance(v, list):
self.q2.put(v)
def download_datalog(self):
# Ask the worker process to start a datalog download.
self.q3.put('Datalog')
# Demo driver: the `if False:` branches below are alternative usage examples
# kept for reference; only the final matplotlib live-plot branch runs.
def main(dev_fn):
# Format one reading dict as a single log line.
def decode(v):
return f'{v["dt"]} : {v["t1"]} : {v["t2"]} : {v["type"]} : {v["valid"]}'
# Below are a few different ways to use the classes
if False:
with ExtechEA15Serial(dev_fn) as ea15:
ea15.decode_loop()
if False:
with ExtechEA15Serial(dev_fn) as ea15:
for i in range(3):
print(i, ea15.decode_one())
if False:
ea15 = ExtechEA15Serial(dev_fn)
print(ea15.decode_one())
if False:
ea15 = ExtechEA15Threaded(dev_fn)
ea15.run()
while True:
while not ea15.q.empty():
v = ea15.q.get()
print(decode(v))
if False:
with ExtechEA15Threaded(dev_fn) as ea15:
import time, random
while True:
while not ea15.q.empty():
v = ea15.q.get()
print(decode(v))
# import queue
# try:
# v = ea15.q.get(timeout=.05)
# print('dequeued', v)
# except queue.Empty:
# print('timeout')
# Randomly exercise the datalog download path (~5% per cycle).
if random.random() < .05:
print('Requesting datalog download')
ea15.download_datalog()
while not ea15.q2.empty():
v2_ = ea15.q2.get()
for j, v2 in enumerate(v2_):
sps, lst = v2
print(f'Datalog set {j + 1} with {len(lst)} records, sampled every {sps} seconds')
for i, v in enumerate(lst):
v['dt'] = i * sps
print(f'{j + 1:02d} : {i + 1:04d} : {decode(v)}')
time.sleep(.5)
# Live plot of T1/T2 against elapsed seconds.
if True:
import matplotlib.pyplot as plt
# If you encounter an error about not being able to use the TkInter matplotlib backend
# or unable to load the tkinter module, try the following. (TkInter cannot be installed
# by pipenv.)
# sudo apt-get install python3-tk
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
x = []
y1 = []
y2 = []
line1, = ax.plot(x, y1, 'r-', label='T1') # Returns a tuple of line objects, thus the comma
line2, = ax.plot(x, y2, 'b-', label='T2') # Returns a tuple of line objects, thus the comma
ax.set_xlabel('Time [s]')
ax.set_ylabel('Temperature [C]')
plt.legend()
with ExtechEA15Threaded(dev_fn) as ea15:
import time, random
t0 = 0
while True:
while not ea15.q.empty():
v = ea15.q.get()
print(decode(v))
if not v['valid']:
continue
y1 += [v['t1'].C()]
y2 += [v['t2'].C()]
# First valid reading defines the time origin t0.
if x == []:
t0 = v['dt']
x += [(v['dt'] - t0).total_seconds()]
line1.set_xdata(x)
line1.set_ydata(y1)
line2.set_xdata(x)
line2.set_ydata(y2)
ax.relim()
ax.autoscale_view()
fig.canvas.draw()
fig.canvas.flush_events()
time.sleep(.5)
def find_dev(id_str):
    """Return the /dev/serial/by-id device path whose name contains id_str.

    Returns '' when no device matches, or when the directory does not exist
    (no serial adapters plugged in, or a non-Linux host) -- the original
    crashed with FileNotFoundError in that case.
    """
    import os
    dn = '/dev/serial/by-id/'
    try:
        entries = os.listdir(dn)
    except (FileNotFoundError, NotADirectoryError):
        return ''
    for fn in entries:
        if id_str in fn:
            return os.path.join(dn, fn)
    return ''
if __name__ == "__main__":
dev_fn = find_dev('usb-Prolific_Technology_Inc._USB-Serial_Controller')
if not dev_fn:
print('No device found')
else:
print('Using device:', dev_fn)
main(dev_fn)
|
main.py | import hashlib
import json
import os
import threading
import time
import engine
import requests
import uuid
import subprocess
import socket
import shutil
from pathlib import Path
from fastapi import FastAPI, Request, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from pydantic import BaseModel
devs = {
"Matej Justus": {
"git": "https://github.com/UntriexTv", "mail": "maco.justus@gmail.com"},
"Benjamin Kojda": {
"git": "https://github.com/Tucan444", "mail": "ben4442004@gmail.com"
},
"Jakub Ďuriš": {
"git": "https://github.com/ff0082", "mail": "jakub1.duris@gmail.com"
},
"Samuel Šubika": {
"git": "https://github.com/JustSteel", "mail": "SteelSamko2000@gmail.com"}
}
check = engine.Scan()
check.check_to_go()
if check.state_list["error"]:
for error in check.errors:
print(error)
check.fix_version()
with open("settings.json", "r", encoding='utf-8') as f: # loading settings
settings = json.load(f)
with open("filesystem.json", "r", encoding='utf-8') as f: # loading filesystem
filesystem = json.load(f)
if settings["clear_cache_on_startup"]:
shutil.rmtree("cache")
os.mkdir("cache")
def get_my_ip():
    """Return this host's primary outbound IPv4 address.

    "Connecting" a UDP socket to a public address sends no packets but makes
    the OS pick the local interface/address it would route through, which we
    read back with getsockname().
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # try/finally ensures the socket is closed even if connect() raises
        # (the original leaked the descriptor on failure).
        s.connect(("8.8.8.8", 80))
        return s.getsockname()[0]
    finally:
        s.close()
IP = get_my_ip()
ID = filesystem["ID"]
location = filesystem["location"]
time_to_save = settings["time_to_save"]
app = FastAPI() # init of FastAPI
origins = ["*", ]
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
log = engine.Log(settings["log"]) # init of LOG
update = engine.Update()
offline = []
save_time = time.time()
time_to_heartbeat = settings["time_to_heartbeat"] # Raspberry will be requesting heartbeat every __ seconds
time_to_heartbeat_offline = settings[
"time_to_heartbeat_offline"] # Raspberry will be requesting heartbeat every __ seconds from offline rpi
# json variables
heartbeat_table = settings["heartbeat_table"]
sensors = {}
messages = [] # {user: "", timestamp: time.Time(), message: ""}
if ID not in heartbeat_table["ID"]:
heartbeat_table["ID"].append(ID)
heartbeat_table["IP"].append(IP)
heartbeat_table["location"].append(location)
heartbeat_table["file_system"].append(filesystem)
heartbeat_table["last_heartbeat"].append(time_to_heartbeat)
else:
index_server_run = heartbeat_table["ID"].index(ID)
heartbeat_table["IP"][index_server_run] = IP
heartbeat_table["location"][index_server_run] = location
heartbeat_table["file_system"][index_server_run] = filesystem
heartbeat_table["last_heartbeat"][index_server_run] = time_to_heartbeat
heartbeat_table["my_ip"] = IP
# Pydantic request body for /heartbeat: index-parallel lists describing every
# known server (entry i of each list belongs to the same server), plus the
# sender's own IP.
class ServerTable(BaseModel): # table of content for heartbeat request
ID: list
IP: list
location: list
file_system: list
last_heartbeat: list
my_ip: str
# Request body for a single sensor reading.
class Sensor(BaseModel):
name: str
value: str
# Request body for a chat message.
class Message(BaseModel):
m_sender: str
message: str
# Root endpoint: simple liveness/identity check.
@app.get("/")
def read_root():
return "wikispot"
# Merge the sender's view of the cluster into our heartbeat_table and return
# our (possibly updated) table plus this server's identity.  All per-server
# fields live in index-parallel lists keyed by position in heartbeat_table["ID"].
@app.post("/heartbeat")
def heartbeat(s_table: ServerTable, request: Request):
global heartbeat_table
log.message(f"server requested heartbeat {s_table.my_ip}:{request.client.port}")
log.debug(f"Recieved server table: {s_table}")
try:
for position, server_id in enumerate(s_table.ID):
# Known server: keep whichever heartbeat value is newer (larger).
if server_id in heartbeat_table["ID"]:
if heartbeat_table["last_heartbeat"][heartbeat_table["ID"].index(server_id)] < \
s_table.last_heartbeat[position]:
heartbeat_table["last_heartbeat"][heartbeat_table["ID"].index(server_id)] = s_table.last_heartbeat[
position]
log.debug(f"updated {server_id}`s heartbeat to {s_table.last_heartbeat[position]}")
heartbeat_table["file_system"][heartbeat_table["ID"].index(server_id)] = s_table.file_system[
position]
# Our own entry: refresh with the configured interval.
elif server_id == ID:
log.debug(f"Updated my heartbeat from {s_table.last_heartbeat[position]} to {time_to_heartbeat}")
heartbeat_table["last_heartbeat"][heartbeat_table["ID"].index(ID)] = time_to_heartbeat
# Previously unknown server: append a full row across all lists.
elif server_id not in heartbeat_table["ID"]:
log.message(f"Heartbeat from new server:\n ID: {server_id} IP: {s_table.my_ip}")
heartbeat_table["ID"].append(int(s_table.ID[position]))
heartbeat_table["IP"].append(s_table.IP[position])
heartbeat_table["location"].append(s_table.location[position])
heartbeat_table["file_system"].append(s_table.file_system[position])
heartbeat_table["last_heartbeat"].append(int(s_table.last_heartbeat[position]))
log.debug(f"Created {server_id}`s heartbeat: {s_table.last_heartbeat[position]}")
except Exception as error:
log.error(f"heartbeat > {error}")
# A heartbeat from a server we had marked offline brings it back online.
try:
if heartbeat_table["ID"][heartbeat_table["IP"].index(s_table.my_ip)] in offline:
offline.remove(heartbeat_table["ID"][heartbeat_table["IP"].index(s_table.my_ip)])
log.warning(f"{s_table.my_ip} gone online")
except Exception as error:
log.error(f"heartbeat > {error}")
return heartbeat_table, {"ID": ID, "file_system": filesystem, "location": location}
@app.get("/{IDx}/sensors")
def get_sensors(IDx: int, request: Request):
    """Serve this node's sensor readings, or proxy the request to node IDx."""
    global sensors
    if IDx == ID:
        log.debug(f"Sensor data sent to {request.client.host} :\n {sensors}")
        return sensors
    try:
        # Look up the owner's IP and fetch its sensor endpoint.
        peer_ip = heartbeat_table["IP"][heartbeat_table["ID"].index(IDx)]
        r = requests.get(f"""http://{peer_ip}:8000/{IDx}/sensors""")
        log.debug(f"Sensor data from {IDx} sent to {request.client.host} :\n {r.json()}")
        return r.json()
    except Exception as error:
        log.error(f"Sensor data download from {IDx} failed.\n ERROR: {error}")
        return f"Sensor data download from {IDx} failed.\n ERROR: {error}"
@app.get("/files/{IDx}/{file}")
def get_file(IDx: int, file: str, request: Request):
    """Serve files/<file> when IDx is this node; otherwise fetch it from the
    owning node through an on-disk cache validated by MD5 comparison.
    """
    log.debug(f"""{request.client} requested {file} from {"this server" if IDx == ID else f"id {IDx}"}""")
    if IDx == ID:
        # Local request: serve straight out of files/.
        if os.path.isfile(f"files/{file}"):
            return FileResponse(f"files/{file}")
        else:
            log.warning(f"{request.client} tried to access file ({file}) that does not exist on this server.")
            return f"ERROR: File {file} does not exist."
    if IDx not in heartbeat_table["ID"]:
        log.warning(f"{request.client} tried to access id ({IDx}) that does not exist.")
        return f"ERROR: {IDx} does not exist."
    else:
        server_ip = heartbeat_table["IP"][heartbeat_table["ID"].index(IDx)]
        if os.path.isdir(f"cache/{IDx}"):
            if os.path.isfile(f"cache/{IDx}/{file}"):
                # Cached copy present: hash it and ask the owner to compare.
                with open(f"cache/{IDx}/{file}", "rb") as compared_file:
                    m = hashlib.md5()
                    for line in compared_file:
                        m.update(line)
                rr = requests.get(f"""http://{server_ip}:8000/compare/{file}""")
                if rr.text.strip('"') != str(m.hexdigest()):
                    # Stale copy: fall through to the re-download below.
                    log.warning(f"{file} on server {server_ip} is changed.")
                else:
                    log.debug(f"returning cached file cache/{IDx}{file}")
                    return FileResponse(f"cache/{IDx}/{file}")
            elif sum(file.stat().st_size for file in Path("cache").rglob('*'))/1024**2 > settings["cache_size_mb"]:
                # Cache over budget: wipe it completely and recreate this
                # node's directory.  (The genexp's `file` only shadows the
                # parameter inside the expression; the parameter is intact.)
                shutil.rmtree("cache")
                os.mkdir("cache")
                log.message(f"""Clearing cache, because of limit of {settings["cache_size_mb"]}MB""")
                os.mkdir(f"cache/{IDx}")
        else:
            os.mkdir(f"cache/{IDx}")
        # Download from the owning node and keep a cached copy.
        r = requests.get(f"http://{server_ip}:8000/files/{IDx}/{file}")
        if "does not exist" in r.text:
            log.warning(f"{request.client} tried to access file ({file}) on id {IDx} that does not exist.")
            return f"ERROR: {file} does not exist."
        log.message(f"Downloaded {file} from {server_ip}")
        with open(f"cache/{IDx}/{file}", "wb") as save:
            save.write(bytes(r.content))
        return FileResponse(f"cache/{IDx}/{file}")
@app.post("/{IDx}/update_sensor")
def update_sensors(data: Sensor, request: Request, IDx: int):
    """Create, update or (via an empty value) delete a sensor, either locally
    (IDx == ID or -1) or by forwarding to the owning server."""
    global sensors
    if IDx in (ID, -1):
        if data.name not in sensors:
            # Unknown name: register a brand-new sensor.
            log.warning(f"{request.client} created new sensor.\n SENSOR: {data}")
            sensors[data.name] = data.value
            return f"Successfuly made new sensor"
        if not data.value:
            # An empty value acts as a delete request.
            log.message(f"{request.client.host} removed sensor {data.name}")
            del sensors[data.name]
        else:
            log.message(f"{request.client.host} updated sensor {data.name} with value {data.value}")
            sensors[data.name] = data.value
    elif IDx in heartbeat_table["ID"]:
        # Forward the update to the server that owns IDx.
        peer_ip = heartbeat_table["IP"][heartbeat_table["ID"].index(IDx)]
        r = requests.post(f"""http://{peer_ip}:8000/{IDx}/update_sensor""",
                          json={"name": data.name, "value": data.value})
        return r.text
    else:
        return f"ERROR: server {IDx} does not exist."
@app.get("/compare/{file}")
def comparision(file: str):
    """Return the MD5 hex digest of files/<file>, or an error string if the
    file does not exist."""
    try:
        with open(f"files/{file}", "rb") as fh:
            digest = hashlib.md5()
            for chunk in fh:
                digest.update(chunk)
        return digest.hexdigest()
    except FileNotFoundError:
        return f"ERROR {file} does not exist"
@app.get("/devices_list")
def get_devices_list():
    """This node's id followed by every non-empty peer file_system entry."""
    entries = [fs for fs in heartbeat_table["file_system"] if fs != ""]
    return [{"connected_id": ID}, *entries]
@app.get("/admin/get/{command}")
def admin_get(command: str, request: Request):
    """Admin dispatch endpoint: update queries, network-wide updates,
    single-node updates, and raw table dumps.

    NOTE(review): no authentication is visible here and `command` text from
    the URL reaches subprocess invocations below -- confirm this endpoint is
    never exposed to untrusted clients.
    """
    log.message(f"{request.client} used admin command.")
    if command == "get_updates":
        # Current version plus the list of available updates.
        return [update.get_version(), update.get_updates()]
    if "update-" in command:
        # "update-<version>": push the update to every other node first,
        # then update this node; collect each node's response in `state`.
        state = []
        version = command.split("-")[1]
        for rpi in heartbeat_table["IP"]:
            if rpi != IP:
                r = requests.get(f"""http://{rpi}:8000/admin/get/update_one-{version}""")
                if r.text.strip('"').split("\\n")[0] == "SUCCESS":
                    log.message(f"{rpi} was updated to {version}")
                else:
                    log.error(f"""{rpi} failed to update. Manual update may be needed for proper working of network.
                    Response from server: {r.text}""")
                state.append({rpi: r.text.strip('"').split("\\n")})
        # NOTE(review): check_output with a single string and no shell=True
        # treats the whole string as the program name -- compare the list
        # form used in the update_one branch below; confirm this ever works.
        subprocess.check_output(f"""python3 system.py update -version {version}""")
        log.message(f"All devices in network should be updated to {version}")
        state.append({IP: "updated"})
        return state
    if "update_one-" in command:
        # Update only this node via the updater script.
        state = subprocess.check_output(["python3", "system.py", "update", "-version", f"""{command.split("-")[1]}"""])
        log.message(state.decode("utf-8"))
        return state.decode("utf-8")
    if command == "heartbeat_table":
        return heartbeat_table
    if command == "filesystem":
        return filesystem
@app.post("/admin/{id_server}/upload_file")
async def create_upload_file(id_server: int, uploaded_file: UploadFile = File(...), patch: str = ""):
    """Store an uploaded file locally (id_server == ID) or relay it to the
    owning node via its own upload endpoint.

    NOTE(review): `patch` is a client-supplied path prefix joined by plain
    concatenation and the filename comes from the client -- path traversal
    looks possible here; confirm callers are trusted.
    """
    file_location = f"{patch}{uploaded_file.filename}"
    if id_server == ID:
        with open(file_location, "wb+") as file_object:
            file_object.write(uploaded_file.file.read())
    else:
        # Spool the upload into cache/ first, then forward that copy.
        # NOTE(review): the cached copy is never removed afterwards.
        with open(f"cache/{uploaded_file.filename}", "wb+") as file_object:
            file_object.write(uploaded_file.file.read())
        file = open(f"cache/{uploaded_file.filename}", "rb")
        # NOTE(review): `patch` is passed inside files= so it travels as a
        # file part, not a query/form field -- confirm the receiving side
        # actually sees it.
        requests.post(f"""http://{heartbeat_table["IP"][heartbeat_table["ID"].index(id_server)]}:8000/admin/{id_server}/upload_file""",
                      files={"uploaded_file": file, "patch": patch})
        file.close()
    return {"info": f"""file '{uploaded_file.filename}' saved at '{id_server}/{file_location}'"""}
@app.get("/messages/get")
def get_messages(timestamp: str = None):
    """Messages strictly newer than `timestamp` (oldest first); with no
    timestamp, the first ten stored messages."""
    if not timestamp:
        return messages[:10]
    newest_first = list(reversed(messages))
    for position, message in enumerate(newest_first):
        if float(message["timestamp"]) <= float(timestamp):
            # The `position` newest messages are strictly newer than the
            # cutoff; return them in chronological order.
            return messages[len(messages) - position:]
    if timestamp == "0":
        return messages
    return []
@app.get("/messages/register")
def register():
    """Hand out a short random client id plus up to nine stored messages."""
    client_id = uuid.uuid4().hex[24:]
    return [client_id, messages[:9]]
@app.get("/discovery")
def discovery():
    """Reachability probe; always answers "Success"."""
    reply = "Success"
    return reply
@app.post("/messages/post")
def post_messages(data: Message):
    """Append a chat message, trimming the stored list to the configured cap.

    NOTE(review): trimming runs before the append, so after a post the list
    can hold max_mess + 1 entries -- confirm whether the cap is inclusive.
    """
    log.debug(f"Message was posted. Sender: {data.m_sender}\n MESSAGE: {data.message}")
    if len(messages) >= settings["max_mess"]:
        del messages[:len(messages) - settings["max_mess"]]
    if data.m_sender and data.message:
        messages.append({"sender": data.m_sender, "message": data.message, "timestamp": time.time()})
        return "successful"
    else:
        return "Empty message/sender"
def send_heartbeat(ip, id):
    """POST our table to peer `ip` and adopt the merged table it returns."""
    global heartbeat_table
    status = "offline" if id in offline else "online"
    log.message(f"""sending heartbeat to {ip}({status})""")
    response = requests.post(f"http://{ip}:8000/heartbeat", data=json.dumps(heartbeat_table))
    payload = response.json()
    heartbeat_table = dict(payload[0])
    log.debug(json.dumps(payload, indent=4))
def mainloop():
    """Background loop: once per second, count down each peer's heartbeat,
    ping peers whose countdown expired, drop long-dead peers, and
    periodically persist the table to settings.json.
    """
    global save_time
    while True:
        for device_number, device_ID in enumerate(heartbeat_table["ID"]):
            if device_ID != ID:
                if int(heartbeat_table["last_heartbeat"][device_number]) < 0:
                    # Countdown expired: try to reach the peer directly.
                    try:
                        send_heartbeat(heartbeat_table["IP"][device_number], heartbeat_table["ID"][device_number])
                    except requests.exceptions.ConnectionError:
                        if heartbeat_table["ID"][device_number] not in offline:
                            # First failure: mark offline with a longer grace countdown.
                            log.warning(f"""{heartbeat_table["IP"][device_number]} disconnected/is not available""")
                            offline.append(heartbeat_table["ID"][device_number])
                            heartbeat_table["last_heartbeat"][int(device_number)] = int(time_to_heartbeat_offline)
                        else:
                            # Offline and expired again: forget the peer entirely.
                            # NOTE(review): deleting from the lists while
                            # enumerating them shifts later indices; the
                            # IndexError guard below papers over that.
                            offline.remove(heartbeat_table["ID"][device_number])
                            log.message(f"""Removing {device_ID} because of long inactivity.""")
                            del heartbeat_table["ID"][device_number]
                            del heartbeat_table["IP"][device_number]
                            del heartbeat_table["location"][device_number]
                            del heartbeat_table["file_system"][device_number]
                            del heartbeat_table["last_heartbeat"][device_number]
                else:
                    if heartbeat_table["ID"][device_number] in offline:
                        # Countdown was refreshed while marked offline: back online.
                        offline.remove(heartbeat_table["ID"][device_number])
                        log.message(f"""{heartbeat_table["IP"][device_number]} gone online""")
                        heartbeat_table["last_heartbeat"][int(device_number)] = int(time_to_heartbeat) + 5
            try:
                log.debug(
                    f"""{device_ID} : time to heartbeat : {heartbeat_table["last_heartbeat"][device_number]}""")
                heartbeat_table["last_heartbeat"][device_number] = int(heartbeat_table["last_heartbeat"][device_number]) - 1
            except IndexError:
                pass
        if time.time() - time_to_save > save_time and settings["save_table"]:
            # Periodic persistence of the table into settings.json.
            save_time = time.time()
            log.message("Saving heartbeat table.")
            log.debug(f"Saving heartbeat table: {heartbeat_table}")
            settings["heartbeat_table"] = heartbeat_table
            with open("settings.json", "w", encoding='utf-8') as file:
                json.dump(settings, file, indent=2)
        time.sleep(1)
# Startup: print the banner and credits, then run the heartbeat maintenance
# loop in a daemon thread so it dies with the main (web server) thread.
print(f"""Starting WikiSpot V{update.get_version()["version"]} on http://{IP}:8000""")
print("GitHub: https://github.com/Tucan444/Mabasej_Team")
print("Developers of this project: ")
for dev in devs:
    print(f"""{dev}, GitHub: {devs[dev]["git"]}, mail: {devs[dev]["mail"]}""")
thread_1 = threading.Thread(target=mainloop, daemon=True)
thread_1.start()
|
vtmanager.py | #####################
# VT Manager #
#####################
# yliu301@iit.edu #
#####################
# EMB VT #
# Version 0.9.1 #
#####################
import time, datetime
import random
import ast
import os, sys, signal
import multiprocessing
import logging
import subprocess # Watch out for shell injection for using subprocess.call
import optparse
import psutil # apt-get install python-dev pip install psutil
import zmq
# import vtclock
CLOCK_REALTIME = 1  # clock id constant; not referenced below -- TODO confirm external use
# Initial Logger for multiprocess
# Root logger: every record from this module and its children ends up in
# vmgm.log (mode='w' truncates the file on each run).
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# create the logging file handler
fh = logging.FileHandler("vmgm.log", mode='w')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add handler to logger object
logger.addHandler(fh)
class HostService(object):
    """Base class holding one node's id, sensed values, and the run/frozen
    state of its worker processes."""
    def __init__(self, node_id, node_value, proc_status):
        """
        Args:
            node_id...........int
            node_value........list of float
            proc_status.......dict {123:True, 124:False} bool true unfreeze false freeze
        """
        self.node_id = node_id
        self.node_value = node_value
        self.proc_status = proc_status # no more than 16
    def getValue(self):
        # Latest sensed values.
        return self.node_value
    def setValue(self, node_value):
        self.node_value = node_value
    def getID(self):
        return self.node_id
    def getStatus(self):
        # The pid -> running? dict.
        return self.proc_status
    def setStatus(self, proc_status):
        self.proc_status = proc_status
    #def procStatus(self, pid):
    #    for line in open("/proc/%d/status" % pid).readlines():
    #        if line.startswith("State:"):
    #            return line.split(":",1)[1].strip().split(' ')[0]
    #    return None
    def isAlive(self):
        """ To check if the proc is not paused """
        # False as soon as any tracked PID reports 'stopped' (i.e. SIGSTOPped).
        for key in self.proc_status:
            print '[*] Checking PID: ', key , ' alive status:', psutil.Process(key).status()
            #if self.procStatus(key) == 'T':
            if psutil.Process(key).status() == 'stopped':
                print '[*] PID: ', key, 'is paused'
                return False
        return True
class ControllerHandler(HostService):
    """Controller-side specialization of HostService."""
    def __init__(self, type_of_service, node_id, node_value, proc_status):
        """
        Args:
            type_of_service...int 1. Controller 2. Sensor
            node_id...........int
            node_value........list of float
            proc_status.......dict {pid: bool} True = running, False = frozen
        """
        super(ControllerHandler, self).__init__(node_id, node_value, proc_status)
        self.type_of_service = type_of_service
    def getFunc(self):
        """Name of the module-level function that runs this node's activity."""
        return "valueRetriever" # should be different
class SensorHandler(HostService):
    """Sensor-side specialization of HostService."""
    def __init__(self, type_of_service, node_id, node_value, proc_status):
        """
        Args:
            type_of_service...int 1. Controller 2. Sensor
            node_id...........int
            node_value........list of float
            proc_status.......dict {pid: bool} True = running, False = frozen
        """
        super(SensorHandler, self).__init__(node_id, node_value, proc_status)
        self.type_of_service = type_of_service
    def getFunc(self):
        """Name of the module-level function that runs this node's activity."""
        return "valueRetriever"
class ConnectionManager(object):
    """Sends freeze/resume commands over ZeroMQ PAIR sockets and applies them
    locally via the /sys/vt/VT7 virtual-time interface."""
    def __init__(self):
        self.context = zmq.Context()
        self.zmq_socket = self.context.socket(zmq.PAIR)
        # File which contains all the consumers IP e.g. tcp://127.0.0.1:5555.
        with open('consumers.txt') as temp_file:
            self.consumers_ip_list = [line.rstrip('\n') for line in temp_file]
        self.loopback_ip = 'tcp://127.0.0.1:5555'
    def sendCommand(self, opt, server_localhost, send_to):
        """
        Args:
            opt................operation e.g. STOP RESUME
            server_localhost...HostService
            send_to............string loopback, comlist
        """
        if send_to == 'loopback':
            self.zmq_socket.connect(self.loopback_ip)
            work_message = { 'Opt' : opt, 'FromID' : str(server_localhost.getID()), 'ProDict' : str(server_localhost.getStatus()) }
            self.zmq_socket.send_json(work_message) # remove for testing
        else:
            # NOTE(review): the single PAIR socket is connected to every
            # consumer in turn without disconnecting -- confirm this fan-out
            # is intended (PAIR is normally one-to-one).
            for ip in self.consumers_ip_list:
                self.zmq_socket.connect(ip)
                # Start your result manager and workers before you start your producers
                work_message = { 'Opt' : opt, 'FromID' : str(server_localhost.getID()), 'ProDict' : str(server_localhost.getStatus()) }
                self.zmq_socket.send_json(work_message) # remove for testing
        print '[*] Sending', opt, ' :', time.ctime()
    def processHandler(self, opt, server_localhost):
        """
        write freeze or unfreeze to /sys/vt/VT7/mode
        Args:
            opt...string
            server_localhost...HostService
        """
        # Mirror the requested state into the status dict first.
        cp_proc_dict = server_localhost.getStatus()
        for key, _ in cp_proc_dict.items():
            if opt == 'STOP':
                cp_proc_dict[key] = False
            elif opt == 'RESUME':
                cp_proc_dict[key] = True
        # update the dict for class
        server_localhost.setStatus(cp_proc_dict)
        if opt == 'STOP':
            # Wait until the processes are actually running before freezing.
            if not server_localhost.isAlive():
                while not server_localhost.isAlive():
                    print '[*] Waiting for system to resume before pausing'
                    time.sleep(1)
            #f2 = open('/sys/vt/VT7/mode', 'w')
            #f2.write('freeze')
            #f2.close()
            logger.info("NCProcHandler_Stop_Services: %.9f", time.time())
            subprocess.call("echo 'freeze' > /sys/vt/VT7/mode", shell=True)
            print '[*] Stop Services', time.ctime()
        elif opt == 'RESUME':
            # Symmetric wait: the processes must be paused before resuming.
            if server_localhost.isAlive():
                while server_localhost.isAlive():
                    print '[*] Waiting for system to pasue before resume'
                    time.sleep(1)
            #f2 = open('/sys/vt/VT7/mode', 'w')
            #f2.write('unfreeze')
            #f2.close()
            logger.info("NCProcHandler_Resume_Services: %.9f", time.time())
            subprocess.call("echo 'unfreeze' > /sys/vt/VT7/mode", shell=True)
            print '[*] Resume Services', time.ctime()
""" Connection Manager """
def writeProcToFile(server_localhost):
    """
    Write each tracked PID to /sys/vt/VT7/pid_NN (NN starts at 01).
    Args:
        server_localhost...HostService
    """
    cp_proc_dict = server_localhost.getStatus()
    for i, (key, _) in enumerate(cp_proc_dict.items()):
        file_name = '/sys/vt/VT7/pid_' + str(i+1).zfill(2) # pid file starts from 01 not 00
        # `with` guarantees the sysfs file is closed even if write() fails,
        # replacing the original open/write/close sequence.
        with open(file_name, 'w') as f1:
            f1.write(str(key))
def startConnectionManager(server_localhost):
    """
    start the service for listening
    Arg:
        server_localhost...HostService

    Binds the loopback PAIR socket and, for each received STOP command,
    freezes the local services, waits a random 5-10 s, then resumes them.
    """
    connectionMg = ConnectionManager()
    # TODO: Correct the IPs
    context = zmq.Context()
    # recieve work
    socket = context.socket(zmq.PAIR)
    socket.bind("tcp://127.0.0.1:5555") # dummy data
    while True:
        work = socket.recv_json()
        opt = work['Opt']
        from_sender = work['FromID']
        # The status dict travels as its repr(); literal_eval parses it back
        # into a dict (safe for literals, unlike eval).
        pro_dict = ast.literal_eval(work['ProDict'])
        server_localhost.setStatus(pro_dict)
        # target_ids_list = work['NodeIDs']
        # handling stop or resume process
        print '[*] Receive', opt, ' From :', from_sender, ' ', time.ctime()
        if __debug__:
            if opt == 'STOP':
                # Freeze, simulate work for 5-10 s, then resume.
                connectionMg.processHandler('STOP', server_localhost)
                logger.info("NetCoor_Stop_Services: %.9f", time.time())
                time.sleep(random.randint(5,10))
                connectionMg.processHandler('RESUME', server_localhost)
                logger.info("NetCoor_Resume_Services: %.9f", time.time())
                print '[*] Resume: ', time.ctime()
        else:
            # wait until get the result from win server
            pass
""" Host Activities """
def getSensorData():
    """Simulate one sensor reading: a random int in the range [0, 10)."""
    # Placeholder until real sensor hardware is wired in.
    return random.randrange(0, 10)
def valueRetriever(server_localhost):
    """
    start the service for retrieving data and handling pause
    Arg:
        server_localhost...HostService
    """
    connectionMg = ConnectionManager()
    # Record this process's own PID as the (initially running) tracked proc.
    server_localhost.setStatus({int(os.getpid()):True})
    while True:
        time.sleep(1)
        sensor_val = getSensorData()
        print '[*] Value: ', sensor_val, ' ... ', time.ctime()
        if __debug__:
            if sensor_val < 2: # assume this is the situation we need to pause the system
                print '[*] Not getting value, system pasuing... ', time.ctime()
                # /proc/<pid>/fpt comes from the EMB-VT kernel patch; the
                # parse below assumes a value in nanoseconds -- TODO confirm.
                process = subprocess.Popen(['cat', '/proc/'+str(os.getpid())+'/fpt'], stdout=subprocess.PIPE)
                out, err = process.communicate()
                logger.info("Host_System_pasuing: %.9f", (float(out[:-3])*0.000000001) + time.time())
                connectionMg.sendCommand('STOP', server_localhost, 'loopback')
                time.sleep(5) # to simulate waiting for value
        else:
            pass
"""
if sensor_val < 2: # assume this is the situation we need to pause the system
print '[*] Not getting value, system pasuing'
connectionMg.sendCommand('STOP', server_localhost, 'loopback')
# TODO: Should have a lock here, wait until you get the value
while sensor_val < 2:
time.sleep(1)
sensor_val = getSensorData()
print '[*] Requesting updated Value: ', sensor_val
connectionMg.sendCommand('STOP', server_localhost, 'loopback')
connectionMg.processHandler('STOP', server_localhost)
server_localhost.setValue([sensor_val])
"""
def startingTime(time2start):
    """
    start the Process at specific time
    Arg:
        time2start...string "HH:MM:SS" (24-hour clock)
    Raises:
        ValueError when the string does not parse as HH:MM:SS.
    """
    try:
        datetime.datetime.strptime(time2start, '%H:%M:%S')
    except ValueError:
        raise ValueError("Incorrect data format, should be HH:MM:SS")
    print '[*] Wait until', time2start, '...'
    # NOTE(review): a 1 s poll of a string compare can skip the target second
    # if the loop drifts -- confirm this is acceptable here.
    while time2start != datetime.datetime.now().strftime("%H:%M:%S"):
        time.sleep(1)
    print '[*] Start!'
def main():
    """main

    Interactive entry point: choose the controller/sensor role and a node id,
    optionally wait for a start time, then launch the connection-manager and
    host-activity child processes.
    """
    if __debug__:
        print 'Debug mode: ON'
    else:
        print 'Debug mode: OFF'
    parser = optparse.OptionParser()
    parser.add_option(
        '-t', '--time',
        dest='time2start',
        type='string', default='-1',
        help='''Enter the start time formart:HH:MM:SS e.g. 10:12:00'''
    )
    options, _ = parser.parse_args()
    try:
        type_of_service = int(raw_input('Please selete the type of service: \n1. Controller \
                                  \n2. Sensor \nPress Enter: '))
    except ValueError:
        raise ValueError("Error: Please confirm your input!")
    try:
        node_id = int(raw_input('Please enter the ID for this node: \nPress Enter: '))
    except ValueError:
        raise ValueError("Error: Please confirm your input!")
    if options.time2start != '-1':
        startingTime(options.time2start)
    # for now both of them do the same thing
    if type_of_service == 1 or type_of_service == 2:
        # add real value
        if type_of_service == 1:
            server_localhost = ControllerHandler(type_of_service, node_id, [0.], {0:True}) # init
            print "[*] Starting Controller"
        elif type_of_service == 2:
            server_localhost = SensorHandler(type_of_service, node_id, [0.], {0:True}) # init
            print "[*] Starting Sensor"
        logger.info("Starting Service ID: #%s", server_localhost.getID())
        print "[*] Starting Service ID: #%s" % (server_localhost.getID())
        p1 = multiprocessing.Process(name='p1', target=startConnectionManager, args=(server_localhost,))
        p1.start()
        logger.info("Connection Manager Started: %.9f", time.time())
        print "[*] Connection Manager Started..."
        # eval() here only ever sees the fixed name returned by getFunc();
        # a dict lookup would avoid eval entirely.
        func_for_eval = server_localhost.getFunc()
        p2 = multiprocessing.Process(name='p2', target=eval(func_for_eval), args=(server_localhost,))
        p2.start()
        logger.info("Host Activities Started: %.9f", time.time())
        # Get p2.pid AKA the pid for the node and put it in the class
        # NOTE(review): p1/p2 own their copies of server_localhost after the
        # fork; this setStatus only updates the parent's copy -- confirm.
        server_localhost.setStatus({int(p2.pid):True})
        writeProcToFile(server_localhost) # take out this part for testing
        print "[*] Set PID: ", p2.pid, " to proc dict"
        print "[*] Host Activities Started..."
    else:
        print "Error: Please confirm your input"
def kill_child_processes(signum, frame):
    """ Handling Ctrl+Z """
    # Signal handler: SIGTERM every child of this process, dump the kernel's
    # VT-GPIO messages to dmesg.log, then exit.
    parent_id = os.getpid()
    ps_command = subprocess.Popen("ps -o pid --ppid %d --noheaders" % parent_id, shell=True, \
        stdout=subprocess.PIPE)
    ps_output = ps_command.stdout.read()
    ps_command.wait()
    # NOTE(review): [:-1] drops the final line of ps output -- possibly meant
    # to skip the ps helper's own entry, but it may skip a real child PID
    # instead; confirm the intent of this slice.
    for pid_str in ps_output.strip().split("\n")[:-1]:
        os.kill(int(pid_str), signal.SIGTERM)
    subprocess.call("dmesg | grep 'VT-GPIO' > dmesg.log", shell=True) # log Kernel Msg to file
    sys.exit()
if __name__ == '__main__':
    # Register the cleanup handler BEFORE entering main(): the original code
    # registered it only after main() returned, so interrupts arriving while
    # main() (and its child processes) were running were never handled.
    signal.signal(signal.SIGINT, kill_child_processes)
    main()
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.contacts import Contacts
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch, metrics
from kivy.lang import Builder
# lazy imports for factory so that widgets can be used in kv
# Register dialog widgets by name so kv files can reference them before the
# heavy modules are actually imported.
Factory.register('InstallWizard',
                 module='electrum_gui.kivy.uix.dialogs.installwizard')
Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
# Module-level placeholders; the real objects are imported lazily for
# Android startup speed.
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.uix.checkbox import CheckBox
from kivy.uix.switch import Switch
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
# Both upright slots map to Roboto.ttf and both bold slots to Roboto-Bold.ttf
# (presumably regular/italic/bold/bold-italic -- confirm against kivy's
# Label.register signature).
Label.register('Roboto',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf',
               'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
    def set_URI(self, uri):
        # Open the send tab pre-filled from a bitcoin: URI.
        self.switch_to('send')
        self.send_screen.set_URI(uri)
    def on_new_intent(self, intent):
        # Android intent hook; ignore anything that is not a bitcoin: URI.
        if intent.getScheme() != 'bitcoin':
            return
        uri = intent.getDataString()
        self.set_URI(uri)
    def on_language(self, instance, language):
        # Property observer: switch the i18n catalogue when `language` changes.
        Logger.info('language: {}'.format(language))
        _.switch_lang(language)
    def on_quotes(self, d):
        #Logger.info("on_quotes")
        # Exchange-rate quotes callback; currently a no-op.
        pass
    def on_history(self, d):
        #Logger.info("on_history")
        # Refresh the history tab on the next frame, if it exists yet.
        if self.history_screen:
            Clock.schedule_once(lambda dt: self.history_screen.update())
    def _get_bu(self):
        # Getter of the `base_unit` AliasProperty; defaults to mBTC.
        return self.electrum_config.get('base_unit', 'mBTC')
    def _set_bu(self, value):
        # Setter of `base_unit`: persist to config and refresh dependent UI.
        assert value in base_units.keys()
        self.electrum_config.set_key('base_unit', value, True)
        self.update_status()
        if self.history_screen:
            self.history_screen.update()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
    def on_fiat_unit(self, a, b):
        # Re-render history amounts when the fiat unit changes.
        if self.history_screen:
            self.history_screen.update()
    def decimal_point(self):
        # Number of decimal places implied by the current base unit.
        return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = run_hook('exchange_rate')
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Defines tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
wallet = ObjectProperty(None)
'''Holds the electrum wallet
:attr:`wallet` is a `ObjectProperty` defaults to None.
'''
    def __init__(self, **kwargs):
        """Build app state from kwargs (config, network, plugins, gui_object)
        and create the rate-limited UI refresh triggers."""
        # initialize variables
        self._clipboard = Clipboard
        self.info_bubble = None
        self.qrscanner = None
        self.nfcscanner = None
        self.tabs = None
        self.is_exit = False
        super(ElectrumWindow, self).__init__(**kwargs)
        title = _('Electrum App')
        self.electrum_config = config = kwargs.get('config', None)
        self.language = config.get('language', 'en')
        self.network = network = kwargs.get('network', None)
        self.plugins = kwargs.get('plugins', [])
        self.gui_object = kwargs.get('gui_object', None)
        #self.config = self.gui_object.config
        self.contacts = Contacts(self.electrum_config)
        self.invoices = InvoiceStore(self.electrum_config)
        # create triggers so as to minimize updation a max of 2 times a sec
        self._trigger_update_wallet =\
            Clock.create_trigger(self.update_wallet, .5)
        self._trigger_update_status =\
            Clock.create_trigger(self.update_status, .5)
        self._trigger_notify_transactions = \
            Clock.create_trigger(self.notify_transactions, 5)
        # cached dialogs
        self._settings_dialog = None
        self._password_dialog = None
    def on_pr(self, pr):
        # Handle an incoming payment request: verify, store it, then route to
        # the send tab unless it is already paid or has expired.
        if pr.verify(self.contacts):
            key = self.invoices.add(pr)
            if self.invoices_screen:
                self.invoices_screen.update()
            status = self.invoices.get_status(key)
            if status == PR_PAID:
                self.show_error("invoice already paid")
                self.send_screen.do_clear()
            else:
                if pr.has_expired():
                    self.show_error(_('Payment request has expired'))
                else:
                    self.switch_to('send')
                    self.send_screen.set_request(pr)
        else:
            self.show_error("invoice error:" + pr.error)
            self.send_screen.do_clear()
def on_qr(self, data):
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.bitcoin import base_decode
from electrum.transaction import Transaction
try:
text = base_decode(data, None, base=43).encode('hex')
tx = Transaction(text)
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
    def update_tab(self, name):
        # Refresh the named tab's screen if it has been instantiated.
        s = getattr(self, name + '_screen', None)
        if s:
            s.update()
    @profiler
    def update_tabs(self):
        # Refresh every tab that already exists.
        for tab in ['invoices', 'send', 'history', 'receive', 'requests']:
            self.update_tab(tab)
    def switch_to(self, name):
        # Activate the named tab, lazily loading its screen first.
        s = getattr(self, name + '_screen', None)
        # NOTE(review): the guard tests send_screen but then loads the screen
        # for `name` -- looks like it should test `s is None`; confirm.
        if self.send_screen is None:
            s = self.tabs.ids[name + '_screen']
            s.load_screen()
        panel = self.tabs.ids.panel
        tab = self.tabs.ids[name + '_tab']
        panel.switch_to(tab)
    def show_request(self, addr):
        # Open the receive tab focused on the given address.
        self.switch_to('receive')
        self.receive_screen.screen.address = addr
    def show_pr_details(self, req, status, is_invoice):
        # Populate and open the invoice/request detail popup from a request
        # dict (requestor, exp, memo, amount, signature, txid, outputs).
        from electrum.util import format_time
        requestor = req.get('requestor')
        exp = req.get('exp')
        memo = req.get('memo')
        amount = req.get('amount')
        popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
        popup.is_invoice = is_invoice
        popup.amount = amount
        popup.requestor = requestor if is_invoice else req.get('address')
        popup.exp = format_time(exp) if exp else ''
        popup.description = memo if memo else ''
        popup.signature = req.get('signature', '')
        popup.status = status
        txid = req.get('txid')
        popup.tx_hash = txid or ''
        # Fill the outputs list only once the popup is actually shown.
        popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
        popup.open()
    def qr_dialog(self, title, data, show_text=False):
        # Show `data` as a QR code in a popup (optionally with the raw text).
        from uix.dialogs.qr_dialog import QRDialog
        popup = QRDialog(title, data, show_text)
        popup.open()
    def scan_qr(self, on_complete):
        # Launch the ZXing barcode scanner through an Android intent and pass
        # the scanned QR text to `on_complete`.  No-op on other platforms.
        if platform != 'android':
            return
        from jnius import autoclass
        from android import activity
        PythonActivity = autoclass('org.renpy.android.PythonActivity')
        Intent = autoclass('android.content.Intent')
        intent = Intent("com.google.zxing.client.android.SCAN")
        intent.putExtra("SCAN_MODE", "QR_CODE_MODE")
        def on_qr_result(requestCode, resultCode, intent):
            # Callback for the activity result of our request code 0.
            if requestCode == 0:
                if resultCode == -1: # RESULT_OK:
                    contents = intent.getStringExtra("SCAN_RESULT")
                    if intent.getStringExtra("SCAN_RESULT_FORMAT") == 'QR_CODE':
                        on_complete(contents)
                    else:
                        self.show_error("wrong format " + intent.getStringExtra("SCAN_RESULT_FORMAT"))
        activity.bind(on_activity_result=on_qr_result)
        try:
            PythonActivity.mActivity.startActivityForResult(intent, 0)
        except:
            self.show_error(_('Could not start Barcode Scanner.') + ' ' + _('Please install the Barcode Scanner app from ZXing'))
    def build(self):
        # Kivy entry point: the root widget tree comes from main.kv.
        return Builder.load_file('gui/kivy/main.kv')
    def _pause(self):
        # Send the Android activity to the background instead of exiting.
        if platform == 'android':
            # move activity to back
            from jnius import autoclass
            python_act = autoclass('org.renpy.android.PythonActivity')
            mActivity = python_act.mActivity
            mActivity.moveTaskToBack(True)
    def on_start(self):
        ''' This is the start point of the kivy ui:
        wires window events, builds the UI, loads the configured wallet,
        initializes plugins, and handles a bitcoin: URI from the intent or
        config.
        '''
        import time
        Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
        Logger.info("dpi: {} {}".format(metrics.dpi, metrics.dpi_rounded))
        win = Window
        win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
        win.bind(on_key_down=self.on_key_down)
        win.softinput_mode = 'below_target'
        self.on_size(win, win.size)
        self.init_ui()
        self.load_wallet_by_name(self.electrum_config.get_wallet_path())
        # init plugins
        run_hook('init_kivy', self)
        # default tab
        self.switch_to('history')
        # bind intent for bitcoin: URI scheme
        if platform == 'android':
            from android import activity
            from jnius import autoclass
            PythonActivity = autoclass('org.renpy.android.PythonActivity')
            mactivity = PythonActivity.mActivity
            # Handle the intent the app was launched with, then future ones.
            self.on_new_intent(mactivity.getIntent())
            activity.bind(on_new_intent=self.on_new_intent)
        # URI passed in config
        uri = self.electrum_config.get('url')
        if uri:
            self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
    def load_wallet_by_name(self, wallet_path):
        # Open the wallet at `wallet_path`; when the file is missing or the
        # wallet still needs setup actions, run the install wizard instead.
        if not wallet_path:
            return
        config = self.electrum_config
        try:
            storage = WalletStorage(wallet_path)
        except IOError:
            self.show_error("Cannot read wallet file")
            return
        if storage.file_exists:
            wallet = Wallet(storage)
            action = wallet.get_action()
        else:
            action = 'new'
        if action is not None:
            # start installation wizard
            Logger.debug('Electrum: Wallet not found. Launching install wizard')
            wizard = Factory.InstallWizard(config, self.network, storage)
            wizard.bind(on_wizard_complete=lambda instance, wallet: self.load_wallet(wallet))
            wizard.run(action)
        else:
            self.load_wallet(wallet)
        self.on_resume()
    def on_stop(self):
        # Kivy shutdown hook: stop the wallet's background threads.
        self.stop_wallet()
    def stop_wallet(self):
        # Stop the wallet's network threads, if a wallet is loaded.
        if self.wallet:
            self.wallet.stop_threads()
    def on_key_down(self, instance, key, keycode, codepoint, modifiers):
        # Desktop keyboard shortcuts; everything here requires Ctrl.
        if 'ctrl' in modifiers:
            # q=24 w=25
            if keycode in (24, 25):
                self.stop()
            elif keycode == 27:
                # r=27
                # force update wallet
                self.update_wallet()
            elif keycode == 112:
                # pageup
                #TODO move to next tab
                pass
            elif keycode == 117:
                # pagedown
                #TODO move to prev tab
                pass
        #TODO: alt+tab_number to activate the particular tab
    def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
        # Back button (key 27): require a second press to actually exit.
        if key == 27 and self.is_exit is False:
            self.is_exit = True
            self.show_info(_('Press again to exit'))
            return True
        self.is_exit = False
        # override settings button
        if key in (319, 282): #f1/settings button on android
            #self.gui.main_gui.toggle_settings(self)
            return True
def settings_dialog(self):
    """Lazily build, refresh and open the (singleton) settings dialog."""
    if self._settings_dialog is None:
        from uix.dialogs.settings import SettingsDialog
        self._settings_dialog = SettingsDialog(self)
    self._settings_dialog.update()
    self._settings_dialog.open()
def popup_dialog(self, name):
    """Open the popup called *name*: 'settings' and 'wallets' are
    special-cased, anything else is loaded from the matching .kv file."""
    if name == 'settings':
        self.settings_dialog()
    elif name == 'wallets':
        from uix.dialogs.wallets import WalletDialog
        d = WalletDialog()
        d.open()
    else:
        popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
        popup.open()
@profiler
def init_ui(self):
    ''' Initialize The Ux part of electrum. This function performs the basic
    tasks of setting up the ui.
    '''
    from weakref import ref
    self.funds_error = False
    # setup UX
    self.screens = {}
    #setup lazy imports for mainscreen
    Factory.register('AnimatedPopup',
                     module='electrum_gui.kivy.uix.dialogs')
    Factory.register('QRCodeWidget',
                     module='electrum_gui.kivy.uix.qrcodewidget')
    # preload widgets. Remove this if you want to load the widgets on demand
    #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
    #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
    # load and focus the ui
    self.root.manager = self.root.ids['manager']
    # screen references are created lazily, as each tab is first shown
    self.history_screen = None
    self.contacts_screen = None
    self.send_screen = None
    self.invoices_screen = None
    self.receive_screen = None
    self.requests_screen = None
    self.icon = "icons/electrum.png"
    # connect callbacks
    if self.network:
        interests = ['updated', 'status', 'new_transaction']
        self.network.register_callback(self.on_network, interests)
    #self.wallet = None
    self.tabs = self.root.ids['tabs']
def on_network(self, event, *args):
    """Dispatch a network callback to the matching GUI Clock trigger."""
    if event == 'new_transaction':
        self._trigger_notify_transactions(*args)
        return
    if event == 'updated':
        self._trigger_update_wallet()
    elif event == 'status':
        self._trigger_update_status()
@profiler
def load_wallet(self, wallet):
    """Make *wallet* the active wallet and refresh the whole GUI.

    Stops any previously-open wallet first, then starts this one
    against the network and replays any pending notifications.
    """
    self.stop_wallet()
    self.wallet = wallet
    self.wallet.start_threads(self.network)
    self.current_account = self.wallet.storage.get('current_account', None)
    self.update_wallet()
    # Once GUI has been initialized check if we want to announce something
    # since the callback has been called before the GUI was initialized
    if self.receive_screen:
        self.receive_screen.clear()
        self.update_tabs()
    self.notify_transactions()
    run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
    """Recompute the status line: no-wallet/offline states, sync
    progress, server lag, or the wallet balance when fully synced."""
    if not self.wallet:
        self.status = _("No Wallet")
        return
    if self.network is None or not self.network.is_running():
        self.status = _("Offline")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        if not self.wallet.up_to_date or server_height == 0:
            self.status = _("Synchronizing...")
        elif server_lag > 1:
            self.status = _("Server lagging (%d blocks)"%server_lag)
        else:
            # c, u, x: confirmed / unconfirmed / unmatured balances
            c, u, x = self.wallet.get_account_balance(self.current_account)
            text = self.format_amount(c+x+u)
            self.status = str(text.strip() + ' ' + self.base_unit)
    else:
        self.status = _("Not connected")
def get_max_amount(self):
    """Return the maximum sendable amount (formatted satoshi string)
    for the address currently typed on the send screen."""
    inputs = self.wallet.get_spendable_coins(None)
    # an empty address string falls back to a dummy address so the fee
    # estimate can still be computed
    addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
    amount, fee = self.wallet.get_max_amount(self.electrum_config, inputs, addr, None)
    return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format *x* (satoshis) for display using the app's decimal point."""
    return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
    """Format *x* (satoshis) followed by the configured unit label."""
    return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
@profiler
def update_wallet(self, *dt):
    """Refresh the status line and every tab (Clock-trigger callback)."""
    self._trigger_update_status()
    #if self.wallet.up_to_date or not self.network or not self.network.is_connected():
    self.update_tabs()
@profiler
def notify_transactions(self, *dt):
    """Surface pending incoming transactions as notifications.

    Currently short-circuited (the early ``return``); the remaining
    body is kept for when the feature is re-enabled.
    """
    if not self.network or not self.network.is_connected():
        return
    # temporarily disabled for merge
    return
    iface = self.network
    ptfn = iface.pending_transactions_for_notifications
    if len(ptfn) > 0:
        # Combine the transactions if there are more then three
        tx_amount = len(ptfn)
        if tx_amount >= 3:
            total_amount = 0
            for tx in ptfn:
                is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
                if v > 0:
                    total_amount += v
            # base_unit is an attribute (see update_status); the original
            # called it here, which would raise TypeError
            self.notify(_("{txs}s new transactions received. Total amount"
                          "received in the new transactions {amount}s"
                          "{unit}s").format(txs=tx_amount,
                          amount=self.format_amount(total_amount),
                          unit=self.base_unit))
            iface.pending_transactions_for_notifications = []
        else:
            # iterate a snapshot: removing from the list while iterating
            # it skips every other element
            for tx in list(iface.pending_transactions_for_notifications):
                if tx:
                    iface.pending_transactions_for_notifications.remove(tx)
                    is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
                    if v > 0:
                        self.notify(
                            _("{txs} new transaction received. {amount} {unit}").
                            format(txs=tx_amount, amount=self.format_amount(v),
                                   unit=self.base_unit))
def notify(self, message):
    """Show a system notification via plyer (imported lazily)."""
    try:
        global notification, os
        if not notification:
            from plyer import notification
        icon = (os.path.dirname(os.path.realpath(__file__))
                + '/../../' + self.icon)
        notification.notify('Electrum', message,
                            app_icon=icon, app_name='Electrum')
    except ImportError:
        # kivy's Logger exposes lowercase level methods; Logger.Error
        # does not exist and would raise AttributeError here.
        Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
    """Suspend the QR scanner and disable NFC before backgrounding."""
    scanner = self.qrscanner
    if scanner:
        scanner.stop()
    nfc = self.nfcscanner
    if nfc:
        nfc.nfc_disable()
    return True
def on_resume(self):
    """Restart the QR scanner (when attached to a window) and NFC."""
    # BUG fix: the original referenced the bare name `qrscanner`
    # (NameError at runtime); the attribute lives on self.
    if self.qrscanner and self.qrscanner.get_parent_window():
        self.qrscanner.start()
    if self.nfcscanner:
        self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
    """Track orientation and phone/tablet UI mode from the window size."""
    width, height = value
    if width > height:
        self._orientation = 'landscape'
    else:
        self._orientation = 'portrait'
    self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
    """Two-stage tap: first tap copies label.data to the clipboard,
    the second tap shows it as a QR code."""
    if label.touched:
        label.touched = False
        self.qr_dialog(label.name, label.data, True)
    else:
        label.touched = True
        self._clipboard.copy(label.data)
        Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
    """Pre-fill the send screen, e.g. from a bitcoin: URI or request."""
    self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show a error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
              exit=False, duration=0, modal=False):
    ''' Show a Info Message Bubble.
    '''
    # Forward width too: the original dropped it, so callers passing a
    # custom width silently got show_error's default instead.
    self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
                    duration=duration, modal=modal, exit=exit, pos=pos,
                    arrow_pos=arrow_pos, width=width)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
    arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    '''Method to show a Information Bubble
    .. parameters::
        text: Message to be displayed; the special value 'texture'
              displays *icon* (a texture object) full screen instead
        pos: position for the bubble
        duration: duration the bubble remains on screen. 0 = click to hide
        width: width of the Bubble
        arrow_pos: arrow position for the bubble
        modal: block input outside the bubble while it is visible
        exit: dismissing the bubble exits the application
    '''
    # a single InfoBubble instance is reused for every message
    info_bubble = self.info_bubble
    if not info_bubble:
        info_bubble = self.info_bubble = Factory.InfoBubble()
    win = Window
    if info_bubble.parent:
        # already visible: detach it (or its modal wrapper) first
        win.remove_widget(info_bubble
                          if not info_bubble.modal else
                          info_bubble._modal_view)
    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos
    img = info_bubble.ids.img
    if text == 'texture':
        # icon holds a texture not a source image
        # display the texture in full screen
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
    else:
        info_bubble.fs = False
        info_bubble.icon = icon
        #if img.texture and img._coreimage:
        #    img.reload()
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
    info_bubble.message = text
    if not pos:
        # default: horizontally centred, nudged up by half the bubble height
        pos = (win.center[0], win.center[1] - (info_bubble.height/2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
    """Open the transaction-details popup for *tx*."""
    from uix.dialogs.tx_dialog import TxDialog
    d = TxDialog(self, tx)
    d.open()
def sign_tx(self, *args):
    """Sign a transaction on a background thread.

    args: (tx, password, on_success, on_failure) -- see _sign_tx.
    """
    threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
    """Worker for sign_tx: sign, then marshal the result callback back
    onto the UI thread via Clock."""
    try:
        self.wallet.sign_transaction(tx, password)
    except InvalidPassword:
        Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
        return
    Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
    """Worker: broadcast *tx*, then report (ok, txid) on the UI thread."""
    ok, txid = self.network.broadcast(tx)
    Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
    """Broadcast *tx*; when a payment request *pr* is given, mark it
    paid on success and refresh the invoices tab."""
    def on_complete(ok, txid):
        self.show_info(txid)
        if ok and pr:
            pr.set_paid(tx.hash())
            self.invoices.save()
            self.update_tab('invoices')
    if self.network and self.network.is_connected():
        self.show_info(_('Sending'))
        threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
    else:
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
    """Edit *screen*'s payment description in a label dialog."""
    from uix.dialogs.label_dialog import LabelDialog
    text = screen.message
    def callback(text):
        screen.message = text
    d = LabelDialog(_('Enter description'), text, callback)
    d.open()
@profiler
def amount_dialog(self, screen, show_max):
    """Open the amount-entry popup pre-filled from *screen*.amount,
    which is stored as '<value> <unit>'."""
    from uix.dialogs.amount_dialog import AmountDialog
    amount = screen.amount
    if amount:
        amount, u = str(amount).split()
        assert u == self.base_unit
    def cb(amount):
        screen.amount = amount
    popup = AmountDialog(show_max, amount, cb)
    popup.open()
def protected(self, msg, f, args):
    """Run ``f(*args, password)``: prompt for the PIN first when the
    wallet is encrypted, otherwise pass password=None directly."""
    if self.wallet.use_encryption:
        self.password_dialog(msg, f, args)
    else:
        # apply() is a Python-2-only builtin (removed in 3); unpack explicitly
        f(*(args + (None,)))
def show_seed(self, label):
    """Ask for the PIN (when needed), then display the seed on *label*."""
    self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.use_encryption and password is None:
return
try:
seed = self.wallet.get_seed(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
def change_password(self, cb):
    """Start the PIN-change flow; asks for the current PIN first when
    the wallet is already encrypted. *cb* runs after a successful change."""
    if self.wallet.use_encryption:
        self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
    else:
        self._change_password(cb, None)
def _change_password(self, cb, old_password):
    """Verify the old PIN (when encryption is on), then ask for a new one."""
    if self.wallet.use_encryption:
        if old_password is None:
            return
        try:
            self.wallet.check_password(old_password)
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
    """Ask the user to confirm the new PIN before applying it."""
    self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
    """Prompt for a PIN, then invoke ``f(*args, pw)`` on the Kivy clock
    (so the continuation runs on the UI thread)."""
    def callback(pw):
        # apply() is a Python-2-only builtin (removed in 3); unpack explicitly
        Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
    if self._password_dialog is None:
        from uix.dialogs.password_dialog import PasswordDialog
        self._password_dialog = PasswordDialog()
    self._password_dialog.init(msg, callback)
    self._password_dialog.open()
|
test_fetcher.py | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import BaseHTTPServer
import hashlib
import os
import SocketServer
import unittest
from contextlib import closing, contextmanager
from functools import reduce
from threading import Thread
import mock
import requests
from six import StringIO
from pants.net.http.fetcher import Fetcher
from pants.util.contextutil import temporary_dir, temporary_file
from pants.util.dirutil import safe_open, touch
class FetcherTest(unittest.TestCase):
def setUp(self):
    """Build a Fetcher wired to mocked requests Session/Response objects
    plus an autospec'd listener that records callbacks."""
    self.requests = mock.Mock(spec=requests.Session)
    self.response = mock.Mock(spec=requests.Response)
    self.fetcher = Fetcher('/unused/root/dir', requests_api=self.requests)
    self.listener = mock.create_autospec(Fetcher.Listener, spec_set=True)
def status_call(self, status_code, content_length=None):
    """Expected ``listener.status(...)`` call for the given response."""
    return mock.call.status(status_code, content_length=content_length)
def ok_call(self, chunks):
    """Expected status call for a 200 whose body consists of *chunks*."""
    return self.status_call(200, content_length=sum(len(c) for c in chunks))
def assert_listener_calls(self, expected_listener_calls, chunks, expect_finished=True):
    """Assert the listener saw status, every chunk, then (optionally)
    the finished() callback -- in that exact order."""
    expected_listener_calls.extend(mock.call.recv_chunk(chunk) for chunk in chunks)
    if expect_finished:
        expected_listener_calls.append(mock.call.finished())
    self.assertEqual(expected_listener_calls, self.listener.method_calls)
def assert_local_file_fetch(self, url_prefix=''):
    """Fetch a temp file through the local-file code path and check
    that the HTTP session mock was never used."""
    chunks = ['0123456789', 'a']
    with temporary_file() as fp:
        for chunk in chunks:
            fp.write(chunk)
        fp.close()
        self.fetcher.fetch(url_prefix + fp.name, self.listener, chunk_size_bytes=10)
        self.assert_listener_calls([self.ok_call(chunks)], chunks)
        # NOTE(review): asserts the Session mock object itself was never
        # called; method calls on it would not trip this -- confirm intent.
        self.requests.assert_not_called()
def test_file_path(self):
    """A bare filesystem path is fetched locally."""
    self.assert_local_file_fetch()
def test_file_scheme(self):
    """A 'file:' prefixed path is fetched locally."""
    self.assert_local_file_fetch('file:')
def assert_local_file_fetch_relative(self, url, *rel_path):
    """Download *url* with a Fetcher rooted at a temp dir and verify the
    bytes previously written under *rel_path* come back."""
    expected_contents = b'proof'
    with temporary_dir() as root_dir:
        with safe_open(os.path.join(root_dir, *rel_path), 'wb') as fp:
            fp.write(expected_contents)
        with temporary_file() as download_fp:
            Fetcher(root_dir).download(url, path_or_fd=download_fp)
            download_fp.close()
            with open(download_fp.name, 'rb') as fp:
                self.assertEqual(expected_contents, fp.read())
def test_file_scheme_double_slash_relative(self):
    """file://relative/path resolves relative to the fetcher root."""
    self.assert_local_file_fetch_relative('file://relative/path', 'relative', 'path')
def test_file_scheme_embedded_double_slash(self):
    """Repeated slashes inside a file:// path collapse as expected."""
    self.assert_local_file_fetch_relative('file://a//strange//path', 'a', 'strange', 'path')
def test_file_scheme_triple_slash(self):
    """file:// plus the temp file's absolute path (triple-slash form)."""
    self.assert_local_file_fetch('file://')
def test_file_dne(self):
    """Fetching a nonexistent local path raises PermanentError."""
    with temporary_dir() as base:
        with self.assertRaises(self.fetcher.PermanentError):
            self.fetcher.fetch(os.path.join(base, 'dne'), self.listener)
def test_file_no_perms(self):
    """An existing but unreadable local file raises PermanentError."""
    with temporary_dir() as base:
        no_perms = os.path.join(base, 'dne')
        touch(no_perms)
        os.chmod(no_perms, 0)  # strip all permission bits
        self.assertTrue(os.path.exists(no_perms))
        with self.assertRaises(self.fetcher.PermanentError):
            self.fetcher.fetch(no_perms, self.listener)
@contextmanager
def expect_get(self, url, chunk_size_bytes, timeout_secs, chunks=None, listener=True):
    """Prime the mocked session for a 200 GET serving *chunks*; yield
    (chunks, expected_listener_calls); verify the mock calls on exit.

    BUG fix: the original called ``expect_called_once_with``, which is
    not a mock.Mock assertion method -- it just records a call on the
    mock and verifies nothing.
    """
    chunks = chunks or ['0123456789', 'a']
    size = sum(len(c) for c in chunks)
    self.requests.get.return_value = self.response
    self.response.status_code = 200
    self.response.headers = {'content-length': str(size)}
    self.response.iter_content.return_value = chunks
    yield chunks, [self.ok_call(chunks)] if listener else []
    self.requests.get.assert_called_once_with(url, allow_redirects=True, stream=True,
                                              timeout=timeout_secs)
    self.response.iter_content.assert_called_once_with(chunk_size=chunk_size_bytes)
def test_get(self):
    """Happy-path HTTP GET: listener call sequence and response cleanup."""
    with self.expect_get('http://bar',
                         chunk_size_bytes=1024,
                         timeout_secs=60) as (chunks, expected_listener_calls):
        self.fetcher.fetch('http://bar',
                           self.listener,
                           chunk_size_bytes=1024,
                           timeout_secs=60)
    self.assert_listener_calls(expected_listener_calls, chunks)
    # was expect_called_once_with: a silent no-op on a Mock
    self.response.close.assert_called_once_with()
def test_checksum_listener(self):
    """ChecksumListener feeds every chunk to the digest, records the
    final hexdigest, and still forwards callbacks to the wrapped listener."""
    digest = mock.Mock(spec=hashlib.md5())
    digest.hexdigest.return_value = '42'
    checksum_listener = Fetcher.ChecksumListener(digest=digest)
    with self.expect_get('http://baz',
                         chunk_size_bytes=1,
                         timeout_secs=37) as (chunks, expected_listener_calls):
        self.fetcher.fetch('http://baz',
                           checksum_listener.wrap(self.listener),
                           chunk_size_bytes=1,
                           timeout_secs=37)
    self.assertEqual('42', checksum_listener.checksum)
    def expected_digest_calls():
        # one update per chunk, then a single hexdigest at the end
        for chunk in chunks:
            yield mock.call.update(chunk)
        yield mock.call.hexdigest()
    self.assertEqual(list(expected_digest_calls()), digest.method_calls)
    self.assert_listener_calls(expected_listener_calls, chunks)
    self.response.close.assert_called_once_with()
def concat_chunks(self, chunks):
    """Concatenate the string chunks into the complete payload."""
    return ''.join(chunks)
def test_download_listener(self):
    """DownloadListener writes every received chunk to the file object."""
    with self.expect_get('http://foo',
                         chunk_size_bytes=1048576,
                         timeout_secs=3600) as (chunks, expected_listener_calls):
        with closing(StringIO()) as fp:
            self.fetcher.fetch('http://foo',
                               Fetcher.DownloadListener(fp).wrap(self.listener),
                               chunk_size_bytes=1024 * 1024,
                               timeout_secs=60 * 60)
            downloaded = self.concat_chunks(chunks)
            self.assertEqual(downloaded, fp.getvalue())
    self.assert_listener_calls(expected_listener_calls, chunks)
    self.response.close.assert_called_once_with()
def test_size_mismatch(self):
    """A content-length that disagrees with the body size is an Error;
    the chunks are still delivered but finished() is never reached."""
    self.requests.get.return_value = self.response
    self.response.status_code = 200
    self.response.headers = {'content-length': '11'}  # body is only 2 bytes
    chunks = ['a', 'b']
    self.response.iter_content.return_value = chunks
    with self.assertRaises(self.fetcher.Error):
        self.fetcher.fetch('http://foo',
                           self.listener,
                           chunk_size_bytes=1024,
                           timeout_secs=60)
    self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
                                              timeout=60)
    self.response.iter_content.assert_called_once_with(chunk_size=1024)
    self.assert_listener_calls([self.status_call(200, content_length=11)], chunks,
                               expect_finished=False)
    self.response.close.assert_called_once_with()
def test_get_error_transient(self):
    """Connection failures are retryable -> TransientError."""
    self.requests.get.side_effect = requests.ConnectionError
    with self.assertRaises(self.fetcher.TransientError):
        self.fetcher.fetch('http://foo',
                           self.listener,
                           chunk_size_bytes=1024,
                           timeout_secs=60)
    self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
                                              timeout=60)
def test_get_error_permanent(self):
    """Redirect loops are not retryable -> PermanentError without a
    response code (no response was ever received)."""
    self.requests.get.side_effect = requests.TooManyRedirects
    with self.assertRaises(self.fetcher.PermanentError) as e:
        self.fetcher.fetch('http://foo',
                           self.listener,
                           chunk_size_bytes=1024,
                           timeout_secs=60)
    self.assertTrue(e.exception.response_code is None)
    self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
                                              timeout=60)
def test_http_error(self):
    """A 404 surfaces as PermanentError carrying the response code."""
    self.requests.get.return_value = self.response
    self.response.status_code = 404
    with self.assertRaises(self.fetcher.PermanentError) as e:
        self.fetcher.fetch('http://foo',
                           self.listener,
                           chunk_size_bytes=1024,
                           timeout_secs=60)
    self.assertEqual(404, e.exception.response_code)
    # the original used expect_called_once_with for the three checks
    # below -- not a mock assertion method, so they verified nothing
    self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
                                              timeout=60)
    self.listener.status.assert_called_once_with(404)
    self.response.close.assert_called_once_with()
def test_iter_content_error(self):
    """A timeout while streaming the body is a TransientError; the
    listener has already seen status(200, content_length=None)."""
    self.requests.get.return_value = self.response
    self.response.status_code = 200
    self.response.headers = {}
    self.response.iter_content.side_effect = requests.Timeout
    with self.assertRaises(self.fetcher.TransientError):
        self.fetcher.fetch('http://foo',
                           self.listener,
                           chunk_size_bytes=1024,
                           timeout_secs=60)
    # the original used expect_called_once_with throughout -- not a
    # mock assertion method, so nothing was actually verified
    self.requests.get.assert_called_once_with('http://foo', allow_redirects=True, stream=True,
                                              timeout=60)
    self.response.iter_content.assert_called_once_with(chunk_size=1024)
    self.listener.status.assert_called_once_with(200, content_length=None)
    self.response.close.assert_called_once_with()
def expect_download(self, path_or_fd=None):
    """Drive fetcher.download through the mocked GET; return the
    concatenated payload and the path it was written to."""
    with self.expect_get('http://1',
                         chunk_size_bytes=13,
                         timeout_secs=13,
                         listener=False) as (chunks, expected_listener_calls):
        path = self.fetcher.download('http://1',
                                     path_or_fd=path_or_fd,
                                     chunk_size_bytes=13,
                                     timeout_secs=13)
    # was expect_called_once_with: a silent no-op on a Mock
    self.response.close.assert_called_once_with()
    downloaded = self.concat_chunks(chunks)
    return downloaded, path
def test_download(self):
    """download() with no destination writes to a temp path the caller
    must clean up."""
    downloaded, path = self.expect_download()
    try:
        with open(path) as fp:
            self.assertEqual(downloaded, fp.read())
    finally:
        os.unlink(path)
def test_download_fd(self):
    """download() into an open file object reuses that file's path."""
    with temporary_file() as fd:
        downloaded, path = self.expect_download(path_or_fd=fd)
        self.assertEqual(path, fd.name)
        fd.close()
        with open(path) as fp:
            self.assertEqual(downloaded, fp.read())
def test_download_path(self):
    """download() into an explicit path writes exactly there."""
    with temporary_file() as fd:
        fd.close()
        downloaded, path = self.expect_download(path_or_fd=fd.name)
        self.assertEqual(path, fd.name)
        with open(path) as fp:
            self.assertEqual(downloaded, fp.read())
@mock.patch('time.time')
def test_progress_listener(self, timer):
    """ProgressListener renders a dotted progress bar; the final line
    must show a 100% complete download."""
    # two time.time() samples: start and end, 1.137s apart
    timer.side_effect = [0, 1.137]
    stream = StringIO()
    progress_listener = Fetcher.ProgressListener(width=5, chunk_size_bytes=1, stream=stream)
    with self.expect_get('http://baz',
                         chunk_size_bytes=1,
                         timeout_secs=37,
                         chunks=[[1]] * 1024) as (chunks, expected_listener_calls):
        self.fetcher.fetch('http://baz',
                           progress_listener.wrap(self.listener),
                           chunk_size_bytes=1,
                           timeout_secs=37)
    self.assert_listener_calls(expected_listener_calls, chunks)
    # We just test the last progress line which should indicate a 100% complete download.
    # We control progress bar width (5 dots), size (1KB) and total time downloading (fake 1.137s).
    self.assertEqual('100% ..... 1 KB 1.137s\n', stream.getvalue().split('\r')[-1])
class FetcherRedirectTest(unittest.TestCase):
    """End-to-end check that Fetcher actually follows HTTP redirects."""
    # NB(Eric Ayers): Using class variables like this seems horrible, but I can't figure out a better
    # to pass state between the test and the RedirectHTTPHandler class because it gets
    # re-instantiated on every request.
    _URL = None
    _URL2_ACCESSED = False
    _URL1_ACCESSED = False
    # A trivial HTTP server that serves up a redirect from /url2 --> /url1 and some hard-coded
    # responses in the HTTP message body.
    class RedirectHTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def __init__(self, request, client_address, server):
            # The base class implements GET and HEAD.
            # Old-style class, so we must invoke __init__ this way.
            BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, request, client_address, server)
        def do_GET(self):
            # /url2 -> 302 to /url1; /url1 -> 200 payload; anything else 404
            if self.path.endswith('url2'):
                self.send_response(302)
                redirect_url = '{}/url1'.format(FetcherRedirectTest._URL)
                self.send_header('Location',redirect_url)
                self.end_headers()
                self.wfile.write('\nredirecting you to {}'.format(redirect_url))
                FetcherRedirectTest._URL2_ACCESSED = True
            elif self.path.endswith('url1'):
                self.send_response(200)
                self.wfile.write('\nreturned from redirect')
                FetcherRedirectTest._URL1_ACCESSED = True
            else:
                self.send_response(404)
                self.end_headers()
    @contextmanager
    def setup_server(self):
        """Run a throwaway redirect server on an OS-chosen port in a
        background thread; yield its base URL and tear it down after."""
        httpd = None
        httpd_thread = None
        try:
            handler = self.RedirectHTTPHandler
            httpd = SocketServer.TCPServer(('localhost', 0), handler)
            port = httpd.server_address[1]
            httpd_thread = Thread(target=httpd.serve_forever)
            httpd_thread.start()
            yield 'http://localhost:{0}'.format(port)
        finally:
            if httpd:
                httpd.shutdown()
            if httpd_thread:
                httpd_thread.join()
    def test_download_redirect(self):
        """Make sure that a server that returns a redirect is actually followed.
        Test with a real HTTP server that redirects from one URL to another.
        """
        fetcher = Fetcher('/unused/root/dir')
        with self.setup_server() as base_url:
            self._URL = base_url
            self.assertFalse(self._URL2_ACCESSED)
            self.assertFalse(self._URL1_ACCESSED)
            path = fetcher.download(base_url + '/url2')
            self.assertTrue(self._URL2_ACCESSED)
            self.assertTrue(self._URL1_ACCESSED)
            with open(path) as fp:
                self.assertEqual('returned from redirect\r\n', fp.read())
|
GUI_passing_queues.py | '''
Created on May 28, 2019
Ch06
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
from tkinter import messagebox as msg
from tkinter import Spinbox
from time import sleep
from Ch06_Code.ToolTip import ToolTip
from threading import Thread
from queue import Queue
import Ch06_Code.Queues as bq
GLOBAL_CONST = 42
#=====================================================
class OOP():
def __init__(self):  # Initializer method
    """Build the root Tk window, set its title and create all widgets."""
    # Create instance
    self.win = tk.Tk()
    # Add a title
    self.win.title("Python GUI")
    self.create_widgets()
def use_queues(self, loops=5):
    """Queue demo: push *loops* messages through a Queue and print them.

    BUG fix: the original drained with ``while True: print(gui_queue.get())``
    which blocks forever once the queue is empty (Queue.get waits by
    default). Read back exactly the number of messages written instead.
    """
    gui_queue = Queue()
    print(gui_queue)
    for idx in range(loops):
        gui_queue.put('Message from a queue: ' + str(idx))
    for _ in range(loops):
        print(gui_queue.get())
def method_in_a_thread(self, num_of_loops=10):
    """Background worker: append a counter line to the scrolled text
    once per second."""
    for idx in range(num_of_loops):
        sleep(1)
        self.scrol.insert(tk.INSERT, str(idx) + '\n')
# Running methods in Threads
def create_thread(self, num=1):
    """Start the scrolled-text writer and the queue demo, each on its
    own daemon thread so they die with the GUI process."""
    # pass daemon=True to the constructor: Thread.setDaemon() is deprecated
    self.run_thread = Thread(target=self.method_in_a_thread, args=[num],
                             daemon=True)
    self.run_thread.start()
    # start queue in its own thread
    write_thread = Thread(target=self.use_queues, args=[num], daemon=True)
    write_thread.start()
# Button callback
def click_me(self):
    """Greet the typed name on the button, then kick off the queue demo
    that lives in the imported Queues module."""
    self.action.configure(text='Hello ' + self.name.get())
    print(self)
    # self.create_thread() # now called from imported module
    bq.write_to_scrol(self)
# Spinbox callback
def _spin(self):
    """Echo the currently selected Spinbox value into the scrolled text."""
    value = self.spin.get()
    self.scrol.insert(tk.INSERT, value + '\n')
# GUI Callback
def checkCallback(self, *ignored_args):
    """Keep the two traced checkbuttons mutually exclusive: ticking one
    disables the other."""
    self.check3.configure(state='disabled' if self.chVarUn.get() else 'normal')
    self.check2.configure(state='disabled' if self.chVarEn.get() else 'normal')
# Radiobutton Callback
def radCall(self):
    """Retitle the LabelFrame with the colour of the selected radiobutton."""
    labels = {0: 'Blue', 1: 'Gold', 2: 'Red'}
    choice = self.radVar.get()
    if choice in labels:
        self.mighty2.configure(text=labels[choice])
# update progressbar in callback loop
def run_progressbar(self):
    """Drive the determinate progressbar from 0 to 100 and reset it.

    NOTE: this sleeps on the Tk thread, so the GUI is unresponsive for
    the ~5s the loop runs; update() keeps the bar repainting meanwhile.
    """
    self.progress_bar["maximum"] = 100
    for i in range(101):
        sleep(0.05)
        self.progress_bar["value"] = i  # increment progressbar
        self.progress_bar.update()      # have to call update() in loop
    self.progress_bar["value"] = 0      # reset/clear progressbar
def start_progressbar(self):
    """Start the progressbar's autoincrement animation."""
    self.progress_bar.start()
def stop_progressbar(self):
    """Stop the progressbar animation immediately."""
    self.progress_bar.stop()
def progressbar_stop_after(self, wait_ms=1000):
    """Schedule progress_bar.stop() on the Tk event loop after *wait_ms* ms."""
    self.win.after(wait_ms, self.progress_bar.stop)
def usingGlobal(self):
    """Demo: rebind the module-level GLOBAL_CONST from inside a method."""
    global GLOBAL_CONST
    GLOBAL_CONST = 777
# Exit GUI cleanly
def _quit(self):
    """Stop the mainloop, destroy the window, then end the interpreter."""
    self.win.quit()
    self.win.destroy()
    # exit() raises SystemExit so the script terminates right away
    exit()
#####################################################################################
def create_widgets(self):
    """Build the two notebook tabs, the menu bar and every child widget.

    Tab 1 holds the entry/combobox/spinbox/scrolledtext demo; Tab 2
    holds the checkbutton/radiobutton/progressbar demo.
    """
    tabControl = ttk.Notebook(self.win)     # Create Tab Control
    tab1 = ttk.Frame(tabControl)            # Create a tab
    tabControl.add(tab1, text='Tab 1')      # Add the tab
    tab2 = ttk.Frame(tabControl)            # Add a second tab
    tabControl.add(tab2, text='Tab 2')      # Make second tab visible
    tabControl.pack(expand=1, fill="both")  # Pack to make visible
    # LabelFrame using tab1 as the parent
    mighty = ttk.LabelFrame(tab1, text=' Mighty Python ')
    mighty.grid(column=0, row=0, padx=8, pady=4)
    # Modify adding a Label using mighty as the parent instead of win
    a_label = ttk.Label(mighty, text="Enter a name:")
    a_label.grid(column=0, row=0, sticky='W')
    # Adding a Textbox Entry widget
    self.name = tk.StringVar()
    self.name_entered = ttk.Entry(mighty, width=24, textvariable=self.name)
    self.name_entered.grid(column=0, row=1, sticky='W')
    # Adding a Button
    self.action = ttk.Button(mighty, text="Click Me!", command=self.click_me)
    self.action.grid(column=2, row=1)
    ttk.Label(mighty, text="Choose a number:").grid(column=1, row=0)
    number = tk.StringVar()
    self.number_chosen = ttk.Combobox(mighty, width=14, textvariable=number, state='readonly')
    self.number_chosen['values'] = (1, 2, 4, 42, 100)
    self.number_chosen.grid(column=1, row=1)
    self.number_chosen.current(0)
    # Adding a Spinbox widget
    self.spin = Spinbox(mighty, values=(1, 2, 4, 42, 100), width=5, bd=9, command=self._spin) # using range
    self.spin.grid(column=0, row=2, sticky='W')  # align left
    # Using a scrolled Text control
    scrol_w = 40; scrol_h = 10  # increase sizes
    self.scrol = scrolledtext.ScrolledText(mighty, width=scrol_w, height=scrol_h, wrap=tk.WORD)
    self.scrol.grid(column=0, row=3, sticky='WE', columnspan=3)
    for child in mighty.winfo_children():  # add spacing to align widgets within tabs
        child.grid_configure(padx=4, pady=2)
    #=====================================================================================
    # Tab Control 2 ----------------------------------------------------------------------
    self.mighty2 = ttk.LabelFrame(tab2, text=' The Snake ')
    self.mighty2.grid(column=0, row=0, padx=8, pady=4)
    # Creating three checkbuttons
    chVarDis = tk.IntVar()
    check1 = tk.Checkbutton(self.mighty2, text="Disabled", variable=chVarDis, state='disabled')
    check1.select()
    check1.grid(column=0, row=0, sticky=tk.W)
    chVarUn = tk.IntVar()
    check2 = tk.Checkbutton(self.mighty2, text="UnChecked", variable=chVarUn)
    check2.deselect()
    check2.grid(column=1, row=0, sticky=tk.W)
    chVarEn = tk.IntVar()
    check3 = tk.Checkbutton(self.mighty2, text="Enabled", variable=chVarEn)
    check3.deselect()
    check3.grid(column=2, row=0, sticky=tk.W)
    # trace the state of the two checkbuttons
    chVarUn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
    chVarEn.trace('w', lambda unused0, unused1, unused2 : self.checkCallback())
    # First, we change our Radiobutton global variables into a list
    colors = ["Blue", "Gold", "Red"]
    # create three Radiobuttons using one variable
    self.radVar = tk.IntVar()
    # Next we are selecting a non-existing index value for radVar
    self.radVar.set(99)
    # Now we are creating all three Radiobutton widgets within one loop
    for col in range(3):
        curRad = tk.Radiobutton(self.mighty2, text=colors[col], variable=self.radVar,
                                value=col, command=self.radCall)
        curRad.grid(column=col, row=1, sticky=tk.W)  # row=6
        # And now adding tooltips
        ToolTip(curRad, 'This is a Radiobutton control')
    # Add a Progressbar to Tab 2
    self.progress_bar = ttk.Progressbar(tab2, orient='horizontal', length=286, mode='determinate')
    self.progress_bar.grid(column=0, row=3, pady=2)
    # Create a container to hold buttons
    buttons_frame = ttk.LabelFrame(self.mighty2, text=' ProgressBar ')
    buttons_frame.grid(column=0, row=2, sticky='W', columnspan=2)
    # Add Buttons for Progressbar commands
    ttk.Button(buttons_frame, text=" Run Progressbar ", command=self.run_progressbar).grid(column=0, row=0, sticky='W')
    ttk.Button(buttons_frame, text=" Start Progressbar ", command=self.start_progressbar).grid(column=0, row=1, sticky='W')
    ttk.Button(buttons_frame, text=" Stop immediately ", command=self.stop_progressbar).grid(column=0, row=2, sticky='W')
    ttk.Button(buttons_frame, text=" Stop after second ", command=self.progressbar_stop_after).grid(column=0, row=3, sticky='W')
    for child in buttons_frame.winfo_children():
        child.grid_configure(padx=2, pady=2)
    for child in self.mighty2.winfo_children():
        child.grid_configure(padx=8, pady=2)
    # Creating a Menu Bar
    menu_bar = Menu(self.win)
    self.win.config(menu=menu_bar)
    # Add menu items
    file_menu = Menu(menu_bar, tearoff=0)
    file_menu.add_command(label="New")
    file_menu.add_separator()
    file_menu.add_command(label="Exit", command=self._quit)
    menu_bar.add_cascade(label="File", menu=file_menu)
    # Display a Message Box
    def _msgBox():
        msg.showinfo('Python Message Info Box', 'A Python GUI created using tkinter:\nThe year is 2019.')
    # Add another Menu to the Menu Bar and an item
    help_menu = Menu(menu_bar, tearoff=0)
    help_menu.add_command(label="About", command=_msgBox)  # display messagebox when clicked
    menu_bar.add_cascade(label="Help", menu=help_menu)
    # Change the main windows icon
    self.win.iconbitmap('pyc.ico')
    # It is not necessary to create a tk.StringVar()
    # strData = tk.StringVar()
    strData = self.spin.get()
    # call function
    self.usingGlobal()
    self.name_entered.focus()
    # Add Tooltips -----------------------------------------------------
    # Add a Tooltip to the Spinbox
    ToolTip(self.spin, 'This is a Spinbox control')
    # Add Tooltips to more widgets
    ToolTip(self.name_entered, 'This is an Entry control')
    ToolTip(self.action, 'This is a Button control')
    ToolTip(self.scrol, 'This is a ScrolledText control')
#======================
# Start GUI
#======================
oop = OOP()          # build the window and all widgets
oop.win.mainloop()   # hand control to Tk's event loop (blocks until exit)
|
compare_Wchain_sgd_1layers.py | import qiskit
import numpy as np
import sys
sys.path.insert(1, '../')
import qtm.base, qtm.constant, qtm.nqubit, qtm.fubini_study, qtm.encoding
import importlib
import multiprocessing
importlib.reload(qtm.base)
importlib.reload(qtm.constant)
importlib.reload(qtm.onequbit)
importlib.reload(qtm.nqubit)
importlib.reload(qtm.fubini_study)
def run_wchain(num_layers, num_qubits):
    """Tomography experiment: fit a W-chain layered ansatz to a random state.

    Runs 400 SGD steps, then replays every intermediate parameter vector to
    record trace and fidelity, and writes all series to CSV files under
    ../../experiments/tomography/ (directories are assumed to exist).
    """
    # Initial parameters: 4 per qubit per layer, all ones.
    thetas = np.ones(num_layers*num_qubits*4)
    # Random real target state in [-1, 1)^2^n, normalized.
    psi = 2*np.random.rand(2**num_qubits)-1
    psi = psi / np.linalg.norm(psi)
    qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
    qc.initialize(psi, range(0, num_qubits))
    loss_values = []
    thetass = []
    for i in range(0, 400):
        if i % 20 == 0:
            # Progress marker every 20 iterations.
            print('W_chain: (' + str(num_layers) + ',' + str(num_qubits) + '): ' + str(i))
        grad_loss = qtm.base.grad_loss(
            qc,
            qtm.nqubit.create_Wchain_layerd_state,
            thetas, num_layers = num_layers)
        # Plain SGD step.
        thetas -= qtm.constant.learning_rate*(grad_loss)
        thetass.append(thetas.copy())
        # NOTE(review): num_layers is passed positionally here but by keyword
        # elsewhere -- assumes it is the third positional parameter; confirm.
        qc_copy = qtm.nqubit.create_Wchain_layerd_state(qc.copy(), thetas, num_layers)
        loss = qtm.base.loss_basis(qtm.base.measure(qc_copy, list(range(qc_copy.num_qubits))))
        loss_values.append(loss)
    traces = []
    fidelities = []
    # Replay every recorded parameter vector to compute the metric history.
    for thetas in thetass:
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc = qtm.nqubit.create_Wchain_layerd_state(qc, thetas, num_layers = num_layers).inverse()
        psi_hat = qiskit.quantum_info.Statevector.from_instruction(qc)
        # Calculate the metrics
        trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
        traces.append(trace)
        fidelities.append(fidelity)
    print('Writting ... ' + str(num_layers) + ' layers,' + str(num_qubits) + ' qubits')
    np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/loss_values.csv", loss_values, delimiter=",")
    np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/thetass.csv", thetass, delimiter=",")
    np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/traces.csv", traces, delimiter=",")
    np.savetxt("../../experiments/tomography/tomography_wchain_" + str(num_layers) + "/" + str(num_qubits) + "/fidelities.csv", fidelities, delimiter=",")
if __name__ == "__main__":
    # Launch one worker process per (num_layers, num_qubits) combination,
    # start them all, then wait for every one to finish.
    layer_counts = [1]
    qubit_counts = [2, 3, 4, 5, 6]
    workers = [
        multiprocessing.Process(target=run_wchain, args=(layers, qubits))
        for layers in layer_counts
        for qubits in qubit_counts
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
print("Done!") |
deadlock.py | #! /usr/bin/env python
# -*- coding:UTF-8 -*-
# Deadlock: each thread holds one lock while waiting for the other's lock,
# and neither yields -- the threads are mutually dependent and hang forever.
import threading
import time

# Two shared counters, each nominally protected by its own lock.
a = 5
alock = threading.Lock()
b = 5
block = threading.Lock()
def thread1calc():
print "Thread1 acquiring lock a"
alock.acquire()
time.sleep(5)
print "Thread1 again attempt acquiring lock b"
block.acquire()
time.sleep(5)
a += 5
b += 5
print "Thread1 releasing both locks"
block.release()
alock.release()
def thread2calc():
print "Thread2 acquiring lock b"
block.acquire()
time.sleep(5)
print "Thread2 again attempt acquiring lock a"
alock.acquire() #这里就僵死了吧,hahah
time.sleep(5)
a += 10
b += 10
print "Thread2 releasing both locks"
block.release()
alock.release()
# Spawn both workers as daemon threads, then park the main thread forever
# (daemon threads die with the process, so Ctrl-C still exits cleanly).
for target_fn in (thread1calc, thread2calc):
    worker = threading.Thread(target = target_fn)
    worker.setDaemon(True)
    worker.start()
while True:
    time.sleep(300)
|
extension_manager.py | """This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import threading
import time
from osquery.extensions.Extension import Iface
from osquery.extensions.ttypes import ExtensionResponse, ExtensionStatus
from osquery.singleton import Singleton
def shutdown_request(code, timeout=1):
    """A delayed shutdown request.

    Sleeps for ``timeout`` seconds, then hard-exits the process with ``code``
    (os._exit skips atexit handlers and interpreter cleanup).
    """
    time.sleep(timeout)
    os._exit(code)
class ExtensionManager(Singleton, Iface):
    """The thrift server for handling extension requests.

    An extension's manager is responsible for maintaining the state of
    registered plugins, broadcasting the registry of those plugins to the
    core's extension manager and fielding requests that come in on the
    extension's socket.
    """
    # registry-name -> plugin-name -> implementation object
    _plugins = {}
    # registry-name -> plugin-name -> route data (broadcast to osquery core)
    _registry = {}
    uuid = None

    def add_plugin(self, plugin):
        """Register a plugin with the extension manager.

        A plugin must be added through this interface before the extension
        manager will broadcast it.

        Keyword arguments:
        plugin -- the plugin class to register
        """
        # Plugins are singletons, so this instance is long-lived.
        instance = plugin()
        registry_name = instance.registry_name()
        plugin_name = instance.name()
        # _registry holds the route data that is sent to core's extension
        # manager once the extension has been started.
        registry_section = self._registry.setdefault(registry_name, {})
        if plugin_name not in registry_section:
            registry_section[plugin_name] = instance.routes()
        # _plugins mirrors _registry's layout but maps to the implementation
        # objects, so call() can route requests to the right plugin.
        plugin_section = self._plugins.setdefault(registry_name, {})
        if plugin_name not in plugin_section:
            plugin_section[plugin_name] = instance

    def shutdown(self):
        """The osquery extension manager requested a shutdown."""
        # Exit from a daemon thread so this thrift call can return first.
        worker = threading.Thread(target=shutdown_request, args=(0,))
        worker.daemon = True
        worker.start()
        return ExtensionStatus(code=0, message="OK")

    def registry(self):
        """Accessor for the internal _registry member variable."""
        return self._registry

    def ping(self):
        """Lightweight health verification.

        The core osquery extension manager periodically pings each connected
        extension to ensure it is still active and can field requests.
        """
        return ExtensionStatus(code=0, message="OK")

    def call(self, registry, item, request):
        """The entry-point for plugin requests.

        When a plugin is accessed from another process, osquery core's
        extension manager sends a thrift request to this method.

        Arguments:
        registry -- the registry being accessed ("table", "config", "logger")
        item -- the registry item being requested, i.e. the plugin's name
        request -- the request payload forwarded to the plugin
        """
        # This API only supports table, config, and logger plugins.
        if registry not in ["table", "config", "logger"]:
            message = "A registry of an unknown type was called: %s" % registry
            return ExtensionResponse(
                status=ExtensionStatus(code=1, message=message,),
                response=[],)
        try:
            return self._plugins[registry][item].call(request)
        except KeyError:
            message = "Extension registry does not contain requested plugin"
            return ExtensionResponse(
                status=ExtensionStatus(code=1, message=message,),
                response=[],)
|
logger_test.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for worker logging utilities."""
import json
import logging
import sys
import threading
import unittest
from apache_beam.runners.worker import logger
class PerThreadLoggingContextTest(unittest.TestCase):
    """Tests that PerThreadLoggingContext attributes are scoped per thread."""

    def thread_check_attribute(self, name):
        # Runs on a separate thread: the attribute set by the main thread must
        # not be visible here, and a value set here stays thread-local.
        self.assertFalse(name in logger.per_thread_worker_data.get_data())
        with logger.PerThreadLoggingContext(**{name: 'thread-value'}):
            self.assertEqual(
                logger.per_thread_worker_data.get_data()[name], 'thread-value')
        self.assertFalse(name in logger.per_thread_worker_data.get_data())

    def test_per_thread_attribute(self):
        # A value set on this thread is invisible to the worker thread and is
        # still present here after the worker finishes.
        self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
        with logger.PerThreadLoggingContext(xyz='value'):
            self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
            thread = threading.Thread(
                target=self.thread_check_attribute, args=('xyz',))
            thread.start()
            thread.join()
            self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
        self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())

    def test_set_when_undefined(self):
        # The attribute exists only inside the context manager.
        self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
        with logger.PerThreadLoggingContext(xyz='value'):
            self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
        self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())

    def test_set_when_already_defined(self):
        # Nested contexts shadow the outer value and restore it on exit.
        self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
        with logger.PerThreadLoggingContext(xyz='value'):
            self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
            with logger.PerThreadLoggingContext(xyz='value2'):
                self.assertEqual(
                    logger.per_thread_worker_data.get_data()['xyz'], 'value2')
            self.assertEqual(logger.per_thread_worker_data.get_data()['xyz'], 'value')
        self.assertFalse('xyz' in logger.per_thread_worker_data.get_data())
class JsonLogFormatterTest(unittest.TestCase):
SAMPLE_RECORD = {
'created': 123456.789, 'msecs': 789.654321,
'msg': '%s:%d:%.2f', 'args': ('xyz', 4, 3.14),
'levelname': 'WARNING',
'process': 'pid', 'thread': 'tid',
'name': 'name', 'filename': 'file', 'funcName': 'func',
'exc_info': None}
SAMPLE_OUTPUT = {
'timestamp': {'seconds': 123456, 'nanos': 789654321},
'severity': 'WARN', 'message': 'xyz:4:3.14', 'thread': 'pid:tid',
'job': 'jobid', 'worker': 'workerid', 'logger': 'name:file:func'}
def create_log_record(self, **kwargs):
class Record(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
return Record(**kwargs)
def test_basic_record(self):
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
record = self.create_log_record(**self.SAMPLE_RECORD)
self.assertEqual(json.loads(formatter.format(record)), self.SAMPLE_OUTPUT)
def execute_multiple_cases(self, test_cases):
record = self.SAMPLE_RECORD
output = self.SAMPLE_OUTPUT
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
for case in test_cases:
record['msg'] = case['msg']
record['args'] = case['args']
output['message'] = case['expected']
self.assertEqual(
json.loads(formatter.format(self.create_log_record(**record))),
output)
def test_record_with_format_character(self):
test_cases = [
{'msg': '%A', 'args': (), 'expected': '%A'},
{'msg': '%s', 'args': (), 'expected': '%s'},
{'msg': '%A%s', 'args': ('xy'), 'expected': '%A%s with args (xy)'},
{'msg': '%s%s', 'args': (1), 'expected': '%s%s with args (1)'},
]
self.execute_multiple_cases(test_cases)
def test_record_with_arbitrary_messages(self):
test_cases = [
{'msg': ImportError('abc'), 'args': (), 'expected': 'abc'},
{'msg': TypeError('abc %s'), 'args': ('def'), 'expected': 'abc def'},
]
self.execute_multiple_cases(test_cases)
def test_record_with_per_thread_info(self):
with logger.PerThreadLoggingContext(
work_item_id='workitem', stage_name='stage', step_name='step'):
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output = json.loads(formatter.format(record))
expected_output = dict(self.SAMPLE_OUTPUT)
expected_output.update(
{'work': 'workitem', 'stage': 'stage', 'step': 'step'})
self.assertEqual(log_output, expected_output)
def test_nested_with_per_thread_info(self):
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
with logger.PerThreadLoggingContext(
work_item_id='workitem', stage_name='stage', step_name='step1'):
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output1 = json.loads(formatter.format(record))
with logger.PerThreadLoggingContext(step_name='step2'):
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output2 = json.loads(formatter.format(record))
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output3 = json.loads(formatter.format(record))
record = self.create_log_record(**self.SAMPLE_RECORD)
log_output4 = json.loads(formatter.format(record))
self.assertEqual(log_output1, dict(
self.SAMPLE_OUTPUT, work='workitem', stage='stage', step='step1'))
self.assertEqual(log_output2, dict(
self.SAMPLE_OUTPUT, work='workitem', stage='stage', step='step2'))
self.assertEqual(log_output3, dict(
self.SAMPLE_OUTPUT, work='workitem', stage='stage', step='step1'))
self.assertEqual(log_output4, self.SAMPLE_OUTPUT)
def test_exception_record(self):
formatter = logger.JsonLogFormatter(job_id='jobid', worker_id='workerid')
try:
raise ValueError('Something')
except ValueError:
attribs = dict(self.SAMPLE_RECORD)
attribs.update({'exc_info': sys.exc_info()})
record = self.create_log_record(**attribs)
log_output = json.loads(formatter.format(record))
# Check if exception type, its message, and stack trace information are in.
exn_output = log_output.pop('exception')
self.assertNotEqual(exn_output.find('ValueError: Something'), -1)
self.assertNotEqual(exn_output.find('logger_test.py'), -1)
self.assertEqual(log_output, self.SAMPLE_OUTPUT)
if __name__ == '__main__':
    # When executed directly, surface INFO-level logs and run all tests.
    logging.getLogger().setLevel(logging.INFO)
    unittest.main()
|
TFSparkNode.py | # Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""
This module provides Spark-compatible functions to launch TensorFlow on the executors.
There are three main phases of operation:
1. Reservation - reserves a port for the TensorFlow process on each executor and also starts a multiprocessing.Manager to
listen for data/control messages. For TensorFlow cluster applications, a cluster_spec "template" should be supplied.
2. Startup - launches the Tensorflow main function on the executors. Note: for cluster applications, this MUST be invoked from
a background thread on the Spark driver because the PS nodes will block until the job completes.
3. Data feeding - sends RDD data to the TensorFlow nodes via each executor's multiprocessing.Manager. Note: because the PS
nodes block on startup, they will not receive any RDD partitions.
4. Shutdown - sends a shutdown control message to the multiprocessing.Managers of the PS nodes.
"""
import logging
import os
import random
import socket
import subprocess
import threading
import time
import uuid
import Queue
import TFManager
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s (%(threadName)s-%(process)d) %(message)s",)
class TFNodeContext:
    """Encapsulates key metadata for a single TF node in the cluster."""
    def __init__(self, worker_num, job_name, task_index, cluster_spec, defaultFS, working_dir, mgr):
        # Stash every constructor argument as a same-named instance attribute.
        fields = dict(worker_num=worker_num, job_name=job_name,
                      task_index=task_index, cluster_spec=cluster_spec,
                      defaultFS=defaultFS, working_dir=working_dir, mgr=mgr)
        self.__dict__.update(fields)
class TFSparkNode(object):
    """
    This class contains the TFManager "singleton" per executor/python-worker. Note that Spark may spawn more than one python-worker
    per executor, so these module functions will reconnect to the "singleton", if needed.
    """
    # Per-python-worker handle to this executor's TFManager;
    # set by reserve() and re-attached by _get_manager().
    mgr = None
def _get_manager(cluster_info, host, ppid):
    """
    Returns this executor's "singleton" instance of the multiprocessing.Manager,
    reconnecting per python-worker if needed.
    """
    # Locate this executor's node record by (host, ppid) and reconnect to it.
    match = next((node for node in cluster_info
                  if node['host'] == host and node['ppid'] == ppid), None)
    if match is not None:
        TFSparkNode.mgr = TFManager.connect(match['addr'], match['authkey'])
    logging.info("Connected to TFSparkNode.mgr on {0}, ppid={1}, state={2}".format(host, ppid, str(TFSparkNode.mgr.get('state'))))
    return TFSparkNode.mgr
def reserve(cluster_spec, tensorboard, queues=['input', 'output']):
    """
    Allocates a port for Tensorflow on this node, starts TensorBoard if requested, and starts a multiprocessing.Manager to listen for data/control msgs.
    """
    # NOTE(review): mutable default for `queues` -- safe only because it is
    # never mutated here; verify before extending.
    def _reserve(iter):
        # worker_num is assigned for the cluster (and may not correlate to Spark's executor id)
        for i in iter:
            worker_num = i
        # assign TF job/task based on provided cluster_spec template (or use default/null values)
        job_name = 'default'
        task_index = -1
        for jobtype in cluster_spec:
            nodes = cluster_spec[jobtype]
            if worker_num in nodes:
                job_name = jobtype
                task_index = nodes.index(worker_num)
                break;
        # get unique id (hostname,ppid) for this executor's JVM
        host = socket.gethostname()
        ppid = os.getppid()
        # start a TFManager and get a free port
        # Note: manager state values come back repr-quoted, hence "'stopped'".
        if TFSparkNode.mgr is not None and str(TFSparkNode.mgr.get('state')) != "'stopped'":
            # raise an exception to force Spark to retry this "reservation" task on another executor
            raise Exception("TFManager already started on {0}, ppid={1}, state={2}".format(host, ppid, str(TFSparkNode.mgr.get("state"))))
        else:
            # use a random uuid as the authkey
            authkey = uuid.uuid4()
            addr = None
            if job_name == 'ps':
                # PS nodes must be remotely accessible in order to shutdown from Spark driver.
                TFSparkNode.mgr = TFManager.start(authkey, ['control'], 'remote')
                addr = (host, TFSparkNode.mgr.address[1])
            else:
                # worker nodes only need to be locally accessible within the executor for data feeding
                TFSparkNode.mgr = TFManager.start(authkey, queues)
                addr = TFSparkNode.mgr.address
        # initialize mgr state
        TFSparkNode.mgr.set('state', 'running')
        TFSparkNode.mgr.set('ppid', ppid)
        # start TensorBoard if requested, only on the first worker (worker:0)
        tb_pid = 0
        tb_port = 0
        if tensorboard and job_name == 'worker' and task_index == 0:
            # grab a free port for TensorBoard by bind-and-release
            tb_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tb_sock.bind(('',0))
            tb_port = tb_sock.getsockname()[1]
            tb_sock.close()
            logdir = "tensorboard_%d" %(worker_num)
            if 'PYSPARK_PYTHON' in os.environ:
                # user-specified Python (typically Python.zip)
                pypath = os.environ['PYSPARK_PYTHON']
                logging.info("PYSPARK_PYTHON: {0}".format(pypath))
                pydir = os.path.dirname(pypath)
                tb_proc = subprocess.Popen([pypath, "%s/tensorboard"%pydir, "--logdir=%s"%logdir, "--port=%d"%tb_port, "--debug"])
            else:
                # system-installed Python & tensorboard
                tb_proc = subprocess.Popen(["tensorboard", "--logdir=%s"%logdir, "--port=%d"%tb_port, "--debug"])
            tb_pid = tb_proc.pid
        # find a free port for TF
        # TODO: bind to port until TF server start
        # (the port could be grabbed by another process between close() and TF startup)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('',0))
        port = s.getsockname()[1]
        # sleep a bit to force Spark to distribute the remaining reservation tasks to other/idle executors
        time.sleep(10)
        s.close()
        # return everything we need to reconnect later
        resp = {
            'worker_num': worker_num,
            'host': host,
            'ppid': ppid,
            'job_name': job_name,
            'task_index': task_index,
            'port': port,
            'tb_pid': tb_pid,
            'tb_port': tb_port,
            'addr': addr,
            'authkey': authkey
        }
        logging.info("TFSparkNode.reserve: {0}".format(resp))
        return [resp]
    return _reserve
def start(fn, tf_args, cluster_info, defaultFS, working_dir, background):
    """
    Wraps the TensorFlow main function in a Spark mapPartitions-compatible function.

    Arguments:
    fn -- the TensorFlow main function, invoked as fn(tf_args, ctx)
    tf_args -- arguments forwarded unchanged to fn
    cluster_info -- list of node-metadata dicts collected during reserve()
    defaultFS -- default file system URI, passed through in the TFNodeContext
    working_dir -- working directory, passed through in the TFNodeContext
    background -- if True, run fn on a background thread ('ps' nodes always do)
    """
    def _mapfn(iter):
        # Note: consuming the input iterator helps Pyspark re-use this worker,
        # but we'll use the worker_num assigned during the reserve() step.
        for i in iter:
            worker_num = i
        # construct a TensorFlow clusterspec from supplied cluster_info AND get node info for this executor
        # Note: we could compute the clusterspec outside this function, but it's just a subset of cluster_info...
        spec = {}
        host = socket.gethostname()
        ppid = os.getppid()
        job_name = ''
        task_index = -1
        for node in cluster_info:
            logging.info("node: {0}".format(node))
            (njob, nhost, nport, nppid) = (node['job_name'], node['host'], node['port'], node['ppid'])
            hosts = [] if njob not in spec else spec[njob]
            hosts.append("{0}:{1}".format(nhost, nport))
            spec[njob] = hosts
            # remember this executor's own identity when we pass its record
            if nhost == host and nppid == ppid:
                (worker_num, job_name, task_index) = (node['worker_num'], node['job_name'], node['task_index'])
        # figure out which executor we're on, and get the reference to the multiprocessing.Manager
        mgr = _get_manager(cluster_info, host, ppid)
        # package up the context for the TF node
        ctx = TFNodeContext(worker_num, job_name, task_index, spec, defaultFS, working_dir, mgr)
        if job_name == 'ps' or background:
            # invoke the TensorFlow main function in a background thread
            logging.info("Starting TensorFlow {0}:{1} on cluster node {2} on background thread".format(job_name, task_index, worker_num))
            t = threading.Thread(target=fn, args=(tf_args, ctx))
            t.start()
            # for ps nodes only, wait indefinitely for a "control" event (None == "stop")
            if job_name == 'ps':
                queue = mgr.get_queue('control')
                done = False
                while not done:
                    msg = queue.get(block=True)
                    logging.info("Got msg: {0}".format(msg))
                    # Fix: use identity comparison for the None sentinel;
                    # '== None' relied on equality, which a message type could override.
                    if msg is None:
                        logging.info("Terminating PS")
                        mgr.set('state', 'stopped')
                        done = True
                    queue.task_done()
        else:
            # otherwise, just run TF function in the main executor/worker thread
            logging.info("Starting TensorFlow {0}:{1} on cluster node {2} on foreground thread".format(job_name, task_index, worker_num))
            fn(tf_args, ctx)
            logging.info("Finished TensorFlow {0}:{1} on cluster node {2}".format(job_name, task_index, worker_num))
        return [(worker_num, job_name, task_index)]
    return _mapfn
def train(cluster_info, qname='input'):
    """
    Feeds Spark partitions into the shared multiprocessing.Queue.
    """
    def _train(iter):
        # get shared queue, reconnecting if necessary
        mgr = _get_manager(cluster_info, socket.gethostname(), os.getppid())
        queue = mgr.get_queue(qname)
        state = str(mgr.get('state'))
        logging.info("mgr.state={0}".format(state))
        # Note: manager state values come back repr-quoted, hence "'terminating'".
        terminating = state == "'terminating'"
        if terminating:
            # drain (and discard) the partition so Spark considers it consumed
            logging.info("mgr is terminating, skipping partition")
            count = 0
            for item in iter:
                count += 1
            logging.info("Skipped {0} items from partition".format(count))
        else:
            logging.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue))
            count = 0
            for item in iter:
                count += 1
                queue.put(item, block=True)
            # wait for consumers to finish processing all items in queue before "finishing" this iterator
            queue.join()
            logging.info("Processed {0} items in partition".format(count))
        return [terminating]
    return _train
def inference(cluster_info, qname='input'):
    """
    Feeds Spark partitions into the shared multiprocessing.Queue and returns inference results.
    """
    def _inference(iter):
        # get shared queue, reconnecting if necessary
        mgr = _get_manager(cluster_info, socket.gethostname(), os.getppid())
        queue_in = mgr.get_queue(qname)
        logging.info("Feeding partition {0} into {1} queue {2}".format(iter, qname, queue_in))
        count = 0
        for item in iter:
            count += 1
            queue_in.put(item, block=True)
        # wait for consumers to finish processing all items in queue before "finishing" this iterator
        queue_in.join()
        logging.info("Processed {0} items in partition".format(count))
        # read result queue: expects exactly one result per input item
        results = []
        queue_out = mgr.get_queue('output')
        while count > 0:
            result = queue_out.get(block=True)
            results.append(result)
            count -= 1
            queue_out.task_done()
        logging.info("Finished processing partition")
        return results
    return _inference
def shutdown(cluster_info, queues=['input']):
    # NOTE(review): mutable default for `queues` -- safe only because it is
    # never mutated here; verify before extending.
    def _shutdown(iter):
        """
        Stops all TensorFlow nodes by feeding None into the multiprocessing.Queues.
        """
        host = socket.gethostname()
        ppid = os.getppid()
        # reconnect to shared queue
        mgr = _get_manager(cluster_info, host, ppid)
        # send SIGTERM to Tensorboard proc (if running)
        for node in cluster_info:
            if node['host'] == host and node['ppid'] == ppid:
                tb_pid = node['tb_pid']
                # tb_pid is 0 when TensorBoard was never started on this node
                if tb_pid != 0:
                    logging.info("Stopping tensorboard (pid={0})".format(tb_pid))
                    subprocess.Popen(["kill", str(tb_pid)])
        # terminate any listening queues; None is the consumer "stop" sentinel
        logging.info("Stopping all queues")
        for q in queues:
            queue = mgr.get_queue(q)
            logging.info("Feeding None into {0} queue".format(q))
            queue.put(None, block=True)
        # mark this executor's manager as stopped so a later reserve() can reuse it
        logging.info("Setting mgr.state to 'stopped'")
        mgr.set('state', 'stopped')
        return [True]
    return _shutdown
|
test_nuage_static_nat.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for Static NAT functionality with Nuage VSP SDN plugin
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.base import (Account,
                             PublicIpRange,
                             Network,
                             VirtualMachine)
# Fix: this import was duplicated on two consecutive lines.
from marvin.lib.common import list_virtual_machines
# Import System Modules
from nose.plugins.attrib import attr
import threading
import copy
import time
class TestNuageStaticNat(nuageTestCase):
"""Test Static NAT functionality with Nuage VSP SDN plugin
"""
    @classmethod
    def setUpClass(cls):
        # All zone/template/test-data setup is handled by the nuageTestCase base.
        super(TestNuageStaticNat, cls).setUpClass()
        return
def setUp(self):
# Create an account
self.account = Account.create(self.api_client,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
return
# create_PublicIpRange - Creates public IP range
def create_PublicIpRange(self):
self.debug("Creating public IP range")
self.test_data["vlan_ip_range"]["startip"] = "20.200.200.100"
self.test_data["vlan_ip_range"]["endip"] = "20.200.200.200"
self.test_data["vlan_ip_range"]["netmask"] = "255.255.255.0"
self.test_data["vlan_ip_range"]["gateway"] = "20.200.200.1"
self.test_data["vlan_ip_range"]["forvirtualnetwork"] = "true"
self.test_data["vlan_ip_range"]["zoneid"] = self.zone.id
public_ip_range = PublicIpRange.create(self.api_client,
self.test_data["vlan_ip_range"]
)
self.debug("Created public IP range")
return public_ip_range
# validate_PublicIpRange - Validates public IP range creation and state
def validate_PublicIpRange(self, public_ip_range):
public_ip_ranges = PublicIpRange.list(self.api_client,
id=public_ip_range.vlan.id
)
self.assertEqual(isinstance(public_ip_ranges, list), True,
"List Public IP Range should return a valid list"
)
self.assertEqual(public_ip_range.vlan.startip,
public_ip_ranges[0].startip,
"Start IP of the public IP range should match with "
"the returned list data"
)
self.assertEqual(public_ip_range.vlan.endip, public_ip_ranges[0].endip,
"End IP of the public IP range should match with the "
"returned list data"
)
# validate_NuageUnderlayPublicIpRange - Validates Nuage underlay enabled
# public IP range creation and state
def validate_NuageUnderlayPublicIpRange(self, public_ip_range):
self.nuage_underlay_public_ip_ranges = \
self.list_NuageUnderlayPublicIpRanges(public_ip_range)
self.assertEqual(isinstance(self.nuage_underlay_public_ip_ranges,
list),
True,
"List Nuage Underlay Public IP Range should return "
"a valid list"
)
self.assertEqual(public_ip_range.vlan.startip,
self.nuage_underlay_public_ip_ranges[0].startip,
"Start IP of the public IP range should match with "
"the returned list data"
)
self.assertEqual(public_ip_range.vlan.endip,
self.nuage_underlay_public_ip_ranges[0].endip,
"End IP of the public IP range should match with the "
"returned list data"
)
    # verify_StaticNAT_traffic - Verifies Static NAT traffic by performing
    # wget traffic test with the given Static NAT enabled public IP, http web
    # server running on the corresponding VM in the given network
    def verify_StaticNAT_traffic(self, network, public_ip, vpc=None,
                                 non_default_nic=False):
        if self.isSimulator:
            # NOTE(review): adjacent string literals are missing a separating
            # space ("static nattraffic tests") -- debug text only; confirm.
            self.debug("Simulator Environment: skipping static nat"
                       "traffic tests.")
            return
        # Adding Ingress Firewall/Network ACL rule
        self.debug("Adding Ingress Firewall/Network ACL rule to make the "
                   "created Static NAT rule (wget) accessible...")
        if vpc:
            public_http_rule = self.create_NetworkAclRule(
                self.test_data["http_rule"], network=network)
        else:
            public_http_rule = self.create_FirewallRule(
                public_ip, self.test_data["http_rule"])
        # VSD verification
        self.verify_vsd_firewall_rule(public_http_rule)
        # wget from VM, retrying while the rule propagates;
        # far fewer attempts on a non-default NIC, which is expected to fail
        tries = 0
        max_tries = 3 if non_default_nic else 120
        filename = None
        headers = None
        while tries < max_tries:
            try:
                filename, headers = self.wget_from_server(public_ip)
                if filename and headers:
                    self.debug("wget from VM is successful")
                    break
            except Exception as e:
                self.debug("Failed to wget from VM - %s" % e)
            self.debug("Retrying wget from VM after some time...")
            time.sleep(5)
            tries += 1
        try:
            if not filename and not headers:
                if non_default_nic:
                    # expected outcome on a non-default NIC, not a failure
                    self.debug("Failed to wget from VM via this NIC as it "
                               "is not the default NIC")
                else:
                    self.fail("Failed to wget from VM")
        finally:
            # Removing Ingress Firewall/Network ACL rule, even when the
            # traffic check above failed the test
            self.debug("Removing the created Ingress Firewall/Network ACL "
                       "rule in the network...")
            public_http_rule.delete(self.api_client)
            # VSD verification: the rule must no longer be resolvable
            with self.assertRaises(Exception):
                self.verify_vsd_firewall_rule(public_http_rule)
            self.debug("Ingress Firewall/Network ACL rule successfully "
                       "deleted in VSD")
        self.debug("Successfully verified Static NAT traffic by "
                   "performing wget traffic test with the given Static "
                   "NAT enabled public IP - %s" % public_ip)
# wget_from_internet - From within the given VM (ssh client),
# fetches index.html file of an Internet web server, wget www.google.com
def wget_from_Internet(self, ssh_client, timeout):
if self.http_proxy:
cmd = "wget --no-cache --output-document=index.html " \
"http://www.google.com/ -e use_proxy=yes -e http_proxy=" + \
self.http_proxy + " --timeout=" + str(timeout)
else:
cmd = "wget --no-cache --output-document=index.html " \
"http://www.google.com/ --timeout=" + str(timeout)
test_result = self.execute_cmd(ssh_client, cmd)
if "200 OK" in test_result:
cmd = "rm -rf index.html*"
self.execute_cmd(ssh_client, cmd)
return test_result
# verify_StaticNAT_Internet_traffic - Verifies Static NAT traffic to the
# Internet (wget www.google.com) from the given VM
def verify_StaticNAT_Internet_traffic(self, vm, network, public_ip,
vpc=None, non_default_nic=False,
negative_test=False):
if self.isSimulator and not negative_test:
self.debug("Simulator Environment: not verifying internet traffic")
return
elif self.isSimulator:
raise Exception("Simulator simulating exception")
# Adding Ingress Firewall/Network ACL rule
self.debug("Adding Ingress Firewall/Network ACL rule to make the "
"created Static NAT rule (SSH) accessible...")
if vpc:
public_ssh_rule = self.create_NetworkAclRule(
self.test_data["ingress_rule"], network=network)
else:
public_ssh_rule = self.create_FirewallRule(
public_ip, self.test_data["ingress_rule"])
# VSD verification
self.verify_vsd_firewall_rule(public_ssh_rule)
# Adding Egress Network ACL rule
if vpc and self.http_proxy and not negative_test:
self.debug("Adding Egress Network ACL rule in the created VPC "
"network to allow access to the configured Internet "
"proxy servers...")
proxy_rule = copy.deepcopy(self.test_data["http_rule"])
proxy_rule["privateport"] = 1080
proxy_rule["publicport"] = 1080
proxy_rule["startport"] = 1080
proxy_rule["endport"] = 1080
internet_proxy_server_rule = self.create_NetworkAclRule(
proxy_rule, traffic_type="Egress", network=network)
# VSD verification
self.verify_vsd_firewall_rule(
internet_proxy_server_rule, traffic_type="Egress")
# SSH into VM
ssh_client = None
try:
if non_default_nic:
with self.assertRaises(Exception):
self.ssh_into_VM(vm, public_ip, negative_test=True)
self.debug("Can not SSH into the VM via this NIC as it is "
"not the default NIC")
else:
ssh_client = self.ssh_into_VM(vm, public_ip)
# wget from Internet
test_result = None
if ssh_client and self.isInternetConnectivityAvailable:
timeout = 100 if negative_test else 300
test_result = self.wget_from_Internet(ssh_client, timeout)
finally:
# Removing Ingress Firewall/Network ACL rule
self.debug("Removing the created Ingress Firewall/Network ACL "
"rule in the network...")
public_ssh_rule.delete(self.api_client)
# VSD verification
with self.assertRaises(Exception):
self.verify_vsd_firewall_rule(public_ssh_rule)
self.debug("Ingress Firewall/Network ACL rule successfully "
"deleted in VSD")
# Removing Egress Network ACL rule
if vpc and self.http_proxy:
self.debug("Removing the created Egress Network ACL rule in the "
"VPC network...")
internet_proxy_server_rule.delete(self.api_client)
# VSD verification
with self.assertRaises(Exception):
self.verify_vsd_firewall_rule(internet_proxy_server_rule)
self.debug("Egress Network ACL rule successfully deleted in VSD")
if test_result:
if "200 OK" in test_result:
self.debug("Successfully verified Static NAT Internet traffic "
"(wget www.google.com) from VM - %s" % vm.name)
else:
self.fail("Failed to verify Static NAT Internet traffic "
"(wget www.google.com) from VM - %s" % vm.name)
else:
if negative_test:
self.fail("Skipped Static NAT Internet traffic "
"(wget www.google.com) test from VM as there is no "
"Internet connectivity in the data center")
else:
self.debug("Skipped Static NAT Internet traffic "
"(wget www.google.com) test from VM as there is no "
"Internet connectivity in the data center")
# enable_staticNat_on_a_starting_vm - Enables Static Nat on a starting VM
# in the given network with the given public IP.
def enable_staticNat_on_a_starting_vm(self):
self.debug("Enables Static Nat on a starting VM in the network - %s "
"with the given public IP - %s" %
(self.network, self.public_ip))
time.sleep(15)
vm_list = list_virtual_machines(self.api_client, listall=True)
self.create_StaticNatRule_For_VM(
vm_list[0], self.public_ip, self.network)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_01_nuage_StaticNAT_public_ip_range(self):
"""Test Nuage VSP Public IP Range creation and deletion
"""
# 1. Create a public IP range (VLAN IP range), check if it is
# successfully created in the zone and the physical network.
# 2. Delete the created public IP range (VLAN IP range), check if it is
# successfully deleted from the zone and the physical network.
# 3. Delete all the created objects (cleanup).
self.debug("Creating a public IP range...")
public_ip_range = self.create_PublicIpRange()
self.validate_PublicIpRange(public_ip_range)
self.debug("Public IP range successfully created")
self.debug("Deleting the created public IP range...")
public_ip_range.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_PublicIpRange(public_ip_range)
self.debug("Public IP range successfully deleted")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_02_nuage_StaticNAT_underlay_public_ip_range(self):
"""Test Nuage VSP Nuage Underlay (underlay networking) enabled Public
IP Range creation and deletion
"""
# 1. Create a public IP range (VLAN IP range), check if it is
# successfully created in the zone and the physical network.
# 2. Enable Nuage underlay capability (underlay networking) for the
# created public IP range (VLAN IP range), check if the Nuage
# underlay (underlay networking) capability is successfully enabled
# for the created public IP range (VLAN IP range).
# 3. Disable Nuage underlay capability (underlay networking) for the
# created public IP range (VLAN IP range), check if the Nuage
# underlay (underlay networking) capability is successfully disabled
# for the created public IP range (VLAN IP range).
# 4. Delete the created public IP range (VLAN IP range), check if it is
# successfully deleted from the zone and the physical network.
# 5. Delete all the created objects (cleanup).
self.debug("Creating a public IP range...")
public_ip_range = self.create_PublicIpRange()
self.validate_PublicIpRange(public_ip_range)
self.debug("Public IP range successfully created")
self.debug("Enabling Nuage underlay capability (underlay networking) "
"for the created public IP range...")
self.enable_NuageUnderlayPublicIpRange(public_ip_range.vlan.id)
self.validate_NuageUnderlayPublicIpRange(public_ip_range)
self.debug("Nuage underlay capability (underlay networking) for the "
"created public IP range is successfully enabled")
self.debug("Disabling Nuage underlay capability (underlay networking) "
"for the created public IP range...")
self.disable_NuageUnderlayPublicIpRange(public_ip_range)
with self.assertRaises(Exception):
self.validate_NuageUnderlayPublicIpRange(public_ip_range)
self.debug("Nuage underlay capability (underlay networking) for the "
"created public IP range is successfully disabled")
self.debug("Deleting the created public IP range...")
public_ip_range.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_PublicIpRange(public_ip_range)
self.debug("Public IP range successfully deleted")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_03_nuage_StaticNAT_isolated_networks(self):
        """Test Nuage VSP Isolated networks with different combinations of
        Static NAT service providers
        """
        # 1. Create Nuage VSP Isolated Network offering with different
        #    combinations of Static NAT service providers
        #    (NuageVsp, VirtualRouter, no StaticNat service), check if all the
        #    network offerings are successfully created and enabled.
        # 2. Recreate the above created Network offering
        #    (Static NAT service provider as NuageVsp) with ispersistent flag
        #    set to True, check if the network offering is successfully created
        #    and enabled.
        # 3. Recreate the above created Network offering
        #    (Static NAT service provider as NuageVsp) with conserve mode On
        #    (conserve_mode flag set to True), check if the network offering is
        #    successfully created and enabled.
        # 4. Create an Isolated network with Static NAT service provider as
        #    NuageVsp, spawn a VM, and create a Static NAT rule. Check if the
        #    network is successfully created, and the VM along with the VR is
        #    deployed successfully in the network, verify if the Static NAT
        #    functionality for this network is successfully enabled in VSD.
        # 5. Create a persistent Isolated network with Static NAT service
        #    provider as NuageVsp, spawn a VM, and create a Static NAT rule.
        #    Check if the network is successfully created, and the VM along
        #    with the VR is deployed successfully in the network, verify if the
        #    Static NAT functionality for this network is successfully enabled
        #    in VSD.
        # 6. Create a conserved Isolated network (conserve mode On) with Static
        #    NAT service provider as NuageVsp, spawn a VM, and create a Static
        #    NAT rule. Check if the network is successfully created, and the VM
        #    along with the VR is deployed successfully in the network, verify
        #    if the Static NAT functionality for this network is successfully
        #    enabled in VSD.
        # 7. Create an Isolated network with Static NAT service provider as
        #    VirtualRouter, spawn a VM, and create a Static NAT rule. Check if
        #    the network is successfully created, and the VM along with the VR
        #    is deployed successfully in the network, verify if the Static NAT
        #    functionality for this network is not enabled in VSD as Nuage VSP
        #    does not support VirtualRouter as the Static NAT service provider.
        # 8. Create an Isolated network with no Static NAT service, spawn a VM,
        #    and create a Static NAT rule. Check if the network is successfully
        #    created, and the VM along with the VR is deployed successfully in
        #    the network, verify if the Static NAT functionality for this
        #    network is not enabled in both CloudStack and VSD as the network
        #    does not support Static NAT service.
        # 9. Delete all the created objects (cleanup).
        # Creating network offerings
        self.debug("Creating Nuage VSP Isolated Network offering with Static "
                   "NAT service provider as NuageVsp...")
        net_off_1 = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        self.validate_NetworkOffering(net_off_1, state="Enabled")
        self.debug("Recreating above Network offering with ispersistent "
                   "True...")
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        network_offering["ispersistent"] = "True"
        net_off_2 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_2, state="Enabled")
        self.debug("Recreating above Network offering with conserve mode "
                   "On...")
        net_off_3 = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["isolated_network_offering"],
            conserve_mode=True)
        self.validate_NetworkOffering(net_off_3, state="Enabled")
        self.debug("Creating Nuage VSP Isolated Network offering with Static "
                   "NAT service provider as VirtualRouter...")
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        network_offering["serviceProviderList"]["StaticNat"] = "VirtualRouter"
        net_off_4 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_4, state="Enabled")
        self.debug("Creating Nuage VSP Isolated Network offering without "
                   "Static NAT service...")
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        network_offering["supportedservices"] = \
            'Dhcp,SourceNat,Connectivity,UserData,Firewall,Dns'
        del network_offering["serviceProviderList"]["StaticNat"]
        net_off_5 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_5, state="Enabled")
        self.debug("Creating persistent Nuage VSP Isolated Network offering "
                   "without VR so no userData and Dns...")
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        network_offering["ispersistent"] = "True"
        network_offering["supportedservices"] = \
            'Dhcp,SourceNat,Connectivity,StaticNat,Firewall'
        del network_offering["serviceProviderList"]["UserData"]
        del network_offering["serviceProviderList"]["Dns"]
        net_off_6 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_6, state="Enabled")
        # Creating Isolated networks, and deploying VMs
        self.debug("Creating an Isolated network with Static NAT service "
                   "provider as NuageVsp...")
        network_1 = self.create_Network(net_off_1, gateway='10.1.1.1')
        self.validate_Network(network_1, state="Allocated")
        self.debug("Deploying a VM in the created Isolated network...")
        vm_1 = self.create_VM(network_1)
        self.validate_Network(network_1, state="Implemented")
        vr_1 = self.get_Router(network_1)
        self.check_Router_state(vr_1, state="Running")
        self.check_VM_state(vm_1, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network_1)
        self.verify_vsd_router(vr_1)
        self.verify_vsd_vm(vm_1)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip = self.acquire_PublicIPAddress(network_1)
        self.validate_PublicIPAddress(public_ip, network_1)
        self.create_StaticNatRule_For_VM(vm_1, public_ip, network_1)
        self.validate_PublicIPAddress(
            public_ip, network_1, static_nat=True, vm=vm_1)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_1, vm_1, public_ip.ipaddress)
        # Deleting Static NAT Rule
        self.debug("Deleting Static NAT Rule for the deployed VM...")
        self.delete_StaticNatRule_For_VM(public_ip)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(
                public_ip, network_1, static_nat=True, vm=vm_1)
        self.debug("Static NAT Rule for the deployed VM successfully deleted "
                   "in CloudStack")
        # VSD verification
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(network_1, vm_1, public_ip.ipaddress)
        self.debug("Floating IP for the deployed VM successfully deleted in "
                   "VSD")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created Isolated "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, network_1)
        self.debug("Acquired public IP in the created Isolated network "
                   "successfully released in CloudStack")
        self.delete_VM(vm_1)
        # Bug CLOUDSTACK-9398
        # NOTE(review): the persistent isolated network scenario (net_off_2 /
        # network_2, step 5 above) is intentionally disabled via the string
        # literal below pending that bug fix; re-enable it once resolved.
        """
        self.debug("Creating a persistent Isolated network with Static NAT "
                   "service...")
        network_2 = self.create_Network(net_off_2, gateway='10.1.1.1')
        self.validate_Network(network_2, state="Implemented")
        vr_2 = self.get_Router(network_2)
        self.check_Router_state(vr_2, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network_2)
        self.verify_vsd_router(vr_2)
        self.debug("Deploying a VM in the created Isolated network...")
        vm_2 = self.create_VM(network_2)
        self.check_VM_state(vm_2, state="Running")
        # VSD verification
        self.verify_vsd_vm(vm_2)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule in the created Isolated network "
                   "with its deployed VM...")
        public_ip = self.acquire_PublicIPAddress(network_2)
        self.validate_PublicIPAddress(public_ip, network_2)
        self.create_StaticNatRule_For_VM(vm_2, public_ip, network_2)
        self.validate_PublicIPAddress(
            public_ip, network_2, static_nat=True, vm=vm_2)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_2, vm_2, public_ip.ipaddress)
        # Deleting Static NAT Rule
        self.debug("Deleting Static NAT Rule for the deployed VM...")
        self.delete_StaticNatRule_For_VM(public_ip)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(
                public_ip, network_2, static_nat=True, vm=vm_2)
        self.debug("Static NAT Rule for the deployed VM successfully deleted "
                   "in CloudStack")
        # VSD verification
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(network_2, vm_2, public_ip.ipaddress)
        self.debug("Floating IP for the deployed VM successfully deleted in "
                   "VSD")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created Isolated "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, network_2)
        self.debug("Acquired public IP in the created Isolated network "
                   "successfully released in CloudStack")
        self.delete_VM(vm_2)
        """
        self.debug("Creating an Isolated network with Static NAT service and "
                   "conserve mode On...")
        network_3 = self.create_Network(net_off_3, gateway='10.1.1.1')
        self.validate_Network(network_3, state="Allocated")
        self.debug("Deploying a VM in the created Isolated network...")
        vm_3 = self.create_VM(network_3)
        self.validate_Network(network_3, state="Implemented")
        vr_3 = self.get_Router(network_3)
        self.check_Router_state(vr_3, state="Running")
        self.check_VM_state(vm_3, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network_3)
        self.verify_vsd_router(vr_3)
        self.verify_vsd_vm(vm_3)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule in the created Isolated network "
                   "with its deployed VM...")
        public_ip = self.acquire_PublicIPAddress(network_3)
        self.validate_PublicIPAddress(public_ip, network_3)
        self.create_StaticNatRule_For_VM(vm_3, public_ip, network_3)
        self.validate_PublicIPAddress(
            public_ip, network_3, static_nat=True, vm=vm_3)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_3, vm_3, public_ip.ipaddress)
        # Deleting Static NAT Rule
        self.debug("Deleting Static NAT Rule for the deployed VM...")
        self.delete_StaticNatRule_For_VM(public_ip)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(
                public_ip, network_3, static_nat=True, vm=vm_3)
        self.debug("Static NAT Rule for the deployed VM successfully deleted "
                   "in CloudStack")
        # VSD verification
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(network_3, vm_3, public_ip.ipaddress)
        self.debug("Floating IP for the deployed VM successfully deleted in "
                   "VSD")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created Isolated "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, network_3)
        self.debug("Acquired public IP in the created Isolated network "
                   "successfully released in CloudStack")
        self.delete_VM(vm_3)
        # Static NAT provider VirtualRouter: CloudStack accepts the rule, but
        # it must NOT be realized in VSD (asserted via assertRaises below)
        self.debug("Creating an Isolated network with Static NAT service "
                   "provider as VirtualRouter...")
        network_4 = self.create_Network(net_off_4, gateway='10.1.1.1')
        self.validate_Network(network_4, state="Allocated")
        self.debug("Deploying a VM in the created Isolated network...")
        vm_4 = self.create_VM(network_4)
        self.validate_Network(network_4, state="Implemented")
        vr_4 = self.get_Router(network_4)
        self.check_Router_state(vr_4, state="Running")
        self.check_VM_state(vm_4, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network_4)
        self.verify_vsd_router(vr_4)
        self.verify_vsd_vm(vm_4)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule in the created Isolated network "
                   "with its deployed VM...")
        public_ip = self.acquire_PublicIPAddress(network_4)
        self.validate_PublicIPAddress(public_ip, network_4)
        self.create_StaticNatRule_For_VM(vm_4, public_ip, network_4)
        self.validate_PublicIPAddress(
            public_ip, network_4, static_nat=True, vm=vm_4)
        # VSD verification for Static NAT functionality
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(network_4, vm_4, public_ip.ipaddress)
        self.debug("Nuage VSP does not support VirtualRouter as the Static "
                   "NAT service provider")
        # Deleting Static NAT Rule
        self.debug("Deleting Static NAT Rule for the deployed VM...")
        self.delete_StaticNatRule_For_VM(public_ip)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(
                public_ip, network_4, static_nat=True, vm=vm_4)
        self.debug("Static NAT Rule for the deployed VM successfully deleted "
                   "in CloudStack")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created Isolated "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, network_4)
        self.debug("Acquired public IP in the created Isolated network "
                   "successfully released in CloudStack")
        self.delete_VM(vm_4)
        self.debug("Creating an Isolated network with no Static NAT "
                   "service...")
        network_5 = self.create_Network(net_off_5, gateway='10.1.1.1')
        self.validate_Network(network_5, state="Allocated")
        self.debug("Deploying a VM in the created Isolated network...")
        vm_5 = self.create_VM(network_5)
        self.validate_Network(network_5, state="Implemented")
        vr_5 = self.get_Router(network_5)
        self.check_Router_state(vr_5, state="Running")
        self.check_VM_state(vm_5, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network_5)
        self.verify_vsd_router(vr_5)
        self.verify_vsd_vm(vm_5)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule in the created Isolated network "
                   "with its deployed VM...")
        public_ip = self.acquire_PublicIPAddress(network_5)
        self.validate_PublicIPAddress(public_ip, network_5)
        with self.assertRaises(Exception):
            self.create_StaticNatRule_For_VM(vm_5, public_ip, network_5)
        self.debug("Static NAT rule creation failed as the network does not "
                   "support Static NAT service")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created Isolated "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, network_5)
        self.debug("Acquired public IP in the created Isolated network "
                   "successfully released in CloudStack")
        self.delete_VM(vm_5)
        # net_off_6 has no UserData/Dns providers, so this persistent network
        # comes up without a VR and get_Router must raise
        self.debug("Creating a persistent Isolated network with Static NAT "
                   "service without UserData Dns, so without VR")
        network_6 = self.create_Network(net_off_6, gateway='10.6.1.1')
        self.validate_Network(network_6, state="Implemented")
        self.debug("Deploying a VM in the created Isolated network...")
        vm_6 = self.create_VM(network_6)
        self.validate_Network(network_6, state="Implemented")
        with self.assertRaises(Exception):
            self.get_Router(network_6)
        # VSD verification
        self.verify_vsd_network(self.domain.id, network_6)
        self.verify_vsd_vm(vm_6)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule in the created Isolated network "
                   "with its deployed VM...")
        public_ip = self.acquire_PublicIPAddress(network_6)
        self.validate_PublicIPAddress(public_ip, network_6)
        self.create_StaticNatRule_For_VM(vm_6, public_ip, network_6)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_6, vm_6, public_ip.ipaddress)
        # Deleting Static NAT Rule
        self.debug("Deleting Static NAT Rule for the deployed VM...")
        self.delete_StaticNatRule_For_VM(public_ip)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(
                public_ip, network_6, static_nat=True, vm=vm_6)
        self.debug("Static NAT Rule for the deployed VM successfully deleted "
                   "in CloudStack")
        # VSD verification
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(network_6, vm_6, public_ip.ipaddress)
        self.debug("Floating IP for the deployed VM successfully deleted in "
                   "VSD")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created Isolated "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, network_6)
        self.debug("Acquired public IP in the created Isolated network "
                   "successfully released in CloudStack")
        self.delete_VM(vm_6)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="false")
    def test_04_nuage_StaticNAT_vpc_networks(self):
        """Test Nuage VSP VPC networks with different combinations of Static
        NAT service providers
        """
        # 1. Create Nuage VSP VPC offering with different combinations of
        #    Static NAT service providers
        #    (NuageVsp, VirtualRouter, no StaticNat service), check if all the
        #    VPC offerings are successfully created and enabled.
        # 2. Create VPCs with different combinations of Static NAT service
        #    providers (NuageVsp, VirtualRouter, no StaticNat service), check
        #    if only the VPCs with Static NAT service provider as NuageVsp and
        #    no StaticNat service are successfully created and enabled.
        # 3. Create Nuage VSP VPC Network offering with different combinations
        #    of Static NAT service providers
        #    (NuageVsp, VirtualRouter, no StaticNat service), check if all the
        #    network offerings are successfully created and enabled.
        # 4. Recreate the above created Network offering
        #    (Static NAT service provider as NuageVsp) with ispersistent flag
        #    set to False, check if the network offering is successfully
        #    created and enabled.
        # 5. Recreate the above created Network offering
        #    (Static NAT service provider as NuageVsp) with conserve mode On
        #    (conserve_mode flag set to True), check if the network offering
        #    creation failed as only networks with conserve mode Off can belong
        #    to VPC.
        # 6. Create a VPC network with Static NAT service provider as NuageVsp
        #    in the VPC with StaticNat service, spawn a VM, and create a Static
        #    NAT rule. Check if the tier is added to the VPC VR, and the VM is
        #    deployed successfully in the tier, verify if the Static NAT
        #    functionality for this network is successfully enabled in VSD.
        # 7. Create a non persistent VPC network with Static NAT service
        #    provider as NuageVsp in the VPC with StaticNat service, spawn a
        #    VM, and create a Static NAT rule. Check if the tier creation
        #    failed as Nuage VSP does not support non persistent VPC networks.
        # 8. Create a VPC network with Static NAT service provider as
        #    VpcVirtualRouter in the VPC with StaticNat service, spawn a VM,
        #    and create a Static NAT rule. Check if the tier is added to the
        #    VPC VR, and the VM is deployed successfully in the tier, verify if
        #    the Static NAT functionality for this network is not enabled in
        #    VSD as Nuage VSP does not support VirtualRouter as the Static NAT
        #    service provider.
        # 9. Create a VPC network with no Static NAT service in the VPC with
        #    StaticNat service, spawn a VM, and create a Static NAT rule. Check
        #    if the tier is added to the VPC VR, and the VM is deployed
        #    successfully in the tier, verify if the Static NAT functionality
        #    for this network is not enabled in both CloudStack and VSD as the
        #    network does not support Static NAT service.
        # 10. Create a VPC network with Static NAT service provider as NuageVsp
        #     in the VPC without StaticNat service, check if the tier creation
        #     failed as the VPC does not support Static NAT service.
        # 11. Delete all the created objects (cleanup).
        # Creating VPC offerings
        self.debug("Creating Nuage VSP VPC offering with Static NAT service "
                   "provider as NuageVsp...")
        vpc_off_1 = self.create_VpcOffering(
            self.test_data["nuagevsp"]["vpc_offering"])
        self.validate_VpcOffering(vpc_off_1, state="Enabled")
        self.debug("Creating Nuage VSP VPC offering with Static NAT service "
                   "provider as VpcVirtualRouter...")
        vpc_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["vpc_offering"])
        vpc_offering["serviceProviderList"]["StaticNat"] = "VpcVirtualRouter"
        vpc_off_2 = self.create_VpcOffering(vpc_offering)
        self.validate_VpcOffering(vpc_off_2, state="Enabled")
        self.debug("Creating Nuage VSP VPC offering without Static NAT "
                   "service...")
        vpc_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["vpc_offering"])
        vpc_offering["supportedservices"] = \
            'Dhcp,SourceNat,NetworkACL,Connectivity,UserData,Dns'
        del vpc_offering["serviceProviderList"]["StaticNat"]
        vpc_off_3 = self.create_VpcOffering(vpc_offering)
        self.validate_VpcOffering(vpc_off_3, state="Enabled")
        # Creating VPCs
        self.debug("Creating a VPC with Static NAT service provider as "
                   "NuageVsp...")
        vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc_1, state="Enabled")
        # VPC creation with VpcVirtualRouter as the StaticNat provider must
        # be rejected (asserted via assertRaises below)
        self.debug("Creating a VPC with Static NAT service provider as "
                   "VpcVirtualRouter...")
        with self.assertRaises(Exception):
            self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
        self.debug("Nuage VSP does not support provider VpcVirtualRouter for "
                   "service Static NAT for VPCs")
        self.debug("Creating a VPC without Static NAT service...")
        vpc_2 = self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc_2, state="Enabled")
        # Creating network offerings
        self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
                   "service provider as NuageVsp...")
        net_off_1 = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["vpc_network_offering"])
        self.validate_NetworkOffering(net_off_1, state="Enabled")
        self.debug("Recreating above Network offering with ispersistent "
                   "False...")
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["vpc_network_offering"])
        network_offering["ispersistent"] = "False"
        net_off_2 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_2, state="Enabled")
        self.debug("Recreating above Network offering with conserve mode "
                   "On...")
        with self.assertRaises(Exception):
            self.create_NetworkOffering(
                self.test_data["nuagevsp"]["vpc_network_offering"],
                conserve_mode=True)
        self.debug("Network offering creation failed as only networks with "
                   "conserve mode Off can belong to VPC")
        self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
                   "service provider as VpcVirtualRouter...")
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["vpc_network_offering"])
        network_offering["serviceProviderList"]["StaticNat"] = \
            "VpcVirtualRouter"
        net_off_3 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_3, state="Enabled")
        self.debug("Creating Nuage VSP VPC Network offering without Static "
                   "NAT service...")
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["vpc_network_offering"])
        network_offering["supportedservices"] = \
            'Dhcp,SourceNat,NetworkACL,Connectivity,UserData,Dns'
        del network_offering["serviceProviderList"]["StaticNat"]
        net_off_4 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_4, state="Enabled")
        # Creating VPC networks in the VPCs, and deploying VMs
        self.debug("Creating a VPC network with Static NAT service provider "
                   "as NuageVsp in vpc_1...")
        vpc_tier_1 = self.create_Network(
            net_off_1, gateway='10.1.1.1', vpc=vpc_1)
        self.validate_Network(vpc_tier_1, state="Implemented")
        vpc_vr = self.get_Router(vpc_tier_1)
        self.check_Router_state(vpc_vr, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, vpc_tier_1, vpc_1)
        self.verify_vsd_router(vpc_vr)
        self.debug("Deploying a VM in the created VPC network...")
        vpc_vm_1 = self.create_VM(vpc_tier_1)
        self.check_VM_state(vpc_vm_1, state="Running")
        # VSD verification
        self.verify_vsd_vm(vpc_vm_1)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule in the created VPC network with "
                   "its deployed VM...")
        public_ip = self.acquire_PublicIPAddress(vpc_tier_1, vpc=vpc_1)
        self.validate_PublicIPAddress(public_ip, vpc_tier_1)
        self.create_StaticNatRule_For_VM(vpc_vm_1, public_ip, vpc_tier_1)
        self.validate_PublicIPAddress(
            public_ip, vpc_tier_1, static_nat=True, vm=vpc_vm_1)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(
            vpc_tier_1, vpc_vm_1, public_ip.ipaddress, vpc=vpc_1)
        # Deleting Static NAT Rule
        self.debug("Deleting Static NAT Rule for the deployed VM...")
        self.delete_StaticNatRule_For_VM(public_ip)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(
                public_ip, vpc_tier_1, static_nat=True, vm=vpc_vm_1)
        self.debug("Static NAT Rule for the deployed VM successfully deleted "
                   "in CloudStack")
        # VSD verification
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(
                vpc_tier_1, vpc_vm_1, public_ip.ipaddress, vpc=vpc_1)
        self.debug("Floating IP for the deployed VM successfully deleted in "
                   "VSD")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created VPC "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, vpc_tier_1)
        self.debug("Acquired public IP in the created VPC network "
                   "successfully released in CloudStack")
        self.debug("Creating a non persistent VPC network with Static NAT "
                   "service in vpc_1...")
        with self.assertRaises(Exception):
            self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc_1)
        self.debug("Nuage VSP does not support non persistent VPC networks")
        self.debug("Creating a VPC network with Static NAT service provider "
                   "as VpcVirtualRouter in vpc_1...")
        with self.assertRaises(Exception):
            self.create_Network(net_off_3, gateway='10.1.2.1', vpc=vpc_1)
        self.debug("Provider VpcVirtualRouter is not supported for Static NAT "
                   "service in VPC vpc_1")
        self.debug("Creating a VPC network with no Static NAT service in "
                   "vpc_1...")
        vpc_tier_2 = self.create_Network(
            net_off_4, gateway='10.1.2.1', vpc=vpc_1)
        self.validate_Network(vpc_tier_2, state="Implemented")
        vpc_vr = self.get_Router(vpc_tier_2)
        self.check_Router_state(vpc_vr, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, vpc_tier_2, vpc_1)
        self.verify_vsd_router(vpc_vr)
        self.debug("Deploying a VM in the created VPC network...")
        vpc_vm_2 = self.create_VM(vpc_tier_2)
        self.check_VM_state(vpc_vm_2, state="Running")
        # VSD verification
        self.verify_vsd_vm(vpc_vm_2)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule in the created VPC network with "
                   "its deployed VM...")
        public_ip = self.acquire_PublicIPAddress(vpc_tier_2, vpc=vpc_1)
        self.validate_PublicIPAddress(public_ip, vpc_tier_2)
        with self.assertRaises(Exception):
            self.create_StaticNatRule_For_VM(vpc_vm_2, public_ip, vpc_tier_2)
        self.debug("Static NAT rule creation failed as the network does not "
                   "support Static NAT service")
        # Releasing acquired public IP
        self.debug("Releasing the acquired public IP in the created VPC "
                   "network...")
        public_ip.delete(self.api_client)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(public_ip, vpc_tier_2)
        self.debug("Acquired public IP in the created VPC network "
                   "successfully released in CloudStack")
        self.debug("Creating a VPC network with Static NAT service provider "
                   "as NuageVsp in vpc_2...")
        with self.assertRaises(Exception):
            self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_2)
        self.debug("VPC Network creation failed as vpc_2 does not support "
                   "Static NAT service")
    @attr(tags=["advanced", "nuagevsp"], required_hardware="true")
    def test_05_nuage_StaticNAT_isolated_networks_traffic(self):
        """Test Nuage VSP Static NAT functionality for Isolated network by
        performing (wget) traffic tests to the Internet

        Exercises two flavors of Isolated network side by side: a regular
        one (with a VR) and a persistent one without UserData/Dns services
        (hence without a VR). Two VMs are deployed per network, each with
        its own Static NAT rule, and traffic is verified for all four.
        """
        # 1. Create an Isolated network with Static NAT service provider as
        #    NuageVsp, spawn a VM, and create a Static NAT rule. Check if the
        #    network is successfully created, and the VM along with the VR is
        #    deployed successfully in the network, verify if the Static NAT
        #    functionality for this network is successfully enabled in VSD.
        # 2. Perform and verify Static NAT traffic test (wget www.google.com)
        #    to the Internet from the deployed VM.
        # 3. Deploy another VM in the created Isolated network and create a
        #    Static NAT rule. Check if the VM is deployed successfully in the
        #    network, verify if the Static NAT functionality for this network
        #    is successfully enabled in VSD.
        # 4. Perform and verify Static NAT traffic test (wget www.google.com)
        #    to the Internet from the deployed VM.
        # 5. Delete all the created objects (cleanup).
        # Note: Above mentioned Static NAT traffic test is done by SSHing into
        #       the VM using a Static NAT rule, and performing wget traffic
        #       test (wget www.google.com) to the Internet from the VM.
        # Creating network offering
        self.debug("Creating Nuage VSP Isolated Network offering with Static "
                   "NAT service provider as NuageVsp...")
        net_off = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        self.validate_NetworkOffering(net_off, state="Enabled")
        self.debug("Creating persistent Nuage VSP Isolated Network offering "
                   "without VR so no userData and Dns...")
        # Derive a persistent offering from the standard one; removing the
        # UserData and Dns providers means no VR is spawned for the network.
        network_offering = copy.deepcopy(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        network_offering["ispersistent"] = "True"
        network_offering["supportedservices"] = \
            'Dhcp,SourceNat,Connectivity,StaticNat,Firewall'
        del network_offering["serviceProviderList"]["UserData"]
        del network_offering["serviceProviderList"]["Dns"]
        net_off_2 = self.create_NetworkOffering(network_offering)
        self.validate_NetworkOffering(net_off_2, state="Enabled")
        # Creating Isolated network, deploying VMs, and verifying Static NAT
        # traffic
        self.debug("Creating an Isolated network with Static NAT service...")
        network = self.create_Network(net_off, gateway='10.1.1.1')
        self.validate_Network(network, state="Allocated")
        self.debug("Creating a persistent Isolated network with Static NAT "
                   "service without UserData Dns, so without VR")
        network_2 = self.create_Network(net_off_2, gateway='10.2.1.1')
        # The persistent network is Implemented immediately, before any VM.
        self.validate_Network(network_2, state="Implemented")
        self.debug("Deploying a VM in the created Isolated network...")
        vm_1 = self.create_VM(network)
        # First VM deployment transitions the regular network to Implemented.
        self.validate_Network(network, state="Implemented")
        vr = self.get_Router(network)
        self.check_Router_state(vr, state="Running")
        self.check_VM_state(vm_1, state="Running")
        vm_21 = self.create_VM(network_2)
        self.validate_Network(network_2, state="Implemented")
        # No VR is expected in the VR-less persistent network.
        with self.assertRaises(Exception):
            self.get_Router(network_2)
        # VSD verification
        self.verify_vsd_network(self.domain.id, network)
        self.verify_vsd_router(vr)
        self.verify_vsd_vm(vm_1)
        self.verify_vsd_network(self.domain.id, network_2)
        self.verify_vsd_vm(vm_21)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip_1 = self.acquire_PublicIPAddress(network)
        self.validate_PublicIPAddress(public_ip_1, network)
        self.create_StaticNatRule_For_VM(vm_1, public_ip_1, network)
        self.validate_PublicIPAddress(
            public_ip_1, network, static_nat=True, vm=vm_1)
        public_ip_21 = self.acquire_PublicIPAddress(network_2)
        self.validate_PublicIPAddress(public_ip_21, network_2)
        self.create_StaticNatRule_For_VM(vm_21, public_ip_21, network_2)
        self.validate_PublicIPAddress(
            public_ip_21, network_2, static_nat=True, vm=vm_21)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm_1, public_ip_1.ipaddress)
        self.verify_vsd_floating_ip(network_2, vm_21, public_ip_21.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_1)
        self.verify_StaticNAT_traffic(network_2, public_ip_21)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm_1, network, public_ip_1)
        self.verify_StaticNAT_Internet_traffic(vm_21, network_2, public_ip_21)
        self.debug("Deploying another VM in the created Isolated network...")
        vm_2 = self.create_VM(network)
        self.check_VM_state(vm_2, state="Running")
        vm_22 = self.create_VM(network_2)
        self.check_VM_state(vm_22, state="Running")
        # VSD verification
        self.verify_vsd_vm(vm_2)
        self.verify_vsd_vm(vm_22)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip_2 = self.acquire_PublicIPAddress(network)
        self.validate_PublicIPAddress(public_ip_2, network)
        self.create_StaticNatRule_For_VM(vm_2, public_ip_2, network)
        self.validate_PublicIPAddress(
            public_ip_2, network, static_nat=True, vm=vm_2)
        public_ip_22 = self.acquire_PublicIPAddress(network_2)
        self.validate_PublicIPAddress(public_ip_22, network_2)
        self.create_StaticNatRule_For_VM(vm_22, public_ip_22, network_2)
        self.validate_PublicIPAddress(
            public_ip_22, network_2, static_nat=True, vm=vm_22)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm_2, public_ip_2.ipaddress)
        self.verify_vsd_floating_ip(network_2, vm_22, public_ip_22.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_2)
        self.verify_StaticNAT_traffic(network_2, public_ip_22)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm_2, network, public_ip_2)
        self.verify_StaticNAT_Internet_traffic(vm_22, network_2, public_ip_22)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="true")
    def test_06_nuage_StaticNAT_vpc_network_traffic(self):
        """Test Nuage VSP Static NAT functionality for VPC network by
        performing (wget) traffic tests to the Internet

        Creates one VPC tier, adds egress DNS/HTTP ACLs (VPC tiers block
        egress by default), then deploys two VMs with Static NAT rules and
        verifies traffic for each.
        """
        # 1. Create a VPC network with Static NAT service provider as NuageVsp
        #    in the VPC with StaticNat service, spawn a VM, and create a Static
        #    NAT rule. Check if the tier is added to the VPC VR, and the VM is
        #    deployed successfully in the tier, verify if the Static NAT
        #    functionality for this network is successfully enabled in VSD.
        # 2. Perform and verify Static NAT traffic test (wget www.google.com)
        #    to the Internet from the deployed VM.
        # 3. Deploy another VM in the created VPC network and create a Static
        #    NAT rule. Check if the VM is deployed successfully in the network,
        #    verify if the Static NAT functionality for this network is
        #    successfully enabled in VSD.
        # 4. Perform and verify Static NAT traffic test (wget www.google.com)
        #    to the Internet from the deployed VM.
        # 5. Delete all the created objects (cleanup).
        # Note: Above mentioned Static NAT traffic test is done by SSHing into
        #       the VM using a Static NAT rule, and performing wget traffic
        #       test (wget www.google.com) to the Internet from the VM.
        # Creating VPC offering
        self.debug("Creating Nuage VSP VPC offering with Static NAT service "
                   "provider as NuageVsp...")
        vpc_off = self.create_VpcOffering(
            self.test_data["nuagevsp"]["vpc_offering"])
        self.validate_VpcOffering(vpc_off, state="Enabled")
        # Creating VPC
        self.debug("Creating a VPC with Static NAT service provider as "
                   "NuageVsp...")
        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc, state="Enabled")
        # Creating network offering
        self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
                   "service provider as NuageVsp...")
        net_off = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["vpc_network_offering"])
        self.validate_NetworkOffering(net_off, state="Enabled")
        # Creating VPC network in the VPC, deploying VMs, and verifying Static
        # NAT traffic
        self.debug("Creating a VPC network with Static NAT service...")
        vpc_tier = self.create_Network(net_off, gateway='10.1.1.1', vpc=vpc)
        self.validate_Network(vpc_tier, state="Implemented")
        vpc_vr = self.get_Router(vpc_tier)
        self.check_Router_state(vpc_vr, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
        self.verify_vsd_router(vpc_vr)
        # Adding Egress Network ACL rules
        # Egress DNS and HTTP must be explicitly allowed for the wget
        # traffic test below to reach the Internet from the tier's VMs.
        self.debug("Adding Egress Network ACL rules in the created VPC "
                   "network to allow Static NAT (DNS & HTTP) traffic to the "
                   "Internet from the VMs in the network...")
        dns_rule = self.create_NetworkAclRule(
            self.test_data["dns_rule"], traffic_type="Egress",
            network=vpc_tier)
        http_rule = self.create_NetworkAclRule(
            self.test_data["http_rule"], traffic_type="Egress",
            network=vpc_tier)
        # VSD verification for added Egress Network ACL rules
        self.verify_vsd_firewall_rule(dns_rule, traffic_type="Egress")
        self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
        self.debug("Deploying a VM in the created VPC network...")
        vpc_vm_1 = self.create_VM(vpc_tier)
        self.check_VM_state(vpc_vm_1, state="Running")
        # VSD verification
        self.verify_vsd_vm(vpc_vm_1)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created VPC network...")
        public_ip_1 = self.acquire_PublicIPAddress(vpc_tier, vpc=vpc)
        self.validate_PublicIPAddress(public_ip_1, vpc_tier)
        self.create_StaticNatRule_For_VM(vpc_vm_1, public_ip_1, vpc_tier)
        self.validate_PublicIPAddress(
            public_ip_1, vpc_tier, static_nat=True, vm=vpc_vm_1)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(
            vpc_tier, vpc_vm_1, public_ip_1.ipaddress, vpc=vpc)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(vpc_tier, public_ip_1, vpc=vpc)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(
            vpc_vm_1, vpc_tier, public_ip_1, vpc=vpc)
        self.debug("Deploying another VM in the created VPC network...")
        vpc_vm_2 = self.create_VM(vpc_tier)
        self.check_VM_state(vpc_vm_2, state="Running")
        # VSD verification
        self.verify_vsd_vm(vpc_vm_2)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created VPC network...")
        public_ip_2 = self.acquire_PublicIPAddress(vpc_tier, vpc=vpc)
        self.validate_PublicIPAddress(public_ip_2, vpc_tier)
        self.create_StaticNatRule_For_VM(vpc_vm_2, public_ip_2, vpc_tier)
        self.validate_PublicIPAddress(
            public_ip_2, vpc_tier, static_nat=True, vm=vpc_vm_2)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(
            vpc_tier, vpc_vm_2, public_ip_2.ipaddress, vpc=vpc)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(
            vpc_vm_2, vpc_tier, public_ip_2, vpc=vpc)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="true")
    def test_07_nuage_StaticNAT_acl_rules_traffic(self):
        """Test Nuage VSP Static NAT functionality with different Egress
        Firewall/Network ACL rules by performing (wget) traffic tests to the
        Internet

        Part 1 (Isolated network): traffic passes with the default egress
        policy, is dropped after adding a blocking egress firewall rule,
        and passes again once that rule is removed.
        Part 2 (VPC tier): traffic passes with explicit allow ACLs, is
        blocked after the DNS allow-rule is removed (default deny), and
        passes again after re-adding it.
        """
        # Repeat the tests in the testcases
        # "test_05_nuage_StaticNAT_isolated_networks_traffic" and
        # "test_06_nuage_StaticNAT_vpc_network_traffic" with different Egress
        # Firewall/Network ACL rules:
        # 1. Allow and block Egress Firewall rules
        # 2. Allow and block Egress Network ACL rules
        # Verify the above Egress Firewall/Network ACL rules by performing and
        # verifying Static NAT traffic test (wget www.google.com) to the
        # Internet from the VM.
        # Delete all the created objects (cleanup).
        # Creating network offering
        self.debug("Creating Nuage VSP Isolated Network offering with Static "
                   "NAT service provider as NuageVsp...")
        net_off = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        self.validate_NetworkOffering(net_off, state="Enabled")
        # Creating Isolated network, deploying VMs, and verifying Static NAT
        # traffic with Egress Firewall rules
        self.debug("Creating an Isolated network with Static NAT service...")
        network = self.create_Network(net_off, gateway='10.1.1.1')
        self.validate_Network(network, state="Allocated")
        self.debug("Deploying a VM in the created Isolated network...")
        vm = self.create_VM(network)
        self.validate_Network(network, state="Implemented")
        vr = self.get_Router(network)
        self.check_Router_state(vr, state="Running")
        self.check_VM_state(vm, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network)
        self.verify_vsd_router(vr)
        self.verify_vsd_vm(vm)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip_1 = self.acquire_PublicIPAddress(network)
        self.validate_PublicIPAddress(public_ip_1, network)
        self.create_StaticNatRule_For_VM(vm, public_ip_1, network)
        self.validate_PublicIPAddress(
            public_ip_1, network, static_nat=True, vm=vm)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm, public_ip_1.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_1)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm, network, public_ip_1)
        # Adding Egress Firewall rule
        # In Isolated networks an egress firewall rule BLOCKS the matched
        # traffic (default egress policy otherwise allows it).
        self.debug("Adding an Egress Firewall rule in the created Isolated "
                   "network to block/drop Static NAT (DNS) traffic to the "
                   "Internet from the VMs in the network...")
        dns_rule_1 = self.create_EgressFirewallRule(
            network, self.test_data["dns_rule"])
        # VSD verification for added Egress Firewall rule
        self.verify_vsd_firewall_rule(dns_rule_1, traffic_type="Egress")
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm, public_ip_1.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_1)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        with self.assertRaises(Exception):
            self.verify_StaticNAT_Internet_traffic(
                vm, network, public_ip_1, negative_test=True)
        self.debug("Static NAT (DNS) traffic to the Internet from the "
                   "deployed VM is blocked/dropped by the added Egress "
                   "Firewall rule")
        # Removing Egress Firewall rule
        self.debug("Removing the added Egress Firewall rule in the created "
                   "Isolated network to allow Static NAT (DNS) traffic to "
                   "the Internet from the VMs in the network "
                   "(Default Egress Firewall rule)...")
        dns_rule_1.delete(self.api_client)
        # VSD verification for removed Egress Firewall rule
        with self.assertRaises(Exception):
            self.verify_vsd_firewall_rule(dns_rule_1, traffic_type="Egress")
        self.debug("Egress Firewall rule successfully deleted in VSD")
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm, public_ip_1.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_1)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm, network, public_ip_1)
        # Creating VPC offering
        self.debug("Creating Nuage VSP VPC offering with Static NAT service "
                   "provider as NuageVsp...")
        vpc_off = self.create_VpcOffering(
            self.test_data["nuagevsp"]["vpc_offering"])
        self.validate_VpcOffering(vpc_off, state="Enabled")
        # Creating VPC
        self.debug("Creating a VPC with Static NAT service provider as "
                   "NuageVsp...")
        vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
        self.validate_Vpc(vpc, state="Enabled")
        # Creating network offering
        self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
                   "service provider as NuageVsp...")
        net_off = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["vpc_network_offering"])
        self.validate_NetworkOffering(net_off, state="Enabled")
        # Creating VPC network in the VPC, deploying VMs, and verifying Static
        # NAT traffic with Network ACl rules
        self.debug("Creating a VPC network with Static NAT service...")
        vpc_tier = self.create_Network(net_off, gateway='10.1.1.1', vpc=vpc)
        self.validate_Network(vpc_tier, state="Implemented")
        vpc_vr = self.get_Router(vpc_tier)
        self.check_Router_state(vpc_vr, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
        self.verify_vsd_router(vpc_vr)
        # Adding Egress Network ACL rules
        # In VPC tiers an egress ACL rule ALLOWS the matched traffic
        # (default egress policy is deny) — the inverse of the Isolated
        # network's egress firewall semantics above.
        self.debug("Adding Egress Network ACL rules in the created VPC "
                   "network to allow Static NAT (DNS & HTTP) traffic to the "
                   "Internet from the VMs in the network...")
        dns_rule_2 = self.create_NetworkAclRule(
            self.test_data["dns_rule"], traffic_type="Egress",
            network=vpc_tier)
        http_rule = self.create_NetworkAclRule(
            self.test_data["http_rule"], traffic_type="Egress",
            network=vpc_tier)
        # VSD verification for added Egress Network ACL rules
        self.verify_vsd_firewall_rule(dns_rule_2, traffic_type="Egress")
        self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
        self.debug("Deploying a VM in the created VPC network...")
        vpc_vm = self.create_VM(vpc_tier)
        self.check_VM_state(vpc_vm, state="Running")
        # VSD verification
        self.verify_vsd_vm(vpc_vm)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created VPC network...")
        public_ip_2 = self.acquire_PublicIPAddress(vpc_tier, vpc=vpc)
        self.validate_PublicIPAddress(public_ip_2, vpc_tier)
        self.create_StaticNatRule_For_VM(vpc_vm, public_ip_2, vpc_tier)
        self.validate_PublicIPAddress(
            public_ip_2, vpc_tier, static_nat=True, vm=vpc_vm)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(
            vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(
            vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
        # Removing Egress Network ACL rule
        self.debug("Removing the added Egress Network ACL rule in the created "
                   "VPC network to block Static NAT (DNS) traffic to the "
                   "Internet from the VMs in the network "
                   "(Default Egress Network ACL rule)...")
        dns_rule_2.delete(self.api_client)
        # VSD verification for removed Egress Network ACL rule
        with self.assertRaises(Exception):
            self.verify_vsd_firewall_rule(dns_rule_2, traffic_type="Egress")
        self.debug("Egress Network ACL rule successfully deleted in VSD")
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(
            vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        with self.assertRaises(Exception):
            self.verify_StaticNAT_Internet_traffic(
                vpc_vm, vpc_tier, public_ip_2, vpc=vpc, negative_test=True)
        self.debug("Static NAT (DNS) traffic to the Internet from the "
                   "deployed VM is blocked by the Default Egress Network ACL "
                   "rule")
        # Re-adding Egress Network ACL rule
        self.debug("Re-adding the Egress Network ACL rule in the created VPC "
                   "network to allow Static NAT (DNS) traffic to the "
                   "Internet from the VMs in the network...")
        dns_rule_2 = self.create_NetworkAclRule(
            self.test_data["dns_rule"], traffic_type="Egress",
            network=vpc_tier)
        # VSD verification for re-added Egress Network ACL rule
        self.verify_vsd_firewall_rule(dns_rule_2, traffic_type="Egress")
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(
            vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(
            vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="true")
    def test_08_nuage_StaticNAT_vm_nic_operations_traffic(self):
        """Test Nuage VSP Static NAT functionality with VM NIC operations by
        performing (wget) traffic tests to the Internet

        A multi-NIC VM spanning two Isolated networks has Static NAT rules
        on both NICs; the test then updates the default NIC, removes and
        re-adds the non-default NIC (removal must fail while a Static NAT
        rule is enabled on it), verifying VSD state and traffic after each
        operation.
        """
        # Repeat the tests in the testcase
        # "test_05_nuage_StaticNAT_isolated_networks_traffic" with VM NIC
        # operations:
        # 1. Updating default VM NIC
        # 2. Removing non-default VM NIC
        # 3. Adding and updating default VM NIC
        # Verify the above VM NIC operations by performing and verifying Static
        # NAT traffic test (wget www.google.com) to the Internet from the VM.
        # Delete all the created objects (cleanup).
        # Creating network offering
        self.debug("Creating Nuage VSP Isolated Network offering with Static "
                   "NAT service provider as NuageVsp...")
        net_off = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        self.validate_NetworkOffering(net_off, state="Enabled")
        # Creating Isolated networks, deploying a multi-nic VM, and verifying
        # Static NAT traffic with VM NIC operations
        self.debug("Creating an Isolated network with Static NAT service...")
        network_1 = self.create_Network(net_off, gateway='10.1.1.1')
        self.validate_Network(network_1, state="Allocated")
        self.debug("Creating another Isolated network with Static NAT "
                   "service...")
        # NOTE(review): both networks are created with gateway 10.1.1.1 —
        # confirm overlapping guest CIDRs are intended for a multi-nic VM
        # (isolated networks may overlap, but routing inside the VM differs).
        network_2 = self.create_Network(net_off, gateway='10.1.1.1')
        self.validate_Network(network_2, state="Allocated")
        self.debug("Deploying a multi-nic VM in the created Isolated "
                   "networks...")
        vm = self.create_VM([network_1, network_2])
        self.validate_Network(network_1, state="Implemented")
        vr_1 = self.get_Router(network_1)
        self.check_Router_state(vr_1, state="Running")
        self.validate_Network(network_2, state="Implemented")
        vr_2 = self.get_Router(network_2)
        self.check_Router_state(vr_2, state="Running")
        self.check_VM_state(vm, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network_1)
        self.verify_vsd_router(vr_1)
        self.verify_vsd_network(self.domain.id, network_2)
        self.verify_vsd_router(vr_2)
        self.verify_vsd_vm(vm)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip_1 = self.acquire_PublicIPAddress(network_1)
        self.validate_PublicIPAddress(public_ip_1, network_1)
        self.create_StaticNatRule_For_VM(vm, public_ip_1, network_1)
        self.validate_PublicIPAddress(
            public_ip_1, network_1, static_nat=True, vm=vm)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_1, vm, public_ip_1.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network_1, public_ip_1)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm, network_1, public_ip_1)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip_2 = self.acquire_PublicIPAddress(network_2)
        self.validate_PublicIPAddress(public_ip_2, network_2)
        self.create_StaticNatRule_For_VM(vm, public_ip_2, network_2)
        self.validate_PublicIPAddress(
            public_ip_2, network_2, static_nat=True, vm=vm)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_2, vm, public_ip_2.ipaddress)
        # Verifying Static NAT traffic
        # network_2 holds the VM's non-default nic at this point.
        self.verify_StaticNAT_traffic(
            network_2, public_ip_2, non_default_nic=True)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(
            vm, network_2, public_ip_2, non_default_nic=True)
        # Updating default VM NIC
        self.debug("Updating the default nic of the multi-nic VM...")
        self.nic_operation_VM(vm, network_2, operation="update")
        # Rebooting (stop - start) VM
        self.debug("Rebooting the multi-nic VM after updating its default nic "
                   "for changes to apply to the VM...")
        vm.stop(self.api_client)
        vm.start(self.api_client)
        self.check_VM_state(vm, state="Running")
        # VSD verification
        # Re-fetch the VM after the nic update + reboot before verifying.
        updated_vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0]
        self.verify_vsd_vm(updated_vm_info)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_1, vm, public_ip_1.ipaddress)
        self.verify_vsd_floating_ip(network_2, vm, public_ip_2.ipaddress)
        # Verifying Static NAT traffic
        # The default nic has swapped: network_1 is now the non-default nic.
        self.verify_StaticNAT_traffic(
            network_1, public_ip_1, non_default_nic=True)
        self.verify_StaticNAT_traffic(network_2, public_ip_2)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(
            vm, network_1, public_ip_1, non_default_nic=True)
        self.verify_StaticNAT_Internet_traffic(vm, network_2, public_ip_2)
        # Removing non-default VM NIC
        # Removal must fail while a Static NAT rule is enabled on the nic.
        self.debug("Removing the non-default nic of the multi-nic VM...")
        with self.assertRaises(Exception):
            self.nic_operation_VM(vm, network_1, operation="remove")
        self.debug("Can not remove this NIC as Static NAT rule is enabled on "
                   "it")
        # Deleting Static NAT Rule
        self.debug("Deleting Static NAT Rule for the deployed VM...")
        self.delete_StaticNatRule_For_VM(public_ip_1)
        with self.assertRaises(Exception):
            self.validate_PublicIPAddress(
                public_ip_1, network_1, static_nat=True, vm=vm)
        self.debug("Static NAT Rule for the deployed VM successfully deleted "
                   "in CloudStack")
        # VSD verification
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(network_1, vm, public_ip_1.ipaddress)
        self.debug("Floating IP for the deployed VM successfully deleted in "
                   "VSD")
        # With the rule gone, the nic removal now succeeds.
        self.nic_operation_VM(vm, network_1, operation="remove")
        # Rebooting (stop - start) VM
        self.debug("Rebooting the multi-nic VM after removing its non-default "
                   "nic for changes to apply to the VM...")
        vm.stop(self.api_client)
        vm.start(self.api_client)
        self.check_VM_state(vm, state="Running")
        # VSD verification
        updated_vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0]
        self.verify_vsd_vm(updated_vm_info)
        # VSD verification for Static NAT functionality
        with self.assertRaises(Exception):
            self.verify_vsd_floating_ip(network_1, vm, public_ip_1.ipaddress)
        self.debug("Static NAT rule not enabled in this VM NIC")
        self.verify_vsd_floating_ip(network_2, vm, public_ip_2.ipaddress)
        # Verifying Static NAT traffic
        # Traffic checks are skipped on the simulator (no real data path).
        if not self.isSimulator:
            with self.assertRaises(AssertionError):
                self.verify_StaticNAT_traffic(network_1, public_ip_1)
            self.debug("Static NAT rule not enabled in this VM NIC")
        self.verify_StaticNAT_traffic(network_2, public_ip_2)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        if not self.isSimulator:
            with self.assertRaises(Exception):
                self.verify_StaticNAT_Internet_traffic(vm, network_1, public_ip_1)
            self.debug("Static NAT rule not enabled in this VM NIC")
        self.verify_StaticNAT_Internet_traffic(vm, network_2, public_ip_2)
        # Adding and updating default VM NIC
        self.debug("Re-adding the non-default nic and updating the default "
                   "nic of the multi-nic VM...")
        self.nic_operation_VM(vm, network_1, operation="add")
        self.nic_operation_VM(vm, network_1, operation="update")
        # Rebooting (stop - start) VM
        self.debug("Rebooting the multi-nic VM after re-adding its "
                   "non-default nic for changes to apply to the VM...")
        vm.stop(self.api_client)
        vm.start(self.api_client)
        self.check_VM_state(vm, state="Running")
        # VSD verification
        updated_vm_info = VirtualMachine.list(self.api_client, id=vm.id)[0]
        self.verify_vsd_vm(updated_vm_info)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        self.create_StaticNatRule_For_VM(vm, public_ip_1, network_1)
        self.validate_PublicIPAddress(
            public_ip_1, network_1, static_nat=True, vm=vm)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network_1, vm, public_ip_1.ipaddress)
        self.verify_vsd_floating_ip(network_2, vm, public_ip_2.ipaddress)
        # Verifying Static NAT traffic
        # network_1 is the default nic again; network_2 is non-default.
        self.verify_StaticNAT_traffic(network_1, public_ip_1)
        self.verify_StaticNAT_traffic(
            network_2, public_ip_2, non_default_nic=True)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm, network_1, public_ip_1)
        self.verify_StaticNAT_Internet_traffic(
            vm, network_2, public_ip_2, non_default_nic=True)
    @attr(tags=["advanced", "nuagevsp"], required_hardware="true")
    def test_09_nuage_StaticNAT_vm_migration_traffic(self):
        """Test Nuage VSP Static NAT functionality with VM migration by
        performing (wget) traffic tests to the Internet

        Two VMs with Static NAT rules are deployed in one Isolated network;
        one VM is then live-migrated to another host (if one is available)
        and Static NAT state and traffic are re-verified for both VMs.
        """
        # Repeat the tests in the testcase
        # "test_05_nuage_StaticNAT_isolated_networks_traffic" with migration of
        # one of the VMs to another host (if available).
        # Verify the above VM migration by performing and verifying Static NAT
        # traffic test (wget www.google.com) to the Internet from the VM.
        # Delete all the created objects (cleanup).
        # Creating network offering
        self.debug("Creating Nuage VSP Isolated Network offering with Static "
                   "NAT service provider as NuageVsp...")
        net_off = self.create_NetworkOffering(
            self.test_data["nuagevsp"]["isolated_network_offering"])
        self.validate_NetworkOffering(net_off, state="Enabled")
        # Creating an Isolated network, deploying VMs, and verifying Static
        # NAT traffic with VM migrations
        self.debug("Creating an Isolated network with Static NAT service...")
        network = self.create_Network(net_off, gateway='10.1.1.1')
        self.validate_Network(network, state="Allocated")
        self.debug("Deploying a VM in the created Isolated network...")
        vm_1 = self.create_VM(network)
        self.validate_Network(network, state="Implemented")
        vr = self.get_Router(network)
        self.check_Router_state(vr, state="Running")
        self.check_VM_state(vm_1, state="Running")
        # VSD verification
        self.verify_vsd_network(self.domain.id, network)
        self.verify_vsd_router(vr)
        self.verify_vsd_vm(vm_1)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip_1 = self.acquire_PublicIPAddress(network)
        self.validate_PublicIPAddress(public_ip_1, network)
        self.create_StaticNatRule_For_VM(vm_1, public_ip_1, network)
        self.validate_PublicIPAddress(
            public_ip_1, network, static_nat=True, vm=vm_1)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm_1, public_ip_1.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_1)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm_1, network, public_ip_1)
        self.debug("Deploying another VM in the created Isolated network...")
        vm_2 = self.create_VM(network)
        self.check_VM_state(vm_2, state="Running")
        # VSD verification
        self.verify_vsd_vm(vm_2)
        # Creating Static NAT rule
        self.debug("Creating Static NAT rule for the deployed VM in the "
                   "created Isolated network...")
        public_ip_2 = self.acquire_PublicIPAddress(network)
        self.validate_PublicIPAddress(public_ip_2, network)
        self.create_StaticNatRule_For_VM(vm_2, public_ip_2, network)
        self.validate_PublicIPAddress(
            public_ip_2, network, static_nat=True, vm=vm_2)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm_2, public_ip_2.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_2)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VM
        self.verify_StaticNAT_Internet_traffic(vm_2, network, public_ip_2)
        # VM migration
        # This VM migration has no effect on the Static NAT functionality
        self.debug("Migrating one of the VMs in the created Isolated network "
                   "to another host, if available...")
        self.migrate_VM(vm_1)
        # VSD verification for Static NAT functionality
        self.verify_vsd_floating_ip(network, vm_1, public_ip_1.ipaddress)
        self.verify_vsd_floating_ip(network, vm_2, public_ip_2.ipaddress)
        # Verifying Static NAT traffic
        self.verify_StaticNAT_traffic(network, public_ip_1)
        self.verify_StaticNAT_traffic(network, public_ip_2)
        # Verifying Static NAT traffic (wget www.google.com) to the Internet
        # from the deployed VMs (post-migration)
        self.verify_StaticNAT_Internet_traffic(vm_1, network, public_ip_1)
        self.verify_StaticNAT_Internet_traffic(vm_2, network, public_ip_2)
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_10_nuage_StaticNAT_network_restarts_traffic(self):
    """Test Nuage VSP Static NAT functionality with network restarts by
    performing (wget) traffic tests to the Internet.

    Fix: after rebooting the VPC VM, the VSD verification mistakenly
    re-checked the Isolated network's router/VM objects (vr, vm) instead
    of the VPC network's objects (vpc_vr, vpc_vm).
    """
    # Repeat the tests in the testcases
    # "test_05_nuage_StaticNAT_isolated_networks_traffic" and
    # "test_06_nuage_StaticNAT_vpc_network_traffic" with network restarts:
    # 1. Restart Isolated Network (cleanup = false)
    # 2. Restart Isolated Network (cleanup = true)
    # 3. Reboot VM in the Isolated Network
    # 4. Restart VPC Network (cleanup = false)
    # 5. Restart VPC Network (cleanup = true)
    # 6. Reboot VM in the VPC Network
    # 7. Restart VPC (cleanup = false)
    # 8. Restart VPC (cleanup = true)
    # Verify the above network restarts by performing and verifying Static
    # NAT traffic test (wget www.google.com) to the Internet from the VM.
    # Delete all the created objects (cleanup).

    # Creating network offering
    self.debug("Creating Nuage VSP Isolated Network offering with Static "
               "NAT service provider as NuageVsp...")
    net_off = self.create_NetworkOffering(
        self.test_data["nuagevsp"]["isolated_network_offering"])
    self.validate_NetworkOffering(net_off, state="Enabled")
    # Creating an Isolated network, deploying a VM, and verifying Static
    # NAT traffic with Isolated network restarts
    self.debug("Creating an Isolated network with Static NAT service...")
    network = self.create_Network(net_off, gateway='10.1.1.1')
    self.validate_Network(network, state="Allocated")
    self.debug("Deploying a VM in the created Isolated network...")
    vm = self.create_VM(network)
    self.validate_Network(network, state="Implemented")
    vr = self.get_Router(network)
    self.check_Router_state(vr, state="Running")
    self.check_VM_state(vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, network)
    self.verify_vsd_router(vr)
    self.verify_vsd_vm(vm)
    # Creating Static NAT rule
    self.debug("Creating Static NAT rule for the deployed VM in the "
               "created Isolated network...")
    public_ip_1 = self.acquire_PublicIPAddress(network)
    self.validate_PublicIPAddress(public_ip_1, network)
    self.create_StaticNatRule_For_VM(vm, public_ip_1, network)
    self.validate_PublicIPAddress(
        public_ip_1, network, static_nat=True, vm=vm)
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(network, vm, public_ip_1.ipaddress)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(network, public_ip_1)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(vm, network, public_ip_1)
    # Restarting Isolated network (cleanup = false)
    # VR gets destroyed and deployed again in the Isolated network
    # This restart has no effect on the Static NAT functionality
    self.debug("Restarting the created Isolated network without "
               "cleanup...")
    Network.restart(network, self.api_client, cleanup=False)
    self.validate_Network(network, state="Implemented")
    vr = self.get_Router(network)
    self.check_Router_state(vr, state="Running")
    self.check_VM_state(vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, network)
    self.verify_vsd_router(vr)
    self.verify_vsd_vm(vm)
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(network, vm, public_ip_1.ipaddress)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(network, public_ip_1)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(vm, network, public_ip_1)
    # Restarting Isolated network (cleanup = true)
    # VR gets destroyed and deployed again in the Isolated network
    # This restart has no effect on the Static NAT functionality
    self.debug("Restarting the created Isolated network with cleanup...")
    Network.restart(network, self.api_client, cleanup=True)
    self.validate_Network(network, state="Implemented")
    vr = self.get_Router(network)
    self.check_Router_state(vr, state="Running")
    self.check_VM_state(vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, network)
    self.verify_vsd_router(vr)
    self.verify_vsd_vm(vm)
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(network, vm, public_ip_1.ipaddress)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(network, public_ip_1)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(vm, network, public_ip_1)
    # Rebooting (stop - start) VM
    # This reboot has no effect on the Static NAT functionality
    self.debug("Rebooting the deployed VM in the created Isolated "
               "network...")
    vm.stop(self.api_client)
    vm.start(self.api_client)
    self.validate_Network(network, state="Implemented")
    self.check_Router_state(vr, state="Running")
    self.check_VM_state(vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, network)
    self.verify_vsd_router(vr)
    self.verify_vsd_vm(vm)
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(network, vm, public_ip_1.ipaddress)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(network, public_ip_1)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(vm, network, public_ip_1)
    # Creating VPC offering
    self.debug("Creating Nuage VSP VPC offering with Static NAT service "
               "provider as NuageVsp...")
    vpc_off = self.create_VpcOffering(
        self.test_data["nuagevsp"]["vpc_offering"])
    self.validate_VpcOffering(vpc_off, state="Enabled")
    # Creating VPC
    self.debug("Creating a VPC with Static NAT service provider as "
               "NuageVsp...")
    vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
    self.validate_Vpc(vpc, state="Enabled")
    # Creating VPC network offering
    self.debug("Creating Nuage VSP VPC Network offering with Static NAT "
               "service provider as NuageVsp...")
    net_off = self.create_NetworkOffering(
        self.test_data["nuagevsp"]["vpc_network_offering"])
    self.validate_NetworkOffering(net_off, state="Enabled")
    # Creating a VPC network in the VPC, deploying a VM, and verifying
    # Static NAT traffic with VPC network restarts
    self.debug("Creating a VPC network with Static NAT service...")
    vpc_tier = self.create_Network(net_off, gateway='10.1.1.1', vpc=vpc)
    self.validate_Network(vpc_tier, state="Implemented")
    vpc_vr = self.get_Router(vpc_tier)
    self.check_Router_state(vpc_vr, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
    self.verify_vsd_router(vpc_vr)
    # Adding Egress Network ACL rules
    self.debug("Adding Egress Network ACL rules in the created VPC "
               "network to allow Static NAT (DNS & HTTP) traffic to the "
               "Internet from the VMs in the network...")
    dns_rule = self.create_NetworkAclRule(
        self.test_data["dns_rule"], traffic_type="Egress",
        network=vpc_tier)
    http_rule = self.create_NetworkAclRule(
        self.test_data["http_rule"], traffic_type="Egress",
        network=vpc_tier)
    # VSD verification for added Egress Network ACL rules
    self.verify_vsd_firewall_rule(dns_rule, traffic_type="Egress")
    self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
    self.debug("Deploying a VM in the created VPC network...")
    vpc_vm = self.create_VM(vpc_tier)
    self.check_VM_state(vpc_vm, state="Running")
    # VSD verification
    self.verify_vsd_vm(vpc_vm)
    # Creating Static NAT rule
    self.debug("Creating Static NAT rule for the deployed VM in the "
               "created VPC network...")
    public_ip_2 = self.acquire_PublicIPAddress(vpc_tier, vpc=vpc)
    self.validate_PublicIPAddress(public_ip_2, vpc_tier)
    self.create_StaticNatRule_For_VM(vpc_vm, public_ip_2, vpc_tier)
    self.validate_PublicIPAddress(
        public_ip_2, vpc_tier, static_nat=True, vm=vpc_vm)
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(
        vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(
        vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
    # Restarting VPC network (cleanup = false)
    # This restart has no effect on the Static NAT functionality
    self.debug("Restarting the created VPC network without cleanup...")
    Network.restart(vpc_tier, self.api_client, cleanup=False)
    self.validate_Network(vpc_tier, state="Implemented")
    self.check_Router_state(vpc_vr, state="Running")
    self.check_VM_state(vpc_vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
    self.verify_vsd_router(vpc_vr)
    self.verify_vsd_vm(vpc_vm)
    self.verify_vsd_firewall_rule(dns_rule, traffic_type="Egress")
    self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(
        vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(
        vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
    # Restarting VPC network (cleanup = true)
    # This restart has no effect on the Static NAT functionality
    self.debug("Restarting the created VPC network with cleanup...")
    Network.restart(vpc_tier, self.api_client, cleanup=True)
    self.validate_Network(vpc_tier, state="Implemented")
    self.check_Router_state(vpc_vr, state="Running")
    self.check_VM_state(vpc_vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
    self.verify_vsd_router(vpc_vr)
    self.verify_vsd_vm(vpc_vm)
    self.verify_vsd_firewall_rule(dns_rule, traffic_type="Egress")
    self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(
        vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(
        vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
    # Rebooting (stop - start) VM
    # This reboot has no effect on the Static NAT functionality
    self.debug("Rebooting the deployed VM in the created VPC network...")
    vpc_vm.stop(self.api_client)
    vpc_vm.start(self.api_client)
    self.validate_Network(vpc_tier, state="Implemented")
    self.check_Router_state(vpc_vr, state="Running")
    self.check_VM_state(vpc_vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
    # Bug fix: verify the VPC network's router and VM here; the original
    # code mistakenly re-verified the Isolated network's vr/vm objects.
    self.verify_vsd_router(vpc_vr)
    self.verify_vsd_vm(vpc_vm)
    self.verify_vsd_firewall_rule(dns_rule, traffic_type="Egress")
    self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(
        vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(
        vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
    # Restarting VPC (cleanup = false)
    # VPC VR gets destroyed and deployed again in the VPC
    # This restart has no effect on the Static NAT functionality
    self.debug("Restarting the VPC without cleanup...")
    self.restart_Vpc(vpc, cleanup=False)
    self.validate_Network(vpc_tier, state="Implemented")
    vpc_vr = self.get_Router(vpc_tier)
    self.check_Router_state(vpc_vr, state="Running")
    self.check_VM_state(vpc_vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
    self.verify_vsd_router(vpc_vr)
    self.verify_vsd_vm(vpc_vm)
    self.verify_vsd_firewall_rule(dns_rule, traffic_type="Egress")
    self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(
        vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(
        vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
    # Restarting VPC (cleanup = true)
    # VPC VR gets destroyed and deployed again in the VPC
    # This restart has no effect on the Static NAT functionality
    self.debug("Restarting the VPC with cleanup...")
    self.restart_Vpc(vpc, cleanup=True)
    self.validate_Network(vpc_tier, state="Implemented")
    vpc_vr = self.get_Router(vpc_tier)
    self.check_Router_state(vpc_vr, state="Running")
    self.check_VM_state(vpc_vm, state="Running")
    # VSD verification
    self.verify_vsd_network(self.domain.id, vpc_tier, vpc)
    self.verify_vsd_router(vpc_vr)
    self.verify_vsd_vm(vpc_vm)
    self.verify_vsd_firewall_rule(dns_rule, traffic_type="Egress")
    self.verify_vsd_firewall_rule(http_rule, traffic_type="Egress")
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(
        vpc_tier, vpc_vm, public_ip_2.ipaddress, vpc=vpc)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(vpc_tier, public_ip_2, vpc=vpc)
    # Verifying Static NAT traffic (wget www.google.com) to the Internet
    # from the deployed VM
    self.verify_StaticNAT_Internet_traffic(
        vpc_vm, vpc_tier, public_ip_2, vpc=vpc)
# Bug CLOUDSTACK-9751
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_11_nuage_enable_staticNat_when_vr_is_in_starting_state(self):
    """Test Nuage VSP Static NAT functionality by enabling Static Nat when
    VR is in starting state
    """
    # 1. Create a Nuage VSP Isolated network offering.
    # 2. Create an Isolated network with above created offering.
    # 3. Deploy a VM in the above created Isolated network,
    #    which starts a VR.
    # 4. While VR is in the starting state, acquire a public IP and enable
    #    static nat in another thread.
    # 5. Verify that Static NAT is successfully enabled in both CloudStack
    #    and VSD.
    # 6. Delete all the created objects (cleanup).
    # Creating network offering
    self.debug("Creating Nuage VSP Isolated Network offering with Static "
               "NAT service provider as NuageVsp...")
    net_off = self.create_NetworkOffering(
        self.test_data["nuagevsp"]["isolated_network_offering"])
    self.validate_NetworkOffering(net_off, state="Enabled")
    # Creating an Isolated network
    self.debug("Creating an Isolated network with Static NAT service...")
    # Stored on self (not a local) — presumably the thread target
    # enable_staticNat_on_a_starting_vm reads self.network and
    # self.public_ip; verify against that helper.
    self.network = self.create_Network(net_off, gateway='10.1.1.1')
    self.validate_Network(self.network, state="Allocated")
    # Acquiring a Public IP
    self.debug("Acquiring a Public IP in the created Isolated network...")
    self.public_ip = self.acquire_PublicIPAddress(self.network)
    self.validate_PublicIPAddress(self.public_ip, self.network)
    # Enabling Static NAT on a starting VM
    self.debug("Creating a thread for enabling Static Nat on a starting "
               "VM...")
    # The thread races against create_VM below so Static NAT is enabled
    # while the VR is still starting (regression test for CLOUDSTACK-9751).
    static_nat_thread = threading.Thread(
        name='enable_static_nat',
        target=self.enable_staticNat_on_a_starting_vm)
    static_nat_thread.start()
    vm = self.create_VM(self.network)
    # Check the status of Static Nat thread and if it is not finished then
    # below command will wait for it to finish
    self.debug("Waiting for for enabling Static Nat on a starting VM "
               "thread to finish...")
    static_nat_thread.join()
    # CloudStack verification for the implemented Isolated Network
    self.validate_Network(self.network, state="Implemented")
    vr = self.get_Router(self.network)
    self.check_Router_state(vr, state="Running")
    self.check_VM_state(vm, state="Running")
    # VSD verification for the implemented Isolated Network
    self.verify_vsd_network(self.domain.id, self.network)
    self.verify_vsd_router(vr)
    self.verify_vsd_vm(vm)
    # CloudStack verification for Static NAT functionality
    self.validate_PublicIPAddress(
        self.public_ip, self.network, static_nat=True, vm=vm)
    # VSD verification for Static NAT functionality
    self.verify_vsd_floating_ip(self.network, vm, self.public_ip.ipaddress)
    # Verifying Static NAT traffic
    self.verify_StaticNAT_traffic(self.network, self.public_ip)
|
from __future__ import print_function
import serial
import numpy as np
import time
import threading
# Number of servos on the 7Bot arm.
NUM_SERVOS = 7
# Set True by the serial reader thread when the arm reports that all
# servos have converged to their commanded positions.
isAllConverge = False
# Latest per-servo measurements, updated in place by the reader thread.
measuredForces = [0] * NUM_SERVOS
measuredRotationDegs = [0] * NUM_SERVOS
# Serial connection to 7Bot
# Flag telling the reader thread to exit before the port is closed.
serialIsClosing = False
# NOTE(review): COM3 is hard-coded — Windows-specific; adjust per machine.
botPort = serial.Serial(port="COM3", baudrate=115200, timeout=1)
print("7Bot Test Programming MK14")
# Give the arm firmware time to boot after the port is opened.
time.sleep(4.0)
print("Starting ...")
# Read data from bot
def botPortRead(ser):
global isAllConverge, measuredRotationDegs, measuredForces, NUM_SERVOS
global serialIsClosing
rxBuf = [0] * (NUM_SERVOS * 2 + 1)
beginFlag = False
instruction = 0
cnt = 0
while True:
if serialIsClosing or not ser.isOpen():
break
val = ser.read(1)
if len(val) == 0:
continue
# print("Rx", "%02X " % ord(val))
if not beginFlag:
beginFlag = (ord(val) == 0xFE)
instruction = 0
cnt = 0
elif instruction == 0:
instruction = ord(val) - 240
elif instruction == 9:
rxBuf[cnt] = ord(val)
cnt += 1
if cnt >= NUM_SERVOS * 2 + 1:
beginFlag = False
for i in range(NUM_SERVOS):
posCode = rxBuf[i*2] * 128 + rxBuf[i*2+1]
measuredForces[i] = posCode % 16384 / 1024
if posCode / 16384 > 0:
measuredForces[i] = -measuredForces[i]
# Convert 0-1000 code to 0-180 deg
measuredRotationDegs[i] = (posCode % 1024) * 9 / 50
isAllConverge = (rxBuf[(NUM_SERVOS-1)*2+2] == 1)
#print("Forces:", measuredForces, ",Angles:", measuredRotationDegs, isAllConverge)
else:
beginFlag = False
# Thread for reading from port
# Runs until serialIsClosing is set or the port is closed.
thread = threading.Thread(target=botPortRead, args=(botPort,))
thread.start()
def constrain(val, valMin, valMax):
    """Clamp val into the inclusive range [valMin, valMax]."""
    return max(valMin, min(val, valMax))
# set motor force status: 0-forceless, 1-normal servo, 2-protection
def setForceStatus(status):
    """Send a force-status command frame (0xFE 0xF5 <status>) to the arm."""
    frame = bytearray([0xFE, 0xF5])
    frame.append(status)
    botPort.write(frame)
# set motion fluency & speeds (0~250 ---> 0~25)
def setSpeed(fluentEnables, speeds):
    """Send per-servo speed settings to the arm.

    Each byte is speed//10 (0..25); bit 6 (64) is set when fluency is
    enabled for that servo.

    Fix: use floor division — the original '/ 10' yields a float on
    Python 3 and bytearray.append(float) raises TypeError.
    """
    # 1- Process Data
    sendData = bytearray([0xFE, 0xF7])
    for servoIdx, speed in enumerate(speeds):
        code = constrain(speed, 0, 250) // 10
        if fluentEnables[servoIdx]:
            code += 64
        sendData.append(code)
    # 2- Send Data
    botPort.write(sendData)
def appendTwoByteVal(buf, val):
    """Append val to buf as two 7-bit bytes: high (val//128) then low.

    Fix: use floor division — the original 'val / 128' yields a float on
    Python 3, and '&' on a float raises TypeError.
    """
    buf.append((val // 128) & 0x7F)
    buf.append(val & 0x7F)
# set Servo angles
def setServoAngles(servoAngles):
    """Command absolute servo angles (degrees) and clear the converge flag."""
    global isAllConverge
    isAllConverge = False
    # Build the command frame: header then one two-byte code per servo
    # (angle scaled by 50/9 to the arm's 0-1000 range).
    frame = bytearray([0xFE, 0xF9])
    for angle in servoAngles:
        appendTwoByteVal(frame, int(angle * 50 / 9))
    botPort.write(frame)
def appendVecToSend(buf, vec):
    """Append each vector component as a two-byte magnitude with a sign
    flag (1024 added for negative components)."""
    for component in vec:
        magnitude = int(abs(component))
        if component < 0:
            magnitude += 1024
        appendTwoByteVal(buf, magnitude)
# IK6(6 angles)
# j6:mm(-500~500), vec:(-1.0~1.0)--->(-500~500), theta:Degrees
def setIK6(j6, vec56, vec67, theta6):
    """Send an inverse-kinematics pose command (frame 0xFE 0xFA).

    j6 is the wrist position in mm (each axis clamped to +/-500);
    vec56/vec67 are direction vectors, normalised then scaled to 500;
    theta6 is the end rotation in degrees.

    NOTE(review): vec56/vec67 are normalised in place on copies via
    '/=', which requires a float dtype — presumably callers pass float
    arrays/lists; an integer numpy array would raise a casting error.
    """
    global isAllConverge
    isAllConverge = False
    # 1- Process Data
    j6_c = np.array([constrain(j6[0], -500, 500), constrain(j6[1], -500, 500), constrain(j6[2], -500, 500)])
    vec56_c = np.copy(vec56)
    vec56_c /= np.linalg.norm(vec56_c)
    vec56_c *= 500
    vec67_c = np.copy(vec67)
    vec67_c /= np.linalg.norm(vec67_c)
    vec67_c *= 500
    sendData = bytearray([0xFE, 0xFA])
    appendVecToSend(sendData, j6_c)
    appendVecToSend(sendData, vec56_c)
    appendVecToSend(sendData, vec67_c)
    # theta scaled by 50/9, same encoding as setServoAngles.
    appendTwoByteVal(sendData, int((theta6*50/9)))
    # 2- Send Data
    # for dat in sendData:
    #     print("%02X " % dat, end = "")
    botPort.write(sendData)
print("Going normal servo")
setForceStatus(1)
print("Set speed & pose")
# NOTE(review): setForceStatus(1) is called twice in a row — presumably
# redundant; confirm whether the firmware needs the repeat.
setForceStatus(1)
time.sleep(1.0)
# Reboot 7Bot if previous status is not normal servo
# To make motion much more stable, highly recommend you use fluency all the time
fluentEnables = [True, True, True, True, True, True, True]
speeds = [100, 100, 100, 150, 150, 150, 150]
setSpeed(fluentEnables, speeds)
#keys = ["9", "8", "7", "6", "5", "4", "3", "2", "1", "0"]
#keys = ["MEM", "0", "0", "0", "0", "TRM", "D", "C", "TRM", "MEM", "TRM", "7", "4", "TRM"]
# Target memory address and machine-code bytes to key into the trainer.
baseAddress = "0F12"
hexcodes = "C40D35C40031C401C8F4C410C8F1C400C8EEC40801C0E71EC8E49404C4619002C400C9808F01C0D89C0EC180E4FF9808C8CEC0CAE480C8C64003FC0194D6B8BF98C8C40790CE"
# Physical key locations: first three servo angles (degrees) per keypad key.
keyPositions = {
    "0": [11, 101, 104],
    "1": [7, 93, 104],
    "2": [14, 93, 104],
    "3": [18, 92, 104],
    "4": [7, 89, 100],
    "5": [13, 89, 100],
    "6": [17, 89, 100],
    "7": [7, 83, 97],
    "8": [11, 84, 97],
    "9": [16, 83, 97],
    "A": [23, 80, 97],
    "B": [28, 77, 97],
    "C": [33, 73, 97],
    "D": [27, 83, 100],
    "E": [32, 81, 100],
    "F": [36, 77, 100],
    "ABT": [26, 89, 104],
    "TRM": [34, 86, 104],
    "MEM": [29, 94, 105],
    "GO": [33, 90, 105],
}
# Fixed angles for servos 4-7, appended to every key position.
servos3to7 = [83, 121, 122, 31]
# Set angles and wait for motion converge
print("Programming ...")
def sendKeySequence(keys):
    """Press each key in order: hover above it, press down, lift back up,
    waiting for the arm to converge after every move."""
    for key in keys:
        pressed = keyPositions[key] + servos3to7
        hover = pressed[:]
        hover[1] = hover[1] + 6
        hover[2] = hover[2] - 7
        # hover -> press -> hover
        for pose in (hover, pressed, hover):
            setServoAngles(pose)
            while not isAllConverge:
                time.sleep(0.2)
def setAddress(addr):
    """Key in ABT, MEM, then each character of the address string."""
    print("Setting address to", addr)
    sendKeySequence(["ABT", "MEM"] + [str(ch) for ch in addr])
def sendProgram(hexcodes):
    """Enter the program two hex digits at a time: TRM <d1> <d2> TRM MEM."""
    for pos in range(0, len(hexcodes), 2):
        pair = hexcodes[pos:pos + 2]
        keySeq = ["TRM"] + [str(ch) for ch in pair] + ["TRM", "MEM"]
        print(pair)
        sendKeySequence(keySeq)
# Key in the program, then reset the address for review.
setAddress(baseAddress)
sendProgram(hexcodes)
setAddress(baseAddress)
# angles = [7, 102, 95, 83, 121, 122, 31]
# setServoAngles(angles)
# while not isAllConverge:
#     time.sleep(0.2)
#
# time.sleep(1)
#
# # Set angles and wait for motion converge
# print("Down")
# angles = [7, 96, 102, 83, 121, 122, 31]
# setServoAngles(angles)
# while not isAllConverge:
#     time.sleep(0.2)
#
# print("Up")
# angles = [7, 102, 95, 83, 121, 122, 31]
# setServoAngles(angles)
# while not isAllConverge:
#     time.sleep(0.2)
# Move clear of the keypad.
print("Clear")
angles = [7, 110, 80, 83, 121, 122, 31]
setServoAngles(angles)
while not isAllConverge:
    time.sleep(0.2)
time.sleep(0.5)
# Set angles and wait for motion converge
print("Home")
angles = [90, 115, 65, 90, 121, 122, 40]
setServoAngles(angles)
while not isAllConverge:
    time.sleep(0.2)
# Signal the reader thread to exit, give it time, then close the port.
serialIsClosing = True
time.sleep(2.0)
botPort.close()
|
pymultiprocess.py | #!/usr/bin/env python3
from multiprocessing import Process
import time
import os
def sleep(n: float):
    """Worker: print this process's PID, then sleep for n seconds.

    Fix: the original ignored ``n`` and always slept 10 seconds, so the
    5 passed by start() had no effect.
    """
    print("start process: %d" % os.getpid())
    time.sleep(n)
def start():
    """Launch ten worker processes, wait for them all, report wall time."""
    began = time.time()
    workers = [Process(target=sleep, args=(5,)) for _ in range(0, 10)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Total cost %.2f seconds" % (time.time() - began))
# Script entry point: spawn the worker pool and time it.
if __name__ == "__main__":
    start()
|
Serial_.py |
import serial
import time
import threading
from collections import deque
exitFlag = 0
class serialTread():
    """Serial bridge: forwards lines read from the port into a broadcast
    queue and writes queued outgoing messages to the port.

    NOTE(review): despite the name this is not a threading.Thread
    subclass; run() spawns its own reader thread and then loops forever.
    """

    def __init__(self, ser):
        # ser: an open serial.Serial-like object (readline/write).
        # NOTE(review): maxLen and buf are never used in this chunk —
        # presumably leftovers; confirm before removing.
        self.maxLen = 100
        self.ser = ser
        self.buf = bytes()

    def run(self, buf_broadcast, buf_stm):
        """Start the reader thread, then pump buf_stm out to the port.

        buf_broadcast/buf_stm are deque-like queues (append/popleft) —
        presumably collections.deque; confirm with callers. Never returns.
        """
        thread = threading.Thread(target=self.read_from_port, args=(buf_broadcast,))
        thread.start()
        while(True):
            if buf_stm:
                # Encode each queued string before writing to the port.
                m = buf_stm.popleft().encode('UTF-8')
                self.ser.write(m)
            time.sleep(0.005)

    def read_from_port(self, buf_broadcast):
        """Continuously read lines, decode as UTF-8, append to the queue."""
        while True:
            reading = self.ser.readline().decode('utf-8')
            buf_broadcast.append(reading)
|
tpu_estimator.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from contextlib import contextmanager
import copy
import signal
import threading
import time
import traceback
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.summary import summary_ops as contrib_summary
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.tpu.python.tpu.device_assignment import device_assignment as tpu_device_assignment
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator import util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_PINGING_MASTER_TIMEOUT_IN_MS = 300 * 1000 # 5 minutes
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
def _create_global_step(graph):
  """Create the int64 resource-variable global step in `graph`."""
  graph = graph or ops.get_default_graph()
  if training.get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  step_kwargs = dict(
      shape=[],
      dtype=dtypes.int64,
      initializer=init_ops.zeros_initializer(),
      trainable=False,
      use_resource=True,
      collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(ops.GraphKeys.GLOBAL_STEP,
                                       **step_kwargs)
def _create_or_get_iterations_per_loop():
  """Return the singleton iterations-per-loop variable, creating it once."""
  graph = ops.get_default_graph()
  collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
  existing = graph.get_collection(collection_name)
  if len(existing) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')
  if existing:
    return existing[0]
  # Not created yet: build it next to the global step so reads stay local.
  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(
        _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          initializer=init_ops.zeros_initializer(),
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
          use_resource=True)
def _sync_variables_ops():
  """Ops that read every trainable variable back to host memory.

  Reading the value forces variables updated on the TPU to be synced,
  and check_numerics fails fast on NaNs along the way.
  """
  sync_ops = []
  for v in variables.trainable_variables():
    checked = array_ops.check_numerics(v.read_value(),
                                       'Gradient for %s is NaN' % v.name)
    sync_ops.append(checked.op)
  return sync_ops
def _increase_eval_step_op(iterations_per_loop):
  """Returns an op to increase the eval step for TPU evaluation.

  Args:
    iterations_per_loop: Tensor. The number of eval steps running in the
      TPU system before returning to the CPU host for each `Session.run`.

  Returns:
    An operation
  """
  eval_step = evaluation._get_or_create_eval_step()  # pylint: disable=protected-access
  # Estimator.evaluate already increases the step by 1 per loop, so only
  # the remaining (iterations_per_loop - 1) needs to be added here.
  delta = math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype)
  return state_ops.assign_add(eval_step, delta, use_locking=True)
_DEFAULT_JOB_NAME = 'tpu_worker'
_DEFAULT_COORDINATOR_JOB_NAME = 'coordinator'
_LOCAL_MASTERS = ('', 'local')
class _TPUContext(object):
"""A context holds immutable states of TPU computation.
This immutable object holds TPUEstimator config, train/eval batch size, and
`TPUEstimator.use_tpu`, which is expected to be passed around. It also
provides utility functions, based on the current state, to determine other
information commonly required by TPU computation, such as TPU device names,
TPU hosts, shard batch size, etc.
N.B. As `mode` is not immutable state in Estimator, but essential to
distinguish between TPU training and evaluation, a common usage for
_TPUContext with `mode` is as follows:
```
with _ctx.with_mode(mode) as ctx:
if ctx.is_running_on_cpu():
...
```
"""
  def __init__(self, config, train_batch_size, eval_batch_size,
               predict_batch_size, use_tpu, device_assignment):
    # RunConfig carrying the tpu_config sub-config.
    self._config = config
    # Global (whole-job) batch sizes per mode.
    self._train_batch_size = train_batch_size
    self._eval_batch_size = eval_batch_size
    self._predict_batch_size = predict_batch_size
    self._use_tpu = use_tpu
    # Mode is deliberately unset here; it is provided per-use via the
    # with_mode() contextmanager.
    self._mode = None
    # Optional DeviceAssignment for model parallelism (may be None).
    self._device_assignment = device_assignment
    # Hardware limit used to derive per-host core/shard counts.
    self._max_cores_per_host = 8
def _assert_mode(self):
if self._mode is None:
raise RuntimeError(
'`mode` needs to be set via contextmanager `with_mode`.')
return self._mode
@property
def num_of_cores_per_host(self):
num_cores = self.num_cores
return min(num_cores, self._max_cores_per_host)
@property
def num_of_shards_per_host(self):
if self._device_assignment:
maximum_shards_per_host = (
self._max_cores_per_host //
self._device_assignment.num_cores_per_replica)
return min(self.num_shards, maximum_shards_per_host)
else:
num_cores = self.num_cores
return min(num_cores, self._max_cores_per_host)
@contextmanager
def with_mode(self, mode):
new_ctx = copy.copy(self) # Shallow copy is enough.
new_ctx._mode = mode # pylint: disable=protected-access
yield new_ctx
  @property
  def mode(self):
    # Current train/eval/predict mode; raises RuntimeError if unset.
    return self._assert_mode()
@property
def num_cores(self):
# TODO(xiejw): Adds lazy num_shards initialization.
if self._device_assignment:
return self._device_assignment.num_cores_per_replica * self.num_shards
else:
return self.num_shards
  @property
  def num_shards(self):
    # Shard count comes straight from the user-supplied TPU config.
    return self._config.tpu_config.num_shards
  @property
  def num_hosts(self):
    # Hosts = total cores divided by cores available on each host.
    return self.num_cores // self.num_of_cores_per_host
  @property
  def config(self):
    # The RunConfig this context was constructed with.
    return self._config
def is_input_sharded_per_core(self):
"""Return true if input_fn is invoked per-core (other than per-host)."""
self._assert_mode()
return (self._mode == model_fn_lib.ModeKeys.TRAIN and
not self._config.tpu_config.per_host_input_for_training)
  def is_running_on_cpu(self, is_export_mode=False):
    """Determines whether the input_fn and model_fn should be invoked on CPU.

    Args:
      is_export_mode: Indicates whether the current mode is for exporting the
        model, when mode == PREDICT. Only with this bool, we could
        tell whether user is calling the Estimator.predict or
        Estimator.export_savedmodel, which are running on TPU and CPU
        respectively. Parent class Estimator does not distinguish these two.

    Returns:
      bool, whether current input_fn or model_fn should be running on CPU.

    Raises:
      ValueError: any configuration is invalid.
    """
    mode = self._assert_mode()
    # Everything runs on CPU when TPU use is disabled.
    if not self._use_tpu:
      return True
    if mode != model_fn_lib.ModeKeys.PREDICT:
      return False
    # There are actually 2 use cases when running with mode.PREDICT: prediction
    # and saving the model.  We run actual predictions on the TPU, but
    # model export is run on the CPU.
    if is_export_mode:
      return True
    # TPU predict requires a known batch size and a single host.
    if self._predict_batch_size is None:
      raise ValueError(
          'predict_batch_size in TPUEstimator constructor should not be '
          '`None` if .predict is running on TPU.')
    if self.num_hosts > 1:
      raise ValueError(
          'TPUEstimator.predict should be running on single host.')
    return False
@property
def global_batch_size(self):
  """Global batch size for the current mode, or None for an unknown mode."""
  mode = self._assert_mode()
  if mode == model_fn_lib.ModeKeys.TRAIN:
    return self._train_batch_size
  if mode == model_fn_lib.ModeKeys.EVAL:
    return self._eval_batch_size
  if mode == model_fn_lib.ModeKeys.PREDICT:
    return self._predict_batch_size
  return None
@property
def batch_size_for_input_fn(self):
  """Returns the shard batch size for `input_fn`."""
  global_batch_size = self.global_batch_size
  if self.is_running_on_cpu():
    return global_batch_size

  # On TPU, split the global batch across the sharding unit. Per-core input
  # sharding is prohibited under model parallelism, so using num_cores is
  # safe in that branch.
  if self.is_input_sharded_per_core():
    num_splits = self.num_cores
  else:
    num_splits = self.num_hosts
  return global_batch_size // num_splits
@property
def batch_size_for_model_fn(self):
  """Returns the shard batch size for `model_fn`."""
  if self.is_running_on_cpu():
    return self.global_batch_size
  # On TPU the model_fn sees one shard's worth of the global batch.
  return self.global_batch_size // self.num_shards
@property
def master_job(self):
  """Returns the job name to use to place TPU computations on.

  Returns:
    A string containing the job name, or None if no job should be specified.

  Raises:
    ValueError: If the user needs to specify a tpu_job_name, because we are
      unable to infer the job name automatically, or if the user-specified
      job names are inappropriate.
  """
  run_config = self._config
  # If the user specifies the tpu_job_name, use that.
  tpu_job_name = run_config.tpu_config.tpu_job_name
  if tpu_job_name:
    return tpu_job_name

  # The tpu job is determined by the run_config. Right now, this method is
  # required as tpu_config is not part of the RunConfig.
  mode = self._assert_mode()
  if mode == model_fn_lib.ModeKeys.EVAL:
    master = run_config.evaluation_master
  else:
    master = run_config.master
  if master in _LOCAL_MASTERS:
    return None

  session_config = run_config.session_config
  if not session_config or not session_config.cluster_def.job:
    return _DEFAULT_JOB_NAME

  cluster_def = session_config.cluster_def
  job_names = set(job.name for job in cluster_def.job)
  if _DEFAULT_JOB_NAME in job_names:
    # b/37868888 tracks allowing ClusterSpec propagation to reuse job names.
    raise ValueError('Currently, tpu_worker is not an allowed job name.')
  if len(job_names) == 1:
    return cluster_def.job[0].name
  if len(job_names) == 2 and _DEFAULT_COORDINATOR_JOB_NAME in job_names:
    job_names.discard(_DEFAULT_COORDINATOR_JOB_NAME)
    return job_names.pop()
  # TODO(b/67716447): Include more sophisticated heuristics.
  raise ValueError(
      'Could not infer TPU job name. Please specify a tpu_job_name as part '
      'of your TPUConfig.')
@property
def tpu_host_placement_function(self):
  """Returns the TPU host place function.

  The returned function maps either a `core_id` or a `host_id` (exactly one
  may be non-None) to the device string of that host's CPU.
  """
  master = self.master_job

  def _placement_function(_sentinal=None, core_id=None, host_id=None):  # pylint: disable=invalid-name
    """Maps core_id/host_id to the CPU device of the owning host."""
    assert _sentinal is None
    if core_id is not None and host_id is not None:
      raise RuntimeError(
          'core_id and host_id can have only one non-None value.')

    if master is None:
      return '/replica:0/task:0/device:CPU:0'
    else:
      # This assumes that if using more than 8 shards,
      # the job configuration varies 'task'.
      if core_id is not None:
        # Use floor division so host_id stays an int: under true division
        # `core_id / 8` is a float and only works because '%d' truncates it.
        host_id = core_id // 8
      return '/job:%s/task:%d/device:CPU:0' % (master, host_id)

  return _placement_function
@property
def tpu_device_placement_function(self):
  """Returns the TPU device place function.

  The returned function maps a replica/shard index to a TPU device string,
  delegating to the device assignment when model parallelism is in use.
  """
  master = self.master_job
  job_device = '' if master is None else ('/job:%s' % master)

  def _placement_function(i):
    """Maps index `i` to its TPU device."""
    if self._device_assignment:
      return self._device_assignment.tpu_device(replica=i, job=master)
    else:
      # Floor division keeps the task id an int: under true division `i / 8`
      # is a float and only works because '%d' truncates it.
      return '%s/task:%d/device:TPU:%d' % (job_device, i // 8, i % 8)

  return _placement_function
@property
def tpu_ordinal_function(self):
  """Returns the TPU ordinal fn."""

  def _tpu_ordinal_function(index):
    """Return the TPU ordinal associated with a shard.

    Required because the enqueue ops are placed on CPU.

    Args:
      index: the shard index

    Returns:
      The ordinal of the TPU device the shard's infeed should be placed on.
    """
    if not self._device_assignment:
      return index % 8
    return self._device_assignment.tpu_ordinal(replica=index)

  return _tpu_ordinal_function
@property
def device_assignment(self):
  """The device assignment for model parallelism (falsy/None when unused)."""
  return self._device_assignment
class _SIGNAL(object):
  """Signal used to control the thread of infeed/outfeed.

  All preserved signals must be negative numbers. Positive numbers are used to
  indicate the number of iterations for next training/evaluation loop.
  """
  # NOTE(review): presumably requests one more batch; its use is not visible
  # in this part of the file — confirm against callers.
  NEXT_BATCH = -1
  # Sentinel that tells the queue reader (read_iteration_counts) to stop.
  STOP = -2
class TPUEstimatorSpec(
    collections.namedtuple('TPUEstimatorSpec', [
        'mode',
        'predictions',
        'loss',
        'train_op',
        'eval_metrics',
        'export_outputs',
        'scaffold_fn',
        'host_call'
    ])):
  """Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.

  See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
  `export_outputs`.

  For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`,
  where `metric_fn` runs on CPU to generate metrics and `tensors` represents
  the `Tensor`s transferred from the TPU system to the CPU host and passed to
  `metric_fn`. This differs from @{tf.estimator.Estimator}: while
  `EstimatorSpec.eval_metric_ops` expects a dict,
  `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
  The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s.
  The `tensors` usually specify the model logits, which are transferred back
  from TPU system to CPU host. All tensors must be batch-major, i.e., the
  batch size is the first dimension. Once all tensors are available at CPU
  host from all shards, they are concatenated (on CPU) and passed as
  positional arguments to the `metric_fn` if `tensors` is a list, or keyword
  arguments if `tensors` is a dict. `metric_fn` takes the `tensors` and
  returns a dict from metric string name to the result of calling a metric
  function, namely a `(metric_tensor, update_op)` tuple. See `TPUEstimator`
  for an MNIST example of how to specify the `eval_metrics`.

  `scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
  function should not capture any Tensors in `model_fn`.

  `host_call` is a tuple of a `function` and a list or dictionary of `tensors`
  to pass to that function and returns a list of Tensors. `host_call`
  currently works for train() and evaluate(). The Tensors returned by the
  function are executed on the CPU on every step, so there is communication
  overhead when sending tensors from TPU to CPU. To reduce the overhead, try
  reducing the size of the tensors. The `tensors` are concatenated along
  their major (batch) dimension, and so must be >= rank 1. The `host_call` is
  useful for writing summaries with
  @{tf.contrib.summary.create_file_writer}.
  """

  def __new__(cls,
              mode,
              predictions=None,
              loss=None,
              train_op=None,
              eval_metrics=None,
              export_outputs=None,
              scaffold_fn=None,
              host_call=None):
    """Creates a validated `TPUEstimatorSpec` instance."""
    # Only the host-side calls that were actually supplied are validated.
    host_calls = {
        key: value
        for key, value in (('eval_metrics', eval_metrics),
                           ('host_call', host_call))
        if value is not None
    }
    _OutfeedHostCall.validate(host_calls)
    return super(TPUEstimatorSpec, cls).__new__(
        cls,
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metrics=eval_metrics,
        export_outputs=export_outputs,
        scaffold_fn=scaffold_fn,
        host_call=host_call)

  def as_estimator_spec(self):
    """Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
    host_calls = {}
    if self.eval_metrics is not None:
      host_calls['eval_metrics'] = self.eval_metrics
    if self.host_call is not None:
      host_calls['host_call'] = self.host_call
    host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)

    eval_metric_ops = (
        host_call_ret['eval_metrics']
        if self.eval_metrics is not None else None)
    hooks = (
        [_OutfeedHostCallHook(host_call_ret['host_call'])]
        if self.host_call is not None else None)
    scaffold = self.scaffold_fn() if self.scaffold_fn else None
    return model_fn_lib.EstimatorSpec(
        mode=self.mode,
        predictions=self.predictions,
        loss=self.loss,
        train_op=self.train_op,
        eval_metric_ops=eval_metric_ops,
        export_outputs=self.export_outputs,
        scaffold=scaffold,
        training_hooks=hooks,
        evaluation_hooks=hooks,
        prediction_hooks=hooks)
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.' % self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
  """Manages work queue and thread for a infeed/outfeed thread.

  This subclass only signals once.
  """

  def __init__(self, name, target, args):
    super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
    self._has_signaled = False

  def send_next_batch_signal(self, iterations):
    """Forwards only the first signal; later calls are ignored."""
    if self._has_signaled:
      return
    self._queue.put(iterations)
    self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
  """A Session hook setting up the TPU initialization, infeed, and outfeed.

  This hook does two major things:
  1. initialize and shutdown TPU system.
  2. launch and join the threads for infeed enqueue and (optional) outfeed
     dequeue.
  """

  def __init__(self,
               ctx,
               enqueue_ops,
               dequeue_ops,
               run_infeed_loop_on_coordinator=True):
    """Constructor.

    Args:
      ctx: `_TPUContext`-like object; supplies `master_job` and the TPUConfig.
      enqueue_ops: infeed enqueue ops run by the infeed thread.
      dequeue_ops: outfeed dequeue ops run by the outfeed thread.
      run_infeed_loop_on_coordinator: if True, the infeed thread runs the
        enqueue ops once per iteration; otherwise once per signal.
    """
    self._master_job = ctx.master_job
    self._enqueue_ops = enqueue_ops
    self._dequeue_ops = dequeue_ops
    self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
    self._initial_infeed_sleep_secs = (
        ctx.config.tpu_config.initial_infeed_sleep_secs)
    self._session_cancel_timer = None
    self._feed_error = None
    self._finished = False

  def begin(self):
    """Builds TPU init/shutdown ops and the iterations-per-loop variable."""
    logging.info('TPU job name %s', self._master_job)
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
    self._init_ops = [tpu.initialize_system(job=self._master_job)]
    self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
    summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
    self._init_ops.extend(summary_writer_init_ops)
    # Get all the writer resources from the initializer, so we know what to
    # flush.
    for op in summary_writer_init_ops:
      self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))

  def _log_error(self, session, error):
    """Log an infeed or outfeed error.

    This logs a short error message immediately, and schedules a timer to
    emit the full stack trace and error message after a short period of time.
    If the main session has terminated by the time the timer triggers, we
    assume the real source of the error was from the main session and avoid
    emitting a stack trace for the infeed.

    Args:
      session: `tf.Session`, session to be terminated.
      error: the Exception to log.
    """
    logging.warning(
        '\n\n'
        'Error occurred during infeed/outfeed. This may be due to a compile '
        'error in the main session. Waiting for a short time for the main '
        'session to come back.\n\n%s', error)
    self._feed_error = traceback.format_exc()
    # If we've already encountered a feed error, don't schedule another
    # cancellation op.
    if self._session_cancel_timer:
      return

    def _cancel_session():
      """After a grace period, closes `session` if it hasn't finished."""
      # Close the session to avoid the main thread from hanging. If input
      # pipeline triggers any error, the infeed thread dies but the main thread
      # for TPU computation waits for the infeed enqueue forever. Close the
      # Session to cancel the main thread Session.run execution.
      #
      # We sleep for a few seconds before closing to give some time
      # for the TPU compilation error, if any, propagating, from TPU to CPU
      # host. Compilation errors should be reported by the main thread so that
      # the program can be interrupted and users can take action. Due to a race
      # condition, the infeed thread might see an error first. Closing the
      # session here immediately would result in a session cancellation
      # exception in the main thread, instead of the expected compile error.
      # User code that depends on having the proper exception type will
      # therefore be confused.
      time.sleep(5)
      # If the main session is still running, the infeed/outfeed errors are
      # legitimate, and should be logged.
      if not self._finished and self._feed_error:
        logging.error('Feed error: %s', self._feed_error)
        logging.error('Closing session. A RuntimeError should follow.')
        session.close()

    self._session_cancel_timer = threading.Thread(target=_cancel_session)
    self._session_cancel_timer.daemon = True
    self._session_cancel_timer.start()

  def _run_infeed(self, queue_ctx, session):
    """Infeed thread body: runs enqueue ops until signaled to stop."""
    logging.info('Starting infeed thread controller.')
    if self._initial_infeed_sleep_secs:
      # Bug fix: this previously logged `self._name`, which is never set on
      # this class, so any configured initial sleep raised AttributeError.
      logging.info('Infeed thread sleeping for %d seconds.',
                   self._initial_infeed_sleep_secs)
      time.sleep(self._initial_infeed_sleep_secs)
      logging.info('Infeed thread starting after sleep')
    try:
      if self._run_infeed_loop_on_coordinator:
        for count, steps in enumerate(queue_ctx.read_iteration_counts()):
          for i in xrange(steps):
            logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
            session.run(self._enqueue_ops)
      else:
        for _ in queue_ctx.read_iteration_counts():
          session.run(self._enqueue_ops)
      logging.info('Infeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _run_outfeed(self, queue_ctx, session):
    """Outfeed thread body: runs dequeue ops until signaled to stop."""
    logging.info('Starting outfeed thread controller.')
    try:
      for count, steps in enumerate(queue_ctx.read_iteration_counts()):
        for i in xrange(steps):
          logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
          session.run(self._dequeue_ops)
      logging.info('Outfeed thread finished, shutting down.')
    except Exception as e:  # pylint: disable=broad-except
      self._log_error(session, e)

  def _create_infeed_controller(self, name, target, args):
    # Overridden by the prediction hook to use a signal-once queue.
    return _OpQueueContext(name=name, target=target, args=args)

  def after_create_session(self, session, coord):
    """Initializes the TPU system and starts infeed/outfeed threads."""
    logging.info('Init TPU system')
    session.run(self._init_ops,
                options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
    logging.info('Start infeed thread controller')
    self._infeed_controller = self._create_infeed_controller(
        name='InfeedController', target=self._run_infeed, args=(session,))
    logging.info('Start outfeed thread controller')
    self._outfeed_controller = _OpQueueContext(
        name='OutfeedController', target=self._run_outfeed, args=(session,))

  def before_run(self, run_context):
    """Signals both controllers with the next iteration count."""
    self._feed_error = None
    # Wait for the cancellation timer to complete before continuing.
    if self._session_cancel_timer:
      self._session_cancel_timer.join()
      self._session_cancel_timer = None
    iterations = run_context.session.run(self._iterations_per_loop_var)
    logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
    self._infeed_controller.send_next_batch_signal(iterations)
    logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
                 iterations)
    self._outfeed_controller.send_next_batch_signal(iterations)

  def end(self, session):
    """Joins worker threads and shuts down the TPU system."""
    if self._session_cancel_timer:
      logging.warning('Feed error occurred; waiting for message.')
      self._session_cancel_timer.join()
    self._finished = True
    logging.info('Stop infeed thread controller')
    self._infeed_controller.join()
    logging.info('Stop output thread controller')
    self._outfeed_controller.join()
    logging.info('Shutdown TPU system.')
    session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
  """`TPUInfeedOutfeedSessionHook` variant used for prediction.

  Differences from the base hook: the infeed loop is not run on the
  coordinator, and the infeed controller forwards only the first batch
  signal (`_OpSignalOnceQueueContext`).
  """

  def __init__(self, ctx, enqueue_ops, dequeue_ops):
    super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
        ctx, enqueue_ops, dequeue_ops, run_infeed_loop_on_coordinator=False)

  def _create_infeed_controller(self, name, target, args):
    # Prediction signals the infeed thread at most once.
    return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
  """Hook that requests stop at a specified step.

  This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
  following differences for TPU training:

  1. This hook sets the variable for iterations_per_loop, which is used by
     `TPUInfeedOutfeedSessionHook` to control the iterations for
     infeed/outfeed. As the hook execution order is not guaranteed, the
     variable update is handled in `after_create_session` and `after_run` as
     `TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.

  2. For each training loop (session.run), the global step could be increased
     multiple times on TPU. The global step tensor value will be explicitly
     read again in `after_run` to ensure the latest value is retrieved to
     avoid race condition.
  """

  def __init__(self, iterations, num_steps=None, last_step=None):
    """Initializes the hook.

    Args:
      iterations: The number of iterations to run optimizer per training loop.
      num_steps: Number of steps to execute.
      last_step: Step after which to stop.

    Raises:
      ValueError: If neither or both of num_steps and last_step are given.
    """
    # Exactly one of num_steps / last_step must be provided.
    if (num_steps is None) == (last_step is None):
      if num_steps is None:
        raise ValueError('One of num_steps or last_step must be specified.')
      raise ValueError('Only one of num_steps or last_step can be specified.')
    self._iterations = iterations
    self._num_steps = num_steps
    self._last_step = last_step

  def _next_iterations(self, global_step, last_step):
    # Do not run past last_step, and never more than the configured loop size.
    return min(last_step - global_step, self._iterations)

  def begin(self):
    self._global_step_tensor = training_util.get_global_step()
    if self._global_step_tensor is None:
      raise RuntimeError('Global step should be created.')
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    """Resolves last_step and loads the first iteration count."""
    global_step = session.run(self._global_step_tensor)
    if self._last_step is None:
      self._last_step = global_step + self._num_steps
    self._iterations_per_loop_var.load(
        self._next_iterations(global_step, self._last_step), session=session)

  def after_run(self, run_context, run_values):
    # Global step cannot be retrieved via SessionRunArgs and before_run due to
    # race condition.
    global_step = run_context.session.run(self._global_step_tensor)
    if global_step >= self._last_step:
      run_context.request_stop()
      return
    self._iterations_per_loop_var.load(
        self._next_iterations(global_step, self._last_step),
        session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
  """Hook that loads the evaluation step count into iterations_per_loop.

  (The code only sets the iterations variable; it does not request stop.)
  """

  def __init__(self, num_steps):
    """Initializes a `_SetEvalIterationsHook`.

    Args:
      num_steps: Number of steps to execute.
    """
    self._num_steps = num_steps

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
  """Hook that requests stop according to the stopping signal in prediction."""

  def __init__(self, scalar_stopping_signal):
    self._scalar_stopping_signal = scalar_stopping_signal

  def begin(self):
    self._iterations_per_loop_var = _create_or_get_iterations_per_loop()

  def after_create_session(self, session, coord):
    # This is not necessary as we do not run infeed enqueue and outfeed
    # dequeue in side threads for prediction model. But it makes the
    # TPUInfeedOutfeedSessionHook print a nice message.
    self._iterations_per_loop_var.load(1, session=session)

  def before_run(self, run_context):
    # Fetch the stopping-signal tensor alongside the regular run.
    return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)

  def after_run(self, run_context, run_values):
    _ = run_context
    if _StopSignals.should_stop(run_values.results):
      # NOTE(xiejw): In prediction, stopping signals are inserted for each
      # batch, and one extra batch is appended to signal the system to stop.
      # The data flow might look like
      #
      #  batch   0: images, labels, stop = 0  (user provided)
      #  batch   1: images, labels, stop = 0  (user provided)
      #  ...
      #  batch  99: images, labels, stop = 0  (user provided)
      #  batch 100: images, labels, stop = 1  (TPUEstimator appended)
      #
      # The final batch (id = 100) is appended by TPUEstimator, so it must be
      # dropped before returning predictions to the user. To achieve that, we
      # raise OutOfRangeError in after_run: once MonitoredSession sees this
      # error in SessionRunHook.after_run, the "current" prediction (the
      # batch with id=100) is discarded immediately.
      raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(ctx, input_fn,
                                              inputs_structure_recorder):
  """Generates infeed enqueue ops for per-core input_fn on a single host."""
  captured_infeed_queue = _CapturedObject()

  def enqueue_ops_fn():
    """Invokes `input_fn` once per core on this host and builds enqueue ops."""
    per_host_sharded_inputs = []
    for core_ordinal in range(ctx.num_of_cores_per_host):
      with ops.name_scope('ordinal_%d' % (core_ordinal)):
        user_inputs = _Inputs.from_input_fn(input_fn())
        if user_inputs.is_dataset:
          raise TypeError(
              '`input_fn` returning `Dataset` is not yet supported in '
              'per-Core input pipeline deployment yet. Please set '
              'TPUConfig.per_host_input_for_training to True or return '
              '`features` and `labels` from `input_fn`')

        features, labels = user_inputs.features_and_labels()
        inputs_structure_recorder.validate_and_record_structure(
            features, labels)
        per_host_sharded_inputs.append(
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels))

    infeed_queue = tpu_feed.InfeedQueue(
        number_of_tuple_elements=len(per_host_sharded_inputs[0]))
    # The queue must be captured after enqueue_ops_fn has run.
    captured_infeed_queue.capture(infeed_queue)
    infeed_queue.set_configuration_from_sharded_input_tensors(
        per_host_sharded_inputs)
    return infeed_queue.generate_enqueue_ops(
        per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)

  return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
    ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
  """Generates infeed enqueue ops for per-host input_fn on a single host.

  Args:
    ctx: `_TPUContext`-like object describing the TPU deployment.
    input_fn: user input function; invoked once on `device`.
    inputs_structure_recorder: records and flattens the feature/label
      structure for infeed.
    batch_axis: shard dimensions forwarded to `InfeedQueue`.
    device: the host CPU device on which the input pipeline is placed.
    host_id: index of this host; used to look up replicas when a device
      assignment (model parallelism) is present.

  Returns:
    A tuple (enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset).
  """
  captured_infeed_queue = _CapturedObject()
  hooks = []
  with ops.device(device):
    inputs = _Inputs.from_input_fn(input_fn())
    is_dataset = inputs.is_dataset
    if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
      if not is_dataset:
        raise TypeError(
            'For mode PREDICT, `input_fn` must return `Dataset` instead of '
            '`features` and `labels`.')
      # Wrap the dataset so each batch carries an explicit stopping signal.
      inputs = _InputsWithStoppingSignals(
          dataset=inputs.dataset, batch_size=ctx.batch_size_for_input_fn)
    if is_dataset:
      hooks.append(inputs.dataset_initializer_hook())

    def _tpu_ordinal_function_impl(shard_index_in_host):
      # We put both enqueue/dequeue op at tpu.core(0) in each replica.
      replica = ctx.device_assignment.lookup_replicas(
          host_id, (0, 0, 0))[shard_index_in_host]
      return ctx.device_assignment.tpu_ordinal(replica=replica)

    # An explicit ordinal function is only needed under model parallelism.
    if ctx.device_assignment:
      tpu_ordinal_function = _tpu_ordinal_function_impl
    else:
      tpu_ordinal_function = None

    def enqueue_ops_fn():
      """Builds the per-host enqueue ops; called during loop construction."""
      with ops.device(device):
        num_of_shards_per_host = ctx.num_of_shards_per_host
        # Convert user input to features and labels. If the user returns a
        # dataset, it is initialized and the features and labels extracted via
        # `dataset.iterator.get_next()`
        features, labels = inputs.features_and_labels()
        signals = inputs.signals()
        inputs_structure_recorder.validate_and_record_structure(
            features, labels, signals)
        unsharded_tensor_list = (
            inputs_structure_recorder.flatten_features_and_labels(
                features, labels, signals))
        infeed_queue = tpu_feed.InfeedQueue(
            tuple_types=[t.dtype for t in unsharded_tensor_list],
            tuple_shapes=[t.shape for t in unsharded_tensor_list],
            shard_dimensions=batch_axis)
        captured_infeed_queue.capture(infeed_queue)
        infeed_queue.set_number_of_shards(num_of_shards_per_host)
        per_host_enqueue_ops = (
            infeed_queue.split_inputs_and_generate_enqueue_ops(
                unsharded_tensor_list,
                placement_function=lambda x: device,
                tpu_ordinal_function=tpu_ordinal_function))
        if signals is None:
          return per_host_enqueue_ops
        else:
          # Return the signals too, so callers can watch for the stop marker.
          return {
              'ops': per_host_enqueue_ops,
              'signals': signals,
          }

  return enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
call site. To be precise, based on the configuration in `_TPUContext`, it
invokes `input_fn` for all cores (usually multi-host TPU training) or for one
host (usually for single-host TPU evaluation), and sends all `features` and
`labels` returned by `input_fn` to TPU infeed. For per-core invocation,
`features` and `labels` are piped to infeed directly, one tuple for each
core. For per-host invocation, `features` and `labels` are split at host
(with respect to `batch_axis`) and piped to all cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separatedly to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). `features` can be `Tensor` or dict of string name to `Tensor`,
and `labels` could be `None`, `Tensor`, or dict of string name to `Tensor`.
TPU infeed/outfeed library expects flattened tensor list. So, `features` and
`labels` need to be flattened, before infeed enqueue, and the structure of
them needs to be recorded, in order to restore them after infeed dequeue.
"""
class InputsStructureRecorder(object):
  """The recorder to record inputs structure."""

  def __init__(self):
    # Recorded structure of inputs.
    self._feature_names = []
    self._label_names = []
    self._has_labels = False
    self._signals_helper = None
    # Whether a structure has been recorded yet.
    self._initialized = False

  def has_labels(self):
    """Whether the recorded `input_fn` output included labels."""
    return self._has_labels

  def validate_and_record_structure(self, features, labels, signals=None):
    """Validates and records the structure of `features` and `labels`."""

    def _extract_key_names(tensor_or_dict):
      if isinstance(tensor_or_dict, dict):
        return tensor_or_dict.keys()
      return []

    has_labels = labels is not None
    feature_names = _extract_key_names(features)
    label_names = _extract_key_names(labels)

    if signals is not None and self._signals_helper is None:
      # Record signals helper.
      self._signals_helper = _SignalsHelper(signals)

    if not self._initialized:
      # First call: record structure.
      self._initialized = True
      self._feature_names = feature_names
      self._label_names = label_names
      self._has_labels = has_labels
    else:
      # Subsequent calls must match; these should never fire.
      assert feature_names == self._feature_names, 'feature keys mismatched'
      assert label_names == self._label_names, 'label keys mismatched'
      assert has_labels == self._has_labels, 'label presence mismatched'

  def flatten_features_and_labels(self, features, labels, signals=None):
    """Flattens the `features` and `labels` to a single tensor list."""
    flattened = []
    if self._feature_names:
      # We need a fixed ordering for enqueueing and dequeueing.
      flattened.extend(features[name] for name in self._feature_names)
    else:
      flattened.append(features)
    if labels is not None:
      if self._label_names:
        # We need a fixed ordering for enqueueing and dequeueing.
        flattened.extend(labels[name] for name in self._label_names)
      else:
        flattened.append(labels)
    if signals is not None:
      flattened.extend(_SignalsHelper.as_tensor_list(signals))
    return flattened

  def unflatten_features_and_labels(self, flattened_inputs):
    """Restores the flattened inputs to original features and labels form.

    Args:
      flattened_inputs: Flattened inputs for each shard.

    Returns:
      A tuple of (`features`, `labels`), where `labels` could be None.
      Each one, if present, should have identical structure (single tensor
      vs dict) as the one returned by input_fn.

    Raises:
      ValueError: If the number of expected tensors from `flattened_inputs`
        mismatches the recorded structure.
    """
    num_features = len(self._feature_names) if self._feature_names else 1
    if self._has_labels:
      num_labels = len(self._label_names) if self._label_names else 1
    else:
      num_labels = 0
    num_signals = (
        self._signals_helper.num_signals if self._signals_helper else 0)

    expected_total = num_features + num_labels + num_signals
    if expected_total != len(flattened_inputs):
      raise ValueError(
          'The number of flattened tensors mismatches expected num. '
          'Expected {}, got {}'.format(expected_total,
                                       len(flattened_inputs)))

    if self._feature_names:
      features = dict(
          zip(self._feature_names, flattened_inputs[:num_features]))
    else:
      # Single tensor case.
      features = flattened_inputs[0]

    if num_labels == 0:
      labels = None
    elif self._label_names:
      labels = dict(
          zip(self._label_names,
              flattened_inputs[num_features:num_features + num_labels]))
    else:
      # Single tensor case.
      labels = flattened_inputs[num_features]

    signals = None
    if num_signals != 0:
      signals = self._signals_helper.unflatten(
          flattened_inputs[num_features + num_labels:])
    return _Inputs(features, labels, signals=signals)
def __init__(self, input_fn, batch_axis, ctx):
  """Constructor.

  Args:
    input_fn: input fn for train or eval.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards.
    ctx: A `_TPUContext` instance with mode.
  """
  self._input_fn = input_fn
  self._batch_axis = batch_axis
  self._ctx = ctx
  # Records feature/label structure so dequeued tensors can be unflattened.
  self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder()
  self._sharded_per_core = ctx.is_input_sharded_per_core()
  # Captured lazily while building the enqueue ops.
  self._infeed_queue = None
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
  """Generates infeed enqueue ops and dequeue_fn.

  Returns:
    A tuple (enqueue_ops, dequeue_fn, all_hooks,
    run_infeed_loop_on_coordinator): the enqueue ops and hooks produced by
    deploying the input pipeline, plus a closure that dequeues and
    unflattens one step of inputs on the TPU side.
  """
  # While tf.while_loop is called, the body function, which invokes
  # `enqueue_fn` passed in, is called to construct the graph. So, input_fn
  # structure is recorded.
  enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
      self._invoke_input_fn_and_record_structure())
  self._validate_input_pipeline()

  def dequeue_fn():
    """dequeue_fn is used by TPU to retrieve the tensors."""
    # In the model-parallel case, both the host-side and device-side
    # computations must agree on the core on which infeed takes place. We
    # choose to perform infeed on logical core 0 of each replica.
    with ops.device(tpu.core(0)):
      values = self._infeed_queue.generate_dequeue_op()
    # The unflatten process uses the structure information recorded above.
    return self._inputs_structure_recorder.unflatten_features_and_labels(
        values)

  return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
  """Deploys the input pipeline and record input structure.

  Returns:
    A tuple of (enqueue_ops, all_hooks, run_infeed_loop_on_coordinator),
    where enqueue_ops is a list with one entry per host.
  """
  enqueue_ops = []
  infeed_queues = []
  all_hooks = []
  num_hosts = self._ctx.num_hosts
  tpu_host_placement_fn = self._ctx.tpu_host_placement_function
  # Flipped to False below whenever the enqueue is wrapped in an in-graph
  # tf.while_loop (then the coordinator need not drive the infeed loop).
  run_infeed_loop_on_coordinator = True
  if self._sharded_per_core:
    # Per-Core input pipeline deployment.
    # Invoke input pipeline for each core and placed on the corresponding
    # host.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue = (
              generate_per_core_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder))
          if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
            run_infeed_loop_on_coordinator = False
            enqueue_ops.append(
                _wrap_computation_in_while_loop(
                    device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          # Infeed_queue_getter must be called after enqueue_ops_fn is called.
          infeed_queues.append(captured_infeed_queue.get())
  else:
    # Per-host input pipeline deployment.
    for host_id in range(num_hosts):
      host_device = tpu_host_placement_fn(host_id=host_id)
      with ops.device(host_device):
        with ops.name_scope('input_pipeline_task%d' % (host_id)):
          enqueue_ops_fn, captured_infeed_queue, hooks, is_dataset = (
              generate_per_host_enqueue_ops_fn_for_host(
                  self._ctx, self._input_fn, self._inputs_structure_recorder,
                  self._batch_axis, host_device, host_id))
          all_hooks.extend(hooks)

          # NOTE(xiejw): We dispatch here based on the return type of the
          # users `input_fn`.
          #
          # 1. If input_fn returns a Dataset instance, we initialize the
          # iterator outside of tf.while_loop, and call the iterator.get_next
          # inside tf.while_loop. This should be always safe.
          #
          # 2. If input_fn returns (features, labels), it is too late to wrap
          # them inside tf.while_loop, as resource initialization cannot be
          # handled in TF control flow properly. In this case, we will use
          # python loop to enqueue the data into TPU system. This may be
          # slow compared to the previous case.
          if is_dataset:
            run_infeed_loop_on_coordinator = False
            # PREDICT mode uses the stopping-signal variant so the loop can
            # terminate when the dataset is exhausted.
            wrap_fn = (
                _wrap_computation_in_while_loop
                if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
                _wrap_computation_in_while_loop_with_stopping_signals)
            enqueue_ops.append(
                wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
          else:
            enqueue_ops.append(enqueue_ops_fn())
          infeed_queues.append(captured_infeed_queue.get())
  # infeed_queue is used to generate dequeue ops. The only thing it uses for
  # dequeue is dtypes and types. So, any one can be used. Here, grab the
  # first one.
  self._infeed_queue = infeed_queues[0]
  return enqueue_ops, all_hooks, run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
  """Checks the input pipeline for constructs that do not work with infeed.

  QueueRunner-based pipelines cannot run inside the tf.while_loop used for
  TPU infeed. When the while-loop wrapping is enabled we raise; otherwise
  (legacy behavior) we only warn, so existing user code keeps running.

  Raises:
    RuntimeError: If the default graph contains QueueRunners and
      `_WRAP_INPUT_FN_INTO_WHILE_LOOP` is enabled.
  """
  # Perform some sanity checks to log user friendly information. We should
  # error out to give users better error message. But, if
  # _WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
  # user code, so, log a warning.
  if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
    # Fixed: the message previously left the '(see ...' parenthesis unclosed.
    err_msg = ('Input pipeline contains one or more QueueRunners. '
               'It could be slow and not scalable. Please consider '
               'converting your input pipeline to use `tf.data` instead (see '
               'https://www.tensorflow.org/programmers_guide/datasets for '
               'instructions).')
    if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
      raise RuntimeError(err_msg)
    else:
      logging.warn(err_msg)
class _ModelFnWrapper(object):
  """A `model_fn` wrapper.

  This makes calling model_fn on CPU and TPU easier and more consistent and
  performs necessary check and mutation required by TPU training and
  evaluation.

  In addition, this wrapper manages converting the `model_fn` to a single TPU
  train and eval step.
  """

  def __init__(self, model_fn, config, params, ctx):
    self._model_fn = model_fn
    self._config = config
    self._params = params
    self._ctx = ctx

  def call_without_tpu(self, features, labels):
    # CPU path: invoke the user model_fn directly, no infeed/outfeed.
    return self._call_model_fn(features, labels)

  def convert_to_single_tpu_train_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single train step on TPU.

    The user provided `model_fn` takes input tuple
    (features, labels) and produces the EstimatorSpec with train_op and loss for
    train `mode`. This usually represents a single train computation on CPU.

    For TPU training, a train (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides the input should be taken from TPU infeed rather
    than input pipeline (input_fn) directly. To fit TPU loop and replicate
    pattern, the original train computation should be reformed, which is the
    returned `train_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of train_fn, host_calls, and captured scaffold_fn. The train_fn
      representing the train step for TPU.
    """
    host_call = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def train_step(loss):
      """Training step function for use inside a while loop."""
      del loss  # unused; required in function signature.
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()

      estimator_spec = self._verify_estimator_spec(
          self._call_model_fn(features, labels))
      loss, train_op = estimator_spec.loss, estimator_spec.train_op

      # scaffold_fn only exists on TPUEstimatorSpec; capture None otherwise so
      # the captured object is always populated exactly once.
      if isinstance(estimator_spec, TPUEstimatorSpec):
        captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
      else:
        captured_scaffold_fn.capture(None)

      # We must run train_op to update the variables prior to running the
      # outfeed.
      with ops.control_dependencies([train_op]):
        host_call_outfeed_ops = []
        if (isinstance(estimator_spec, TPUEstimatorSpec) and
            estimator_spec.host_call is not None):
          host_call.record({'host_call': estimator_spec.host_call})
          host_call_outfeed_ops = host_call.create_enqueue_op()
        with ops.control_dependencies(host_call_outfeed_ops):
          # identity() carries the control dependencies above into the loop
          # body's returned loss value.
          return array_ops.identity(loss)

    return train_step, host_call, captured_scaffold_fn

  def convert_to_single_tpu_eval_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single eval step on TPU.

    Similar to training, the user provided `model_fn` takes input tuple
    (features, labels) and produces the TPUEstimatorSpec with eval_metrics for
    eval `mode`. This usually represents a single evaluation computation on
    CPU.

    For TPU evaluation, a eval (computation) step is first wrapped in a
    tf.while_loop control flow to repeat for many times and then replicated to
    all TPU shards. Besides the input and output are slightly different.
    Input, features and labels, should be taken from TPU infeed rather than
    input pipeline (input_fn) directly. Output is managed in two stages.
    First, the model outputs as the result of evaluation computation, usually
    model logits, should be transferred from TPU system to CPU. Then, all
    model outputs are concatenated first on CPU and sent to the metric_fn for
    metrics computation. To fit TPU evaluation pattern, the original eval
    computation should be reformed, which is the returned `eval_step`.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of eval_fn, host_calls, and captured scaffold_fn. The eval_fn
      representing the eval step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def eval_step(total_loss):
      """Evaluation step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()

      tpu_estimator_spec = self._call_model_fn(features, labels)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        # NOTE(review): message lacks a space between 'type' and the backtick.
        raise RuntimeError(
            'estimator_spec used by TPU evaluation must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      loss = tpu_estimator_spec.loss
      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
      if tpu_estimator_spec.host_call is not None:
        # We assume that evaluate won't update global step, so we don't wrap
        # this host_call.
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      with ops.control_dependencies(host_calls.create_enqueue_op()):
        # The while-loop carries the running loss sum as its state.
        return math_ops.add(total_loss, loss)

    return eval_step, host_calls, captured_scaffold_fn

  def convert_to_single_tpu_predict_step(self, dequeue_fn):
    """Converts user provided `model_fn` as a single predict step on TPU.

    Args:
      dequeue_fn: The function to retrieve inputs, features and labels, from
        TPU infeed dequeue channel.

    Returns:
      A tuple of predict_fn, host_calls, and captured scaffold_fn. The
      predict_fn representing the predict step for TPU.
    """
    host_calls = _OutfeedHostCall(self._ctx)
    captured_scaffold_fn = _CapturedObject()

    def predict_step(unused_scalar_stopping_signal):
      """Evaluation step function for use inside a while loop."""
      inputs = dequeue_fn()
      features, labels = inputs.features_and_labels()
      stopping_signals = inputs.signals()

      assert stopping_signals is not None, (
          'Internal Error: `signals` is missing.')

      tpu_estimator_spec = self._call_model_fn(
          features, labels, is_export_mode=False)
      if not isinstance(tpu_estimator_spec, TPUEstimatorSpec):
        raise RuntimeError(
            'estimator_spec used by TPU prediction must have type'
            '`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))

      captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
      to_record = {}
      # Predictions and signals are sent through the outfeed unchanged; the
      # identity host_fn just returns its keyword arguments as a dict.
      identity_fn = lambda **kwargs: kwargs
      # TODO(xiejw): Adds validation for prediction dictionary.
      # TODO(xiejw): Adds support for single tensor as predictions.
      if not isinstance(tpu_estimator_spec.predictions, dict):
        raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
      to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
      to_record['signals'] = [identity_fn, stopping_signals]
      if tpu_estimator_spec.host_call is not None:
        to_record['host_call'] = tpu_estimator_spec.host_call
      host_calls.record(to_record)

      with ops.control_dependencies(host_calls.create_enqueue_op()):
        return _StopSignals.as_scalar_stopping_signal(stopping_signals)

    return predict_step, host_calls, captured_scaffold_fn

  def _call_model_fn(self, features, labels, is_export_mode=True):
    """Calls the model_fn with required parameters."""
    model_fn_args = util.fn_args(self._model_fn)
    kwargs = {}

    # Makes deep copy of `config` and `params` in case user mutates them.
    config = copy.deepcopy(self._config)
    params = copy.deepcopy(self._params)

    if 'labels' in model_fn_args:
      kwargs['labels'] = labels
    elif labels is not None:
      raise ValueError(
          'model_fn does not take labels, but input_fn returns labels.')
    if 'mode' in model_fn_args:
      kwargs['mode'] = self._ctx.mode
    if 'config' in model_fn_args:
      kwargs['config'] = config
    if 'params' in model_fn_args:
      kwargs['params'] = params

    if 'params' not in model_fn_args:
      raise ValueError('model_fn ({}) does not include params argument, '
                       'required by TPUEstimator to pass batch size as '
                       'params[\'batch_size\']'.format(self._model_fn))

    batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
    if batch_size_for_model_fn is not None:
      # Inject the per-shard (or global, on CPU) batch size for the user.
      params[_BATCH_SIZE_KEY] = batch_size_for_model_fn

    estimator_spec = self._model_fn(features=features, **kwargs)
    if (self._ctx.is_running_on_cpu(is_export_mode) and
        isinstance(estimator_spec, TPUEstimatorSpec)):
      # The estimator_spec will be passed to `Estimator` directly, which
      # expects type `EstimatorSpec`.
      return estimator_spec.as_estimator_spec()
    else:
      return estimator_spec

  def _verify_estimator_spec(self, estimator_spec):
    """Validates the estimator_spec."""
    if isinstance(estimator_spec, TPUEstimatorSpec):
      return estimator_spec

    err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
    if estimator_spec.training_chief_hooks:
      raise ValueError(err_msg.format('training_chief_hooks'))
    if estimator_spec.training_hooks:
      raise ValueError(err_msg.format('training_hooks'))
    if estimator_spec.evaluation_hooks:
      raise ValueError(err_msg.format('evaluation_hooks'))

    if estimator_spec.scaffold:
      logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
                      'Please use TPUEstimatorSpec.')
    return estimator_spec
class _OutfeedHostCall(object):
  """Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""

  def __init__(self, ctx):
    self._ctx = ctx
    # Ordered list of recorded host-call names; drives the flat tensor layout
    # used by the enqueue/dequeue ops.
    self._names = []
    # All of these are dictionaries of lists keyed on the name.
    self._host_fns = {}
    self._tensor_keys = collections.defaultdict(list)
    self._tensors = collections.defaultdict(list)
    self._tensor_dtypes = collections.defaultdict(list)
    self._tensor_shapes = collections.defaultdict(list)

  @staticmethod
  def validate(host_calls):
    """Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
    for name, host_call in host_calls.items():
      if not isinstance(host_call, (tuple, list)):
        raise ValueError('{} should be tuple or list'.format(name))
      if len(host_call) != 2:
        raise ValueError('{} should have two elements.'.format(name))
      if not callable(host_call[0]):
        raise TypeError('{}[0] should be callable.'.format(name))
      if not isinstance(host_call[1], (tuple, list, dict)):
        raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))

      if isinstance(host_call[1], (tuple, list)):
        fullargspec = tf_inspect.getfullargspec(host_call[0])
        fn_args = util.fn_args(host_call[0])
        # wrapped_hostcall_with_global_step uses varargs, so we allow that.
        if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
          raise RuntimeError(
              'In TPUEstimatorSpec.{}, length of tensors {} does not match '
              'method args of the function, which takes {}.'.format(
                  name, len(host_call[1]), len(fn_args)))

  @staticmethod
  def create_cpu_hostcall(host_calls):
    """Runs on the host_call on CPU instead of TPU when use_tpu=False."""
    _OutfeedHostCall.validate(host_calls)
    ret = {}
    for name, host_call in host_calls.items():
      host_fn, tensors = host_call
      if isinstance(tensors, (tuple, list)):
        ret[name] = host_fn(*tensors)
      else:
        # Must be dict.
        try:
          ret[name] = host_fn(**tensors)
        except TypeError as e:
          logging.warning(
              'Exception while calling %s: %s. It is likely the tensors '
              '(%s[1]) do not match the '
              'function\'s arguments', name, e, name)
          raise e
    return ret

  def record(self, host_calls):
    """Records the host_call structure."""
    for name, host_call in host_calls.items():
      host_fn, tensor_list_or_dict = host_call
      self._names.append(name)
      self._host_fns[name] = host_fn

      if isinstance(tensor_list_or_dict, dict):
        for (key, tensor) in six.iteritems(tensor_list_or_dict):
          self._tensor_keys[name].append(key)
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)
      else:
        # List or tuple.
        # _tensor_keys[name] is None marks "positional args" for dispatch in
        # create_tpu_hostcall().
        self._tensor_keys[name] = None
        for tensor in tensor_list_or_dict:
          self._tensors[name].append(tensor)
          self._tensor_dtypes[name].append(tensor.dtype)
          self._tensor_shapes[name].append(tensor.shape)

  def create_enqueue_op(self):
    """Create the op to enqueue the recorded host_calls.

    Returns:
      A list of enqueue ops, which is empty if there are no host calls.
    """
    if not self._names:
      return []

    tensors = []
    # TODO(jhseu): Consider deduping tensors.
    for name in self._names:
      tensors.extend(self._tensors[name])

    # Enqueue happens on logical core 0 of each replica, matching the dequeue
    # placement in create_tpu_hostcall().
    with ops.device(tpu.core(0)):
      return [tpu_ops.outfeed_enqueue_tuple(tensors)]

  def create_tpu_hostcall(self):
    """Sends the tensors through outfeed and runs the host_fn on CPU.

    The tensors are concatenated along dimension 0 to form a global tensor
    across all shards. The concatenated function is passed to the host_fn and
    executed on the first host.

    Returns:
      A dictionary mapping name to the return type of the host_call by that
      name.

    Raises:
      RuntimeError: If outfeed tensor is scalar.
    """
    if not self._names:
      # NOTE(review): returns a list here but a dict below; callers appear to
      # only check truthiness — confirm before changing.
      return []

    ret = {}
    # For each i, dequeue_ops[i] is a list containing the tensors from all
    # shards. This list is concatenated later.
    dequeue_ops = []
    tensor_dtypes = []
    tensor_shapes = []
    for name in self._names:
      for _ in self._tensors[name]:
        dequeue_ops.append([])
      for dtype in self._tensor_dtypes[name]:
        tensor_dtypes.append(dtype)
      for shape in self._tensor_shapes[name]:
        tensor_shapes.append(shape)

    # Outfeed ops execute on each replica's first logical core. Note: we must
    # constraint it such that we have at most one outfeed dequeue and enqueue
    # per replica.
    tpu_device_placement_fn = self._ctx.tpu_device_placement_function
    for i in xrange(self._ctx.num_shards):
      with ops.device(tpu_device_placement_fn(i)):
        outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
            dtypes=tensor_dtypes, shapes=tensor_shapes)
        for j, item in enumerate(outfeed_tensors):
          dequeue_ops[j].append(item)

    # Deconstruct dequeue ops.
    dequeue_ops_by_name = {}
    pos = 0
    for name in self._names:
      dequeue_ops_by_name[name] = dequeue_ops[pos:pos+len(self._tensors[name])]
      pos += len(self._tensors[name])

    # It is assumed evaluation always happens on single host TPU system. So,
    # place all ops on tpu host if possible.
    #
    # TODO(jhseu): Evaluate whether this is right for summaries.
    with ops.device(self._ctx.tpu_host_placement_function(core_id=0)):
      for name in self._names:
        # Note: `dequeue_ops` is deliberately rebound here, shadowing the
        # flat list built above (which is no longer needed at this point).
        dequeue_ops = dequeue_ops_by_name[name]
        for i, item in enumerate(dequeue_ops):
          if dequeue_ops[i][0].shape.ndims == 0:
            raise RuntimeError(
                'All tensors outfed from TPU should preserve batch size '
                'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
          # TODO(xiejw): Allow users to specify the axis for batch size
          # dimension.
          dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)

        if self._tensor_keys[name] is not None:
          # The user-provided eval_metrics[1] is a dict.
          dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
          try:
            ret[name] = self._host_fns[name](**dequeue_ops)
          except TypeError as e:
            logging.warning(
                'Exception while calling %s: %s. It is likely the tensors '
                '(%s[1]) do not match the '
                'function\'s arguments', name, e, name)
            raise e
        else:
          ret[name] = self._host_fns[name](*dequeue_ops)

    return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
  """Hook to run host calls when use_tpu=False."""

  def __init__(self, tensors):
    # Tensors fetched on every step via before_run().
    self._tensors = tensors

  def begin(self):
    # Summary writers must be initialized before the outfeed thread starts,
    # so this duplicates the TPUInfeedOutfeedSessionHook logic instead of
    # relying on a separate hook (which would not guarantee ordering).
    # TODO(jhseu): Make a wrapper hook instead?
    self._init_ops = contrib_summary.summary_writer_initializer_op()
    # Each initializer op's first input is the writer resource that needs to
    # be flushed at the end.
    self._finalize_ops = [
        contrib_summary.flush(writer=op.inputs[0]) for op in self._init_ops
    ]

  def after_create_session(self, session, coord):
    session.run(self._init_ops)

  def before_run(self, run_context):
    return basic_session_run_hooks.SessionRunArgs(self._tensors)

  def end(self, session):
    session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
  """Count examples during runtime."""

  def __init__(self,
               batch_size,
               every_n_steps=100,
               every_n_secs=None,
               output_dir=None,
               summary_writer=None):
    # Global batch size, used to convert steps/sec into examples/sec.
    self._batch_size = batch_size
    super(ExamplesPerSecondHook, self).__init__(
        every_n_steps=every_n_steps,
        every_n_secs=every_n_secs,
        output_dir=output_dir,
        summary_writer=summary_writer)

  def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
    # steps/sec scaled by the batch size gives examples/sec.
    examples_rate = elapsed_steps * self._batch_size / elapsed_time
    if self._summary_writer is not None:
      self._summary_writer.add_summary(
          Summary(value=[
              Summary.Value(tag='examples_sec', simple_value=examples_rate)
          ]), global_step)
    logging.info('examples/sec: %g', examples_rate)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
  """Change SIGINT (CTRL^C) handler to force quit the process.

  The default behavior often results in hanging processes.
  The original handler is restored after training/evaluation.
  """

  def __init__(self):
    # Remember whatever handler was installed so end() can restore it.
    self._saved_sigint_handler = signal.getsignal(signal.SIGINT)

  def before_run(self, run_context):
    # SIG_DFL terminates the process immediately rather than raising
    # KeyboardInterrupt, avoiding hung background threads.
    signal.signal(signal.SIGINT, signal.SIG_DFL)

  def end(self, session):
    signal.signal(signal.SIGINT, self._saved_sigint_handler)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size in params to a per-shard batch
size when calling the `input_fn` and `model_fn`. Users should specify
global batch size in constructor, and then get the batch size for each shard
in `input_fn` and `model_fn` by `params['batch_size']`.
For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on
`per_host_input_for_training` in `TPUConfig`.
For evaluation, `model_fn` gets per-core batch size and `input_fn` get
per-host batch size.
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
predict will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
1. TPU evaluation only works on single host.
2. `input_fn` for evaluation should not throw OutOfRange error for all
evaluation steps and all batches should have the same size.
Example (MNIST):
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
    if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Predict support on TPU is not yet implemented. So, `predict` and
`export_savedmodel` are executed on CPU, even if `use_tpu` is true.
"""
def __init__(self,
             model_fn=None,
             model_dir=None,
             config=None,
             params=None,
             use_tpu=True,
             train_batch_size=None,
             eval_batch_size=None,
             predict_batch_size=None,
             batch_axis=None):
  """Constructs an `TPUEstimator` instance.

  Args:
    model_fn: Model function as required by `Estimator`. For training, the
      returned `EstimatorSpec` cannot have hooks as it is not supported in
      `TPUEstimator`.
    model_dir: Directory to save model parameters, graph and etc. This can
      also be used to load checkpoints from the directory into a estimator to
      continue training a previously saved model. If `None`, the model_dir in
      `config` will be used if set. If both are set, they must be same. If
      both are `None`, a temporary directory will be used.
    config: An `tpu_config.RunConfig` configuration object. Cannot be `None`.
    params: An optional `dict` of hyper parameters that will be passed into
      `input_fn` and `model_fn`. Keys are names of parameters, values are
      basic python types. There are reserved keys for `TPUEstimator`,
      including 'batch_size'.
    use_tpu: A bool indicating whether TPU support is enabled. Currently,
      - TPU training and evaluation respect this bit.
      - Predict still happens on CPU.
    train_batch_size: An int representing the global training batch size.
      TPUEstimator transforms this global batch size to a per-shard batch
      size, as params['batch_size'], when calling `input_fn` and `model_fn`.
      Cannot be `None` if `use_tpu` is `True`. Must be divisible by
      `config.tpu_config.num_shards`.
    eval_batch_size: An int representing evaluation batch size.
      Must be divisible by `config.tpu_config.num_shards`.
    predict_batch_size: An int representing the prediction batch size.
      Must be divisible by `config.tpu_config.num_shards`.
    batch_axis: A python tuple of int values describing how each tensor
      produced by the Estimator `input_fn` should be split across the TPU
      compute shards. For example, if your input_fn produced (images, labels)
      where the images tensor is in `HWCN` format, your shard dimensions would
      be [3, 0], where 3 corresponds to the `N` dimension of your images
      Tensor, and 0 corresponds to the dimension along which to split the
      labels to match up with the corresponding images. If None is supplied,
      and per_host_input_for_training is True, batches will be sharded based
      on the major dimension. If tpu_config.per_host_input_for_training is
      False, batch_axis is ignored.

  Raises:
    ValueError: `params` has reserved keys already.
  """
  if config is None or not isinstance(config, tpu_config.RunConfig):
    raise ValueError(
        '`config` must be provided with type `tpu_config.RunConfig`')

  if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
    raise ValueError('{} are reserved keys but existed in params {}.'.format(
        _RESERVED_PARAMS_KEYS, params))

  if use_tpu:
    # Batch sizes are only meaningful (and validated) when running on TPU;
    # train_batch_size is mandatory, eval/predict sizes are optional.
    if train_batch_size is None:
      raise ValueError('`train_batch_size` cannot be `None`')
    if not isinstance(train_batch_size, int):
      raise ValueError('`train_batch_size` must be an int')
    if train_batch_size < 1:
      raise ValueError('`train_batch_size` must be positive')

    # The specified batch size is the batch size for the entire computation.
    # The input_fn and model_fn are called per-shard, so we want to calculate
    # the per-shard batch size and pass that.
    if train_batch_size % config.tpu_config.num_shards != 0:
      raise ValueError(
          'train batch size {} must be divisible by number of shards {}'
          .format(train_batch_size, config.tpu_config.num_shards))

    if (not config.tpu_config.per_host_input_for_training and
        config.tpu_config.computation_shape):
      raise ValueError(
          'Model parallelism only supports per host input for training.')

    if eval_batch_size is not None:
      if not isinstance(eval_batch_size, int):
        raise ValueError('`eval_batch_size` must be an int')
      if eval_batch_size < 1:
        raise ValueError('`eval_batch_size` must be positive')
      if eval_batch_size % config.tpu_config.num_shards != 0:
        raise ValueError(
            'eval batch size {} must be divisible by number of shards {}'
            .format(eval_batch_size, config.tpu_config.num_shards))

    if predict_batch_size is not None:
      if not isinstance(predict_batch_size, int):
        raise ValueError('`predict_batch_size` must be an int')
      if predict_batch_size < 1:
        raise ValueError('`predict_batch_size` must be positive')
      if predict_batch_size % config.tpu_config.num_shards != 0:
        raise ValueError(
            'predict batch size {} must be divisible by number of shards {}'
            .format(predict_batch_size, config.tpu_config.num_shards))

  # Verifies the model_fn signature according to Estimator framework.
  estimator_lib._verify_model_fn_args(model_fn, params)  # pylint: disable=protected-access
  # We cannot store config and params in this constructor as parent
  # constructor might change them, such as assigning a temp dir for
  # config.model_dir.
  model_function = self._augment_model_fn(model_fn, batch_axis)

  # Passing non-None params as wrapped model_fn has it.
  params = params or {}
  super(TPUEstimator, self).__init__(
      model_fn=model_function,
      model_dir=model_dir,
      config=config,
      params=params)
  self._iterations_per_training_loop = (
      self._config.tpu_config.iterations_per_loop)

  if use_tpu and self._config.tpu_config.computation_shape:
    # Model parallelism: fetch the TPU topology once, up front, to build the
    # device assignment used for all later computations.
    try:
      with tf_session.Session(
          self._config.master,
          config=config_pb2.ConfigProto(
              operation_timeout_in_ms=_PINGING_MASTER_TIMEOUT_IN_MS)) as sess:
        logging.info('Initializing TPU system to fetch topology for model '
                     'parallelism.')
        topology = sess.run(tpu.initialize_system())
        device_assignment = tpu_device_assignment(
            topology,
            computation_shape=self._config.tpu_config.computation_shape,
            num_replicas=self._config.tpu_config.num_shards)
        logging.info('computation_shape: %s',
                     str(self._config.tpu_config.computation_shape))
        logging.info('num_replicas: %d', self._config.tpu_config.num_shards)
        logging.info('device_assignment.topology.device_coordinates: %s',
                     str(device_assignment.topology.device_coordinates))
        logging.info('device_assignment.core_assignment: %s',
                     str(device_assignment.core_assignment))
    except errors.DeadlineExceededError:
      raise ValueError(
          'Fail to connect master (%s). Please double check %s is '
          'correct.' % (self._config.master, self._config.master))
  else:
    device_assignment = None

  # All properties passed to _TPUContext are immutable.
  self._ctx = _TPUContext(self._config, train_batch_size, eval_batch_size,
                          predict_batch_size, use_tpu, device_assignment)
def _create_global_step(self, graph):
  """Creates a global step suitable for TPUs.

  Delegates to the module-level `_create_global_step` helper of the same
  name.

  Args:
    graph: The graph in which to create the global step.

  Returns:
    A global step `Tensor`.

  Raises:
    ValueError: if the global step tensor is already defined.
  """
  return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
  """Returns the hooks that implement `steps`/`max_steps` for training."""
  with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
    if ctx.is_running_on_cpu():
      # CPU path: defer entirely to the base Estimator behavior.
      return super(TPUEstimator, self)._convert_train_steps_to_hooks(
          steps, max_steps)

  # On TPU, at least one stopping criterion is mandatory.
  if steps is None and max_steps is None:
    raise ValueError(
        'For TPU training, one of `steps` or `max_steps` must be set. '
        'Cannot be both `None`.')

  # Estimator.train has explicit positiveness check.
  for value, label in ((steps, 'Train steps'), (max_steps, 'Train max_steps')):
    if value is not None:
      util_lib.check_positive_integer(value, label)

  return [
      _TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
  ]
def _convert_eval_steps_to_hooks(self, steps):
  """Returns the hooks that implement `steps` for evaluation.

  Args:
    steps: Number of evaluation steps; must be a positive int on TPU.

  Returns:
    A list of `SessionRunHook`s driving the evaluation loop.

  Raises:
    ValueError: If `steps` is `None` on TPU, if model parallelism is used
      with different train/eval clusters, or if `eval_batch_size` was not
      set.
    NotImplementedError: If evaluation would span more than one host.
  """
  with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
    if ctx.is_running_on_cpu():
      # CPU path: defer entirely to the base Estimator behavior.
      return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)

  if steps is None:
    raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')

  util_lib.check_positive_integer(steps, 'Eval steps')

  # TODO(ylc): Support evaluating with model parallelism in different cluster.
  if ctx.device_assignment and (self._config.evaluation_master !=
                                self._config.master):
    raise ValueError(
        'In the model-parallel case, both training and evaluation must run '
        'in the same cluster.')

  if self._config.tpu_config.num_shards > 8:
    raise NotImplementedError(
        'TPU evaluation is only supported with one host.')

  if self._ctx._eval_batch_size is None:  # pylint: disable=protected-access
    # Fixed: the original concatenated message was missing a space and read
    # "...cannot be `None`if evaluate()...".
    raise ValueError('`eval_batch_size` cannot be `None` '
                     'if evaluate() is called on TPU.')

  return [
      evaluation._StopAfterNEvalsHook(  # pylint: disable=protected-access
          num_evals=steps),
      _SetEvalIterationsHook(steps)
  ]
def _call_input_fn(self, input_fn, mode):
  """Calls the input function.

  Args:
    input_fn: The input function.
    mode: ModeKeys

  Returns:
    Either features or (features, labels) where features and labels are:
      features - `Tensor` or dictionary of string feature name to `Tensor`.
      labels - `Tensor` or dictionary of `Tensor` with labels.
    On TPU, a zero-argument closure wrapping `input_fn` is returned instead
    (see the comment below).

  Raises:
    ValueError: if input_fn takes invalid arguments or does not have `params`.
  """
  input_fn_args = util.fn_args(input_fn)
  config = self.config  # a deep copy.
  kwargs = {}
  if 'params' in input_fn_args:
    kwargs['params'] = self.params  # a deep copy.
  else:
    # `params` is mandatory: TPUEstimator injects the per-shard batch size
    # through it.
    raise ValueError('input_fn ({}) does not include params argument, '
                     'required by TPUEstimator to pass batch size as '
                     'params["batch_size"]'.format(input_fn))
  if 'config' in input_fn_args:
    kwargs['config'] = config

  with self._ctx.with_mode(mode) as ctx:
    # Setting the batch size in params first. This helps user to have same
    # input_fn for use_tpu=True/False.
    batch_size_for_input_fn = ctx.batch_size_for_input_fn
    if batch_size_for_input_fn is not None:
      kwargs['params'][_BATCH_SIZE_KEY] = batch_size_for_input_fn

    # For export_savedmodel, input_fn is never passed to Estimator. So,
    # `is_export_mode` must be False.
    if ctx.is_running_on_cpu(is_export_mode=False):
      with ops.device('/device:CPU:0'):
        return input_fn(**kwargs)

    # For TPU computation, input_fn should be invoked in a tf.while_loop for
    # performance. While constructing the tf.while_loop, the structure of
    # inputs returned by the `input_fn` needs to be recorded. The structure
    # includes whether features or labels is dict or single Tensor, dict keys,
    # tensor shapes, and dtypes. The recorded structure is used to create the
    # infeed dequeue ops, which must be wrapped and passed as a Fn, called
    # inside the TPU computation, as the TPU computation is wrapped inside a
    # tf.while_loop also. So, we either pass input_fn to model_fn or pass
    # dequeue_fn to model_fn. Here, `input_fn` is passed directly as
    # `features` in `model_fn` signature.
    def _input_fn():
      return input_fn(**kwargs)

    return _input_fn
def _augment_model_fn(self, model_fn, batch_axis):
    """Returns a new model_fn, which wraps the TPU support."""

    def _model_fn(features, labels, mode, config, params):
        """A Estimator `model_fn` for TPUEstimator."""
        with self._ctx.with_mode(mode) as ctx:
            model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
            # For export_savedmodel, input_fn is never passed to Estimator. So,
            # if features is callable, it means it is the input_fn passed by
            # TPUEstimator._call_input_fn. Then we can know if the mode == PREDICT,
            # it implies, it is the .predict API, not export_savedmodel API.
            is_export_mode = not callable(features)
            if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
                logging.info('Running %s on CPU', mode)
                return model_fn_wrapper.call_without_tpu(features, labels)
            assert labels is None, '`labels` passed to `model_fn` must be `None`.'
            # TPUEstimator._call_input_fn passes `input_fn` as features to here.
            assert callable(features), '`input_fn` is not callable.'
            input_fn = features
            input_holders = _InputPipeline(input_fn, batch_axis, ctx)
            enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
                input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
            if mode == model_fn_lib.ModeKeys.TRAIN:
                loss, host_call, scaffold = (
                    _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
                host_ops = host_call.create_tpu_hostcall()
                if host_ops is None:
                    host_ops = []
                hooks = [
                    TPUInfeedOutfeedSessionHook(
                        ctx,
                        enqueue_ops,
                        host_ops,
                        run_infeed_loop_on_coordinator=(
                            run_infeed_loop_on_coordinator)),
                    ExamplesPerSecondHook(ctx.global_batch_size),
                    InstallSignalHandlerHook(),
                    training.LoggingTensorHook(
                        {
                            'loss': array_ops.identity(loss),
                            'step': training.get_global_step()
                        },
                        every_n_secs=30)
                ] + input_hooks
                summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
                # Ensure all TPU-to-host variable syncs happen after the loss
                # is computed; their ops become the train_op below.
                with ops.control_dependencies([loss]):
                    update_ops = _sync_variables_ops()
                # Validate the TPU training graph to catch basic errors
                _validate_tpu_training_graph()
                return model_fn_lib.EstimatorSpec(
                    mode,
                    loss=loss,
                    training_hooks=hooks,
                    train_op=control_flow_ops.group(*update_ops),
                    scaffold=scaffold)
            if mode == model_fn_lib.ModeKeys.EVAL:
                total_loss, host_calls, scaffold = _eval_on_tpu_system(
                    ctx, model_fn_wrapper, dequeue_fn)
                iterations_per_loop_var = _create_or_get_iterations_per_loop()
                # total_loss accumulates over the loop; divide to report a mean.
                mean_loss = math_ops.div(total_loss,
                                         math_ops.cast(
                                             iterations_per_loop_var,
                                             dtype=total_loss.dtype))
                # Creates a dummy metric update_op for all metrics. Estimator expects
                # all metrics in eval_metric_ops have update_op and calls them one by
                # one. The real metric update_ops are invoked in a separated thread.
                # So, here give Estimator the dummy op for all metrics.
                with ops.control_dependencies([mean_loss]):
                    # After TPU evaluation computation is done (the mean_loss tensor),
                    # reads all variables back from TPU and updates the eval step
                    # counter properly
                    internal_ops_to_run = _sync_variables_ops()
                    internal_ops_to_run.append(
                        _increase_eval_step_op(iterations_per_loop_var))
                    with ops.control_dependencies(internal_ops_to_run):
                        dummy_update_op = control_flow_ops.no_op()
                host_call_ret = host_calls.create_tpu_hostcall()
                eval_metric_ops = {}
                eval_update_ops = []
                for k, v in host_call_ret['eval_metrics'].items():
                    # v is (value_tensor, real_update_op); real update runs via
                    # eval_update_ops in the infeed/outfeed hook instead.
                    eval_metric_ops[k] = (v[0], dummy_update_op)
                    eval_update_ops.append(v[1])
                if 'host_call' not in host_call_ret:
                    host_ops = []
                else:
                    host_ops = host_call_ret['host_call']
                hooks = [
                    TPUInfeedOutfeedSessionHook(
                        ctx,
                        enqueue_ops,
                        eval_update_ops + host_ops,
                        run_infeed_loop_on_coordinator=(
                            run_infeed_loop_on_coordinator)),
                ] + input_hooks
                return model_fn_lib.EstimatorSpec(
                    mode,
                    loss=mean_loss,
                    evaluation_hooks=hooks,
                    eval_metric_ops=eval_metric_ops,
                    scaffold=scaffold)
            # Predict
            assert mode == model_fn_lib.ModeKeys.PREDICT
            dummy_predict_op, host_calls, scaffold = _predict_on_tpu_system(
                ctx, model_fn_wrapper, dequeue_fn)
            with ops.control_dependencies([dummy_predict_op]):
                internal_ops_to_run = _sync_variables_ops()
                with ops.control_dependencies(internal_ops_to_run):
                    dummy_predict_op = control_flow_ops.no_op()
            # In train and evaluation, the main TPU program is passed to monitored
            # training session to run. Infeed enqueue and outfeed dequeue are
            # executed in side threads. This is not the configuration for
            # prediction mode.
            #
            # For prediction, the Estimator executes the EstimatorSpec.predictions
            # directly and yield the element (via generator) to call site. So, the
            # outfeed based prediction must be passed to MonitoredSession directly.
            # Other parts of the TPU execution are organized as follows.
            #
            # 1. All outfeed based Tensors must be grouped with predictions Tensors
            #    to form a single invocation. This avoid the issue we might trigger
            #    multiple outfeeds incorrectly. To achieve this, `host_call` is
            #    placed in control_dependencies of `stopping_signals`, and
            #    `stopping_signals` is passed into _StoppingPredictHook, which sets
            #    the `stopping_signals` as SessionRunArgs. MonitoredSession merges
            #    all SessionRunArgs with the fetch in session.run together.
            #
            # 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
            #    are grouped together. They will be launched once and only once in
            #    side threads and they quit naturally according to the SAME stopping
            #    condition.
            enqueue_ops.append(dummy_predict_op)
            host_call_ret = host_calls.create_tpu_hostcall()
            if 'host_call' not in host_call_ret:
                host_ops = []
            else:
                host_ops = host_call_ret['host_call']
            predictions = host_call_ret['predictions']
            stopping_signals = host_call_ret['signals']
            with ops.control_dependencies(host_ops):
                host_ops = []  # Empty, we do not need it anymore.
                scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
                    stopping_signals)
                hooks = [
                    _StoppingPredictHook(scalar_stopping_signal),
                    TPUInfeedOutfeedSessionHookForPrediction(ctx, enqueue_ops,
                                                             host_ops),
                ] + input_hooks
            return model_fn_lib.EstimatorSpec(
                mode,
                prediction_hooks=hooks,
                predictions=predictions,
                scaffold=scaffold)

    return _model_fn
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
    """Executes `model_fn_wrapper` multiple times on all TPU shards."""
    iterations_per_loop_var = _create_or_get_iterations_per_loop()
    single_tpu_eval_step, host_calls, captured_scaffold_fn = (
        model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn))

    def multi_tpu_eval_steps_on_single_shard():
        # Repeats the single eval step `iterations_per_loop` times on one
        # shard, threading the accumulated loss through starting at _ZERO_LOSS.
        return training_loop.repeat(
            iterations_per_loop_var,
            single_tpu_eval_step, [_ZERO_LOSS],
            name='loop')

    # Only one shard's loss output is needed (outputs_from_all_shards=False).
    (loss,) = tpu.shard(
        multi_tpu_eval_steps_on_single_shard,
        inputs=[],
        num_shards=ctx.num_shards,
        outputs_from_all_shards=False,
        device_assignment=ctx.device_assignment)
    # The scaffold is built outside the sharded computation from the fn
    # captured inside it.
    scaffold = _get_scaffold(captured_scaffold_fn)
    return loss, host_calls, scaffold
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
    """Executes `model_fn_wrapper` multiple times on all TPU shards.

    Args:
      ctx: internal TPU context providing `num_shards` and `device_assignment`.
      model_fn_wrapper: `_ModelFnWrapper` supplying the single train step fn.
      dequeue_fn: callable producing infeed-dequeued features/labels.

    Returns:
      A `(loss, host_call, scaffold)` tuple from the first shard.
    """
    iterations_per_loop_var = _create_or_get_iterations_per_loop()
    single_tpu_train_step, host_call, captured_scaffold_fn = (
        model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))

    def multi_tpu_train_steps_on_single_shard():
        # Repeats the single train step `iterations_per_loop` times on one
        # shard, threading the loss through starting at _INITIAL_LOSS.
        return training_loop.repeat(
            iterations_per_loop_var,
            single_tpu_train_step, [_INITIAL_LOSS],
            # Fix: was name=b'loop'. Name scopes must be `str`; a bytes name
            # fails TF's scope-name validation on Python 3 and was
            # inconsistent with _eval_on_tpu_system above.
            name='loop')

    # Only one shard's loss output is needed (outputs_from_all_shards=False).
    (loss,) = tpu.shard(
        multi_tpu_train_steps_on_single_shard,
        inputs=[],
        num_shards=ctx.num_shards,
        outputs_from_all_shards=False,
        device_assignment=ctx.device_assignment)
    # The scaffold is built outside the sharded computation from the fn
    # captured inside it.
    scaffold = _get_scaffold(captured_scaffold_fn)
    return loss, host_call, scaffold
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
    """Executes `model_fn_wrapper` multiple times on all TPU shards.

    Args:
      ctx: internal TPU context providing `num_cores`.
      model_fn_wrapper: `_ModelFnWrapper` supplying the single predict step fn.
      dequeue_fn: callable producing infeed-dequeued features.

    Returns:
      A `(dummy_predict_op, host_calls, scaffold)` tuple; predictions flow out
      via outfeed/host calls, not via the op's value.
    """
    num_cores = ctx.num_cores
    single_tpu_predict_step, host_calls, captured_scaffold_fn = (
        model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn))

    def multi_tpu_predict_steps_on_single_shard():

        def cond(scalar_stopping_signal):
            # Keep looping until the dataset's sentinel batch signals stop.
            return math_ops.logical_not(
                _StopSignals.should_stop(scalar_stopping_signal))

        inputs = [_StopSignals.NON_STOPPING_SIGNAL]
        outputs = training_loop.while_loop(
            # Fix: was name=b'loop'. Name scopes must be `str`; a bytes name
            # fails TF's scope-name validation on Python 3 and was
            # inconsistent with _eval_on_tpu_system.
            cond, single_tpu_predict_step, inputs=inputs, name='loop')
        return outputs

    (dummy_predict_op,) = tpu.shard(
        multi_tpu_predict_steps_on_single_shard,
        inputs=[],
        num_shards=num_cores,
        outputs_from_all_shards=False)
    # The scaffold is built outside the sharded computation from the fn
    # captured inside it.
    scaffold = _get_scaffold(captured_scaffold_fn)
    return dummy_predict_op, host_calls, scaffold
def _wrap_computation_in_while_loop(device, op_fn):
    """Wraps the ops generated by `op_fn` in tf.while_loop."""

    def _body(step):
        # Re-materialize op_fn's ops on every iteration and advance the
        # counter only after they have run.
        with ops.control_dependencies(op_fn()):
            return step + 1

    iterations_per_loop_var = _create_or_get_iterations_per_loop()

    def _keep_going(step):
        return step < total_iterations

    # By setting parallel_iterations=1, the parallel execution in while_loop is
    # basically turned off.
    with ops.device(device):
        total_iterations = array_ops.identity(iterations_per_loop_var)
        return control_flow_ops.while_loop(
            _keep_going,
            _body, [constant_op.constant(0)],
            parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
    """Wraps the ops generated by `op_fn` in tf.while_loop."""

    def _keep_going(scalar_stopping_signal):
        # Loop while the stopping signal has not fired.
        return math_ops.logical_not(
            _StopSignals.should_stop(scalar_stopping_signal))

    def _body(unused_scalar_stopping_signal):
        step_result = op_fn()
        # op_fn reports both the ops to run and the next stopping signals.
        with ops.control_dependencies(step_result['ops']):
            return _StopSignals.as_scalar_stopping_signal(step_result['signals'])

    # By setting parallel_iterations=1, the parallel execution in while_loop is
    # basically turned off.
    with ops.device(device):
        return control_flow_ops.while_loop(
            _keep_going,
            _body, [_StopSignals.NON_STOPPING_SIGNAL],
            parallel_iterations=1)
def _validate_tpu_training_graph():
    """Validate graph before running distributed training.

    Raises:
      ValueError: If the graph seems invalid for running on device
    """
    # A CrossReplicaSum op must be present for gradients to be aggregated
    # across replicas; it is introduced by the CrossShardOptimizer wrapper.
    graph_ops = ops.get_default_graph().get_operations()
    has_cross_replica_sum = any(
        node.type == _CROSS_REPLICA_SUM_OP for node in graph_ops)
    if not has_cross_replica_sum:
        raise ValueError(
            'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can be captured only. Please file bug .')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file bug .')
return self._object
def _get_scaffold(captured_scaffold_fn):
    """Retrieves the Scaffold from `captured_scaffold_fn`."""
    # Run the user's scaffold_fn under a capturing context so any accidental
    # reference to a TPU-replicated tensor raises a clear error.
    with _CapturingContext(message='Inside scaffold_fn'):
        scaffold_fn = captured_scaffold_fn.get()
        if not scaffold_fn:
            scaffold = None
        else:
            scaffold = scaffold_fn()
            if scaffold is None:
                raise ValueError(
                    'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')

    if scaffold:
        # Wrap finalize so it is also checked for TPU-tensor references.
        wrapped_finalize = scaffold.finalize

        def _finalize():
            with _CapturingContext('Inside Scaffold.finalize'):
                wrapped_finalize()

        scaffold.finalize = _finalize
    return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
    """Tracks references to Tensors defined in TPU replication."""

    def __init__(self, message):
        control_flow_ops.ControlFlowContext.__init__(self)
        # Prefix used in the error raised by AddOp to identify the context.
        self._message = message

    def AddOp(self, op):  # pylint: disable=invalid-name
        # Reject any op whose input was produced inside a TPU-replicated
        # computation (detected via the _tpu_replicate node attribute).
        for c in op.inputs:
            if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr:  # pylint: disable=protected-access
                raise ValueError('{}: Op {} depends on TPU computation {}, '
                                 'which is not allowed.'.format(self._message, op, c))

    def __enter__(self):
        # Install self as the graph's control flow context, remembering the
        # previous one for restoration on exit.
        # pylint: disable=protected-access
        self._g = ops.get_default_graph()
        self._old = self._g._get_control_flow_context()
        self._g._set_control_flow_context(self)
        # pylint: enable=protected-access

    def __exit__(self, _, __, ___):  # pylint: disable=invalid-name
        self._g._set_control_flow_context(self._old)  # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'bug')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer_hook(self):
"""Returns a `SessionRunHook` to initialize this dataset.
This must be called before `features_and_labels`.
"""
iterator = self._dataset.make_initializable_iterator()
# pylint: disable=protected-access
hook = estimator_lib._DatasetInitializerHook(iterator)
self._iterator = iterator
return hook
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
# TODO(xiejw): Extend this to support final partial batch.
class _InputsWithStoppingSignals(_Inputs):
    """Inputs with `_StopSignals` inserted into the dataset."""

    def __init__(self, dataset, batch_size):
        assert dataset is not None

        # Tag every user batch with a non-stopping signal...
        user_provided_dataset = dataset.map(
            _InputsWithStoppingSignals.insert_stopping_signal(
                stop=False, batch_size=batch_size))
        # ...then append one sentinel batch carrying the stop signal.
        final_batch_dataset = dataset.take(1).map(
            _InputsWithStoppingSignals.insert_stopping_signal(
                stop=True, batch_size=batch_size))
        combined_dataset = user_provided_dataset.concatenate(
            final_batch_dataset).prefetch(2)

        super(_InputsWithStoppingSignals, self).__init__(dataset=combined_dataset)
        self._current_inputs = None

    def features_and_labels(self):
        if self._current_inputs is not None:
            raise RuntimeError(
                'Internal Error: The previous inputs have not been properly '
                'consumed. First call features_and_labels, then call signals.')

        # One iterator element is a dict with 'features', optional 'labels'
        # and 'signals'; stash it so signals() can consume the same element.
        inputs_with_signals = self._iterator.get_next()
        self._current_inputs = inputs_with_signals
        return (inputs_with_signals['features'],
                inputs_with_signals.get('labels'))

    def signals(self):
        """Returns the `Signals` from `_Inputs`."""
        if self._current_inputs is None:
            raise RuntimeError(
                'Internal Error: The current inputs have not been properly '
                'generated. First call features_and_labels, then call signals.')
        current = self._current_inputs
        self._current_inputs = None
        return current['signals']

    @staticmethod
    def insert_stopping_signal(stop, batch_size):
        """Inserts stopping_signal into dataset via _map_fn.

        Here we change the data structure in the dataset, such that the return
        value is a dictionary now and `features`, `labels`, and `signals` are
        three distinguished keys in that dict. This provides a better structure,
        which eases the process to decompose the inputs (see
        `features_and_labels`).

        Args:
          stop: bool, state of current stopping signals.
          batch_size: int, batch size.

        Returns:
          A map_fn passed to dataset.map API.
        """

        def _map_fn(*args):
            wrapped_features, wrapped_labels = _Inputs._parse_inputs(args)
            wrapped = {'features': wrapped_features}
            if wrapped_labels is not None:
                wrapped['labels'] = wrapped_labels
            wrapped['signals'] = _StopSignals(
                stop=stop, batch_size=batch_size).as_dict()
            return wrapped

        return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = 0.0
STOPPING_SIGNAL = 1.0
def __init__(self, stop, batch_size):
self._stop = stop
self._batch_size = batch_size
def as_dict(self):
shape = [self._batch_size, 1]
dtype = dtypes.float32
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
return {'stopping': stopping}
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
return scalar_stopping_signal >= _StopSignals.STOPPING_SIGNAL
class _SignalsHelper(object):
"""A general helper class to handle common signals manipulation."""
def __init__(self, signals):
self._signal_keys = []
for key in sorted(signals.iterkeys()):
self._signal_keys.append(key)
@property
def num_signals(self):
return len(self._signal_keys)
def unflatten(self, tensor_list):
return dict(zip(self._signal_keys, tensor_list))
@staticmethod
def as_tensor_list(signals):
return [signals[key] for key in sorted(signals.iterkeys())]
|
dataloader_iter.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place
# NOTE: queue has a different name in python2 and python3
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException
from .flat import _flatten_batch, _restore_batch
__all__ = ['get_worker_info']
class _DataLoaderIterBase(object):
"""
Iterator implement of DataLoader, will load and feed mini-batch
data by setting in given dataloader.
Args:
loader(instance of DataLoader): instance of `fluid.io.DataLoader`
"""
def __init__(self, loader):
self._dataset = loader.dataset
self._feed_list = loader.feed_list or []
self._places = loader.places
self._return_list = loader.return_list
self._batch_sampler = loader.batch_sampler
self._auto_collate_batch = loader.auto_collate_batch
self._num_workers = loader.num_workers
self._use_buffer_reader = loader.use_buffer_reader
self._use_shared_memory = loader.use_shared_memory
self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
self._worker_init_fn = loader.worker_init_fn
self._dataset_kind = loader.dataset_kind
self._pin_memory = loader.pin_memory
if self._auto_collate_batch:
self._sampler_iter = iter(loader.batch_sampler)
self._collate_fn = loader.collate_fn or default_collate_fn
else:
if self._dataset_kind == _DatasetKind.MAP:
self._sampler_iter = iter(list(range(len(self._dataset))))
else:
self._sampler_iter = iter(
_InfiniteIterableSampler(self._dataset, 1))
self._collate_fn = loader.collate_fn or default_convert_fn
# LoDTensorBlockingQueue instance for create_py_reader and a thread
# to put mini-batch data to self._blocking_queue, mini-batch data
# will be get from:
# 1. multi-process mode: get data from workers' result queue
# 2. single-process mode: read mini-batch data in main process
self._blocking_queue = None
self._thread = None
self._thread_done_event = threading.Event()
def __iter__(self):
return self
def __len__(self):
return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
    """
    Single process implement of DataLoaderIter, loading data from
    loader.data in main process
    """

    def __init__(self, loader):
        super(_DataLoaderIterSingleProcess, self).__init__(loader)

        # Fetcher reads raw samples from the dataset and collates them.
        self._dataset_fetcher = _DatasetKind.create_fetcher(
            self._dataset_kind, self._dataset, self._auto_collate_batch,
            self._collate_fn, True)

        # NOTE: _structure_infos used to record the data structure of
        # batch to restore batch structure after reading Tensor
        # from blocking_queue in single-process mode. Note that
        # only single process is used in single-process mode, we
        # can record the data structure sequencely in a list without
        # recording the send and recv index
        self._structure_infos = []

        # NOTE: len(self._places) batch data compose as an output
        # iteration, set blocking_queue can cache 2 iteration datas
        # at most here
        self._blocking_queue_capacity = 2 * len(self._places)

        self._init_thread()

    def _init_thread(self):
        # Feed-variable metadata consumed by core.create_py_reader.
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._blocking_queue_capacity,
            len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        # Daemon thread so an abandoned iterator cannot block interpreter exit.
        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _thread_loop(self, legacy_expected_place):
        try:
            #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
            # and it will call platform::SetDeviceId() in c++ internally.
            # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
            # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
            # APIs in this thread.
            _set_expected_place(legacy_expected_place)

            for indices in self._sampler_iter:
                # read data from dataset in mini-batch
                batch = self._dataset_fetcher.fetch(indices)

                # flat batch and record structure infos
                batch, structure = _flatten_batch(batch)
                self._structure_infos.append(structure)

                # pack as LoDTensorArray
                array = core.LoDTensorArray()
                for slot in batch:
                    if isinstance(slot, paddle.Tensor):
                        slot = slot.value().get_tensor()
                    elif not isinstance(slot, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(slot, core.CPUPlace())
                        slot = tmp
                    array.append(slot)

                # push returns False when the queue was closed/killed.
                if not self._blocking_queue.push(array):
                    break

                if self._thread_done_event.is_set():
                    break

            self._blocking_queue.close()
            self._shutdown_thread()
        except StopIteration:
            # Normal end of the sampler: close queue so __next__ sees the end.
            self._blocking_queue.close()
        except Exception:
            # Unexpected failure: kill the queue so the consumer unblocks,
            # then re-raise in this thread for visibility.
            self._blocking_queue.kill()
            self._shutdown_thread()
            logging.warning("DataLoader reader thread raised an exception.")
            six.reraise(*sys.exc_info())

    def __next__(self):
        try:
            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()

            return data
        except StopIteration:
            self._reader.shutdown()
            six.reraise(*sys.exc_info())

    def _shutdown_thread(self):
        # Signal the reader thread and join it (unless called from itself).
        if self._thread:
            self._thread_done_event.set()
            if self._thread is not threading.current_thread():
                self._thread.join()
            self._thread = None

    # python2 compatibility
    def next(self):
        return self.__next__()

    def __del__(self):
        # _blocking_queue in keep order mode holds sub-threads
        # need to release thread resources on unexpected exit
        if self._blocking_queue:
            self._blocking_queue.close()
        # NOTE: blocking queue should be closed firstly for
        # blocking queue read may hang and _thread_done_event
        # cannot be checked
        self._shutdown_thread()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
    super(_DataLoaderIterMultiProcess, self).__init__(loader)

    assert self._num_workers > 0, "Multi-process DataLoader " \
        "invalid num_workers({})".format(self._num_workers)

    # subprocess workers' result queue
    self._data_queue = None

    # data get from _data_queue will be reordered by _rcvd_idx
    # for data order keeping, data index not equal _rcvd_idx
    # will be cached in _task_infos
    self._send_idx = 0
    self._rcvd_idx = 0
    self._batches_outstanding = 0
    self._task_infos = {}
    self._structure_infos = []

    # indices outstand as _outstanding_capacity at first, and
    # blocking_queue capacity is also _outstanding_capacity.
    # _outstanding_capacity here to make sure each indices_queue
    # has at least 2 indices, and outstanding batch cached
    # output data for at least 2 iterations(Note that len(_places)
    # batches will be composed as an iteration output)
    self._outstanding_capacity = 2 * max(self._num_workers,
                                         len(self._places))

    # see _try_put_indices
    self._thread_lock = threading.Lock()

    # init workers and indices queues and put 2 indices in each indices queue
    self._init_workers()
    for _ in range(self._outstanding_capacity):
        self._try_put_indices()

    self._init_thread()
    self._shutdown = False
def _init_workers(self):
    """Spawns worker subprocesses and the queues used to talk to them."""
    # multiprocess worker and indice queue list initial as empty
    self._workers = []
    self._worker_status = []
    self._indices_queues = []
    # Round-robin cycle for distributing batch indices to workers.
    self._workers_idx_cycle = itertools.cycle(range(self._num_workers))

    # create data_queue for workers
    self._data_queue = multiprocessing.Queue()

    # event for workers and thread, thread event is only need
    # in multi-processing mode
    self._workers_done_event = multiprocessing.Event()
    self._thread_done_event = threading.Event()

    for i in range(self._num_workers):
        # Each worker gets its own indices queue; results funnel into the
        # shared _data_queue.
        indices_queue = multiprocessing.Queue()
        self._indices_queues.append(indices_queue)
        worker = multiprocessing.Process(
            target=_worker_loop,
            args=(self._dataset, self._dataset_kind, indices_queue,
                  self._data_queue, self._workers_done_event,
                  self._auto_collate_batch, self._collate_fn,
                  self._worker_init_fn, i, self._num_workers,
                  self._use_shared_memory))
        worker.daemon = True
        worker.start()
        self._workers.append(worker)
        self._worker_status.append(True)

    # Register pids so the framework can reap them; install SIGCHLD handler
    # to detect worker death.
    core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
    _set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
    """Best-effort drain of the workers' result queue, then close it."""
    if self._data_queue is not None:
        while True:
            try:
                self._data_queue.get_nowait()
            except:
                # NOTE(review): bare except is deliberate best-effort
                # shutdown code — presumably this is meant to catch
                # queue.Empty (and any teardown-time IPC errors); confirm
                # before narrowing it.
                self._data_queue.cancel_join_thread()
                self._data_queue.close()
                break
def _init_thread(self):
    """Creates the py_reader pipeline and starts the collector thread."""
    # Feed-variable metadata consumed by core.create_py_reader.
    self._var_names = [v.name for v in self._feed_list]
    self._shapes = [v.shape for v in self._feed_list]
    self._dtypes = [v.dtype for v in self._feed_list]
    self._need_check_feed = [
        v.desc.need_check_feed() for v in self._feed_list
    ]
    # if only 1 place, do not need to keep order
    self._blocking_queue = core.init_lod_tensor_blocking_queue(
        core.Variable(), self._outstanding_capacity, len(self._places) > 1)
    self._reader = core.create_py_reader(
        self._blocking_queue, self._var_names, self._shapes, self._dtypes,
        self._need_check_feed, self._places, self._use_buffer_reader, True,
        self._pin_memory)

    self._thread_done_event = threading.Event()
    # Daemon thread so an abandoned iterator cannot block interpreter exit.
    self._thread = threading.Thread(
        target=self._thread_loop, args=(_current_expected_place(), ))
    self._thread.daemon = True
    self._thread.start()
def _shutdown_worker(self, worker_id):
if self._worker_status[worker_id]:
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self, timeout=None):
    """Stops the collector thread and all workers; idempotent.

    Args:
        timeout: optional per-worker join timeout in seconds; None waits
            indefinitely.
    """
    if not self._shutdown:
        try:
            self._exit_thread_expectedly()
            self._clear_and_remove_data_queue()

            # set _workers_done_event should be set before put None
            # to indices_queue, workers will exit on reading None from
            # indices_queue
            self._workers_done_event.set()
            for i in range(self._num_workers):
                self._shutdown_worker(i)

            if not self._shutdown:
                for w in self._workers:
                    w.join(timeout)
                for q in self._indices_queues:
                    q.cancel_join_thread()
                    q.close()
        finally:
            # Always unregister pids and mark shut down, even if joins fail.
            core._erase_process_pids(id(self))
            self._shutdown = True
def _exit_thread_expectedly(self):
    """Graceful stop: signal the thread and close the blocking queue."""
    self._thread_done_event.set()
    self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
    """Error stop: signal the thread and kill the queue to unblock readers."""
    self._thread_done_event.set()
    self._blocking_queue.kill()
    logging.error("DataLoader reader thread raised an exception!")
def _thread_loop(self, legacy_expected_place):
    """Collector thread: moves worker results into the blocking queue."""
    #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
    # and it will call platform::SetDeviceId() in c++ internally.
    # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
    # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
    # APIs in this thread.
    _set_expected_place(legacy_expected_place)

    while not self._thread_done_event.is_set():
        batch = self._get_data()
        if not self._thread_done_event.is_set():
            if batch is None:
                # None means data is drained; stop gracefully.
                self._exit_thread_expectedly()
            else:
                try:
                    # pack as LoDTensorArray
                    array = core.LoDTensorArray()
                    if self._use_shared_memory:
                        for tensor in batch:
                            array.append(tensor)
                    else:
                        # LoDTensor not in shared memory is not
                        # serializable, cannot be create in workers
                        for slot in batch:
                            if isinstance(slot, paddle.Tensor):
                                slot = slot.value().get_tensor()
                            elif not isinstance(slot, core.LoDTensor):
                                tmp = core.LoDTensor()
                                tmp.set(slot, core.CPUPlace())
                                slot = tmp
                            array.append(slot)

                    # push returns False when the queue was closed/killed.
                    if not self._blocking_queue.push(array):
                        self._blocking_queue.close()
                except:
                    # NOTE(review): bare except intentionally re-raises
                    # after flagging the error state.
                    self._exit_thread_unexpectedly()
                    six.reraise(*sys.exc_info())
                finally:
                    # Advance receive index whether the push succeeded or not.
                    self._rcvd_idx += 1
def _get_data(self):
    """Returns the next in-order batch from workers, or None when drained.

    Blocks (with timeout polling) on the workers' result queue, reorders
    out-of-order results through self._task_infos, and handles worker
    failure / IterableDataset exhaustion.
    """
    while not self._thread_done_event.is_set():
        # For IterableDataset, batch indices is generated infinitely
        # for each worker to raise StopIteration, but a StopIteration
        # raising process will discard a batch indices which is count
        # in _send_idx but will not increase _rcvd_idx, so we check
        # whether the worker is still alive here to skip the discarded
        # batch indices and increase _rcvd_idx
        if self._dataset_kind == _DatasetKind.ITER:
            while self._rcvd_idx < self._send_idx:
                # Fix: removed stray `sys.stdout.flush()` debug leftover
                # that ran on every poll iteration.
                info = self._task_infos[self._rcvd_idx]
                if len(info) == 3 or self._worker_status[info[0]]:
                    break
                del self._task_infos[self._rcvd_idx]
                self._rcvd_idx += 1
                self._batches_outstanding -= 1
            else:
                # NOTE: _rcvd_idx and _send_idx only record batches among
                #       workers, if batches among workers drained, there
                #       may also be data in blocking queue
                if self._batches_outstanding < len(self._places):
                    return None
                continue

        # Fast path: the next in-order batch has already been received
        # and cached as a 3-tuple (worker_idx, batch, structure).
        if self._rcvd_idx in self._task_infos and \
                len(self._task_infos[self._rcvd_idx]) == 3:
            info = self._task_infos.pop(self._rcvd_idx)
            self._structure_infos.append(info[2])
            return info[1]

        try:
            # [ avoid hang ]: main process may blocking at _reader.read_next when
            # KeyboardInterrupt, we do following tradeoff:
            # 1. get data with timeout, MP_STATUS_CHECK_INTERVAL(5s) as timeout
            #    default, if KeyboardInterrupt blocking, failed workers will be
            #    checked and raise RuntimeError to quit DataLoader in timeout
            #    exception handling.
            # 2. if get data timeout and check workers all alive, continue to
            #    get data again
            data = self._data_queue.get(timeout=self._timeout)
        except Exception as e:
            # check if thread done event set when waiting data
            if self._thread_done_event.is_set():
                continue

            # check failed workers
            failed_workers = []
            for i, w in enumerate(self._workers):
                if self._worker_status[i] and not w.is_alive():
                    failed_workers.append(w)
                    self._shutdown_worker(i)
            if len(failed_workers) > 0:
                self._exit_thread_unexpectedly()
                pids = ', '.join(str(w.pid) for w in failed_workers)
                raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                    "pids: {}".format(len(failed_workers), pids))

            # get(timeout) will call _poll(timeout) and may raise IOError
            if isinstance(e, queue.Empty) or isinstance(e, IOError):
                # continue on timeout to keep getting data from queue
                continue

            self._exit_thread_unexpectedly()
            logging.error("DataLoader reader thread failed({}) to read data from " \
                "workers' result queue.".format(e))
            six.reraise(*sys.exc_info())
        else:
            if self._dataset_kind == _DatasetKind.ITER and isinstance(
                    data, _IterableDatasetStopIteration):
                # if a worker get StopIteraion, we shutdown this worker,
                # note that this batch indices to trigger StopIteration
                # is discard, outstanding batch number should be decrease
                # and another indices should be put for other workers
                # may still working.
                self._shutdown_worker(data.worker_id)
                self._batches_outstanding -= 1
                self._try_put_indices()
                continue

            idx, batch, structure = data

            if isinstance(batch, _WorkerException):
                self._exit_thread_unexpectedly()
                batch.reraise()

            if idx == self._rcvd_idx:
                # In-order result: hand it out immediately.
                del self._task_infos[idx]
                self._structure_infos.append(structure)
                return batch
            else:
                # Out-of-order result: cache it until its turn comes.
                self._task_infos[idx] += (batch, structure)
                continue
    def _try_put_indices(self):
        """Fetch the next batch of indices from the sampler and hand it
        to one live worker's indices queue (no-op if the sampler is
        exhausted or no worker is alive)."""
        assert self._batches_outstanding <= self._outstanding_capacity, \
            "too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices is
        # called both from the main process (the blocking queue is read in
        # the main process) and from the reader thread, which can cause:
        # 1. "ValueError: generator already executing" in next(self._sampler_iter)
        # 2. re-entrant increments of _send_idx
        # A lock makes this thread-safe; _try_put_indices is a lightweight
        # function outside the data-reading pipeline, so the lock has
        # almost no performance impact.
        with self._thread_lock:
            try:
                indices = next(self._sampler_iter)
            except StopIteration:
                return
            # Round-robin over workers until a live one is found; the
            # for/else returns when every worker has already stopped.
            for i in range(self._num_workers):
                worker_idx = next(self._workers_idx_cycle)
                if self._worker_status[worker_idx]:
                    break
            else:
                return
            self._indices_queues[worker_idx].put((self._send_idx, indices))
            # Record which worker got this task; the (batch, structure)
            # payload is appended to this tuple when the result arrives.
            self._task_infos[self._send_idx] = (worker_idx, )
            self._batches_outstanding += 1
            self._send_idx += 1
    def __del__(self):
        # Best-effort cleanup when the iterator is garbage collected.
        self._try_shutdown_all()
    def _shutdown_on_exit(self):
        # Interpreter-exit cleanup; the positional argument presumably
        # bounds how long shutdown may block (TODO: confirm against
        # _try_shutdown_all's signature, defined outside this view).
        self._try_shutdown_all(1)
    def __next__(self):
        """Return the next (possibly multi-device) batch, restoring the
        original nested structure recorded by the workers; raises
        StopIteration after shutting everything down when drained."""
        try:
            # _batches_outstanding records the total number of batches
            # between _try_put_indices and output. It should equal
            # _outstanding_capacity while data is not drained; once it
            # drops below the number of places there is not enough data
            # to produce the next output, so close the blocking queue and
            # set _thread_done_event — the reader will then raise
            # StopIteration, and workers/indices queues are torn down in
            # the StopIteration handler below.
            if self._batches_outstanding < len(self._places):
                self._thread_done_event.set()
                self._blocking_queue.close()
            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    # One structure record per place; consume exactly
                    # len(self._places) entries for this output.
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # Static graph organizes multi-device data as a list;
                    # with a single place there is only one device, so
                    # unwrap the list to stay compatible with dygraph mode.
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()
            self._on_output_batch()
            return data
        except StopIteration:
            self._reader.shutdown()
            self._try_shutdown_all()
            six.reraise(*sys.exc_info())
    # python2 compatibility
    def next(self):
        # Python 2 iterator protocol: delegate to __next__.
        return self.__next__()
    def _on_output_batch(self):
        # One delivered output consumes len(self._places) outstanding
        # batches (one per device). Refill after each decrement so the
        # indices queues stay topped up as capacity is freed.
        for _ in range(len(self._places)):
            self._batches_outstanding -= 1
            self._try_put_indices()
|
test_threading_local.py | import unittest
from doctest import DocTestSuite
from test import support
import weakref
import gc
# Modules under test
_thread = support.import_module('_thread')
threading = support.import_module('threading')
import _threading_local
class Weak(object):
    """Trivial weak-referenceable object (bare ``object()`` instances
    cannot be weakly referenced)."""
    pass
def target(local, weaklist):
    """Thread body: stash a fresh Weak instance on *local* and record a
    weak reference to it in *weaklist* so collection can be observed."""
    obj = Weak()
    local.weak = obj
    weaklist.append(weakref.ref(obj))
class BaseLocalTest:
    """Shared tests for thread-local storage implementations.

    Mixed into a unittest.TestCase subclass that sets ``_local`` to the
    implementation under test: ``_thread._local`` (C version) or
    ``_threading_local.local`` (pure-Python version).
    """

    def test_local_refs(self):
        self._local_refs(20)
        self._local_refs(50)
        self._local_refs(100)

    def _local_refs(self, n):
        # Run n short-lived threads that each store a weak-referenceable
        # object on the shared local; once the threads exit, (almost)
        # all of those objects should become collectable.
        local = self._local()
        weaklist = []
        for i in range(n):
            t = threading.Thread(target=target, args=(local, weaklist))
            t.start()
            t.join()
        del t
        gc.collect()
        self.assertEqual(len(weaklist), n)
        # XXX _threading_local keeps the local of the last stopped thread alive.
        deadlist = [weak for weak in weaklist if weak() is None]
        self.assertIn(len(deadlist), (n-1, n))
        # Assignment to the same thread local frees it sometimes (!)
        local.someothervar = None
        gc.collect()
        deadlist = [weak for weak in weaklist if weak() is None]
        self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))

    def test_derived(self):
        # Issue 3088: if there is a threads switch inside the __init__
        # of a threading.local derived class, the per-thread dictionary
        # is created but not correctly set on the object.
        # The first member set may be bogus.
        import time
        class Local(self._local):
            def __init__(self):
                time.sleep(0.01)
        local = Local()
        def f(i):
            local.x = i
            # Simply check that the variable is correctly set
            self.assertEqual(local.x, i)
        with support.start_threads(threading.Thread(target=f, args=(i,))
                                   for i in range(10)):
            pass

    def test_derived_cycle_dealloc(self):
        # http://bugs.python.org/issue6990
        class Local(self._local):
            pass
        locals = None
        passed = False
        e1 = threading.Event()
        e2 = threading.Event()
        def f():
            nonlocal passed
            # 1) Involve Local in a cycle
            cycle = [Local()]
            cycle.append(cycle)
            cycle[0].foo = 'bar'
            # 2) GC the cycle (triggers threadmodule.c::local_clear
            # before local_dealloc)
            del cycle
            gc.collect()
            e1.set()
            e2.wait()
            # 4) New Locals should be empty
            passed = all(not hasattr(local, 'foo') for local in locals)
        t = threading.Thread(target=f)
        t.start()
        e1.wait()
        # 3) New Locals should recycle the original's address. Creating
        # them in the thread overwrites the thread state and avoids the
        # bug
        locals = [Local() for i in range(10)]
        e2.set()
        t.join()
        self.assertTrue(passed)

    def test_arguments(self):
        # Issue 1522237: subclasses may accept constructor arguments,
        # but the base local type itself must reject them.
        class MyLocal(self._local):
            def __init__(self, *args, **kwargs):
                pass
        MyLocal(a=1)
        MyLocal(1)
        self.assertRaises(TypeError, self._local, a=1)
        self.assertRaises(TypeError, self._local, 1)

    def _test_one_class(self, c):
        # Verify that attributes set on *c*'s instance in one thread are
        # invisible from another thread.
        import sys  # fix: module never imports sys, yet the failure path below needs it
        self._failed = "No error message set or cleared."
        obj = c()
        e1 = threading.Event()
        e2 = threading.Event()
        def f1():
            obj.x = 'foo'
            obj.y = 'bar'
            del obj.y
            e1.set()
            e2.wait()
        def f2():
            try:
                foo = obj.x
            except AttributeError:
                # This is expected -- we haven't set obj.x in this thread yet!
                self._failed = ""  # passed
            else:
                self._failed = ('Incorrectly got value %r from class %r\n' %
                                (foo, c))
                sys.stderr.write(self._failed)
        t1 = threading.Thread(target=f1)
        t1.start()
        e1.wait()
        t2 = threading.Thread(target=f2)
        t2.start()
        t2.join()
        # The test is done; just let t1 know it can exit, and wait for it.
        e2.set()
        t1.join()
        self.assertFalse(self._failed, self._failed)

    def test_threading_local(self):
        self._test_one_class(self._local)

    def test_threading_local_subclass(self):
        class LocalSubclass(self._local):
            """To test that subclasses behave properly."""
        self._test_one_class(LocalSubclass)

    def _test_dict_attribute(self, cls):
        # __dict__ is readable but must not be replaceable or deletable.
        obj = cls()
        obj.x = 5
        self.assertEqual(obj.__dict__, {'x': 5})
        with self.assertRaises(AttributeError):
            obj.__dict__ = {}
        with self.assertRaises(AttributeError):
            del obj.__dict__

    def test_dict_attribute(self):
        self._test_dict_attribute(self._local)

    def test_dict_attribute_subclass(self):
        class LocalSubclass(self._local):
            """To test that subclasses behave properly."""
        self._test_dict_attribute(LocalSubclass)

    def test_cycle_collection(self):
        # A reference cycle through a local must still be collectable.
        class X:
            pass
        x = X()
        x.local = self._local()
        x.local.x = x
        wr = weakref.ref(x)
        del x
        gc.collect()
        self.assertIsNone(wr())
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
    # Run the shared suite against the C implementation.
    _local = _thread._local
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
    # Run the shared suite against the pure-Python implementation.
    _local = _threading_local.local
def test_main():
    """Run both TestCase suites plus the _threading_local doctests,
    once as-is and once with _threading_local.local swapped for the C
    _thread._local so the doctests cover both implementations."""
    suite = unittest.TestSuite()
    suite.addTest(DocTestSuite('_threading_local'))
    suite.addTest(unittest.makeSuite(ThreadLocalTest))
    suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
    local_orig = _threading_local.local
    def setUp(test):
        # Temporarily point the pure-Python module at the C local.
        _threading_local.local = _thread._local
    def tearDown(test):
        _threading_local.local = local_orig
    suite.addTest(DocTestSuite('_threading_local',
                               setUp=setUp, tearDown=tearDown)
                  )
    support.run_unittest(suite)
# Allow running this test module directly.
if __name__ == '__main__':
    test_main()
|
send.py | #!/usr/bin/env python3
"""
@summary: submit many contract.set(arg) transactions to the example contract
@version: v52 (22/January/2019)
@since: 17/April/2018
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer for updates
"""
# extend sys.path for imports:
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
################
## Dependencies:
# standard library:
import sys, time, random, json
from threading import Thread
from queue import Queue
from pprint import pprint
# pypi:
import requests # pip3 install requests
import web3
from web3 import Web3, HTTPProvider # pip3 install web3
from web3.utils.abi import filter_by_name, abi_to_signature
from web3.utils.encoding import pad_hex
# chainhammer:
from hammer.config import RPCaddress, ROUTE, PRIVATE_FOR, EXAMPLE_ABI
from hammer.config import PARITY_UNLOCK_EACH_TRANSACTION
from hammer.config import GAS_FOR_SET_CALL
from hammer.config import FILE_LAST_EXPERIMENT, EMPTY_BLOCKS_AT_END
from hammer.deploy import loadFromDisk
from hammer.clienttools import web3connection, unlockAccount
##########################
## smart contract related:
def initialize_fromAddress():
    """
    initialise contract object from address, stored in disk file by deploy.py
    (uses the module-global web3 connection 'w3' set up in __main__)
    """
    contractAddress, abi = loadFromDisk()
    myContract = w3.eth.contract(address=contractAddress,
                                 abi=abi)
    return myContract
def contract_set_via_web3(contract, arg, hashes = None, privateFor=PRIVATE_FOR, gas=GAS_FOR_SET_CALL):
    """
    Call the .set(arg) method, possibly with the Quorum 'privateFor'
    tx-property, using the web3 method.

    If a list is passed as 'hashes', the tx hash is appended to it (used
    by the threaded senders to collect results from worker threads).
    Returns the transaction hash as a hex string.
    """
    txParameters = {'from': w3.eth.defaultAccount,
                    'gas' : gas}
    if privateFor:
        txParameters['privateFor'] = privateFor  # untested
    # pprint (txParameters)
    if PARITY_UNLOCK_EACH_TRANSACTION:
        unlockAccount()
    tx = contract.functions.set( x=arg ).transact(txParameters)
    # print ("[sent via web3]", end=" ") # TODO: not print this here but at start
    print (".", end=" ") # TODO: not print this here but at start
    tx = w3.toHex(tx)
    if hashes is not None:  # fixed: was 'not hashes==None' (PEP 8: compare to None with 'is')
        hashes.append(tx)
    return tx
def try_contract_set_via_web3(contract, arg=42):
    """
    Smoke test for contract_set_via_web3: submit one set(arg), wait for
    the receipt, and return the contract's stored value afterwards.
    """
    tx = contract_set_via_web3(contract, arg=arg)
    print (tx)
    tx_receipt = w3.eth.waitForTransactionReceipt(tx)
    storedData = contract.functions.get().call()
    print (storedData)
    return storedData
## Manually build & submit transaction, i.e. not going though web3
## (the hope of @jpmsam was that this would speed it up)
##
## Note that the data compilation steps are already implemented as
## myContract.functions.myMethod(*args, **kwargs).buildTransaction(transaction)
## but the following bypasses web3.py completely!
def contract_method_ID(methodname, abi):
    """
    build the 4 byte ID, from abi & methodname
    (keccak hash of the method signature, truncated to 4 bytes =
    '0x' + 8 hex chars)
    """
    method_abi = filter_by_name(methodname, abi)
    # method names are assumed unique in this ABI:
    assert(len(method_abi)==1)
    method_abi = method_abi[0]
    method_signature = abi_to_signature(method_abi)
    method_signature_hash_bytes = w3.sha3(text=method_signature)
    method_signature_hash_hex = w3.toHex(method_signature_hash_bytes)
    # first 10 chars = '0x' prefix + 4 bytes:
    method_signature_hash_4bytes = method_signature_hash_hex[0:10]
    return method_signature_hash_4bytes
def argument_encoding(contract_method_ID, arg):
    """
    concatenate method ID + padded parameter
    (arg is left-padded to 256 bits; its '0x' prefix is stripped before
    appending, so the result is one contiguous hex string)
    """
    arg_hex = w3.toHex(arg)
    arg_hex_padded = pad_hex ( arg_hex, bit_size=256)
    data = contract_method_ID + arg_hex_padded [2:]
    return data
def timeit_argument_encoding():
    """
    Micro-benchmark for contract_method_ID + argument_encoding:
    'Doing that 10000 times ... took 0.45 seconds'
    """
    # fixed: time.clock() was removed in Python 3.8 -> use perf_counter()
    start = time.perf_counter()
    reps = 10000
    for i in range(reps):
        # fixed: 'ABI' was an undefined name; EXAMPLE_ABI is the imported constant
        method_ID = contract_method_ID("set", EXAMPLE_ABI)
        data = argument_encoding(method_ID, 7)
    elapsed = time.perf_counter() - start
    print (data)
    # no need to precalculate, it takes near to no time:
    print ("Doing that %d times ... took %.2f seconds" % (reps, elapsed) )
def contract_set_via_RPC(contract, arg, hashes = None, privateFor=PRIVATE_FOR, gas=GAS_FOR_SET_CALL):
    """
    call the .set(arg) method
    not going through web3
    but directly via RPC ('eth_sendTransaction' over HTTP)
    suggestion by @jpmsam
    https://github.com/jpmorganchase/quorum/issues/346#issuecomment-382216968

    If a list is passed as 'hashes', the tx hash is appended to it.
    Returns the transaction hash as reported by the node.
    """
    method_ID = contract_method_ID("set", contract.abi) # TODO: make this "set" flexible for any method name
    data = argument_encoding(method_ID, arg)
    txParameters = {'from': w3.eth.defaultAccount,
                    'to' : contract.address,
                    'gas' : w3.toHex(gas),
                    'data' : data}
    if privateFor:
        txParameters['privateFor'] = privateFor # untested
    method = 'eth_sendTransaction'
    payload= {"jsonrpc" : "2.0",
              "method" : method,
              "params" : [txParameters],
              "id" : 1}
    headers = {'Content-type' : 'application/json'}
    response = requests.post(RPCaddress, json=payload, headers=headers)
    # print('raw json response: {}'.format(response.json()))
    # NOTE(review): assumes a success response; an RPC error object has no
    # 'result' key and would raise KeyError here -- surfacing that is fine.
    tx = response.json()['result']
    # print ("[sent directly via RPC]", end=" ") # TODO: not print this here but at start
    print (".", end=" ") # TODO: not print this here but at start
    if hashes is not None:  # fixed: was 'not hashes==None' (PEP 8: compare to None with 'is')
        hashes.append(tx)
    return tx
def try_contract_set_via_RPC(contract, steps=3):
    """
    test the above, write 3 transactions, and check the storedData
    (starting from a random value so reruns are distinguishable)
    """
    rand = random.randint(1, 100)
    for number in range(rand, rand+steps):
        tx = contract_set_via_RPC(contract, number)
        print ("after setat(%d) tx" % number, tx, " the storedData now is", end=" ")
        # TODO: wait for receipt!
        storedData = contract.functions.get().call()
        print (storedData)
# Select the sender implementation (web3 library vs raw RPC) once at
# import time, depending on the ROUTE constant from hammer.config.
contract_set = contract_set_via_web3 if ROUTE=="web3" else contract_set_via_RPC
################################################################
###
### benchmarking routines
###
### 0 blocking
### 1 async
### 2 async, queue, can give number of workers
### 3 async, batched (obsolete)
###
################################################################
def many_transactions_consecutive(contract, numTx):
    """
    Submit numTx transactions sequentially (blocking) --> ~15 TPS.

    Returns the list of transaction hashes, in submission order.
    """
    print ("send %d transactions, non-async, one after the other:\n" % (numTx))
    hashes = []
    for counter in range(numTx):
        txhash = contract_set(contract, counter)
        print ("set() transaction submitted: ", txhash)
        hashes.append(txhash)
    return hashes
def many_transactions_threaded(contract, numTx):
    """
    Submit many transactions, one thread per transaction.

    N.B.: with a large numTx the machine can run out of threads and crash.
    Returns the shared list of transaction hashes the workers appended to.
    """
    print ("send %d transactions, multi-threaded, one thread per tx:\n" % (numTx))
    threads = []
    txs = []  # shared container; each worker appends its tx hash here
    for index in range(numTx):
        worker = Thread(target=contract_set,
                        args=(contract, index, txs))
        threads.append(worker)
        print (".", end="")
    print ("%d transaction threads created." % len(threads))
    for worker in threads:
        worker.start()
        print (".", end="")
        sys.stdout.flush()
    print ("all threads started.")
    for worker in threads:
        worker.join()
    print ("all threads ended.")
    return txs
def many_transactions_threaded_Queue(contract, numTx, num_worker_threads=25):
    """
    submit many transactions multi-threaded,
    with size limited threading Queue
    (fixed worker pool; workers are daemon threads so they die with the
    main thread after q.join() returns)
    """
    line = "send %d transactions, via multi-threading queue with %d workers:\n"
    print (line % (numTx, num_worker_threads))
    q = Queue()
    txs = [] # container to keep all transaction hashes
    def worker():
        # endless loop; thread exits only because it is a daemon
        while True:
            item = q.get()
            contract_set(contract, item, txs)
            print ("T", end=""); sys.stdout.flush()
            q.task_done()
    for i in range(num_worker_threads):
        t = Thread(target=worker)
        t.daemon = True
        t.start()
        print ("W", end=""); sys.stdout.flush()
    print ("\n%d worker threads created." % num_worker_threads)
    for i in range(numTx):
        q.put (i)
        print ("I", end=""); sys.stdout.flush()
    print ("\n%d items queued." % numTx)
    # block until every queued item has been marked task_done():
    q.join()
    print ("\nall items - done.")
    return txs
def many_transactions_threaded_in_batches(contract, numTx, batchSize=25):
    """
    submit many transactions multi-threaded;
    but in batches of rather small numbers.
    OBSOLETE <-- not faster than threaded2.
    """
    line = "send %d transactions, multi-threaded, one thread per tx, " \
           "in batches of %d parallel threads:\n"
    print (line % (numTx, batchSize))
    txs = [] # container to keep all transaction hashes
    howManyLeft=numTx
    while howManyLeft>0:
        line = "Next batch of %d transactions ... %d left to do"
        print (line % (batchSize, howManyLeft))
        threads = []
        # last batch may be smaller than batchSize:
        number = batchSize if howManyLeft>batchSize else howManyLeft
        for i in range(number):
            t = Thread(target = contract_set,
                       args = (contract, i, txs))
            threads.append(t)
            print (".", end="")
        print ("\n%d transaction threads created." % len(threads))
        for t in threads:
            t.start()
            print (".", end="")
            sys.stdout.flush()
        print ("\nall threads started.")
        # wait for the whole batch before starting the next one:
        for t in threads:
            t.join()
        print ("\nall threads ended.")
        howManyLeft -= number
    return txs
################################################################
###
### control sample: have the transactions been SUCCESSFUL ?
###
################################################################
def hasTxSucceeded(tx_receipt): #, gasGiven=GAS_FOR_SET_CALL):
    """
    Decide whether a transaction succeeded, from its receipt.

    Uses the receipt's 'status' field when present (1 = success,
    0 = failure). Not all clients support that field yet (e.g.
    testrpc-py, quorum), so as a fallback compare gasUsed against the
    gas given: all gas used up almost always means failure, while some
    gas left over is only a weak success indicator (throw/revert/require
    also return unused gas), but it is the only one available.
    Always returns a bool (the original could fall through to None).
    """
    status = tx_receipt.get("status", None)
    if status == 1:  # clear answer = transaction succeeded!
        return True
    if status == 0:  # clear answer = transaction failed!
        return False
    # no status field --> gas heuristic:
    tx_hash = tx_receipt.transactionHash
    gasGiven = w3.eth.getTransaction(tx_hash)["gas"]
    # fixed: collapsed the redundant 'if not x: ... / if x: ...' pair,
    # which also implicitly returned None on neither branch
    return tx_receipt.gasUsed < gasGiven
def receiptGetter(tx_hash, timeout, resultsDict):
    """Wait for one receipt and store it under its hash in resultsDict;
    on timeout the hash is simply left out (callers count the dict)."""
    try:
        resultsDict[tx_hash] = w3.eth.waitForTransactionReceipt(tx_hash, timeout)
    except web3.utils.threads.Timeout:
        pass
def getReceipts_multithreaded(tx_hashes, timeout):
    """
    one thread per tx_hash
    (returns {tx_hash: receipt}; timed-out hashes are missing)
    """
    tx_receipts = {}
    print("Waiting for %d transaction receipts, can possibly take a while ..." % len(tx_hashes))
    threads = []
    for tx_hash in tx_hashes:
        t = Thread(target = receiptGetter,
                   args = (tx_hash, timeout, tx_receipts))
        threads.append(t)
        t.start()
    # wait for all of them coming back:
    for t in threads:
        t.join()
    return tx_receipts
def controlSample_transactionsSuccessful(txs, sampleSize=50, timeout=100):
    """
    Makes sure that the transactions were actually successful,
    and did not fail because e.g. running out of gas, etc.
    We want to benchmark the speed of successful state changes!!
    Method: Instead of checking EVERY transaction this just takes some sample.
    It can fail in three very different ways:
    * timeout when waiting for tx-receipt, then you try raising the timeout seconds
    * tx_receipt.status == 0 for any of the sampled transactions. Real tx failure!
    * all given gas used up. It's only an indirect indicator for a failed transaction.
    """
    print ("Check control sample.")
    # sample at most sampleSize hashes:
    N = sampleSize if len(txs)>sampleSize else len(txs)
    txs_sample = random.sample(txs, N)
    tx_receipts = getReceipts_multithreaded(tx_hashes=txs_sample, timeout=timeout)
    # Test 1: Are all receipts here?
    M = len(tx_receipts)
    if M != N:
        print ("Bad: Timeout, received receipts only for %d out of %d sampled transactions." % (M, N))
        success = False
    else:
        print ("Good: No timeout, received the receipts for all %d sampled transactions." % N)
        success = True
    # Test 2: Was each and every transaction successful?
    badCounter=0
    for tx_hash, tx_receipt in tx_receipts.items():
        # status = tx_receipt.get("status", None) # unfortunately not all clients support this yet
        # print ((tx_hash, status, tx_receipt.gasUsed ))
        if not hasTxSucceeded(tx_receipt):
            success = False
            print ("Transaction NOT successful:", tx_hash, tx_receipt)
            badCounter = badCounter+1
            # pprint (dict(tx_receipt))
    if badCounter:
        print ("Bad: %d out of %d not successful!" % (badCounter, M))
    print ("Sample of %d transactions checked ... hints at:" % M, end=" ")
    print( "TOTAL SUCCESS :-)" if success else "-AT LEAST PARTIAL- FAILURE :-(" )
    return success
# Try out the above with
# pytest tests/test_send.py::test_controlSample_transactionsSuccessful
################################################################################
###
### estimate range of blocks, first and last 100 transaction hashes
###
################################################################################
def getReceipts_multithreaded_Queue(tx_hashes, timeout, num_worker_threads=8, ifPrint=False):
    """
    Query the RPC via a multithreading Queue, with 8 worker threads.
    Advantage over 'getReceipts_multithreaded':
    Will also work for len(tx_hashes) > 1000
    """
    start=time.monotonic()
    q = Queue()
    tx_receipts = {}
    def worker():
        # daemon worker; loops until the process exits
        while True:
            tx_hash = q.get()
            receiptGetter(tx_hash, timeout, tx_receipts)
            q.task_done()
    for i in range(num_worker_threads):
        t = Thread(target=worker)
        t.daemon = True
        t.start()
    for tx in tx_hashes:
        q.put (tx)
    # block until all queued hashes have been processed:
    q.join()
    if ifPrint:
        duration = time.monotonic() - start
        print ("%d lookups took %.1f seconds" % (len(tx_receipts), duration))
    return tx_receipts
def when_last_ones_mined__give_range_of_block_numbers(txs, txRangesSize=100, timeout=60):
    """
    Also only a heuristics:
    Assuming that the first 100 and the last 100 transaction hashes
    that had been added to the list 'txs'
    can reveal the min and max blocknumbers of this whole experiment
    """
    txs_begin_and_end = txs[:txRangesSize] + txs[-txRangesSize:]
    tx_receipts = getReceipts_multithreaded_Queue(tx_hashes=txs_begin_and_end,
                                                  timeout=timeout) #, ifPrint=True)
    # or actually, all of them? Naaa, too slow:
    # TestRPC: 2000 lookups took 122.1 seconds
    # Parity: 2000 lookups took 7.2 seconds
    # Geth: 2000 lookups took 8.6 seconds
    # tx_receipts = getReceipts_multithreaded_Queue(tx_hashes=txs,
    #                                              timeout=timeout, ifPrint=True)
    blockNumbers = [receipt.blockNumber for receipt in tx_receipts.values()]
    blockNumbers = sorted(list(set(blockNumbers))) # make unique
    # print (blockNumbers)
    return min(blockNumbers), max(blockNumbers)
def store_experiment_data(success, num_txs,
                          block_from, block_to,
                          empty_blocks,
                          filename=FILE_LAST_EXPERIMENT):
    """
    most basic data about this last experiment,
    stored in same (overwritten) file.
    Purpose: diagramming should be able to calc proper averages & select ranges
    (reads the module globals w3, NODENAME, ... set in __main__)
    """
    data = {"send" : {
                "block_first" : block_from,
                "block_last": block_to,
                "empty_blocks": empty_blocks,
                "num_txs" : num_txs,
                "sample_txs_successful": success
                },
            "node" : {
                "rpc_address": RPCaddress,
                "web3.version.node": w3.version.node,
                "name" : NODENAME,
                "type" : NODETYPE,
                "version" : NODEVERSION,
                "consensus" : CONSENSUS,
                "network_id" : NETWORKID,
                "chain_name" : CHAINNAME,
                "chain_id" : CHAINID
                }
            }
    with open(filename, "w") as f:
        json.dump(data, f)
def wait_some_blocks(waitBlocks=EMPTY_BLOCKS_AT_END, pauseBetweenQueries=0.3):
    """
    Actually, the waiting has to be done here,
    because ./send.py is started later than ./tps.py
    So when ./send.py ends, the analysis can happen.
    (polls the chain head until waitBlocks more blocks have been mined)
    """
    blockNumber_start = w3.eth.blockNumber
    print ("blocknumber now:", blockNumber_start, end=" ")
    print ("waiting for %d empty blocks:" % waitBlocks)
    bn_previous=bn_now=blockNumber_start
    while bn_now < waitBlocks + blockNumber_start:
        time.sleep(pauseBetweenQueries)
        bn_now=w3.eth.blockNumber
        # print (bn_now, waitBlocks + blockNumber_start)
        # only report when a new block appears:
        if bn_now!=bn_previous:
            bn_previous=bn_now
            print (bn_now, end=" "); sys.stdout.flush()
    print ("Done.")
def finish(txs, success):
    """Wrap up the experiment: determine the block range, optionally wait
    for trailing empty blocks, and persist the experiment summary."""
    block_from, block_to = when_last_ones_mined__give_range_of_block_numbers(txs)
    txt = "Transaction receipts from beginning and end all arrived. Blockrange %d to %d."
    txt = txt % (block_from, block_to)
    print (txt)
    if NODETYPE=="TestRPC" or (NODENAME=="Parity" and CHAINNAME=="developmentchain" and NETWORKID==17):
        print ("Do not wait for empty blocks, as this is TestRPC, or parity instantseal.")
        waitBlocks=0
    else:
        waitBlocks=EMPTY_BLOCKS_AT_END
        wait_some_blocks(waitBlocks)
    store_experiment_data (success, len(txs), block_from, block_to, empty_blocks=waitBlocks)
    # print ("Data stored. This will trigger tps.py to end in ~ %d blocks." % EMPTY_BLOCKS_AT_END)
    print ("Data stored. This will trigger tps.py to end.\n"
           "(Beware: Wait ~0.5s until tps.py stops and writes to same file.)")
    # see tps.py --> pauseBetweenQueries=0.3
################################################################################
###
### choose, depending on CLI parameter
###
################################################################################
def check_CLI_or_syntax_info_and_exit():
    """
    before anything, check if number of parameters is fine,
    or print syntax instructions and exit with a non-zero code
    """
    #print ("len(sys.argv)=", len(sys.argv))
    if not (2 <= len(sys.argv) <= 4):
        print ("Needs parameters:")
        print ("%s numTransactions algorithm [workers]" % sys.argv[0])
        print ("at least numTransactions, e.g.")
        print ("%s 1000" % sys.argv[0])
        # fixed: exit() is the interactive 'site' helper and is not
        # guaranteed in scripts; sys.exit(1) also signals failure
        sys.exit(1)
def sendmany(contract):
    """
    sends many transactions to contract.
    choose algorithm depending on 2nd CLI argument
    (sequential / threaded1 / threaded2 [workers] / threaded3).
    Returns the list of submitted transaction hashes.
    """
    # TODO: Perhaps extend this to a full blown (config.py) settings printer?
    #       but then in tps.py because only that output is visible in run.sh
    print("\nCurrent blockNumber = ", w3.eth.blockNumber)
    numTransactions = int(sys.argv[1])
    if ROUTE=="RPC": route = "RPC directly"
    if ROUTE=="web3": route = "web3 library"
    print ("You want me to send %d transactions, via route: %s." % (numTransactions, route))
    # choose algorithm depending on 2nd CLI argument:
    if len(sys.argv)==2 or sys.argv[2]=="sequential":
        # blocking, non-async
        txs=many_transactions_consecutive(contract, numTransactions)
    elif sys.argv[2]=="threaded1":
        txs=many_transactions_threaded(contract, numTransactions)
    elif sys.argv[2]=="threaded2":
        num_workers = 100
        if len(sys.argv)==4:
            try:
                num_workers = int(sys.argv[3])
            except ValueError:  # fixed: was a bare 'except:'; only a
                pass            # non-numeric argument should be ignored
        txs=many_transactions_threaded_Queue(contract,
                                             numTx=numTransactions,
                                             num_worker_threads=num_workers)
    elif sys.argv[2]=="threaded3":
        batchSize=25
        txs=many_transactions_threaded_in_batches(contract,
                                                  numTx=numTransactions,
                                                  batchSize=batchSize)
    else:
        print ("Nope. Choice '%s'" % sys.argv[2], "not recognized.")
        sys.exit(1)  # fixed: was exit(); see check_CLI_or_syntax_info_and_exit
    print ("%d transaction hashes recorded, examples: %s" % (len(txs), txs[:2]))
    return txs
if __name__ == '__main__':
    check_CLI_or_syntax_info_and_exit()
    # 'global' at module level is a no-op; kept to document that these
    # names are read as globals by the functions above.
    global w3, NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID
    w3, chainInfos  = web3connection(RPCaddress=RPCaddress, account=None)
    NODENAME, NODETYPE, NODEVERSION, CONSENSUS, NETWORKID, CHAINNAME, CHAINID = chainInfos
    # wait_some_blocks(0); exit()
    # timeit_argument_encoding(); exit()
    # try_contract_set_via_web3(contract); exit()
    # try_contract_set_via_RPC(contract); exit()
    w3.eth.defaultAccount = w3.eth.accounts[0] # set first account as sender
    contract = initialize_fromAddress()
    txs = sendmany(contract)
    sys.stdout.flush() # so that the log files are updated.
    success = controlSample_transactionsSuccessful(txs)
    sys.stdout.flush()
    finish(txs, success)
    sys.stdout.flush()
|
test_bz2.py | #!/usr/bin/env python3
from test import support
from test.support import TESTFN
import unittest
from io import BytesIO
import os
import subprocess
import sys
try:
import threading
except ImportError:
threading = None
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = sys.platform not in ("win32", "os2emx")
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT = b'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:\ndaemon:x:2:2:daemon:/sbin:\nadm:x:3:4:adm:/var/adm:\nlp:x:4:7:lp:/var/spool/lpd:\nsync:x:5:0:sync:/sbin:/bin/sync\nshutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\nhalt:x:7:0:halt:/sbin:/sbin/halt\nmail:x:8:12:mail:/var/spool/mail:\nnews:x:9:13:news:/var/spool/news:\nuucp:x:10:14:uucp:/var/spool/uucp:\noperator:x:11:0:operator:/root:\ngames:x:12:100:games:/usr/games:\ngopher:x:13:30:gopher:/usr/lib/gopher-data:\nftp:x:14:50:FTP User:/var/ftp:/bin/bash\nnobody:x:65534:65534:Nobody:/home:\npostfix:x:100:101:postfix:/var/spool/postfix:\nniemeyer:x:500:500::/home/niemeyer:/bin/bash\npostgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\nmysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\nwww:x:103:104::/var/www:/bin/false\n'
TEXT_LINES = TEXT.splitlines(True)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
DATA_CRLF = b'BZh91AY&SY\xaez\xbbN\x00\x01H\xdf\x80\x00\x12@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe0@\x01\xbc\xc6`\x86*\x8d=M\xa9\x9a\x86\xd0L@\x0fI\xa6!\xa1\x13\xc8\x88jdi\x8d@\x03@\x1a\x1a\x0c\x0c\x83 \x00\xc4h2\x19\x01\x82D\x84e\t\xe8\x99\x89\x19\x1ah\x00\r\x1a\x11\xaf\x9b\x0fG\xf5(\x1b\x1f?\t\x12\xcf\xb5\xfc\x95E\x00ps\x89\x12^\xa4\xdd\xa2&\x05(\x87\x04\x98\x89u\xe40%\xb6\x19\'\x8c\xc4\x89\xca\x07\x0e\x1b!\x91UIFU%C\x994!DI\xd2\xfa\xf0\xf1N8W\xde\x13A\xf5\x9cr%?\x9f3;I45A\xd1\x8bT\xb1<l\xba\xcb_\xc00xY\x17r\x17\x88\x08\x08@\xa0\ry@\x10\x04$)`\xf2\xce\x89z\xb0s\xec\x9b.iW\x9d\x81\xb5-+t\x9f\x1a\'\x97dB\xf5x\xb5\xbe.[.\xd7\x0e\x81\xe7\x08\x1cN`\x88\x10\xca\x87\xc3!"\x80\x92R\xa1/\xd1\xc0\xe6mf\xac\xbd\x99\xcca\xb3\x8780>\xa4\xc7\x8d\x1a\\"\xad\xa1\xabyBg\x15\xb9l\x88\x88\x91k"\x94\xa4\xd4\x89\xae*\xa6\x0b\x10\x0c\xd6\xd4m\xe86\xec\xb5j\x8a\x86j\';\xca.\x01I\xf2\xaaJ\xe8\x88\x8cU+t3\xfb\x0c\n\xa33\x13r2\r\x16\xe0\xb3(\xbf\x1d\x83r\xe7M\xf0D\x1365\xd8\x88\xd3\xa4\x92\xcb2\x06\x04\\\xc1\xb0\xea//\xbek&\xd8\xe6+t\xe5\xa1\x13\xada\x16\xder5"w]\xa2i\xb7[\x97R \xe2IT\xcd;Z\x04dk4\xad\x8a\t\xd3\x81z\x10\xf1:^`\xab\x1f\xc5\xdc\x91N\x14$+\x9e\xae\xd3\x80'
    if has_cmdline_bunzip2:
        # Decompress via the external bunzip2 command so the tests do not
        # validate the bz2 module against itself; fall back to the module
        # if the command exits non-zero.
        def decompress(self, data):
            pop = subprocess.Popen("bunzip2", shell=True,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
            pop.stdin.write(data)
            pop.stdin.close()
            ret = pop.stdout.read()
            pop.stdout.close()
            if pop.wait() != 0:
                ret = bz2.decompress(data)
            return ret
    else:
        # bunzip2 isn't available to run on Windows.
        def decompress(self, data):
            return bz2.decompress(data)
class BZ2FileTest(BaseTest):
    "Test BZ2File type miscellaneous methods."

    def setUp(self):
        self.filename = TESTFN

    def tearDown(self):
        if os.path.isfile(self.filename):
            os.unlink(self.filename)

    def createTempFile(self, crlf=0):
        # Write the CRLF or plain compressed fixture to self.filename.
        with open(self.filename, "wb") as f:
            if crlf:
                data = self.DATA_CRLF
            else:
                data = self.DATA
            f.write(data)

    def testRead(self):
        # "Test BZ2File.read()"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, None)
            self.assertEqual(bz2f.read(), self.TEXT)

    def testRead0(self):
        # "Test BZ2File.read(0)"  (comment typo "BBZ2File" fixed)
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.read, None)
            self.assertEqual(bz2f.read(0), b"")

    def testReadChunk10(self):
        # "Test BZ2File.read() in chunks of 10 bytes"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            text = b''
            while 1:
                chunk = bz2f.read(10)  # renamed from 'str': don't shadow the builtin
                if not chunk:
                    break
                text += chunk
            self.assertEqual(text, self.TEXT)

    def testRead100(self):
        # "Test BZ2File.read(100)"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertEqual(bz2f.read(100), self.TEXT[:100])

    def testReadLine(self):
        # "Test BZ2File.readline()"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readline, None)
            sio = BytesIO(self.TEXT)
            for line in sio.readlines():
                self.assertEqual(bz2f.readline(), line)

    def testReadLines(self):
        # "Test BZ2File.readlines()"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.readlines, None)
            sio = BytesIO(self.TEXT)
            self.assertEqual(bz2f.readlines(), sio.readlines())

    def testIterator(self):
        # "Test iter(BZ2File)"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            sio = BytesIO(self.TEXT)
            self.assertEqual(list(iter(bz2f)), sio.readlines())

    def testClosedIteratorDeadlock(self):
        # "Test that iteration on a closed bz2file releases the lock."
        # http://bugs.python.org/issue3309
        self.createTempFile()
        bz2f = BZ2File(self.filename)
        bz2f.close()
        self.assertRaises(ValueError, bz2f.__next__)
        # This call will deadlock if the above .__next__ call failed to
        # release the lock.  (comment typo "of" -> "if" fixed)
        self.assertRaises(ValueError, bz2f.readlines)

    def testWrite(self):
        # "Test BZ2File.write()"
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.write)
            bz2f.write(self.TEXT)
        with open(self.filename, 'rb') as f:
            self.assertEqual(self.decompress(f.read()), self.TEXT)

    def testWriteChunks10(self):
        # "Test BZ2File.write() with chunks of 10 bytes"
        with BZ2File(self.filename, "w") as bz2f:
            n = 0
            while 1:
                chunk = self.TEXT[n*10:(n+1)*10]  # renamed from 'str': don't shadow the builtin
                if not chunk:
                    break
                bz2f.write(chunk)
                n += 1
        with open(self.filename, 'rb') as f:
            self.assertEqual(self.decompress(f.read()), self.TEXT)

    def testWriteLines(self):
        # "Test BZ2File.writelines()"
        with BZ2File(self.filename, "w") as bz2f:
            self.assertRaises(TypeError, bz2f.writelines)
            sio = BytesIO(self.TEXT)
            bz2f.writelines(sio.readlines())
        # patch #1535500
        self.assertRaises(ValueError, bz2f.writelines, ["a"])
        with open(self.filename, 'rb') as f:
            self.assertEqual(self.decompress(f.read()), self.TEXT)

    def testWriteMethodsOnReadOnlyFile(self):
        with BZ2File(self.filename, "w") as bz2f:
            bz2f.write(b"abc")
        with BZ2File(self.filename, "r") as bz2f:
            self.assertRaises(IOError, bz2f.write, b"a")
            self.assertRaises(IOError, bz2f.writelines, [b"a"])

    def testSeekForward(self):
        # "Test BZ2File.seek(150, 0)"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            self.assertRaises(TypeError, bz2f.seek)
            bz2f.seek(150)
            self.assertEqual(bz2f.read(), self.TEXT[150:])

    def testSeekBackwards(self):
        # "Test BZ2File.seek(-150, 1)"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.read(500)
            bz2f.seek(-150, 1)
            self.assertEqual(bz2f.read(), self.TEXT[500-150:])

    def testSeekBackwardsFromEnd(self):
        # "Test BZ2File.seek(-150, 2)"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-150, 2)
            self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])

    def testSeekPostEnd(self):
        # "Test BZ2File.seek(150000)"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT))
            self.assertEqual(bz2f.read(), b"")

    def testSeekPostEndTwice(self):
        # "Test BZ2File.seek(150000) twice"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(150000)
            bz2f.seek(150000)
            self.assertEqual(bz2f.tell(), len(self.TEXT))
            self.assertEqual(bz2f.read(), b"")

    def testSeekPreStart(self):
        # "Test BZ2File.seek(-150, 0)"
        self.createTempFile()
        with BZ2File(self.filename) as bz2f:
            bz2f.seek(-150)
            self.assertEqual(bz2f.tell(), 0)
            self.assertEqual(bz2f.read(), self.TEXT)

    def testOpenDel(self):
        # "Test opening and deleting a file many times"
        self.createTempFile()
        for i in range(10000):
            if support.check_impl_detail(pypy=True):
                with BZ2File(self.filename) as o:
                    pass
            else:
                o = BZ2File(self.filename)
                del o

    def testOpenNonexistent(self):
        # "Test opening a nonexistent file"
        self.assertRaises(IOError, BZ2File, "/non/existent")

    def testBug1191043(self):
        # readlines() for files containing no newline
        data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
        with open(self.filename, "wb") as f:
            f.write(data)
        with BZ2File(self.filename) as bz2f:
            lines = bz2f.readlines()
        self.assertEqual(lines, [b'Test'])
        with BZ2File(self.filename) as bz2f:
            xlines = list(bz2f.readlines())
        self.assertEqual(xlines, [b'Test'])

    def testContextProtocol(self):
        # BZ2File supports the context management protocol
        f = None
        with BZ2File(self.filename, "wb") as f:
            f.write(b"xxx")
        f = BZ2File(self.filename, "rb")
        f.close()
        try:
            with f:
                pass
        except ValueError:
            pass
        else:
            self.fail("__enter__ on a closed file didn't raise an exception")
        try:
            with BZ2File(self.filename, "wb") as f:
                1/0
        except ZeroDivisionError:
            pass
        else:
            self.fail("1/0 didn't raise an exception")

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def testThreading(self):
        # Using a BZ2File from several threads doesn't deadlock (issue #7205).
        data = b"1" * 2**20
        nthreads = 10
        with bz2.BZ2File(self.filename, 'wb') as f:
            def comp():
                for i in range(5):
                    f.write(data)
            threads = [threading.Thread(target=comp) for i in range(nthreads)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()

    def testMixedIterationAndReads(self):
        self.createTempFile()
        linelen = len(self.TEXT_LINES[0])
        halflen = linelen // 2
        with BZ2File(self.filename) as bz2f:
            bz2f.read(halflen)
            self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
            self.assertEqual(bz2f.read(), self.TEXT[linelen:])
        with BZ2File(self.filename) as bz2f:
            bz2f.readline()
            self.assertEqual(next(bz2f), self.TEXT_LINES[1])
            self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
        with BZ2File(self.filename) as bz2f:
            bz2f.readlines()
            self.assertRaises(StopIteration, next, bz2f)
            self.assertEqual(bz2f.readlines(), [])
class BZ2CompressorTest(BaseTest):
    "Test the BZ2Compressor type."

    def testCompress(self):
        # "Test BZ2Compressor.compress()/flush()"
        bz2c = BZ2Compressor()
        self.assertRaises(TypeError, bz2c.compress)
        data = bz2c.compress(self.TEXT)
        data += bz2c.flush()
        self.assertEqual(self.decompress(data), self.TEXT)

    def testCompressChunks10(self):
        # "Test BZ2Compressor.compress()/flush() with chunks of 10 bytes"
        bz2c = BZ2Compressor()
        n = 0
        data = b''
        while 1:
            chunk = self.TEXT[n*10:(n+1)*10]  # renamed from 'str': don't shadow the builtin
            if not chunk:
                break
            data += bz2c.compress(chunk)
            n += 1
        data += bz2c.flush()
        self.assertEqual(self.decompress(data), self.TEXT)
class BZ2DecompressorTest(BaseTest):
    "Test the BZ2Decompressor type."

    def test_Constructor(self):
        self.assertRaises(TypeError, BZ2Decompressor, 42)

    def testDecompress(self):
        # "Test BZ2Decompressor.decompress()"
        bz2d = BZ2Decompressor()
        self.assertRaises(TypeError, bz2d.decompress)
        text = bz2d.decompress(self.DATA)
        self.assertEqual(text, self.TEXT)

    def testDecompressChunks10(self):
        # "Test BZ2Decompressor.decompress() with chunks of 10 bytes"
        bz2d = BZ2Decompressor()
        text = b''
        n = 0
        while 1:
            chunk = self.DATA[n*10:(n+1)*10]  # renamed from 'str': don't shadow the builtin
            if not chunk:
                break
            text += bz2d.decompress(chunk)
            n += 1
        self.assertEqual(text, self.TEXT)

    def testDecompressUnusedData(self):
        # "Test BZ2Decompressor.decompress() with unused data"
        bz2d = BZ2Decompressor()
        unused_data = b"this is unused data"
        text = bz2d.decompress(self.DATA+unused_data)
        self.assertEqual(text, self.TEXT)
        self.assertEqual(bz2d.unused_data, unused_data)

    def testEOFError(self):
        # "Calling BZ2Decompressor.decompress() after EOS must raise EOFError"
        bz2d = BZ2Decompressor()
        text = bz2d.decompress(self.DATA)
        self.assertRaises(EOFError, bz2d.decompress, b"anything")
class FuncTest(BaseTest):
    "Test module functions"

    def testCompress(self):
        # "Test compress() function"
        compressed = bz2.compress(self.TEXT)
        self.assertEqual(self.decompress(compressed), self.TEXT)

    def testDecompress(self):
        # "Test decompress() function"
        plain = bz2.decompress(self.DATA)
        self.assertEqual(plain, self.TEXT)

    def testDecompressEmpty(self):
        # "Test decompress() function with empty string"
        self.assertEqual(bz2.decompress(b""), b"")

    def testDecompressIncomplete(self):
        # "Test decompress() function with incomplete data"
        self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def test_main():
    """Run every bz2 test class, then reap any child processes left over
    (the cmdline-bunzip2 decompress helper may spawn some)."""
    support.run_unittest(BZ2FileTest, BZ2CompressorTest,
                         BZ2DecompressorTest, FuncTest)
    support.reap_children()
if __name__ == '__main__':
    # Allow the test module to be executed directly.
    test_main()

# vim:ts=4:sw=4
|
profiling_base.py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from multiprocessing import Process
from akg.utils.kernel_exec import PERFORMANCE_TEST_FILE
from tests.common.base import TestBase, PERFORMANCE_TEST
TIMEOUT = 600
class ProfilingTestBase(TestBase):
    """Base class that runs every test case in its own process so the
    profiling tool can record per-case performance results."""

    def __init__(self, casename, testcases):
        """
        Prepare the test precondition.

        Args:
            casename: name of this test-case group.
            testcases: iterable of test-case argument tuples to run.
        """
        casepath = os.getcwd()
        super(ProfilingTestBase, self).__init__(casename, casepath)
        self.testcases = testcases

    def setup(self):
        """Enable performance-test mode and point it at the result file."""
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        self.result_file = os.path.join(self.caselog_path, self.casename + ".csv")
        os.environ[PERFORMANCE_TEST] = "True"
        os.environ[PERFORMANCE_TEST_FILE] = self.result_file
        return

    def _get_test_case_perf(self, test_case):
        """Record operator name/args in the result file, then run one case.

        Runs in a child process (see test_run_perf).
        """
        _, func, args, _ = self.ana_args(test_case)
        func_name = func if isinstance(func, str) else func.__name__
        operator_name = func_name.split("_run")[0]
        # 'with' guarantees the file handle is closed even if write() fails.
        with open(self.result_file, 'a+') as p_file:
            p_file.write("%s; %s; " % (operator_name, args))
        is_conv = "conv" in operator_name  # simplified from 'True if ... else False'
        self.common_run([test_case], is_conv=is_conv)

    def test_run_perf(self):
        """
        Run each case in a fresh process; kill it after TIMEOUT seconds.
        :return:
        """
        for test_case in self.testcases:
            # For the profiling tool, each test case must run with a new process
            p = Process(target=self._get_test_case_perf, args=(test_case,))
            p.start()
            p.join(timeout=TIMEOUT)
            if p.is_alive():
                p.terminate()
                raise RuntimeError("process for {0} timeout!".format(test_case))

    def teardown(self):
        """
        Clean the environment; tolerate variables that are already unset
        (e.g. when setup() failed part-way through).
        :return:
        """
        os.environ.pop(PERFORMANCE_TEST_FILE, None)
        os.environ.pop(PERFORMANCE_TEST, None)
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
|
final_script_mini_camera_2.2_4_windows_for_2_crates _2.py | from tkinter import *
from math import sin, cos, pi
from random import randrange
import os
import time, threading
import subprocess
import pickle
import numpy as np
import random
from math import cos, sin, sqrt, pi
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
from tkinter import *
import matplotlib
import serial
import usb.core
import usb.util
from collections import deque
import can
matplotlib.use("TkAgg")
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.backend_bases import key_press_handler
from matplotlib import cm, colors
import matplotlib.animation as animation
import sys
import os
import socket
from tkinter import messagebox,ttk
import threading
import matplotlib.image as image
import multiprocessing
#from scipy.spatial import KDTree as KDTree
class Canon:
    """GUI controller for the mini-camera event viewer.

    Params:
        fen1: Tkinter root window.
        flag_start: flag to start the event display.
        threshold: threshold of the DAQ.
        comm: communication class for the socket server.
        usb2can: communication class for the usb2can electronic device.
        entr1: Tkinter Spinbox for var_threshold_DAC.
        var_threshold: value of var_threshold_DAC.
        entr14: Tkinter Spinbox for var_threshold_HG.
        entr15: Tkinter Spinbox for var_threshold_LG.
        var_threshold_HG: value of threshold_HG.
        var_threshold_LG: value of threshold_LG.
    """
def __init__(self, fen1, flag_start, comm, usb2can, entr1, var_threshold, entr14, var_threshold_HG, entr15,
var_threshold_LG):
self.x_center = 0 #Camera x_center
self.y_center = 0 #Camera y_center
self.size_edge_to_edge = 23.2 # size edge to edge of the pixels
self.death_size = 1 #death size between pixels
self.time_allowed_to_display_events = 4000 # step of time during taking data
self.flag_start=flag_start
self.fen1=fen1
self.flag_1=0 # this flag is an auxilliary flag to know if i m enter in subprocess, if i enter in connect with socket server and so on
self.flag_stop=0 #flag to stop event display
#self.test_messages_old = 0
self.entr1=entr1 # Spinbox of threshold variable
self.threshold_DAC=int(self.entr1.get())
self.var_threshold_DAC=var_threshold
self.entr14, self.var_threshold_HG, self.entr15, self.var_threshold_LG=entr14,var_threshold_HG,entr15,var_threshold_LG
self.threshold_HG,self.threshold_LG=int(self.entr14.get()),int(self.entr15.get())
self.new_file_to_write = "D:/resultat_acquisition_babymind/_new_file_to_write.daq" # result file where i will writing my event data for later access
self.file_to_analyze_rate= "D:/resultat_acquisition_babymind/_file_to_analyze_rate.daq" # file where i will put results for analysing rate
#this are the config files and C sharp script to taking data
self.config_file_init_0= "D:/resultat_acquisition_babymind/config_scriptApplib_files/init_config_b0.xml"
self.config_file_init_1 = "D:/resultat_acquisition_babymind/config_scriptApplib_files/init_config_b1.xml"
self.config_file_aux_0 = "D:/resultat_acquisition_babymind/config_scriptApplib_files/aux_config_b0.xml"
self.config_file_aux_1 = "D:/resultat_acquisition_babymind/config_scriptApplib_files/aux_config_b1.xml"
#self.script_cs="D:/resultat_acquisition_babymind/config_scriptApplib_files/daq-tdm-Applib-slotArray-v1.cs"
self.script_cs = "D:/resultat_acquisition_babymind/config_scriptApplib_files/scriptmainargs.cs"
#configuration regard for the mapping.
self.order_list_of_pixels_in_reading_temperature = [98, 99, 112, 113, 87, 88, 100, 101, 76, 89, 90, 102, 124, 125,
135, 136, 114, 115, 126, 127, 103, 116, 117, 128, 137, 138, 142,
143, 131, 132, 139, 140, 123, 133, 134, 141, 118, 119, 129, 130,
108, 109, 120, 121, 97, 110, 111, 122, 39, 50, 49, 61, 51, 63,
62, 73, 75, 74, 86, 85, 60, 71, 70, 82, 72, 84, 83, 94, 96, 95,
107, 106, 12, 23, 22, 34, 24, 36, 35, 46, 48, 47, 59, 58, 1, 6,
5, 13, 7, 15, 14, 25, 27, 26, 38, 37, 78, 66, 54, 42, 77, 65, 53,
41, 64, 52, 40, 28, 30, 18, 10, 4, 29, 17, 9, 3, 16, 8, 2, 0, 57,
45, 33, 21, 56, 44, 32, 20, 43, 31, 19, 11, 105, 93, 81, 69, 104,
92, 80, 68, 91, 79, 67, 55]
self.flag_store_temperature = 0 # flag to store temperature
self.temperature_file = "D:/resultat_acquisition_babymind/folder_to_test_readout_temperature/push_pull_T.txt" #temperature file where i will store temperature
#initialisation variables
self.list_mean_cosmicray_rate_HG=[]
self.list_std_cosmicray_rate_HG=[]
self.list_mean_cosmicray_rate_LG=[]
self.list_std_cosmicray_rate_LG=[]
self.list_mean_cosmicray_rate_tot=[]
self.list_std_cosmicray_rate_tot=[]
self.list_mean_trigger_rate_ampli=[]
self.list_std_trigger_rate_ampli=[]
self.list_mean_trigger_rate_tot=[]
self.list_std_trigger_rate_tot=[]
#this is the variables to evaluate npe with spectrum
self.pedestal_LG=[15]*144
self.Gain_LG=[4.5]*144
self.pedestal_HG = [144]*144
self.Gain_HG = [47]*144
self.pedestal_tot = [0]*144
self.Gain_tot = [3]*144
# the global histogram will be performing here
self.bin_array_HG = [np.arange(0, 150, round(x + y)) for x, y in zip(self.pedestal_HG, self.Gain_HG)]
self.bin_array_LG = [np.arange(0, 150, round(x + y)) for x, y in zip(self.pedestal_LG, self.Gain_LG)]
self.bin_array_tot = [np.arange(0, 150, round(x + y)) for x, y in zip(self.pedestal_tot, self.Gain_tot)]
#self.my_global_histogram_HG = []
#self.my_global_histogram_LG = []
self.old_dict_pixelid_values_HG_for_histo_global = dict(
(i, Hist1D_global(self.bin_array_HG[i], 0, 5000, i)) for i in np.arange(144))
self.old_dict_pixelid_values_LG_for_histo_global = dict(
(i, Hist1D_global(self.bin_array_LG[i], 0, 5000, i)) for i in np.arange(144))
self.old_dict_pixelid_values_tot_for_histo_global = dict(
(i, Hist1D_global(self.bin_array_tot[i], 0, 5000, i)) for i in np.arange(144))
self.comm=comm
self.usb2can=usb2can
# self.power_supply=power_supply
self.var_pause_restart = 1 # initial value for the pause restart (b5) button
self.flag_active_draw_button_for_histo_parent = 0 #This flag is to know if i can have data to draw it in histogramm
self.var_global_local_histo=1
self.flag_test_if_i_operate_global_local_histo = 0 # flag to test if i m enter in the function global or local plot histo
self.flag_draw_trigger_rate=False
self.flag_finish_function_get_boards_values_from_file = False # this is to false initialisation indication for function get_boards_values_from_fil
self.flag_record_data_in_queu_for_analyse = 0
self.fen1.grid()
#Define widgets,Canvas,...
self.txt2 = Label(self.fen1, text='Gain(HG/LG) :')
self.txt2.grid(row=3, column=1, sticky='NSEW')
self.value_hg_or_lg = StringVar()
self.value_hg_or_lg.set("LG")
self.bouton1 = Radiobutton(self.fen1, text="HG", variable=self.value_hg_or_lg, value="HG")
self.bouton1.grid(row=3, column=2, sticky='NSEW')
self.bouton2 = Radiobutton(self.fen1, text="LG", variable=self.value_hg_or_lg, value="LG")
self.bouton2.grid(row=3, column=3, sticky='NSEW')
self.bouton3 = Radiobutton(self.fen1, text="TOT", variable=self.value_hg_or_lg, value="TOT")
self.bouton3.grid(row=3, column=4, sticky='NSEW')
self.b3 = Button(self.fen1, text='Start', command=self.start_it)
self.b3.grid(row=2, column=5, sticky='NSEW')
self.b4 = Button(self.fen1, text='Stop', cofile_for_acquisition_datammand=self.stop_it)
self.b4.grid(row=3, column=5, sticky='NSEW')
self.txt4 = Label(self.fen1, text="Choose trigger conf:")
self.txt4.grid(row=4, column=1, sticky='NSEW')
self.var_pixel_trigger_configuration = StringVar(self.fen1)
self.var_pixel_trigger_configuration.set("1")
self.entr4 = Spinbox(self.fen1, from_=1, to=3 , textvariable=self.var_pixel_trigger_configuration)
self.entr4.grid(row=4, column=2)
self.b5 = Button(self.fen1, text='Pause', command=self._pause)
self.b5.grid(row=4, column=5, sticky='NSEW')
self.txt5 = Label(self.fen1, text='Average DAC in trigger \n choosen :')
self.txt5.grid(row=5, column=1, sticky='NSEW')
self.var_pixels_trigger_values = StringVar(self.fen1)
self.var_pixels_trigger_values.set(0)
self.entr5 = Entry(self.fen1, textvariable=self.var_pixels_trigger_values)
self.entr5.grid(row=5, column=2,sticky=E)
self.choices_data_display = ["DAC", "PE", "Eie" , "Temp"]
self.variable_choices = StringVar(self.fen1)
self.variable_choices.set("DAC")
self.box_choice = ttk.Combobox(self.fen1, textvariable=self.variable_choices,values=self.choices_data_display)
self.box_choice.grid(row=5, column=4, sticky='NSEW')
self.b6 = Button(self.fen1, text='Quitter', command=self._quit)
self.b6.grid(row=5, column=5, sticky='NSEW')
self.txt6 = Label(self.fen1, text='choose pixel \n for histogramm:')
self.txt6.grid(row=6, column=2, sticky='NSEW')
self.var_pixel_in_histo = StringVar(self.fen1)
self.var_pixel_in_histo.set("0")
self.entr6 = Spinbox(self.fen1, from_=0, to=144, textvariable=self.var_pixel_in_histo)
self.entr6.grid(row=6, column=3)
self.b7 = Button(self.fen1, text='Draw', command=self._trace_histo_pixel)
self.b7.grid(row=6, column=4)
self.txt9 = Label(self.fen1, text='choose time \n for data \n acquisition(sec):')
self.txt9.grid(row=7, column=1, sticky='NSEW')
file_for_acquisition_data
self.var_time_in_data_acquisition = StringVar(self.fen1)
self.var_time_in_data_acquisition.set("280")
self.entr9 = Spinbox(self.fen1, from_=0, to=18000, textvariable=self.var_time_in_data_acquisition)
self.entr9.grid(row=7, column=2)
self.txt10 = Label(self.fen1, text='choose time \n for event \n display(msec):')
self.txt10.grid(row=7, column=3, sticky='NSEW')
self.var_time_in_event_display = StringVar(self.fen1)
self.var_time_in_event_display.set("1000")
self.entr10 = Spinbox(self.fen1, from_=0, to=300000, textvariable=self.var_time_in_event_display)
self.entr10.grid(row=7, column=4)
self.txt12 = Label(self.fen1, text='IN section below,Choose parameters to draw trigger rate \n and cosmic rays flux', borderwidth=2, relief="solid", bg='white')
self.txt12.grid(row=8, column=1, rowspan=1, columnspan=5, sticky='NSEW')
self.txt13 = Label(self.fen1, text='choose\n Threshold\n FROM:')
self.txt13.grid(row=9, column=1, sticky='NSEW')
self.var_threshold_in_trigger_rate_draw_0 = StringVar(self.fen1)
self.var_threshold_in_trigger_rate_draw_0.set("0")
self.entr11 = Spinbox(self.fen1, from_=0, to=1023, textvariable=self.var_threshold_in_trigger_rate_draw_0)
self.entr11.grid(row=9, column=2)
self.txt14 = Label(self.fen1, text='TO:')
self.txt14.grid(row=9, column=3, sticky='NSEW')
self.var_threshold_in_trigger_rate_draw_1 = StringVar(self.fen1)
self.var_threshold_in_trigger_rate_draw_1.set("200")
self.entr12 = Spinbox(self.fen1, from_=0, to=1023, textvariable=self.var_threshold_in_trigger_rate_draw_1)
self.entr12.grid(row=9, column=4)
self.b12 = Button(self.fen1, text='Draw trigger effisciency \n and cosmic ray flux', command=self._draw_trigger_rate_and_cosmic_flux)
self.b12.grid(row=9, column=5)
self.choices_threshold_variation = ["LG", "HG"]
self.variable_threshold = StringVar(self.fen1)
self.variable_threshold.set("LG")
self.threshold_choices = ttk.Combobox(self.fen1, textvariable=self.variable_threshold, values=self.choices_threshold_variation)
self.threshold_choices.grid(row=10, column=2, sticky='NSEW')
self.txt15 = Label(self.fen1, text='STEP OF:')
self.txt15.grid(row=10, column=3, sticky='NSEW')
self.var_step_threshold_in_trigger_rate_draw= StringVar(self.fen1)
self.var_step_threshold_in_trigger_rate_draw.set("20")
self.entr13 = Spinbox(self.fen1, from_=1, to=300, textvariable=self.var_step_threshold_in_trigger_rate_draw)
self.entr13.grid(row=10, column=4)
self.var_text_in_GUI = StringVar()
self._update_box_messages('Welcome to this Event Viewer!!')
self.txt11= Label(self.fen1, textvariable=self.var_text_in_GUI,borderwidth=2, relief="solid", bg='white', height=10, width=90, wraplength=500)
self.txt11.grid(row=12, column=1, rowspan=2,columnspan=5, sticky='NSEW')
self.reatribute_id_pixels = self.make_mini_cam_mathieu_with_node(23.2) # one method to Create and Make the mapping of minicamera
self.find_neighboors_pixels_by_my_method() # method to find the neighboors of pixels
if self.flag_start==0:
self.data_electronics_LG=np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
self.data_electronics_HG = np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
self.list_of_pixels_on_events=[]
self.number_figure = 0 # when you save events, figure number begin by this
self.fig_fen1 = plt.figure(facecolor="green")
self.axes_fen1 = self.fig_fen1.add_subplot(111)
self.canvas_fen1 = FigureCanvasTkAgg(self.fig_fen1, master=self.fen1)
self.canvas_fen1.get_tk_widget().grid(row=1, column=6, rowspan=8, padx=10, pady=5, sticky='NSEW')
self.toolbar_frame_fen1 = Frame(self.fen1, highlightcolor="red", highlightthickness=1, highlightbackground="blue")
self.toolbar_frame_fen1.grid(row=1, column=6)
self.toolbar_fen1 = NavigationToolbar2Tk(self.canvas_fen1, self.toolbar_frame_fen1)
self.canvas_fen1._tkcanvas.grid(row=1, column=6, rowspan=8, padx=10, pady=5, sticky='NSEW')
self.canvas_fen1.show()
if self.box_choice.get() == "Temp":
self.norm1 = matplotlib.colors.Normalize(0, 35)
else:
if self.value_hg_or_lg.get() == "HG":
self.norm1 = matplotlib.colors.Normalize(np.min(self.data_electronics_HG), np.max(self.data_electronics_HG))
elif self.value_hg_or_lg.get() == "LG":
self.norm1 = matplotlib.colors.Normalize(np.min(self.data_electronics_LG), np.max(self.data_electronics_LG))
elif self.value_hg_or_lg.get() == "TOT":
self.norm1 = matplotlib.colors.Normalize(np.min(self.data_electronics_tot), np.max(self.data_electronics_tot))
self.cmap1 = matplotlib.cm.ScalarMappable(norm=self.norm1, cmap=matplotlib.cm.jet)
self.cmap1.set_array([])
self.cb_fen1 = self.fig_fen1.colorbar(self.cmap1) # , ticks=facecolor)
self.dict_polygones={}
list_centers_xs = []
list_centers_ys = []
for pixels_id, polygons_data in self.reatribute_id_pixels.items():
list_xs_ys = [(polygons_data[0][0][i], polygons_data[0][1][i]) for i in range(6)]
list_centers_xs.append(polygons_data[1][0])
list_centers_ys.append(polygons_data[1][1])
# if you want to draw the camera pixels id in the camera
self.draw_camera_pixel_ids(polygons_data[1][0], polygons_data[1][1], pixels_id, self.axes_fen1)
if self.box_choice.get() == "Temp":
self.polygon = Polygon(list_xs_ys, closed=True,
edgecolor="blue")
else:
if self.value_hg_or_lg.get() == "LG":
self.polygon = Polygon(list_xs_ys, closed=True,
edgecolor="blue")
if self.value_hg_or_lg.get() == "HG":
self.polygon = Polygon(list_xs_ys, closed=True,
edgecolor="blue")
if self.value_hg_or_lg.get() == "TOT":
self.polygon = Polygon(list_xs_ys, closed=True,
edgecolor="blue")
self.axes_fen1.add_patch(self.polygon)
self.dict_polygones[pixels_id]=self.polygon
self.plots_hex_in_canvas_pdp()
# this is to adapt size of the widgets,canvas,script_file_for_acquisition_data... with the size of window
self.fen1.rowconfigure(0, weight=1)
self.fen1.rowconfigure(1, weight=1)
self.fen1.rowconfigure(2, weight=1)
self.fen1.rowconfigure(3, weight=1)
self.fen1.rowconfigure(4, weight=1)
self.fen1.rowconfigure(5, weight=1)
self.fen1.rowconfigure(6, weight=1)
self.fen1.rowconfigure(7, weight=1)
self.fen1.rowconfigure(8, weight=1)
self.fen1.rowconfigure(9, weight=1)
self.fen1.columnconfigure(0, weight=1)
self.fen1.columnconfigure(1, weight=1)
self.fen1.columnconfigure(2, weight=1)
self.fen1.columnconfigure(3, weight=1)
self.fen1.columnconfigure(4, weight=1)
self.fen1.columnconfigure(5, weight=1)
self.fen1.columnconfigure(6, weight=1)
self.fen2=None # this condition is imperative to test if child windfow fen2 is open or not
self.fen3 = None # this condition is imperative to test if child windfow fen3 is open or not
self.b4.config(state="disabled") # disable the stop button
self.b5.config(state="disabled") # disable the pause button
self.b7.config(state="disabled") # disable the draw histogram button of the parent window
self.MCB_BOARD_ID = 0
self._boardIds = [self.MCB_BOARD_ID, 2]
# DAQ parameters, will stop on DAQ time reached or File Limit reached
self._DAQ_TIME = 10000 # in ms
self._FILE_LIMIT = 20000 # in Ko
self._SLEEP_TIME = 50 # ms
self.l_timeEnd = self._DAQ_TIME / self._SLEEP_TIME
# Readout parameters
self.l_enableReadoutOnSpillGate = "false"
self.l_enableGtrigOnlyOnSpill = "false"
self.l_syncResetEn = "true" # enable Spill & GTRIG time/tag counters reset
self.l_enableGtrig = "true" # DAQ will see the GTRIG beacons, set it to false if you want only amplitude for histograms
self.beforeConfigure = "false"
self.flag_can_stop_all = 0
def _update_box_messages(self,messages):
'''this function is to update the messages in box messages(Label self txt7)'''
print(messages)
"""if self.test_messages_old==0:
self.var_text_in_GUI.set('%s' % messages)
self.test_messages_old=1
self.messages_old=messages
else:
self.var_text_in_GUI.set(("%s" +"\n %s") %(self.messages_old,messages))
self.messages_old=(("%s" +"\n %s") %(self.messages_old,messages))
self.fen1.update_idletasks()"""
    def _trace_histo_pixel(self):
        '''Create the child window, canvas and buttons where the per-pixel
        histogram will be plotted.'''
        self.fen2 = Toplevel(self.fen1)  # draw the child window of window fen1
        self.txt8 = Label(self.fen2, text='choose another pixel')
        self.txt8.grid(row=8, column=1, sticky='NSEW')
        # pre-fill the pixel entry with the pixel chosen in the parent window
        self.var_entr7_pixels_to_histo = StringVar(self.fen2, value=int(self.entr6.get()))
        self.entr7 = Entry(self.fen2, textvariable=self.var_entr7_pixels_to_histo)
        self.entr7.grid(row=8, column=2)
        self.b8 = Button(self.fen2, text='Pause', command=self._pause)
        self.b8.grid(row=8, column=5, sticky='NSEW')
        # keep the Pause/Continue label in sync with the parent window's button
        self.b8["text"] = self.b5["text"]
        self.b9 = Button(self.fen2, text='Global', command=self._plot_global_histo)
        self.b9.grid(row=8, column=3, sticky='NSEW')
        self.b10 = Button(self.fen2, text='Close', command=self._close_window_histogramm)
        self.b10.grid(row=8, column=7, sticky='NSEW')
        self.b7.config(state="disabled")  # disable the draw histogram button of the parent window
        self._trace_histo_pixel_draw_and_plot()  # draw this histogram
        # this is to adapt size of the widgets, canvas, ... with the size of window
        # (column 4 is deliberately left out; it is configured in _plot_global_histo)
        self.fen2.rowconfigure(0, weight=1)
        self.fen2.rowconfigure(1, weight=1)
        self.fen2.rowconfigure(2, weight=1)
        self.fen2.rowconfigure(3, weight=1)
        self.fen2.rowconfigure(4, weight=1)
        self.fen2.rowconfigure(5, weight=1)
        self.fen2.rowconfigure(6, weight=1)
        self.fen2.rowconfigure(7, weight=1)
        self.fen2.rowconfigure(8, weight=1)
        self.fen2.columnconfigure(0, weight=1)
        self.fen2.columnconfigure(1, weight=1)
        self.fen2.columnconfigure(2, weight=1)
        self.fen2.columnconfigure(3, weight=1)
        self.fen2.columnconfigure(5, weight=1)
        self.fen2.columnconfigure(6, weight=1)
        self.fen2.columnconfigure(7, weight=1)
        # route the window-manager close button to _close_window_histogramm
        self.fen2.protocol("WM_DELETE_WINDOW", self._close_window_histogramm)
def _close_window_histogramm(self):
'''This function is to close the child window where histogramm have been plotting'''
self.b7.config(state="active") # Activate the draw button of the parent window
if self.var_global_local_histo !=1: # test if i close because i have been in function global or local histo00
# i activate all buttons,entry deactivated
my_button_list = [self.bouton1, self.bouton2,self.bouton3, self.b5, self.b6, self.b8]
my_entry_list = [self.entr4, self.entr5, self.entr6]
for item in my_button_list:
item.config(state="active")
for item in my_entry_list:
item.config(state="normal")
self.var_global_local_histo =1 #initialize the variable of global or local plot histogramm
self.b11.destroy()
self.flag_test_if_i_operate_global_local_histo = 0 # flag to test if i m enter in the function global or local plot histo
self.fen2.destroy() # this is necessary on Windows to prevent
self.fen2 = None
print(self.fen2)
    def _plot_global_histo(self):
        '''Toggle between plotting the global or the local histogram of the
        selected pixel.  The b9 button label reflects the mode the NEXT
        click will switch to.'''
        if self.flag_test_if_i_operate_global_local_histo == 0:
            # first entry into this function: snapshot the local histograms
            self.flag_test_if_i_operate_global_local_histo = 1
            self.last_old_dict_pixelid_values_LG_for_histo_local = self.old_dict_pixelid_values_LG_for_histo_local.copy()
            self.last_old_dict_pixelid_values_HG_for_histo_local = self.old_dict_pixelid_values_HG_for_histo_local.copy()
            self.last_old_dict_pixelid_values_tot_for_histo_local = self.old_dict_pixelid_values_tot_for_histo_local.copy()
        # odd counter value means we are switching INTO global mode
        if self.var_global_local_histo % 2 != 0:
            #self.b8.config(state="disabled")
            self.b9["text"] = "Local"
            self.var_global_local_histo += 1
            #if self.b5["text"] == "Pause": # i check if i am in pause mode or not
            #    self._pause() # we need to pause the event display to avoid conflict with very heavy file
            if self.flag_test_if_i_operate_global_local_histo == 1:
                # freeze the UI: disable all buttons/entries while in global mode
                my_button_list = [self.bouton1, self.bouton2, self.bouton3, self.b5, self.b6]
                my_entry_list = [self.entr4, self.entr5, self.entr6]
                for item in my_button_list:
                    item.config(state="disabled")
                for item in my_entry_list:
                    item.config(state="disabled")
                # add a dedicated draw button for the global histogram
                self.b11 = Button(self.fen2, text='Draw\nhisto', command=self._trace_histo_pixel_draw_and_plot)
                self.b11.grid(row=8, column=4, sticky='NSEW')
                self.fen2.columnconfigure(4, weight=1)
                self.flag_test_if_i_operate_global_local_histo = 2
        else:
            # even counter value: switch back to local mode and redraw
            self.b9["text"] = "Global"
            self.var_global_local_histo += 1
            #self.b8.config(state="active") # Activate the pause button of the parent window
            self._trace_histo_pixel_draw_and_plot()
def _trace_histo_pixel_draw_and_plot(self):
'''this function is to draw histogram in amplitude and tot of the pixels selected
since the event display started and in the intervalle of 4s'''
self.pixel_to_draw_histo = int(self.entr7.get()) # get the value of pixel of which we want to draw the histo
self._update_box_messages(("Drawing histogramm of pixel %s") % self.pixel_to_draw_histo)
if self.value_hg_or_lg.get() == "TOT":
print("ici on trace l'histogramme avec tot")
self.fig_histo = plt.figure(facecolor="green")
self.axs_histo_0 = self.fig_histo.add_subplot(311)
self.axs_histo_1 = self.fig_histo.add_subplot(312)
self.axs_histo_2 = self.fig_histo.add_subplot(313)
try:
if self.flag_test_if_i_operate_global_local_histo != 0: # flag to test if i m enter in the function global or local plot histo
if self.b9["text"] == "Global":
if self.b8["text"] == "Continue":
self.last_old_dict_pixelid_values_HG_for_histo_local = self.old_dict_pixelid_values_HG_for_histo_local.copy()
self.last_old_dict_pixelid_values_LG_for_histo_local = self.old_dict_pixelid_values_LG_for_histo_local.copy()
self.last_old_dict_pixelid_values_tot_for_histo_local = self.old_dict_pixelid_values_tot_for_histo_local.copy()
bins_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_hg = bins_hg[1] - bins_hg[0]
edges_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_hg[np.where(hist_hg == 0)]
bin_c = edges_hg[np.where(hist_hg != 0)]
try:
x_min_hg = bin_c[0]
except:
x_min_hg = 0
try:
x_max_hg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_hg = 5000
self.axs_histo_0.bar(bins_hg, hist_hg, width_hg, align="center", edgecolor='black')
bins_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_lg = bins_lg[1] - bins_lg[0]
edges_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_lg[np.where(hist_lg == 0)]
bin_c = edges_lg[np.where(hist_lg != 0)]
try:
x_min_lg = bin_c[0]
except:
x_min_lg = 0
try:
x_max_lg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_lg = 5000
self.axs_histo_1.bar(bins_lg, hist_lg, width_lg, align="center", edgecolor='black')
bins_tot = \
self.old_dict_pixelid_values_tot_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_tot = \
self.old_dict_pixelid_values_tot_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_tot = bins_tot[1] - bins_tot[0]
edges_tot = \
self.old_dict_pixelid_values_tot_for_histo_local[
self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_tot[np.where(hist_tot == 0)]
bin_c = edges_tot[np.where(hist_tot != 0)]
try:
x_min_tot = bin_c[0]
except:
x_min_tot = 0
try:
x_max_tot = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_tot = 5000
self.axs_histo_1.bar(bins_tot, hist_tot, width_tot, align="center", edgecolor='black')
else:
self.last_old_dict_pixelid_values_HG_for_histo_local = self.old_dict_pixelid_values_HG_for_histo_local.copy()
self.last_old_dict_pixelid_values_LG_for_histo_local = self.old_dict_pixelid_values_LG_for_histo_local.copy()
self.last_old_dict_pixelid_values_tot_for_histo_local = self.old_dict_pixelid_values_tot_for_histo_local.copy()
bins_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_hg = bins_hg[1] - bins_hg[0]
edges_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_hg[np.where(hist_hg == 0)]
bin_c = edges_hg[np.where(hist_hg != 0)]
try:
x_min_hg = bin_c[0]
except:
x_min_hg = 0
try:
x_max_hg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_hg = 5000
self.axs_histo_0.bar(bins_hg, hist_hg, width_hg, align="center", edgecolor='black')
bins_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_lg = bins_lg[1] - bins_lg[0]
edges_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_lg[np.where(hist_lg == 0)]
bin_c = edges_lg[np.where(hist_lg != 0)]
try:
x_min_lg = bin_c[0]
except:
x_min_lg = 0
try:
x_max_lg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_lg = 5000
self.axs_histo_1.bar(bins_lg, hist_lg, width_lg, align="center", edgecolor='black')
bins_tot = \
self.old_dict_pixelid_values_tot_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_tot = \
self.old_dict_pixelid_values_tot_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_tot = bins_tot[1] - bins_tot[0]
edges_tot = \
self.old_dict_pixelid_values_tot_for_histo_local[
self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_tot[np.where(hist_tot == 0)]
bin_c = edges_tot[np.where(hist_tot != 0)]
try:
x_min_tot = bin_c[0]
except:
x_min_tot = 0
try:
x_max_tot = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_tot = 5000
self.axs_histo_1.bar(bins_tot, hist_tot, width_tot, align="center", edgecolor='black')
else:
bins_hg = \
self.old_dict_pixelid_values_HG_for_histo_global[self.pixel_to_draw_histo].bins_global[
self.pixel_to_draw_histo]
hist_hg = \
self.old_dict_pixelid_values_HG_for_histo_global[self.pixel_to_draw_histo].hist_global[
self.pixel_to_draw_histo]
width_hg = bins_hg[1] - bins_hg[0]
edges_hg = \
self.old_dict_pixelid_values_HG_for_histo_global[self.pixel_to_draw_histo].edges_global[
self.pixel_to_draw_histo]
bin_rem = edges_hg[np.where(hist_hg == 0)]
bin_c = edges_hg[np.where(hist_hg != 0)]
try:
x_min_hg = bin_c[0]
except:
x_min_hg = 0
try:
x_max_hg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_hg = 5000
self.axs_histo_0.bar(bins_hg, hist_hg, width_hg, align="center", edgecolor='black')
bins_lg = \
self.old_dict_pixelid_values_LG_for_histo_global[self.pixel_to_draw_histo].bins_global[
self.pixel_to_draw_histo]
hist_lg = \
self.old_dict_pixelid_values_LG_for_histo_global[self.pixel_to_draw_histo].hist_global[
self.pixel_to_draw_histo]
width_lg = bins_lg[1] - bins_lg[0]
edges_lg = \
self.old_dict_pixelid_values_LG_for_histo_global[self.pixel_to_draw_histo].edges_global[
self.pixel_to_draw_histo]
bin_rem = edges_lg[np.where(hist_lg == 0)]
bin_c = edges_lg[np.where(hist_lg != 0)]
try:
x_min_lg = bin_c[0]
except:
x_min_lg = 0
try:
x_max_lg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_lg = 5000
self.axs_histo_1.bar(bins_lg, hist_lg, width_lg, align="center", edgecolor='black')
bins_tot = \
self.old_dict_pixelid_values_tot_for_histo_global[self.pixel_to_draw_histo].bins_global[
self.pixel_to_draw_histo]
hist_tot = \
self.old_dict_pixelid_values_tot_for_histo_global[self.pixel_to_draw_histo].hist_global[
self.pixel_to_draw_histo]
width_tot = bins_tot[1] - bins_tot[0]
edges_tot = \
self.old_dict_pixelid_values_tot_for_histo_global[self.pixel_to_draw_histo].edges_global[
self.pixel_to_draw_histo]
bin_rem = edges_tot[np.where(hist_tot == 0)]
bin_c = edges_tot[np.where(hist_tot != 0)]
try:
x_min_tot = bin_c[0]
except:
x_min_tot = 0
try:
x_max_tot = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_tot = 5000
self.axs_histo_1.bar(bins_tot, hist_tot, width_tot, align="center", edgecolor='black')
else:
self.axs_histo_0.hist(self.old_dict_pixelid_values_HG_for_histo_local[self.pixel_to_draw_histo],
bins=np.arange(0, np.max(self.old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo]), 3))
self.axs_histo_1.hist(self.old_dict_pixelid_values_tot_for_histo_local[self.pixel_to_draw_histo],
bins=np.arange(0, np.max(self.old_dict_pixelid_values_tot_for_histo_local[
self.pixel_to_draw_histo]), 3))
self.axs_histo_2.hist(self.old_dict_pixelid_values_tot_for_histo_local[self.pixel_to_draw_histo],
bins=np.arange(0, np.max(self.old_dict_pixelid_values_tot_for_histo_local[
self.pixel_to_draw_histo]), 1))
except KeyError:
image_warning_pixel_triggered_or_not = image.imread(
"D:/resultat_acquisition_babymind/warning_pixel_no_triggered_2.png")
self.axs_histo_0.imshow(image_warning_pixel_triggered_or_not)
self.axs_histo_1.imshow(image_warning_pixel_triggered_or_not)
self.axs_histo_2.imshow(image_warning_pixel_triggered_or_not)
print("There is not this pixels key in the pixels who have triggered")
self.fig_histo.tight_layout()
self.axs_histo_0.set_yscale("log")
self.axs_histo_0.set_title("Histogram in HG")
self.axs_histo_0.grid()
self.axs_histo_1.set_yscale("log")
self.axs_histo_1.set_title("Histogram in LG")
self.axs_histo_1.grid()
self.axs_histo_2.set_yscale("log")
self.axs_histo_2.set_title("Histogram in tot")
self.axs_histo_2.grid()
else:
print("ici on trace l'histo sans tot")
self.fig_histo = plt.figure(facecolor="green")
self.axs_histo_0 = self.fig_histo.add_subplot(211)
self.axs_histo_1 = self.fig_histo.add_subplot(212)
try:
if self.flag_test_if_i_operate_global_local_histo != 0: # flag to test if i m enter in the function global or local plot histo
if self.b9["text"] == "Global":
if self.b8["text"] == "Continue":
bins_hg = \
self.last_old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_hg = \
self.last_old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_hg = bins_hg[1] - bins_hg[0]
edges_hg = \
self.last_old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_hg[np.where(hist_hg == 0)]
bin_c = edges_hg[np.where(hist_hg != 0)]
try:
x_min_hg = bin_c[0]
except:
x_min_hg = 0
try:
x_max_hg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_hg = 5000
self.axs_histo_0.bar(bins_hg, hist_hg, width_hg, align="center", edgecolor='black')
bins_lg = \
self.last_old_dict_pixelid_values_LG_for_histo_local[
self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_lg = \
self.last_old_dict_pixelid_values_LG_for_histo_local[
self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_lg = bins_lg[1] - bins_lg[0]
edges_lg = \
self.last_old_dict_pixelid_values_LG_for_histo_local[
self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_lg[np.where(hist_lg == 0)]
bin_c = edges_lg[np.where(hist_lg != 0)]
try:
x_min_lg = bin_c[0]
except:
x_min_lg = 0
try:
x_max_lg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_lg = 5000
self.axs_histo_1.bar(bins_lg, hist_lg, width_lg, align="center", edgecolor='black')
else:
self.last_old_dict_pixelid_values_HG_for_histo_local = self.old_dict_pixelid_values_HG_for_histo_local.copy()
self.last_old_dict_pixelid_values_LG_for_histo_local = self.old_dict_pixelid_values_LG_for_histo_local.copy()
bins_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_hg = bins_hg[1] - bins_hg[0]
edges_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_hg[np.where(hist_hg == 0)]
bin_c = edges_hg[np.where(hist_hg != 0)]
try:
x_min_hg = bin_c[0]
except:
x_min_hg = 0
try:
x_max_hg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_hg = 5000
self.axs_histo_0.bar(bins_hg, hist_hg, width_hg, align="center", edgecolor='black')
bins_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_lg = bins_lg[1] - bins_lg[0]
edges_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_lg[np.where(hist_lg == 0)]
bin_c = edges_lg[np.where(hist_lg != 0)]
try:
x_min_lg = bin_c[0]
except:
x_min_lg = 0
try:
x_max_lg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_lg = 5000
self.axs_histo_1.bar(bins_lg, hist_lg, width_lg, align="center", edgecolor='black')
else:
bins_hg = \
self.old_dict_pixelid_values_HG_for_histo_global[self.pixel_to_draw_histo].bins_global[
self.pixel_to_draw_histo]
hist_hg = \
self.old_dict_pixelid_values_HG_for_histo_global[self.pixel_to_draw_histo].hist_global[
self.pixel_to_draw_histo]
width_hg = bins_hg[1] - bins_hg[0]
edges_hg = \
self.old_dict_pixelid_values_HG_for_histo_global[self.pixel_to_draw_histo].edges_global[
self.pixel_to_draw_histo]
bin_rem = edges_hg[np.where(hist_hg == 0)]
bin_c = edges_hg[np.where(hist_hg != 0)]
try:
x_min_hg = bin_c[0]
except:
x_min_hg = 0
try:
x_max_hg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_hg = 5000
self.axs_histo_0.bar(bins_hg, hist_hg, width_hg, align="center", edgecolor='black')
bins_lg = \
self.old_dict_pixelid_values_LG_for_histo_global[self.pixel_to_draw_histo].bins_global[
self.pixel_to_draw_histo]
hist_lg = \
self.old_dict_pixelid_values_LG_for_histo_global[self.pixel_to_draw_histo].hist_global[
self.pixel_to_draw_histo]
width_lg = bins_lg[1] - bins_lg[0]
edges_lg = \
self.old_dict_pixelid_values_LG_for_histo_global[self.pixel_to_draw_histo].edges_global[
self.pixel_to_draw_histo]
bin_rem = edges_lg[np.where(hist_lg == 0)]
bin_c = edges_lg[np.where(hist_lg != 0)]
try:
x_min_lg = bin_c[0]
except:
x_min_lg = 0
try:
x_max_lg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_lg = 5000
self.axs_histo_1.bar(bins_lg, hist_lg, width_lg, align="center", edgecolor='black')
else:
bins_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_hg = bins_hg[1] - bins_hg[0]
edges_hg = \
self.old_dict_pixelid_values_HG_for_histo_local[
self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_hg[np.where(hist_hg == 0)]
bin_c = edges_hg[np.where(hist_hg != 0)]
try:
x_min_hg = bin_c[0]
except:
x_min_hg = 0
try:
x_max_hg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_hg = 5000
self.axs_histo_0.bar(bins_hg, hist_hg, width_hg, align="center", edgecolor='black')
bins_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].bins_local[
self.pixel_to_draw_histo]
hist_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].hist_local[
self.pixel_to_draw_histo]
width_lg = bins_lg[1] - bins_lg[0]
edges_lg = \
self.old_dict_pixelid_values_LG_for_histo_local[self.pixel_to_draw_histo].edges_local[
self.pixel_to_draw_histo]
bin_rem = edges_lg[np.where(hist_lg == 0)]
bin_c = edges_lg[np.where(hist_lg != 0)]
try:
x_min_lg = bin_c[0]
except:
x_min_lg = 0
try:
x_max_lg = [bin_rem[i] for i in bin_rem if i > bin_c[-1]][0]
except:
x_max_lg = 5000
self.axs_histo_1.bar(bins_lg, hist_lg, width_lg, align="center", edgecolor='black')
except KeyError:
image_warning_pixel_triggered_or_not = image.imread(
"D:/resultat_acquisition_babymind/warning_pixel_no_triggered_2.png")
self.axs_histo_0.imshow(image_warning_pixel_triggered_or_not)
self.axs_histo_1.imshow(image_warning_pixel_triggered_or_not)
print("There is not this pixels key in the pixels who have triggered")
self.fig_histo.tight_layout()
self.axs_histo_0.set_yscale("log")
self.axs_histo_0.set_title("Histogram in HG")
self.axs_histo_0.grid()
self.axs_histo_0.set_xlim(x_min_hg, x_max_hg)
self.axs_histo_1.set_yscale("log")
self.axs_histo_1.set_title("Histogram in LG")
self.axs_histo_1.grid()
self.axs_histo_0.set_xlim(x_min_lg, x_max_lg)
self.canvas_histo = FigureCanvasTkAgg(self.fig_histo, master=self.fen2)
self.canvas_histo.show()
self.canvas_histo.get_tk_widget().grid(row=1, column=1, rowspan=5, columnspan=6, padx=10, pady=5,
sticky='NSEW')
self.toolbar_frame_histo = Frame(self.fen2, highlightcolor="red", highlightthickness=1,
highlightbackground="blue")
self.toolbar_frame_histo.grid(row=0, column=1)
self.toolbar_histo = NavigationToolbar2Tk(self.canvas_histo, self.toolbar_frame_histo)
self.canvas_histo._tkcanvas.grid(row=1, column=1, rowspan=5, columnspan=6, padx=10, pady=5, sticky='NSEW')
def on_key_event(event):
print('you pressed %s' % event.key)
key_press_handler(event, self.canvas_histo, self.toolbar_histo)
self.canvas_histo.mpl_connect('key_press_event', on_key_event)
self.fen2.attributes("-topmost", True) # this is to maintain fen2 in front of all windows
def _pause(self):
''' this function is relatif to the command of the pause button (b5). when pause is applying
the Event display pause and when restart is applying, the event display contnue when
it has been paused'''
self.b4.config(state="disabled") # disable the stop button
if self.var_pause_restart%2 != 0:
self.flag_start=1
self.b5["text"]="Continue"
self.var_pause_restart+=1
self._update_box_messages("Events display has been paused. Click on Continue to continue")
if (self.fen2 is not None) and self.fen2.winfo_exists():
self.b8["text"] = self.b5["text"]
else:
self.flag_start = 0
self.b5["text"] = "Pause"
self.var_pause_restart += 1
self._update_box_messages("Events display will continu")
self.b4.config(
state="active") # Active the stop button
if (self.fen2 is not None) and self.fen2.winfo_exists():
self.b8["text"] = self.b5["text"]
self.start_it()
def _quit(self):
'''This function serve to quit application and close the GUI. It
is relatif to the command of the quit button(b6)'''
answer = messagebox.askyesnocancel("Quit", "Are You Sure?", icon='warning')
if answer:
# Close the socket server
try:
self.comm.deconnect()
time.sleep(1)
if self.usb2can.flag_connect_usb2can==1 or self.usb2can.flag_HV_ON==1:
self.usb2can.set_HV_OFF_PDP()
self.usb2can.shutdown_interface_usb2can_Ixxat()
#self.comm.deconnect()
# #self.power_supply.Communicate("Opall 0\n")
# power_supply.Communicate('INST OUT1\n')
# power_supply.Communicate('OUTP OFF\n')
# power_supply.Communicate('INST OUT2\n')
# power_supply.Communicate('OUTP OFF\n')
# power_supply.Communicate('INST OUT3\n')
# power_supply.Communicate('OUTP OFF\n')
time.sleep(1)
except:
# print("Error in stopped socket server")
self._update_box_messages("Error in stopped socket server")
sys.exit(1)
time.sleep(2)
if self.flag_1 != 0: #test if we are enter in the subprocess create file.this means that stop function has never been activated
self.stop_it() # stop properly all (socket server, file writting and reading, ...)
else:
self.fen1.quit() # stops mainloop
self.fen1.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
def stop_it(self):
'''This function serve to stop event display. You can restart but in this case all
the start acquisition restart proceeding. It is relatif to the command of the quit button(b4)'''
self.b4.config(state="disabled") # disable the stop button
self.b5.config(state="disabled") # disable the pause button
self.flag_start = 0
self.flag_stop=1
self.flag_active_draw_button_for_histo_parent = 0 # This flag is to know if i can have data to draw it in histogramm
"""if self.flag_1==1:
subprocess.call(['taskkill', '/F', '/T', '/PID', str(self.process)])
#print("Subproccess killed with success")
self._update_box_messages("Subproccess killed with success")
else:
#print("subprocess never begin.Continue with procedure of stop events display")
self._update_box_messages("subprocess never begin.Continue with procedure of stop events display")"""
self.flag_1 = 0
if self.flag_draw_trigger_rate == False:
#close new file 1
self.new_file1.close()
#print("new file were writting data close with success")
self._update_box_messages("new file were writting data close with success")
#close file who contain temperatures data recorded
if self.flag_store_temperature!=0:
self.temperature_file.close()
# send command to stop acquisition
try:
# socket.send(bytes('BoardLib.StopAcquisition()\r', "utf-8"))
self.comm.Communicate('BoardLib.StopAcquisition()\r')
except:
#print("Error in Stoping acquisition data")
self._update_box_messages("Error in Stoping acquisition data")
sys.exit(1)
#print("Acquisition of data via socket server has been stopped")
self._update_box_messages("Acquisition of data via socket server has been stopped")
time.sleep(2)
if self.usb2can.flag_connect_usb2can == 1 or self.usb2can.flag_HV_ON == 1:
self.usb2can.set_HV_OFF_PDP()
self.usb2can.shutdown_interface_usb2can_Ixxat()
#self.comm.deconnect()
""""
#send command to socket server to deactivate time over threshold in taking data
if self.value_get_data_with_tot.get():
try:
send command on socket server to activate tot
except:
self._update_box_messages("Error in activate time over threshold command")
self._update_box_messages("time over threshold has been deactivate")
time.sleep(2)
"""
self.flag_finish_function_get_boards_values_from_file = False # this is to false indication in finish function get_boards_values_from_fil
#print("Event display has been stopped by user")
self._update_box_messages("Event display has been stopped by user")
self.b3.config(state="active") # Active the start button to eventually restart event display
#self.b1.config(state="active") # Active the checkbutton tot
self.entr1.config(state="normal") # Active the entry spinbox threshold hardware
# #self.power_supply.Communicate("Opall 0\n")
# power_supply.Communicate('INST OUT1\n')
# power_supply.Communicate('OUTP OFF\n')
# power_supply.Communicate('INST OUT2\n')
# power_supply.Communicate('OUTP OFF\n')
# power_supply.Communicate('INST OUT3\n')
# power_supply.Communicate('OUTP OFF\n')
time.sleep(1)
def start_it(self):
'''This function serve to start event display. It is relatif to the command of the quit button(b4)'''
if self.box_choice.get()=="DAC":
self.txt5["text"] = "Average DAC in trigger \n choosen :"
elif self.box_choice.get()=="PE":
self.txt5["text"] = "Average PE in trigger \n choosen :"
elif self.box_choice.get()=="Eie":
self.txt5["text"] = "Average Eie in trigger \n choosen :"
elif self.box_choice.get()=="Temp":
print("Get temperatures \n of pixels")
self.txt5["text"] = "temperatures \n of pixels"
self.b3.config(state="disabled") # disable the start button to avoid multiple start event display
self.b4.config(state="disabled") # disable the pause button
self.b5.config(state="disabled") # disable the stop button
print(self.usb2can.flag_connect_usb2can)
if self.usb2can.flag_connect_usb2can == 1 and self.usb2can.flag_HV_ON==1:
if self.flag_record_data_in_queu_for_analyse == 0:
self.usb2can.get_temperature_PDP()
#time.sleep(.5)
#elif self.flag_record_data_in_queu_for_analyse == 1:
#time.sleep(0.5)
self.plots_hex_in_canvas_pdp()
else:
if self.b5["text"] == "Pause":
self._pause()
ans = "a"
print("Usb2can is not connected and HV of PDP is Off \n This means Data is not collected \n Do you still Want to display temperature \n (Yes or No) ")
while (ans not in ["Yes","No"]):
ans = input()
if ans=="Yes":
self.usb2can.connect_interface_usb2can_Ixxat()
time.sleep(1)
self.usb2can.set_HV_ON_PDP()
time.sleep(1)
self.usb2can.get_temperature_PDP()
#time.sleep(0.5)
self.plots_hex_in_canvas_pdp()
if ans=="No":
self.variable_choices.set("DAC")
continue
else:
print("Enter the correct answer \n (Yes or No) ")
if self.box_choice.get()!="Temp":
if self.flag_start == 0 and self.flag_stop==0:
#
self.flag_start = 1
if self.flag_1==0:
self.start_test_global = time.time()
self.flag_can_stop_all = 0
self.threshold_DAC = int(self.entr1.get())
self.threshold_HG = int(self.entr14.get())
self.threshold_LG = int(self.entr15.get())
self.b3.config(state="disabled") # disable the start button to avoid multiple start event display
self.b4.config(state="active") # enable the pause button
self.b5.config(state="active") # enable the stop button
#self.b1.config(state="disabled") # disabling the checkbutton tot
self.entr1.config(state="disabled") # disabling the entry spinbox threshold hardware
# #os.system('D:\\resultat_acquisition_babymind\\devcon.exe restart *ROOT_HUB20*')
# #time.sleep(5)
# #self.power_supply.__init__()
# # self.power_supply.Communicate("Opall 1\n")
# power_supply.Communicate('INST OUT1\n')
# power_supply.Communicate('OUTP ON\n')
# power_supply.Communicate('INST OUT2\n')
# power_supply.Communicate('OUTP ON\n')
# power_supply.Communicate('INST OUT3\n')
# power_supply.Communicate('OUTP ON\n')
#'''
if comm.connect_socket_server_on_or_off==0: #test if socket server has never been connected
try:
time.sleep(1)
self.comm.Connect()
except:
#print("Bad connection with socket server")
self._update_box_messages("Bad connection with socket server")
sys.exit(1)
#print("Connection with socket server Enable")
self._update_box_messages("Connection with socket server Enable")
time.sleep(2)
if self.usb2can.flag_connect_usb2can==0:
self.usb2can.connect_interface_usb2can_Ixxat()
time.sleep(0.5)
if self.usb2can.flag_HV_ON==0:
self.usb2can.set_HV_ON_PDP()
try:
# send command to socket server to make and download default configuration file
self.comm.Communicate('BoardLib.OpenConfigFile("{}")\r'.format(self.config_file_init_0))
for i in [0, 1, 2]:
self.comm.Communicate(
'BoardLib.SetVariable("ASICS.ASIC{}.GlobalControl.DAC10b",{})\r'.format(i,self.threshold_DAC))
self.comm.Communicate(
'BoardLib.SetVariable("ASICS.ASIC{}.GlobalControl.DAC10b_t",{})\r'.format(i,
self.threshold_DAC))
comm.Communicate(
'BoardLib.SetVariable("FPGA.ASIC{}.GlobalControl.L1ThresholdHG",{})\r'.format(i,self.threshold_HG))
comm.Communicate(
'BoardLib.SetVariable("FPGA.ASIC{}.GlobalControl.L1ThresholdLG",{})\r'.format(i,self.threshold_LG))
""""#send command to socket server to activate or deactivate time over threshold
if self.value_get_data_with_tot.get():
send command on socket server to activate tot
"""
self.comm.Communicate('BoardLib.BoardConfigure(SendVerifyApply)\r')
self.comm.Communicate('BoardLib.SaveConfigFile("{}")\r'.format(self.config_file_aux_0))
self.comm.Communicate('BoardLib.OpenConfigFile("{}")\r'.format(self.config_file_init_1))
for i in [0, 1, 2]:
self.comm.Communicate(
'BoardLib.SetVariable("ASICS.ASIC{}.GlobalControl.DAC10b",{})\r'.format(i,
self.threshold_DAC))
self.comm.Communicate(
'BoardLib.SetVariable("ASICS.ASIC{}.GlobalControl.DAC10b_t",{})\r'.format(i,
self.threshold_DAC))
comm.Communicate(
'BoardLib.SetVariable("FPGA.ASIC{}.GlobalControl.L1ThresholdHG",{})\r'.format(i,
self.threshold_HG))
comm.Communicate(
'BoardLib.SetVariable("FPGA.ASIC{}.GlobalControl.L1ThresholdLG",{})\r'.format(i,
self.threshold_LG))
""""#send command to socket server to activate or deactivate time over threshold
if self.value_get_data_with_tot.get():
send command on socket server to activate tot
"""
self.comm.Communicate('BoardLib.BoardConfigure(SendVerifyApply)\r')
self.comm.Communicate('BoardLib.SaveConfigFile("{}")\r'.format(self.config_file_aux_1))
except:
#print("Error in Assigning value to the DAC10b")
self._update_box_messages("Error in settings config file")
sys.exit(1)
#print("Configure parameters success")
self._update_box_messages("Configure parameters success")
time.sleep(2)
self._update_box_messages(" all setting Config files success")
self.get_entry_entr9 = int(self.entr9.get())
""""#If i m not drawing trigger rate, i erase all files of previous display
if self.flag_draw_trigger_rate == False:"""
folder_result_acquisition_babymind = "D:/resultat_acquisition_babymind/folder_result_acquisition_babymind" #folder where i store my files that interface produce
for file_object in os.listdir(folder_result_acquisition_babymind):
os.remove(os.path.join(folder_result_acquisition_babymind, file_object))
self.flag_1 = 1
if self.flag_draw_trigger_rate == False:
# start sub process get data from ctrock 7
self.th3 = threading.Thread(target=self.analyse_file_contain_rate_aux).start() # this is to start at same time two functions
#self.th3 =multiprocessing.Process(target=self.analyse_file_contain_rate_aux()).start()
time.sleep(10)
locked = True
file_object = None
while locked == True:
try:
# Opening file in append mode and read the first 8 characters.
self.new_file1 = open(self.new_file_to_write, "r+b", 8)
if self.new_file1:
locked = False
except:
locked = True
time.sleep(10)
finally:
if file_object:
self._update_box_messages("We open new file and begin to write in it")
self.b4.config(state="active") # Active the stop button
else:
self.analyse_file_contain_rate_aux()
if self.flag_draw_trigger_rate == False:
self.start_test=time.time()
#self.export_data_electronis_values()
elif self.flag_stop ==1:
self.flag_start = 0
self.flag_stop=0
self.flag_1 = 0
#print("I initialise different flags to stop event display")
self._update_box_messages("I initialise different flags to stop event display" )
    def analyse_file_contain_rate_aux(self):
        """Acquisition loop: repeatedly run the DAQ to record .daq files and
        queue them for analysis.

        Runs until the accumulated acquisition time exceeds the duration read
        from entry entr9 (``get_entry_entr9``).  The first recorded file also
        starts the ``get_boards_values_from_file`` analysis thread; later
        files are appended to the shared deque.  Guarded by
        ``flag_record_data_in_queu_for_analyse`` so only one loop runs at a
        time.  On exit the HV and the USB2CAN interface are powered down.
        """
        if self.flag_record_data_in_queu_for_analyse == 0:
            self.flag_record_data_in_queu_for_analyse += 1
            # Remove leftover files from a previous run.
            if os.path.exists(self.new_file_to_write):
                os.remove(self.new_file_to_write)
                print("%s has been deleted" % self.new_file_to_write)
            if os.path.exists(self.file_to_analyze_rate):
                os.remove(self.file_to_analyze_rate)
                print("%s has been deleted" % self.file_to_analyze_rate)
            # NOTE(review): temperature_file is used here as a path, but
            # elsewhere it is a file object being .close()d -- confirm the
            # attribute really holds a path at this point.
            if os.path.exists(self.temperature_file):
                os.remove(self.temperature_file)
                print("%s has been deleted" %self.temperature_file)
            '''
            if self.usb2can.flag_connect_usb2can==1:
                self.usb2can.shutdown_interface_usb2can_Ixxat()
                time.sleep(1)
            power_supply.Communicate('INST OUT1\n')
            power_supply.Communicate('OUTP OFF\n')
            power_supply.Communicate('INST OUT2\n')
            power_supply.Communicate('OUTP OFF\n')
            power_supply.Communicate('INST OUT3\n')
            power_supply.Communicate('OUTP OFF\n')
            time.sleep(1)
            power_supply.Communicate('INST OUT1\n')
            power_supply.Communicate('OUTP ON\n')
            power_supply.Communicate('INST OUT2\n')
            power_supply.Communicate('OUTP ON\n')
            power_supply.Communicate('INST OUT3\n')
            power_supply.Communicate('OUTP ON\n')
            time.sleep(5)
            self.usb2can.connect_interface_usb2can_Ixxat()
            time.sleep(1)
            self.usb2can.set_HV_ON_PDP()
            time.sleep(1)
            '''
            # Timing bookkeeping (died_time itself is currently unused).
            died_time=0
            self.rate_LG = 0
            self.rate_HG = 0
            time_spend_on_acquisition_data=0
            died_time_on_acquisition_data=0
            start_time_1=time.time()
            indice_file_taking_data=0
            buffer_size = 8
            #self.daq_tdm_Applib = 0
            # Keep recording files until the requested duration is reached.
            while time_spend_on_acquisition_data < self.get_entry_entr9:
                start_time_on_acquisition_data = time.time()
                aux_name_file_recorded_by_babymind = (
                    "D:/resultat_acquisition_babymind/folder_result_acquisition_babymind/test_{}.daq".format(indice_file_taking_data))
                print("Start proceess to read daq file and write data in new file")
                #run c sharp script to take data
                self.daq_tdm_Applib_slotArray_v1(aux_name_file_recorded_by_babymind)
                # Poll until the DAQ releases the file (open succeeds).
                locked = True
                file_object = None
                while locked == True:
                    try:
                        # Opening file in append mode and read the first 8 characters.
                        file_object = open(aux_name_file_recorded_by_babymind, "r+b", buffer_size)
                        if file_object:
                            locked = False
                    except:
                        locked = True
                    finally:
                        if file_object:
                            file_object.close()
                print("taking data is finish in file")
                time_spend_on_acquisition_data += time.time() - start_time_on_acquisition_data
                start_time_on_acquisition_data = time.time()
                #read temperature
                if self.usb2can.flag_connect_usb2can == 1 or self.usb2can.flag_HV_ON == 1:
                    self.usb2can.get_temperature_PDP()
                # First file: create the queue and launch the analysis thread.
                # NOTE(review): Thread(...).start() returns None, so th3 does
                # not hold the thread object.
                if indice_file_taking_data==0:
                    self.queue_for_dtata_recorded_by_babymind=deque([aux_name_file_recorded_by_babymind])
                    self.th3 = threading.Thread(target=self.get_boards_values_from_file).start() # this is to start at same time two functions
                else:
                    self.queue_for_dtata_recorded_by_babymind.append(aux_name_file_recorded_by_babymind)
                indice_file_taking_data+=1
                died_time_on_acquisition_data += time.time() - start_time_on_acquisition_data
            print("time in step 1 ( time for begining taking data until finish) ===============", time.time() - start_time_1)
            self.flag_record_data_in_queu_for_analyse=0
            # Power down the HV and the USB2CAN interface.
            if self.usb2can.flag_HV_ON==1:
                self.usb2can.set_HV_OFF_PDP()
                time.sleep(0.5)
            if self.usb2can.flag_connect_usb2can==1:
                self.usb2can.shutdown_interface_usb2can_Ixxat()
                time.sleep(0.5)
            print("died_time=====>",died_time_on_acquisition_data)
            print('time spend on acquisitioon data=====',time_spend_on_acquisition_data)
            time.sleep(1)
            # Trigger-rate mode runs the analysis synchronously, then stops.
            if self.flag_draw_trigger_rate == True:
                self.get_boards_values_from_file()
                self.stop_it()
            self.flag_can_stop_all=1
def get_boards_values_from_file(self):
    """Decode queued raw .daq files and append per-event data to ``self.new_file_to_write``.

    Consumes file names from ``self.queue_for_dtata_recorded_by_babymind``
    (a deque fed by the acquisition loop), parses the 32-bit TDM word stream
    (TDM headers/trailers, global-trigger headers, hit amplitude/time words),
    reconstructs per-channel LG/HG amplitudes and time-over-threshold (tot),
    fills the local and global histograms, periodically refreshes the event
    display, and finally accumulates per-channel and trigger-rate statistics.

    NOTE(review): several statements below are wrapped in a bare
    ``try/except: pass`` and reference names (``keys``, ``nmo``) that are not
    always bound at that point; failures there are silently swallowed —
    confirm this is intentional.
    """
    # 4-bit word identifiers of the TDM data format (taken from the top bits
    # of each 32-bit word).
    TDM_ID = 0b1110
    Hit_Amplitude_Id = 0b0011
    Hit_Time_Id = 0b0010
    Gtrig_Header_Id = 0b0001
    Gtrig_trailer_1_Id = 0b0100
    Gtrig_trailer_2_Id = 0b0101
    Special_Word_id = 0b1111
    # Per-channel (0..143) rate accumulators; one list entry per input file.
    mean_rate = dict((i, []) for i in np.arange(0, 144))
    std_rate = dict((i, []) for i in np.arange(0, 144))
    mean_trigger_rate=[]
    std_trigger_rate =[]
    start_time_2 = time.time()
    #if len(self.queue_for_dtata_recorded_by_babymind)<=3:
    # time.sleep(5)
    # Block until the acquisition loop has queued at least one file.
    while len(self.queue_for_dtata_recorded_by_babymind) < 1:
        time.sleep(5)
        pass
    with open(self.new_file_to_write, "ab")as new_file:
        #while self.flag_record_data_in_queu_for_analyse==1:
        #if len(self.queue_for_dtata_recorded_by_babymind) == 0:
        # time.sleep(5)
        print("queue_for_dtata_recorded_by_babymind====",len(self.queue_for_dtata_recorded_by_babymind))
        # Drain the queue: each entry is one raw .daq file to decode.
        while len(self.queue_for_dtata_recorded_by_babymind)>0:
            file_to_open = self.queue_for_dtata_recorded_by_babymind.popleft()
            print("we analyse now %s in the queue"%file_to_open)
            print(len(self.queue_for_dtata_recorded_by_babymind))
            # Acquisition still running but queue momentarily empty: wait for more data.
            if self.flag_record_data_in_queu_for_analyse != 0 and len(self.queue_for_dtata_recorded_by_babymind)==0:
                while len(self.queue_for_dtata_recorded_by_babymind) < 1:
                    time.sleep(5)
                    pass
            with open(file_to_open, "r+b") as file:
                # Read one 32-bit little-endian word and expose it as a
                # 32-character bit string; Word_Id is its top 4 bits.
                line = file.read(4)
                out_hex = ['{:02X}'.format(b) for b in line]
                out_hex.reverse()
                line_out = ''.join(out_hex)
                line_out_b = bin(int(line_out, 16))[2:].zfill(32)
                Word_Id = line_out_b[0:4]
                event_data_amplitude_LG = {}
                event_data_amplitude_HG = {}
                event_data_tot = {}
                '''
                data_LG = {}
                data_HG = {}
                data_time = {}
                '''
                # One 144-channel row per reconstructed event (row index pqr).
                data_LG = [[0]*144]
                data_HG = [[0]*144]
                data_time =[[0]*144]
                # (slot, channel, tag, hit) -> [lead time, trail time, LG, HG];
                # the placeholder 'a' marks "not seen yet".
                dict_queue_edge = {}
                negative_tot = 0
                positive_tot = 0
                pin_complete_slots = []
                _break = 0
                sumX1_rate = dict((i, 0) for i in np.arange(0, 144))
                sumX2_rate = dict((i, 0) for i in np.arange(0, 144))
                dict_for_calc_rate = dict((i, 0) for i in np.arange(0, 144))
                nbre_ampli_and_tot = dict((i, 0) for i in np.arange(0, 144))
                write_in_new_file = 0
                X1_trigger_rate = 0
                X2_trigger_rate = 0
                nbre_trigger_rate = 0
                gtrig_header = {}
                global_trigger_header_amplitude = dict((i, []) for i in [0, 2])
                global_trigger_header_time = dict((i, []) for i in [0, 2])
                gtrig_ampli_or_tot_old = dict((i, 0) for i in [0, 2])
                countss = 0
                gtrig_header_used_for_rate = {}
                calc_rate = dict((i, 0) for i in np.arange(0, 144))
                if self.entr5.get() == '': # attribute value 0 to entry 5 if it is equal to 0
                    self.entr5.delete(0, END)
                    self.entr5.insert(0, "0")
                self.time_allowed_to_display_events = int(self.entr10.get()) * 1e-3
                start_time = time.time()
                duration = 0
                pqr = 0
                aux_dict_to_test_coincidence={}
                # Main word loop: runs until EOF of the current .daq file.
                while line != b'':# and countss < 40000:
                    countss += 1
                    duration += time.time() - start_time
                    start_time = time.time()
                    # TDM header word (sub-id 0) opens the data of one slot.
                    if int(Word_Id, 2) == TDM_ID and int(line_out_b[4:6], 2) == 0:
                        slot = int(line_out_b[6:11], 2)
                        line = file.read(4)
                        if line != b'':
                            out_hex = ['{:02X}'.format(b) for b in line]
                            out_hex.reverse()
                            line_out = ''.join(out_hex)
                            line_out_b = bin(int(line_out, 16))[2:].zfill(32)
                            Word_Id = line_out_b[0:4]
                        # Track which slots were seen since the last complete pair;
                        # a repeated slot restarts the pair bookkeeping.
                        if slot not in pin_complete_slots:
                            pin_complete_slots.append(slot)
                        else:
                            pin_complete_slots = []
                            pin_complete_slots.append(slot)
                        # Consume words until the matching TDM trailer of this slot.
                        # NOTE(review): the inner "== TDM_ID" test can never be true
                        # under this loop condition — confirm the intended trailer check.
                        while int(Word_Id, 2) != TDM_ID and line != b'':
                            if int(Word_Id, 2) == TDM_ID and int(line_out_b[4:6], 2) == 1:
                                break
                            else:
                                # Special word: global-trigger + spill reset marker.
                                if int(Word_Id, 2) == Special_Word_id and int(line_out_b[11], 2) == 0 and int(line_out_b[12:32], 2) == 3:
                                    print("Gtrig + Spill REset for slot {}".format(slot))
                                    nmo = 1
                                else:
                                    if int(Word_Id, 2) == Gtrig_Header_Id:
                                        # 28-bit global trigger counter for this slot.
                                        gtrig_header[slot] = int(line_out_b[4:32], 2)
                                        # Hits belonging to this global trigger, up to its first trailer.
                                        while int(Word_Id, 2) != Gtrig_trailer_1_Id and line != b'':
                                            if int(Word_Id, 2) == Hit_Amplitude_Id or int(Word_Id, 2) == Hit_Time_Id:
                                                # Channels of slot 2 are remapped to 96..143.
                                                if slot == 0:
                                                    Channel_id = int(line_out_b[4:11], 2)
                                                elif slot == 2:
                                                    Channel_id = int(line_out_b[4:11], 2) + 96
                                                Hit_Id = int(line_out_b[11:14], 2)
                                                Tag_Id = int(line_out_b[14:16], 2)
                                                if int(Word_Id, 2) == Hit_Amplitude_Id:
                                                    Amplitude_Id = int(line_out_b[16:20], 2)
                                                elif int(Word_Id, 2) == Hit_Time_Id:
                                                    Edge_time = int(line_out_b[16], 2)
                                                # Payload: 12-bit amplitude or time measurement.
                                                Amplitude_or_tot_measurement = int(line_out_b[20:32], 2)
                                                if len(pin_complete_slots) == 2: # if pin_complete_slots == [0, 2]:
                                                    write_in_new_file = 1
                                                    # Instantaneous trigger rate from the gtrig counter
                                                    # difference (counter ticks assumed 10 us).
                                                    if (gtrig_header[slot] - gtrig_ampli_or_tot_old[slot]) != 0: # to verify
                                                        X1_trigger_rate += 1 / ((gtrig_header[slot] - gtrig_ampli_or_tot_old[slot]) * 10e-6)
                                                        X2_trigger_rate += (1 / ((gtrig_header[slot] - gtrig_ampli_or_tot_old[slot]) * 10e-6)) ** 2
                                                        nbre_trigger_rate += 1
                                                        gtrig_ampli_or_tot_old[slot] = gtrig_header[slot]
                                                    if (slot, Channel_id, Tag_Id, Hit_Id) in dict_queue_edge.keys():
                                                        if int(Word_Id, 2) == Hit_Time_Id:
                                                            if (slot, Channel_id, Tag_Id, Hit_Id) in dict_queue_edge.keys():
                                                                # Trailing edge fills index 1, leading edge index 0.
                                                                if Edge_time == 1:
                                                                    dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][1] = Amplitude_or_tot_measurement
                                                                    gtrig_header_used_for_rate[(slot, Channel_id, Tag_Id, Hit_Id)] = gtrig_header[slot]
                                                                elif Edge_time == 0 and dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][1] != 'a':
                                                                    dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][0] = Amplitude_or_tot_measurement
                                                                else:
                                                                    # Out-of-order edge: restart this hit with a fresh entry.
                                                                    del dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)]
                                                                    dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)] = 4 * ['a']
                                                                    dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][0] = Amplitude_or_tot_measurement
                                                        elif int(Word_Id, 2) == Hit_Amplitude_Id:
                                                            # Amplitude id 3 -> index 2 (LG), id 2 -> index 3 (HG).
                                                            if Amplitude_Id == 3:
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][2] = Amplitude_or_tot_measurement
                                                            elif Amplitude_Id == 2 and dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][2] != 'a':
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][3] = Amplitude_or_tot_measurement
                                                            else:
                                                                del dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)]
                                                            '''dict_queue_edge[(slot,
                                                            Channel_id,
                                                            Tag_Id,
                                                            Hit_Id)] = 4 * ['a']
                                                            dict_queue_edge[(slot,
                                                            Channel_id,
                                                            Tag_Id,
                                                            Hit_Id)][2]=Amplitude_or_tot_measurement'''
                                                        try:
                                                            # A hit is complete when both times (0,1) and both gains (2,3)
                                                            # are numbers; the sum raises TypeError while any slot still
                                                            # holds the 'a' placeholder, which skips the block below.
                                                            aux_diff_amplitude = dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][3] + \
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][2] + \
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][1] + \
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][0]
                                                            # Time over threshold = trailing - leading edge time.
                                                            tot = dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][1] - \
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][0]
                                                            if tot >= 0:
                                                                global_trigger_header_amplitude[slot].append(gtrig_header[slot])
                                                                global_trigger_header_time[slot].append(gtrig_header_used_for_rate[(slot, Channel_id, Tag_Id, Hit_Id)])
                                                                positive_tot += 1
                                                                val_LG=dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][2]
                                                                val_HG= dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][3]
                                                                if Channel_id not in aux_dict_to_test_coincidence.keys():
                                                                    aux_dict_to_test_coincidence[Channel_id]=[val_LG]
                                                                else:
                                                                    aux_dict_to_test_coincidence[Channel_id].append(val_LG)
                                                                data_LG[pqr][Channel_id]= val_LG
                                                                data_HG[pqr][Channel_id]= val_HG
                                                                data_time[pqr][Channel_id]=tot
                                                                #fill global histo
                                                                # NOTE(review): ``keys`` is not bound at this point in the
                                                                # function (it is only defined by the ``for keys in ...``
                                                                # loop much further down); a NameError here is silently
                                                                # absorbed by the bare except below — confirm.
                                                                self.old_dict_pixelid_values_LG_for_histo_global[keys].fill(val_LG, Channel_id)
                                                                self.old_dict_pixelid_values_HG_for_histo_global[keys].fill(val_HG, Channel_id)
                                                                self.old_dict_pixelid_values_tot_for_histo_global[keys].fill(tot, Channel_id)
                                                                #fill local histo
                                                                self.old_dict_pixelid_values_LG_for_histo_local[keys].fill(val_LG, Channel_id)
                                                                self.old_dict_pixelid_values_HG_for_histo_local[keys].fill(val_HG, Channel_id)
                                                                self.old_dict_pixelid_values_tot_for_histo_local[keys].fill(tot, Channel_id)
                                                                # NOTE(review): ``nmo`` is only initialised by the
                                                                # "Gtrig + Spill REset" special word above — confirm a
                                                                # reset word always precedes the first complete hit.
                                                                event_data_amplitude_LG[nmo] = [(Channel_id, dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][2])]
                                                                event_data_amplitude_HG[nmo] = [(Channel_id, dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][3])]
                                                                event_data_tot[nmo] = (Channel_id, tot)
                                                                if nbre_ampli_and_tot[Channel_id] != 0:
                                                                    rate_aux = 1 / ((gtrig_header_used_for_rate[(slot, Channel_id, Tag_Id, Hit_Id)] - dict_for_calc_rate[Channel_id]) * 10e-6)
                                                                    sumX1_rate[Channel_id] += rate_aux # + rate_HG # this rate is in Mhz. we divide by 10 because 10us is time between header and trailer
                                                                    sumX2_rate[Channel_id] += (rate_aux) ** 2 # + rate_HG ** 2 # this rate is in Mhz. we divide by 10 because 10us is time between header and trailer
                                                                nbre_ampli_and_tot[Channel_id] += 1
                                                                dict_for_calc_rate[Channel_id] = gtrig_header_used_for_rate[(slot, Channel_id, Tag_Id, Hit_Id)]
                                                                nmo += 1
                                                                del gtrig_header_used_for_rate[(slot, Channel_id, Tag_Id, Hit_Id)]
                                                            else:
                                                                # Negative tot: inconsistent edges, drop the hit.
                                                                negative_tot += 1
                                                                del dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)]
                                                        except:
                                                            pass
                                                    else:
                                                        # First word of this hit: create the 4-slot placeholder entry.
                                                        dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)] = 4 * ['a']
                                                        if int(Word_Id, 2) == Hit_Time_Id:
                                                            if Edge_time == 0:
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][0] = Amplitude_or_tot_measurement
                                                            elif Edge_time == 1:
                                                                # Trailing edge without a leading edge: discard.
                                                                del dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)]
                                                        elif int(Word_Id, 2) == Hit_Amplitude_Id:
                                                            if Amplitude_Id == 2:
                                                                dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)][3] = Amplitude_or_tot_measurement
                                                            elif Amplitude_Id == 3:
                                                                del dict_queue_edge[(slot, Channel_id, Tag_Id, Hit_Id)]
                                            # Next word of the gtrig block.
                                            line = file.read(4)
                                            if line != b'':
                                                out_hex = ['{:02X}'.format(b) for b in line]
                                                out_hex.reverse()
                                                line_out = ''.join(out_hex)
                                                line_out_b = bin(int(line_out, 16))[2:].zfill(32)
                                                Word_Id = line_out_b[0:4]
                                                # TDM trailer reached inside the gtrig block: flag the
                                                # outer loop to stop for this slot.
                                                if int(Word_Id, 2) == TDM_ID and int(line_out_b[4:6], 2) == 1:
                                                    _break = 1
                                                    break
                                        '''print("duration==", duration)
                                        print(self.time_allowed_to_display_events)
                                        print(self.flag_draw_trigger_rate)
                                        print(write_in_new_file)'''
                                        # One full global trigger processed: persist the event and,
                                        # at most once per display window, refresh the GUI.
                                        if write_in_new_file and self.flag_draw_trigger_rate == False:
                                            self.trigger_sofware_in_pixel_configuration(aux_dict_to_test_coincidence)
                                            # Software coincidence failed: blank the event before writing it.
                                            if self.list_pixels_triggered==[]:
                                                data_LG[pqr] = [0] * 144
                                                data_HG[pqr] = [0] * 144
                                                data_time[pqr] = [0] * 144
                                            pickle.dump(["Event", data_LG[pqr], data_HG[pqr],data_time[pqr]], new_file)
                                            write_in_new_file = 0
                                            data_LG.append([0]*144)
                                            data_HG.append([0]*144)
                                            data_time.append([0]*144)
                                            pqr+=1
                                            aux_dict_to_test_coincidence={}
                                            if self.flag_active_draw_button_for_histo_parent == 0:
                                                self.b7.config(state="active") # enable the draw histogram button
                                                self.flag_active_draw_button_for_histo_parent = 1 # This flag is to know if i can have data to draw it in histogramm
                                            # Refresh the event display at most every
                                            # time_allowed_to_display_events seconds (entr10, ms).
                                            if duration > self.time_allowed_to_display_events:
                                                # Display the event with the largest total LG amplitude.
                                                index_max_sum = [np.sum(l) for l in data_LG].index(np.max([np.sum(l) for l in data_LG]))
                                                self.data_electronics_LG = data_LG[index_max_sum]
                                                self.data_electronics_HG = data_HG[index_max_sum]
                                                self.data_electronics_tot = data_time[index_max_sum]
                                                self.list_of_pixels_on_events = [data_LG.index(item) for item in data_LG if item != 0]
                                                if self.list_of_pixels_on_events !=[] :
                                                    if self.value_hg_or_lg.get() == "HG":
                                                        sum_to_have_more_event_ligthed = np.sum(self.data_electronics_HG)
                                                    elif self.value_hg_or_lg.get() == "LG":
                                                        sum_to_have_more_event_ligthed = np.sum(self.data_electronics_LG)
                                                    elif self.value_hg_or_lg.get() == "TOT":
                                                        sum_to_have_more_event_ligthed = np.sum(self.data_electronics_tot)
                                                    # entr5 is the display threshold on the summed signal.
                                                    if sum_to_have_more_event_ligthed >= int(self.entr5.get()):
                                                        print("Boom")
                                                        self.plots_hex_in_canvas_pdp()
                                                        self.fen1.update_idletasks()
                                                    else:
                                                        print("kheops")
                                                # Reset the per-interval ("local") histograms for the next window.
                                                self.old_dict_pixelid_values_HG_for_histo_local = dict((i, Hist1D_local(self.bin_array_HG[i], 0, 5000, i)) for i in np.arange(144))
                                                self.old_dict_pixelid_values_LG_for_histo_local = dict((i, Hist1D_local(self.bin_array_LG[i], 0, 5000, i)) for i in np.arange(144))
                                                self.old_dict_pixelid_values_tot_for_histo_local = dict((i, Hist1D_local(self.bin_array_tot[i], 0, 5000, i)) for i in np.arange(144))
                                                self.list_of_pixels_on_events=[]
                                                pqr = 0
                                                data_LG = [[0] * 144]
                                                data_HG = [[0] * 144]
                                                data_time = [[0] * 144]
                                                duration = 0
                                                start_time = time.time()
                                                if (self.fen2 is not None) and self.fen2.winfo_exists(): # condition to test if child fen2 window is open or not
                                                    #self.th2 = threading.Thread(target=self._trace_histo_pixel_draw_and_plot()).start() # this is to start at same time two functions
                                                    self._trace_histo_pixel_draw_and_plot()
                                        # TDM trailer was seen inside the gtrig block: leave the slot loop.
                                        if _break:
                                            _break = 0
                                            break
                            # Next word of the current slot block.
                            line = file.read(4)
                            if line != b'':
                                out_hex = ['{:02X}'.format(b) for b in line]
                                out_hex.reverse()
                                line_out = ''.join(out_hex)
                                line_out_b = bin(int(line_out, 16))[2:].zfill(32)
                                Word_Id = line_out_b[0:4]
                        # Word following the TDM trailer.
                        line = file.read(4)
                        if line != b'':
                            out_hex = ['{:02X}'.format(b) for b in line]
                            out_hex.reverse()
                            line_out = ''.join(out_hex)
                            line_out_b = bin(int(line_out, 16))[2:].zfill(32)
                            Word_Id = line_out_b[0:4]
                    # Next top-level word.
                    line = file.read(4)
                    if line != b'':
                        out_hex = ['{:02X}'.format(b) for b in line]
                        out_hex.reverse()
                        line_out = ''.join(out_hex)
                        line_out_b = bin(int(line_out, 16))[2:].zfill(32)
                        Word_Id = line_out_b[0:4]
                # Per-channel mean/std of the instantaneous rate for this file.
                for keys in sumX1_rate.keys():
                    if nbre_ampli_and_tot[keys] not in [0, 1]:
                        mean_rate[keys].append(sumX1_rate[keys] / (nbre_ampli_and_tot[keys] - 1))
                        std_rate[keys].append(sqrt((sumX2_rate[keys] / (nbre_ampli_and_tot[keys] - 1)) - mean_rate[keys][0] ** 2))
                        # std_rate[keys].append(1)
                    else:
                        mean_rate[keys].append(0)
                        std_rate[keys].append(0)
                if nbre_trigger_rate != 0:
                    mean_trigger_rate.append(X1_trigger_rate / nbre_trigger_rate)
                    std_trigger_rate.append(sqrt((X2_trigger_rate / nbre_trigger_rate) - (X1_trigger_rate / nbre_trigger_rate) ** 2))
                else:
                    mean_trigger_rate.append(0)
                    std_trigger_rate.append(0)
        # All queued files processed: terminate the pickled output stream.
        pickle.dump("END", new_file)
    # Collapse the per-file lists into single summary numbers.
    # NOTE(review): std_rate is recomputed from mean_rate here (not from the
    # per-file std values) — confirm this estimator is intended.
    std_rate = dict((keys,np.mean(mean_rate[keys])) for keys in std_rate.keys())
    mean_rate = dict((keys,np.mean(mean_rate[keys])) for keys in mean_rate.keys())
    mean_trigger_rate=np.mean(mean_trigger_rate)
    std_trigger_rate=np.mean(std_trigger_rate)
    list_rate_components = [mean_rate, std_rate, mean_trigger_rate, std_trigger_rate]
    print("[mean_rate ,std_rate,mean_trigger_rate,std_trigger_rate]===", [aux[74] for aux in list_rate_components[0:2]], mean_trigger_rate, std_trigger_rate)
    if self.flag_draw_trigger_rate==True:
        # NOTE(review): list_rate_components has only 4 elements, but indices
        # 2..9 are read below as if it had 10 — indices >= 4 will raise
        # IndexError (and [2]/[3] are scalars being indexed with [0]); confirm
        # whether a longer components list was intended here.
        self.list_mean_cosmicray_rate_HG.append(list_rate_components[0][0])
        self.list_std_cosmicray_rate_HG.append(list_rate_components[1][0])
        self.list_mean_cosmicray_rate_LG.append(list_rate_components[2][0] )
        self.list_std_cosmicray_rate_LG.append(list_rate_components[3][0] )
        self.list_mean_cosmicray_rate_tot.append(list_rate_components[4][0] )
        self.list_std_cosmicray_rate_tot.append(list_rate_components[5][0] )
        self.list_mean_trigger_rate_ampli.append(list_rate_components[6])
        self.list_std_trigger_rate_ampli.append(list_rate_components[7])
        self.list_mean_trigger_rate_tot.append(list_rate_components[8])
        self.list_std_trigger_rate_tot.append(list_rate_components[9])
    self.flag_finish_function_get_boards_values_from_file=True
    print("time in step 2 (time of analysing all datas of babymind and copy it in result file)===============", time.time() - start_time_2)
    self.stop_it()
def _draw_trigger_rate_and_cosmic_flux(self):
    '''Scan the trigger threshold and draw trigger rate and cosmic-ray flux.

    Command of the draw button (b12).  For every threshold in
    [entr11, entr12) with step entr13 it sets the threshold, reruns the
    acquisition (start_it), collects the rate statistics filled by
    get_boards_values_from_file, pickles them to ``self.file_to_analyze_rate``
    and finally plots them in a new child window (fen3).

    Bug fixed: in the "HG" branch the saved value was read from
    ``self.threshold_LG`` although the restore below writes ``threshold_old``
    back into ``self.threshold_HG``; the HG threshold was therefore clobbered
    with the LG one after every scan.  It now saves ``self.threshold_HG``.
    '''
    answer = messagebox.askyesnocancel("Draw", "Are You Sure to do this operation?", icon='warning')
    if answer:
        # Close the histogram child window if it is still open.
        if (self.fen2 is not None) and self.fen2.winfo_exists():
            self._close_window_histogramm()
        if self.flag_1==1: # i check if i display and/or store file from babymind
            self.stop_it() # i stop evnt display
        if self.flag_draw_trigger_rate == False:
            # Clean the result folder before starting a new threshold scan.
            folder_result_acquisition_babymind = "D:/resultat_acquisition_babymind/folder_result_acquisition_babymind"
            for file_object in os.listdir(folder_result_acquisition_babymind):
                os.remove(os.path.join(folder_result_acquisition_babymind, file_object))
            self.flag_draw_trigger_rate=True
            self.threshold_x_shape_in_trigger_plot=np.arange(int(self.entr11.get()),int(self.entr12.get()),int(self.entr13.get()))
            if self.threshold_choices.get() == "LG":
                # Save the current threshold/stop state to restore it after the scan.
                threshold_old = self.threshold_LG
                flag_stop_old = self.flag_stop
                for threshold_aux in self.threshold_x_shape_in_trigger_plot:
                    self.var_threshold_LG.set("%s"%threshold_aux)
                    print(threshold_aux)
                    self.start_it()
                    self.flag_stop = 0
            elif self.threshold_choices.get() == "HG":
                # FIX: was self.threshold_LG — the restore below writes into
                # self.threshold_HG, so the HG value must be saved here.
                threshold_old = self.threshold_HG
                flag_stop_old = self.flag_stop
                for threshold_aux in self.threshold_x_shape_in_trigger_plot:
                    # NOTE(review): this branch also drives var_threshold_LG —
                    # confirm an HG scan is not supposed to set an HG variable instead.
                    self.var_threshold_LG.set("%s" % threshold_aux)
                    print(threshold_aux)
                    self.start_it()
                    self.flag_stop = 0
            # Persist the accumulated rate curves for offline analysis.
            with open(self.file_to_analyze_rate,"wb") as trigger_file:
                pickle.dump([self.list_mean_cosmicray_rate_HG, self.list_std_cosmicray_rate_HG ,self.list_mean_cosmicray_rate_LG ,self.list_std_cosmicray_rate_LG,self.list_mean_cosmicray_rate_tot,self.list_std_cosmicray_rate_tot,self.list_mean_trigger_rate_ampli,self.list_std_trigger_rate_ampli,self.list_mean_trigger_rate_tot,self.list_std_trigger_rate_tot],trigger_file)
            #disable button drw histogramm, start and draw trigger rate
            self.b3.config(state="disabled") # disable the start button to avoid multiple start event display
            self.b7.config(state="disabled") # disable the start button to avoid multiple start event display
            self.b12.config(state="disabled") # disable the start button to avoid multiple start event display
            '''initialiser the window,canvas,buttons where the trigger rate will be plot'''
            self.fen3 = Toplevel(self.fen1) # draw the children window of the window fen1
            self.b13 = Button(self.fen3, text='Close', command=self._close_window_trigger_rate_and_cosmic_flux)
            self.b13.grid(row=8, column=7, sticky='NSEW')
            self._trace_trigger_rate_and_cosmic_flux() # draw this trigger rate
            self.fen3.protocol("WM_DELETE_WINDOW",
                               self._close_window_trigger_rate_and_cosmic_flux) # this is reliated to the function _close_window_histogramm
            #reinitialise parameters
            self.list_mean_cosmicray_rate_HG = []
            self.list_std_cosmicray_rate_HG = []
            self.list_mean_cosmicray_rate_LG = []
            self.list_std_cosmicray_rate_LG = []
            self.list_mean_cosmicray_rate_tot = []
            self.list_std_cosmicray_rate_tot = []
            self.list_mean_trigger_rate_ampli = []
            self.list_std_trigger_rate_ampli = []
            self.list_mean_trigger_rate_tot = []
            self.list_std_trigger_rate_tot = []
            self.flag_draw_trigger_rate = False
            # Restore the threshold and stop flag saved before the scan.
            if self.threshold_choices.get() == "LG":
                self.threshold_LG = threshold_old
                self.flag_stop = flag_stop_old
            elif self.threshold_choices.get() == "HG":
                self.threshold_HG = threshold_old
                self.flag_stop = flag_stop_old
            # this is to adapt size of the widgets,canvas,... with the size of window
            self.fen3.rowconfigure(0, weight=1)
            self.fen3.rowconfigure(1, weight=1)
            self.fen3.rowconfigure(2, weight=1)
            self.fen3.rowconfigure(3, weight=1)
            self.fen3.rowconfigure(4, weight=1)
            self.fen3.rowconfigure(5, weight=1)
            self.fen3.rowconfigure(6, weight=1)
            self.fen3.rowconfigure(7, weight=1)
            self.fen3.rowconfigure(8, weight=1)
            self.fen3.columnconfigure(0, weight=1)
            self.fen3.columnconfigure(1, weight=1)
            self.fen3.columnconfigure(2, weight=1)
            self.fen3.columnconfigure(3, weight=1)
            self.fen3.columnconfigure(5, weight=1)
            self.fen3.columnconfigure(6, weight=1)
            self.fen3.columnconfigure(7, weight=1)
def _trace_trigger_rate_and_cosmic_flux(self):
    '''Plot trigger rate and cosmic-ray flux versus threshold in window fen3.

    Builds a 2x2 figure (trigger rate in amplitude/tot on top, cosmic-ray
    flux in amplitude/tot below), with a secondary top x-axis showing the
    thresholds converted from DAC counts to photo-electrons, and embeds it
    in a Tk canvas with a navigation toolbar.

    Bug fixed: ``inst is 0`` compared identity with an int literal (a
    SyntaxWarning since Python 3.8, and always False for numpy scalars);
    replaced with ``inst == 0``.
    '''
    self.fig_trigger = plt.figure(facecolor="green")
    self.axs_trigger_0 = self.fig_trigger.add_subplot(221)
    self.axs_trigger_1 = self.fig_trigger.add_subplot(222)
    self.axs_trigger_2 = self.fig_trigger.add_subplot(223)
    self.axs_trigger_3 = self.fig_trigger.add_subplot(224)
    print("rate parameters for plot")
    print("threshold_x_shape_in_trigger_plot===",self.threshold_x_shape_in_trigger_plot)
    print("list_mean_cosmicray_rate_HG===",self.list_mean_cosmicray_rate_HG)
    print("list_mean_cosmicray_rate_tot===",self.list_mean_cosmicray_rate_tot)
    print("list_mean_trigger_rate_ampli===",self.list_mean_trigger_rate_ampli)
    print("list_mean_trigger_rate_tot===",self.list_mean_trigger_rate_tot)
    # Error bars are drawn on a log scale: take log of the non-zero std values.
    # FIX: was "inst is 0" (identity test against an int literal).
    self.list_std_trigger_rate_ampli=[0 if inst == 0 else np.log(inst) for inst in self.list_std_trigger_rate_ampli]
    self.list_std_trigger_rate_tot = [0 if inst == 0 else np.log(inst) for inst in self.list_std_trigger_rate_tot]
    # Convert the DAC thresholds to photo-electrons with the mean pedestal/gain.
    self.threshold_x_shape_in_trigger_plot_in_PE_LG=[(threshold_LG - np.mean(self.pedestal_LG))/np.mean(self.Gain_LG) for threshold_LG in self.threshold_x_shape_in_trigger_plot]
    self.threshold_x_shape_in_trigger_plot_in_PE_HG = [
        (threshold_HG - np.mean(self.pedestal_HG)) / np.mean(self.Gain_HG) for threshold_HG in
        self.threshold_x_shape_in_trigger_plot]
    self.threshold_x_shape_in_trigger_plot_in_PE_LG=['%s'%e for e in self.threshold_x_shape_in_trigger_plot_in_PE_LG]
    self.threshold_x_shape_in_trigger_plot_in_PE_HG = ['%s' % e for e in
                                                       self.threshold_x_shape_in_trigger_plot_in_PE_HG]
    self.axs_trigger_0.errorbar(self.threshold_x_shape_in_trigger_plot,self.list_mean_trigger_rate_ampli,self.list_std_trigger_rate_ampli,fmt='-o')
    self.axs_trigger_1.errorbar(self.threshold_x_shape_in_trigger_plot, self.list_mean_trigger_rate_tot,
                                self.list_std_trigger_rate_tot, fmt='-o')
    self.axs_trigger_2.errorbar(self.threshold_x_shape_in_trigger_plot, self.list_mean_cosmicray_rate_HG,
                                self.list_std_cosmicray_rate_HG, fmt='-o')
    self.axs_trigger_3.errorbar(self.threshold_x_shape_in_trigger_plot, self.list_mean_cosmicray_rate_tot,
                                self.list_std_cosmicray_rate_tot, fmt='-o')
    self.fig_trigger.tight_layout()
    # Secondary (twiny) axes relabel the same x positions in photo-electrons.
    self.axs_trigger_0.set_yscale("log")
    self.axs_trigger_0.set_title("Trigger in amplitude")
    self.axs_trigger_0.grid()
    #self.axs_trigger_0.set_xlabel(" IN DAC")
    self.axs_trigger_0_prime=self.axs_trigger_0.twiny()
    self.axs_trigger_0_prime.set_xlim(self.axs_trigger_0.get_xlim())
    self.axs_trigger_0_prime.set_xticks(self.threshold_x_shape_in_trigger_plot)
    self.axs_trigger_0_prime.set_xticklabels(self.threshold_x_shape_in_trigger_plot_in_PE_HG)
    #self.axs_trigger_0_prime.set_xlabel(" IN PE")
    self.axs_trigger_1.set_yscale("log")
    self.axs_trigger_1.set_title("Trigger in tot")
    self.axs_trigger_1.grid()
    #self.axs_trigger_1.set_xlabel(" IN DAC")
    self.axs_trigger_1_prime = self.axs_trigger_1.twiny()
    self.axs_trigger_1_prime.set_xlim(self.axs_trigger_1.get_xlim())
    self.axs_trigger_1_prime.set_xticks(self.threshold_x_shape_in_trigger_plot)
    self.axs_trigger_1_prime.set_xticklabels(self.threshold_x_shape_in_trigger_plot_in_PE_HG)
    #self.axs_trigger_1_prime.set_xlabel(" IN PE")
    self.axs_trigger_2.set_yscale("log")
    self.axs_trigger_2.set_title("CR flux in amplitude")
    self.axs_trigger_2.grid()
    #self.axs_trigger_2.set_xlabel(" IN DAC")
    self.axs_trigger_2_prime = self.axs_trigger_2.twiny()
    self.axs_trigger_2_prime.set_xlim(self.axs_trigger_2.get_xlim())
    self.axs_trigger_2_prime.set_xticks(self.threshold_x_shape_in_trigger_plot)
    self.axs_trigger_2_prime.set_xticklabels(self.threshold_x_shape_in_trigger_plot_in_PE_LG)
    #self.axs_trigger_2_prime.set_xlabel(" IN PE")
    self.axs_trigger_3.set_yscale("log")
    self.axs_trigger_3.set_title("CR flux in tot")
    self.axs_trigger_3.grid()
    #self.axs_trigger_3.set_xlabel(" IN DAC")
    self.axs_trigger_3_prime = self.axs_trigger_3.twiny()
    self.axs_trigger_3_prime.set_xlim(self.axs_trigger_3.get_xlim())
    self.axs_trigger_3_prime.set_xticks(self.threshold_x_shape_in_trigger_plot)
    self.axs_trigger_3_prime.set_xticklabels(self.threshold_x_shape_in_trigger_plot_in_PE_LG)
    #self.axs_trigger_3_prime.set_xlabel(" IN PE")
    # Embed the figure in the fen3 Tk window with a navigation toolbar.
    # NOTE(review): FigureCanvasTkAgg.show() was removed in recent matplotlib
    # versions — draw() may be required on upgrade.
    self.canvas_trigger = FigureCanvasTkAgg(self.fig_trigger, master=self.fen3)
    self.canvas_trigger.show()
    self.canvas_trigger.get_tk_widget().grid(row=1, column=1, rowspan=5, columnspan=6, padx=10, pady=5,
                                             sticky='NSEW')
    self.toolbar_frame_trigger = Frame(self.fen3, highlightcolor="red", highlightthickness=1,
                                       highlightbackground="blue")
    self.toolbar_frame_trigger.grid(row=0, column=1)
    self.toolbar_trigger = NavigationToolbar2Tk(self.canvas_trigger, self.toolbar_frame_trigger)
    self.canvas_trigger._tkcanvas.grid(row=1, column=1, rowspan=5, columnspan=6, padx=10, pady=5, sticky='NSEW')
    def on_key_event(event):
        # Forward keyboard shortcuts to the matplotlib toolbar.
        print('you pressed %s' % event.key)
        key_press_handler(event, self.canvas_trigger, self.toolbar_trigger)
    self.canvas_trigger.mpl_connect('key_press_event', on_key_event)
    self.fen3.attributes("-topmost", True) # this is to maintain fen3 in front of all windows
    #while (self.fen3 is not None) and self.fen3.winfo_exists():
def _close_window_trigger_rate_and_cosmic_flux(self):
'''This function is to close the child window where histogramm have been plotting'''
# activate button drw histogramm, start and draw trigger rate
self.b3.config(state="active") # disable the start button to avoid multiple start event display
self.b7.config(state="active") # disable the start button to avoid multiple start event display
self.b12.config(state="active") # disable the start button to avoid multiple start event display
self.fen3.destroy() # this is necessary on Windows to prevent
self.fen3 = None
# Close the socket server
def daq_tdm_Applib_slotArray_v1(self,aux_name_file_recorded_by_babymind):
    """Launch the TDM DAQ for the MCB on slot 0 and the SLAVE on slot 2 through the GTX chain.

    Parameters
    ----------
    aux_name_file_recorded_by_babymind : str
        Path of the .daq file the DAQ application will record into.

    Sends the prepare / sync-check / start command sequence to the DAQ
    application over ``self.comm``; the command order matters and must not
    be changed.  (Removed the unused local ``cfgfilename_1`` — it was
    assigned from ``self.config_file_aux_1`` but never read.)
    """
    # ------------------------------------------------------------------------------
    # MAIN SCRIPT FUNCTIONS
    # ------------------------------------------------------------------------------
    # --- fill there the config file names. just one in the array
    #if self.daq_tdm_Applib==0:
    cfgfilename_0 = self.config_file_aux_0
    #This will launch the TDM DAQ for MCB on slot 0 & SLAVE on slot 2 trough GTX chain
    # Configure Master Clock FEB & TDM...
    self.comm.Communicate('App.TDMPrepareDaq({}, "{}", {},{}, {}, {},{},{})\r'.format(self.MCB_BOARD_ID
                          , cfgfilename_0, self.MCB_BOARD_ID
                          , self.l_enableGtrig
                          , self.l_enableGtrigOnlyOnSpill
                          , self.l_enableReadoutOnSpillGate
                          , self.l_syncResetEn, self.beforeConfigure))
    #"Prepare SYNC on slave slots")
    self.comm.Communicate('App.ChainSetCheckSyncAndGtxs("{}|{}")\r'.format(self._boardIds[0], self._boardIds[1]))
    #"prepare DAQ from last slot to USB slot 0")
    self.comm.Communicate('App.TDMPrepareDaq({}, "{}", {},{}, {}, {},{},{})\r'.format(self._boardIds[0]
                          , cfgfilename_0, self.MCB_BOARD_ID
                          , self.l_enableGtrig
                          , self.l_enableGtrigOnlyOnSpill
                          , self.l_enableReadoutOnSpillGate
                          , self.l_syncResetEn, self.beforeConfigure))
    self.comm.Communicate('App.TDMPrepareDaq({}, "{}", {},{}, {}, {},{},{})\r'.format(self._boardIds[1]
                          , cfgfilename_0, self.MCB_BOARD_ID
                          , self.l_enableGtrig
                          , self.l_enableGtrigOnlyOnSpill
                          , self.l_enableReadoutOnSpillGate
                          , self.l_syncResetEn, self.beforeConfigure))
    #Starting DAQ on all slot with USB on SLOT0")
    # Cap the output file size, then start the DAQs on both slots.
    self.comm.Communicate(
        'BoardLib.SetVariable("Board.UsbParam.FileLimit", {})\r'.format(self._FILE_LIMIT))
    l_daqFile = aux_name_file_recorded_by_babymind
    self.comm.Communicate(
        'App.TDMStartDaqs("{}|{}", "{}", {})\r'.format(self._boardIds[0], self._boardIds[1], l_daqFile, self.MCB_BOARD_ID))
def pointy_top_hex(self,center_x, center_y, size_edge_to_edge, i):
    """Return the (x, y) coordinates of corner *i* of a pointy-topped hexagon.

    The hexagon is centred on (center_x, center_y); size_edge_to_edge is its
    diameter, and corner i (0..5) lies at angle 60*i + 30 degrees.
    """
    radius = size_edge_to_edge / 2
    corner_angle = (pi / 180) * (60 * i + 30)  # degrees -> radians
    return (center_x + radius * cos(corner_angle),
            center_y + radius * sin(corner_angle))
def make_mini_cam_mathieu(self, size_edge_to_edge):
    """Build the mini-camera pixel map from the tab-separated config file.

    Each config line holds ``pixel_id<TAB>center_x<TAB>center_y``; reading
    stops at the first line with an empty first field.  Returns a dict
    ``{pixel_id: [(corner_xs, corner_ys), (center_x, center_y)]}`` where the
    six corners are computed with :meth:`pointy_top_hex`.
    """
    camera = {}
    with open("D:/resultat_acquisition_babymind/fichier_config_mini_camera_mathieu.txt","r") as cfg:
        while True:
            fields = cfg.readline().split("\n")[0].split("\t")
            if fields[0] == "":
                break  # empty first field also covers EOF (readline() -> "")
            pixel_id = float(fields[0])
            center = (float(fields[1]), float(fields[2]))
            corner_xs = []
            corner_ys = []
            for corner in range(6):
                x, y = self.pointy_top_hex(center[0], center[1], size_edge_to_edge, corner)
                corner_xs.append(x)
                corner_ys.append(y)
            camera[pixel_id] = [(corner_xs, corner_ys), center]
    return camera
def make_mini_cam_mathieu_with_node(self, size_edge_to_edge):
    '''
    Create and Make the mapping of minicamera.

    Reads the tab-separated mapping table (skipping its header line), builds
    for every pixel the six hexagon corners via pointy_top_hex, then rekeys
    the result through the hard-coded electronics-channel -> pixel-id table
    so the returned dict is indexed by electronics channel:
    {channel_id: [(corner_xs, corner_ys), (center_x, center_y)]}.
    '''
    mini_cam_mathieu_with_node = {}
    with open("D:/resultat_acquisition_babymind/mapping_mini_cam_with_node/MappingTable_MiniCamera.txt","r") as file:
        # First readline() discards the header row of the mapping table.
        line=file.readline()
        line = file.readline().split("\n")[0].split("\t")
        # Columns 6..8 hold pixel id and center coordinates; an empty first
        # field (also produced at EOF) terminates the loop.
        while line[0] != "":
            pixel_id = float(line[6])
            pixel_center = (float(line[7]), float(line[8]))
            xs = []
            ys = []
            for i in range(6):
                Point = self.pointy_top_hex(pixel_center[0], pixel_center[1], size_edge_to_edge, i)
                xs.append(Point[0])
                ys.append(Point[1])
            mini_cam_mathieu_with_node[pixel_id] = [(xs, ys), pixel_center]
            line = file.readline().split("\n")[0].split("\t")
    # Fixed mapping: pixel id (key) -> electronics channel (value), inverted below.
    dict_pixels_ids_1 = {117: 0, 84: 1, 116: 2, 113: 3, 111: 4, 86: 5, 85: 6, 90: 7, 119: 8, 112: 9, 110: 10, 129: 11,
                         72: 12, 87: 13, 88: 14, 91: 15, 118: 16, 115: 17, 109: 18, 128: 19, 125: 20, 123: 21,
                         74: 22, 73: 23, 78: 24, 89: 25, 95: 26, 94: 27, 105: 28, 114: 29, 108: 30, 131: 31,
                         124: 32, 122: 33, 75: 34, 76: 35, 79: 36, 93: 37, 92: 38, 48: 39, 104: 40, 101: 41,
                         99: 42, 130: 43, 127: 44, 121: 45, 77: 46, 83: 47, 82: 48, 50: 49, 49: 50, 54: 51,
                         107: 52, 100: 53, 98: 54, 141: 55, 126: 56, 120: 57, 81: 58, 80: 59, 60: 60, 51: 61, 52: 62,
                         55: 63, 106: 64, 103: 65, 97: 66, 140: 67, 137: 68, 135: 69, 62: 70, 61: 71, 66: 72, 53: 73,
                         59: 74, 58: 75, 10: 76, 102: 77, 96: 78, 143: 79, 136: 80, 134: 81, 63: 82, 64: 83, 67: 84,
                         57: 85, 56: 86, 6: 87, 7: 88, 11: 89, 8: 90, 142: 91, 139: 92, 133: 93, 65: 94, 71: 95,
                         70: 96, 46: 97, 0: 98, 1: 99, 4: 100, 5: 101, 9: 102, 22: 103, 138: 104, 132: 105,
                         69: 106, 68: 107, 42: 108, 43: 109, 47: 110, 44: 111, 2: 112, 3: 113, 18: 114, 19: 115,
                         23: 116, 20: 117, 36: 118, 37: 119, 40: 120, 41: 121, 45: 122, 34: 123, 12: 124, 13: 125,
                         16: 126, 17: 127, 21: 128, 38: 129, 39: 130, 30: 131, 31: 132, 35: 133, 32: 134, 14: 135,
                         15: 136, 24: 137, 25: 138, 28: 139, 29: 140, 33: 141, 26: 142, 27: 143}
    # Invert to channel -> pixel id, then rekey the geometry dict by channel.
    # NOTE(review): the file keys are floats while the table keys are ints —
    # the lookup relies on float/int hash equality in Python; confirm intended.
    dict_pixels_ids_1=dict((item,key) for (key,item) in dict_pixels_ids_1.items())
    mini_cam_mathieu_with_node=dict((dict_pixels_ids_1[key],item) for (key,item) in mini_cam_mathieu_with_node.items())
    return mini_cam_mathieu_with_node
def find_neighboors_pixels_by_scipy_method(self, rad=23.2 + 1 + 4):
    """Use a KD-tree to find the neighbouring pixels of every camera pixel.

    Pixel geometry is taken from ``self.reatribute_id_pixels``
    (``{pixel_id: [corners, (center_x, center_y)]}``); a pixel is a
    neighbour of another when their centre distance is at most *rad*.

    Parameters
    ----------
    rad : float, optional
        Neighbour search radius.  Defaults to the original hard-coded
        value ``23.2 + 1 + 4`` (slightly larger than the pixel diameter);
        made a parameter so the same routine works for other pixel sizes.

    Returns
    -------
    list of list of int
        For each pixel (in ``reatribute_id_pixels`` iteration order) the
        indices of its neighbours, the pixel itself excluded.
    """
    pixels = self.reatribute_id_pixels
    list_centers_xs = []
    list_centers_ys = []
    list_pixels_id = []
    for pixels_id, polygons_data in pixels.items():
        list_centers_xs.append(polygons_data[1][0])
        list_centers_ys.append(polygons_data[1][1])
        list_pixels_id.append(pixels_id)
    # One (x, y) row per pixel centre.
    points = np.array([list_centers_xs, list_centers_ys]).T
    indices = np.arange(len(list_centers_xs))
    kdtree = KDTree(points)
    neighbors = [kdtree.query_ball_point(p, r=rad) for p in points]
    for nn, ii in zip(neighbors, indices):
        nn.remove(ii)  # get rid of the pixel itself
    print(neighbors)
    return neighbors
def find_neighboors_pixels_by_my_method(self):
    """Brute-force neighbour search between camera pixels.

    Same job as ``find_neighboors_pixels_by_scipy_method``, but done by
    directly comparing every pair of pixel centres (O(n^2) distance
    tests, plus an O(n) id lookup per hit) instead of a KD-tree.
    Pixel centres are read from ``self.reatribute_id_pixels``
    (pixel_id -> polygon data, where ``polygons_data[1]`` is the (x, y)
    centre — TODO confirm against the builder of that dict).

    The result is stored in ``self.neighboors`` as
    ``{pixel_id: [neighbouring pixel ids]}``; nothing is returned.
    """
    # Neighbour radius: slightly larger than the pixel diameter so that
    # adjacent pixels are always caught.
    rad=23.2 + 1 + 4
    pixels=self.reatribute_id_pixels
    dict_centers_xs_ys={}
    list_pixels_id=[]
    list_centers_xs_ys = []
    neighboors={}
    for pixels_id, polygons_data in pixels.items():
        centers_xs =polygons_data[1][0]
        centers_ys=polygons_data[1][1]
        dict_centers_xs_ys[pixels_id]=(centers_xs,centers_ys)
        list_centers_xs_ys.append((centers_xs,centers_ys))
        list_pixels_id.append(pixels_id)
    keys=dict_centers_xs_ys.keys()
    values=dict_centers_xs_ys.values()
    #print(dict_centers_xs_ys)
    for pixels_id,centers_in_dict in dict_centers_xs_ys.items():
        # Temporarily take the current centre out of the candidate list so
        # a pixel is never reported as its own neighbour; it is re-appended
        # after the inner loop.  NOTE(review): this assumes pixel centres
        # are unique — duplicates would break both remove() and index().
        list_centers_xs_ys.remove(centers_in_dict)
        for centers_in_list in list_centers_xs_ys:
            # Euclidean distance test against the neighbour radius.
            if (sqrt((centers_in_dict[0] - centers_in_list[0])**2+(centers_in_dict[1] - \
                centers_in_list[1])**2)-rad) <= 0:
                # Map the matched centre back to its pixel id via the
                # parallel keys/values views of dict_centers_xs_ys.
                if not pixels_id in neighboors.keys():
                    neighboors[pixels_id]=[list(keys)[list(values).index(centers_in_list)]]
                else:
                    neighboors[pixels_id].append(list(keys)[list(values).index(centers_in_list)])
        list_centers_xs_ys.append(centers_in_dict)
    self.neighboors=neighboors
    # Dead code below: a previously computed neighbour table kept as a
    # reference inside a no-op string literal.
    '''
    neighboors={0: [1, 6, 46, 47, 44, 56], 1: [2, 6, 7, 4, 44, 0], 2: [3, 4, 34, 44, 45, 1], 3: [4, 5, 12, 18, 34, 2],
    6: [7, 58, 59, 56, 0, 1], 7: [4, 10, 11, 58, 1, 6], 4: [5, 11, 1, 2, 3, 7], 5: [11, 8, 9, 18, 3, 4],
    10: [11, 58, 102, 103, 106, 7], 11: [8, 102, 7, 4, 5, 10], 8: [9, 96, 102, 142, 5, 11], 9: [18, 19, 22, 142, 5, 8],
    12: [13, 18, 34, 35, 32, 3], 13: [14, 18, 19, 16, 32, 12], 14: [15, 16, 32, 33, 13], 15: [16, 17, 14], 18: [19, 3, 5, 9, 12, 13],
    19: [16, 22, 23, 9, 13, 18], 16: [17, 23, 13, 14, 15, 19], 17: [23, 20, 21, 15, 16], 22: [23, 138, 139, 142, 9, 19],
    23: [20, 138, 19, 16, 17, 22], 20: [21, 132, 138, 17, 23], 21: [17, 20], 24: [25, 30, 39], 25: [26, 30, 31, 28, 24],
    26: [27, 28, 25], 27: [28, 29, 26], 30: [31, 39, 41, 45, 24, 25], 31: [28, 34, 35, 45, 25, 30], 28: [29, 35, 25, 26, 27, 31],
    29: [35, 32, 33, 27, 28], 34: [35, 45, 2, 3, 12, 31], 35: [32, 12, 31, 28, 29, 34], 32: [33, 12, 13, 14, 29, 35],
    33: [14, 29, 32], 36: [37, 42, 68], 37: [38, 42, 43, 40, 36], 38: [39, 40, 37], 39: [40, 41, 24, 30, 38],
    42: [43, 70, 71, 68, 36, 37], 43: [40, 46, 47, 70, 37, 42], 40: [41, 47, 37, 38, 39, 43], 41: [47, 44, 45, 30, 39, 40],
    46: [47, 56, 57, 70, 0, 43], 47: [44, 0, 43, 40, 41, 46], 44: [45, 0, 1, 2, 41, 47], 45: [2, 30, 31, 34, 41, 44],
    48: [49, 54, 94, 95, 92, 104], 49: [50, 54, 55, 52, 92, 48], 50: [51, 52, 82, 92, 93, 49], 51: [52, 53, 60, 66, 82, 50],
    54: [55, 106, 107, 104, 48, 49], 55: [52, 58, 59, 106, 49, 54], 52: [53, 59, 49, 50, 51, 55], 53: [59, 56, 57, 66, 51, 52],
    58: [59, 106, 6, 7, 10, 55], 59: [56, 6, 55, 52, 53, 58], 56: [57, 0, 6, 46, 53, 59], 57: [66, 67, 70, 46, 53, 56],
    60: [61, 66, 82, 83, 80, 51], 61: [62, 66, 67, 64, 80, 60], 62: [63, 64, 80, 81, 61], 63: [64, 65, 62],
    66: [67, 51, 53, 57, 60, 61], 67: [64, 70, 71, 57, 61, 66], 64: [65, 71, 61, 62, 63, 67], 65: [71, 68, 69, 63, 64],
    70: [71, 42, 43, 46, 57, 67], 71: [68, 42, 67, 64, 65, 70], 68: [69, 36, 42, 65, 71], 69: [65, 68], 72: [73, 78, 87],
    73: [74, 78, 79, 76, 72], 74: [75, 76, 73], 75: [76, 77, 74], 78: [79, 87, 89, 93, 72, 73], 79: [76, 82, 83, 93, 73, 78],
    76: [77, 83, 73, 74, 75, 79], 77: [83, 80, 81, 75, 76], 82: [83, 93, 50, 51, 60, 79], 83: [80, 60, 79, 76, 77, 82],
    80: [81, 60, 61, 62, 77, 83], 81: [62, 77, 80], 84: [85, 90, 116], 85: [86, 90, 91, 88, 84], 86: [87, 88, 85],
    87: [88, 89, 72, 78, 86], 90: [91, 118, 119, 116, 84, 85], 91: [88, 94, 95, 118, 85, 90], 88: [89, 95, 85, 86, 87, 91],
    89: [95, 92, 93, 78, 87, 88], 94: [95, 104, 105, 118, 48, 91], 95: [92, 48, 91, 88, 89, 94], 92: [93, 48, 49, 50, 89, 95],
    93: [50, 78, 79, 82, 89, 92], 96: [97, 102, 142, 143, 140, 8], 97: [98, 102, 103, 100, 140, 96], 98: [99, 100, 130, 140, 141, 97],
    99: [100, 101, 108, 114, 130, 98], 102: [103, 10, 11, 8, 96, 97], 103: [100, 106, 107, 10, 97, 102], 100: [101, 107, 97, 98, 99, 103],
    101: [107, 104, 105, 114, 99, 100], 106: [107, 10, 54, 55, 58, 103], 107: [104, 54, 103, 100, 101, 106],
    104: [105, 48, 54, 94, 101, 107], 105: [114, 115, 118, 94, 101, 104], 108: [109, 114, 130, 131, 128, 99],
    109: [110, 114, 115, 112, 128, 108], 110: [111, 112, 128, 129, 109], 111: [112, 113, 110], 114: [115, 99, 101, 105, 108, 109],
    115: [112, 118, 119, 105, 109, 114], 112: [113, 119, 109, 110, 111, 115], 113: [119, 116, 117, 111, 112],
    118: [119, 90, 91, 94, 105, 115], 119: [116, 90, 115, 112, 113, 118], 116: [117, 84, 90, 113, 119], 117: [113, 116],
    120: [121, 126, 135], 121: [122, 126, 127, 124, 120], 122: [123, 124, 121], 123: [124, 125, 122],
    126: [127, 135, 137, 141, 120, 121], 127: [124, 130, 131, 141, 121, 126], 124: [125, 131, 121, 122, 123, 127],
    125: [131, 128, 129, 123, 124], 130: [131, 141, 98, 99, 108, 127], 131: [128, 108, 127, 124, 125, 130],
    128: [129, 108, 109, 110, 125, 131], 129: [110, 125, 128], 132: [133, 138, 20], 133: [134, 138, 139, 136, 132],
    134: [135, 136, 133], 135: [136, 137, 120, 126, 134], 138: [139, 22, 23, 20, 132, 133], 139: [136, 142, 143, 22, 133, 138],
    136: [137, 143, 133, 134, 135, 139], 137: [143, 140, 141, 126, 135, 136], 142: [143, 8, 9, 22, 96, 139],
    143: [140, 96, 139, 136, 137, 142], 140: [141, 96, 97, 98, 137, 143], 141: [98, 126, 127, 130, 137, 140]}
    '''
def draw_camera_pixel_ids(self, xs_center, ys_center, pixels_id, axes):
    """Annotate the camera display with a pixel id at the pixel centre.

    Places the text *pixels_id* at (*xs_center*, *ys_center*) on *axes*,
    horizontally centred, in a 10-point font.
    """
    # Centre-aligned label so the id sits right on the pixel centre.
    label_style = {"fontsize": 10, "ha": "center"}
    axes.text(xs_center, ys_center, pixels_id, **label_style)
def draw_pixel_center(self, list_centers_xs, list_centers_ys, axes):
    """Mark every pixel centre on *axes* with a yellow '+' marker.

    *list_centers_xs* and *list_centers_ys* are the parallel x / y
    coordinate sequences of the pixel centres.
    """
    marker = 'y+'  # yellow plus sign at each centre
    axes.plot(list_centers_xs, list_centers_ys, marker)
def plot_pixels_grid_bis(self, pixels, data_from_electronics):
    """Colour the camera's pixel polygons from per-pixel values and save a snapshot.

    Maps each value of *data_from_electronics* (indexed by pixel id) to a
    colour with a jet colormap, paints the matching polygon of
    ``self.dict_polygones``, refreshes the colorbar, and saves the figure
    to a numbered PNG.  *pixels* is currently unused by the body.
    """
    #cmap1 = cm.YlOrBr
    #norm1 = colors.Normalize(np.min(data_from_electronics), np.max(data_from_electronics))
    if self.box_choice.get() == "Temp":
        # Temperature display: fixed 0-35 scale so colours are comparable
        # between refreshes.
        self.norm1 = matplotlib.colors.Normalize(0,35)
    else:
        # Otherwise autoscale to the current data range.
        self.norm1 = matplotlib.colors.Normalize(np.min(data_from_electronics), np.max(data_from_electronics))
    self.cmap1 = matplotlib.cm.ScalarMappable(norm=self.norm1, cmap=matplotlib.cm.jet)
    self.cmap1.set_array([])
    for pixel_id,polygones in self.dict_polygones.items():
        polygones.set_facecolor(self.cmap1.to_rgba(data_from_electronics[int(pixel_id)]))
        self.axes_fen1.add_patch(polygones)
    self.cb_fen1.update_normal(self.cmap1)
    self.cb_fen1.draw_all()
    self.axes_fen1.axis('equal')
    # NOTE(review): hard-coded Windows output directory — fails on other
    # machines; consider making it configurable.
    self.fig_fen1.savefig("D:/resultat_acquisition_babymind/figures/fig_%s"%self.number_figure)
    self.number_figure+=1
    # if you want to draw the center of the pixels
    # draw_pixel_center(list_centers_xs, list_centers_ys)
    #free memory
def plots_hex_in_canvas_pdp(self):
    """Refresh the hexagonal camera view in the main window.

    Chooses the data source from the GUI selections — temperature
    (``box_choice == "Temp"``) or the HG / LG / TOT electronics arrays —
    draws it with :meth:`plot_pixels_grid_bis`, redraws the Tk canvas,
    then clears the figure and deletes the displayed array to free memory.
    """
    if self.box_choice.get() == "Temp":
        self._update_box_messages("We are reading temperature of PDP")
        print(self.usb2can.data_temperature)
        self.plot_pixels_grid_bis(self.reatribute_id_pixels,
                                  self.usb2can.data_temperature)
        # Schedule the next acquisition cycle in 3 s.
        self.fen1.after(3000, self.start_it)
    else:
        if self.value_hg_or_lg.get() == "HG":
            self._update_box_messages("We are in HG")
            # NOTE(review): reads self.list_of_pixels_on_events, while the
            # producer sets list_of_pixels_on_events_HG/_LG/_tot — confirm
            # this attribute is set elsewhere.
            self._update_box_messages(self.list_of_pixels_on_events)
            print(self.data_electronics_HG)
            self.plot_pixels_grid_bis(self.reatribute_id_pixels,
                                      self.data_electronics_HG)
        elif self.value_hg_or_lg.get() == "LG":
            self._update_box_messages("We are in LG")
            self._update_box_messages(self.list_of_pixels_on_events)
            print(self.data_electronics_LG)
            self.plot_pixels_grid_bis(self.reatribute_id_pixels,
                                      self.data_electronics_LG)
        elif self.value_hg_or_lg.get() == "TOT":
            self._update_box_messages("We are in TOT")
            self._update_box_messages(self.list_of_pixels_on_events)
            print(self.data_electronics_tot)
            self.plot_pixels_grid_bis(self.reatribute_id_pixels,
                                      self.data_electronics_tot)
    def on_key_event(event):
        # Forward matplotlib key presses to the standard toolbar shortcuts.
        print('you pressed %s' % event.key)
        key_press_handler(event, self.canvas_fen1, self.toolbar_fen1)
    self.canvas_fen1.draw()
    self.canvas_fen1.mpl_connect('key_press_event', on_key_event)
    self.canvas_fen1.flush_events()
    # free memory
    self.clear_figure(self.axes_fen1)
    # Drop the array that was just drawn so the next cycle starts clean.
    if self.box_choice.get() == "Temp":
        del self.usb2can.data_temperature
    else:
        if self.value_hg_or_lg.get() == "HG":
            del self.data_electronics_HG
        elif self.value_hg_or_lg.get() == "LG":
            del self.data_electronics_LG
        elif self.value_hg_or_lg.get() == "TOT":
            del self.data_electronics_tot
def clear_figure(self, axes):
    """Clear both pyplot's current axes and the *axes* object passed in.

    Called between display refreshes to free the patches added by
    :meth:`plot_pixels_grid_bis` before the next redraw.
    """
    plt.cla()      # clear whatever pyplot considers the "current" axes
    axes.clear()   # and explicitly clear the axes we were handed
    #self.cb_fen1.remove()
    #plt.close()
    # del self.fig,self.axes
def trigger_sofware_in_pixel_configuration(self, dict_pixelid_values):
    '''Apply the software trigger condition based on the pixel configuration.

    Reads the trigger multiplicity from GUI entry ``self.entr4`` and fills
    ``self.list_pixels_triggered`` from the hit pixels in
    *dict_pixelid_values* (pixel_id -> value):

    * 1 — every hit pixel triggers;
    * 2 — a hit pixel triggers if at least one of its neighbours
      (from ``self.neighboors``) was also hit;
    * 3 — a hit pixel triggers if a hit neighbour itself has another hit
      neighbour (a chain of three hits).

    Any other value leaves ``self.list_pixels_triggered`` untouched
    (it may then be unset — preserved historical behaviour).
    '''
    self.software_trigger_in_pixel_configuration = int(self.entr4.get())
    # Hit pixel ids, in dict insertion order; the dict is not mutated
    # below, so compute the list once instead of per iteration.
    hit_pixels = list(dict_pixelid_values.keys())
    if self.software_trigger_in_pixel_configuration == 1:
        self.list_pixels_triggered = list(hit_pixels)
    elif self.software_trigger_in_pixel_configuration == 2:
        self.list_pixels_triggered = [pixels_test for pixels_test in hit_pixels
                                      if not set(self.neighboors[pixels_test]).isdisjoint(hit_pixels)]
    elif self.software_trigger_in_pixel_configuration == 3:
        self.list_pixels_triggered = []
        for pixels_test in hit_pixels:
            if not set(self.neighboors[pixels_test]).isdisjoint(hit_pixels):
                # Hits other than the pixel under test (was named `iter`,
                # which shadowed the builtin — renamed).
                other_hits = list(hit_pixels)
                other_hits.remove(pixels_test)
                # Among the hit neighbours of pixels_test, look for one
                # that itself has another hit neighbour.
                for pixels in list(set(hit_pixels).intersection(self.neighboors[pixels_test])):
                    if not set(self.neighboors[pixels]).isdisjoint(other_hits):
                        self.list_pixels_triggered.append(pixels_test)
                        break
def export_data_electronis_values(self):
    """Read pickled events for one display window and refresh the camera view.

    Polls ``self.new_file1`` (a pickle stream written by the acquisition
    side) for up to ``entr10`` milliseconds, accumulating per-pixel
    LG / HG / ToT amplitudes into local and global histograms and keeping,
    per channel, the candidate event with the largest summed amplitude
    over the software-triggered pixels.  The winning arrays are stored in
    ``self.data_electronics_LG/_HG/_tot`` and drawn via
    :meth:`plots_hex_in_canvas_pdp`; the next cycle is scheduled with
    ``fen1.after``.  An ``"END"`` record stops the acquisition.
    """
    if self.entr5.get()=='': #attribute value 0 to entry 5 if it is equal to 0
        self.entr5.delete(0,END)
        self.entr5.insert(0,"0")
    # entr10 holds the display window in milliseconds; convert to seconds.
    self.time_allowed_to_display_events = int(self.entr10.get()) * 1e-3
    duration=0
    # Candidate display arrays keyed by their triggered-pixel amplitude
    # sum; the largest key wins after the window closes.
    dict_data_electronics_LG={}
    dict_data_electronics_HG = {}
    dict_data_electronics_tot = {}
    start_time = time.time()
    # Fresh per-call ("local") histograms, one per pixel (144 pixels).
    self.old_dict_pixelid_values_HG_for_histo_local = dict(
        (i, Hist1D_local(self.bin_array_HG[i], 0, 5000, i)) for i in np.arange(144))
    self.old_dict_pixelid_values_LG_for_histo_local = dict(
        (i, Hist1D_local(self.bin_array_LG[i], 0, 5000, i)) for i in np.arange(144))
    self.old_dict_pixelid_values_tot_for_histo_local = dict(
        (i, Hist1D_local(self.bin_array_tot[i], 0, 5000, i)) for i in np.arange(144))
    while duration < self.time_allowed_to_display_events:
        # Remember the read position so we can rewind after a partial read.
        where = self.new_file1.tell()
        try:
            line = pickle.load(self.new_file1)
            #print(line)
            if line=="END":
                # Producer wrote the end marker: wait until stopping is
                # allowed, report timings, then leave the read loop.
                while self.flag_can_stop_all==0:
                    time.sleep(2)
                print("End of the new file")
                print("Event display is finish")
                stop_test = time.time()
                print('my test duration in step 3====', stop_test - self.start_test)
                print('my test duration in step glopbal (duration since begining until end of display)====', stop_test - self.start_test_global)
                self.stop_it()
                break
        except EOFError:
            # Nothing new in the file yet; flag it so we rewind below.
            line=0
            pass
        if line==0:
            # Wait for the writer to catch up, then retry from `where`.
            time.sleep(1)
            self.new_file1.seek(where)
        else:
            # line is ("Amplitude", LG_events, HG_events) or ("tot", events).
            if line[0]=="Amplitude":
                event_data_amplitude_LG=line[1]
                event_data_amplitude_HG=line[2]
                #print(line[0])
                if event_data_amplitude_LG != {}:
                    # Each event value is [(pixel_id, amplitude), ...];
                    # regroup amplitudes per pixel id.
                    list_tuples_LG = [var[0] for var in list(event_data_amplitude_LG.values())]
                    dict_pixelid_values_LG = {}
                    for (x, y) in enumerate(list_tuples_LG):
                        if y[0] not in dict_pixelid_values_LG.keys():
                            dict_pixelid_values_LG[y[0]] = [y[1]]
                        else:
                            dict_pixelid_values_LG[y[0]].append(y[1])
                    # the local and global histogram will be performing here
                    for keys, data in dict_pixelid_values_LG.items():
                        self.old_dict_pixelid_values_LG_for_histo_global[keys].fill(data, keys)
                        self.old_dict_pixelid_values_LG_for_histo_local[keys].fill(data, keys)
                    # data_electronics_LG = np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
                    data_electronics_LG = np.zeros(144)
                    if self.box_choice.get()=="DAC": #test if i want to display events in the DAc mode
                        # Keep the maximum amplitude per pixel (raw DAC counts).
                        dict_pixelid_values_LG = {keys: np.max(dict_pixelid_values_LG[keys]) for keys in
                                                  dict_pixelid_values_LG.keys()}
                        # print(dict_pixelid_values_LG)
                        self.trigger_sofware_in_pixel_configuration(dict_pixelid_values_LG) #test trigger pixels configuration
                        #sum_to_have_more_event_ligthed_LG = np.sum(list(dict_pixelid_values_LG.values()))
                        sum_to_have_more_event_ligthed_LG = np.sum([dict_pixelid_values_LG[item] for item in
                                                                    self.list_pixels_triggered])
                        if sum_to_have_more_event_ligthed_LG >= int(self.entr5.get()): #test trigger pixels sum values
                            # data_electronics_LG[list(dict_pixelid_values_LG.keys())] = list(dict_pixelid_values_LG.values())
                            data_electronics_LG[self.list_pixels_triggered] = [dict_pixelid_values_LG[item] for item in
                                                                               self.list_pixels_triggered]
                            dict_data_electronics_LG[sum_to_have_more_event_ligthed_LG] = data_electronics_LG
                    elif self.box_choice.get()=="PE":
                        # Pedestal-subtract and gain-correct to photo-electrons,
                        # dropping pixels below their pedestal.
                        dict_pixelid_values_LG = {keys: ((np.max(dict_pixelid_values_LG[keys]) \
                            - self.pedestal_LG[keys])/self.Gain_LG[keys]) for keys in
                            dict_pixelid_values_LG.keys() if \
                            np.max(dict_pixelid_values_LG[keys])>=self.pedestal_LG[keys]}
                        # print(dict_pixelid_values_LG)
                        self.trigger_sofware_in_pixel_configuration(
                            dict_pixelid_values_LG) # test trigger pixels configuration
                        # sum_to_have_more_event_ligthed_LG = np.sum(list(dict_pixelid_values_LG.values()))
                        sum_to_have_more_event_ligthed_LG = np.sum([dict_pixelid_values_LG[item] for item in
                                                                    self.list_pixels_triggered])
                        if sum_to_have_more_event_ligthed_LG >= int(self.entr5.get()): # test trigger pixels sum values
                            # data_electronics_LG[list(dict_pixelid_values_LG.keys())] = list(dict_pixelid_values_LG.values())
                            data_electronics_LG[self.list_pixels_triggered] = [dict_pixelid_values_LG[item] for \
                                item in self.list_pixels_triggered]
                            dict_data_electronics_LG[sum_to_have_more_event_ligthed_LG] = data_electronics_LG
                if event_data_amplitude_HG != {}:
                    # Same processing as the LG branch, on the HG channel.
                    #print(list_tuples_HG)
                    list_tuples_HG = [var[0] for var in list(event_data_amplitude_HG.values())]
                    dict_pixelid_values_HG = {}
                    for (x, y) in enumerate(list_tuples_HG):
                        if y[0] not in dict_pixelid_values_HG.keys():
                            dict_pixelid_values_HG[y[0]] = [y[1]]
                        else:
                            dict_pixelid_values_HG[y[0]].append(y[1])
                    # the local and global histogram will be performing here
                    for keys, data in dict_pixelid_values_HG.items():
                        self.old_dict_pixelid_values_HG_for_histo_global[keys].fill(data, keys)
                        self.old_dict_pixelid_values_HG_for_histo_local[keys].fill(data, keys)
                    # data_electronics_HG = np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
                    data_electronics_HG = np.zeros(144)
                    if self.box_choice.get()=="DAC": #test if i want to display events in the DAc mode
                        dict_pixelid_values_HG = {keys: np.max(dict_pixelid_values_HG[keys]) for keys in
                                                  dict_pixelid_values_HG.keys()}
                        # print(dict_pixelid_values_HG)
                        self.trigger_sofware_in_pixel_configuration(dict_pixelid_values_HG) #test trigger pixels configuration
                        #sum_to_have_more_event_ligthed_HG = np.sum(list(dict_pixelid_values_HG.values()))
                        sum_to_have_more_event_ligthed_HG = np.sum([dict_pixelid_values_HG[item] for item in
                                                                    self.list_pixels_triggered])
                        if sum_to_have_more_event_ligthed_HG >= int(self.entr5.get()): #test trigger pixels sum values
                            # data_electronics_HG[list(dict_pixelid_values_HG.keys())] = list(dict_pixelid_values_HG.values())
                            data_electronics_HG[self.list_pixels_triggered] = [dict_pixelid_values_HG[item] for item in
                                                                               self.list_pixels_triggered]
                            dict_data_electronics_HG[sum_to_have_more_event_ligthed_HG] = data_electronics_HG
                    elif self.box_choice.get()=="PE":
                        dict_pixelid_values_HG = {keys: ((np.max(dict_pixelid_values_HG[keys]) \
                            - self.pedestal_HG[keys])/self.Gain_HG[keys]) for keys in
                            dict_pixelid_values_HG.keys() if \
                            np.max(dict_pixelid_values_HG[keys])>=self.pedestal_HG[keys]}
                        # print(dict_pixelid_values_HG)
                        self.trigger_sofware_in_pixel_configuration(
                            dict_pixelid_values_HG) # test trigger pixels configuration
                        # sum_to_have_more_event_ligthed_HG = np.sum(list(dict_pixelid_values_HG.values()))
                        sum_to_have_more_event_ligthed_HG = np.sum([dict_pixelid_values_HG[item] for item in
                                                                    self.list_pixels_triggered])
                        if sum_to_have_more_event_ligthed_HG >= int(self.entr5.get()): # test trigger pixels sum values
                            #print("self.entr5", self.entr5.get())
                            # data_electronics_HG[list(dict_pixelid_values_HG.keys())] = list(dict_pixelid_values_HG.values())
                            data_electronics_HG[self.list_pixels_triggered] = [dict_pixelid_values_HG[item] for \
                                item in self.list_pixels_triggered]
                            dict_data_electronics_HG[sum_to_have_more_event_ligthed_HG] = data_electronics_HG
            elif line[0]=="tot":
                # Time-over-threshold records, processed like the LG branch.
                event_data_tot=line[1]
                if event_data_tot != {}:
                    list_tuples_tot = [var[0] for var in list(event_data_tot.values())]
                    dict_pixelid_values_tot = {}
                    for (x, y) in enumerate(list_tuples_tot):
                        if y[0] not in dict_pixelid_values_tot.keys():
                            dict_pixelid_values_tot[y[0]] = [y[1]]
                        else:
                            dict_pixelid_values_tot[y[0]].append(y[1])
                    # the local and global histogram will be performing here
                    for keys, data in dict_pixelid_values_tot.items():
                        self.old_dict_pixelid_values_tot_for_histo_global[keys].fill(data, keys)
                        self.old_dict_pixelid_values_tot_for_histo_local[keys].fill(data, keys)
                    # data_electronics_tot = np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
                    data_electronics_tot = np.zeros(144)
                    if self.box_choice.get()=="DAC": #test if i want to display events in the DAc mode
                        dict_pixelid_values_tot = {keys: np.max(dict_pixelid_values_tot[keys]) for keys in
                                                   dict_pixelid_values_tot.keys()}
                        # print(dict_pixelid_values_tot)
                        self.trigger_sofware_in_pixel_configuration(dict_pixelid_values_tot) #test trigger pixels configuration
                        #sum_to_have_more_event_ligthed_tot = np.sum(list(dict_pixelid_values_tot.values()))
                        sum_to_have_more_event_ligthed_tot = np.sum([dict_pixelid_values_tot[item] for item in
                                                                     self.list_pixels_triggered])
                        if sum_to_have_more_event_ligthed_tot >= int(self.entr5.get()): #test trigger pixels sum values
                            # data_electronics_tot[list(dict_pixelid_values_tot.keys())] = list(dict_pixelid_values_tot.values())
                            data_electronics_tot[self.list_pixels_triggered] = [dict_pixelid_values_tot[item] for item in
                                                                                self.list_pixels_triggered]
                            dict_data_electronics_tot[sum_to_have_more_event_ligthed_tot] = data_electronics_tot
                    elif self.box_choice.get()=="PE":
                        dict_pixelid_values_tot = {keys: ((np.max(dict_pixelid_values_tot[keys]) \
                            - self.pedestal_tot[keys])/self.Gain_tot[keys]) for keys in
                            dict_pixelid_values_tot.keys() if \
                            np.max(dict_pixelid_values_tot[keys])>=self.pedestal_tot[keys]}
                        # print(dict_pixelid_values_tot)
                        self.trigger_sofware_in_pixel_configuration(
                            dict_pixelid_values_tot) # test trigger pixels configuration
                        # sum_to_have_more_event_ligthed_tot = np.sum(list(dict_pixelid_values_tot.values()))
                        sum_to_have_more_event_ligthed_tot = np.sum([dict_pixelid_values_tot[item] for item in
                                                                     self.list_pixels_triggered])
                        if sum_to_have_more_event_ligthed_tot >= int(self.entr5.get()): # test trigger pixels sum values
                            #print("self.entr5", self.entr5.get())
                            # data_electronics_tot[list(dict_pixelid_values_tot.keys())] = list(dict_pixelid_values_tot.values())
                            data_electronics_tot[self.list_pixels_triggered] = [dict_pixelid_values_tot[item] for \
                                item in self.list_pixels_triggered]
                            dict_data_electronics_tot[sum_to_have_more_event_ligthed_tot] = data_electronics_tot
        #stop_time = time.time()
        # NOTE(review): start_time is reset every pass, so `duration`
        # measures only the last iteration, not the total elapsed time —
        # confirm this is the intended exit condition.
        duration = time.time() - start_time
        start_time=time.time()
    #print(dict_data_electronics_LG)
    # For each channel: if no candidate passed the trigger, publish an
    # all-zero array; otherwise keep the candidate with the largest sum.
    if dict_data_electronics_LG=={}:
        #self.data_electronics_LG = np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
        self.data_electronics_LG = np.array([random.randint(0, 0) for r in range(144)])
        dict_pixelid_values_LG={}
    else:
        self.data_electronics_LG = dict_data_electronics_LG[np.max(list(dict_data_electronics_LG.keys()))]
        #self.data_electronics_LG = dict_data_electronics_LG[random.choice(list(dict_data_electronics_LG.keys()))]
    if dict_data_electronics_HG=={}:
        #self.data_electronics_HG = np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
        self.data_electronics_HG = np.array([random.randint(0, 0) for r in range(144)])
        dict_pixelid_values_HG={}
    else:
        self.data_electronics_HG = dict_data_electronics_HG[np.max(list(dict_data_electronics_HG.keys()))]
        #self.data_electronics_HG = dict_data_electronics_HG[random.choice(list(dict_data_electronics_HG.keys()))]
    if dict_data_electronics_tot=={}:
        #self.data_electronics_tot = np.array([random.randint(0, self.threshold_DAC) for r in range(144)])
        self.data_electronics_tot = np.array([random.randint(0, 0) for r in range(144)])
        dict_pixelid_values_tot={}
    else:
        self.data_electronics_tot = dict_data_electronics_tot[np.max(list(dict_data_electronics_tot.keys()))]
        #self.data_electronics_tot = dict_data_electronics_tot[random.choice(list(dict_data_electronics_tot.keys()))]
    #print(self.old_dict_pixelid_values_HG_for_histo_local)
    #print(self.old_dict_pixelid_values_LG_for_histo_local)
    # print(self.old_dict_pixelid_values_tot_for_histo_local)
    # NOTE(review): in the non-empty branches, dict_pixelid_values_* are
    # loop variables from the while above — they hold the last processed
    # event, not necessarily the displayed one.
    self.list_of_pixels_on_events_LG=list(dict_pixelid_values_LG.keys())
    self.list_of_pixels_on_events_HG = list(dict_pixelid_values_HG.keys())
    self.list_of_pixels_on_events_tot = list(dict_pixelid_values_tot.keys())
    #plt.close(self.fig)
    self.plots_hex_in_canvas_pdp()
    self.fen1.update_idletasks()
    if self.flag_active_draw_button_for_histo_parent==0:
        self.b7.config(state="active") # enable the draw histogram button
        self.flag_active_draw_button_for_histo_parent = 1 #This flag is to know if i can have data to draw it in histogramm
    if self.flag_1 == 1:
        print("cycle")
        self.flag_start = 0
        if (self.fen2 is not None) and self.fen2.winfo_exists(): # condition to test if child fen2 window is open or not
            #print(self.fen2.wm_state()) # this is to print the status of child fen 2 only if fen2 is open
            #animation.FuncAnimation(self.canvas, self.export_data_electronis_values(), interval=5000)
            #self.th2 = threading.Thread(target=self._trace_histo_pixel_draw_and_plot()).start() #this is to start at same time two functions
            #self.th1 = threading.Thread(target=self.fen1.after(5000, self.start_it())).start() #this is to start at same time two functions
            #mp2 = multiprocessing.Process(target=self._trace_histo_pixel_draw_and_plot(),args=(...)).start() #this is to start at same time two functions
            #mp1 = multiprocessing.Process(target=self.fen1.after(5000, self.start_it()),args=(...)).start() #this is to start at same time two functions
            # NOTE(review): target=f() CALLS f immediately and passes its
            # result to Thread — likely meant target=f (no parentheses).
            self.th2 = threading.Thread(target=self._trace_histo_pixel_draw_and_plot()).start() # this is to start at same time two functions
        #else :
        self.fen1.after(5000, self.start_it)
class get_data_from_usb2can_ixxat():
    """Slow-control access to the PDP over an IXXAT USB-to-CAN interface.

    Connects/disconnects the CAN bus, switches the high voltage on/off,
    and polls per-pixel HV and temperature readings from the 12 front-end
    modules.  Each module answers on one CAN arbitration id and serves
    12 pixels (4 replies of 3 values, selected by a "patch" flag byte).
    """
    #def __init__(self,power_supply):
    def __init__(self):
        #self.power_supply=power_supply
        # ANSI escape codes used to colour console messages.
        self.CRED = '\033[91m'
        self.CGREEN = '\33[32m'
        self.CVIOLET = '\33[35m'
        self.CEND = '\033[0m'
        # 0/1 state flags for the bus connection and the HV supply.
        self.flag_connect_usb2can=0
        self.flag_HV_ON = 0
        self.time_out=1 #In seconds
        # Safety limits: readings at or above these trigger a shutdown.
        self.critical_temperature=43
        self.critical_HV = 80
        # Dead code: an earlier arbitration-id -> pixel mapping kept as a
        # reference inside a no-op string literal.
        '''
        self.dict_arbitrationId_contains_modules_and_pixelsId = {
        0x602: [[2], [98, 99, 112, 113, 87, 88, 100, 101, 76, 89, 90, 102]],
        0x603: [[7], [124, 125, 135, 136, 114, 115, 126, 127, 103, 116, 117, 128]],
        0x604: [[8], [137, 138, 142, 143, 131, 132, 139, 140, 123, 133, 134, 141]],
        0x605: [[9], [118, 119, 129, 130, 108, 109, 120, 121, 97, 110, 111, 122]],
        0x606: [[3], [39, 50, 49, 61, 51, 63, 62, 73, 75, 74, 86, 85]],
        0x607: [[10], [60, 71, 70, 82, 72, 84, 83, 94, 96, 95, 107, 106]],
        0x608: [[11], [12, 23, 22, 34, 24, 36, 35, 46, 48, 47, 59, 58]],
        0x609: [[12], [1, 6, 5, 13, 7, 15, 14, 25, 27, 26, 38, 37]],
        0x60A: [[1], [78, 66, 54, 42, 77, 65, 53, 41, 64, 52, 40, 28]],
        0x60B: [[4], [30, 18, 10, 4, 29, 17, 9, 3, 16, 8, 2, 0]],
        0x60C: [[5], [57, 45, 33, 21, 56, 44, 32, 20, 43, 31, 19, 11]],
        0x60D: [[6], [105, 93, 81, 69, 104, 92, 80, 68, 91, 79, 67, 55]]}
        '''
        # CAN arbitration id -> [[module number], [the 12 pixel ids served
        # by that module, in patch order]].
        self.dict_arbitrationId_contains_modules_and_pixelsId = {
            0x602: [[2], [0, 1, 2, 3, 6, 7, 4, 5, 10, 11, 8, 9]],
            0x603: [[7], [12, 13, 14, 15, 18, 19, 16, 17, 22, 23, 20, 21]],
            0x604: [[8], [24, 25, 26, 27, 30, 31, 28, 29, 34, 35, 32, 33]],
            0x605: [[9], [36, 37, 38, 39, 42, 43, 40, 41, 46, 47, 44, 45]],
            0x606: [[3], [48, 49, 50, 51, 54, 55, 52, 53, 58, 59, 56, 57]],
            0x607: [[10], [60, 61, 62, 63, 66, 67, 64, 65, 70, 71, 68, 69]],
            0x608: [[11], [72, 73, 74, 75, 78, 79, 76, 77, 82, 83, 80, 81]],
            0x609: [[12], [84, 85, 86, 87, 90, 91, 88, 89, 94, 95, 92, 93]],
            0x60A: [[1], [96, 97, 98, 99, 102, 103, 100, 101, 106, 107, 104, 105]],
            0x60B: [[4], [108, 109, 110, 111, 114, 115, 112, 113, 118, 119, 116, 117]],
            0x60C: [[5], [120, 121, 122, 123, 126, 127, 124, 125, 130, 131, 128, 129]],
            0x60D: [[6], [132, 133, 134, 135, 138, 139, 136, 137, 142, 143, 140, 141]]}
    def connect_interface_usb2can_Ixxat(self):
        """Open the IXXAT CAN bus (idempotent) and start the module firmware."""
        if self.flag_connect_usb2can==0:
            self.flag_connect_usb2can += 1
            self.bus = can.interface.Bus(bustype='ixxat', channel=0, bitrate=181000)
            print(self.CGREEN + "Usb2can Ixxat has been connected with success" + self.CEND)
            self.jump_to_application_usb2can()
        else:
            print(self.CGREEN + "Usb2can Ixxat already connected" + self.CEND)
            self.empty_the_fifo()
    def shutdown_interface_usb2can_Ixxat(self):
        """Switch the HV off if it was on, then close the CAN bus (idempotent)."""
        if self.flag_HV_ON==1:
            self.set_HV_OFF_PDP()
        else:
            print(self.CVIOLET + "The HV of the PDP has never been ON"+ self.CEND)
        if self.flag_connect_usb2can == 1:
            self.flag_connect_usb2can = 0
            self.bus.shutdown()
            print(self.CVIOLET + '\33[35m' + "Usb2can Ixxat has been shutdown properly" + self.CEND)
        else:
            print(self.CVIOLET + '\33[35m' + "Usb2can Ixxat is already deconnected" + self.CEND)
        self.empty_the_fifo()
    def empty_the_fifo(self):
        """Drain pending CAN frames until recv() times out (str of None)."""
        try:
            self.rx=str(self.bus.recv(self.time_out))
            while self.rx!="None":
                self.rx = str(self.bus.recv(self.time_out))
        except:
            # NOTE(review): bare except hides a missing/closed bus; if recv
            # never ran, self.rx may be unset at the print below.
            pass
        print(self.CVIOLET + "The fifo is empty" + self.CEND)
        print(self.rx)
    def jump_to_application_usb2can(self):
        """Ask every module's bootloader to jump to the application firmware.

        A timeout (recv returning None) is treated as success; any actual
        reply is reported as a failure for that module.
        """
        for can_node in self.dict_arbitrationId_contains_modules_and_pixelsId.keys():
            msg=can.Message(arbitration_id=can_node,data=[0x1F,0x05],extended_id=False)
            self.bus.send(msg)
            time.sleep(0.5)
            self.rx=str(self.bus.recv(self.time_out))
            if self.rx=="None":
                print(self.CGREEN+ "Jump to application in module %s"%self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][0][0] + self.CEND)
            else:
                print(self.rx)
                print(self.CRED+ "Jump to application failed in module %s"%self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][0][0] + self.CEND)
                #raise IOError ("Jump to application failed")
        self.empty_the_fifo()
    def set_HV_ON_PDP(self):
        """Broadcast the HV-on command (payload 0x07 0x8F 0xFF) to all modules."""
        self.flag_HV_ON += 1
        for can_node in self.dict_arbitrationId_contains_modules_and_pixelsId.keys():
            msg = can.Message(arbitration_id=can_node, data=[0x07, 0x8F, 0xFF], extended_id=False)
            self.bus.send(msg)
            time.sleep(0.5)
        self.empty_the_fifo()
        print(self.CGREEN + "The HV of the PDP is ON" + self.CEND)
    def set_HV_OFF_PDP(self):
        """Broadcast the HV-off command (payload 0x07 0x80 0x00) to all modules."""
        self.flag_HV_ON = 0
        for can_node in self.dict_arbitrationId_contains_modules_and_pixelsId.keys():
            msg = can.Message(arbitration_id=can_node, data=[0x07, 0x80, 0x00], extended_id=False)
            self.bus.send(msg)
            time.sleep(0.5)
        self.empty_the_fifo()
        print(self.CVIOLET + "The HV of the PDP is going OFF properly"+ self.CEND)
    def get_HV_PDP(self):
        """Read the per-pixel high voltage into self.data_HV (144 entries).

        Each module returns 4 frames; the flag byte (1/21/41/61) selects
        which group of 3 pixels the frame carries.  If any value reaches
        self.critical_HV or is negative, the interface is shut down.
        """
        self.data_HV = [0] * 144
        for can_node in self.dict_arbitrationId_contains_modules_and_pixelsId.keys():
            msg = can.Message(arbitration_id=can_node, data=[0x01, 0x04], extended_id=False)
            self.bus.send(msg)
            for patch in range(4):
                # Frames are parsed from the str() form of the Message;
                # fields 6.. are DLC, flag byte and the hex data bytes.
                self.rx = str(self.bus.recv(self.time_out)).split()
                dlc = int(self.rx[6])
                if dlc!=1:
                    # Unexpected reply length: dump the raw frame for debug.
                    print(self.rx)
                flag_patch = int(self.rx[7])
                # 1.67375e-3 converts the raw 16-bit reading to volts —
                # calibration constant, TODO confirm its origin.
                HV_0 = (int(self.rx[8] + self.rx[9], 16)* 1.67375e-3)
                HV_1 = (int(self.rx[10] + self.rx[11], 16)* 1.67375e-3)
                HV_2 = (int(self.rx[12] + self.rx[13], 16) * 1.67375e-3)
                if flag_patch == 1:
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][0]] = HV_0
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][1]] = HV_1
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][2]] = HV_2
                if flag_patch == 21:
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][3]] = HV_0
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][4]] = HV_1
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][5]] = HV_2
                if flag_patch == 41:
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][6]] = HV_0
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][7]] = HV_1
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][8]] = HV_2
                if flag_patch == 61:
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][9]] = HV_0
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][10]] = HV_1
                    self.data_HV[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][11]] = HV_2
        # Safety check: any critical or negative reading forces a shutdown.
        test_HV_critical_or_negative_values = [temp for temp in self.data_HV if temp>=self.critical_HV or temp < 0]
        if test_HV_critical_or_negative_values == []:
            print(self.data_HV)
            print(self.CVIOLET + "Get HV's PDP properly" + self.CEND)
            print("THe maximum HV in PDP is %s" % np.max(self.data_HV))
        else:
            self.shutdown_interface_usb2can_Ixxat()
            print(test_HV_critical_or_negative_values)
            print(self.data_HV)
            nbre = 0
            while nbre <= 8:
                # Repeat the warning 9 times so it stands out on the console.
                print(self.CRED + "HV has reached critical value or is negative" + self.CEND)
                nbre += 1
            #raise IOError("HV has reached critical value or is negative")
        self.empty_the_fifo()
    def get_temperature_PDP(self):
        """Read the per-pixel temperature into self.data_temperature (144 entries).

        Same frame layout as :meth:`get_HV_PDP` (4 frames per module,
        patch flag 1/21/41/61).  A malformed frame triggers a reconnect
        and a recursive retry; a critical/negative value shuts the
        interface down, waits 20 minutes and retries recursively.
        """
        self.data_temperature = [0] * 144
        for can_node in self.dict_arbitrationId_contains_modules_and_pixelsId.keys():
            msg = can.Message(arbitration_id=can_node, data=[0x01, 0x00], extended_id=False)
            self.bus.send(msg)
            #time.sleep(1)
            for patch in range(4):
                self.rx = self.bus.recv(self.time_out)
                self.rx=str(self.rx).split()
                dlc = int(self.rx[6])
                if dlc!=7:
                    # Malformed reply: dump it, then reconnect and retry
                    # the whole readout recursively.
                    print(self.rx)
                    # NOTE(review): self.usb2can is not an attribute of this
                    # class — this line looks copied from the GUI class and
                    # will raise AttributeError if this branch runs; confirm.
                    print(self.usb2can.flag_connect_usb2can , self.usb2can.flag_HV_ON)
                    # power_supply.Communicate('INST OUT1\n')
                    # power_supply.Communicate('OUTP OFF\n')
                    # power_supply.Communicate('INST OUT2\n')
                    # power_supply.Communicate('OUTP OFF\n')
                    # power_supply.Communicate('INST OUT3\n')
                    # power_supply.Communicate('OUTP OFF\n')
                    # power_supply.Communicate('INST OUT1\n')
                    # power_supply.Communicate('OUTP ON\n')
                    # power_supply.Communicate('INST OUT2\n')
                    # power_supply.Communicate('OUTP ON\n')
                    # power_supply.Communicate('INST OUT3\n')
                    # power_supply.Communicate('OUTP ON\n')
                    time.sleep(1)
                    self.flag_connect_usb2can = 0
                    self.flag_HV_ON = 0
                    self.connect_interface_usb2can_Ixxat()
                    time.sleep(1)
                    self.set_HV_ON_PDP()
                    time.sleep(1)
                    self.get_temperature_PDP()
                flag_patch = int(self.rx[7])
                # (raw - 272.93) / 5.97403 converts the 16-bit reading to
                # degrees Celsius — calibration constants, TODO confirm.
                temperature_0 = (int(self.rx[8] + self.rx[9], 16) - 272.93) / 5.97403
                temperature_1 = (int(self.rx[10] + self.rx[11], 16) - 272.93) / 5.97403
                temperature_2 = (int(self.rx[12] + self.rx[13], 16) - 272.93) / 5.97403
                if flag_patch==1:
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][0]] = temperature_0
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][1]] = temperature_1
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][2]] = temperature_2
                if flag_patch==21:
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][3]] = temperature_0
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][4]] = temperature_1
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][5]] = temperature_2
                if flag_patch==41:
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][6]] = temperature_0
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][7]] = temperature_1
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][8]] = temperature_2
                if flag_patch==61:
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][9]] = temperature_0
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][10]] = temperature_1
                    self.data_temperature[
                        self.dict_arbitrationId_contains_modules_and_pixelsId[can_node][1][11]] = temperature_2
        # Safety check: any critical or negative reading forces a shutdown,
        # a 20-minute cool-down, then a full reconnect and re-read.
        test_temprature_critical_or_negative_values=[temp for temp in self.data_temperature if temp>=self.critical_temperature or temp<0]
        if test_temprature_critical_or_negative_values==[]:
            print(self.CVIOLET+ "We Get temperature's PDP properly" +self.CEND)
            print(self.CVIOLET+"THe maximum temperature in PDP is %s" %np.max(self.data_temperature) +self.CEND)
            #print(self.flag_connect_usb2can)
        else:
            self.shutdown_interface_usb2can_Ixxat()
            print(test_temprature_critical_or_negative_values)
            print(self.data_temperature)
            nbre=0
            while nbre<=8:
                # Repeat the warning 9 times so it stands out on the console.
                print(self.CRED+ "Temperature has reached critical value or is negative. \n Please switch off the power supply of the slow control" + self.CEND)
                nbre+=1
            #raise IOError ("Temperature has reached critical value or is negative")
            #'''
            # power_supply.Communicate('INST OUT1\n')
            # power_supply.Communicate('OUTP OFF\n')
            # power_supply.Communicate('INST OUT2\n')
            # power_supply.Communicate('OUTP OFF\n')
            # power_supply.Communicate('INST OUT3\n')
            # power_supply.Communicate('OUTP OFF\n')
            time.sleep(1200)
            # power_supply.Communicate('INST OUT1\n')
            # power_supply.Communicate('OUTP ON\n')
            # power_supply.Communicate('INST OUT2\n')
            # power_supply.Communicate('OUTP ON\n')
            # power_supply.Communicate('INST OUT3\n')
            # power_supply.Communicate('OUTP ON\n')
            self.flag_connect_usb2can = 0
            self.flag_HV_ON = 0
            time.sleep(1)
            self.connect_interface_usb2can_Ixxat()
            time.sleep(1)
            self.set_HV_ON_PDP()
            time.sleep(1)
            self.get_temperature_PDP()
            #'''
        self.empty_the_fifo()
class Communication_with_babymind_via_socket_server(object):
    """TCP client for the Baby MIND front-end socket server.

    Holds one socket to the server at 129.194.53.160:11000.
    `connect_socket_server_on_or_off` tracks the connection state
    (0 = disconnected, 1 = connected) so Connect()/deconnect() are idempotent.
    """

    def __init__(self):
        # flag: 0 = not yet connected to the socket server, 1 = connected
        self.connect_socket_server_on_or_off = 0
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server_address = ("129.194.53.160", 11000)
        # BUGFIX: the original did print(sys.stderr, (...)) — a botched
        # Python-2 `print >>sys.stderr` conversion that printed the stream
        # object itself to stdout. Route the message to stderr as intended.
        print('connecting to %s port %s' % self.server_address, file=sys.stderr)

    def Connect(self):
        """Open the connection once; calling again while connected is a no-op."""
        if self.connect_socket_server_on_or_off == 0:
            self.sock.connect(self.server_address)
            self.connect_socket_server_on_or_off += 1

    def Communicate(self, string):
        """Send `string` as UTF-8, wait 0.5 s, then read, print and store the reply.

        The reply (up to 1024 bytes, decoded) is kept in
        `self.message_receive_from_babymind` for later inspection.
        """
        print(string)
        self.sock.send(bytes(string, "utf-8"))  # send command
        time.sleep(0.5)  # give the server time to answer
        self.message_receive_from_babymind = self.sock.recv(1024).decode()
        print(self.message_receive_from_babymind)  # wait for reply

    def deconnect(self):
        """Close the socket if currently connected; otherwise just report."""
        if self.connect_socket_server_on_or_off == 1:
            self.connect_socket_server_on_or_off = 0
            self.sock.close()
            print("Socket server has been closed")
        else:
            print("Socket server is already deconnected")
class Communication_with_power_supply(object):
    """Raw-USB driver for the bench power supply.

    Locates the device by USB ids (vendor 0x0403 is FTDI; product 0xED72 —
    presumably the supply's USB-serial bridge, TODO confirm model) and keeps
    a handle to its first OUT endpoint for sending SCPI-like text commands.
    Requires pyusb (`usb`) and a libusb backend.
    """

    def __init__(self):
        # Find the device on the bus; usb.core.find returns None if absent,
        # in which case set_configuration() below raises AttributeError.
        self.dev = usb.core.find(idVendor=0x403, idProduct=0xED72)
        self.dev.set_configuration()
        self.cfg = self.dev.get_active_configuration()
        # interface (0, 0): first interface, first alternate setting
        self.intf = self.cfg[(0, 0)]
        self.ep = usb.util.find_descriptor(
            self.intf,
            # match the first OUT endpoint
            custom_match= \
            lambda e: \
            usb.util.endpoint_direction(e.bEndpointAddress) == \
            usb.util.ENDPOINT_OUT)
        # fail fast if the device exposes no OUT endpoint
        assert self.ep is not None

    def Communicate(self, string):
        '''Write one text command to the supply's OUT endpoint.

        :param string: command terminated with "\\n", e.g.
            -'Opall 0\\n' # set all the outputs Off. Can also use 1 for on
        :return: None (fire-and-forget; no reply is read here)
        '''
        print(string)
        self.ep.write(bytes(string, "utf-8"))  # send command
        time.sleep(0.5)  # let the instrument process the command
class Application:#Frame):
    '''Main application window ("Events on the mini camera").

    Builds the threshold controls (DAC / high-gain / low-gain spinboxes) in
    the given Tk root window and hands everything to `Canon`, which draws the
    initial PDP display. Requires tkinter names (Label, StringVar, Spinbox)
    and the project-level `Canon` class to be in scope.
    '''
    # def __init__(self,fen1,comm,usb2can,power_supply):
    def __init__(self, fen1, comm, usb2can):
        """Lay out the control widgets.

        :param fen1: Tk root window
        :param comm: Communication_with_babymind_via_socket_server instance
        :param usb2can: get_data_from_usb2can_ixxat instance (CAN interface)
        """
        #Frame.__init__(self)
        #self.master.title("Events on the mini camera""Events on the mini camera")
        #self.pack()
        self.fen1=fen1
        self.fen1.wm_title("Events on the mini camera")
        self.flag_start = 0  # 0 until acquisition is started
        self.comm=comm
        self.usb2can=usb2can
        # self.power_supply=power_supply
        # --- DAC threshold spinbox (0..1023, default 540) ---
        self.txt1 = Label(self.fen1, text='Threshold\nDAC :')
        self.txt1.grid(row=2, column=1, sticky='NSEW')
        self.var_threshold_DAC = StringVar(self.fen1)
        self.var_threshold_DAC.set("540")
        self.entr1 = Spinbox(self.fen1, from_=0, to=1023, textvariable=self.var_threshold_DAC)
        self.entr1.grid(row=2, column=2)
        #self.threshold_DAC = int(self.entr1.get())
        # --- high-gain threshold spinbox (0..4095, default 781) ---
        self.txt16 = Label(self.fen1, text='Threshold\nHG :')
        self.txt16.grid(row=1, column=1, sticky='NSEW')
        self.var_threshold_HG = StringVar(self.fen1)
        self.var_threshold_HG.set("781")
        self.entr14 = Spinbox(self.fen1, from_=0, to=4095, textvariable=self.var_threshold_HG)
        self.entr14.grid(row=1, column=2)
        self.threshold_HG = int(self.entr14.get())
        # --- low-gain threshold spinbox (0..4095, default 131) ---
        self.txt17 = Label(self.fen1, text='Threshold\nLG :')
        self.txt17.grid(row=1, column=3, sticky='NSEW')
        self.var_threshold_LG = StringVar(self.fen1)
        self.var_threshold_LG.set("131")
        self.entr15 = Spinbox(self.fen1, from_=0, to=4095, textvariable=self.var_threshold_LG)
        self.entr15.grid(row=1, column=4)
        self.threshold_LG = int(self.entr15.get())
        """
        if self.flag_start==0:
            self.data_electronics_LG=np.array([random.randint(120, 120+self.threshold_DAC) for r in range(144)])
            self.data_electronics_HG = np.array([random.randint(930, 930+self.threshold_DAC) for r in range(144)])
            self.list_of_pixels_on_events_LG=[None]
            self.list_of_pixels_on_events_HG=[None]
        """
        # plot the initial PDP with random values between 0 and threshold
        self.init_PDP = Canon(self.fen1, self.flag_start, self.comm, self.usb2can, self.entr1, self.var_threshold_DAC,
                              self.entr14, self.var_threshold_HG, self.entr15, self.var_threshold_LG)
class Hist1D_global(object):
    """Accumulating 1-D histogram with fixed binning.

    With `key` given, the histogram/edges/centers live in dicts under that
    key; otherwise they are plain arrays. `fill` adds counts in place, so
    the instance accumulates over repeated calls.
    """

    def __init__(self, nbins, xlow, xhigh, key=None):
        self.nbins = nbins
        self.xlow = xlow
        self.xhigh = xhigh
        # start from an empty histogram to get zeroed counts and the edges
        counts, edges = np.histogram([], bins=nbins, range=(xlow, xhigh))
        centers = (edges[:-1] + edges[1:]) / 2.
        if key:
            self.hist_global = {key: counts}
            self.edges_global = {key: edges}
            self.bins_global = {key: centers}
        else:
            self.hist_global = counts
            self.edges_global = edges
            self.bins_global = centers

    def fill(self, arr, key=None):
        """Histogram `arr` with the configured binning and add it in place."""
        counts, _ = np.histogram(arr, bins=self.nbins, range=(self.xlow, self.xhigh))
        if key:
            self.hist_global[key] += counts
        else:
            self.hist_global += counts

    @property
    def data(self):
        """(bin centers, counts) — both dicts when keyed."""
        return self.bins_global, self.hist_global
'''
if __name__ == "__main__":
h = Hist1D(100, 0, 1)
for _ in range(1000):
a = np.random.random((3,))
h.fill(a)
plt.step(*h.data)
plt.show()
h = Hist1D(19, -3, 12)
rng = np.random.RandomState(10) # deterministic random data
data = np.hstack((rng.normal(size=1000),rng.normal(loc=5, scale=2, size=1000)))
h.fill(data)
plt.step(*h.data)
plt.show()
a = np.array([22,87,5,43,56,73,55,54,11,20,51,5,79,31,27])
hist,bins = np.histogram(a,bins = [0,20,40,60,80,100])
bins_c=(bins[:-1] + bins[1:]) / 2.
plt.bar(bins_c,hist,bins_c[1]-bins_c[0],align='center',edgecolor='black')
'''
class Hist1D_local(object):
    """Accumulating 1-D histogram with fixed binning (per-run "local" copy).

    Identical in contract to Hist1D_global but stores its state under the
    *_local attribute names so the two can coexist.
    """

    def __init__(self, nbins, xlow, xhigh, key=None):
        self.nbins = nbins
        self.xlow = xlow
        self.xhigh = xhigh
        # an empty histogram yields zeroed counts plus the bin edges
        counts, edges = np.histogram([], bins=nbins, range=(xlow, xhigh))
        centers = (edges[:-1] + edges[1:]) / 2.
        if key:
            self.hist_local = {key: counts}
            self.edges_local = {key: edges}
            self.bins_local = {key: centers}
        else:
            self.hist_local = counts
            self.edges_local = edges
            self.bins_local = centers

    def fill(self, arr, key=None):
        """Histogram `arr` with the configured binning and add it in place."""
        counts, _ = np.histogram(arr, bins=self.nbins, range=(self.xlow, self.xhigh))
        if key:
            self.hist_local[key] += counts
        else:
            self.hist_local += counts

    @property
    def data(self):
        """(bin centers, counts) — both dicts when keyed."""
        return self.bins_local, self.hist_local
'''
if __name__ == "__main__":
h = Hist1D(100, 0, 1)
for _ in range(1000):
a = np.random.random((3,))
h.fill(a)
plt.step(*h.data)
plt.show()
h = Hist1D(19, -3, 12)
rng = np.random.RandomState(10) # deterministic random data
data = np.hstack((rng.normal(size=1000),rng.normal(loc=5, scale=2, size=1000)))
h.fill(data)
plt.step(*h.data)
plt.show()
a = np.array([22,87,5,43,56,73,55,54,11,20,51,5,79,31,27])
hist,bins = np.histogram(a,bins = [0,20,40,60,80,100])
bins_c=(bins[:-1] + bins[1:]) / 2.
plt.bar(bins_c,hist,bins_c[1]-bins_c[0],align='center',edgecolor='black')
'''
class Hist2D(object):
    """Accumulating 2-D histogram with fixed binning on both axes."""

    def __init__(self, nxbins, xlow, xhigh, nybins, ylow, yhigh):
        self.nxbins = nxbins
        self.xhigh = xhigh
        self.xlow = xlow
        self.nybins = nybins
        self.yhigh = yhigh
        self.ylow = ylow
        self.nbins = (nxbins, nybins)
        self.ranges = ((xlow, xhigh), (ylow, yhigh))
        # empty histogram gives a zeroed count matrix plus the edges
        self.hist, ex, ey = np.histogram2d([], [], bins=self.nbins, range=self.ranges)
        self.xbins = (ex[:-1] + ex[1:]) / 2.
        self.ybins = (ey[:-1] + ey[1:]) / 2.

    def fill(self, xarr, yarr):
        """Histogram the (x, y) pairs and add the counts in place."""
        counts, _, _ = np.histogram2d(xarr, yarr, bins=self.nbins, range=self.ranges)
        self.hist += counts

    @property
    def data(self):
        """(x centers, y centers, count matrix) for pcolor-style plotting."""
        return self.xbins, self.ybins, self.hist
'''
if __name__ == "__main__":
h = Hist2D(100, 0, 1, 100, 0, 1)
for _ in range(1000):
x, y = np.random.random((3,)), np.random.random((3,))
h.fill(x, y)
plt.pcolor(*h.data)
plt.show()
'''
if __name__ =='__main__':
    def kill():
        '''Confirm with the user, then hard-kill the whole process tree.

        Wired to the Tk window-close button so the GUI, the CAN reader and
        any children die together (Windows-only: uses taskkill).
        '''
        if messagebox.askokcancel("Quit", "Do you really wish to quit?"):
            child_pid = os.getpid()  # NOTE(review): this is the CURRENT pid (an int), not a list of child pids
            if type(child_pid) is list:  # NOTE(review): never true — os.getpid() returns int, so only the else branch runs
                for child in child_pid:
                    subprocess.call(['taskkill', '/F', '/T', '/PID', str(child)])
            else:
                #os.system('kill -TERM -P {pid}'.format(pid=parent_pid))
                subprocess.call(['taskkill', '/F', '/T', '/PID', str(child_pid)])  # /T also kills children
            sys.exit(1)
    fen1 = Tk()
    # power_supply = Communication_with_power_supply()
    # power_supply.Communicate('INST OUT1\n')
    # power_supply.Communicate('VOLT 24\n')
    # power_supply.Communicate('CURR 4\n')
    # power_supply.Communicate('INST OUT2\n')
    # power_supply.Communicate('VOLT 24\n')
    # power_supply.Communicate('CURR 2.6\n')
    # power_supply.Communicate('INST OUT3\n')
    # power_supply.Communicate('VOLT 24\n')
    # power_supply.Communicate('CURR 1.6\n')
    comm = Communication_with_babymind_via_socket_server()
    # usb2can=get_data_from_usb2can_ixxat(power_supply)
    usb2can = get_data_from_usb2can_ixxat()
    # fen1.geometry('1300x650+0+0')
    # fen1.resizable(width=False, height=False)
    fen1.protocol("WM_DELETE_WINDOW", kill)  # hook the window-close button to kill()
    # App=Application(fen1,comm,usb2can,power_supply)
    App = Application(fen1, comm, usb2can)
    fen1.mainloop()
    #fen1.update_idletasks()
    #fen1.update()
|
vnokcoin.py | # encoding: UTF-8
import hashlib
import zlib
import json
from time import sleep
from threading import Thread
import websocket
# OKCoin / OKEx websocket endpoints
OKCOIN_CNY = 'wss://real.okcoin.cn:10440/websocket/okcoinapi'
OKCOIN_USD = 'wss://real.okex.com:10441/websocket/okexapi'
# account currency codes
CURRENCY_CNY = 'cny'
CURRENCY_USD = 'usd'
# crypto-currency symbols
SYMBOL_BTC = 'btc'
SYMBOL_LTC = 'ltc'
SYMBOL_ETH = 'eth'
# market depth levels
DEPTH_20 = 20
DEPTH_60 = 60
# candlestick (K-line) intervals
INTERVAL_1M = '1min'
INTERVAL_3M = '3min'
INTERVAL_5M = '5min'
INTERVAL_15M = '15min'
INTERVAL_30M = '30min'
INTERVAL_1H = '1hour'
INTERVAL_2H = '2hour'
INTERVAL_4H = '4hour'
INTERVAL_6H = '6hour'
INTERVAL_1D = 'day'
INTERVAL_3D = '3day'
INTERVAL_1W = 'week'
# trading symbol prefixes; append the currency code to complete them
TRADING_SYMBOL_BTC = 'btc_'
TRADING_SYMBOL_LTC = 'ltc_'
TRADING_SYMBOL_ETH = 'eth_'
# spot order types
TYPE_BUY = 'buy'
TYPE_SELL = 'sell'
TYPE_BUY_MARKET = 'buy_market'
TYPE_SELL_MARKET = 'sell_market'
# futures contract expiry types
FUTURE_EXPIRY_THIS_WEEK = 'this_week'
FUTURE_EXPIRY_NEXT_WEEK = 'next_week'
FUTURE_EXPIRY_QUARTER = 'quarter'
# futures order types (open long / open short / close long / close short)
FUTURE_TYPE_LONG = 1
FUTURE_TYPE_SHORT = 2
FUTURE_TYPE_SELL = 3
FUTURE_TYPE_COVER = 4
# whether a futures order uses the market price (1) or a limit price (0)
FUTURE_ORDER_MARKET = 1
FUTURE_ORDER_LIMIT = 0
# futures leverage levels
FUTURE_LEVERAGE_10 = 10
FUTURE_LEVERAGE_20 = 20
# order status codes
ORDER_STATUS_NOTTRADED = 0
ORDER_STATUS_PARTTRADED = 1
ORDER_STATUS_ALLTRADED = 2
ORDER_STATUS_CANCELLED = -1
ORDER_STATUS_CANCELLING = 4
########################################################################
class OkCoinApi(object):
    """Websocket-based API wrapper for the OKCoin/OKEx exchange.

    Python-2 code (uses print statements). Runs the websocket client on a
    background thread; subclasses override the on* callbacks to consume data.
    """
    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.apiKey = ''        # API key (user name)
        self.secretKey = ''     # API secret
        self.host = ''          # server address (one of OKCOIN_CNY / OKCOIN_USD)
        self.currency = ''      # account currency ('usd' or 'cny'), derived from host
        self.ws = None          # websocket application object
        self.thread = None      # worker thread running ws.run_forever

    #######################
    ## general helpers
    #######################

    #----------------------------------------------------------------------
    def readData(self, evt):
        """Decompress a pushed binary frame and parse it as JSON."""
        # create the decompressor; negative wbits = raw deflate stream (no zlib header)
        decompress = zlib.decompressobj(-zlib.MAX_WBITS)
        # inflate the raw payload into a string
        inflated = decompress.decompress(evt) + decompress.flush()
        # parse the JSON string
        data = json.loads(inflated)
        return data

    #----------------------------------------------------------------------
    def generateSign(self, params):
        """Build the upper-cased MD5 signature over sorted params + secret_key."""
        l = []
        for key in sorted(params.keys()):
            l.append('%s=%s' %(key, params[key]))
        l.append('secret_key=%s' %self.secretKey)
        sign = '&'.join(l)
        return hashlib.md5(sign.encode('utf-8')).hexdigest().upper()

    #----------------------------------------------------------------------
    def onMessage(self, ws, evt):
        """Data push callback (override in subclass)."""
        print 'onMessage'
        data = self.readData(evt)
        print data

    #----------------------------------------------------------------------
    def onError(self, ws, evt):
        """Error push callback (override in subclass)."""
        print 'onError'
        print evt

    #----------------------------------------------------------------------
    def onClose(self, ws):
        """Connection-closed callback (override in subclass)."""
        print 'onClose'

    #----------------------------------------------------------------------
    def onOpen(self, ws):
        """Connection-opened callback (override in subclass)."""
        print 'onOpen'

    #----------------------------------------------------------------------
    def connect(self, host, apiKey, secretKey, trace=False):
        """Connect to the server and start the websocket thread.

        The account currency is inferred from the chosen host.
        """
        self.host = host
        self.apiKey = apiKey
        self.secretKey = secretKey

        if self.host == OKCOIN_CNY:
            self.currency = CURRENCY_CNY
        else:
            self.currency = CURRENCY_USD

        # websocket.enableTrace(trace)

        self.ws = websocket.WebSocketApp(host,
                                         on_message=self.onMessage,
                                         on_error=self.onError,
                                         on_close=self.onClose,
                                         on_open=self.onOpen)

        self.thread = Thread(target=self.ws.run_forever)
        self.thread.start()

    #----------------------------------------------------------------------
    def reconnect(self):
        """Tear down the current connection and connect again."""
        # first close the previous connection
        self.close()

        # then redo the connection work
        self.ws = websocket.WebSocketApp(self.host,
                                         on_message=self.onMessage,
                                         on_error=self.onError,
                                         on_close=self.onClose,
                                         on_open=self.onOpen)

        self.thread = Thread(target=self.ws.run_forever)
        self.thread.start()

    #----------------------------------------------------------------------
    def close(self):
        """Close the websocket and join the worker thread."""
        # NOTE(review): isAlive() is the Python-2 era name (is_alive() in Py3)
        if self.thread and self.thread.isAlive():
            self.ws.close()
            self.thread.join()

    #----------------------------------------------------------------------
    def sendMarketDataRequest(self, channel):
        """Subscribe to a market-data channel."""
        # build the request
        d = {}
        d['event'] = 'addChannel'
        d['binary'] = 1
        d['channel'] = channel

        # serialize with json and send
        j = json.dumps(d)

        # NOTE(review): the original comment claimed "reconnect on exception",
        # but the handler only swallows the closed-connection error
        try:
            self.ws.send(j)
        except websocket.WebSocketConnectionClosedException:
            pass

    # ----------------------------------------------------------------------
    def login(self):
        """Log in to the authenticated API (signed with api_key/secret)."""
        # build the request
        params = {}
        params['api_key'] = self.apiKey
        params['sign'] = self.generateSign(params)

        d = {}
        d['event'] = 'login'
        d['binary'] = 1
        d['channel'] = 'login'
        d['parameters'] = params

        # serialize with json and send
        j = json.dumps(d)

        # swallow the closed-connection error (no automatic reconnect)
        try:
            self.ws.send(j)
        except websocket.WebSocketConnectionClosedException:
            pass

    #----------------------------------------------------------------------
    def sendTradingRequest(self, channel, params):
        """Send a signed trading request on the given channel."""
        # add the api_key and signature fields to the parameter dict
        params['api_key'] = self.apiKey
        params['sign'] = self.generateSign(params)

        # build the request
        d = {}
        d['event'] = 'addChannel'
        d['binary'] = 1
        d['channel'] = channel
        d['parameters'] = params

        # serialize with json and send
        j = json.dumps(d)

        # swallow the closed-connection error (no automatic reconnect)
        try:
            self.ws.send(j)
        except websocket.WebSocketConnectionClosedException:
            pass

    #######################
    ## spot market
    #######################

    #----------------------------------------------------------------------
    def subscribeSpotTicker(self, symbol):
        """Subscribe to the spot ticker."""
        self.sendMarketDataRequest('ok_sub_spot_%s_ticker' %(symbol))

    #----------------------------------------------------------------------
    def subscribeSpotDepth(self, symbol, depth):
        """Subscribe to the spot order-book depth."""
        self.sendMarketDataRequest('ok_sub_spot_%s_depth_%s' %(symbol, depth))

    #----------------------------------------------------------------------
    def subscribeSpotTradeData(self, symbol):
        """Subscribe to the spot trade stream."""
        self.sendMarketDataRequest('ok_sub_spot_%s_deals' %(symbol))

    #----------------------------------------------------------------------
    def subscribeSpotKline(self, symbol, interval):
        """Subscribe to spot candlesticks."""
        self.sendMarketDataRequest('ok_sub_spot_%s_kline_%s' %(symbol, interval))

    #----------------------------------------------------------------------
    def spotTrade(self, symbol, type_, price, amount):
        """Place a spot order."""
        params = {}
        params['symbol'] = str(symbol)
        params['type'] = str(type_)
        params['price'] = str(price)
        params['amount'] = str(amount)

        channel = 'ok_spot_order'

        self.sendTradingRequest(channel, params)

    #----------------------------------------------------------------------
    def spotCancelOrder(self, symbol, orderid):
        """Cancel a spot order."""
        params = {}
        params['symbol'] = str(symbol)
        params['order_id'] = str(orderid)

        channel = 'ok_spot_cancel_order'

        self.sendTradingRequest(channel, params)

    #----------------------------------------------------------------------
    def spotUserInfo(self):
        """Query the spot account."""
        channel = 'ok_spot_userinfo'

        self.sendTradingRequest(channel, {})

    #----------------------------------------------------------------------
    def spotOrderInfo(self, symbol, orderid):
        """Query a spot order's status."""
        params = {}
        params['symbol'] = str(symbol)
        params['order_id'] = str(orderid)

        channel = 'ok_spot_orderinfo'

        self.sendTradingRequest(channel, params)

    #----------------------------------------------------------------------
    def subscribeSpotTrades(self, symbol):
        """Subscribe to the account's spot fill reports."""
        channel = 'ok_sub_spot_%s_order' %symbol

        self.sendTradingRequest(channel, {})

    #----------------------------------------------------------------------
    def subscribeSpotUserInfo(self):
        """Subscribe to spot account updates."""
        channel = 'ok_spot_userinfo'

        self.sendTradingRequest(channel, {})

    #######################
    ## futures
    #######################

    #----------------------------------------------------------------------
    def subscribeFutureTicker(self, symbol, expiry):
        """Subscribe to the futures ticker."""
        self.sendMarketDataRequest('ok_sub_future%s_%s_ticker_%s' %(self.currency, symbol, expiry))

    #----------------------------------------------------------------------
    def subscribeFutureDepth(self, symbol, expiry, depth):
        """Subscribe to the futures order-book depth."""
        self.sendMarketDataRequest('ok_sub_future%s_%s_depth_%s_%s' %(self.currency, symbol,
                                                                      expiry, depth))

    #----------------------------------------------------------------------
    def subscribeFutureTradeData(self, symbol, expiry):
        """Subscribe to the futures trade stream."""
        self.sendMarketDataRequest('ok_sub_future%s_%s_trade_%s' %(self.currency, symbol, expiry))

    #----------------------------------------------------------------------
    def subscribeFutureKline(self, symbol, expiry, interval):
        """Subscribe to futures candlesticks."""
        self.sendMarketDataRequest('ok_sub_future%s_%s_kline_%s_%s' %(self.currency, symbol,
                                                                      expiry, interval))

    #----------------------------------------------------------------------
    def subscribeFutureIndex(self, symbol):
        """Subscribe to the futures index price."""
        self.sendMarketDataRequest('ok_sub_future%s_%s_index' %(self.currency, symbol))

    #----------------------------------------------------------------------
    def futureTrade(self, symbol, expiry, type_, price, amount, order, leverage):
        """Place a futures order."""
        params = {}
        params['symbol'] = str(symbol+self.currency)
        params['type'] = str(type_)
        params['price'] = str(price)
        params['amount'] = str(amount)
        params['contract_type'] = str(expiry)
        params['match_price'] = str(order)
        params['lever_rate'] = str(leverage)

        channel = 'ok_future%s_trade' %(self.currency)

        self.sendTradingRequest(channel, params)

    #----------------------------------------------------------------------
    def futureCancelOrder(self, symbol, expiry, orderid):
        """Cancel a futures order."""
        params = {}
        params['symbol'] = str(symbol+self.currency)
        params['order_id'] = str(orderid)
        params['contract_type'] = str(expiry)

        channel = 'ok_future%s_cancel_order' %(self.currency)

        self.sendTradingRequest(channel, params)

    #----------------------------------------------------------------------
    def futureUserInfo(self):
        """Query the futures account."""
        channel = 'ok_future%s_userinfo' %(self.currency)

        self.sendTradingRequest(channel, {})

    #----------------------------------------------------------------------
    def futureOrderInfo(self, symbol, expiry, orderid, status, page, length):
        """Query futures orders (paginated)."""
        params = {}
        params['symbol'] = str(symbol+self.currency)
        params['order_id'] = str(orderid)
        params['contract_type'] = expiry
        params['status'] = status
        params['current_page'] = page
        params['page_length'] = length

        channel = 'ok_future%s_orderinfo' %(self.currency)

        self.sendTradingRequest(channel, params)

    #----------------------------------------------------------------------
    def subscribeFutureTrades(self):
        """Subscribe to the account's futures fill reports."""
        channel = 'ok_sub_future%s_trades' %(self.currency)

        self.sendTradingRequest(channel, {})

    #----------------------------------------------------------------------
    def subscribeFutureUserInfo(self):
        """Subscribe to futures account updates."""
        channel = 'ok_sub_future%s_userinfo' %(self.currency)

        self.sendTradingRequest(channel, {})

    #----------------------------------------------------------------------
    def subscribeFuturePositions(self):
        """Subscribe to futures position updates."""
        channel = 'ok_sub_future%s_positions' %(self.currency)

        self.sendTradingRequest(channel, {})
|
train.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import argparse
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.data as data
import megengine.data.transform as T
import megengine.distributed as dist
import megengine.functional as F
import megengine.jit as jit
import megengine.optimizer as optim
import shufflenet_v2 as M
from tensorboardX import SummaryWriter
logger = mge.get_logger(__name__)
def main():
    """Parse CLI arguments and launch training on one or many GPUs.

    With more than one GPU, scales the learning rate linearly and spawns one
    `worker` process per rank; otherwise runs `worker` in-process.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-a", "--arch", default="shufflenet_v2_x0_5", type=str)
    parser.add_argument("-d", "--data", default=None, type=str)
    parser.add_argument("-s", "--save", default="./models", type=str)
    parser.add_argument("-m", "--model", default=None, type=str)
    # BUGFIX: help text said "\w tensorboard" (invalid escape); meant "with".
    parser.add_argument('-o', '--output', type=str, required=True,
                        help='set path for checkpoints with tensorboard')
    parser.add_argument("-b", "--batch-size", default=128, type=int)
    parser.add_argument("--learning-rate", default=0.0625, type=float)
    parser.add_argument("--momentum", default=0.9, type=float)
    parser.add_argument("--weight-decay", default=4e-5, type=float)
    parser.add_argument("--steps", default=300000, type=int)
    parser.add_argument("-n", "--ngpus", default=None, type=int)
    parser.add_argument("-w", "--workers", default=4, type=int)
    parser.add_argument("--report-freq", default=50, type=int)
    args = parser.parse_args()

    world_size = mge.get_device_count("gpu") if args.ngpus is None else args.ngpus

    # per-run save directory encodes arch and the GLOBAL batch size
    save_dir = os.path.join(args.save, args.arch, "b{}".format(args.batch_size * world_size))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    mge.set_log_file(os.path.join(save_dir, "log.txt"))

    if not os.path.exists(args.output):
        os.makedirs(args.output)

    if world_size > 1:
        # scale learning rate by number of gpus
        args.learning_rate *= world_size
        # start distributed training, dispatch sub-processes
        mp.set_start_method("spawn")
        processes = []
        for rank in range(world_size):
            p = mp.Process(target=worker, args=(rank, world_size, args))
            p.start()
            processes.append(p)

        for p in processes:
            p.join()
    else:
        worker(0, 1, args)
def get_parameters(model):
    """Split a model's parameters into two optimizer groups.

    Multi-dimensional "weight" tensors get the regular weight decay; every
    other parameter (biases, batch-norm affine params, 1-D weights) goes in a
    group with weight_decay=0.0, matching common ShuffleNet training practice.
    """
    decay_params = []
    no_decay_params = []
    for name, param in model.named_parameters(requires_grad=True):
        # decay only genuine weight matrices/kernels (rank > 1)
        if "weight" in name and len(param.shape) > 1:
            decay_params.append(param)
        else:
            no_decay_params.append(param)

    # sanity check: every trainable parameter landed in exactly one group
    assert len(list(model.parameters())) == len(decay_params) + len(no_decay_params)

    return [
        dict(params=decay_params),
        dict(params=no_decay_params, weight_decay=0.0),
    ]
def worker(rank, world_size, args):
    """Training loop executed in each (possibly distributed) process.

    Args:
        rank: this process's rank in the distributed group (0 if single GPU).
        world_size: total number of training processes.
        args: parsed argparse namespace from main().

    Rank 0 owns the TensorBoard writers and checkpoint saving; all ranks run
    the same data/compute pipeline so collective ops stay in sync.
    """
    # pylint: disable=too-many-statements
    mge.set_log_file(os.path.join(args.save, args.arch, "log.txt"))

    if world_size > 1:
        # Initialize distributed process group
        logger.info("init distributed process group {} / {}".format(rank, world_size))
        dist.init_process_group(
            master_ip="localhost",
            master_port=23456,
            world_size=world_size,
            rank=rank,
            dev=rank,
        )

    save_dir = os.path.join(args.save, args.arch)

    if rank == 0:
        # one TensorBoard writer per phase
        prefixs = ['train', 'valid']
        writers = {prefix: SummaryWriter(os.path.join(args.output, prefix)) for prefix in prefixs}

    model = getattr(M, args.arch)()

    step_start = 0
    if args.model:
        # resume: checkpoint names look like "checkpoint-012345.pkl"
        logger.info("load weights from %s", args.model)
        model.load_state_dict(mge.load(args.model))
        step_start = int(args.model.split("-")[1].split(".")[0])

    optimizer = optim.SGD(
        get_parameters(model),
        lr=args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay,
    )

    # Define train and valid graph
    @jit.trace(symbolic=True)
    def train_func(image, label):
        model.train()
        logits = model(image)
        loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
        acc1, acc5 = F.accuracy(logits, label, (1, 5))
        optimizer.backward(loss)  # compute gradients
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5

    @jit.trace(symbolic=True)
    def valid_func(image, label):
        model.eval()
        logits = model(image)
        loss = F.cross_entropy_with_softmax(logits, label, label_smooth=0.1)
        acc1, acc5 = F.accuracy(logits, label, (1, 5))
        if dist.is_distributed():  # all_reduce_mean
            loss = dist.all_reduce_sum(loss) / dist.get_world_size()
            acc1 = dist.all_reduce_sum(acc1) / dist.get_world_size()
            acc5 = dist.all_reduce_sum(acc5) / dist.get_world_size()
        return loss, acc1, acc5

    # Build train and valid datasets
    logger.info("preparing dataset..")
    train_dataset = data.dataset.ImageNet(args.data, train=True)
    train_sampler = data.Infinite(data.RandomSampler(
        train_dataset, batch_size=args.batch_size, drop_last=True
    ))
    train_queue = data.DataLoader(
        train_dataset,
        sampler=train_sampler,
        transform=T.Compose(
            [
                T.RandomResizedCrop(224),
                T.RandomHorizontalFlip(),
                T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
                T.ToMode("CHW"),
            ]
        ),
        num_workers=args.workers,
    )
    train_queue = iter(train_queue)
    valid_dataset = data.dataset.ImageNet(args.data, train=False)
    valid_sampler = data.SequentialSampler(
        valid_dataset, batch_size=100, drop_last=False
    )
    valid_queue = data.DataLoader(
        valid_dataset,
        sampler=valid_sampler,
        transform=T.Compose(
            [
                T.Resize(256),
                T.CenterCrop(224),
                T.ToMode("CHW"),
            ]
        ),
        num_workers=args.workers,
    )

    # Start training
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    total_time = AverageMeter("Time")

    t = time.time()
    for step in range(step_start, args.steps + 1):
        # Linear learning rate decay; hits exactly 0 at the final step.
        # (removed the dead `decay = 1.0` that was immediately overwritten)
        decay = 1 - float(step) / args.steps if step < args.steps else 0
        for param_group in optimizer.param_groups:
            param_group["lr"] = args.learning_rate * decay

        image, label = next(train_queue)
        time_data = time.time() - t
        image = image.astype("float32")
        label = label.astype("int32")

        n = image.shape[0]
        optimizer.zero_grad()
        loss, acc1, acc5 = train_func(image, label)
        optimizer.step()

        top1.update(100 * acc1.numpy()[0], n)
        top5.update(100 * acc5.numpy()[0], n)
        objs.update(loss.numpy()[0], n)
        total_time.update(time.time() - t)
        time_iter = time.time() - t
        t = time.time()

        if step % args.report_freq == 0 and rank == 0:
            logger.info(
                "TRAIN Iter %06d: lr = %f,\tloss = %f,\twc_loss = 1,\tTop-1 err = %f,\tTop-5 err = %f,\tdata_time = %f,\ttrain_time = %f,\tremain_hours=%f",
                step,
                args.learning_rate * decay,
                float(objs.__str__().split()[1]),
                1 - float(top1.__str__().split()[1]) / 100,
                1 - float(top5.__str__().split()[1]) / 100,
                time_data,
                time_iter - time_data,
                time_iter * (args.steps - step) / 3600,
            )
            writers['train'].add_scalar('loss', float(objs.__str__().split()[1]), global_step=step)
            writers['train'].add_scalar('top1_err', 1 - float(top1.__str__().split()[1]) / 100, global_step=step)
            writers['train'].add_scalar('top5_err', 1 - float(top5.__str__().split()[1]) / 100, global_step=step)
            # reset the meters so the next report window averages fresh values
            objs.reset()
            top1.reset()
            top5.reset()
            total_time.reset()

        if step % 10000 == 0 and rank == 0 and step != 0:
            logger.info("SAVING %06d", step)
            mge.save(
                model.state_dict(),
                os.path.join(save_dir, "checkpoint-{:06d}.pkl".format(step)),
            )
        if step % 10000 == 0 and step != 0:
            # periodic validation on every rank (keeps collectives in sync)
            loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
            logger.info("TEST Iter %06d: loss = %f,\tTop-1 err = %f,\tTop-5 err = %f", step, loss, 1 - valid_acc / 100, 1 - valid_acc5 / 100)
            if rank == 0:
                writers['valid'].add_scalar('loss', loss, global_step=step)
                writers['valid'].add_scalar('top1_err', 1 - valid_acc / 100, global_step=step)
                writers['valid'].add_scalar('top5_err', 1 - valid_acc5 / 100, global_step=step)
                mge.save(
                    model.state_dict(), os.path.join(save_dir, "checkpoint-{:06d}.pkl".format(step))
                )

    # final validation after the last step
    loss, valid_acc, valid_acc5 = infer(valid_func, valid_queue, args)
    # BUGFIX: the original logged the undefined name `_` here (NameError at
    # the very end of training); it should log the validation loss.
    logger.info("TEST Iter %06d: loss=%f,\tTop-1 err = %f,\tTop-5 err = %f", step, loss, 1 - valid_acc / 100, 1 - valid_acc5 / 100)
def infer(model, data_queue, args):
    """Evaluate `model` (a traced eval function) over every batch in `data_queue`.

    Returns (mean loss, mean top-1 accuracy %, mean top-5 accuracy %), each
    averaged over all samples via AverageMeter. Rank 0 logs progress every
    `args.report_freq` batches.
    """
    objs = AverageMeter("Loss")
    top1 = AverageMeter("Acc@1")
    top5 = AverageMeter("Acc@5")
    total_time = AverageMeter("Time")

    tick = time.time()
    for step, (image, label) in enumerate(data_queue):
        batch = image.shape[0]
        # convert np.uint8 images to float32, labels to int32, then evaluate
        loss, acc1, acc5 = model(image.astype("float32"), label.astype("int32"))

        objs.update(loss.numpy()[0], batch)
        top1.update(100 * acc1.numpy()[0], batch)
        top5.update(100 * acc5.numpy()[0], batch)
        total_time.update(time.time() - tick)
        tick = time.time()

        if step % args.report_freq == 0 and dist.get_rank() == 0:
            logger.info(
                "Step %d, %s %s %s %s",
                step,
                objs,
                top1,
                top5,
                total_time,
            )

    return objs.avg, top1.avg, top5.avg
class AverageMeter:
    """Tracks the latest value and the running (weighted) average of a metric."""

    def __init__(self, name, fmt=":.3f"):
        self.name = name      # label used by __str__
        self.fmt = fmt        # format spec applied to val and avg
        self.reset()

    def reset(self):
        """Zero all statistics; used at the start of each reporting window."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. "Loss 0.123 (0.456)" — latest value, then running average
        template = "{{name}} {{val{f}}} ({{avg{f}}})".format(f=self.fmt)
        return template.format(**vars(self))
if __name__ == "__main__":
main()
|
flaskApp.py | """
Copyright (c) 2021 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
from flask import Flask, render_template, request, url_for, json, redirect
from config import meraki_api_key, network_id, sensor_mapping, ASHRAE_low, ASHRAE_high
import requests, threading, time, pytz
from datetime import datetime, timedelta
# Flask application object for the heat-map dashboard.
app = Flask(__name__)

# Snooze entries currently in effect; module-level because it is shared
# between request handlers and the background snoozing() thread.
alertprofiles_to_snooze = []
@app.route('/', methods=['GET'])
def index():
    """Landing page of the dashboard."""
    template_name = "start_page.html"
    return render_template(template_name)
@app.route('/heatmap', methods=['GET', 'POST'])
def heatmap():
    """Render the datacenter heat map with temperatures and alert profiles.

    With no ``?sensor=`` argument: fetch the latest temperature per mapped
    sensor plus an overview of every alert profile.  ``?sensor=all`` shows
    the overview only; ``?sensor=<name>`` shows per-profile assignment for
    that single sensor.  Expired snooze entries are pruned on every request.
    """
    global temp_values
    global alert_profiles
    global alert_profiles_overview
    global sensor
    global alertprofiles_to_snooze
    sensor = request.args.get('sensor')
    alert_profiles = []
    alert_profiles_overview = []
    tz_London = pytz.timezone('Europe/London')
    now_hms = datetime.strptime(
        datetime.now(tz_London).strftime("%H:%M:%S"), "%H:%M:%S")
    # Fix: iterate over a copy — removing items from the list being
    # iterated makes the loop skip the element after each removal.
    for ap_to_snooze_delete in list(alertprofiles_to_snooze):
        ap_expiry = datetime.strptime(ap_to_snooze_delete['snooze_until'], "%H:%M:%S")
        if now_hms > ap_expiry:
            alertprofiles_to_snooze.remove(ap_to_snooze_delete)
    # Identical headers were previously rebuilt three times; build once.
    headers = {
        'X-Cisco-Meraki-API-Key': meraki_api_key,
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
    profiles_url = 'https://api.meraki.com/api/v1/networks/' + network_id + '/sensors/alerts/profiles'
    if sensor is None:
        logic = 0
        # Latest temperature reading per sensor (legacy v0 endpoint).
        url = 'https://api.meraki.com/api/v0/networks/' + network_id + \
              '/sensors/stats/latestBySensor?metric=temperature'
        data = {"serials": [mapping['serial'] for mapping in sensor_mapping]}
        # NOTE(review): verify=False disables TLS certificate checking.
        get_temp = requests.get(url, headers=headers, data=json.dumps(data), verify=False)
        temp_values = {}
        for temp in get_temp.json():
            # Fix: the original reused the name `sensor` for this loop,
            # clobbering the global request argument passed to the template.
            for mapping in sensor_mapping:
                if temp['serial'] == mapping['serial']:
                    temp_values[mapping['name']] = str(round(float(temp['value']), 2)) + ' °C'
        get_alert_profiles = requests.get(profiles_url, headers=headers, verify=False)
        for profile in get_alert_profiles.json():
            profile_sensor_mapping = [sens['name'] for sens in sensor_mapping
                                      if sens['serial'] in profile['serials']]
            # Fix: the original bound this dict to the name `dict`,
            # shadowing the builtin.
            alert_profiles_overview.append({
                'name': profile['name'],
                'type': profile['conditions'][0]['type'],
                'activated': profile_sensor_mapping,
                'id': profile['id'],
                'applied_sensors': profile['serials']
            })
    else:
        # NOTE(review): temp_values must already exist from a previous plain
        # /heatmap request in this branch — same assumption as the original.
        get_alert_profiles = requests.get(profiles_url, headers=headers, verify=False)
        if sensor == 'all':
            logic = 0
            for profile in get_alert_profiles.json():
                profile_sensor_mapping = [sens['name'] for sens in sensor_mapping
                                          if sens['serial'] in profile['serials']]
                alert_profiles_overview.append({
                    'name': profile['name'],
                    'type': profile['conditions'][0]['type'],
                    'activated': profile_sensor_mapping,
                    'id': profile['id'],
                    'applied_sensors': profile['serials']
                })
        else:
            logic = 1
            # Robustness: default to None so an unknown sensor name yields
            # 'no' everywhere instead of a NameError.
            serial = None
            for sen in sensor_mapping:
                if sen['name'] == str(sensor):
                    serial = sen['serial']
            for profile in get_alert_profiles.json():
                activated = 'yes' if serial in profile['serials'] else 'no'
                alert_profiles.append({
                    'name': profile['name'],
                    'type': profile['conditions'][0]['type'],
                    'activated': activated,
                    'id': profile['id'],
                    'applied_sensors': profile['serials']
                })
    return render_template("dc_heatmap.html", temp_values=temp_values, logic=logic,
                           alert_profiles=alert_profiles,
                           alert_profiles_overview=alert_profiles_overview, sensor=sensor,
                           ASHRAE_low=ASHRAE_low,
                           ASHRAE_high=ASHRAE_high, alertprofiles_to_snooze=alertprofiles_to_snooze)
@app.route('/snooze_sensors', methods=['GET', 'POST'])
def snooze_sensors():
    """Temporarily detach sensors from the checked alert profiles.

    Removes the sensor serial(s) from each checked profile via the Meraki
    API, records what was removed in ``alertprofiles_to_snooze``, and
    starts a background thread that re-attaches them after the requested
    number of minutes.
    """
    global alertprofiles_to_snooze
    req = request.form
    sensor_name = request.args.get('sensor')
    # NOTE(review): sensor_serial stays unbound when sensor_name matches no
    # mapping entry (and when sensor_name is None) — confirm callers.
    for sen in sensor_mapping:
        if sen['name'] == sensor_name:
            sensor_serial = sen['serial']
    # Checked profiles arrive as form keys prefixed "checkbox-".
    prefix_alertprofiles = "checkbox-"
    alertprofiles_dict = {key: val for key, val in req.items() if key.startswith(prefix_alertprofiles)}
    alertprofiles_to_snooze = []
    for key in alertprofiles_dict.keys():
        short_key = key.replace("checkbox-", "")
        short_key_dict = {
            'name': short_key,
            'snoozed_sensors': []
        }
        alertprofiles_to_snooze.append(short_key_dict)
    global alert_profiles
    global alert_profiles_overview
    # put requests to take sensors off alert profiles
    if sensor_name != None:  # for a single sensor
        for ap in alert_profiles:
            for ap_to_snooze in alertprofiles_to_snooze:
                sensorlist_putrequest = ap['applied_sensors']
                if ap['name'] == ap_to_snooze['name']:
                    # NOTE(review): raises ValueError if the serial is not
                    # currently attached to this profile — confirm.
                    sensorlist_putrequest.remove(sensor_serial)
                    # de-duplicate while preserving order
                    sensorlist_putrequest = list(dict.fromkeys(sensorlist_putrequest))
                    ap_to_snooze['id'] = ap['id']
                    ap_to_snooze['snoozed_sensors'].append(sensor_serial)
                    url = 'https://api.meraki.com/api/v1/networks/' + network_id + '/sensors/alerts/profiles/' + \
                          ap_to_snooze['id']
                    headers = {
                        'X-Cisco-Meraki-API-Key': meraki_api_key,
                        'Accept': 'application/json',
                        'Content-Type': 'application/json'
                    }
                    get_alert_profile = requests.get(url, headers=headers, verify=False)
                    response = get_alert_profile.json()
                    data = {
                        'name': response['name'],
                        'scheduleId': response['scheduleId'],
                        'conditions': response['conditions'],
                        'recipients': response['recipients'],
                        'serials': sensorlist_putrequest
                    }
                    put_alert_profile = requests.put(url, headers=headers, data=json.dumps(data), verify=False)
                    print(put_alert_profile)
                    # extra seconds pad the sleep past the displayed expiry
                    snooze_minutes = (int(req['snooze_minutes']) * 60) + 13
                    tz_London = pytz.timezone('Europe/London')
                    timedelta_snooze = (datetime.now(tz_London) + timedelta(seconds=snooze_minutes)).strftime(
                        "%H:%M:%S")
                    snooze_until = str(timedelta_snooze)
                    ap_to_snooze['snooze_until'] = snooze_until
    else:  # for all sensors
        for ap in alert_profiles_overview:
            for ap_to_snooze in alertprofiles_to_snooze:
                if ap['name'] == ap_to_snooze['name']:
                    ap_to_snooze['id'] = ap['id']
                    for sen in sensor_mapping:
                        sensorlist_putrequest = ap['applied_sensors']
                        if sen['serial'] in sensorlist_putrequest:
                            sensorlist_putrequest.remove(sen['serial'])
                            ap_to_snooze['snoozed_sensors'].append(sen['serial'])
                    sensorlist_putrequest = list(dict.fromkeys(sensorlist_putrequest))
                    url = 'https://api.meraki.com/api/v1/networks/' + network_id + '/sensors/alerts/profiles/' + \
                          ap_to_snooze['id']
                    headers = {
                        'X-Cisco-Meraki-API-Key': meraki_api_key,
                        'Accept': 'application/json',
                        'Content-Type': 'application/json'
                    }
                    get_alert_profile = requests.get(url, headers=headers, verify=False)
                    response = get_alert_profile.json()
                    data = {
                        'name': response['name'],
                        'scheduleId': response['scheduleId'],
                        'conditions': response['conditions'],
                        'recipients': response['recipients'],
                        'serials': sensorlist_putrequest
                    }
                    put_alert_profile = requests.put(url, headers=headers, data=json.dumps(data), verify=False)
                    print(put_alert_profile)
                    snooze_minutes = (int(req['snooze_minutes']) * 60) + 10
                    tz_London = pytz.timezone('Europe/London')
                    timedelta_snooze = (datetime.now(tz_London) + timedelta(seconds=snooze_minutes)).strftime(
                        "%H:%M:%S")
                    snooze_until = str(timedelta_snooze)
                    ap_to_snooze['snooze_until'] = snooze_until
    # countdown to put them back on alert profile after x minutes
    th = threading.Thread(target=snoozing, args=[alertprofiles_to_snooze, snooze_minutes])
    th.start()
    if sensor_name == None:
        sensor_name = 'all'
    return redirect(url_for('.heatmap', sensor=sensor_name))
def snoozing(alertprofiles_to_snooze, snooze_minutes):
    """Background worker: sleep, then re-attach the snoozed sensors.

    *snooze_minutes* is, despite the name, a number of SECONDS (callers
    pass minutes * 60 plus a pad).  For each snooze entry, the profile is
    re-fetched and the removed serials are appended back.
    """
    time.sleep(snooze_minutes)
    common_headers = {
        'X-Cisco-Meraki-API-Key': meraki_api_key,
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
    for entry in alertprofiles_to_snooze:
        profile_url = ('https://api.meraki.com/api/v1/networks/' + network_id +
                       '/sensors/alerts/profiles/' + entry['id'])
        profile = requests.get(profile_url, headers=common_headers, verify=False).json()
        serials = profile['serials']
        serials.extend(entry['snoozed_sensors'])
        payload = {
            'name': profile['name'],
            'scheduleId': profile['scheduleId'],
            'conditions': profile['conditions'],
            'recipients': profile['recipients'],
            'serials': serials
        }
        put_alert_profile = requests.put(profile_url, headers=common_headers,
                                         data=json.dumps(payload), verify=False)
        print(put_alert_profile)
@app.route('/submit_sensor', methods=['GET', 'POST'])
def submit_sensor():
    """Assign or unassign sensor(s) to/from one alert profile.

    The submitted form key encodes "<sensor>...<alertprofile>...<change>"
    where change is 'assign' or 'unassign'; sensor may be 'all'.
    """
    global alert_profiles
    global alert_profiles_overview
    global sensor
    req = request.form
    # The single form key carries all three values, '...'-separated.
    sensor_alertprofile_string = list(req.keys())[0]
    sensor_alertprofile_list = sensor_alertprofile_string.split('...')
    sensor_alertprofile_dict = {
        'sensor': sensor_alertprofile_list[0],
        'alertprofile': sensor_alertprofile_list[1],
        'change': sensor_alertprofile_list[2]
    }
    serial = []
    for sen in sensor_mapping:
        if sensor_alertprofile_dict['sensor'] == 'all':
            serial.append(sen['serial'])
            dict_alerts = alert_profiles_overview
        else:
            if sen['name'] == sensor_alertprofile_dict['sensor']:
                serial.append(sen['serial'])
            dict_alerts = alert_profiles
    headers = {
        'X-Cisco-Meraki-API-Key': meraki_api_key,
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
    # Find the profile id by name.
    # NOTE(review): alertprofile_id stays unbound if no profile matches;
    # dict_alerts stays unbound if sensor_mapping is empty — confirm.
    for ale in dict_alerts:
        if ale['name'] == sensor_alertprofile_dict['alertprofile']:
            alertprofile_id = ale['id']
    url = 'https://api.meraki.com/api/v1/networks/' + network_id + '/sensors/alerts/profiles/' + alertprofile_id
    get_alert_profile = requests.get(url, headers=headers, verify=False)
    response = get_alert_profile.json()
    sensorlist_putrequest = response['serials']
    if sensor_alertprofile_dict['change'] == 'assign':
        for s in serial:
            if s not in sensorlist_putrequest:
                sensorlist_putrequest.append(s)
    elif sensor_alertprofile_dict['change'] == 'unassign':
        for s in serial:
            if s in sensorlist_putrequest:
                sensorlist_putrequest.remove(s)
    data = {
        'name': response['name'],
        'scheduleId': response['scheduleId'],
        'conditions': response['conditions'],
        'recipients': response['recipients'],
        'serials': sensorlist_putrequest
    }
    put_alert_profile = requests.put(url, headers=headers, data=json.dumps(data), verify=False)
    print(put_alert_profile)
    return redirect(url_for('.heatmap', sensor=sensor))
def _build_conditions(req, condition_typ, suffix):
    """Build the Meraki alert 'conditions' list from the submitted form.

    *suffix* is "" for the single-sensor form and "_all" for the
    all-sensors form (the two HTML forms use distinct field names).
    Threshold/duration fields that are absent or non-numeric are skipped,
    matching the original try/except-per-bound behavior.
    """
    conditions = []
    if condition_typ in ("temperature", "humidity"):
        metric = "t" if condition_typ == "temperature" else "h"
        for direction, bound in (("+", "above"), ("-", "below")):
            try:
                c = {
                    "type": condition_typ,
                    "disabled": False,
                    "direction": direction,
                    "threshold": int(req["%s_%s_value%s" % (bound, metric, suffix)])
                }
                if condition_typ == "temperature":
                    c["unit"] = "celsius"
                time_field = req["%s_%s_time%s" % (bound, metric, suffix)]
                if time_field != "any":
                    # form gives minutes; the API wants seconds
                    c["duration"] = int(time_field) * 60
                conditions.append(c)
            except (KeyError, ValueError) as e:
                # Missing/blank form field: this bound was not configured.
                print(e)
    elif condition_typ == "water_detection":
        conditions.append({
            "type": condition_typ,
            "disabled": False,
            "direction": "+",
            "threshold": 1
        })
    elif condition_typ == "door":
        conditions.append({
            "type": condition_typ,
            "disabled": False,
            "duration": 0,
            "direction": "+",
            "threshold": 1,
            "disabledDuration": True
        })
    return conditions


@app.route('/add_alertprofile', methods=['GET', 'POST'])
def add_alertprofile():
    """Create a new sensor alert profile from the submitted form.

    ``?sensor=all`` applies the profile to every mapped sensor (when the
    apply checkbox is ticked); otherwise only to the named sensor.  The
    two duplicated form-parsing branches of the original are collapsed
    via a field-name suffix and the _build_conditions() helper.
    """
    req = request.form
    sensor_name = request.args.get('sensor')
    # Fix: `serial` was previously only bound inside the try/except, so a
    # present-but-not-"on" apply checkbox left it undefined (NameError
    # when building `data` below).
    serial = []
    if sensor_name == "all":
        apply_key, suffix = "ap_sensorapply_all", "_all"
        name_key, type_key = "ap_name_all", "ap_conditiontype_all"
    else:
        apply_key, suffix = "ap_sensorapply", ""
        name_key, type_key = "ap_name", "ap_conditiontype"
    # Checkboxes are only submitted when ticked, hence the KeyError guard.
    try:
        if req[apply_key] == "on":
            for s in sensor_mapping:
                if sensor_name == "all" or sensor_name == s['name']:
                    serial.append(s['serial'])
    except KeyError:
        serial = []
    conditions = _build_conditions(req, req[type_key], suffix)
    data = {
        "name": req[name_key],
        "scheduleId": "",
        "conditions": conditions,
        "recipients": {
            "emails": [],
            "smsNumbers": [],
            "httpServerIds": []
        },
        "serials": serial
    }
    # add alert profile
    url = "https://api.meraki.com/api/v1/networks/" + network_id + "/sensors/alerts/profiles"
    headers = {
        'X-Cisco-Meraki-API-Key': meraki_api_key,
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    response = requests.request("POST", url, headers=headers, data=json.dumps(data))
    print(response)
    return redirect(url_for('.heatmap', sensor=sensor_name))
@app.route('/grafana_chart', methods=['GET', 'POST'])
def grafana_chart():
    """Page embedding the imported Grafana chart."""
    return render_template("grafana_import.html", content='Grafana Chart')
# Dev-server entry point; threaded=True so request handlers and the
# background snoozing() threads can run concurrently.
if __name__ == "__main__":
    app.run(port=5001, debug=True, threaded=True)
|
PyShell.py | #! /usr/bin/env python
from __future__ import print_function
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from Tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
from idlelib import IOBinding
# Characters legal in a Python identifier; used when scanning shell input.
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1'  # python execution server on localhost loopback
PORT = 0  # someday pass in host, port for remote debug capability

try:
    from signal import SIGTERM
except ImportError:
    SIGTERM = 15  # POSIX default value when the signal module lacks it

# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__  # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
    """Format warnings the IDLE way.

    Mirrors warnings.formatwarning(): if *line* is not supplied it is
    fetched from linecache.
    """
    source = linecache.getline(filename, lineno) if line is None else line
    source = source.strip()
    parts = ["\nWarning (from warnings module):\n",
             ' File \"%s\", line %s\n' % (filename, lineno)]
    if source:
        parts.append(" %s\n" % source)
    parts.append("%s: %s\n" % (category.__name__, message))
    return "".join(parts)
def idle_showwarning(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning (after replacing warnings.showwarning).

    The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequence AttributeError,
    and the output of a hard-coded prompt.
    """
    target = warning_stream if file is None else file
    try:
        formatted = idle_formatwarning(
            message, category, filename, lineno, line=line)
        target.write(formatted)
        target.write(">>> ")
    except (AttributeError, IOError):
        # target (probably __stderr__) is invalid — skip the warning
        pass
# Saved original warnings.showwarning while IDLE's replacement is active.
_warnings_showwarning = None


def capture_warnings(capture):
    "Replace warning.showwarning with idle_showwarning, or reverse."
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = idle_showwarning
    elif not capture and _warnings_showwarning is not None:
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None

capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Extend linecache.checkcache to preserve the <pyshell#...> entries

    The <pyshell#...> keys have no backing file, so the stock checkcache
    would evict them.  Pop them out, run the original (bound at definition
    time via *orig_checkcache* so it survives the patch below), then put
    them back.
    """
    cache = linecache.cache
    saved = {key: cache.pop(key)
             for key in list(cache)
             if key[:1] + key[-1:] == '<>'}
    orig_checkcache(filename)
    cache.update(saved)

# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
    "Regular text edit window in IDLE, supports breakpoints"

    def __init__(self, *args):
        self.breakpoints = []
        EditorWindow.__init__(self, *args)
        self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
        self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
        self.text.bind("<<open-python-shell>>", self.flist.open_shell)
        self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
                                           'breakpoints.lst')

        # whenever a file is changed, restore breakpoints
        def filename_changed_hook(old_hook=self.io.filename_change_hook,
                                  self=self):
            self.restore_file_breaks()
            old_hook()
        self.io.set_filename_change_hook(filename_changed_hook)
        if self.io.filename:
            self.restore_file_breaks()
        self.color_breakpoint_text()

    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        ("Set Breakpoint", "<<set-breakpoint-here>>", None),
        ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
    ]

    def color_breakpoint_text(self, color=True):
        "Turn colorizing of breakpoint text on or off"
        if self.io is None:
            # possible due to update in restore_file_breaks
            return
        if color:
            theme = idleConf.CurrentTheme()
            cfg = idleConf.GetHighlight(theme, "break")
        else:
            cfg = {'foreground': '', 'background': ''}
        self.text.tag_config('BREAK', cfg)

    def set_breakpoint(self, lineno):
        text = self.text
        filename = self.io.filename
        text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
        try:
            self.breakpoints.index(lineno)
        except ValueError:  # only add if missing, i.e. do once
            self.breakpoints.append(lineno)
        try:    # update the subprocess debugger
            debug = self.flist.pyshell.interp.debugger
            debug.set_breakpoint_here(filename, lineno)
        except:  # but debugger may not be active right now....
            pass

    def set_breakpoint_here(self, event=None):
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        self.set_breakpoint(lineno)

    def clear_breakpoint_here(self, event=None):
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        try:
            self.breakpoints.remove(lineno)
        except:
            pass
        text.tag_remove("BREAK", "insert linestart",
                        "insert lineend +1char")
        try:
            debug = self.flist.pyshell.interp.debugger
            debug.clear_breakpoint_here(filename, lineno)
        except:
            pass

    def clear_file_breaks(self):
        if self.breakpoints:
            text = self.text
            filename = self.io.filename
            if not filename:
                text.bell()
                return
            self.breakpoints = []
            text.tag_remove("BREAK", "1.0", END)
            try:
                debug = self.flist.pyshell.interp.debugger
                debug.clear_file_breaks(filename)
            except:
                pass

    def store_file_breaks(self):
        "Save breakpoints when file is saved"
        # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
        #     be run.  The breaks are saved at that time.  If we introduce
        #     a temporary file save feature the save breaks functionality
        #     needs to be re-verified, since the breaks at the time the
        #     temp file is created may differ from the breaks at the last
        #     permanent save of the file.  Currently, a break introduced
        #     after a save will be effective, but not persistent.
        #     This is necessary to keep the saved breaks synched with the
        #     saved file.
        #
        #     Breakpoints are set as tagged ranges in the text.
        #     Since a modified file has to be saved before it is
        #     run, and since self.breakpoints (from which the subprocess
        #     debugger is loaded) is updated during the save, the visible
        #     breaks stay synched with the subprocess even if one of these
        #     unexpected breakpoint deletions occurs.
        breaks = self.breakpoints
        filename = self.io.filename
        try:
            with open(self.breakpointPath, "r") as old_file:
                lines = old_file.readlines()
        except IOError:
            lines = []
        try:
            with open(self.breakpointPath, "w") as new_file:
                for line in lines:
                    if not line.startswith(filename + '='):
                        new_file.write(line)
                self.update_breakpoints()
                breaks = self.breakpoints
                if breaks:
                    new_file.write(filename + '=' + str(breaks) + '\n')
        except IOError as err:
            if not getattr(self.root, "breakpoint_error_displayed", False):
                self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                                       message='Unable to update breakpoint list:\n%s'
                                               % str(err),
                                       parent=self.text)

    def restore_file_breaks(self):
        self.text.update()  # this enables setting "BREAK" tags to be visible
        if self.io is None:
            # can happen if IDLE closes due to the .update() call
            return
        filename = self.io.filename
        if filename is None:
            return
        if os.path.isfile(self.breakpointPath):
            # Fix: use a context manager so the breakpoint file is closed
            # (store_file_breaks already does; this one leaked the handle).
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
            for line in lines:
                if line.startswith(filename + '='):
                    # NOTE: eval of the saved list — file is a trusted local
                    # config, but the format is fragile.
                    breakpoint_linenumbers = eval(line[len(filename)+1:])
                    for breakpoint_linenumber in breakpoint_linenumbers:
                        self.set_breakpoint(breakpoint_linenumber)

    def update_breakpoints(self):
        "Retrieves all the breakpoints in the current window"
        text = self.text
        ranges = text.tag_ranges("BREAK")
        linenumber_list = self.ranges_to_linenumbers(ranges)
        self.breakpoints = linenumber_list

    def ranges_to_linenumbers(self, ranges):
        # ranges come in (start, stop) index pairs; expand to line numbers
        lines = []
        for index in range(0, len(ranges), 2):
            lineno = int(float(ranges[index].string))
            end = int(float(ranges[index+1].string))
            while lineno < end:
                lines.append(lineno)
                lineno += 1
        return lines

    # XXX 13 Dec 2002 KBK Not used currently
    #     def saved_change_hook(self):
    #         "Extend base method - clear breaks if module is modified"
    #         if not self.get_saved():
    #             self.clear_file_breaks()
    #         EditorWindow.saved_change_hook(self)

    def _close(self):
        "Extend base method - clear breaks when module is closed"
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # override FileList's class variable, instances return PyShellEditorWindow
    # instead of EditorWindow when new edit windows are created.
    EditorWindow = PyShellEditorWindow

    pyshell = None

    def open_shell(self, event=None):
        """Raise the existing shell window, or create one; returns it."""
        shell = self.pyshell
        if shell:
            shell.top.wakeup()
        else:
            shell = self.pyshell = PyShell(self)
        if shell and not shell.begin():
            return None
        return shell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Everything before "iomark" is committed shell history: mark it
        # SYNC (and drop any TODO) so only the editable tail is recolored.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.CurrentTheme()
        # Extra tags for the shell's I/O streams, styled from the theme.
        self.tagdefs.update({
            "stdin": {'background': None, 'foreground': None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        })

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag in self.tagdefs:
            self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def _blocked(self, index):
        """Return True (after beeping) if *index* lies before the I/O mark."""
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return True
        except TclError:
            pass
        return False

    def insert(self, index, chars, tags=None):
        if not self._blocked(index):
            UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        if not self._blocked(index1):
            UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
    # Client for the execution-server RPC link; EOF is surfaced to the
    # caller (poll_subprocess) instead of being handled here.

    def handle_EOF(self):
        "Override the base class - just re-raise EOFError"
        raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
    """InteractiveInterpreter that runs user code in a subprocess over RPC."""

    def __init__(self, tkconsole):
        # tkconsole is the PyShell window this interpreter serves.
        self.tkconsole = tkconsole
        # Execute user code in __main__'s namespace, like the real interpreter.
        locals = sys.modules['__main__'].__dict__
        InteractiveInterpreter.__init__(self, locals=locals)
        self.save_warnings_filters = None
        self.restarting = False
        self.subprocess_arglist = None
        self.port = PORT
        self.original_compiler_flags = self.compile.compiler.flags

    _afterid = None  # pending tkinter after() id for poll_subprocess
    rpcclt = None    # RPC client once the subprocess connection is up
    rpcpid = None    # pid of the spawned execution subprocess
    def spawn_subprocess(self):
        """Start the python execution server in a child process."""
        if self.subprocess_arglist is None:
            self.subprocess_arglist = self.build_subprocess_arglist()
        args = self.subprocess_arglist
        self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)

    def build_subprocess_arglist(self):
        """Return the argv used to spawn the execution subprocess."""
        assert (self.port != 0), (
            "Socket should have been assigned a port number.")
        # propagate this process's -W warning options to the child
        w = ['-W' + s for s in sys.warnoptions]
        # Python 2 relic: true only when true division is in effect
        if 1/2 > 0:  # account for new division
            w.append('-Qnew')
        # Maybe IDLE is installed and is being accessed via sys.path,
        # or maybe it's not installed and the idle.py script is being
        # run from the IDLE source directory.
        del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
                                       default=False, type='bool')
        if __name__ == 'idlelib.PyShell':
            command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
        else:
            command = "__import__('run').main(%r)" % (del_exitf,)
        if sys.platform[:3] == 'win' and ' ' in sys.executable:
            # handle embedded space in path by quoting the argument
            decorated_exec = '"%s"' % sys.executable
        else:
            decorated_exec = sys.executable
        return [decorated_exec] + w + ["-c", command, str(self.port)]
    def start_subprocess(self):
        """Bind the listening socket, spawn the server, accept its connection.

        Returns the RPC client, or None if binding/accepting failed (an
        error dialog is shown in that case).
        """
        addr = (HOST, self.port)
        # GUI makes several attempts to acquire socket, listens for connection
        for i in range(3):
            time.sleep(i)
            try:
                self.rpcclt = MyRPCClient(addr)
                break
            except socket.error:
                pass
        else:
            self.display_port_binding_error()
            return None
        # if PORT was 0, system will assign an 'ephemeral' port. Find it out:
        self.port = self.rpcclt.listening_sock.getsockname()[1]
        # if PORT was not 0, probably working with a remote execution server
        if PORT != 0:
            # To allow reconnection within the 2MSL wait (cf. Stevens TCP
            # V1, 18.6), set SO_REUSEADDR.  Note that this can be problematic
            # on Windows since the implementation allows two active sockets on
            # the same address!
            self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
                                                  socket.SO_REUSEADDR, 1)
        self.spawn_subprocess()
        #time.sleep(20) # test to simulate GUI not accepting connection
        # Accept the connection from the Python execution server
        self.rpcclt.listening_sock.settimeout(10)
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        # Register the objects the subprocess is allowed to call back into.
        self.rpcclt.register("console", self.tkconsole)
        self.rpcclt.register("stdin", self.tkconsole.stdin)
        self.rpcclt.register("stdout", self.tkconsole.stdout)
        self.rpcclt.register("stderr", self.tkconsole.stderr)
        self.rpcclt.register("flist", self.tkconsole.flist)
        self.rpcclt.register("linecache", linecache)
        self.rpcclt.register("interp", self)
        self.transfer_path(with_cwd=True)
        self.poll_subprocess()
        return self.rpcclt
    def restart_subprocess(self, with_cwd=False, filename=''):
        """Kill and respawn the execution subprocess, keeping the GUI alive.

        Annotates the restart in the shell window.  Returns the new RPC
        client, or None if the new subprocess failed to connect.
        """
        if self.restarting:
            # re-entrant call while a restart is already underway
            return self.rpcclt
        self.restarting = True
        # close only the subprocess debugger
        debug = self.getdebugger()
        if debug:
            try:
                # Only close subprocess debugger, don't unregister gui_adap!
                RemoteDebugger.close_subprocess_debugger(self.rpcclt)
            except:
                pass
        # Kill subprocess, spawn a new one, accept connection.
        self.rpcclt.close()
        self.unix_terminate()
        console = self.tkconsole
        was_executing = console.executing
        console.executing = False
        self.spawn_subprocess()
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        self.transfer_path(with_cwd=with_cwd)
        console.stop_readline()
        # annotate restart in shell window and mark it
        console.text.delete("iomark", "end-1c")
        tag = 'RESTART: ' + (filename if filename else 'Shell')
        halfbar = ((int(console.width) - len(tag) - 4) // 2) * '='
        console.write("\n{0} {1} {0}".format(halfbar, tag))
        console.text.mark_set("restart", "end-1c")
        console.text.mark_gravity("restart", "left")
        if not filename:
            console.showprompt()
        # restart subprocess debugger
        if debug:
            # Restarted debugger connects to current instance of debug GUI
            RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
            # reload remote debugger breakpoints for all PyShellEditWindows
            debug.load_breakpoints()
        self.compile.compiler.flags = self.original_compiler_flags
        self.restarting = False
        return self.rpcclt
    def __request_interrupt(self):
        # Runs on a worker thread: ask the subprocess to raise
        # KeyboardInterrupt in the executing user code.
        self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})

    def interrupt_subprocess(self):
        """Interrupt user code running in the subprocess (asynchronously)."""
        threading.Thread(target=self.__request_interrupt).start()

    def kill_subprocess(self):
        """Cancel polling, close the RPC link, and terminate the subprocess."""
        if self._afterid is not None:
            self.tkconsole.text.after_cancel(self._afterid)
        try:
            self.rpcclt.close()
        except AttributeError:  # no socket
            pass
        self.unix_terminate()
        self.tkconsole.executing = False
        self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
    def transfer_path(self, with_cwd=False):
        """Mirror this process's sys.path into the subprocess."""
        if with_cwd:        # Issue 13506
            path = ['']     # include Current Working Directory
            path.extend(sys.path)
        else:
            path = sys.path
        # %r-format the list so the subprocess rebuilds it verbatim
        self.runcommand("""if 1:
        import sys as _sys
        _sys.path = %r
        del _sys
        \n""" % (path,))

    active_seq = None  # seq number of the request currently awaiting a response
    def poll_subprocess(self):
        """Poll the RPC link for a response; reschedules itself via after()."""
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, IOError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print(repr(what), file=console)
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
                print(errmsg, what, file=sys.__stderr__)
                print(errmsg, what, file=console)
            # we received a response to the currently active seq number:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
        # Reschedule myself
        if not self.tkconsole.closing:
            self._afterid = self.tkconsole.text.after(
                self.tkconsole.pollinterval, self.poll_subprocess)

    debugger = None  # current debugger instance, if any
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
    """Initiate the remote stack viewer from a separate thread.

    This method is called from the subprocess, and by returning from this
    method we allow the subprocess to unblock.  After a bit the shell
    requests the subprocess to open the remote stack viewer which returns a
    static object looking at the last exception.  It is queried through
    the RPC mechanism.
    """
    self.tkconsole.text.after(300, self.remote_stack_viewer)
    return
def remote_stack_viewer(self):
    """Open a Tk tree browser over the subprocess's last traceback."""
    from idlelib import RemoteObjectBrowser
    # Ask the subprocess for a handle on a static stack-viewer object.
    oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
    if oid is None:
        # No traceback available in the subprocess; just beep.
        self.tkconsole.root.bell()
        return
    item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
    from idlelib.TreeWidget import ScrolledCanvas, TreeNode
    top = Toplevel(self.tkconsole.root)
    theme = idleConf.CurrentTheme()
    background = idleConf.GetHighlight(theme, 'normal')['background']
    sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
    sc.frame.pack(expand=1, fill="both")
    node = TreeNode(sc.canvas, None, item)
    node.expand()
    # XXX Should GC the remote tree when closing the window
# Counter used to generate unique <pyshell#N> pseudo-filenames.
gid = 0

def execsource(self, source):
    "Like runsource() but assumes complete exec source"
    filename = self.stuffsource(source)
    self.execfile(filename, source)
def execfile(self, filename, source=None):
    """Execute an existing file.

    If source is None the file is read from disk.  Compile errors are
    reported in the shell via showsyntaxerror(); otherwise the
    compiled code object is handed to runcode().
    """
    if source is None:
        # Use a with-block so the file object is closed promptly
        # instead of leaking until garbage collection (the original
        # open(filename, "r").read() never closed the handle).
        with open(filename, "r") as f:
            source = f.read()
    try:
        code = compile(source, filename, "exec", dont_inherit=True)
    except (OverflowError, SyntaxError):
        self.tkconsole.resetoutput()
        print('*** Error in script or command!\n'
              'Traceback (most recent call last):',
              file=self.tkconsole.stderr)
        InteractiveInterpreter.showsyntaxerror(self, filename)
        self.tkconsole.showprompt()
    else:
        self.runcode(code)
def runsource(self, source):
    "Extend base class method: Stuff the source in the line cache first"
    filename = self.stuffsource(source)
    self.more = 0
    # Temporarily escalate SyntaxWarning to an error so dubious input
    # is reported immediately; runcode() restores the saved filters.
    self.save_warnings_filters = warnings.filters[:]
    warnings.filterwarnings(action="error", category=SyntaxWarning)
    # NOTE: Python 2 code -- `unicode` does not exist on Python 3.
    if isinstance(source, unicode) and IOBinding.encoding != 'utf-8':
        try:
            # Prefix a coding cookie so the encoded bytes round-trip
            # through compile() with the configured IO encoding.
            source = '# -*- coding: %s -*-\n%s' % (
                IOBinding.encoding,
                source.encode(IOBinding.encoding))
        except UnicodeError:
            self.tkconsole.resetoutput()
            self.write("Unsupported characters in input\n")
            return
    try:
        # InteractiveInterpreter.runsource() calls its runcode() method,
        # which is overridden (see below)
        return InteractiveInterpreter.runsource(self, source, filename)
    finally:
        if self.save_warnings_filters is not None:
            warnings.filters[:] = self.save_warnings_filters
            self.save_warnings_filters = None
def stuffsource(self, source):
    """Register *source* under a synthetic <pyshell#N> filename.

    The text is stored in linecache.cache so tracebacks can display
    shell input lines; returns the generated pseudo-filename.
    """
    pseudo_name = "<pyshell#%d>" % self.gid
    self.gid += 1
    cache_entry = (len(source) + 1, 0, source.split("\n"), pseudo_name)
    linecache.cache[pseudo_name] = cache_entry
    return pseudo_name
def prepend_syspath(self, filename):
    "Prepend sys.path with file's directory if not already included"
    # Runs in the subprocess; every temporary name is deleted at the
    # end so the user namespace is left unchanged.
    self.runcommand("""if 1:
        _filename = %r
        import sys as _sys
        from os.path import dirname as _dirname
        _dir = _dirname(_filename)
        if not _dir in _sys.path:
            _sys.path.insert(0, _dir)
        del _filename, _sys, _dirname, _dir
        \n""" % (filename,))
def showsyntaxerror(self, filename=None):
    """Extend base class method: Add Colorizing

    Color the offending position instead of printing it and pointing at it
    with a caret.
    """
    text = self.tkconsole.text
    stuff = self.unpackerror()
    if stuff:
        msg, lineno, offset, line = stuff
        # Translate (lineno, offset) into a Tk text index relative to
        # the start of the current input (the "iomark" mark).
        if lineno == 1:
            pos = "iomark + %d chars" % (offset-1)
        else:
            pos = "iomark linestart + %d lines + %d chars" % \
                  (lineno-1, offset-1)
        text.tag_add("ERROR", pos)
        text.see(pos)
        char = text.get(pos)
        if char and char in IDENTCHARS:
            # Extend the highlight back to the start of the identifier.
            text.tag_add("ERROR", pos + " wordstart", pos)
        self.tkconsole.resetoutput()
        self.write("SyntaxError: %s\n" % str(msg))
    else:
        # Couldn't unpack the error; fall back to the stock report.
        self.tkconsole.resetoutput()
        InteractiveInterpreter.showsyntaxerror(self, filename)
    self.tkconsole.showprompt()
def unpackerror(self):
    """Return (msg, lineno, offset, line) for the pending SyntaxError.

    Returns None when the current exception is not a SyntaxError or
    does not have the expected shape.  NOTE: relies on Python 2's
    ability to unpack an exception instance like a tuple of its args.
    """
    type, value, tb = sys.exc_info()
    ok = type is SyntaxError
    if ok:
        try:
            msg, (dummy_filename, lineno, offset, line) = value
            if not offset:
                offset = 0
        except:
            # Any failure to unpack means a non-standard SyntaxError.
            ok = 0
    if ok:
        return msg, lineno, offset, line
    else:
        return None
def showtraceback(self):
    "Extend base class method to reset output properly"
    self.tkconsole.resetoutput()
    # Purge stale non-<pyshell#N> linecache entries before the base
    # class formats the traceback.
    self.checklinecache()
    InteractiveInterpreter.showtraceback(self)
    if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
        self.tkconsole.open_stack_viewer()
def checklinecache(self):
    """Drop every non-<pyshell#N> entry from the line cache.

    Iterates over a snapshot of the keys: deleting from a dict while
    iterating its live key view raises RuntimeError on Python 3 (on
    Python 2 .keys() already returned a list, so the snapshot is a
    harmless no-op there).
    """
    c = linecache.cache
    for key in list(c.keys()):
        # Shell pseudo-files look like "<pyshell#12>"; keep only those.
        if key[:1] + key[-1:] != "<>":
            del c[key]
def runcommand(self, code):
    "Run the code without invoking the debugger"
    # The code better not raise an exception!
    if self.tkconsole.executing:
        self.display_executing_dialog()
        return 0
    if self.rpcclt:
        # Queue the code for execution in the subprocess.
        self.rpcclt.remotequeue("exec", "runcode", (code,), {})
    else:
        # No subprocess: execute in-process (Python 2 exec statement).
        exec code in self.locals
    return 1
def runcode(self, code):
    """Override base class method: run compiled code.

    Dispatches to the debugger, the RPC subprocess, or an in-process
    exec depending on configuration; restores warning filters saved
    by runsource().
    """
    if self.tkconsole.executing:
        # NOTE(review): self.interp looks odd here -- this object *is*
        # the interpreter; presumably meant as restart_subprocess().
        self.interp.restart_subprocess()
    self.checklinecache()
    if self.save_warnings_filters is not None:
        warnings.filters[:] = self.save_warnings_filters
        self.save_warnings_filters = None
    debugger = self.debugger
    try:
        self.tkconsole.beginexecuting()
        if not debugger and self.rpcclt is not None:
            # Asynchronous execution; poll_subprocess() collects the
            # response for this sequence number.
            self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
                                                     (code,), {})
        elif debugger:
            debugger.run(code, self.locals)
        else:
            # Python 2 exec statement (in-process, no subprocess).
            exec code in self.locals
    except SystemExit:
        if not self.tkconsole.closing:
            if tkMessageBox.askyesno(
                    "Exit?",
                    "Do you want to exit altogether?",
                    default="yes",
                    parent=self.tkconsole.text):
                raise
            else:
                self.showtraceback()
        else:
            raise
    except:
        if use_subprocess:
            print("IDLE internal error in runcode()",
                  file=self.tkconsole.stderr)
            self.showtraceback()
            self.tkconsole.endexecuting()
        else:
            if self.tkconsole.canceled:
                self.tkconsole.canceled = False
                print("KeyboardInterrupt", file=self.tkconsole.stderr)
            else:
                self.showtraceback()
    finally:
        if not use_subprocess:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
def write(self, s):
    "Override base class method"
    # Route interpreter error output to the shell's stderr stream.
    self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
    # Shown when the RPC listening socket could not be bound.
    tkMessageBox.showerror(
        "Port Binding Error",
        "IDLE can't bind to a TCP/IP port, which is necessary to "
        "communicate with its Python execution server. This might be "
        "because no networking is installed on this computer. "
        "Run IDLE with the -n command line switch to start without a "
        "subprocess and refer to Help/IDLE Help 'Running without a "
        "subprocess' for further details.",
        parent=self.tkconsole.text)
def display_no_subprocess_error(self):
    # Shown when the subprocess started but never connected back.
    tkMessageBox.showerror(
        "Subprocess Startup Error",
        "IDLE's subprocess didn't make connection. Either IDLE can't "
        "start a subprocess or personal firewall software is blocking "
        "the connection.",
        parent=self.tkconsole.text)
def display_executing_dialog(self):
    # Shown when a new command is attempted while one is running.
    tkMessageBox.showerror(
        "Already executing",
        "The Python Shell window is already executing a command; "
        "please wait until it is finished.",
        parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
    """Block (in a nested Tk mainloop) until a line of input arrives.

    Returns "" on forced stop or EOF, a lone newline for an empty
    submission, otherwise the text between iomark and end of buffer.
    """
    save = self.reading
    try:
        self.reading = 1
        self.top.mainloop()  # nested mainloop()
    finally:
        self.reading = save
    if self._stop_readline_flag:
        self._stop_readline_flag = False
        return ""
    line = self.text.get("iomark", "end-1c")
    if len(line) == 0:  # may be EOF if we quit our mainloop with Ctrl-C
        line = "\n"
    # NOTE: Python 2 code -- `unicode` does not exist on Python 3.
    if isinstance(line, unicode):
        from idlelib import IOBinding
        try:
            line = line.encode(IOBinding.encoding)
        except UnicodeError:
            pass
    self.resetoutput()
    if self.canceled:
        self.canceled = 0
        if not use_subprocess:
            raise KeyboardInterrupt
    if self.endoffile:
        self.endoffile = 0
        line = ""
    return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
    """Submit the current input region to the interpreter.

    Trailing blanks/tabs and at most one final newline are stripped,
    so hitting Return twice can end a multi-line statement.
    """
    line = self.text.get("iomark", "end-1c")
    # Strip trailing blanks/tabs, then one newline, then blanks/tabs
    # again -- exactly what the original index loop did.
    line = line.rstrip(" \t")
    if line.endswith("\n"):
        line = line[:-1].rstrip(" \t")
    self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.StackViewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
    """Store pending input in history and move iomark to buffer end."""
    source = self.text.get("iomark", "end-1c")
    if self.history:
        self.history.store(source)
    if self.text.get("end-2c") != "\n":
        # Guarantee that subsequent output starts on a fresh line.
        self.text.insert("end-1c", "\n")
    self.text.mark_set("iomark", "end-1c")
    self.set_line_and_column()
    # Python 2 print-statement bookkeeping on the redirected stdout.
    sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super(PyShell, self).rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert', '<', 'iomark'):
return 'disabled'
return super(PyShell, self).rmenu_check_paste()
class PseudoFile(io.TextIOBase):
    """File-like base class for the shell's standard streams.

    Holds a reference to the PyShell window plus a `tags` string that
    both names the stream and tags its text in the widget.
    """

    def __init__(self, shell, tags, encoding=None):
        self.shell = shell
        self.tags = tags
        # Retained for Python 2 print-statement softspace bookkeeping.
        self.softspace = 0
        self._encoding = encoding

    @property
    def encoding(self):
        """Encoding declared for this stream (may be None)."""
        return self._encoding

    @property
    def name(self):
        """A pseudo file name such as '<stdout>'."""
        return '<%s>' % self.tags

    def isatty(self):
        """Report as interactive, like a real terminal stream."""
        return True
class PseudoOutputFile(PseudoFile):
    # Writable pseudo-file that forwards writes to the shell window.

    def writable(self):
        return True

    def write(self, s):
        """Write s to the shell, tagged with this stream's tag.

        Exact unicode/str/bytearray instances pass through untouched;
        subclass instances are converted to their base type first (see
        issue #19481); anything else raises TypeError.
        NOTE: Python 2 code -- `unicode` is undefined on Python 3.
        """
        if self.closed:
            raise ValueError("write to closed file")
        if type(s) not in (unicode, str, bytearray):
            # See issue #19481
            if isinstance(s, unicode):
                s = unicode.__getitem__(s, slice(None))
            elif isinstance(s, str):
                s = str.__str__(s)
            elif isinstance(s, bytearray):
                s = bytearray.__str__(s)
            else:
                raise TypeError('must be string, not ' + type(s).__name__)
        return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
    # Readable pseudo-file: stdin that pulls lines from the shell.

    def __init__(self, shell, tags, encoding=None):
        PseudoFile.__init__(self, shell, tags, encoding)
        # Text already read from the shell but not yet consumed.
        self._line_buffer = ''

    def readable(self):
        return True

    def read(self, size=-1):
        """Read up to `size` characters (everything until EOF when
        size < 0), pulling whole lines from the shell as needed.
        NOTE: Python 2 code -- `long` is undefined on Python 3.
        """
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, (int, long)):
            raise TypeError('must be int, not ' + type(size).__name__)
        result = self._line_buffer
        self._line_buffer = ''
        if size < 0:
            while True:
                line = self.shell.readline()
                if not line: break
                result += line
        else:
            while len(result) < size:
                line = self.shell.readline()
                if not line: break
                result += line
            # Keep any overshoot for the next read.
            self._line_buffer = result[size:]
            result = result[:size]
        return result

    def readline(self, size=-1):
        """Read one line, or at most `size` characters of it."""
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, (int, long)):
            raise TypeError('must be int, not ' + type(size).__name__)
        line = self._line_buffer or self.shell.readline()
        if size < 0:
            size = len(line)
        # Return only up to (and including) the first newline.
        eol = line.find('\n', 0, size)
        if eol >= 0:
            size = eol + 1
        self._line_buffer = line[size:]
        return line[:size]

    def close(self):
        # Closing stdin closes the whole shell window.
        self.shell.close()
def fix_x11_paste(root):
    "Make paste replace selection on x11.  See issue #5124."
    if root._windowingsystem == 'x11':
        for cls in 'Text', 'Entry', 'Spinbox':
            # Prepend a Tcl snippet that deletes the selection (if any)
            # before the original <<Paste>> binding inserts text.
            root.bind_class(
                cls,
                '<<Paste>>',
                'catch {%W delete sel.first sel.last}\n' +
                root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script, file=sys.stderr)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
root.withdraw()
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif TkVersion >= 8.5:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
root.tk.call('wm', 'iconphoto', str(root), "-default", *icons)
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if macosxSupport.isAquaTk():
# There are some screwed up <2> class bindings for text
# widgets defined in Tk which we need to do away with.
# See issue #24801.
root.unbind_class('Text', '<B2>')
root.unbind_class('Text', '<B2-Motion>')
root.unbind_class('Text', '<<PasteSelection>>')
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
# filename is a directory actually, disconsider it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
test_monitors.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
import os
import pytest
import subprocess
import time
import ray
from ray.tests.utils import run_and_get_output
def _test_cleanup_on_driver_exit(num_redis_shards):
    """Start a ray cluster, run a driver in a child process, and verify
    the driver's objects/tasks are cleaned up after it exits.
    """
    stdout = run_and_get_output([
        "ray",
        "start",
        "--head",
        "--num-redis-shards",
        str(num_redis_shards),
    ])
    lines = [m.strip() for m in stdout.split("\n")]
    init_cmd = [m for m in lines if m.startswith("ray.init")]
    assert 1 == len(init_cmd)
    # Extract the redis address from the printed ray.init(...) line.
    # NOTE(review): assumes the address is double-quoted with exactly
    # two characters after it -- brittle against output changes.
    redis_address = init_cmd[0].split("redis_address=\"")[-1][:-2]

    max_attempts_before_failing = 100

    # Wait for monitor.py to start working.
    time.sleep(2)

    def StateSummary():
        # (num objects, num tasks, num functions) from the GCS tables.
        obj_tbl_len = len(ray.global_state.object_table())
        task_tbl_len = len(ray.global_state.task_table())
        func_tbl_len = len(ray.global_state.function_table())
        return obj_tbl_len, task_tbl_len, func_tbl_len

    def Driver(success):
        success.value = True
        # Start driver.
        ray.init(redis_address=redis_address)
        summary_start = StateSummary()
        # Expect 0 objects and 1 task (presumably the driver task).
        if (0, 1) != summary_start[:2]:
            success.value = False

        # Two new objects.
        ray.get(ray.put(1111))
        ray.get(ray.put(1111))
        attempts = 0
        while (2, 1, summary_start[2]) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break

        @ray.remote
        def f():
            ray.put(1111)  # Yet another object.
            return 1111  # A returned object as well.

        # 1 new function.
        attempts = 0
        while (2, 1, summary_start[2] + 1) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break

        ray.get(f.remote())
        attempts = 0
        while (4, 2, summary_start[2] + 1) != StateSummary():
            time.sleep(0.1)
            attempts += 1
            if attempts == max_attempts_before_failing:
                success.value = False
                break

        ray.shutdown()

    success = multiprocessing.Value("b", False)
    driver = multiprocessing.Process(target=Driver, args=(success, ))
    driver.start()
    # Wait for client to exit.
    driver.join()
    # Just make sure Driver() is run and succeeded.
    assert success.value

    # Check that objects, tasks, and functions are cleaned up.
    ray.init(redis_address=redis_address)
    attempts = 0
    while (0, 1) != StateSummary()[:2]:
        time.sleep(0.1)
        attempts += 1
        if attempts == max_attempts_before_failing:
            break
    assert (0, 1) == StateSummary()[:2]

    ray.shutdown()
    subprocess.Popen(["ray", "stop"]).wait()
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_single_redis_shard():
    # Exercise driver cleanup with a single (unsharded) Redis shard.
    _test_cleanup_on_driver_exit(num_redis_shards=1)
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Hanging with the new GCS API.")
def test_cleanup_on_driver_exit_many_redis_shards():
    # Same scenario with sharded Redis (5 shards, then a prime count).
    _test_cleanup_on_driver_exit(num_redis_shards=5)
    _test_cleanup_on_driver_exit(num_redis_shards=31)
|
rdd.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from base64 import standard_b64encode as b64enc
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import os
import sys
import shlex
import traceback
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
from heapq import heappush, heappop, heappushpop
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, pack_long
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler
from pyspark.storagelevel import StorageLevel
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
def _extract_concise_traceback():
tb = traceback.extract_stack()
if len(tb) == 0:
return "I'm lost!"
# HACK: This function is in a file called 'rdd.py' in the top level of
# everything PySpark. Just trim off the directory name and assume
# everything in that tree is PySpark guts.
file, line, module, what = tb[len(tb) - 1]
sparkpath = os.path.dirname(file)
first_spark_frame = len(tb) - 1
for i in range(0, len(tb)):
file, line, fun, what = tb[i]
if file.startswith(sparkpath):
first_spark_frame = i
break
if first_spark_frame == 0:
file, line, fun, what = tb[0]
return "%s at %s:%d" % (fun, file, line)
sfile, sline, sfun, swhat = tb[first_spark_frame]
ufile, uline, ufun, uwhat = tb[first_spark_frame-1]
return "%s at %s:%d" % (sfun, ufile, uline)
# Nesting depth of active _JavaStackTrace contexts; the Java call site is
# only set on entering, and cleared on leaving, the outermost level.
_spark_stack_depth = 0
class _JavaStackTrace(object):
    """Context manager that publishes a concise Python call-site string to
    the JVM for the duration of the outermost Spark operation."""

    def __init__(self, sc):
        # Capture the call site eagerly, at construction time.
        self._traceback = _extract_concise_traceback()
        self._context = sc

    def __enter__(self):
        global _spark_stack_depth
        if _spark_stack_depth == 0:
            # Outermost operation: set the Java-side call site.
            self._context._jsc.setCallSite(self._traceback)
        _spark_stack_depth += 1

    def __exit__(self, exc_type, exc_value, exc_tb):
        global _spark_stack_depth
        _spark_stack_depth -= 1
        if _spark_stack_depth == 0:
            # Left the outermost operation: clear the Java-side call site.
            self._context._jsc.setCallSite(None)
class RDD(object):
    """
    A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
    Represents an immutable, partitioned collection of elements that can be
    operated on in parallel.
    """

    # NOTE: this file uses Python 2-only syntax (tuple-unpacking lambdas,
    # iteritems, imap/ifilter); it is not Python 3 compatible.

    def __init__(self, jrdd, ctx, jrdd_deserializer):
        # jrdd: Py4J handle to the Java-side RDD.
        # ctx: the SparkContext this RDD belongs to.
        # jrdd_deserializer: serializer used to decode this RDD's elements
        #     on the Python side.
        self._jrdd = jrdd
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = ctx
        self._jrdd_deserializer = jrdd_deserializer

    def __repr__(self):
        # Delegate to the Java RDD's string representation.
        return self._jrdd.toString()

    @property
    def context(self):
        """
        The L{SparkContext} that this RDD was created on.
        """
        return self.ctx

    def cache(self):
        """
        Persist this RDD with the default storage level (C{MEMORY_ONLY}).
        """
        self.is_cached = True
        self._jrdd.cache()
        return self

    def persist(self, storageLevel):
        """
        Set this RDD's storage level to persist its values across operations after the first time
        it is computed. This can only be used to assign a new storage level if the RDD does not
        have a storage level set yet.
        """
        self.is_cached = True
        javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
        self._jrdd.persist(javaStorageLevel)
        return self

    def unpersist(self):
        """
        Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
        """
        self.is_cached = False
        self._jrdd.unpersist()
        return self

    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
        checkpoint directory set with L{SparkContext.setCheckpointDir()} and
        all references to its parent RDDs will be removed. This function must
        be called before any job has been executed on this RDD. It is strongly
        recommended that this RDD is persisted in memory, otherwise saving it
        on a file will require recomputation.
        """
        self.is_checkpointed = True
        self._jrdd.rdd().checkpoint()

    def isCheckpointed(self):
        """
        Return whether this RDD has been checkpointed or not
        """
        return self._jrdd.rdd().isCheckpointed()

    def getCheckpointFile(self):
        """
        Gets the name of the file to which this RDD was checkpointed
        """
        # The Java side returns a Scala Option; unwrap it to a value or None.
        checkpointFile = self._jrdd.rdd().getCheckpointFile()
        if checkpointFile.isDefined():
            return checkpointFile.get()
        else:
            return None

    def map(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each element of this RDD.
        """
        def func(split, iterator): return imap(f, iterator)
        return PipelinedRDD(self, func, preservesPartitioning)

    def flatMap(self, f, preservesPartitioning=False):
        """
        Return a new RDD by first applying a function to all elements of this
        RDD, and then flattening the results.

        >>> rdd = sc.parallelize([2, 3, 4])
        >>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
        [1, 1, 1, 2, 2, 3]
        >>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
        [(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
        """
        def func(s, iterator): return chain.from_iterable(imap(f, iterator))
        return self.mapPartitionsWithIndex(func, preservesPartitioning)

    def mapPartitions(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> def f(iterator): yield sum(iterator)
        >>> rdd.mapPartitions(f).collect()
        [3, 7]
        """
        # NOTE: preservesPartitioning is accepted but not forwarded here;
        # TODO confirm whether that is intentional.
        def func(s, iterator): return f(iterator)
        return self.mapPartitionsWithIndex(func)

    def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
        """
        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithIndex(f).sum()
        6
        """
        return PipelinedRDD(self, f, preservesPartitioning)

    def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
        """
        Deprecated: use mapPartitionsWithIndex instead.

        Return a new RDD by applying a function to each partition of this RDD,
        while tracking the index of the original partition.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 4)
        >>> def f(splitIndex, iterator): yield splitIndex
        >>> rdd.mapPartitionsWithSplit(f).sum()
        6
        """
        warnings.warn("mapPartitionsWithSplit is deprecated; "
            "use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
        return self.mapPartitionsWithIndex(f, preservesPartitioning)

    def filter(self, f):
        """
        Return a new RDD containing only the elements that satisfy a predicate.

        >>> rdd = sc.parallelize([1, 2, 3, 4, 5])
        >>> rdd.filter(lambda x: x % 2 == 0).collect()
        [2, 4]
        """
        def func(iterator): return ifilter(f, iterator)
        return self.mapPartitions(func)

    def distinct(self):
        """
        Return a new RDD containing the distinct elements in this RDD.

        >>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
        [1, 2, 3]
        """
        # Implemented as a shuffle: pair each element with None, collapse
        # duplicates with reduceByKey, then drop the dummy value.
        return self.map(lambda x: (x, None)) \
                   .reduceByKey(lambda x, _: x) \
                   .map(lambda (x, _): x)

    def sample(self, withReplacement, fraction, seed):
        """
        Return a sampled subset of this RDD (relies on numpy and falls back
        on default random generator if numpy is unavailable).

        >>> sc.parallelize(range(0, 100)).sample(False, 0.1, 2).collect() #doctest: +SKIP
        [2, 3, 20, 21, 24, 41, 42, 66, 67, 89, 90, 98]
        """
        assert fraction >= 0.0, "Invalid fraction value: %s" % fraction
        return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)

    # this is ported from scala/spark/RDD.scala
    def takeSample(self, withReplacement, num, seed):
        """
        Return a fixed-size sampled subset of this RDD (currently requires numpy).

        >>> sc.parallelize(range(0, 10)).takeSample(True, 10, 1) #doctest: +SKIP
        [4, 2, 1, 8, 2, 7, 0, 4, 1, 4]
        """
        fraction = 0.0
        total = 0
        # Oversampling factor so that a single pass usually yields enough
        # elements (see scala/spark/RDD.scala).
        multiplier = 3.0
        initialCount = self.count()
        maxSelected = 0

        if (num < 0):
            raise ValueError

        if (initialCount == 0):
            return list()

        # Clamp to avoid overflowing Python 2's sys.maxint.
        if initialCount > sys.maxint - 1:
            maxSelected = sys.maxint - 1
        else:
            maxSelected = initialCount

        if num > initialCount and not withReplacement:
            # Without replacement we can return at most every element.
            total = maxSelected
            fraction = multiplier * (maxSelected + 1) / initialCount
        else:
            fraction = multiplier * (num + 1) / initialCount
            total = num

        samples = self.sample(withReplacement, fraction, seed).collect()

        # If the first sample didn't turn out large enough, keep trying to take samples;
        # this shouldn't happen often because we use a big multiplier for their initial size.
        # See: scala/spark/RDD.scala
        while len(samples) < total:
            # Vary the seed on each retry (wrapping before sys.maxint).
            if seed > sys.maxint - 2:
                seed = -1
            seed += 1
            samples = self.sample(withReplacement, fraction, seed).collect()

        sampler = RDDSampler(withReplacement, fraction, seed+1)
        # Shuffle so truncating to `total` picks a uniform subset.
        sampler.shuffle(samples)
        return samples[0:total]

    def union(self, other):
        """
        Return the union of this RDD and another one.

        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> rdd.union(rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if self._jrdd_deserializer == other._jrdd_deserializer:
            rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
                      self._jrdd_deserializer)
            return rdd
        else:
            # These RDDs contain data in different serialized formats, so we
            # must normalize them to the default serializer.
            self_copy = self._reserialize()
            other_copy = other._reserialize()
            return RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
                       self.ctx.serializer)

    def _reserialize(self):
        # Re-encode this RDD's data with the context's default serializer
        # (an identity map forces re-serialization through the pipeline).
        if self._jrdd_deserializer == self.ctx.serializer:
            return self
        else:
            return self.map(lambda x: x, preservesPartitioning=True)

    def __add__(self, other):
        """
        Return the union of this RDD and another one.

        >>> rdd = sc.parallelize([1, 1, 2, 3])
        >>> (rdd + rdd).collect()
        [1, 1, 2, 3, 1, 1, 2, 3]
        """
        if not isinstance(other, RDD):
            raise TypeError
        return self.union(other)

    def sortByKey(self, ascending=True, numPartitions=None, keyfunc = lambda x: x):
        """
        Sorts this RDD, which is assumed to consist of (key, value) pairs.

        >>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
        >>> sc.parallelize(tmp).sortByKey(True, 2).collect()
        [('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
        >>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
        >>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
        >>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
        [('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5), ('little', 4), ('Mary', 1), ('was', 8), ('white', 9), ('whose', 6)]
        """
        if numPartitions is None:
            numPartitions = self.ctx.defaultParallelism

        bounds = list()

        # first compute the boundary of each part via sampling: we want to partition
        # the key-space into bins such that the bins have roughly the same
        # number of (key, value) pairs falling into them
        if numPartitions > 1:
            rddSize = self.count()
            maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
            fraction = min(maxSampleSize / max(rddSize, 1), 1.0)

            samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
            samples = sorted(samples, reverse=(not ascending), key=keyfunc)

            # we have numPartitions many parts but one of the them has
            # an implicit boundary
            for i in range(0, numPartitions - 1):
                index = (len(samples) - 1) * (i + 1) / numPartitions
                bounds.append(samples[index])

        def rangePartitionFunc(k):
            # Linear scan over the (at most numPartitions-1) boundaries.
            p = 0
            while p < len(bounds) and keyfunc(k) > bounds[p]:
                p += 1
            if ascending:
                return p
            else:
                return numPartitions-1-p

        def mapFunc(iterator):
            # Sort each range-partition locally; partitions are already in
            # key-range order, so the concatenation is globally sorted.
            yield sorted(iterator, reverse=(not ascending), key=lambda (k, v): keyfunc(k))

        return (self.partitionBy(numPartitions, partitionFunc=rangePartitionFunc)
                    .mapPartitions(mapFunc,preservesPartitioning=True)
                    .flatMap(lambda x: x, preservesPartitioning=True))

    def glom(self):
        """
        Return an RDD created by coalescing all elements within each partition
        into a list.

        >>> rdd = sc.parallelize([1, 2, 3, 4], 2)
        >>> sorted(rdd.glom().collect())
        [[1, 2], [3, 4]]
        """
        def func(iterator): yield list(iterator)
        return self.mapPartitions(func)

    def cartesian(self, other):
        """
        Return the Cartesian product of this RDD and another one, that is, the
        RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
        C{b} is in C{other}.

        >>> rdd = sc.parallelize([1, 2])
        >>> sorted(rdd.cartesian(rdd).collect())
        [(1, 1), (1, 2), (2, 1), (2, 2)]
        """
        # Due to batching, we can't use the Java cartesian method.
        deserializer = CartesianDeserializer(self._jrdd_deserializer,
                                             other._jrdd_deserializer)
        return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)

    def groupBy(self, f, numPartitions=None):
        """
        Return an RDD of grouped items.

        >>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
        >>> result = rdd.groupBy(lambda x: x % 2).collect()
        >>> sorted([(x, sorted(y)) for (x, y) in result])
        [(0, [2, 8]), (1, [1, 1, 3, 5])]
        """
        return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)

    def pipe(self, command, env={}):
        """
        Return an RDD created by piping elements to a forked external process.

        >>> sc.parallelize([1, 2, 3]).pipe('cat').collect()
        ['1', '2', '3']
        """
        # NOTE: the mutable default env={} is shared across calls; it is
        # only read (passed to Popen), not mutated here.
        def func(iterator):
            pipe = Popen(shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
            # Feed input on a separate thread to avoid deadlocking when the
            # child's output buffer fills while we are still writing.
            def pipe_objs(out):
                for obj in iterator:
                    out.write(str(obj).rstrip('\n') + '\n')
                out.close()
            Thread(target=pipe_objs, args=[pipe.stdin]).start()
            return (x.rstrip('\n') for x in pipe.stdout)
        return self.mapPartitions(func)

    def foreach(self, f):
        """
        Applies a function to all elements of this RDD.

        >>> def f(x): print x
        >>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
        """
        def processPartition(iterator):
            for x in iterator:
                f(x)
            yield None
        self.mapPartitions(processPartition).collect()  # Force evaluation

    def collect(self):
        """
        Return a list that contains all of the elements in this RDD.
        """
        with _JavaStackTrace(self.context) as st:
            bytesInJava = self._jrdd.collect().iterator()
        return list(self._collect_iterator_through_file(bytesInJava))

    def _collect_iterator_through_file(self, iterator):
        # Transferring lots of data through Py4J can be slow because
        # socket.readline() is inefficient. Instead, we'll dump the data to a
        # file and read it back.
        tempFile = NamedTemporaryFile(delete=False, dir=self.ctx._temp_dir)
        tempFile.close()
        self.ctx._writeToFile(iterator, tempFile.name)
        # Read the data into Python and deserialize it:
        with open(tempFile.name, 'rb') as tempFile:
            for item in self._jrdd_deserializer.load_stream(tempFile):
                yield item
        os.unlink(tempFile.name)

    def reduce(self, f):
        """
        Reduces the elements of this RDD using the specified commutative and
        associative binary operator.

        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
        15
        >>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
        10
        """
        def func(iterator):
            # Reduce within the partition; empty partitions yield nothing.
            acc = None
            for obj in iterator:
                if acc is None:
                    acc = obj
                else:
                    acc = f(obj, acc)
            if acc is not None:
                yield acc
        vals = self.mapPartitions(func).collect()
        return reduce(f, vals)

    def fold(self, zeroValue, op):
        """
        Aggregate the elements of each partition, and then the results for all
        the partitions, using a given associative function and a neutral "zero
        value."

        The function C{op(t1, t2)} is allowed to modify C{t1} and return it
        as its result value to avoid object allocation; however, it should not
        modify C{t2}.

        >>> from operator import add
        >>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
        15
        """
        def func(iterator):
            acc = zeroValue
            for obj in iterator:
                acc = op(obj, acc)
            yield acc
        vals = self.mapPartitions(func).collect()
        return reduce(op, vals, zeroValue)

    # TODO: aggregate

    def sum(self):
        """
        Add up the elements in this RDD.

        >>> sc.parallelize([1.0, 2.0, 3.0]).sum()
        6.0
        """
        return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)

    def count(self):
        """
        Return the number of elements in this RDD.

        >>> sc.parallelize([2, 3, 4]).count()
        3
        """
        return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()

    def stats(self):
        """
        Return a L{StatCounter} object that captures the mean, variance
        and count of the RDD's elements in one operation.
        """
        def redFunc(left_counter, right_counter):
            return left_counter.mergeStats(right_counter)

        return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)

    def mean(self):
        """
        Compute the mean of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).mean()
        2.0
        """
        return self.stats().mean()

    def variance(self):
        """
        Compute the variance of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).variance()
        0.666...
        """
        return self.stats().variance()

    def stdev(self):
        """
        Compute the standard deviation of this RDD's elements.

        >>> sc.parallelize([1, 2, 3]).stdev()
        0.816...
        """
        return self.stats().stdev()

    def sampleStdev(self):
        """
        Compute the sample standard deviation of this RDD's elements (which corrects for bias in
        estimating the standard deviation by dividing by N-1 instead of N).

        >>> sc.parallelize([1, 2, 3]).sampleStdev()
        1.0
        """
        return self.stats().sampleStdev()

    def sampleVariance(self):
        """
        Compute the sample variance of this RDD's elements (which corrects for bias in
        estimating the variance by dividing by N-1 instead of N).

        >>> sc.parallelize([1, 2, 3]).sampleVariance()
        1.0
        """
        return self.stats().sampleVariance()

    def countByValue(self):
        """
        Return the count of each unique value in this RDD as a dictionary of
        (value, count) pairs.

        >>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
        [(1, 2), (2, 3)]
        """
        def countPartition(iterator):
            counts = defaultdict(int)
            for obj in iterator:
                counts[obj] += 1
            yield counts
        def mergeMaps(m1, m2):
            for (k, v) in m2.iteritems():
                m1[k] += v
            return m1
        return self.mapPartitions(countPartition).reduce(mergeMaps)

    def top(self, num):
        """
        Get the top N elements from a RDD.

        Note: It returns the list sorted in descending order.
        >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
        [12]
        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().top(2)
        [6, 5]
        """
        def topIterator(iterator):
            # Keep a size-bounded min-heap of the largest elements seen.
            q = []
            for k in iterator:
                if len(q) < num:
                    heappush(q, k)
                else:
                    heappushpop(q, k)
            yield q

        def merge(a, b):
            return next(topIterator(a + b))

        return sorted(self.mapPartitions(topIterator).reduce(merge), reverse=True)

    def take(self, num):
        """
        Take the first num elements of the RDD.

        This currently scans the partitions *one by one*, so it will be slow if
        a lot of partitions are required. In that case, use L{collect} to get
        the whole RDD instead.

        >>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
        [2, 3]
        >>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
        [2, 3, 4, 5, 6]
        """
        def takeUpToNum(iterator):
            taken = 0
            while taken < num:
                yield next(iterator)
                taken += 1
        # Take only up to num elements from each partition we try
        mapped = self.mapPartitions(takeUpToNum)
        items = []
        # TODO(shivaram): Similar to the scala implementation, update the take
        # method to scan multiple splits based on an estimate of how many elements
        # we have per-split.
        with _JavaStackTrace(self.context) as st:
            for partition in range(mapped._jrdd.splits().size()):
                partitionsToTake = self.ctx._gateway.new_array(self.ctx._jvm.int, 1)
                partitionsToTake[0] = partition
                iterator = mapped._jrdd.collectPartitions(partitionsToTake)[0].iterator()
                items.extend(mapped._collect_iterator_through_file(iterator))
                if len(items) >= num:
                    break
        return items[:num]

    def first(self):
        """
        Return the first element in this RDD.

        >>> sc.parallelize([2, 3, 4]).first()
        2
        """
        return self.take(1)[0]

    def saveAsTextFile(self, path):
        """
        Save this RDD as a text file, using string representations of elements.

        >>> tempFile = NamedTemporaryFile(delete=True)
        >>> tempFile.close()
        >>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
        >>> from fileinput import input
        >>> from glob import glob
        >>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
        '0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
        """
        def func(split, iterator):
            for x in iterator:
                if not isinstance(x, basestring):
                    x = unicode(x)
                yield x.encode("utf-8")
        keyed = PipelinedRDD(self, func)
        # Hand raw UTF-8 bytes to Java without Python-side serialization.
        keyed._bypass_serializer = True
        keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)

    # Pair functions

    def collectAsMap(self):
        """
        Return the key-value pairs in this RDD to the master as a dictionary.

        >>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
        >>> m[1]
        2
        >>> m[3]
        4
        """
        return dict(self.collect())

    def reduceByKey(self, func, numPartitions=None):
        """
        Merge the values for each key using an associative reduce function.

        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.

        Output will be hash-partitioned with C{numPartitions} partitions, or
        the default parallelism level if C{numPartitions} is not specified.

        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKey(add).collect())
        [('a', 2), ('b', 1)]
        """
        return self.combineByKey(lambda x: x, func, func, numPartitions)

    def reduceByKeyLocally(self, func):
        """
        Merge the values for each key using an associative reduce function, but
        return the results immediately to the master as a dictionary.

        This will also perform the merging locally on each mapper before
        sending results to a reducer, similarly to a "combiner" in MapReduce.

        >>> from operator import add
        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.reduceByKeyLocally(add).items())
        [('a', 2), ('b', 1)]
        """
        def reducePartition(iterator):
            m = {}
            for (k, v) in iterator:
                m[k] = v if k not in m else func(m[k], v)
            yield m
        def mergeMaps(m1, m2):
            for (k, v) in m2.iteritems():
                m1[k] = v if k not in m1 else func(m1[k], v)
            return m1
        return self.mapPartitions(reducePartition).reduce(mergeMaps)

    def countByKey(self):
        """
        Count the number of elements for each key, and return the result to the
        master as a dictionary.

        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(rdd.countByKey().items())
        [('a', 2), ('b', 1)]
        """
        return self.map(lambda x: x[0]).countByValue()

    def join(self, other, numPartitions=None):
        """
        Return an RDD containing all pairs of elements with matching keys in
        C{self} and C{other}.

        Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
        (k, v1) is in C{self} and (k, v2) is in C{other}.

        Performs a hash join across the cluster.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2), ("a", 3)])
        >>> sorted(x.join(y).collect())
        [('a', (1, 2)), ('a', (1, 3))]
        """
        return python_join(self, other, numPartitions)

    def leftOuterJoin(self, other, numPartitions=None):
        """
        Perform a left outer join of C{self} and C{other}.

        For each element (k, v) in C{self}, the resulting RDD will either
        contain all pairs (k, (v, w)) for w in C{other}, or the pair
        (k, (v, None)) if no elements in other have key k.

        Hash-partitions the resulting RDD into the given number of partitions.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.leftOuterJoin(y).collect())
        [('a', (1, 2)), ('b', (4, None))]
        """
        return python_left_outer_join(self, other, numPartitions)

    def rightOuterJoin(self, other, numPartitions=None):
        """
        Perform a right outer join of C{self} and C{other}.

        For each element (k, w) in C{other}, the resulting RDD will either
        contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
        if no elements in C{self} have key k.

        Hash-partitions the resulting RDD into the given number of partitions.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(y.rightOuterJoin(x).collect())
        [('a', (2, 1)), ('b', (None, 4))]
        """
        return python_right_outer_join(self, other, numPartitions)

    # TODO: add option to control map-side combining
    def partitionBy(self, numPartitions, partitionFunc=hash):
        """
        Return a copy of the RDD partitioned using the specified partitioner.

        >>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
        >>> sets = pairs.partitionBy(2).glom().collect()
        >>> set(sets[0]).intersection(set(sets[1]))
        set([])
        """
        if numPartitions is None:
            numPartitions = self.ctx.defaultParallelism
        # Transferring O(n) objects to Java is too expensive. Instead, we'll
        # form the hash buckets in Python, transferring O(numPartitions) objects
        # to Java. Each object is a (splitNumber, [objects]) pair.
        outputSerializer = self.ctx._unbatched_serializer
        def add_shuffle_key(split, iterator):
            # Emit alternating (packed split number, serialized bucket)
            # records for the Java-side PairwiseRDD to consume.
            buckets = defaultdict(list)
            for (k, v) in iterator:
                buckets[partitionFunc(k) % numPartitions].append((k, v))
            for (split, items) in buckets.iteritems():
                yield pack_long(split)
                yield outputSerializer.dumps(items)
        keyed = PipelinedRDD(self, add_shuffle_key)
        keyed._bypass_serializer = True
        with _JavaStackTrace(self.context) as st:
            pairRDD = self.ctx._jvm.PairwiseRDD(keyed._jrdd.rdd()).asJavaPairRDD()
            partitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
                                                          id(partitionFunc))
        jrdd = pairRDD.partitionBy(partitioner).values()
        rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
        # This is required so that id(partitionFunc) remains unique, even if
        # partitionFunc is a lambda:
        rdd._partitionFunc = partitionFunc
        return rdd

    # TODO: add control over map-side aggregation
    def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
                     numPartitions=None):
        """
        Generic function to combine the elements for each key using a custom
        set of aggregation functions.

        Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
        type" C.  Note that V and C can be different -- for example, one might
        group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).

        Users provide three functions:

            - C{createCombiner}, which turns a V into a C (e.g., creates
              a one-element list)
            - C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
              a list)
            - C{mergeCombiners}, to combine two C's into a single one.

        In addition, users can control the partitioning of the output RDD.

        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> def f(x): return x
        >>> def add(a, b): return a + str(b)
        >>> sorted(x.combineByKey(str, add, add).collect())
        [('a', '11'), ('b', '1')]
        """
        if numPartitions is None:
            numPartitions = self.ctx.defaultParallelism
        def combineLocally(iterator):
            # Map-side combine before the shuffle.
            combiners = {}
            for x in iterator:
                (k, v) = x
                if k not in combiners:
                    combiners[k] = createCombiner(v)
                else:
                    combiners[k] = mergeValue(combiners[k], v)
            return combiners.iteritems()
        locally_combined = self.mapPartitions(combineLocally)
        shuffled = locally_combined.partitionBy(numPartitions)
        def _mergeCombiners(iterator):
            # Reduce-side merge of per-mapper combiners.
            combiners = {}
            for (k, v) in iterator:
                if not k in combiners:
                    combiners[k] = v
                else:
                    combiners[k] = mergeCombiners(combiners[k], v)
            return combiners.iteritems()
        return shuffled.mapPartitions(_mergeCombiners)

    def foldByKey(self, zeroValue, func, numPartitions=None):
        """
        Merge the values for each key using an associative function "func" and a neutral "zeroValue"
        which may be added to the result an arbitrary number of times, and must not change
        the result (e.g., 0 for addition, or 1 for multiplication.).

        >>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> from operator import add
        >>> rdd.foldByKey(0, add).collect()
        [('a', 2), ('b', 1)]
        """
        return self.combineByKey(lambda v: func(zeroValue, v), func, func, numPartitions)

    # TODO: support variant with custom partitioner
    def groupByKey(self, numPartitions=None):
        """
        Group the values for each key in the RDD into a single sequence.
        Hash-partitions the resulting RDD with into numPartitions partitions.

        >>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
        >>> sorted(x.groupByKey().collect())
        [('a', [1, 1]), ('b', [1])]
        """

        def createCombiner(x):
            return [x]

        def mergeValue(xs, x):
            xs.append(x)
            return xs

        def mergeCombiners(a, b):
            return a + b

        return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
                numPartitions)

    # TODO: add tests
    def flatMapValues(self, f):
        """
        Pass each value in the key-value pair RDD through a flatMap function
        without changing the keys; this also retains the original RDD's
        partitioning.
        """
        flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
        return self.flatMap(flat_map_fn, preservesPartitioning=True)

    def mapValues(self, f):
        """
        Pass each value in the key-value pair RDD through a map function
        without changing the keys; this also retains the original RDD's
        partitioning.
        """
        map_values_fn = lambda (k, v): (k, f(v))
        return self.map(map_values_fn, preservesPartitioning=True)

    # TODO: support varargs cogroup of several RDDs.
    def groupWith(self, other):
        """
        Alias for cogroup.
        """
        return self.cogroup(other)

    # TODO: add variant with custom parittioner
    def cogroup(self, other, numPartitions=None):
        """
        For each key k in C{self} or C{other}, return a resulting RDD that
        contains a tuple with the list of values for that key in C{self} as well
        as C{other}.

        >>> x = sc.parallelize([("a", 1), ("b", 4)])
        >>> y = sc.parallelize([("a", 2)])
        >>> sorted(x.cogroup(y).collect())
        [('a', ([1], [2])), ('b', ([4], []))]
        """
        return python_cogroup(self, other, numPartitions)

    def subtractByKey(self, other, numPartitions=None):
        """
        Return each (key, value) pair in C{self} that has no pair with matching key
        in C{other}.

        >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
        >>> y = sc.parallelize([("a", 3), ("c", None)])
        >>> sorted(x.subtractByKey(y).collect())
        [('b', 4), ('b', 5)]
        """
        # Keep keys that appear in self (vals[0]) but not in other (vals[1]).
        filter_func = lambda (key, vals): len(vals[0]) > 0 and len(vals[1]) == 0
        map_func = lambda (key, vals): [(key, val) for val in vals[0]]
        return self.cogroup(other, numPartitions).filter(filter_func).flatMap(map_func)

    def subtract(self, other, numPartitions=None):
        """
        Return each value in C{self} that is not contained in C{other}.

        >>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
        >>> y = sc.parallelize([("a", 3), ("c", None)])
        >>> sorted(x.subtract(y).collect())
        [('a', 1), ('b', 4), ('b', 5)]
        """
        rdd = other.map(lambda x: (x, True)) # note: here 'True' is just a placeholder
        return self.map(lambda x: (x, True)).subtractByKey(rdd).map(lambda tpl: tpl[0]) # note: here 'True' is just a placeholder

    def keyBy(self, f):
        """
        Creates tuples of the elements in this RDD by applying C{f}.

        >>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
        >>> y = sc.parallelize(zip(range(0,5), range(0,5)))
        >>> sorted(x.cogroup(y).collect())
        [(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
        """
        return self.map(lambda x: (f(x), x))

    def repartition(self, numPartitions):
        """
         Return a new RDD that has exactly numPartitions partitions.

         Can increase or decrease the level of parallelism in this RDD. Internally, this uses
         a shuffle to redistribute data.
         If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
         which can avoid performing a shuffle.
         >>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
         >>> sorted(rdd.glom().collect())
         [[1], [2, 3], [4, 5], [6, 7]]
         >>> len(rdd.repartition(2).glom().collect())
         2
         >>> len(rdd.repartition(10).glom().collect())
         10
        """
        jrdd = self._jrdd.repartition(numPartitions)
        return RDD(jrdd, self.ctx, self._jrdd_deserializer)

    def coalesce(self, numPartitions, shuffle=False):
        """
        Return a new RDD that is reduced into `numPartitions` partitions.
        >>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
        [[1], [2, 3], [4, 5]]
        >>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
        [[1, 2, 3, 4, 5]]
        """
        # NOTE: the `shuffle` flag is accepted but not forwarded to the Java
        # coalesce call — TODO confirm whether this is intentional.
        jrdd = self._jrdd.coalesce(numPartitions)
        return RDD(jrdd, self.ctx, self._jrdd_deserializer)

    def zip(self, other):
        """
        Zips this RDD with another one, returning key-value pairs with the first element in each RDD
        second element in each RDD, etc. Assumes that the two RDDs have the same number of
        partitions and the same number of elements in each partition (e.g. one was made through
        a map on the other).

        >>> x = sc.parallelize(range(0,5))
        >>> y = sc.parallelize(range(1000, 1005))
        >>> x.zip(y).collect()
        [(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
        """
        pairRDD = self._jrdd.zip(other._jrdd)
        deserializer = PairDeserializer(self._jrdd_deserializer,
                                        other._jrdd_deserializer)
        return RDD(pairRDD, self.ctx, deserializer)

    def name(self):
        """
        Return the name of this RDD.
        """
        name_ = self._jrdd.name()
        if not name_:
            return None
        return name_.encode('utf-8')

    def setName(self, name):
        """
        Assign a name to this RDD.
        >>> rdd1 = sc.parallelize([1,2])
        >>> rdd1.setName('RDD1')
        >>> rdd1.name()
        'RDD1'
        """
        self._jrdd.setName(name)

    def toDebugString(self):
        """
        A description of this RDD and its recursive dependencies for debugging.
        """
        debug_string = self._jrdd.toDebugString()
        if not debug_string:
            return None
        return debug_string.encode('utf-8')

    def getStorageLevel(self):
        """
        Get the RDD's current storage level.
        >>> rdd1 = sc.parallelize([1,2])
        >>> rdd1.getStorageLevel()
        StorageLevel(False, False, False, 1)
        """
        java_storage_level = self._jrdd.getStorageLevel()
        storage_level = StorageLevel(java_storage_level.useDisk(),
                                     java_storage_level.useMemory(),
                                     java_storage_level.deserialized(),
                                     java_storage_level.replication())
        return storage_level

    # TODO: `lookup` is disabled because we can't make direct comparisons based
    # on the key; we need to compare the hash of the key to the hash of the
    # keys in the pairs.  This could be an expensive operation, since those
    # hashes aren't retained.
class PipelinedRDD(RDD):
    """
    Pipelined maps:
    >>> rdd = sc.parallelize([1, 2, 3, 4])
    >>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]
    >>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
    [4, 8, 12, 16]

    Pipelined reduces:
    >>> from operator import add
    >>> rdd.map(lambda x: 2 * x).reduce(add)
    20
    >>> rdd.flatMap(lambda x: [x, x]).reduce(add)
    20
    """
    def __init__(self, prev, func, preservesPartitioning=False):
        # Chain this transformation onto `prev`.  If `prev` is itself an
        # un-materialized pipeline, compose the functions so only one Java
        # PythonRDD is created for the whole chain.
        if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
            # This transformation is the first in its stage:
            self.func = func
            self.preservesPartitioning = preservesPartitioning
            self._prev_jrdd = prev._jrdd
            self._prev_jrdd_deserializer = prev._jrdd_deserializer
        else:
            # Bind prev.func to a local first: the closure must capture the
            # function itself, not `prev`, so composition stays flat.
            prev_func = prev.func
            def pipeline_func(split, iterator):
                return func(split, prev_func(split, iterator))
            self.func = pipeline_func
            self.preservesPartitioning = \
                prev.preservesPartitioning and preservesPartitioning
            self._prev_jrdd = prev._prev_jrdd  # maintain the pipeline
            self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
        self.is_cached = False
        self.is_checkpointed = False
        self.ctx = prev.ctx
        self.prev = prev
        # Cached Java RDD; built lazily by the _jrdd property.
        self._jrdd_val = None
        self._jrdd_deserializer = self.ctx.serializer
        # When True, output raw bytes to Java without Python serialization
        # (used by saveAsTextFile and partitionBy).
        self._bypass_serializer = False

    @property
    def _jrdd(self):
        # Lazily build (and cache) the Java-side PythonRDD that executes the
        # composed pipeline function.
        if self._jrdd_val:
            return self._jrdd_val
        if self._bypass_serializer:
            serializer = NoOpSerializer()
        else:
            serializer = self.ctx.serializer
        # Ship (func, input deserializer, output serializer) to the workers.
        command = (self.func, self._prev_jrdd_deserializer, serializer)
        pickled_command = CloudPickleSerializer().dumps(command)
        broadcast_vars = ListConverter().convert(
            [x._jbroadcast for x in self.ctx._pickled_broadcast_vars],
            self.ctx._gateway._gateway_client)
        self.ctx._pickled_broadcast_vars.clear()
        class_tag = self._prev_jrdd.classTag()
        env = MapConverter().convert(self.ctx.environment,
                                     self.ctx._gateway._gateway_client)
        includes = ListConverter().convert(self.ctx._python_includes,
                                     self.ctx._gateway._gateway_client)
        python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
            bytearray(pickled_command), env, includes, self.preservesPartitioning,
            self.ctx.pythonExec, broadcast_vars, self.ctx._javaAccumulator,
            class_tag)
        self._jrdd_val = python_rdd.asJavaRDD()
        return self._jrdd_val

    def _is_pipelinable(self):
        # Caching/checkpointing materializes this RDD, so later
        # transformations must not be fused into it.
        return not (self.is_cached or self.is_checkpointed)
def _test():
    # Run this module's doctests against a local 4-thread SparkContext bound
    # to the 'sc' name the doctest examples expect.
    import doctest
    from pyspark.context import SparkContext
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs,optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
# Run the doctest suite when this module is executed directly.
if __name__ == "__main__":
    _test()
|
__init__.py | import logging
try:
from gevent.event import Event
from gevent.lock import RLock
except ImportError:
from threading import Event, RLock
logger = logging.getLogger(__name__)

# Registry of rejection reasons no errback has handled yet, guarded by
# ERR_LOCK (gevent RLock when available, threading.RLock otherwise).
ERR_LOCK = RLock()
UNHANDLED_ERRORS = set()
def defer_error(error):
    """Record *error* as a possibly-unhandled promise rejection."""
    with ERR_LOCK:
        UNHANDLED_ERRORS.add(error)
def remove_deferred_error(error):
    """Drop *error* from the unhandled-rejection registry, if present."""
    with ERR_LOCK:
        # set.discard is the idiomatic check-then-remove in one call.
        UNHANDLED_ERRORS.discard(error)
def get_unhandled_errors():
    # Returns the live set (not a copy) and takes no lock; callers must not
    # mutate it while other threads are settling promises.
    return UNHANDLED_ERRORS
def cleanup():
    """atexit hook: report promise rejections that no errback ever consumed."""
    with ERR_LOCK:
        for err in UNHANDLED_ERRORS:
            # Lazy %-args: no string formatting when the level is disabled.
            logger.error('Possibly unhandled error in a promise %s', err)
            if hasattr(err, 'printme'):
                # 'printme' carries the captured traceback (set in then()).
                err.printme()
            else:
                # Fix: the original called logger.exception() here, but there
                # is no active exception at interpreter shutdown, so it logged
                # a useless "NoneType: None" traceback.
                logger.error('%r', err)
        UNHANDLED_ERRORS.clear()
# Ensure unhandled promise rejections are reported at interpreter shutdown.
import atexit
atexit.register(cleanup)
class Args(tuple):
    """Marker tuple: when a promise is fulfilled with an Args instance, a
    then() success handler receives its items as positional arguments."""
    pass
class CountdownLatch:
    """Thread-safe counter that only counts down from an initial value."""

    def __init__(self, count):
        assert count >= 0
        self._lock = RLock()
        self._count = count

    def dec(self):
        """Decrement the counter and return the new value."""
        with self._lock:
            assert self._count > 0
            self._count -= 1
            # Capture the result while still holding the lock; otherwise a
            # concurrent dec() could change it before we return.
            remaining = self._count
        return remaining

    @property
    def count(self):
        """Current value (unsynchronized snapshot)."""
        return self._count
class Promise:
    """
    This is a class that attempts to comply with the
    Promises/A+ specification and test suite:
    http://promises-aplus.github.io/promises-spec/
    """

    # These are the potential states of a promise
    PENDING = -1
    REJECTED = 0
    FULFILLED = 1

    def __init__(self):
        """
        Initialize the Promise into a pending state.
        """
        self._state = self.PENDING
        self._value = None      # fulfillment value, once FULFILLED
        self._reason = None     # rejection reason (an Exception), once REJECTED
        self._cb_lock = RLock()
        self._callbacks = []    # success handlers; set to None after settling
        self._errbacks = []     # failure handlers; set to None after settling
        self._event = Event()   # set once the promise settles either way

    @staticmethod
    def fulfilled(x):
        """Return a new Promise already fulfilled with *x*."""
        p = Promise()
        p.fulfill(x)
        return p

    @staticmethod
    def rejected(reason):
        """Return a new Promise already rejected with *reason*."""
        p = Promise()
        p.reject(reason)
        return p

    def fulfill(self, x):
        """
        Fulfill the promise with a given value.

        If *x* is itself a promise-like object, this promise adopts its
        eventual state instead of fulfilling with the object itself.
        """
        if self is x:
            raise TypeError("Cannot resolve promise with itself.")
        elif _isPromise(x):
            try:
                _promisify(x).done(self.fulfill, self.reject)
            except Exception as e:
                self.reject(e)
        else:
            self._fulfill(x)
        return self

    def _fulfill(self, value):
        """Transition PENDING -> FULFILLED and run any queued callbacks."""
        with self._cb_lock:
            if self._state != Promise.PENDING:
                # Settling is one-shot; later attempts are ignored.
                return
            self._value = value
            self._state = self.FULFILLED
            callbacks = self._callbacks
            # We will never call these callbacks again, so allow
            # them to be garbage collected. This is important since
            # they probably include closures which are binding variables
            # that might otherwise be garbage collected.
            #
            # Prevent future appending
            self._callbacks = None
            # Notify all waiting
            self._event.set()
            for callback in callbacks:
                try:
                    callback(value)
                except Exception:
                    logger.exception('Error while handling success callback %s.', callback)

    def reject(self, reason):
        """
        Reject this promise for a given reason.

        *reason* must be an Exception instance.  If no errback is
        registered, the reason is parked in the unhandled-error registry.
        """
        if not isinstance(reason, Exception):
            raise ValueError('Rejection reason should be instance of Exception (got %r)' % type(reason))
        with self._cb_lock:
            if self._state != Promise.PENDING:
                return self
            self._reason = reason
            self._state = self.REJECTED
            errbacks = self._errbacks
            # We will never call these errbacks again, so allow
            # them to be garbage collected. This is important since
            # they probably include closures which are binding variables
            # that might otherwise be garbage collected.
            #
            # Prevent future appending
            self._errbacks = None
            # Notify all waiting
            self._event.set()
            if not errbacks:
                defer_error(reason)
            else:
                for errback in errbacks:
                    try:
                        errback(reason)
                        remove_deferred_error(reason)
                    except Exception:
                        logger.exception('Exception while handling error callback %s.', errback)
        return self

    @property
    def isPending(self):
        """Indicate whether the Promise is still pending. Could be wrong the moment the function returns."""
        return self._state == self.PENDING

    @property
    def isFulfilled(self):
        """Indicate whether the Promise has been fulfilled. Could be wrong the moment the function returns."""
        return self._state == self.FULFILLED

    @property
    def isRejected(self):
        """Indicate whether the Promise has been rejected. Could be wrong the moment the function returns."""
        return self._state == self.REJECTED

    @property
    def value(self):
        # Fulfillment value; None while pending or rejected.
        return self._value

    @property
    def reason(self):
        # Rejection Exception; None while pending or fulfilled.
        return self._reason

    def get(self, timeout=None):
        """Get the value of the promise, waiting if necessary.

        Raises ValueError if still pending after the wait, or re-raises the
        rejection reason if the promise was rejected.
        """
        self.wait(timeout)
        if self._state == self.PENDING:
            raise ValueError("Value not available, promise is still pending")
        elif self._state == self.FULFILLED:
            return self._value
        else:
            raise self._reason

    def wait(self, timeout=None):
        """
        An implementation of the wait method which doesn't involve
        polling but instead utilizes a "real" synchronization
        scheme.  Returns True once the promise has settled.
        """
        return self._event.wait(timeout)

    def addCallback(self, f):
        """
        Add a callback for when this promis is fulfilled. Note that
        if you intend to use the value of the promise somehow in
        the callback, it is more convenient to use the 'then' method.
        """
        assert _isFunction(f)
        with self._cb_lock:
            if self._state == self.PENDING:
                self._callbacks.append(f)
                return self
        # This is a correct performance optimization in case of concurrency.
        # State can never change once it is not PENDING anymore and is thus safe to read
        # without acquiring the lock.
        if self._state == self.FULFILLED:
            f(self._value)
        else:
            pass
        return self

    def addErrback(self, f):
        """
        Add a callback for when this promis is rejected. Note that
        if you intend to use the rejection reason of the promise
        somehow in the callback, it is more convenient to use
        the 'then' method.
        """
        assert _isFunction(f)
        with self._cb_lock:
            if self._state == self.PENDING:
                self._errbacks.append(f)
                return self
        # This is a correct performance optimization in case of concurrency.
        # State can never change once it is not PENDING anymore and is thus safe to read
        # without acquiring the lock.
        if self._state == self.REJECTED:
            f(self._reason)
            remove_deferred_error(self._reason)
        else:
            pass
        return self

    def done(self, success=None, failure=None):
        """
        This method takes two optional arguments. The first argument
        is used if the "self promise" is fulfilled and the other is
        used if the "self promise" is rejected. In contrast to then,
        the return value of these callback is ignored and nothing is
        returned.
        """
        with self._cb_lock:
            if success is not None:
                self.addCallback(success)
            if failure is not None:
                self.addErrback(failure)

    def done_all(self, *handlers):
        """Call done() once per handler; each handler may be a function,
        a (success, failure) tuple, or a {'success':..., 'failure':...} dict.

        :type handlers: list[(object) -> object] | list[((object) -> object, (object) -> object)]
        """
        if len(handlers) == 0:
            return
        elif len(handlers) == 1 and isinstance(handlers[0], list):
            handlers = handlers[0]
        for handler in handlers:
            if isinstance(handler, tuple):
                s, f = handler
                self.done(s, f)
            elif isinstance(handler, dict):
                s = handler.get('success')
                f = handler.get('failure')
                self.done(s, f)
            else:
                self.done(success=handler)

    def then(self, success=None, failure=None):
        """
        This method takes two optional arguments. The first argument
        is used if the "self promise" is fulfilled and the other is
        used if the "self promise" is rejected. In either case, this
        method returns another promise that effectively represents
        the result of either the first of the second argument (in the
        case that the "self promise" is fulfilled or rejected,
        respectively).

        Each argument can be either:
        * None - Meaning no action is taken
        * A function - which will be called with either the value
        of the "self promise" or the reason for rejection of
        the "self promise". The function may return:
        * A value - which will be used to fulfill the promise
        returned by this method.
        * A promise - which, when fulfilled or rejected, will
        cascade its value or reason to the promise returned
        by this method.
        * A value - which will be assigned as either the value
        or the reason for the promise returned by this method
        when the "self promise" is either fulfilled or rejected,
        respectively.

        :type success: (object) -> object
        :type failure: (object) -> object
        :rtype : Promise
        """
        ret = Promise()

        def callAndFulfill(v):
            """
            A callback to be invoked if the "self promise"
            is fulfilled.
            """
            try:
                if _isFunction(success):
                    if isinstance(v, Args):
                        # Args marker: spread the tuple as positional args.
                        ret.fulfill(success(*v))
                    else:
                        ret.fulfill(success(v))
                else:
                    ret.fulfill(v)
            except Exception as e:
                import traceback, sys
                # Fix: sys.exc_type/exc_value/exc_traceback were removed in
                # Python 3; sys.exc_info() returns the same (type, value, tb).
                e.traceback = sys.exc_info()
                e.printme = lambda: logger.exception(''.join(traceback.format_exception(*e.traceback, limit=50)))
                ret.reject(e)

        def callAndReject(r):
            """
            A callback to be invoked if the "self promise"
            is rejected.
            """
            try:
                if _isFunction(failure):
                    # NOTE(review): the failure handler's return value is used
                    # to REJECT ret (Promises/A+ would fulfill); kept as-is to
                    # preserve this library's established behavior.
                    ret.reject(failure(r))
                    remove_deferred_error(r)
                else:
                    ret.reject(r)
            except Exception as e:
                import traceback, sys
                # Same Python 3 fix as in callAndFulfill above.
                e.traceback = sys.exc_info()
                e.printme = lambda: logger.exception(''.join(traceback.format_exception(*e.traceback, limit=50)))
                ret.reject(e)

        self.done(callAndFulfill, callAndReject)
        return ret

    def then_all(self, *handlers):
        """
        Utility function which calls 'then' for each handler provided. Handler can either
        be a function in which case it is used as success handler, or a tuple containing
        the success and the failure handler, where each of them could be None.

        :type handlers: list[(object) -> object] | list[((object) -> object, (object) -> object)]
        :param handlers
        :rtype : list[Promise]
        """
        if len(handlers) == 0:
            return []
        elif len(handlers) == 1 and isinstance(handlers[0], list):
            handlers = handlers[0]
        promises = []
        for handler in handlers:
            if isinstance(handler, tuple):
                s, f = handler
                promises.append(self.then(s, f))
            elif isinstance(handler, dict):
                s = handler.get('success')
                f = handler.get('failure')
                promises.append(self.then(s, f))
            else:
                promises.append(self.then(success=handler))
        return promises
def _isFunction(v):
"""
A utility function to determine if the specified
value is a function.
"""
return v is not None and hasattr(v, "__call__")
def _isPromise(obj):
    """
    Duck-typed check: a Promise instance, or any object exposing a
    callable 'done' or 'then' attribute.
    """
    if isinstance(obj, Promise):
        return True
    return any(_isFunction(getattr(obj, attr, None)) for attr in ("done", "then"))
def _promisify(obj):
    """Wrap a thenable in a Promise; real Promises pass through unchanged.

    Raises TypeError for objects with neither a 'done' nor a 'then' method.
    """
    if isinstance(obj, Promise):
        return obj
    # Check 'done' before 'then', matching the duck-typing in _isPromise.
    for attr in ("done", "then"):
        hook = getattr(obj, attr, None)
        if _isFunction(hook):
            p = Promise()
            hook(p.fulfill, p.reject)
            return p
    raise TypeError("Object is not a Promise like object.")
def listPromise(*promises):
    """
    A special function that takes a bunch of promises
    and turns them into a promise for a vector of values.
    In other words, this turns an list of promises for values
    into a promise for a list of values.
    """
    if len(promises) == 1 and isinstance(promises[0], list):
        promises = promises[0]
    if not promises:
        return Promise.fulfilled([])
    result = Promise()
    latch = CountdownLatch(len(promises))

    def on_success(_):
        # Last fulfillment wins the race to assemble the value list.
        if latch.dec() == 0:
            result.fulfill([p.value for p in promises])

    for p in promises:
        assert _isPromise(p)
        _promisify(p).done(on_success, result.reject)
    return result
def dictPromise(m):
    """
    A special function that takes a dictionary of promises
    and turns them into a promise for a dictionary of values.
    In other words, this turns an dictionary of promises for values
    into a promise for a dictionary of values.
    """
    if not m:
        return Promise.fulfilled({})
    result = Promise()
    latch = CountdownLatch(len(m))

    def on_success(_):
        # Last fulfillment wins the race to assemble the value dict.
        if latch.dec() == 0:
            result.fulfill({key: promise.value for key, promise in m.items()})

    for p in m.values():
        assert _isPromise(p)
        _promisify(p).done(on_success, result.reject)
    return result
def _process(p, f):
try:
val = f()
p.fulfill(val)
except Exception as e:
p.reject(e)
# Provide spawn(f): run f on a background worker and return a Promise for
# its result.  Preference order: gevent greenlet, then a shared thread pool,
# then one plain thread per call.  The `"spawn" not in dir()` checks work
# because dir() at module level lists the module's defined names.
try:
    import gevent

    def spawn(f):
        # Greenlet-based: the greenlet handle is intentionally unused.
        p = Promise()
        g = gevent.spawn(lambda: _process(p, f))
        return p
except ImportError:
    pass

if "spawn" not in dir():
    try:
        import concurrent.futures
        # Shared module-level pool; workers are reused across spawn() calls.
        executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)

        def spawn(f):
            p = Promise()
            executor.submit(_process, p, f)
            return p
    except ImportError:
        pass

if "spawn" not in dir():
    from threading import Thread

    def spawn(f):
        # Fallback: a fresh (non-daemon) thread per call.
        p = Promise()
        t = Thread(target=_process, args=(p, f))
        t.start()
        return p
|
bot_utils.py | import logging
import re
import threading
import time
from bot import download_dict, download_dict_lock
LOGGER = logging.getLogger(__name__)

# BitTorrent magnet URI (btih info-hash) matcher.
MAGNET_REGEX = r"magnet:\?xt=urn:btih:[a-zA-Z0-9]*"
# Loose http/https/ftp URL matcher; the scheme is optional.
URL_REGEX = r"(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+"
class MirrorStatus:
    # Human-readable status labels compared against download.status() and
    # rendered verbatim in the Telegram status message.
    STATUS_UPLOADING = "𝗨𝗽𝗹𝗼𝗮𝗱𝗶𝗻𝗚...⬆️"
    STATUS_DOWNLOADING = "𝗗𝗼𝘄𝗻𝗹𝗼𝗮𝗱𝗶𝗻𝗚...⬇️"
    STATUS_WAITING = "Queued...📝"
    STATUS_FAILED = "Failed 🚫. Cleaning download"
    STATUS_CANCELLED = "Cancelled ❎"
    STATUS_ARCHIVING = "Archiving...🔐"
    STATUS_EXTRACTING = "Extracting...📂"
# Progress bar geometry: 100% divided into 8%-wide cells -> 12 cells total.
PROGRESS_MAX_SIZE = 100 // 8
# Glyphs for a partially-filled cell, indexed by (percent % 8) - 1.
PROGRESS_INCOMPLETE = ['▓', '▓', '▓', '▓', '▓', '▓', '▓']
# Unit suffixes used by get_readable_file_size, in powers of 1024.
SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
class setInterval:
    """Invoke *action* every *interval* seconds on a background thread
    until cancel() is called."""

    def __init__(self, interval, action):
        self.interval = interval
        self.action = action
        self.stopEvent = threading.Event()
        worker = threading.Thread(target=self.__setInterval)
        worker.start()

    def __setInterval(self):
        # Schedule against absolute deadlines so timing drift does not
        # accumulate across iterations.
        deadline = time.time() + self.interval
        while not self.stopEvent.wait(deadline - time.time()):
            deadline += self.interval
            self.action()

    def cancel(self):
        """Stop the periodic invocations."""
        self.stopEvent.set()
def get_readable_file_size(size_in_bytes, units=None) -> str:
    """Convert a byte count into a human-readable string, e.g. 2048 -> '2.0KB'.

    size_in_bytes: byte count, or None (rendered as '0B').
    units: optional sequence of unit suffixes in powers of 1024; defaults to
        the module-level SIZE_UNITS.  Parameterized for reuse/testing;
        existing callers are unaffected.
    Returns 'File too large' when the value exceeds the largest unit.
    """
    if size_in_bytes is None:
        return '0B'
    if units is None:
        units = SIZE_UNITS
    index = 0
    while size_in_bytes >= 1024:
        size_in_bytes /= 1024
        index += 1
    try:
        return f'{round(size_in_bytes, 2)}{units[index]}'
    except IndexError:
        # Value overflowed the unit table (beyond PB with the default units).
        return 'File too large'
def getDownloadByGid(gid):
    """Return the tracked download with the given gid, skipping entries that
    are uploading, archiving, or extracting; None if not found."""
    excluded = (MirrorStatus.STATUS_UPLOADING,
                MirrorStatus.STATUS_ARCHIVING,
                MirrorStatus.STATUS_EXTRACTING)
    with download_dict_lock:
        for dl in download_dict.values():
            if dl.status() not in excluded and dl.gid() == gid:
                return dl
    return None
def get_progress_bar_string(status):
    """Render a bracketed text progress bar for a download status object."""
    completed = status.processed_bytes() / 8
    total = status.size_raw() / 8
    pct = 0 if total == 0 else round(completed * 100 / total)
    pct = min(max(pct, 0), 100)
    # Each full cell covers 8 percentage points; the remainder picks a
    # partial-cell glyph (index rem - 1, skipped when rem == 0).
    full_cells, rem = divmod(pct, 8)
    bar = '▓' * full_cells
    if rem - 1 >= 0:
        bar += PROGRESS_INCOMPLETE[rem - 1]
    bar += '░' * (PROGRESS_MAX_SIZE - full_cells)
    return f"[{bar}]"
def get_readable_message():
    """Build the HTML status message listing every tracked download."""
    with download_dict_lock:
        msg = "<b> ════ @FURYxThoR ════ </b>"
        for download in list(download_dict.values()):
            msg += f"\n📁 𝗙𝗶𝗹𝗲𝗡𝗮𝗺𝗲: <code>{download.name()}</code>"
            msg += f"\n {download.status()}"
            if download.status() != MirrorStatus.STATUS_ARCHIVING and download.status() != MirrorStatus.STATUS_EXTRACTING:
                msg += f"\n{get_progress_bar_string(download)} {download.progress()}" \
                       f"\n<b>○ Done ✓:</b> {get_readable_file_size(download.processed_bytes())} of {download.size()}" \
                       f"\n<b>○ Speed :</b> {download.speed()}, \n<b>○ ETA:</b> {download.eta()} "
            # Seeder/peer counts exist only for aria2 torrent downloads; any
            # other download type raises here and the line is skipped.
            try:
                msg += f"\n<b>○ Seeders:</b> {download.aria_download().num_seeders}" \
                       f" & <b>○ Peers :</b> {download.aria_download().connections}"
            except Exception:
                # Fix: narrowed from a bare 'except:' so KeyboardInterrupt
                # and SystemExit are no longer swallowed.
                pass
            if download.status() == MirrorStatus.STATUS_DOWNLOADING:
                msg += f"\n<b>○ ⛔</b>: <code> /cancel1 {download.gid()}</code>"
            msg += "\n\n"
        return msg
def get_readable_time(seconds: int) -> str:
    """Format a duration as e.g. '1d2h3m4s'; zero components are omitted
    except seconds, which always appear."""
    parts = []
    days, remainder = divmod(seconds, 86400)
    if int(days) != 0:
        parts.append(f'{int(days)}d')
    hours, remainder = divmod(remainder, 3600)
    if int(hours) != 0:
        parts.append(f'{int(hours)}h')
    minutes, secs = divmod(remainder, 60)
    if int(minutes) != 0:
        parts.append(f'{int(minutes)}m')
    parts.append(f'{int(secs)}s')
    return ''.join(parts)
def is_url(url: str):
    """Return True when *url* contains a URL-like substring."""
    return bool(re.search(URL_REGEX, url))
def is_magnet(url: str):
    """Return True when *url* contains a magnet URI."""
    return bool(re.search(MAGNET_REGEX, url))
def is_mega_link(url: str):
    # Plain substring test, not a strict hostname check.
    return "mega.nz" in url
def get_mega_link_type(url: str):
    """Classify a mega.nz link as 'folder' or 'file'.

    Check order matters: 'folder', then 'file', then the legacy '/#F!'
    folder marker; anything else defaults to 'file'.
    """
    if "folder" in url:
        return "folder"
    if "file" in url:
        return "file"
    if "/#F!" in url:
        return "folder"
    return "file"
def new_thread(fn):
    """Decorator: run *fn* in a new thread on each call.

    The wrapper returns the already-started threading.Thread so callers
    may join() it.
    """
    from functools import wraps

    @wraps(fn)  # fix: preserve fn's __name__/__doc__ on the wrapper
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
|
subproc_vec_env.py | # The MIT License
#
# Copyright (c) 2017 OpenAI (http://openai.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import multiprocessing as mp
import numpy as np
from .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars
def worker(remote, parent_remote, env_fn_wrappers):
    """Subprocess entry point: run a list of envs, serving commands from a pipe.

    remote: this process's end of the duplex pipe to the parent.
    parent_remote: the parent's end, closed here so only the parent holds it.
    env_fn_wrappers: CloudpickleWrapper whose .x is a list of env factories.
    """
    def step_env(env, action):
        ob, reward, done, info = env.step(action)
        if done:
            # Auto-reset finished episodes so the parent always receives a
            # valid next observation.
            ob = env.reset()
        return ob, reward, done, info

    parent_remote.close()
    envs = [env_fn_wrapper() for env_fn_wrapper in env_fn_wrappers.x]
    try:
        # Command loop: each message is a (cmd, data) tuple from the parent.
        while True:
            cmd, data = remote.recv()
            if cmd == 'step':
                # data carries one action per env managed by this worker.
                remote.send([step_env(env, action) for env, action in zip(envs, data)])
            elif cmd == 'reset':
                remote.send([env.reset() for env in envs])
            elif cmd == 'render':
                remote.send([env.render(mode='rgb_array') for env in envs])
            elif cmd == 'close':
                remote.close()
                break
            elif cmd == 'get_spaces_spec':
                remote.send((envs[0].observation_space, envs[0].action_space, envs[0].spec))
            else:
                raise NotImplementedError
    except KeyboardInterrupt:
        print('SubprocVecEnv worker: got KeyboardInterrupt')
    finally:
        for env in envs:
            env.close()
class SubprocVecEnv(VecEnv):
    """
    VecEnv that runs multiple environments in parallel in subproceses and communicates with them via pipes.
    Recommended to use when num_envs > 1 and step() can be a bottleneck.
    """
    def __init__(self, env_fns, spaces=None, context='spawn', in_series=1):
        """
        Arguments:

        env_fns: iterable of callables -  functions that create environments to run in subprocesses. Need to be cloud-pickleable
        in_series: number of environments to run in series in a single process
        (e.g. when len(env_fns) == 12 and in_series == 3, it will run 4 processes, each running 3 envs in series)
        """
        self.waiting = False   # True while a step_async awaits step_wait
        self.closed = False
        self.in_series = in_series
        nenvs = len(env_fns)
        assert nenvs % in_series == 0, "Number of envs must be divisible by number of envs to run in series"
        self.nremotes = nenvs // in_series
        # Split the env factories into one chunk per worker process.
        env_fns = np.array_split(env_fns, self.nremotes)
        ctx = mp.get_context(context)
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(self.nremotes)])
        self.ps = [ctx.Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            with clear_mpi_env_vars():
                p.start()
        # Close the parent's copies of the worker-side pipe ends.
        for remote in self.work_remotes:
            remote.close()
        # Query spaces/spec once from the first worker.
        self.remotes[0].send(('get_spaces_spec', None))
        observation_space, action_space, self.spec = self.remotes[0].recv()
        self.viewer = None
        VecEnv.__init__(self, nenvs, observation_space, action_space)

    def step_async(self, actions):
        # Dispatch one action chunk to each worker without waiting.
        self._assert_not_closed()
        actions = np.array_split(actions, self.nremotes)
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        # Collect the results of a previous step_async.
        self._assert_not_closed()
        results = [remote.recv() for remote in self.remotes]
        results = _flatten_list(results)
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return _flatten_obs(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        self._assert_not_closed()
        for remote in self.remotes:
            remote.send(('reset', None))
        obs = [remote.recv() for remote in self.remotes]
        obs = _flatten_list(obs)
        return _flatten_obs(obs)

    def close_extras(self):
        self.closed = True
        if self.waiting:
            # Drain pending step results so workers are not blocked on send.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()

    def get_images(self):
        # Return one rgb_array render per env.
        self._assert_not_closed()
        for pipe in self.remotes:
            pipe.send(('render', None))
        imgs = [pipe.recv() for pipe in self.remotes]
        imgs = _flatten_list(imgs)
        return imgs

    def _assert_not_closed(self):
        assert not self.closed, "Trying to operate on a SubprocVecEnv after calling close()"

    def __del__(self):
        if not self.closed:
            self.close()
def _flatten_obs(obs):
assert isinstance(obs, (list, tuple))
assert len(obs) > 0
if isinstance(obs[0], dict):
keys = obs[0].keys()
return {k: np.stack([o[k] for o in obs]) for k in keys}
else:
return np.stack(obs)
def _flatten_list(l):
assert isinstance(l, (list, tuple))
assert len(l) > 0
assert all([len(l_) > 0 for l_ in l])
return [l__ for l_ in l for l__ in l_]
|
run.py | #!/usr/bin/env python3
# SPDX-License-Identifier: MIT
##################
# Use this script to start the Cytom server
##################
import socket
import sys
import argparse
# import pickle
import os
import json
import subprocess
import psutil
import shlex
import time
import math
import datetime
import matplotlib.pyplot as plt
from multiprocessing import Process, Queue
from sklearn.linear_model import LinearRegression
import config
import build
import utils
class SingleGraphConfig:
    """One concrete benchmark configuration (a single combination of run,
    algorithm, graph, and tuning options), able to render its CLI argument
    string and a log-file name encoding every option."""

    def __init__(self, parent, run, algorithm, graph, insertion_batch, tile_batch,
                 traversal, dynamic_compaction, tile_distribution_strategy, in_memory_ingestion,
                 enable_interactive_algo, enable_writing_edges, enable_edge_apis,
                 enable_algorithm_reexecution, enable_reading_edges, enable_writing_results,
                 deletions, enable_selective_scheduling, delta):
        self.run = run
        self.parent = parent  # GraphConfigGenerator; provides .args
        self.algorithm = algorithm
        self.graph = graph
        self.insertion_batch = insertion_batch
        self.tile_batch = tile_batch
        self.traversal = traversal
        self.dynamic_compaction = dynamic_compaction
        self.tile_distribution_strategy = tile_distribution_strategy
        self.in_memory_ingestion = in_memory_ingestion
        self.enable_interactive_algo = enable_interactive_algo
        self.enable_writing_edges = enable_writing_edges
        self.enable_reading_edges = enable_reading_edges
        self.enable_edge_apis = enable_edge_apis
        self.enable_algorithm_reexecution = enable_algorithm_reexecution
        self.enable_writing_results = enable_writing_results
        self.deletions = deletions
        self.enable_selective_scheduling = enable_selective_scheduling
        self.delta = delta
        self.generateArguments()

    def generateArguments(self):
        """Populate self.arguments, the flag->value dict for the server CLI."""
        input_file = "~"
        generator = ""
        max_vertices = 0
        count_edges = 0
        # Graph metadata comes either from the delimited-file table or the
        # RMAT-generator table in config.
        if self.graph in config.GRAPH_SETTINGS_DELIM:
            max_vertices = config.GRAPH_SETTINGS_DELIM[self.graph]["vertices"]
            generator = "binary"
            input_file = config.INPUT_FILE[self.graph]["binary"]
            count_edges = config.GRAPH_SETTINGS_DELIM[self.graph]["edges"]
        elif self.graph in config.GRAPH_SETTINGS_RMAT:
            max_vertices = config.GRAPH_SETTINGS_RMAT[self.graph]["vertices"]
            count_edges = config.GRAPH_SETTINGS_RMAT[self.graph]["edges"]
            generator = "rmat"
        if self.parent.args.max_edges > 0:
            count_edges = min(count_edges, self.parent.args.max_edges)
        meta_tiles_per_dimension = 0
        tile_distribution_strategy = self.tile_distribution_strategy
        # "hierarchical-tile-distributor" is expressed as the plain
        # tile-distributor plus a meta-tiles-per-dimension count.
        if self.tile_distribution_strategy == "hierarchical-tile-distributor":
            tile_distribution_strategy = "tile-distributor"
            count_tiles = max_vertices / config.VERTICES_PER_TILE
            meta_tiles_per_dimension = min(utils.previous_power_of_2(count_tiles / 4),
                                           config.HIERACHICAL_TILE_DISTRIBUTION_MAX_META_TILES_PER_DIMENSION)
            meta_tiles_per_dimension = max(meta_tiles_per_dimension, 1)
        # enable_adaptive_scheduling = "1" if config.ALGORITHM_ENABLE_SELECTIVE_SCHEDULING[
        #     self.algorithm] else "0"
        enable_adaptive_scheduling = self.enable_selective_scheduling
        # With the algorithm disabled, all threads can serve ingestion.
        count_meta_tile_managers = 6
        count_edge_inserters = 6
        if self.parent.args.disable_algorithm:
            count_meta_tile_managers = 24
            count_edge_inserters = 10
        algo_start_vertex = config.DEFAULT_ALGORITHM_START_VERTEX
        if self.parent.args.algorithm_start_vertex != algo_start_vertex:
            algo_start_vertex = self.parent.args.algorithm_start_vertex
        # Threshold is a fraction of the edge count; -1.0 means "use the
        # configured minimum edge count" instead.
        algo_start = self.parent.args.algorithm_threshold * count_edges
        if self.parent.args.algorithm_threshold == -1.0:
            algo_start = config.MIN_COUNT_EDGES
        algo_start = int(round(algo_start))
        self.arguments = {
            "max-vertices": max_vertices,
            "vertices-per-tile": config.VERTICES_PER_TILE,
            "batch-insertion-count": self.insertion_batch,
            "meta-tile-manager-count": count_meta_tile_managers,
            "count-edges": count_edges,
            "edge-inserters-count": count_edge_inserters,
            "use-dynamic-compaction": self.dynamic_compaction,
            "count-parallel-compaction": 48,
            "enable-edge-apis": self.enable_edge_apis,
            "count-algorithm-executors": 24,
            "algorithm-tile-batching": self.tile_batch,
            "algorithm-iterations": self.parent.args.max_iterations,
            "path-perf-events": config.PERF_EVENTS_ROOT,
            "enable-perf-event-collection": "1" if self.parent.args.collect_perf_events else "0",
            "enable-static-algorithm-before-compaction": "0",
            "enable-static-algorithm-after-compaction": "0",
            "enable-rewrite-ids": config.DEFAULT_REWRITE_IDS,
            "traversal": self.traversal,
            "generator": generator,
            "binary-input-file": input_file,
            "algorithm": self.algorithm,
            "deletion-percentage": self.deletions,
            "enable-interactive-algorithm": self.enable_interactive_algo,
            "enable-lazy-algorithm": "0",
            "count-algorithm-appliers": 24,
            "enable-adaptive-scheduling": enable_adaptive_scheduling,
            "tile-distribution-strategy": tile_distribution_strategy,
            "path-to-edges-output": config.PATH_EDGES_OUTPUT,
            "enable-writing-edges": self.enable_writing_edges,
            "enable-in-memory-ingestion": self.in_memory_ingestion,
            "td-count-meta-tiles-per-dimension": meta_tiles_per_dimension,
            "algo-start-id": algo_start_vertex,
            "enable-reading-edges": self.enable_reading_edges,
            "path-to-edges-input": config.PATH_EDGES_INPUT,
            "enable-algorithm-reexecution": self.enable_algorithm_reexecution,
            "path-to-results": config.PATH_RESULTS,
            "enable-write-results": self.enable_writing_results,
            "delta": self.delta,
            "start-algorithm-count": algo_start,
            "output-detailed-tile-stats": "1" if self.parent.args.detailed_tile_stats else "0",
        }

    def generateLogFileName(self):
        """Build the log path under config.LOG_ROOT encoding every option."""
        filename = "log"
        if self.parent.args.prefix is not None:
            filename += "_" + self.parent.args.prefix
        tile_distribution_strategy = self.arguments["tile-distribution-strategy"]
        # A tile-distributor without meta tiles is logged as "flat".
        if tile_distribution_strategy == "tile-distributor":
            if self.arguments["td-count-meta-tiles-per-dimension"] == 0:
                tile_distribution_strategy = "flat-tile-distributor"
        arg_list = [
            self.graph,
            self.arguments["algorithm"],
            self.arguments["batch-insertion-count"],
            self.arguments["use-dynamic-compaction"],
            self.arguments["algorithm-tile-batching"],
            self.arguments["traversal"],
            tile_distribution_strategy,
            self.arguments["deletion-percentage"],
            self.arguments["enable-interactive-algorithm"],
            self.arguments["enable-in-memory-ingestion"],
            self.arguments["enable-writing-edges"],
            self.arguments["enable-reading-edges"],
            self.arguments["enable-edge-apis"],
            self.arguments["enable-algorithm-reexecution"],
            self.arguments["enable-write-results"],
            self.arguments["enable-adaptive-scheduling"],
            self.arguments["delta"],
            self.run
        ]
        if self.parent.args.perf_stat:
            arg_list = ["perf_stat"] + arg_list
        for argument in arg_list:
            filename += "_" + str(argument)
        filename += ".txt"
        return os.path.join(config.LOG_ROOT, filename)

    def genConfigStr(self):
        """Render self.arguments as a '--key value ...' CLI string.

        Fix: dropped the unused log_filename and args_count locals the
        original computed (generateLogFileName() has no side effects).
        """
        return "".join("--%s %s " % (key, value)
                       for key, value in self.arguments.items())
class GraphConfigGenerator:
    """Expands the option lists in ConfigLists into one SingleGraphConfig per
    combination (full cross product)."""

    def __init__(self, args):
        self.args = args
        self.configs = ConfigLists(self.args)
        self.configs.generateConfigs()

    def genConfigs(self):
        """Return the list of all SingleGraphConfig combinations.

        itertools.product replaces the original 18-deep nest of for loops;
        its iteration order (rightmost factor varies fastest) is identical
        to nested loops, so configs are produced in the same order.
        """
        from itertools import product
        c = self.configs
        return [
            SingleGraphConfig(self, run, algorithm, graph, insertion_batch,
                              tile_batch, traversal, dynamic_compaction,
                              tile_distribution_strategy, in_memory_ingestion,
                              interactive_algo, enable_write_edges,
                              enable_edge_api, enable_algorithm_reexecution,
                              enable_reading_edges, enable_writing_results,
                              deletions, selective_scheduling, delta)
            for (run, algorithm, graph, insertion_batch, tile_batch, traversal,
                 dynamic_compaction, tile_distribution_strategy,
                 in_memory_ingestion, interactive_algo, enable_write_edges,
                 enable_edge_api, enable_algorithm_reexecution,
                 enable_reading_edges, enable_writing_results, deletions,
                 selective_scheduling, delta) in product(
                c.runs, c.algorithms, c.graphs, c.insertion_batches,
                c.tile_batches, c.traversals, c.dynamic_compactions,
                c.tile_distribution_strategies, c.in_memory_ingestion,
                c.interactive_algo, c.enable_write_edges, c.enable_edge_apis,
                c.enable_algorithm_reexecution, c.enable_reading_edges,
                c.enable_writing_results, c.deletions,
                c.selective_scheduling, c.delta)
        ]
class ConfigLists:
    """Expands parsed CLI arguments into one value list per benchmark option.

    The cross product of these lists (see countConfigs) defines the full set
    of benchmark runs; GraphConfigGenerator consumes them.
    """
    def __init__(self, args):
        self.args = args
    def generateConfigs(self):
        """Populate every per-option list attribute from self.args.

        Batch-size options accept a single value, a comma list ("a,b,c"),
        or a doubling range ("min..max").
        """
        # submit new jobs - generate all arguments
        if self.args.algorithm is not None:
            self.algorithms = self.args.algorithm.split(",")
            self.algorithms = [x.strip() for x in self.algorithms]
        else:
            self.algorithms = config.ALGORITHMS
        # assign a default algorithm when the algorithms are disabled.
        if self.args.disable_algorithm:
            self.algorithms = ["bfs"]
        if self.args.graph is not None:
            self.graphs = self.args.graph.split(",")
            self.graphs = [x.strip() for x in self.graphs]
        else:
            self.graphs = config.GRAPHS
        if self.args.insertion_batch_size is not None:
            self.insertion_batches = []
            # "min..max" -> doubling range; "a,b" -> explicit list;
            # otherwise a single value (min == max).
            if isinstance(self.args.insertion_batch_size, str) and ".." in self.args.insertion_batch_size:
                split = self.args.insertion_batch_size.split("..")
                min_batch_size = int(split[0])
                max_batch_size = int(split[1])
            elif isinstance(self.args.insertion_batch_size,
                            str) and "," in self.args.insertion_batch_size:
                split = self.args.insertion_batch_size.split(",")
                self.insertion_batches = [int(x.strip()) for x in split]
            else:
                min_batch_size = int(self.args.insertion_batch_size)
                max_batch_size = int(self.args.insertion_batch_size)
            # Expand (min, max) by doubling unless the comma-list branch
            # already filled the batches.
            if len(self.insertion_batches) == 0:
                i = min_batch_size
                while i <= max_batch_size:
                    self.insertion_batches.append(i)
                    i = i * 2
        else:
            self.insertion_batches = [config.DEFAULT_INSERTION_BATCH_SIZE]
        if self.args.tile_batch_size is not None:
            # Same doubling-range convention as insertion batches
            # (no comma-list form here).
            if isinstance(self.args.tile_batch_size, str) and ".." in self.args.tile_batch_size:
                split = self.args.tile_batch_size.split("..")
                min_batch_size = int(split[0])
                max_batch_size = int(split[1])
            else:
                min_batch_size = int(self.args.tile_batch_size)
                max_batch_size = int(self.args.tile_batch_size)
            i = min_batch_size
            self.tile_batches = []
            while i <= max_batch_size:
                self.tile_batches.append(i)
                i *= 2
        else:
            self.tile_batches = [config.DEFAULT_TILE_BATCH_SIZE]
        # Boolean toggles: enabling a flag sweeps both settings ("0"/"1")
        # or forces the feature on; otherwise the config default is used.
        if self.args.enable_edge_apis:
            self.enable_edge_apis = ["0", "1"]
        else:
            self.enable_edge_apis = [config.DEFAULT_ENABLE_EDGE_APIS]
        if self.args.enable_algorithm_reexecution:
            self.enable_algorithm_reexecution = ["1"]
        else:
            self.enable_algorithm_reexecution = [config.DEFAULT_ALGORITHM_REEXECUTION]
        if self.args.enable_traversals:
            self.traversals = config.TRAVERSALS
        else:
            self.traversals = [config.DEFAULT_TRAVERSAL]
        if self.args.dynamic_compaction is not None:
            self.dynamic_compactions = ["0", "1"]
        else:
            self.dynamic_compactions = [config.DEFAULT_DYNAMIC_COMPACTION]
        if self.args.tile_distribution_strategy is not None:
            self.tile_distribution_strategies = config.TILE_DISTRIBUTION_STRATEGIES
        else:
            self.tile_distribution_strategies = [config.DEFAULT_TILE_DISTRIBUTION_STRATEGY]
        if self.args.in_memory_ingestion:
            self.in_memory_ingestion = ["1"]
        else:
            self.in_memory_ingestion = [config.DEFAULT_IN_MEMORY_INGESTION]
        if self.args.interactive_algo is not None:
            self.interactive_algo = ["0", "1"]
        else:
            self.interactive_algo = [config.DEFAULT_INTERACTIVE_ALGO]
        # Interactive mode is disabled whenever algorithms themselves are.
        if self.args.disable_algorithm:
            self.interactive_algo = ["0"]
        if self.args.enable_write_edges:
            self.enable_write_edges = ["1"]
        else:
            self.enable_write_edges = [config.DEFAULT_WRITE_EDGES]
        if self.args.read_edges is not None:
            self.enable_reading_edges = [self.args.read_edges]
        else:
            self.enable_reading_edges = [config.DEFAULT_READ_EDGES]
        if self.args.enable_writing_results:
            self.enable_writing_results = ["1"]
        else:
            self.enable_writing_results = [config.DEFAULT_WRITE_RESULTS]
        # Deletion ratios to sweep when deletions are enabled.
        if self.args.enable_deletions:
            self.deletions = ["0.01", "0.05", "0.10"]
        else:
            self.deletions = ["0.00"]
        if self.args.override_selective_scheduling:
            self.selective_scheduling = ["0", "1"]
        elif self.args.disable_selective_scheduling:
            self.selective_scheduling = ["0"]
        else:
            self.selective_scheduling = ["1"]
        if self.args.enable_delta:
            self.delta = config.DELTAS
        else:
            self.delta = [config.DEFAULT_DELTA]
        self.runs = [i for i in range(int(self.args.runs))]
    def countConfigs(self):
        """Return the size of the cross product of all option lists."""
        return len(self.runs) * len(self.algorithms) * len(self.graphs) * len(
            self.insertion_batches) * len(self.tile_batches) * len(self.traversals) * len(
            self.dynamic_compactions) * len(self.tile_distribution_strategies) * len(
            self.in_memory_ingestion) * len(self.interactive_algo) * len(self.enable_write_edges) * len(
            self.enable_reading_edges) * len(self.enable_edge_apis) * len(
            self.enable_algorithm_reexecution) * len(self.enable_writing_results) * len(
            self.deletions) * len(self.selective_scheduling) * len(self.delta)
def sched_func(algo_q, stat_q):
    """Scheduler process entry point: drain algo_q into a Scheduler and run it.

    A sentinel value of 0 on algo_q signals that no more algorithm batches
    will arrive; the loop then finishes outstanding work and exits.

    Args:
        algo_q: multiprocessing queue of SingleGraphConfig lists (or 0).
        stat_q: multiprocessing queue for profile / measurement feedback.
    """
    from queue import Empty

    scheduler = Scheduler(stat_q)
    done = False
    while not done:
        # Drain every pending batch before scheduling; block up to 1s for
        # the first one so the loop does not spin when the queue is idle.
        # NOTE(review): the original used a bare `except:` here, which also
        # silenced errors from add_algos; only the expected Empty timeout
        # is caught now.
        while True:
            try:
                new_algos = algo_q.get(timeout=1)
            except Empty:
                break
            if new_algos == 0:
                done = True  # sentinel: producer is finished
            else:
                scheduler.add_algos(new_algos)
        if scheduler.num_of_unexec_algos > 0:
            scheduler.schedule()
        else:
            time.sleep(1)
    scheduler.exit()
class Scheduler():
    """Packs graph-algorithm runs into concurrent tasks under a CPU budget.

    Per-(graph, algorithm) profiled CPU usage predicts the cost of a group
    of algorithms; once real measurements arrive on stat_q, a linear model
    mapping (sum of profiled usage) -> (observed usage) refines predictions.
    """

    def __init__(self, stat_q):
        self.unfit = True               # True until the model has been fitted
        self.model_update = False       # set once new samples refit the model
        self.model_X = config.model_X   # persisted training inputs
        self.model_y = config.model_y   # persisted training targets
        # read profiler data from config: <graph> -> <algo> -> cpu/time
        self.profile_data = config.PROFILE_DATA
        self.task_id = 0
        self.dataset_algo_map = dict()  # unexecuted algos, <dataset, [algos]>
        self.num_of_unexec_algos = 0
        self.stat_q = stat_q
        self.predict_cur_cpu_usage = 0
        self.alive_tasks = dict()       # <task_id, (process, predict_cpu_usage)>
        # Single feature: sum of profiled CPU usage of the grouped algorithms.
        self.model = LinearRegression()
        if self.model_X and self.model_y:
            self.model.fit(self.model_X, self.model_y)
            self.unfit = False

    def add_algos(self, algos):
        """Queue SingleGraphConfig entries, grouped by their graph/dataset."""
        for algo in algos:
            self.dataset_algo_map.setdefault(algo.graph, []).append(algo)
            self.num_of_unexec_algos += 1

    def check_stat_q(self):
        """Drain stat_q: store profiling samples, refit on true measurements.

        Queue entries are either (True, algorithm, graph, cpu_usage, time)
        for profiling runs, or (False, sum_profile_cpu_usage, cpu_usage)
        samples used to fit the prediction model.
        """
        from queue import Empty
        X = []
        y = []
        # NOTE(review): the original bare `except:` also hid malformed-entry
        # errors; only the expected Empty condition is caught now.
        while True:
            try:
                new_data = self.stat_q.get_nowait()
            except Empty:
                break
            profile_single = new_data[0]
            if profile_single:
                algorithm = new_data[1]
                graph = new_data[2]
                result_cpu_usage = new_data[3]
                running_time = new_data[4]
                self.saveProfileData(algorithm, graph, result_cpu_usage, running_time)
            else:
                sum_profile_cpu_usage = new_data[1]
                result_cpu_usage = new_data[2]
                X.append([sum_profile_cpu_usage])
                y.append(result_cpu_usage)
        if X and y:
            self.fitTrueData(X, y)

    def schedule(self):
        """Launch as many queued algorithm groups as the CPU budget allows.

        Algos are sorted heaviest-first per dataset; `level` shrinks the
        group stride each round so progressively smaller groups are tried
        until nothing fits or the machine is near saturation.
        """
        self.check_stat_q()
        # Sort each dataset's algos by profiled CPU usage, heaviest first.
        for dataset in self.dataset_algo_map:
            algos = self.dataset_algo_map[dataset]
            algo_names = [algo.algorithm for algo in algos]
            profile_cpu_usage_list = self.get_profile_cpu_usage(algo_names, dataset)
            self.dataset_algo_map[dataset] = [
                algo for _, algo in sorted(zip(profile_cpu_usage_list, algos),
                                           key=lambda pair: pair[0],
                                           reverse=True)]
        continue_sched = True
        level = 0  # each dataset group has stride (len - level) this round
        while continue_sched:
            untouch_dataset = 0
            for dataset in self.dataset_algo_map:
                self.update_predict_cur_cpu_usage()
                runtime_cpu_usage = psutil.cpu_percent(interval=0.5)
                current_cpu_usage = max(self.predict_cur_cpu_usage, runtime_cpu_usage)
                # Stop entirely when done or the machine is near saturation.
                if self.num_of_unexec_algos == 0 or current_cpu_usage >= 90.0:
                    continue_sched = False
                    break
                if not self.dataset_algo_map[dataset]:
                    # no algos of this dataset left
                    untouch_dataset += 1
                    continue
                dataset_algos = self.dataset_algo_map[dataset]
                num_of_dataset_algos = len(dataset_algos)
                stride = num_of_dataset_algos - level
                if stride <= 0:
                    untouch_dataset += 1
                    continue
                exec_idxes = []
                start_idx = 0
                while start_idx < num_of_dataset_algos:
                    end_idx = min(start_idx + stride, num_of_dataset_algos)
                    sched_algos = dataset_algos[start_idx:end_idx]
                    avail, sum_profile_cpu_usage, predict_cpu_usage = \
                        self.check_cpu_usage(sched_algos, dataset)
                    if avail:
                        algo_names_list = [algo.algorithm for algo in sched_algos]
                        task_config_strs = [algo.genConfigStr() for algo in sched_algos]
                        # spawn a new process to execute the group
                        # TODO: assign a distinct logfile to each task
                        p = Process(target=execute_task,
                                    args=(self.stat_q, self.task_id, algo_names_list,
                                          task_config_strs, sum_profile_cpu_usage,
                                          predict_cpu_usage, "log_tmp.txt", dataset))
                        print("-------Now launch task", self.task_id, ":", algo_names_list, "on", dataset)
                        p.start()
                        # book-keep predicted budget and remaining work
                        self.alive_tasks[self.task_id] = (p, predict_cpu_usage)
                        self.predict_cur_cpu_usage += predict_cpu_usage
                        exec_idxes += list(range(start_idx, end_idx))
                        self.num_of_unexec_algos -= len(sched_algos)
                        self.task_id += 1
                        start_idx += stride
                    else:
                        start_idx += 1
                self.dataset_algo_map[dataset] = [
                    self.dataset_algo_map[dataset][idx]
                    for idx in range(num_of_dataset_algos)
                    if idx not in exec_idxes]
            if untouch_dataset == len(self.dataset_algo_map):
                continue_sched = False
            level += 1

    def check_cpu_usage(self, algos, graph):
        """Return (fits_in_budget, sum_profiled_usage, predicted_usage).

        Before the model is fitted, the raw sum of profiled usage is used
        as the prediction.
        """
        algo_names = [algo.algorithm for algo in algos]
        profile_cpu_usage_list = self.get_profile_cpu_usage(algo_names, graph)
        sum_profile_cpu_usage = sum(profile_cpu_usage_list)
        if self.unfit:
            predict_cpu_usage = sum_profile_cpu_usage
        else:
            predict_cpu_usage = self.model.predict([[sum_profile_cpu_usage]])[0]
        self.update_predict_cur_cpu_usage()
        current_cpu_usage = max(self.predict_cur_cpu_usage,
                                psutil.cpu_percent(interval=0.5))
        fits = current_cpu_usage + predict_cpu_usage <= 100.0
        return fits, sum_profile_cpu_usage, predict_cpu_usage

    def update_predict_cur_cpu_usage(self):
        """Reclaim predicted CPU budget from tasks that have finished."""
        for task_id, (proc, usage) in list(self.alive_tasks.items()):
            if proc.is_alive():
                proc.join(timeout=0)  # reap if it just exited
            if not proc.is_alive():
                self.predict_cur_cpu_usage -= usage
                del self.alive_tasks[task_id]

    def get_profile_cpu_usage(self, algo_names, graph):
        """Profiled CPU usage per algorithm on `graph`; unknown algorithms
        fall back to the graph's average profiled usage."""
        profile_graph_data = self.profile_data[graph]
        avg_graph_cpu_usage = self.get_avg_graph_cpu_usage(graph)
        profile_cpu_usage_list = []
        for algo in algo_names:
            if algo in profile_graph_data:
                profile_cpu_usage_list.append(profile_graph_data[algo]["cpu_usage"])
            else:
                profile_cpu_usage_list.append(avg_graph_cpu_usage)
        return profile_cpu_usage_list

    def get_avg_graph_cpu_usage(self, graph):
        """Mean profiled CPU usage across all algorithms profiled on `graph`."""
        profile_graph_data = self.profile_data[graph]
        sum_graph_cpu_usage = 0
        for algorithm in profile_graph_data:
            sum_graph_cpu_usage += profile_graph_data[algorithm]["cpu_usage"]
        return sum_graph_cpu_usage / len(profile_graph_data)

    def fitTrueData(self, X, y):
        """Append observed samples and refit the usage-prediction model."""
        self.model_X = self.model_X + X
        self.model_y = self.model_y + y
        # TODO: batch fit
        self.model.fit(self.model_X, self.model_y)
        self.model_update = True
        if self.unfit:
            self.unfit = False

    def saveProfileData(self, algorithm, graph, result_cpu_usage, running_time):
        """Record a profiling run's CPU usage and runtime for (graph, algorithm)."""
        if graph not in self.profile_data:
            self.profile_data[graph] = {}
        if algorithm not in self.profile_data[graph]:
            self.profile_data[graph][algorithm] = {}
        self.profile_data[graph][algorithm]["cpu_usage"] = result_cpu_usage
        self.profile_data[graph][algorithm]["running_time"] = running_time

    def exit(self):
        """Finish remaining work, join children, persist state into config.py."""
        while self.num_of_unexec_algos > 0:
            self.schedule()
            time.sleep(1)
        # join all still-alive task processes
        for task in self.alive_tasks:
            self.alive_tasks[task][0].join()
        # pull any final measurements off the queue
        self.check_stat_q()
        # Rewrite the persisted PROFILE_DATA / model_X / model_y lines of
        # config.py in place so the next run starts from this state.
        profile_line = "PROFILE_DATA = " + json.dumps(self.profile_data) + "\n"
        model_X_line = "model_X = " + json.dumps(self.model_X) + "\n"
        model_y_line = "model_y = " + json.dumps(self.model_y) + "\n"
        with open("config.py", "r") as f:
            lines = f.readlines()
        with open("config.py", "w") as f:
            for line in lines:
                if "PROFILE_DATA" in line:
                    f.write(profile_line)
                elif "model_X" in line:
                    f.write(model_X_line)
                elif "model_y" in line:
                    f.write(model_y_line)
                else:
                    f.write(line)
class AlgorithmStore():
    """Manages the set of compiled algorithm bitcode files and mirrors the
    algorithm list back into config.py's ALGORITHMS on exit."""

    def __init__(self):
        self.algorithms = config.ALGORITHMS

    def connect(self, args):
        """Dispatch an algorithm-store CLI action (add/delete/clear/list)."""
        if args.add:
            self.add(args)
        elif args.delete:
            self.delete(args)
        elif args.clear:
            self.clear(args)
        elif args.list:
            print("\n".join(self.algorithms))
        else:
            print("-----------[AlgorithmStore]: Please specify action (add/delete/list)")

    def add(self, args):
        """Compile each named source file to bitcode and register its name."""
        if not args.name:
            print("-----------[AlgorithmStore]: Please specify name(s) of the algorithm(s)")
            return
        if not args.path:
            print("-----------[AlgorithmStore]: Please specify path(s) of the algorithm(s)")
            return
        if len(args.name) != len(args.path):
            print("-----------[AlgorithmStore]: Oops! The number of names does not equal to the number of paths.")
            return
        for name, path in zip(args.name, args.path):
            # compile algorithm to bitcode and store
            subprocess.run(["make", "src_to_bc",
                            'input="' + path + '"',
                            'output="' + name + '.bc"'],
                           stdout=subprocess.PIPE, text=True, cwd="../../algorithms")
            # TODO: what if make fails?
            # add tag to config.py
            if name not in self.algorithms:
                self.algorithms.append(name)
                print("-----------[AlgorithmStore]: Successfully add " + name)
            else:
                print("-----------[AlgorithmStore]: Successfully overwrite " + name)

    def delete(self, args):
        """Remove the named algorithms' bitcode files and registry entries."""
        # TODO: check if file exist
        if not args.name:
            print("-----------[AlgorithmStore]: Please specify name(s) of the algorithm(s)")
            return
        # BUG FIX: the original joined all filenames into ONE argv element,
        # so `rm` was asked to remove a single file literally named
        # "a.bc b.bc ..." and always failed for multiple names. Pass each
        # filename as its own argument instead.
        algo_bc_arr = ['{0}.bc'.format(name) for name in args.name]
        subprocess.run(["rm"] + algo_bc_arr, stdout=subprocess.PIPE, text=True, cwd="../../algorithms")
        # delete tag(s) in config.py
        delete_algos = []
        for name in args.name:
            if name not in self.algorithms:
                print("-----------[AlgorithmStore]: Oops!" + name + "is not a valid algorithm name")
            else:
                delete_algos.append(name)
        self.algorithms = [algo for algo in self.algorithms if algo not in delete_algos]
        delete_algos_str = " ".join(delete_algos)
        print("-----------[AlgorithmStore]: Successfully delete " + delete_algos_str)

    def clear(self, args):
        """Delete all compiled algorithm bitcode via `make clean`."""
        subprocess.run(["make", "clean"], stdout=subprocess.PIPE, text=True, cwd="../../algorithms")
        self.algorithms = []
        print("-----------[AlgorithmStore]: Successfully delete all algorithms")

    def exit(self):
        """Persist the current algorithm list back into config.py."""
        algorithm_line = "ALGORITHMS = " + json.dumps(self.algorithms) + "\n"
        with open("config.py", "r") as f:
            lines = f.readlines()
        with open("config.py", "w") as f:
            for line in lines:
                if "ALGORITHMS" in line:
                    f.write(algorithm_line)
                else:
                    f.write(line)
def clearCaches():
    """Re-run the machine startup-config script between benchmark runs.

    Requires passwordless sudo; the command is echoed before execution.
    """
    command = "sudo ./startup-config"
    print(command)
    os.system(command)
def execute_task(stat_q, task_id, algo_names, configs, sum_profile_cpu_usage, predict_cpu_usage, logfile, graph, profile_single=False):
    '''
    Link, compile and run a group of algorithms as one executable, sampling
    its CPU usage until it exits, then report results back on stat_q.

    args:
        algo_names: list of algorithms to be executed
        configs: list of configs (as str) of algorithms to be executed
        profile_single: when True this is a profiling run of one algorithm
            and its averaged stats are recorded as profile data.
    '''
    algo_names_set = set(algo_names)
    # link all used algorithms with main.bc:
    #   llvm-link main.bc <algo>.bc ... -o link_<task>.bc
    link_args = ["llvm-link", config.RELEASE_MAIN]
    link_args += ['{0}.bc'.format(algo) for algo in algo_names_set]
    link_output = "link_" + str(task_id) + ".bc"
    link_args += ["-o", link_output]
    subprocess.run(link_args, stdout=subprocess.PIPE, text=True, cwd="../../algorithms")
    # compose jobs in main func via the ParallizeAlgorithm opt pass
    pass_output = "task_" + str(task_id) + ".bc"
    jobs_list = ["-jobs"] * (len(algo_names) * 2 - 1)
    jobs_list[0::2] = algo_names   # interleave: -jobs a -jobs b ...
    jobs_list.insert(0, "-jobs")
    subprocess.run(["opt", "-load",
                    "../build/Release-x86_64/lib/libParallizeAlgorithm.so",
                    "-composeJobs", link_output, "-o", pass_output] + jobs_list,
                   stdout=subprocess.PIPE, text=True, cwd="../../algorithms")
    # TODO: delete link_idx.bc
    # bitcode -> native executable
    subprocess.run(["make", "bc_to_exe",
                    "link_bc=" + link_output,
                    "task_bc=" + pass_output,
                    "task_s=task_" + str(task_id) + ".s",
                    "task_exe=exe_" + str(task_id)],
                   stdout=subprocess.PIPE, text=True, cwd="../../algorithms")
    # compose cmd with jobs
    executable = config.EXE + "_" + str(task_id)
    cmd = executable + " " + " ".join(configs)
    logfile = os.path.join(config.LOG_ROOT, logfile)
    # TODO: fix logfile by process.communicate()
    args = shlex.split(cmd)
    cpu_count = psutil.cpu_count()
    cpu_usage_sum = 0.0
    time_axis = []
    cpu_axis = []
    start = time.time()
    proc = psutil.Popen(args, stdout=subprocess.PIPE)
    # Sample the task's CPU usage (normalized per core) every 0.5s until it exits.
    while proc.is_running() and proc.status() != psutil.STATUS_ZOMBIE:
        cur_cpu_usage = proc.cpu_percent(interval=0.5) / cpu_count
        cpu_usage_sum += cur_cpu_usage
        time_axis.append(datetime.datetime.now())
        cpu_axis.append(cur_cpu_usage)
    end = time.time()
    running_time = end - start
    # BUG FIX: guard against ZeroDivisionError when the process exits before
    # the first 0.5s sampling interval completes (cpu_axis empty).
    cpu_usage_avg = cpu_usage_sum / len(cpu_axis) if cpu_axis else 0.0
    print("------------------------------------------")
    # (The original printed an identical line in both profile branches.)
    print("Task", task_id, "has finished: ", algo_names, "on", graph)
    # rm executable
    subprocess.run(["rm", executable], stdout=subprocess.PIPE, text=True)
    # Report back: profiling runs carry (algo, graph, usage, time); scheduled
    # runs carry (sum of profiled usage, observed usage) for model fitting.
    if profile_single:
        stat_q.put([profile_single, algo_names[0], graph, cpu_usage_avg, running_time])
    else:
        stat_q.put([profile_single, sum_profile_cpu_usage, cpu_usage_avg])
class CytomManager():
    """Owns the scheduler child process, the work/stat queues and the
    algorithm store; entry object for every run mode."""
    def __init__(self):
        self.algo_q = Queue()  # for algos to schedule
        self.stat_q = Queue()  # for profile and SGD data
        self.algorithm_store = AlgorithmStore()
        # launch scheduler process
        self.scheduler_process = Process(target=sched_func, args=(self.algo_q, self.stat_q))
        self.scheduler_process.start()
        self.launched_processes = []
        self.job_submitted_time = []
        # check build & LOGROOT
        self.setup()
    def setup(self):
        """Prepare the environment (currently only ensures LOG_ROOT exists)."""
        # check build
        # make_dir = config.SRC_ROOT
        # make_cmd = "make;"
        # cmd = "cd %s;%s" % (make_dir, make_cmd)
        # os.system(cmd)
        # mkdir for LOGs
        utils.mkdirp(config.LOG_ROOT)
    def submit_algos(self, algos):
        '''
        Hand a batch of work to the scheduler process via algo_q.

        Args:
            'algos': a list of SingleGraphConfig
        '''
        self.job_submitted_time.append(datetime.datetime.now())
        self.algo_q.put(algos)
    def connect_algorithm_store(self, args):
        """Forward an algorithm-store CLI action to the store."""
        self.algorithm_store.connect(args)
    def profile_all(self, algos):
        '''
        Run every configuration sequentially in-process, recording each
        run's stats on stat_q as single-algorithm profile data.

        Args:
            'jobs': a list of SingleGraphConfig
        '''
        task_id = 0
        for algo in algos:
            algo_names = [algo.algorithm]
            configs = [algo.genConfigStr()]
            execute_task(self.stat_q, task_id, algo_names, configs, 0, 0, "log_tmp.txt", algo.graph, profile_single=True)
            task_id += 1
    def exit(self):
        """Persist the algorithm store, send the 0 sentinel, join the scheduler."""
        self.algorithm_store.exit()
        self.algo_q.put(0)
        self.scheduler_process.join()
def run_cytom(args):
    """Entry point of the workload child process: dispatch on the run mode.

    Sequential/profile mode runs every configuration one at a time;
    concurrent mode hands all configurations to the scheduler process;
    algorithm-store mode forwards store management actions.
    """
    cytom_manager = CytomManager()
    if args.sequential or args.profile:
        generator = GraphConfigGenerator(args)
        algos = generator.genConfigs()
        cytom_manager.profile_all(algos)
    elif args.concurrent:
        generator = GraphConfigGenerator(args)
        algos = generator.genConfigs()
        # NOTE(review): the original recorded time.time() here but never
        # used it; the dead local has been removed.
        cytom_manager.submit_algos(algos)
    elif args.algorithm_store:
        cytom_manager.connect_algorithm_store(args)
    cytom_manager.exit()
if __name__ == "__main__":
    # CLI surface of the benchmark driver; kept byte-compatible.
    parser = argparse.ArgumentParser()
    parser.add_argument("--clean", action="store_true", dest="clean", default=False)
    parser.add_argument("--distclean", action="store_true", dest="distclean", default=False)
    parser.add_argument("--print-only", action="store_true", dest="print_only", default=False)
    parser.add_argument("--prefix", default=None)
    parser.add_argument("--analyze", action="store_true", dest="analyze", default=False)
    parser.add_argument("--algorithm", default=None)
    parser.add_argument("--graph", default=None)
    parser.add_argument("--insertion-batch-size", default=None)
    parser.add_argument("--tile-batch-size", default=None)
    parser.add_argument("--enable-edge-apis", action="store_true", default=False)
    parser.add_argument("--enable-algorithm-reexecution", action="store_true", default=False)
    parser.add_argument("--enable-writing-results", action="store_true", default=False)
    parser.add_argument("--tile-distribution-strategy", default=None)
    parser.add_argument("--interactive-algo", default=None)
    parser.add_argument("--disable-algorithm", action="store_true", default=False)
    parser.add_argument("--enable-write-edges", action="store_true", default=False)
    parser.add_argument("--enable-deletions", action="store_true", default=False)
    parser.add_argument("--enable-delta", action="store_true", default=False)
    parser.add_argument("--override-selective-scheduling", action="store_true", default=False)
    parser.add_argument("--wait-after-execution", action="store_true", default=False)
    parser.add_argument("--read-edges", default=None)
    parser.add_argument("--in-memory-ingestion", action="store_true", default=False)
    parser.add_argument("--enable-traversals", action="store_true", default=False)
    parser.add_argument("--dynamic-compaction", default=None)
    parser.add_argument("--max-edges", default=0, type=int)
    parser.add_argument("--algorithm-threshold", default=0.0, type=float)
    parser.add_argument("--runs", default=1)
    parser.add_argument("--max-iterations", default=100, type=int)
    parser.add_argument("--debug", action="store_true", dest="debug", default=False)
    parser.add_argument("--iostat", action="store_true", dest="iostat", default=False)
    parser.add_argument("--collect-perf-events", action="store_true", default=False)
    parser.add_argument("--perf-stat", action="store_true", default=False)
    parser.add_argument("--plot-config", default="")
    parser.add_argument("--override-algorithm", default=None)
    parser.add_argument("--override-graph", default=None)
    parser.add_argument("--detailed-tile-stats", action="store_true", default=False)
    parser.add_argument("--disable-selective-scheduling", action="store_true", dest="disable_selective_scheduling", default=False)
    parser.add_argument("--algorithm-start-vertex", dest="algorithm_start_vertex", default=1, type=int)
    # algorithm-store subparser
    parser.add_argument("--algorithm-store", action="store_true", dest="algorithm_store", default=False) # algorithm_store commands
    parser.add_argument('--add', action='store_true')
    parser.add_argument('--delete', action='store_true')
    parser.add_argument('--clear', action='store_true') # delete all added algorithms
    parser.add_argument('--list', action='store_true') # list all added algorithm after other actions (add/delete)
    parser.add_argument('--name', nargs="*",
                        help='name(s) of the new algorithm')
    parser.add_argument('--path', nargs="*",
                        help='absolute path(s) to the new algorithm')
    parser.add_argument('--sequential', action='store_true')
    parser.add_argument('--concurrent', action='store_true')
    parser.add_argument("--profile", action="store_true", dest="profile", default=False) # profile all jobs across all graphs datasets
    args = parser.parse_args()
    # Run the workload in a child process while the parent samples whole-
    # machine CPU usage so it can report average utilization afterwards.
    cpu_usage_sum = 0.0
    time_axis = []
    cpu_axis = []
    start = time.time()
    p = Process(target=run_cytom, args=[args])
    p.start()
    while p.is_alive():
        cur_cpu_usage = psutil.cpu_percent(interval=0.5)
        cpu_usage_sum += cur_cpu_usage
        time_axis.append(datetime.datetime.now())
        cpu_axis.append(cur_cpu_usage)
        p.join(timeout=0)  # reap the child as soon as it exits
    end = time.time()
    running_time = end - start
    # BUG FIX: avoid ZeroDivisionError when the child exits before the
    # first 0.5s sampling interval completes (cpu_axis empty).
    cpu_usage_avg = cpu_usage_sum / len(cpu_axis) if cpu_axis else 0.0
    if args.sequential or args.concurrent:
        print("----------Overall status--------------")
        print("Average CPU usage:", str(cpu_usage_avg)+"%")
        print("Running time:", str(running_time)+"s")
|
main.py | from sqlite3.dbapi2 import Cursor
from prettytable import PrettyTable
import vk_api
from vk_api.longpoll import VkLongPoll, VkEventType
from vk_api.keyboard import VkKeyboard, VkKeyboardColor
import random
import sqlite3
from game_math import RandomNumber
import psycopg2
import threading
import datetime
from vkcoinapi import *
import pikches
# SECURITY NOTE(review): the VK Coin merchant key and the full Postgres
# connection string (including the password) are hard-coded below. They
# should be loaded from environment variables and the exposed credentials
# rotated.
coin = VKCoin(key='sT_uy6[Py*jtHQU6QVFJ9SxAc=_OnqIgPK=UjE392y!p,Fxh7p', merchantId=545851228)
coin.setShopName('7B SHOP')
DATABASE_URL = 'postgres://eauprxzosofunb:922ae816b5a8cc2558170460098f6c961d89d7b656ed33c7d665e9e1e4c7108e@ec2-52-205-145-201.compute-1.amazonaws.com:5432/dfrmm2t89jd2ag'
API_VERSION = '5.126'
ranked = 0
# Rank thresholds: reaching ranks_points[i] grants ranks_names[i+1]
# (index 0 of ranks_names is the unranked default).
ranks_points = [300, 500, 1000, 1200, 1500, 2000, 2200, 2500, 3000, 3200, 3500, 4000, 5000, 6000, 10000]
ranks_names = ["БЕЗ РАНГА", "Железо 3", "Железо 2", "Железо 1", "Бронза 3", "Бронза 2", "Бронза 1", "Серебро 3",
               "Серебро 2", "Серебро 1", "Платина 3", "Платина 2", "Платина 1", "Алмаз", "Титан", "Непобедимый"]
col_coins = 1
col_abs = 1
conn = psycopg2.connect(DATABASE_URL, sslmode='require')
cursor = conn.cursor()
# Mutable module-level state shared with the long-poll event loop below.
id_send = False
send_sendr = 0
pr = ""
sendr = ""
table = ""
senders = []     # presumably users mid-way through a send flow — TODO confirm
senders_2 = []   # second stage of the same flow — TODO confirm
resh = []        # users with an active math-game round (parallel to otvets)
otvets = []      # expected game answers, same index as resh
ban_list = []    # user ids blocked from the bot
wait_resh = []
num = 0
ob_send = False
def update_bases(id):
    """Drop the user from both pending-sender registries, if present."""
    for registry in (senders, senders_2):
        if id in registry:
            registry.remove(id)
def update_bases_game(id):
    """Remove a user's pending math-game entry and its parallel answer.

    `resh` holds user ids awaiting an answer and `otvets` holds the expected
    answers at the same indexes, so both lists must be updated together.
    """
    if id in resh:
        idx = resh.index(id)
        resh.remove(id)
        otvets.pop(idx)
    # NOTE(review): the original ended with `num = 0`, a dead assignment to
    # a local that shadowed the module-level `num`; it has been removed.
# Create the bot's tables on first run; treat "already exists" as a no-op.
try:
    cursor.execute("""CREATE TABLE USERS (ID INT, COINS INT, BONUS INT)""")
except Exception:
    # psycopg2 poisons the transaction after a failed statement; roll back
    # so the following statements still work.
    conn.rollback()
    print("Database users already created")
try:
    cursor.execute("""CREATE TABLE SENDS (ID INT, MST_T TEXT, MSG TEXT)""")
except Exception:
    conn.rollback()
    print("Database sends already created")
conn.commit()
from rank_manager import *
def write_msg(user_id, message):
    """Send a plain text message to a VK user.

    A random 64-bit random_id makes the message unique so VK does not
    drop it as a duplicate.
    """
    vk.method('messages.send', {
        'user_id': user_id,
        'message': message,
        'random_id': random.getrandbits(64),
    })
def write_msg_pik(user_id, message, attach):
    """Send a VK message with an attachment (used for pikcha images)."""
    payload = {
        'user_id': user_id,
        'message': message,
        'random_id': random.getrandbits(64),
        'attachment': attach,
    }
    vk.method('messages.send', payload)
def write_msg_kb(user_id, message, keyboard):
    """Send a VK message together with a VkKeyboard layout."""
    payload = {
        'user_id': user_id,
        'message': message,
        'random_id': random.getrandbits(64),
        'keyboard': keyboard.get_keyboard(),
    }
    vk.method('messages.send', payload)
def game_event(event):
    """Start a "solve the example" game round for the user behind `event`.

    Difficulty (operand count and magnitude) scales with the user's rank
    points. The generated expression and its expected answer are appended
    to the parallel `resh` / `otvets` lists consumed by the event loop.
    """
    # BUG FIX: the original assigned ob_send/id_send without a `global`
    # statement, so the intended state resets were dead locals; declare
    # them global so the resets actually take effect.
    global ob_send, id_send
    ob_send = False
    initis(event.user_id)
    update_bases_game(event.user_id)
    id_send = False
    update_bases(event.user_id)
    rn = RandomNumber()
    # Cache the rank lookup (the original queried it up to three times).
    points = get_points(event.user_id)
    # (operand count, max operand value, difficulty label) per rank bracket;
    # `upper_bound` also avoids shadowing the builtin `max`.
    if points < 300:
        operand_count, upper_bound, difficulty = 3, 10, "легко"
    elif points < 2000:
        operand_count, upper_bound, difficulty = 3, 100, "средне"
    elif points < 4000:
        operand_count, upper_bound, difficulty = 4, 100, "трудно"
    else:
        operand_count, upper_bound, difficulty = 4, 1000, "очень трудно"
    prim = rn.generate(1, upper_bound, operand_count)
    write_msg(event.user_id,
              f"Игра \"Примеры\". Твоя задача - решить пример. \nСложность: {difficulty}. Ответ округляй до целого в меньшую сторону. \nПример: {prim}")
    resh.append(event.user_id)
    # eval() runs an expression generated by RandomNumber, not user input,
    # so it is not an injection vector here.
    otvets.append(eval(prim))
def get_payment():
    # TODO: payment handling is not implemented yet; placeholder stub.
    pass
# SECURITY NOTE(review): "secret" is a checked-in placeholder; the real VK
# API token must come from configuration, not source control.
token = "secret"
vk = vk_api.VkApi(token=token, api_version=API_VERSION)
vk_conn = vk.get_api()
longpoll = VkLongPoll(vk)
# One-shot start keyboard shown on first contact.
kb_start = VkKeyboard(one_time=True, inline=False)
kb_start.add_button(color=VkKeyboardColor.POSITIVE, label="Меню", payload={"type": "0x002_menu"})
# Persistent main-menu keyboard.
kb_menu = VkKeyboard(one_time=False, inline=False)
kb_menu.add_button(color=VkKeyboardColor.PRIMARY, label="Меню")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.POSITIVE, label="Профиль")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.NEGATIVE, label="Играть")
kb_menu.add_line()
kb_menu.add_button(color=VkKeyboardColor.POSITIVE, label="Магазин")
kb_menu.add_button(color=VkKeyboardColor.SECONDARY, label="Подать заявку")
# Inline admin actions keyboard.
kb_admin = VkKeyboard(one_time=False, inline=True)
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Данные")
kb_admin.add_line()
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Заявки")
kb_admin.add_line()
kb_admin.add_button(color=VkKeyboardColor.NEGATIVE, label="Объявление")
# Inline keyboard for the pikcha inventory.
kb_pik = VkKeyboard(one_time=False, inline=True)
kb_pik.add_button(color=VkKeyboardColor.POSITIVE, label="Мои пикчи")
# Inline subject picker for submitting a request.
kb_sender = VkKeyboard(one_time=False, inline=True)
kb_sender.add_button(color=VkKeyboardColor.PRIMARY, label="Алгебра и геометрия")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.NEGATIVE, label="Литра и русский")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.POSITIVE, label="Биология")
kb_sender.add_line()
kb_sender.add_button(color=VkKeyboardColor.PRIMARY, label="Улучшения бота")
# Inline shop keyboard.
kb_shop = VkKeyboard(one_time=False, inline=True)
kb_shop.add_button(color=VkKeyboardColor.NEGATIVE, label="Пикча Алека!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.NEGATIVE, label="Пикча для богатых")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.POSITIVE, label="Рандом Пикча!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.POSITIVE, label="Взлом рандома пикч!")
kb_shop.add_line()
kb_shop.add_button(color=VkKeyboardColor.PRIMARY, label="МЕГА РАНДОМ ПИКЧ!")
def initis(id):
    """Ensure the VK user *id* has rows in the USERS, RANKS and PIK tables.

    Creates each missing row with default values (0 coins / 0 bonus,
    0 rank points, a 10-character "1111111111" picture-inventory string)
    and commits after every insert.  Finally re-pads the PIK inventory
    back to 10 characters if it came back shorter.

    Uses module globals: ``vk_conn`` (VK API), ``cursor``/``conn`` (DB).
    """
    # Coerce to int so the f-string SQL below cannot be injected through
    # a non-numeric `id` (ids come from VK and admin-typed commands).
    uid = int(id)
    user_get = vk_conn.users.get(user_ids=uid)[0]
    # --- USERS row: coins / bonus bookkeeping ---
    cursor.execute(f"""SELECT COINS from USERS where id={uid}""")
    if not cursor.fetchall():
        full_name = user_get['first_name'] + ' ' + user_get['last_name']
        print(full_name)
        cursor.execute(f"""INSERT INTO USERS (ID, COINS, BONUS) VALUES ({uid}, 0, 0)""")
        conn.commit()
        cursor.execute(f"""SELECT COINS from USERS where id={uid}""")
        print(cursor.fetchone())
    # --- RANKS row: rank points ---
    cursor.execute(f"""SELECT POINTS from RANKS where id={uid}""")
    if not cursor.fetchall():
        full_name = user_get['first_name'] + ' ' + user_get['last_name']
        print(full_name)
        cursor.execute(f"""INSERT INTO ranks VALUES ({uid}, 0)""")
        conn.commit()
        cursor.execute(f"""SELECT points FROM ranks WHERE id={uid}""")
        print(cursor.fetchone())
    # --- PIK row: 10-slot picture inventory, one digit per slot ---
    cursor.execute(f"""SELECT INV from PIK where ID={uid}""")
    if not cursor.fetchall():
        full_name = user_get['first_name'] + ' ' + user_get['last_name']
        print(full_name)
        data_pik = "1111111111"
        cursor.execute(f"""INSERT INTO PIK VALUES ({uid}, {data_pik})""")
        conn.commit()
        # BUG FIX: the original queried `event.user_id` here — a global
        # leaked from the event loop — instead of this function's argument.
        cursor.execute(f"""SELECT INV FROM PIK WHERE id={uid}""")
        print(cursor.fetchone())
    # Re-pad the inventory to 10 slots; the value is stored as a bare
    # number, so leading digits can be lost on round-trips.
    cursor.execute(f"""SELECT INV from PIK where ID={uid}""")
    inv = str(cursor.fetchone()[0])
    print(inv)
    inv_len = len(inv)  # renamed from `set`, which shadowed the builtin
    if inv_len < 10:
        pad = 10 - inv_len
        print("GET: " + str(pad))
        data_r = inv + '1' * pad
        cursor.execute(f"""UPDATE PIK set INV={data_r} where ID={uid}""")
        conn.commit()
# Main long-poll dispatch loop.  Every incoming private message is matched
# against the command chain below.  Multi-step dialogs (admin data lookup,
# broadcast announcement, application submission, the math game) are kept in
# module-level flags/lists defined earlier: id_send, ob_send, send_sendr,
# senders, senders_2, resh, otvets, ban_list.
# NOTE(review): user ids are interpolated into SQL via f-strings throughout;
# this is safe only while the ids are ints — parameterized queries would be
# safer.  Any handler exception is caught at the bottom and rolled back.
while True:
    for event in longpoll.listen():
        # A new message arrived
        if event.type == VkEventType.MESSAGE_NEW:
            if event.to_me:
                req_msg = event.text.lower()  # lower-cased for command matching
                req_msg_up = event.text       # original casing, kept for stored user content
                try:
                    # Make sure this user exists in all three DB tables.
                    initis(event.user_id)
                    # Banned users get a stock reply and nothing else.
                    if event.user_id in ban_list:
                        write_msg(event.user_id,
                                  "Вы получили бан! Теперь вы не можете пользоваться ботом!\nДля разбана обращайтесь к администраторам!")
                    # "start": greet, reset dialog flags, register the user.
                    elif req_msg == "начать":
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        ob_send = False
                        id_send = False
                        send_sendr = 0
                        write_msg_kb(event.user_id,
                                     "Привет! Это бот нашей группы (обновленный), и теперь вы сможете не просто подавать заявки, но и получать и накапливать монеты, повышать ранги и другое...",
                                     kb_start)
                        user_get = vk_conn.users.get(user_ids=event.user_id)[0]
                        # NOTE(review): missing f-prefix — "{event.user_id}" is sent
                        # to the DB literally, so this SELECT fails/returns nothing
                        # and the INSERT branch below runs; initis() already
                        # guarantees the row exists, which masks the bug.
                        cursor.execute("""SELECT coins FROM users WHERE id={event.user_id}""")
                        if not cursor.fetchall():
                            full_name = user_get['first_name'] + ' ' + user_get['last_name']
                            print(full_name)
                            cursor.execute(f"""INSERT INTO users VALUES ({event.user_id}, 0, 0)""")
                            conn.commit()
                            cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
                            print(cursor.fetchone())
                        cursor.execute(f"""SELECT points FROM ranks WHERE id={event.user_id}""")
                        if not cursor.fetchall():
                            full_name = user_get['first_name'] + ' ' + user_get['last_name']
                            print(full_name)
                            cursor.execute(f"""INSERT INTO ranks VALUES ({event.user_id}, 0)""")
                            conn.commit()
                            cursor.execute(f"""SELECT points FROM ranks WHERE id={event.user_id}""")
                            print(cursor.fetchone())
                    # "menu": show main keyboard; admins also get the admin panel.
                    elif req_msg == "меню":
                        ob_send = False
                        initis(event.user_id)
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        id_send = False
                        send_sendr = 0
                        write_msg_kb(event.user_id, "Меню. Выбери кнопку на панели под клавиатурой.", kb_menu)
                        if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                            write_msg_kb(event.user_id,
                                         "Тссс, я тут услышал что ты админ, так что пользуйся кнопкой админов:",
                                         kb_admin)
                    # "applications" (admin): dump the SENDS table as a text table.
                    elif req_msg == "заявки":
                        ob_send = False
                        initis(event.user_id)
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        table = ""
                        id_send = False
                        send_sendr = 0
                        if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                            t = PrettyTable(["ID пользователя", "Тема", "Текст заявки"])
                            write_msg(event.user_id, r"Таблица заявок 📃, поданных учениками.")
                            cursor.execute("""SELECT * FROM sends""")
                            data_s = cursor.fetchall()
                            for row in data_s:
                                t.add_row([row[0], row[1], row[2]])
                                table += str(row[0]) + ": " + str(row[1]) + ", " + str(row[2]) + "\n\n"
                            print(t)
                            write_msg(event.user_id, table)
                        else:
                            write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
                    # "data" (admin): arm the id_send flag; the next message is
                    # treated as a user id to look up.
                    elif req_msg == "данные":
                        ob_send = False
                        initis(event.user_id)
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        id_send = False
                        send_sendr = 0
                        if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                            write_msg(event.user_id, "Введите ID пользователя, чтобы узнать о нем информацию.")
                            id_send = True
                        else:
                            write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
                    # "announcement" (owner only): arm ob_send; the next message
                    # is broadcast to every registered user.
                    elif req_msg == "объявление":
                        initis(event.user_id)
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        id_send = False
                        ob_send = False
                        send_sendr = 0
                        if event.user_id == 545851228:
                            write_msg(event.user_id,
                                      "Введите текст объявления, который будет отправлен всем зарегистрированным в боте.")
                            ob_send = True
                        elif event.user_id == 502085595:
                            write_msg(event.user_id,
                                      "Прости, Леша, но во имя безопастности и защиты от спама, тебе тоже сюда нельзя(")
                        else:
                            write_msg(event.user_id, "Это место только для админов, тебе туда нельзя!")
                    # "submit application": start the two-step sender dialog.
                    elif req_msg == "подать заявку":
                        ob_send = False
                        initis(event.user_id)
                        update_bases_game(event.user_id)
                        id_send = False
                        write_msg_kb(event.user_id,
                                     r"Выберите предмет для заявки. Сейчас поддерживаются такие предметы:",
                                     kb_sender)
                        senders.insert(len(senders), event.user_id)
                    # "play": generate an arithmetic problem whose difficulty
                    # scales with the user's rank points; answer handled in the
                    # `resh` branch below.
                    elif req_msg == "играть":
                        ob_send = False
                        initis(event.user_id)
                        col = 0
                        max = 0  # NOTE(review): shadows the builtin max()
                        closh = ""
                        update_bases_game(event.user_id)
                        id_send = False
                        update_bases(event.user_id)
                        rn = RandomNumber()
                        if get_points(event.user_id) < 300:
                            col = 3
                            max = 10
                            closh = "легко"
                        elif get_points(event.user_id) < 2000:
                            col = 3
                            max = 100
                            closh = "средне"
                        elif get_points(event.user_id) < 4000:
                            col = 4
                            max = 100
                            closh = "трудно"
                        else:
                            col = 4
                            max = 1000
                            closh = "очень трудно"
                        prim = rn.generate(1, max, col)
                        write_msg(event.user_id,
                                  f"Игра \"Примеры\". Твоя задача - решить пример. \nСложность: {closh}. Ответ округляй до целого в меньшую сторону. \nПример: {prim}")
                        resh.insert(len(resh), event.user_id)
                        # eval() of the generated expression yields the answer.
                        otvets.insert(len(otvets), eval(prim))
                    # "profile": coins, rank and points summary; max rank pays
                    # out a one-time secret picture.
                    elif req_msg == "профиль":
                        ob_send = False
                        initis(event.user_id)
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        id_send = False
                        send_sendr = 0
                        ranking, ranked = get_rank(event.user_id)
                        points = get_points(event.user_id)
                        if points >= 10000:
                            ranked_more: str = "∞"
                        else:
                            ranked_more = str(ranks_points[ranked])
                        if ranking == "Непобедимый" and not have_pik(event.user_id, 8):
                            write_msg_pik(event.user_id, "Ты получил максимальный ранг в игре! Лови пикчу, которую можно получить только за это!\n\nСпасибо за ранг Непобедимый, большой вклад...", pikches.not_win)
                            add_pik(event.user_id, 8)
                        cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
                        write_msg_kb(event.user_id,
                                     f"Профиль:\n1. Монеты: {cursor.fetchone()[0]} 💰\n2. Твой ранг: {ranking} 🌟\n3. Всего очков ранга: {points}/{ranked_more}.", kb_pik)
                    # Disabled one-time beta-tester bonus feature, kept for reference.
                    # elif req_msg == "бонус":
                    #     ob_send = False
                    #     initis(event.user_id)
                    #     update_bases_game(event.user_id)
                    #     update_bases(event.user_id)
                    #     id_send = False
                    #     send_sendr = 0
                    #     write_msg(event.user_id,
                    #               r"Бонус для beta тестировщиков или датамайнеров (везунчиков, которые написали боту во время теста) - 100 💰")
                    #     cursor.execute(f"""SELECT bonus FROM users WHERE id={event.user_id}""")
                    #     if cursor.fetchone()[0] == 0:
                    #         cursor.execute(f"""UPDATE users SET bonus = 1 WHERE id={event.user_id}""")
                    #         write_msg(event.user_id, r"Ты тоже получил бонус! - 100 💰")
                    #         cursor.execute(f"""SELECT coins FROM users WHERE id={event.user_id}""")
                    #         cursor.execute(
                    #             f"""UPDATE users SET coins = {int(cursor.fetchone()[0]) + 100} WHERE id={event.user_id}""")
                    #         conn.commit()
                    #     else:
                    #         write_msg(event.user_id, r"Что, захотел еще деньжат? Нее, бонус можно получить только раз!")
                    # "shop": show price list and shop keyboard.
                    elif req_msg == "магазин":
                        write_msg_kb(event.user_id, "Магазин.\n\nЗдесь ты можешь купить рандомную пикчу из групп 7 параллель, 7б и Квазар.\n\nВ честь недавних событий ты можешь поддержать Алека, потратив 100 монет и получив его пикчу!\n\nДоступные пикчи для покупки:\n1. Алек (100 монет)\n2. Рандомная пикча! Может выпасть любая пикча из всех что есть. Шанс секретной: 1/1000 (100 монет)\n3. Взлом рандома пикч! Может выпасть любая пикча из всех что есть. Шанс секретной пикчи: 1/100 (10000 монет)\n4. Мега рандом пикча! ШАНС ВЫПАДЕНИЯ СЕКРЕТНОЙ ПИКЧИ 1/2 (100 000 монет)\n5. Пикча для богатых (10000 монет)", kb_shop)
                    # "my pictures": list owned regular (7) and secret (3) slots.
                    elif req_msg == "мои пикчи":
                        text = "Твои пикчи:\n\n"
                        i = 0
                        i_2 = 0
                        for i in range(7):
                            if have_pik(event.user_id, i):
                                text = text + str(i+1) + ". " + pikches.pik_data[i] + "\n"
                            else:
                                text = text + str(i+1) + ". ????????" + "\n"
                        text = text + "\nСекретные пикчи либо супер редкие пикчи:\n\n"
                        for i_2 in range(3):
                            if have_pik(event.user_id, i_2 + len(pikches.pik_data)):
                                text = text + str(i_2+1) + ". " + pikches.pik_data_secret[i_2] + "\n"
                            else:
                                text = text + str(i_2+1) + ". ????????" + "\n"
                        text = text + "\nПродолжай собирать пикчи! Если ты собрал все, не радуйся, скоро я добавлю новые!"
                        write_msg(event.user_id, text)
                    # Shop purchases: each branch checks balance, sends the
                    # picture, records ownership, then deducts the price.
                    elif req_msg == "пикча алека!":
                        if get_coins(event.user_id) >= 100:
                            write_msg_pik(event.user_id,
                                          "Спасибо! Лови пикчу!", pikches.alek)
                            pik_id = 0
                            add_pik(event.user_id, pik_id)
                            add_coins(event.user_id, -100)
                        else:
                            write_msg(event.user_id, "Не хватает монет)")
                    elif req_msg == "пикча для богатых":
                        if get_coins(event.user_id) >= 10000:
                            write_msg_pik(event.user_id,
                                          "Лови пикчу! На расстрел!", pikches.capit)
                            pik_id = 3
                            add_pik(event.user_id, pik_id)
                            add_coins(event.user_id, -10000)
                        else:
                            write_msg(event.user_id, "Не хватает монет")
                    elif req_msg == "рандом пикча!":
                        # Argument is the denominator of the secret-drop chance.
                        if get_coins(event.user_id) >= 100:
                            msg, pik, pik_id = get_random_pik(1000)
                            write_msg_pik(event.user_id, msg, pik)
                            add_pik(event.user_id, pik_id)
                            add_coins(event.user_id, -100)
                        else:
                            write_msg(event.user_id, "Не хватает монет.")
                    elif req_msg == "мега рандом пикч!":
                        if get_coins(event.user_id) >= 100000:
                            msg, pik, pik_id = get_random_pik(2)
                            write_msg_pik(event.user_id, msg, pik)
                            add_pik(event.user_id, pik_id)
                            add_coins(event.user_id, -100000)
                        else:
                            write_msg(event.user_id, "Не хватает монет. А ты че думаЛ. на такой рандом копить и копить надо!")
                    elif req_msg == "взлом рандома пикч!":
                        if get_coins(event.user_id) >= 10000:
                            msg, pik, pik_id = get_random_pik(100)
                            write_msg_pik(event.user_id, msg, pik)
                            add_pik(event.user_id, pik_id)
                            add_coins(event.user_id, -10000)
                        else:
                            write_msg(event.user_id,
                                      "Не хватает монет. А ты че думаЛ. на такой рандом копить и копить надо!")
                    # Second step of the admin "данные" dialog: message is a user id.
                    elif id_send:
                        ob_send = False
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        id_send = False
                        id_get = req_msg
                        cursor.execute(f"""SELECT * FROM users WHERE id={id_get}""")
                        data = cursor.fetchall()
                        print(data)
                        try:
                            user_get = vk_conn.users.get(user_ids=id_get)[0]
                            full_name = user_get['first_name'] + ' ' + user_get['last_name']
                        except:
                            write_msg(event.user_id,
                                      "Неверное ID. ВКонтакте не имеет пользователя с таким ID\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x606")
                            # breaks out of the longpoll for-loop, not just this branch
                            break
                        try:
                            print(data[0])
                            for row in data:
                                write_msg(event.user_id,
                                          f"Имя пользователя: {full_name}\nЗарегистрирован в системе.\nКоличество монет: {row[1]}\nСтатус бонуса:{row[2]}")
                            # NOTE(review): id_get is a string here, so this int
                            # comparison never matches.
                            if id_get == 502085595 or id_get == 545851228 or id_get == 13122641:
                                write_msg(event.user_id, f"Является администратором")
                        except:
                            write_msg(event.user_id,
                                      f"Имя пользователя: {full_name}\nНе зарегистрирован в системе.\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x707")
                    # Second step of the "объявление" dialog: broadcast the text.
                    elif ob_send:
                        update_bases_game(event.user_id)
                        update_bases(event.user_id)
                        ob_send = False
                        ob_get = req_msg_up
                        cursor.execute("""SELECT id FROM users""")
                        data = cursor.fetchall()
                        print(data)
                        try:
                            for x in data:
                                # NOTE(review): x is a row tuple, so this never
                                # equals the bare int and every row is messaged.
                                if x != 13122641:
                                    write_msg(x, ob_get)
                        except:
                            write_msg(event.user_id,
                                      "Неверное ID. ВКонтакте не имеет пользователя с таким ID\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x606")
                            break
                    # Answer to an in-flight math game: compare with stored result.
                    elif event.user_id in resh:
                        ob_send = False
                        num = resh.index(event.user_id)
                        resh.remove(event.user_id)
                        otv = int(otvets[num])
                        otvets.pop(num)
                        if req_msg == str(otv):
                            write_msg(event.user_id,
                                      f"Молодец, ответ правильный! Ты получаешь 10 очков ранга и в придачу {col_coins * col_abs} 💰!")
                            add_coins(event.user_id, col_coins * col_abs)
                            add_points(event.user_id, 10)
                        else:
                            write_msg(event.user_id, "Неверный ответ... Ты теряешь 5 очков ранга. Попробуй позже.")
                            add_points(event.user_id, -5)
                        # NOTE(review): this CALLS game_event(event) immediately and
                        # passes its return value as target; the thread is also
                        # never start()ed.  Likely meant:
                        # threading.Thread(target=game_event, args=(event,)).start()
                        th = threading.Thread(target=game_event(event))
                    # Application dialog step 1: subject chosen; ask for the text
                    # (one pending application per user).
                    elif event.user_id in senders:
                        ob_send = False
                        update_bases_game(event.user_id)
                        id_send = False
                        cursor.execute(f"SELECT * FROM SENDS WHERE ID={event.user_id}")
                        if not cursor.fetchall():
                            pr = req_msg_up
                            send_sendr = 2
                            senders.remove(event.user_id)
                            senders_2.insert(len(senders_2), event.user_id)
                            write_msg(event.user_id, r"Введите текст заявки 📃:")
                        else:
                            write_msg(event.user_id,
                                      "Нельзя подавать больше одной заявки (защита от спама). Как твою заявку рассмотрят, ты сможешь подать следующую.")
                            senders.remove(event.user_id)
                    # Application dialog step 2: store subject + text in SENDS.
                    elif event.user_id in senders_2:
                        ob_send = False
                        update_bases_game(event.user_id)
                        id_send = False
                        senders_2.remove(event.user_id)
                        sendr = req_msg_up
                        write_msg(event.user_id, r"Заявка принята 📃")
                        # NOTE(review): user text is interpolated into SQL — a
                        # quote in the message breaks/abuses this statement.
                        cursor.execute(
                            f"""INSERT INTO sends VALUES ({event.user_id}, '{str(pr)}', '{str(sendr)}')""")
                        conn.commit()
                        print(event.user_id, ":", pr, ":", sendr)
                        send_sendr = 0
                        sendr = ""
                        pr = ""
                    # Fallback: "!"-prefixed admin commands, otherwise an error.
                    else:
                        initis(event.user_id)
                        parse = req_msg.split(' ')
                        # !лихорадка N — set the coin-multiplier ("fever") factor.
                        if parse[0] == "!лихорадка":
                            if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                                col_abs = int(parse[1])
                                write_msg(event.user_id, f"Коофицент лихорадки настроен на значение {col_abs}")
                        # !rollback — owner-only manual transaction rollback.
                        elif parse[0] == "!rollback" and event.user_id == 545851228:
                            write_msg(event.user_id, "Соединение с бд зарыто.")
                            cursor.execute("ROLLBACK")
                        # !ранг ID N — grant rank points.
                        elif parse[0] == "!ранг":
                            if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                                add_points(int(parse[1]), int(parse[2]))
                                write_msg(event.user_id,
                                          f"Пользователю с айди {parse[1]} добавлено {parse[2]} очков ранга.")
                        # !монеты ID N — grant coins.
                        elif parse[0] == "!монеты":
                            if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                                add_coins(int(parse[1]), int(parse[2]))
                                write_msg(event.user_id, f"Пользователю с айди {parse[1]} добавлено {parse[2]} монет.")
                        # !рассмотреть ID — close and delete a user's application.
                        elif parse[0] == "!рассмотреть":
                            if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                                cursor.execute(f"""DELETE from SENDS where ID={int(parse[1])};""")
                                conn.commit()
                                write_msg(event.user_id,
                                          f"""Заявка от пользователя с ID {parse[1]} рассмотрена и удалена из бд.\nПользователь уведомлен о рассмотрении.""")
                                write_msg(int(parse[1]),
                                          f"""Ваша заявка была рассмотрена и удалена из баз данных.""")
                        # !бан ID <reason...> — in-memory ban (lost on restart).
                        elif parse[0] == "!бан":
                            if event.user_id == 502085595 or event.user_id == 545851228 or event.user_id == 13122641:
                                try:
                                    id = int(parse[1])
                                    parse.remove(parse[1])
                                    parse.remove(parse[0])
                                    if id == 545851228:
                                        write_msg(event.user_id,
                                                  f"Владимир Константинов является автором и полным обладателем бота, его нельзя банить!")
                                    else:
                                        write_msg(event.user_id, f"Пользователь с айди {id} был забанен.")
                                        ban_list.insert(len(ban_list), id)
                                        write_msg(id,
                                                  f"""Вы были забанены! Сообщение от администраторов: {" ".join(parse)}""")
                                except:
                                    write_msg(event.user_id, "Ошибка бана!")
                        # !разбан ID — NOTE(review): no admin check on this one.
                        elif parse[0] == "!разбан":
                            try:
                                ban_list.remove(int(parse[1]))
                                write_msg(event.user_id, "Пользователь разбанен")
                                write_msg(int(parse[1]), "Вы разбанены!")
                            except:
                                write_msg(event.user_id, "Ошибка разбана!")
                        # !секрет <codeword> — secret picture easter eggs.
                        elif parse[0] == "!секрет":
                            if parse[1] == "смерть_с_небес":
                                write_msg_pik(event.user_id,
                                              "Вау, ты открыл одну из серетных пикч!\nШанс ее выпадения из рандом пикч 1/81!\n\nЛови пикчу!\nСупер редкая пикча!\n\nУдар с небес!!!",
                                              pikches.vita)
                            # NOTE(review): the first codeword never calls add_pik,
                            # so that picture is not recorded as owned.
                            if parse[1] == "история":
                                write_msg_pik(event.user_id,
                                              "Вау, ты открыл одну из серетных пикч!\nШанс ее выпадения из рандом пикч 1/81!\n\nЛови пикчу!\nСупер редкая пикча!\n\nИстория с ПалМихом!!!",
                                              pikches.hist)
                                add_pik(event.user_id, 9)
                        else:
                            write_msg(event.user_id,
                                      "Не понятен запрос!\n\nЕсли вы считаете, что это ошибка, то покажите код ошибки автору бота.\n[ERROR] 0x001")
                # Any handler failure: roll back the open transaction and keep
                # the bot alive; the error is only printed, never reported.
                except Exception as err:
                    cursor.execute("ROLLBACK")
                    print(err)
                    print(senders)
                    print(senders_2)
|
cmdlineframes.py |
from __future__ import absolute_import, division, print_function
from iotbx.reflection_file_reader import any_reflection_file
from iotbx.gui_tools.reflections import ArrayInfo
from cctbx.miller import display2 as display
from crys3d.hklviewer import jsview_3d as view_3d
#from crys3d.hklviewer.jsview_3d import ArrayInfo
from cctbx import miller
from cctbx import crystal
from libtbx.math_utils import roundoff
from libtbx.str_utils import format_value
from cctbx.array_family import flex
from libtbx.utils import Sorry, to_str
from scitbx import matrix
from cctbx import sgtbx
from libtbx import group_args, version
import libtbx
import libtbx.load_env
import traceback
import sys, zmq, threading, time, cmath, zlib, os.path, math, re
# User-facing message shown when an action needs a selected reflection dataset.
NOREFLDATA = "No reflection data has been selected"
class settings_window () :
  """Panel that displays details of the currently picked reflection.

  Expects the GUI text widgets ``hkl_info``, ``d_min_info`` and
  ``value_info`` (each with a ``SetValue`` method) to be attached to the
  instance by the surrounding GUI code.
  """
  def set_index_span (self, index_span) :
    """Remember the hkl index span of the loaded data."""
    self._index_span = index_span
  def update_reflection_info (self, hkl, d_min, value) :
    """Show *hkl*, *d_min* and *value* in the info widgets.

    A ``None`` hkl blanks all three fields.
    """
    print(hkl, value)
    if (hkl is None) :
      for widget in (self.hkl_info, self.d_min_info, self.value_info):
        widget.SetValue("")
    else :
      self.hkl_info.SetValue("%d, %d, %d" % hkl)
      self.d_min_info.SetValue(format_value("%.3g", d_min))
      self.value_info.SetValue(format_value("%.3g", value, replace_none_with="---"))
  def clear_reflection_info (self) :
    """Blank the reflection info fields."""
    self.update_reflection_info(None, None, None)
class HKLViewFrame() :
  def __init__ (self, *args, **kwds) :
    """Set up viewer state, optional GUI socket thread, and load a file.

    Recognised kwds include 'verbose', 'useGuiSocket' (a port number),
    'fileinfo', and 'hklin'/'HKLIN' (reflection file to load).  Several
    entries are injected into kwds and forwarded to hklview_3d.
    """
    self.valid_arrays = []
    self.spacegroup_choices = []
    self.procarrays = []
    self.origarrays = {}
    self.ano_spg_tpls =[]
    self.merge_answer = [None]
    self.dmin = -1
    self.settings = display.settings()
    self.verbose = 0
    if 'verbose' in kwds:
      # NOTE(review): eval() on a kwarg — presumably always a trusted
      # command-line value like "1"; verify callers.
      self.verbose = eval(kwds['verbose'])
    self.guiSocketPort=None
    kwds['settings'] = self.settings
    kwds['mprint'] = self.mprint
    self.infostr = ""
    self.hklfile_history = []
    self.tncsvec = None
    self.uservectors = []
    self.new_miller_array_operations_lst = []
    # Copyright/licence files offered to the GUI's about dialog.
    self.copyrightpaths = [("CCTBX copyright", libtbx.env.under_root(os.path.join("modules","cctbx_project","COPYRIGHT.txt"))),
     ("NGL copyright", libtbx.env.under_dist("crys3d","hklviewer/License_for_NGL.txt")),
     ("html2canvas copyright", libtbx.env.under_dist("crys3d","hklviewer/LICENSE_for_html2canvas.txt"))
    ]
    self.zmqsleeptime = 0.1
    # Optional ZMQ PAIR socket for talking to the Qt GUI process.
    if 'useGuiSocket' in kwds:
      self.guiSocketPort = eval(kwds['useGuiSocket'])
      self.context = zmq.Context()
      self.guisocket = self.context.socket(zmq.PAIR)
      self.guisocket.connect("tcp://127.0.0.1:%s" %self.guiSocketPort )
      self.STOP = False
      self.mprint("CCTBX starting socket thread", 1)
      # name this thread to ensure any asyncio functions are called only from main thread
      self.msgqueuethrd = threading.Thread(target = self.zmq_listen, name="HKLviewerZmqThread" )
      self.msgqueuethrd.daemon = True
    kwds['send_info_to_gui'] = self.SendInfoToGUI # function also used by hklview_3d
    pyversion = "cctbx.python.version: " + str(sys.version_info[0])
    # tell gui what python version we are
    self.SendInfoToGUI(pyversion )
    self.SendInfoToGUI({"copyrights": self.copyrightpaths,
                        "cctbxversion": version.get_version()} )
    self.mprint("kwds= " +str(kwds), 1)
    self.mprint("args= " + str(args), 1)
    kwds['websockport'] = self.find_free_port()
    kwds['parent'] = self
    self.viewer = view_3d.hklview_3d( **kwds )
    self.ResetPhilandViewer()
    self.idx_data = None
    self.NewFileLoaded = False
    self.loaded_file_name = ""
    self.fileinfo = None
    if 'fileinfo' in kwds:
      self.fileinfo = kwds.get('fileinfo', 1 )
    self.hklin = None
    if 'hklin' in kwds or 'HKLIN' in kwds:
      self.hklin = kwds.get('hklin', kwds.get('HKLIN') )
      self.LoadReflectionsFile(self.hklin)
    # Start the listener only after the viewer exists so messages can be handled.
    if 'useGuiSocket' in kwds:
      self.msgqueuethrd.start()
  def __exit__(self, exc_type=None, exc_value=0, traceback=None):
    """Shut down: close the viewer and signal the zmq thread to stop."""
    self.viewer.__exit__(exc_type, exc_value, traceback)
    self.mprint("Destroying HKLViewFrame", verbose=0) # this string is expected by HKLviewer.py so don't change
    self.STOP = True
    # NOTE: only unbinds the local name; the object lives until external
    # references are dropped.
    del self
    #sys.exit()
def mprint(self, msg, verbose=0):
if verbose <= self.verbose:
if self.guiSocketPort:
self.SendInfoToGUI( { "info": msg } )
else:
print(msg)
def find_free_port(self):
import socket
s = socket.socket()
s.bind(('', 0)) # Bind to a free port provided by the host.
port = s.getsockname()[1]
s.close()
return port
  def zmq_listen(self):
    """Background thread body: receive messages from the GUI socket.

    Each message is a string "(msgtype, payload)"; 'dict' payloads update
    the viewer's datatype dict, 'philstr' payloads are parsed as phil and
    applied via update_settings().  Loops until self.STOP is set.
    """
    #time.sleep(5)
    nan = float("nan") # workaround for "evaluating" any NaN values in the messages received
    while not self.STOP:
      try:
        msgstr = self.guisocket.recv().decode("utf-8")
        if msgstr == "":
          continue
        self.mprint("Received string:\n" + msgstr, verbose=1)
        # NOTE(review): eval() executes arbitrary code from the socket;
        # acceptable only because this is a local PAIR socket to our own
        # GUI process — do not expose this port.
        msgtype, mstr = eval(msgstr)
        if msgtype=="dict":
          self.viewer.datatypedict = eval(mstr)
        if msgtype=="philstr":
          new_phil = libtbx.phil.parse(mstr)
          self.update_settings(new_phil)
        time.sleep(self.zmqsleeptime)
      except Exception as e:
        # Log and keep listening; a bad message must not kill the thread.
        self.mprint( str(e) + traceback.format_exc(limit=10), verbose=1)
    self.mprint( "Shutting down zmq_listen() thread", 1)
    self.guiSocketPort=None
  def ResetPhilandViewer(self, extraphil=None):
    """Reset phil parameters and viewer state, e.g. before loading a file.

    *extraphil*, if given, is merged on top of the master phil — except
    for clip-plane values, which are always reset to the master defaults.
    Returns the viewer's params scope.
    """
    self.master_phil = libtbx.phil.parse( masterphilstr )
    self.currentphil = self.master_phil
    if extraphil:
      self.currentphil = self.currentphil.fetch(source = extraphil)
      # Don't retain clip plane values as these are specific to each crystal
      # so use clip plane parameters from the master phil
      default_clipphil = self.master_phil.fetch().extract().clip_plane
      currentparms = self.currentphil.extract()
      currentparms.clip_plane = default_clipphil
      self.currentphil = self.master_phil.format(python_object = currentparms)
    self.params = self.currentphil.fetch().extract()
    self.viewer.viewerparams = self.params.viewer
    self.viewer.params = self.params
    self.params.binner_idx = 0
    self.params.nbins = 1
    self.params.scene_bin_thresholds = ""
    self.params.using_space_subgroup = False
    self.viewer.symops = []
    self.viewer.sg = None
    self.viewer.proc_arrays = []
    self.viewer.HKLscenedict = {}
    self.uservectors = []
    self.viewer.visual_symmxs = []
    # NOTE(review): set on self, unlike the surrounding viewer attributes —
    # possibly intended as self.viewer.visual_symHKLs; confirm before changing.
    self.visual_symHKLs = []
    self.viewer.sceneisdirty = True
    self.viewer.isnewfile = True
    if self.viewer.miller_array:
      self.viewer.params.viewer.scene_id = None
      self.viewer.RemoveStageObjects()
    self.viewer.miller_array = None
    self.viewer.lastviewmtrx = None
    return self.viewer.params
def GetNewCurrentPhilFromString(self, philstr, oldcurrentphil):
user_phil = libtbx.phil.parse(philstr)
newcurrentphil = oldcurrentphil.fetch(source = user_phil)
diffphil = oldcurrentphil.fetch_diff(source = user_phil)
return newcurrentphil, diffphil
  def GetNewCurrentPhilFromPython(self, pyphilobj, oldcurrentphil):
    """Merge the python phil object *pyphilobj* into *oldcurrentphil*.

    Unrecognised parameters are logged rather than raising.  Returns
    (merged_phil, diff_phil).
    """
    newcurrentphil, unusedphilparms = oldcurrentphil.fetch(source = pyphilobj, track_unused_definitions=True)
    for parm in unusedphilparms:
      self.mprint( "Received unrecognised phil parameter: " + parm.path, verbose=1)
    diffphil = oldcurrentphil.fetch_diff(source = pyphilobj)
    # Disabled workaround kept for reference (scene_bin_thresholds diffing):
    """
    oldcolbintrshld = oldcurrentphil.extract().scene_bin_thresholds
    newcolbintrshld = oldcolbintrshld
    if hasattr(pyphilobj.extract(), "scene_bin_thresholds"):
      newcolbintrshld = pyphilobj.extract().scene_bin_thresholds
    # fetch_diff doesn't seem able to correclty spot changes
    # in the multiple scope phil object "scene_bin_thresholds"
    # Must do it manually
    params = newcurrentphil.extract()
    if oldcolbintrshld != newcolbintrshld: # or old_binopacities != new_binopacities:
      params.scene_bin_thresholds = newcolbintrshld
      newcurrentphil = self.master_phil.format(python_object = params)
      diffphil = self.master_phil.fetch_diff(source = newcurrentphil)
    #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
    """
    return newcurrentphil, diffphil
  def SetCurrentPhilAsPython(self, pyphil):
    """Format and re-fetch *pyphil* against the master phil.

    NOTE(review): references a global ``master_phil`` that is not defined
    in this method (possibly defined elsewhere in the file, or intended
    to be ``self.master_phil``), and both results are discarded — this
    method appears to have no effect; confirm before use.
    """
    newphil = master_phil.format(python_object= pyphil)
    currphil = master_phil.fetch(source = newphil)
  def update_settings(self, new_phil=None):
    """Apply a phil change: diff it against the current phil and dispatch.

    Each recognised parameter path in the diff triggers the matching
    action (load file, set scene, change space group, save image, ...).
    Returns True when something was applied, False on no-op or error.
    """
    try:
      if not new_phil:
        #self.params = self.viewer.params
        new_phil = self.master_phil.format(python_object = self.params)
      #import code, traceback; code.interact(local=locals(), banner="".join( traceback.format_stack(limit=10) ) )
      self.currentphil, diff_phil = self.GetNewCurrentPhilFromPython(new_phil, self.currentphil)
      #diff = None
      self.params = self.currentphil.extract()
      phl = self.params
      # Nothing changed and the mouse didn't move: skip all work.
      if len(diff_phil.all_definitions()) < 1 and not phl.mouse_moved:
        self.mprint( "Nothing's changed", verbose=1)
        return False
      self.mprint("diff phil:\n" + diff_phil.as_str(), verbose=1 )
      # Load data supplied directly as miller arrays.
      if view_3d.has_phil_path(diff_phil, "use_provided_miller_arrays"):
        phl = self.ResetPhilandViewer(self.currentphil)
        if not self.load_miller_arrays():
          return False
        self.viewer.lastscene_id = phl.viewer.scene_id
      # Load data from a reflection file on disk.
      if view_3d.has_phil_path(diff_phil, "openfilename"):
        phl = self.ResetPhilandViewer(self.currentphil)
        if not self.load_reflections_file(phl.openfilename):
          return False
        self.viewer.lastscene_id = phl.viewer.scene_id
      # Scene selection / binning changes.
      if view_3d.has_phil_path(diff_phil, "scene_id", "merge_data", "show_missing", \
         "show_only_missing", "show_systematic_absences", "nbins", "binner_idx",\
         "scene_bin_thresholds"):
        if self.set_scene(phl.viewer.scene_id):
          self.update_space_group_choices()
          self.set_scene_bin_thresholds(strbinvals=phl.scene_bin_thresholds,
                                         binner_idx=phl.binner_idx,
                                         nbins=phl.nbins )
      if phl.spacegroup_choice == None:
        self.mprint("! spacegroup_choice == None")
        #time.sleep(15)
      if view_3d.has_phil_path(diff_phil, "spacegroup_choice"):
        self.set_spacegroup_choice(phl.spacegroup_choice)
      if view_3d.has_phil_path(diff_phil, "tabulate_miller_array_ids"):
        self.tabulate_arrays(phl.tabulate_miller_array_ids)
        #return True
      if view_3d.has_phil_path(diff_phil, "miller_array_operations"):
        self.make_new_miller_array()
      if view_3d.has_phil_path(diff_phil, "using_space_subgroup") and phl.using_space_subgroup==False:
        self.set_default_spacegroup()
      if view_3d.has_phil_path(diff_phil, "shape_primitive"):
        self.set_shape_primitive(phl.shape_primitive)
      if view_3d.has_phil_path(diff_phil, "add_user_vector_hkl_op",
                                          "add_user_vector_abc",
                                          "add_user_vector_hkl"):
        self.add_user_vector()
      # Rebuild the array-info table shown in the GUI.
      if view_3d.has_phil_path(diff_phil, "selected_info"):
        self.viewer.array_info_format_tpl = []
        for i,array in enumerate(self.procarrays):
          if type(array.data()) == flex.std_string: # in case of status array from a cif file
            uniquestrings = list(set(array.data()))
            info = array.info()
            array = array.customized_copy(data=flex.int([uniquestrings.index(d) for d in array.data()]))
            array.set_info(info)
          if array.space_group() is None:
            # NOTE(review): `uc` and `spg` are not defined in this method;
            # presumably module/instance state from the first array — verify.
            array._unit_cell = uc
            array._space_group_info = spg.info()
            self.mprint("""No unit cell or space group info present in the %d. miller array.
Borrowing them from the first miller array""" %i)
          wrap_labels = 25
          arrayinfo = ArrayInfo(array,wrap_labels)
          info_fmt, dummy, dummy2 = arrayinfo.get_selected_info_columns_from_phil(self.params )
          self.viewer.array_info_format_tpl.append( info_fmt )
        self.SendInfoToGUI({"array_infotpls": self.viewer.array_info_format_tpl})
        colnames_select_lst = []
        for philname,selected in list(self.params.selected_info.__dict__.items()):
          if not philname.startswith("__"):
            colnames_select_lst.append((philname, arrayinfo.caption_dict[philname], selected))
        self.SendInfoToGUI({ "colnames_select_lst": colnames_select_lst })
      if view_3d.has_phil_path(diff_phil, "save_image_name"):
        self.SaveImageName(phl.save_image_name)
        phl.save_image_name = None
      if view_3d.has_phil_path(diff_phil, "action"):
        ret = self.set_action(phl.action)
        phl.action = "is_running" # ensure the same action in succession can be executed
        if not ret:
          return False
      if view_3d.has_phil_path(diff_phil, "savefilename"):
        self.SaveReflectionsFile(phl.savefilename)
        phl.savefilename = None # ensure the same action in succession can be executed
      if view_3d.has_phil_path(diff_phil, "viewer"):
        self.viewer.settings = phl.viewer
        self.settings = phl.viewer
      self.params = self.viewer.update_settings(diff_phil, phl)
      if view_3d.has_phil_path(diff_phil, "scene_id", "spacegroup_choice"):
        self.list_vectors()
      # parameters might have been changed. So update self.currentphil accordingly
      self.currentphil = self.master_phil.format(python_object = self.params)
      self.NewFileLoaded = False
      phl.mouse_moved = False
      self.SendCurrentPhilValues()
      if (self.viewer.miller_array is None) :
        self.mprint( NOREFLDATA, True)
        return False
      return True
    except Exception as e:
      # Report the traceback but never let a phil update crash the frame.
      self.mprint(to_str(e) + "\n" + traceback.format_exc(), 0)
      return False
def update_clicked (self, index) :#hkl, d_min=None, value=None) :
if (index is None) :
self.settings_panel.clear_reflection_info()
else :
hkl, d_min, value = self.viewer.scene.get_reflection_info(index)
self.settings_panel.update_reflection_info(hkl, d_min, value)
  def detect_Rfree(self, array):
    """Convert an R-free flag array to 0/1 integers marking the test set.

    If *array* looks like R-free flags, the detected test flag value is
    mapped to 1 and everything else to 0; otherwise the array is
    returned unchanged.
    """
    from iotbx.reflection_file_utils import looks_like_r_free_flags_info
    info = array.info()
    if (array.is_integer_array()) and (looks_like_r_free_flags_info(info)) :
      from iotbx.reflection_file_utils import get_r_free_flags_scores
      score_array = get_r_free_flags_scores([array], None)
      test_flag_value = score_array.test_flag_values[0]
      if test_flag_value not in array.data():
        return array # for the few cases where a miller array cannot be considered as a valid Rfree array
      # Boolean mask of the test set, then back to ints for display.
      array = array.customized_copy(data=(array.data() == test_flag_value))
      array.set_info(info)
      array._data = array.data().as_int()
    return array
  def process_miller_array(self, array) :
    """Prepare one miller array for display.

    Runs R-free detection and collects labels, space group and unit cell
    into a group_args summary.  Returns (array, info) or None when
    *array* is None.  Raises Sorry when symmetry info is missing.
    """
    if (array is None) : return
    info = array.info()
    if isinstance(info, str) :
      labels = "TEST DATA"
    else :
      labels = info.label_string()
    if (array.unit_cell() is None) or (array.space_group() is None) :
      raise Sorry("No space group info is present in data")
    details = []
    self.infostr = ""
    array = self.detect_Rfree(array)
    sg = "%s" % array.space_group_info()
    uc = "a=%g b=%g c=%g angles=%g,%g,%g" % array.unit_cell().parameters()
    details_str = ""
    if (len(details) > 0) :
      details_str = "(%s)" % ", ".join(details)
    array_info = group_args(
      labels=labels,
      details_str=details_str,
      merge=self.params.merge_data,
      sg=sg,
      uc=uc)
    return array, array_info
  def process_all_miller_arrays(self, col):
    """Process every valid array; return the info for column *col*.

    Rebuilds self.procarrays and, when *col* matches, makes that array
    the viewer's current one.  With col=None the info of the last
    processed array is returned.
    NOTE(review): if self.valid_arrays is empty (or *col* matches no
    index and is not None) ``array_info`` is never bound and the return
    raises UnboundLocalError.
    """
    self.mprint("Processing reflection data...")
    self.procarrays = []
    if self.params.merge_data == False:
      self.settings.expand_to_p1 = False
      self.settings.expand_anomalous = False
    for c,arr in enumerate(self.valid_arrays):
      procarray, procarray_info = self.process_miller_array(arr)
      self.procarrays.append(procarray)
      if c==col:
        array_info = procarray_info
        self.viewer.miller_array = procarray
      if col is None:
        array_info = procarray_info
    return array_info
def set_miller_array(self, col=None) :
if col is not None and col >= len(self.viewer.hkl_scenes_info ):
return
array_info = self.process_all_miller_arrays(col)
self.viewer.set_miller_array(col, merge=array_info.merge,
details=array_info.details_str)
self.viewer.proc_arrays = self.procarrays
self.viewer.identify_suitable_fomsarrays()
  def update_space_group_choices(self, col=None) :
    """Rebuild the list of subgroup space-group choices for the GUI.

    Uses the array at *col* (or the currently displayed one) and sends
    the symbol list, plus an "original spacegroup" entry, to the GUI.
    """
    if (self.viewer.miller_array is None and col is None) or \
      self.params.using_space_subgroup:
      return
    if col is None:
      current_miller_array_idx = self.viewer.HKLInfo_from_dict()[1]
    else:
      current_miller_array_idx = col
    matching_valid_array = self.procarrays[ current_miller_array_idx ]
    from cctbx.sgtbx.subgroups import subgroups
    from cctbx import sgtbx
    sg_info = matching_valid_array.space_group_info()
    subgrs = subgroups(sg_info).groups_parent_setting()
    self.spacegroup_choices = []
    for i,subgroup in enumerate(subgrs) :
      subgroup_info = sgtbx.space_group_info(group=subgroup)
      self.spacegroup_choices.append(subgroup_info)
    # Find the current space group in the choices; insert it if absent.
    for i,e in enumerate(self.spacegroup_choices):
      c = None
      if str(sg_info) == str(e):
        self.current_spacegroup = self.spacegroup_choices[i]
        c = i
        break
    if c is None:
      c = 0
      self.spacegroup_choices.insert(c, sg_info)
      self.current_spacegroup = sg_info
    self.params.spacegroup_choice = c
    spglst = [e.symbol_and_number() for e in self.spacegroup_choices] + ["original spacegroup"]
    mydict = { "spacegroups": spglst }
    self.SendInfoToGUI(mydict)
  def set_spacegroup_choice(self, n) :
    """Re-merge all arrays in space group choice *n*.

    Index len(choices) means "original spacegroup" (undo the subgroup
    selection).  Raises Sorry when no data is loaded.
    """
    if (self.viewer.miller_array is None) :
      raise Sorry("No data loaded!")
    if n == len(self.spacegroup_choices): # selected the unmerged "original spacegroup" in the list
      self.viewer.proc_arrays = self.procarrays
      self.params.using_space_subgroup = False
    else:
      self.current_spacegroup = self.spacegroup_choices[n]
      from cctbx import crystal
      symm = crystal.symmetry(
        space_group_info= self.current_spacegroup,
        unit_cell=self.viewer.miller_array.unit_cell())
      othervalidarrays = []
      # Expand to P1 and re-merge each array under the chosen symmetry.
      for validarray in self.procarrays:
        # TODO: check if array is unmerged i.e. not symmetry unique
        arr = validarray.expand_to_p1().customized_copy(crystal_symmetry=symm)
        arr = arr.merge_equivalents().array().set_info(validarray.info())
        arr = self.detect_Rfree(arr)
        othervalidarrays.append( arr )
      self.mprint( "MERGING 2", verbose=2)
      self.viewer.proc_arrays = othervalidarrays
      self.params.using_space_subgroup = True
    self.viewer.set_miller_array()
    for i,e in enumerate(self.spacegroup_choices):
      self.mprint("%d, %s" %(i,e.symbol_and_number()) , verbose=0)
  def SetSpaceGroupChoice(self, n):
    """GUI API: select the n'th space group choice and push updated settings."""
    self.params.spacegroup_choice = n
    self.update_settings()
  def SetDefaultSpaceGroup(self):
    """GUI API: revert to the original space group and push updated settings."""
    self.params.using_space_subgroup = False
    self.update_settings()
  def set_default_spacegroup(self):
    """Restore the originally processed arrays in the viewer and refresh scenes."""
    self.viewer.proc_arrays = self.procarrays
    self.viewer.set_miller_array()
    self.viewer.identify_suitable_fomsarrays()
  def MakeNewMillerArrayFrom(self, operation, label, arrid1, arrid2=None):
    """GUI API: record a (operation, label, arrid1, arrid2) recipe in the phil
    parameters and trigger update_settings(), which will build the new array.

    operation -- python expression string applied to one or two arrays
    label     -- label for the resulting miller array
    arrid1/2  -- indices into self.procarrays (arrid2 None/-1 for unary ops)
    """
    # get list of existing new miller arrays and operations if present
    miller_array_operations_lst = []
    #if self.params.miller_array_operations:
    #  miller_array_operations_lst = eval(self.params.miller_array_operations)
    miller_array_operations_lst = [ ( operation, label, arrid1, arrid2 ) ]
    self.params.miller_array_operations = str( miller_array_operations_lst )
    self.update_settings()
  def make_new_miller_array(self):
    """Build the new miller array described by the phil string
    self.params.miller_array_operations, append it to self.procarrays and
    self.origarrays, and notify the GUI.

    Raises Sorry when the requested label clashes with an existing one.
    NOTE(review): the phil string is eval()'d — it originates from this
    program's own GUI, but treat as trusted-input-only.
    """
    miller_array_operations_lst = eval(self.params.miller_array_operations)
    unique_miller_array_operations_lst = []
    for (operation, label, arrid1, arrid2) in miller_array_operations_lst:
      for arr in self.procarrays:
        if label in arr.info().labels + [ "", None]:
          raise Sorry("Provide an unambiguous label for your new miller array!")
      unique_miller_array_operations_lst.append( (operation, label, arrid1, arrid2) )
    self.params.miller_array_operations = str(unique_miller_array_operations_lst)
    from copy import deepcopy
    # NOTE(review): operation/label/arrid1/arrid2 below are the loop variables
    # left over from the last iteration above — only correct because the list
    # currently holds exactly one recipe; verify if multiple recipes are allowed.
    millarr1 = deepcopy(self.procarrays[arrid1])
    newarray = None
    if arrid2 != -1:
      millarr2 = deepcopy(self.procarrays[arrid2])
      newarray = self.viewer.OperateOn2MillerArrays(millarr1, millarr2, operation)
    else:
      newarray = self.viewer.OperateOn1MillerArray(millarr1, operation)
    if newarray is not None:
      self.mprint("New dataset has %d reflections." %newarray.size())
      newarray.set_info(millarr1._info )
      newarray._info.labels = [ label ]
      procarray, procarray_info = self.process_miller_array(newarray)
      self.procarrays.append(procarray)
      self.viewer.proc_arrays = self.procarrays
      self.viewer.has_new_miller_array = True
      wrap_labels = 25
      #self.viewer.SupersetMillerArrays()
      arrayinfo = ArrayInfo(procarray,wrap_labels)
      info_fmt, headerstr, infostr = arrayinfo.get_selected_info_columns_from_phil(self.params )
      self.viewer.array_info_format_tpl.append( info_fmt )
      # isanomalous and spacegroup might not have been selected for displaying so send them separatately to GUI
      self.ano_spg_tpls.append((arrayinfo.isanomalous, arrayinfo.spginf) )
      # pad the new data to the full HKL list with NaN for unmatched indices
      hkls = self.origarrays["HKLs"]
      nanarr = flex.double(len(hkls), float("nan"))
      m = miller.match_indices(hkls, procarray.indices() )
      indices_of_matched_hkls = m.pairs().column(0)
      for i,e in enumerate(indices_of_matched_hkls):
        nanarr[e] = procarray.data()[i]
      self.origarrays[label] = list(nanarr)
      mydict = { "array_infotpls": self.viewer.array_info_format_tpl,
                 "ano_spg_tpls": self.ano_spg_tpls,
                 "NewHKLscenes" : True,
                 "NewMillerArray" : True
                }
      self.SendInfoToGUI(mydict)
  def prepare_dataloading(self):
    """Reset all per-file state in self and self.viewer before loading a new
    reflection file or array list.
    """
    self.viewer.isnewfile = True
    #self.params.mergedata = None
    self.params.viewer.scene_id = None
    self.viewer.colour_scene_id = None
    self.viewer.radii_scene_id = None
    self.viewer.match_valarrays = []
    # NOTE(review): proc_arrays is reset to {} here but assigned lists
    # everywhere else in this class — presumably only emptiness matters; verify.
    self.viewer.proc_arrays = {}
    self.spacegroup_choices = []
    self.origarrays = {}
    display.reset_settings()
    self.settings = display.settings()
    self.viewer.settings = self.params.viewer
    self.viewer.mapcoef_fom_dict = {}
    self.viewer.sceneid_from_arrayid = []
    self.hklfile_history = []
    self.tncsvec = None
    self.loaded_file_name = ""
  def finish_dataloading(self, arrays):
    """Validate and register the freshly loaded miller *arrays*, print a
    formatted info table, and send array/space-group/unit-cell info to the GUI.

    Arrays lacking a unit cell or space group borrow them from the preceding
    array (common with multi-crystal cif files). Raises Sorry when the first
    array has no space group at all.
    """
    valid_arrays = []
    self.viewer.array_info_format_tpl = []
    spg = arrays[0].space_group()
    uc = arrays[0].unit_cell()
    self.ano_spg_tpls =[]
    self.mprint("%d Miller arrays in this dataset:" %len(arrays))
    spgset = set([])
    for i,array in enumerate(arrays):
      if type(array.data()) == flex.std_string: # in case of status array from a cif file
        # map each distinct string to an integer code so it can be displayed
        uniquestrings = list(set(array.data()))
        info = array.info()
        array = array.customized_copy(data=flex.int([uniquestrings.index(d) for d in array.data()]))
        array.set_info(info)
      if i>0:
        if arrays[i-1].unit_cell() is not None:
          previous_ucell = arrays[i-1].unit_cell()
        if arrays[i-1].space_group() is not None:
          previous_spg = arrays[i-1].space_group()
        # A cif file might lack unit cell or space group for all the crystals in the file
        if array.unit_cell() is None:
          array._unit_cell = previous_ucell
          self.mprint("""No unit cell present in the %d. miller array. Borrowing from previous miller array""" %i)
        if array.space_group() is None:
          symm_new = crystal.symmetry( unit_cell = previous_ucell,
                                      space_group_info = previous_spg.info()
                                      )
          info = array.info()
          array = array.customized_copy(crystal_symmetry = symm_new)
          array.set_info(info)
          self.mprint("""No space group present in the %d. miller array. Borrowing from previous miller array""" %i)
      if array.space_group() is None:
        raise Sorry("No space group definition found in the first miller array. Rendering in reciprocal space is not possible.")
      wrap_labels = 25
      arrayinfo = ArrayInfo(array,wrap_labels)
      info_fmt, headerstr, infostr = arrayinfo.get_selected_info_columns_from_phil(self.params )
      if i==0: # print formatted table of array info here
        self.mprint(headerstr)
      self.mprint(infostr)
      self.viewer.array_info_format_tpl.append( info_fmt )
      # isanomalous and spacegroup might not have been selected for displaying so send them separatately to GUI
      self.ano_spg_tpls.append((arrayinfo.isanomalous, arrayinfo.spginf) )
      spgset.add(arrayinfo.ucellinf)
      if i==0:
        # convert philstring of selected_info into a list so GUI can make a selection settings dialog
        # for what columns to show in the millertable
        colnames_select_lst = []
        for philname,selected in list(self.params.selected_info.__dict__.items()):
          if not philname.startswith("__"):
            colnames_select_lst.append((philname, arrayinfo.caption_dict[philname], selected))
        self.SendInfoToGUI({ "colnames_select_lst": colnames_select_lst })
      valid_arrays.append(array)
    self.valid_arrays = valid_arrays
    self.SendInfoToGUI({"spacegroup_info": arrayinfo.spginf, "unitcell_info": list(spgset) })
    if self.fileinfo:
      return
    self.NewFileLoaded = True
    if (len(valid_arrays) == 0):
      msg = "No arrays of the supported types present."
      self.mprint(msg)
      self.NewFileLoaded=False
    elif (len(valid_arrays) >= 1):
      self.set_miller_array()
      self.update_space_group_choices(0) # get the default spacegroup choice
      mydict = { "info": self.infostr,
                 "array_infotpls": self.viewer.array_info_format_tpl,
                 "bin_infotpls": self.viewer.bin_infotpls,
                 "ano_spg_tpls": self.ano_spg_tpls,
                 "html_url": self.viewer.url,
                 "tncsvec": self.tncsvec,
                 "merge_data": self.params.merge_data,
                 "spacegroups": [e.symbol_and_number() for e in self.spacegroup_choices],
                 "NewFileLoaded": self.NewFileLoaded,
                 "file_name": self.params.openfilename
                }
      self.SendInfoToGUI(mydict)
    self.params.openfilename = None
  def load_reflections_file(self, file_name):
    """Read a reflection file (cif, mtz, or other iotbx-supported format),
    populate self.origarrays with raw column data, and hand the resulting
    miller arrays to finish_dataloading().

    Returns True when a non-empty file name was given (even if parsing failed —
    failures are logged and leave self.NewFileLoaded False), False otherwise.
    """
    file_name = to_str(file_name)
    ret = False
    if (file_name != ""):
      try :
        self.mprint("Reading file...")
        self.prepare_dataloading()
        hkl_file = any_reflection_file(file_name)
        if hkl_file._file_type == 'cif':
          # use new cif label parser for reflections
          cifreader = hkl_file.file_content()
          cifarrays = cifreader.as_miller_arrays(merge_equivalents=False)
          arrays = []
          for arr in cifarrays:
            if arr.info().labels[-1] not in ['_refln.crystal_id', # avoid these un-displayable arrays
                      'HKLs','_refln.wavelength_id', '_refln.scale_group_code']:
              arrays.append(arr)
          # sanitise labels by removing redundant strings.
          # remove the data name of this cif file from all labels
          dataname = list(hkl_file._file_content.builder._model.keys())
          unwantedstrings = dataname[:]
          # remove "_refln." from all labels
          unwantedstrings.append("_refln.")
          unwantedstrings.append("_refln_")
          for arr in arrays:
            if len(arr.info().labels):
              newlabels = []
              for label in arr.info().labels:
                found = False
                for s in unwantedstrings:
                  if s in label:
                    newlabel = label.replace(s, "")
                    found = True
                    if len(newlabel) > 0:
                      newlabels.append(newlabel)
                    break
                if not found:
                  newlabels.append(label)
              arr.info().labels = newlabels
          ciforigarrays = cifreader.as_original_arrays()[dataname[0]]
          self.origarrays = {}
          for key in ciforigarrays:
            if key not in ['_refln.crystal_id', # avoid these un-displayable arrays
                      '_refln.wavelength_id', '_refln.scale_group_code']:
              self.origarrays[key] = ciforigarrays[key]
          # replace ? with nan in self.origarrays to allow sorting tables of data in HKLviewer
          for labl in self.origarrays.keys():
            origarray = self.origarrays[labl]
            for i,e in enumerate(self.origarrays[labl]):
              if e=="?":
                origarray[i] = "nan"
            try:
              self.origarrays[labl] = flex.double(origarray)
            except Exception as e:
              # non-numeric column: keep the raw (string) values
              self.origarrays[labl] = origarray
        else: # some other type of reflection file than cif
          arrays = hkl_file.as_miller_arrays(merge_equivalents=False)
          if hkl_file._file_type == 'ccp4_mtz':
            self.hklfile_history = list(hkl_file._file_content.history())
            self.loaded_file_name = file_name
            # pick up a tNCS vector from the mtz history records, if present
            for e in self.hklfile_history:
              if "TNCS NMOL" in e and "VECTOR" in e:
                svec = e.split()[-3:]
                t1 = float(svec[0])
                t2 = float(svec[1])
                t3 = float(svec[2])
                if (t1*t1 + t2*t2 + t3*t3) > 0.0:
                  self.tncsvec = (t1, t2, t3)
                self.mprint("tNCS vector found in header of mtz file: %s" %str(self.tncsvec) )
            from iotbx import mtz
            mtzobj = mtz.object(file_name)
            nanval = float("nan")
            self.origarrays["HKLs"] = mtzobj.extract_miller_indices()
            # store every mtz column, substituting NaN for missing values
            for mtzlbl in mtzobj.column_labels():
              col = mtzobj.get_column( mtzlbl )
              newarr = col.extract_values_and_selection_valid().values.deep_copy()
              for i,b in enumerate(col.extract_values_and_selection_valid().selection_valid):
                if not b:
                  newarr[i] = nanval
              self.origarrays[mtzlbl] = list(newarr)
        self.finish_dataloading(arrays)
      except Exception as e :
        self.NewFileLoaded=False
        self.mprint("".join(traceback.format_tb(e.__traceback__ )) + e.__repr__())
        arrays = []
      ret = True
    return ret
  def LoadReflectionsFile(self, openfilename):
    """GUI API: request loading of *openfilename* via the phil parameters."""
    self.params.openfilename = openfilename
    self.update_settings()
def load_miller_arrays(self):
ret = False
try:
self.ResetPhilandViewer(self.currentphil)
self.prepare_dataloading()
self.finish_dataloading(self.provided_miller_arrays)
ret = True
except Exception as e :
self.NewFileLoaded=False
self.mprint("".join(traceback.format_tb(e.__traceback__ )) + e.__repr__())
arrays = []
return ret
  def LoadMillerArrays(self, marrays):
    """GUI API: stash ready-made miller arrays and request that they be loaded."""
    self.provided_miller_arrays = marrays
    self.params.use_provided_miller_arrays = True
    self.update_settings()
  def SaveReflectionsFile(self, savefilename):
    """Write the currently displayed miller arrays to *savefilename*.

    Only .mtz and .cif extensions are supported. Arrays with duplicate hkl
    indices (unmerged data) cannot share a cif block and are written to
    separate, suffixed cif files. Refuses to overwrite the loaded file.
    """
    if self.loaded_file_name == savefilename:
      self.mprint("Not overwriting currently loaded file. Choose a different name!")
      return
    self.mprint("Saving file...")
    fileextension = os.path.splitext(savefilename)[1]
    if fileextension == ".mtz":
      mtz1 = self.viewer.proc_arrays[0].as_mtz_dataset(column_root_label= self.viewer.proc_arrays[0].info().labels[0])
      for i,arr in enumerate(self.viewer.proc_arrays):
        if i==0:
          continue
        mtz1.add_miller_array(arr, column_root_label=arr.info().labels[0] )
      try: # python2 or 3
        mtz1.mtz_object().write(savefilename)
      except Exception as e:
        mtz1.mtz_object().write(savefilename.encode("ascii"))
      self.mprint("Miller array(s) saved to: " + savefilename)
    elif fileextension == ".cif":
      import iotbx.cif
      mycif = None
      fname = savefilename
      fnames = []
      # helper: write one cif block to *filename* with a data_ header
      def save2cif(filename, mycif):
        with open(filename.encode("ascii"), "w") as f:
          f.write("data_%s\n#\n" %os.path.splitext(os.path.basename(filename))[0])
          print(mycif.cif_block, file= f)
      for i,arr in enumerate(self.viewer.proc_arrays):
        arrtype = None
        colnames = ["_refln.%s" %e for e in arr.info().labels ]
        colname= None
        if self.has_indices_with_multiple_data(arr):
          # if array contains data with more than one data point for the same hkl index iotbx.cif
          # cannot add additional arrays to the cif block so save this array in a separate file
          singlecif = iotbx.cif.miller_arrays_as_cif_block(arr, array_type = arrtype,
                        column_name=colname, column_names = colnames )
          fname = os.path.splitext(savefilename)[0] + "_%d"%i + os.path.splitext(savefilename)[1]
          save2cif(fname, singlecif)
          fnames.append(fname)
          continue
        if not mycif:
          mycif = iotbx.cif.miller_arrays_as_cif_block(arr, array_type = arrtype,
                        column_name=colname, column_names = colnames )
        else:
          mycif.add_miller_array(arr, column_name= colname, array_type= arrtype,
                                 column_names = colnames)
      if mycif:
        save2cif(savefilename, mycif)
        fnames.append(savefilename)
      self.mprint("Miller array(s) saved to: " + ",\n".join(fnames))
      if len(fnames) > 1:
        self.mprint("Unmerged data put into separate files")
    else:
      self.mprint("Can only save file in MTZ or CIF format. Sorry!")
def has_indices_with_multiple_data(self, arr):
return len(set(list(arr.indices()))) < arr.size()
  def tabulate_arrays(self, datalabels):
    """Assemble H, K, L, d-spacing and the requested data columns into
    self.idx_data and send them to the GUI table window.

    datalabels -- string holding a python list-of-label-lists (eval()'d below;
    it is produced by this program's own GUI).
    """
    if len(self.origarrays) == 0: # if not an mtz file then split columns
      # SupersetMillerArrays may not be necessary if file formats except for cif and mtz can't store multiple data columns
      #self.viewer.SupersetMillerArrays()
      self.origarrays["HKLs"] = self.viewer.proc_arrays[0].indices()
      for arr in self.viewer.proc_arrays:
        if arr.is_complex_array():
          ampls, phases = self.viewer.Complex2AmplitudesPhases(arr.data())
          cmplxlst = [ "%.4f + %.4f * i"%(e.real, e.imag)
                       if not cmath.isnan(e) else display.nanval for e in arr.data() ]
          self.origarrays[arr.info().label_string()] = cmplxlst
          self.origarrays[arr.info().labels[0]] = list(ampls)
          self.origarrays[arr.info().labels[-1]] = list(phases)
        elif arr.is_hendrickson_lattman_array():
          A,B,C,D = arr.data().as_abcd()
          HLlst = [ "%.4f, %.4f, %.4f, %.4f"%(e[0], e[1], e[2], e[3]) for e in arr.data() ]
          self.origarrays[arr.info().label_string()] = HLlst
          self.origarrays[arr.info().labels[0]] = list(A)
          self.origarrays[arr.info().labels[1]] = list(B)
          self.origarrays[arr.info().labels[2]] = list(C)
          self.origarrays[arr.info().labels[3]] = list(D)
        elif arr.sigmas() is not None:
          labels = arr.info().labels
          # Labels could be something like ['I(+)', 'SIGI(+)', 'I(-)', 'SIGI(-)'].
          # So group datalabels and sigmalabels separately assuming that sigma column contain the three letters "sig"
          datalabel = ",".join([ e for e in labels if "sig" not in e.lower()])
          sigmalabel = ",".join([ e for e in labels if "sig" in e.lower()])
          self.origarrays[datalabel] = list(arr.data())
          self.origarrays[sigmalabel] = list(arr.sigmas())
        elif arr.is_integer_array():
          list_with_nans = [ e if not e==display.inanval else display.nanval for e in arr.data() ]
          # NOTE(review): "[id]" indexes with the *builtin* id function — this
          # would raise TypeError if this branch is reached; looks like a bug
          # (an array index was probably intended). Confirm against callers.
          if self.viewer.array_info_format_tpl[id][0] == 'FreeR_flag': # want True or False back
            list_with_nans = [ 1==e if not cmath.isnan(e) else display.nanval for e in list_with_nans ]
          self.origarrays[arr.info().label_string()] = list_with_nans
        else:
          self.origarrays[arr.info().label_string()] = list(arr.data())
    indices = self.origarrays["HKLs"]
    dres = self.procarrays[0].unit_cell().d( indices)
    # NOTE(review): this builds a 3-tuple ("d_res", values, 3) — the trailing 3
    # was possibly meant as roundoff(list(dres), 3); verify against the GUI side.
    dreslst = [("d_res", roundoff(list(dres)),3)]
    hkls = list(indices)
    hkllst = [ ("H", [e[0] for e in hkls] ), ("K", [e[1] for e in hkls] ), ("L", [e[2] for e in hkls] )]
    datalst = []
    labellists = eval(datalabels)
    for labels in labellists:
      crystlbl = ""; wavelbl = ""; scalelbl =""
      # id columns are folded into the label rather than shown as data columns
      for i,label in enumerate(labels):
        if "crystal_id" in label:
          crystlbl = "," + label
        if "wavelength_id" in label:
          wavelbl = "," + label
        if "scale_group_code" in label:
          scalelbl = "," + label
      for label in labels:
        if "crystal_id" in label or "wavelength_id" in label or "scale_group_code" in label:
          continue
        fulllabel = label + crystlbl + wavelbl + scalelbl
        datalst.append( (label, list(self.origarrays[fulllabel])))
    self.idx_data = hkllst + dreslst + datalst
    self.mprint("Sending table data...", verbose=0)
    mydict = { "tabulate_miller_array": self.idx_data }
    self.params.tabulate_miller_array_ids = "[]" # to allow reopening a closed window again
    self.SendInfoToGUI(mydict)
  def TabulateMillerArray(self, ids):
    """GUI API: request tabulation of the arrays with the given ids."""
    self.params.tabulate_miller_array_ids = str(ids)
    self.update_settings()
  def SetCameraType(self, camtype):
    """GUI API: select the NGL camera type and push updated settings."""
    self.params.NGL.camera_type = camtype
    self.update_settings()
  def ExpandToP1(self, val, inbrowser=True):
    """GUI API: toggle P1 expansion; inbrowser selects browser-side expansion."""
    self.params.viewer.expand_to_p1 = val
    self.params.viewer.inbrowser = inbrowser
    self.update_settings()
  def ExpandAnomalous(self, val, inbrowser=True):
    """GUI API: toggle Friedel-mate expansion; inbrowser selects browser-side expansion."""
    self.params.viewer.expand_anomalous = val
    self.params.viewer.inbrowser = inbrowser
    self.update_settings()
  def ShowOnlyMissing(self, val):
    """GUI API: show only reflections missing from the data set."""
    self.params.viewer.show_only_missing = val
    self.update_settings()
  def ShowMissing(self, val):
    """GUI API: toggle display of missing reflections."""
    self.params.viewer.show_missing = val
    self.update_settings()
  def ShowDataOverSigma(self, val):
    """GUI API: toggle colouring/sizing by data/sigma ratio."""
    self.params.viewer.show_data_over_sigma = val
    self.update_settings()
  def ShowSystematicAbsences(self, val):
    """GUI API: toggle display of systematically absent reflections."""
    self.params.viewer.show_systematic_absences = val
    self.update_settings()
  def ShowSlice(self, val, axis="h", index=0):
    """GUI API: toggle slice mode showing the plane at *index* along *axis*
    (one of "h", "k", "l"; case-insensitive).
    """
    axisstr = axis.lower()
    self.params.viewer.slice_mode = val
    self.params.viewer.slice_axis = axisstr
    self.params.viewer.slice_index = index
    self.update_settings()
  def set_scene_bin_thresholds(self, strbinvals = "", binner_idx = 0, nbins = 6):
    """Set bin boundaries for the current binner.

    strbinvals -- string holding a python list of thresholds (eval()'d; comes
    from this program's own GUI); empty means compute *nbins* thresholds
    automatically. binner_idx 0 bins by resolution, hence the reciprocal below.
    """
    nuniquevalues = -1
    if not strbinvals:
      binvals, nuniquevalues = self.viewer.calc_bin_thresholds(binner_idx, nbins)
    else:
      nan = float("nan")
      binvals = eval(strbinvals)
      if binvals and binner_idx == 0:
        # user supplies d-spacings; the binner works on 1/d
        binvals = list( 1.0/flex.double(binvals) )
    self.viewer.UpdateBinValues(binner_idx, binvals, nuniquevalues)
  def SetSceneNbins(self, nbins, binner_idx = 0):
    """GUI API: choose the number of bins for *binner_idx*; all bins start opaque."""
    self.params.nbins = nbins
    self.params.binner_idx = binner_idx
    self.params.NGL.bin_opacities = str([ (1.0, e) for e in range(nbins) ])
    self.update_settings()
def GetNumberingOfBinners(self):
return [ (i,e) for i,e in enumerate(self.viewer.bin_labels_type_idxs) ]
def SetSceneBinThresholds(self, binvals=[]):
self.params.scene_bin_thresholds = str(binvals)
self.params.nbins = len(binvals)
self.update_settings()
  def SetOpacities(self, bin_opacities):
    """GUI API: set per-bin opacities (list of (alpha, bin) tuples)."""
    self.params.NGL.bin_opacities = str(bin_opacities)
    self.update_settings()
  def SetToolTipOpacity(self, val):
    """GUI API: set the alpha value of hover tooltips."""
    self.params.NGL.tooltip_alpha = val
    self.update_settings()
  def SetShowToolTips(self, val):
    """GUI API: toggle hover tooltips in the browser view."""
    self.params.NGL.show_tooltips = val
    self.update_settings()
  def set_scene(self, scene_id):
    """Make *scene_id* the displayed scene (also used for colour and radii).

    Returns False when scene_id is None, True otherwise.
    Raises Sorry when the selected scene has no miller array.
    """
    self.viewer.binvals = []
    if scene_id is None:
      return False
    self.viewer.colour_scene_id = scene_id
    self.viewer.radii_scene_id = scene_id
    self.viewer.set_miller_array(scene_id)
    if (self.viewer.miller_array is None):
      raise Sorry("No data loaded!")
    self.mprint( "Miller array %s runs from hkls: %s to %s" \
     %(self.viewer.miller_array.info().label_string(), self.viewer.miller_array.index_span().min(),
        self.viewer.miller_array.index_span().max() ) )
    self.mprint("Spacegroup: %s" %self.viewer.miller_array.space_group().info().symbol_and_number())
    self.update_space_group_choices()
    return True
  def SetScene(self, scene_id):
    """GUI API: select the scene to display and push updated settings."""
    self.params.viewer.scene_id = scene_id
    self.update_settings()
  def SetMergeData(self, val):
    """GUI API: toggle merging of symmetry-equivalent reflections."""
    self.params.merge_data = val
    self.update_settings()
  def SetColourScene(self, colourcol):
    """GUI API: choose which scene's data colours the reflections."""
    self.params.viewer.colour_scene_id = colourcol
    self.update_settings()
  def SetRadiusScene(self, radiuscol):
    """GUI API: choose which scene's data sizes the reflection radii."""
    self.params.viewer.radii_scene_id = radiuscol
    self.update_settings()
  def SetRadiiScale(self, scale=1.0, nth_power_scale = float("nan")):
    """
    Scale radii. Decrease the contrast between large and small radii with nth_root_scale < 1.0
    If nth_power_scale=0 then all radii will have the same size regardless of data values.
    If nth_power_scale=NaN an automatic power will be computed ensuring the smallest radius
    is 0.1 times the maximum radius

    scale           -- overall linear scale factor for all radii
    nth_power_scale -- exponent applied to the data values before scaling
    """
    self.params.viewer.scale = scale
    self.params.viewer.nth_power_scale_radii = nth_power_scale
    self.update_settings()
  def SetColourRadiusToSigmas(self, val):
    """GUI API: colour/size reflections by their sigma values."""
    self.params.viewer.sigma_color_radius = val
    self.update_settings()
  def SetColourScheme(self, color_scheme, color_powscale=1.0):
    """GUI API: select the colour mapping scheme and its power-scaling exponent."""
    self.params.viewer.color_scheme = color_scheme
    self.params.viewer.color_powscale = color_powscale
    self.update_settings()
  def SetShapePrimitive(self, val):
    """GUI API: draw reflections as 'spheres' or 'points'."""
    self.params.shape_primitive = val
    self.update_settings()
def set_shape_primitive(self, val):
if val == "points":
self.viewer.primitivetype = "PointBuffer"
else:
self.viewer.primitivetype = "sphereBuffer"
  def SetAction(self, val):
    """GUI API: set the action phil parameter (is_running/is_terminating/reset_view)."""
    self.params.action = val
    self.update_settings()
  def set_action(self, val):
    """Execute the action selected via phil. Returns False (after shutting the
    frame down via __exit__) for "is_terminating", True otherwise.
    """
    if val == "reset_view":
      self.viewer.SetAutoView()
    if val == "is_terminating":
      self.__exit__()
      return False
    return True
  def SetFontSize(self, val):
    """GUI API: set the browser font size (applied immediately, no phil round-trip)."""
    self.params.NGL.fontsize = val
    self.viewer.SetFontSize(val)
  def list_vectors(self):
    """Collect rotation operators, the tNCS vector (if any) and user vectors
    into self.viewer.all_vectors, clear their old screen primitives, send the
    list to the GUI and return it.
    """
    self.viewer.all_vectors = self.viewer.rotation_operators[:]
    if self.tncsvec is not None:
      uc = self.viewer.miller_array.unit_cell()
      # TNCS vector is specified in realspace fractional coordinates. Convert it to cartesian
      cartvec = list( self.tncsvec * matrix.sqr(uc.orthogonalization_matrix()) )
      ln = len(self.viewer.all_vectors)
      self.viewer.all_vectors.append( (ln, "TNCS", 0, cartvec, "", "", str(roundoff(self.tncsvec, 5)) ) )
    self.viewer.all_vectors = self.viewer.all_vectors + self.uservectors
    for (opnr, label, order, cartvec, hkl_op, hkl, abc) in self.viewer.all_vectors:
      # avoid onMessage-DrawVector in HKLJavaScripts.js misinterpreting the commas in strings like "-x,z+y,-y"
      name = label + hkl_op.replace(",", "_")
      self.viewer.RemovePrimitives(name)
    self.SendInfoToGUI( { "all_vectors": self.viewer.all_vectors } )
    return self.viewer.all_vectors
  def add_user_vector(self):
    """Create a user-defined vector from one of three phil inputs: a reciprocal
    (hkl) vector, a realspace fractional (abc) vector, or an hkl rotation
    operator whose rotation axis becomes the vector. Appends to
    self.uservectors, refreshes the vector list and clears the phil inputs.

    The vector strings come from this program's own GUI and are eval()'d after
    stripping brackets — do not feed untrusted text through these parameters.
    """
    uc = self.viewer.miller_array.unit_cell()
    ln = len(self.viewer.all_vectors)
    label = self.params.viewer.user_label
    order = 0
    try:
      hklvec = ""
      abcvec = ""
      hklop = ""
      unwantedchars = " |(|)|[|]|{|}"
      # individual characters separated by | substituted with a "" using re.sub()
      if self.params.viewer.add_user_vector_hkl not in [None, "", "()"]:
        hklvec = eval(re.sub(unwantedchars, "", self.params.viewer.add_user_vector_hkl))
        # convert into cartesian space
        cartvec = list( self.viewer.scene.renderscale*(hklvec * matrix.sqr(uc.fractionalization_matrix()).transpose()) )
      elif self.params.viewer.add_user_vector_abc not in [None, "", "()"]:
        abcvec = eval(re.sub(unwantedchars, "", self.params.viewer.add_user_vector_abc))
        # convert into cartesian space
        cartvec = list(abcvec * matrix.sqr(uc.orthogonalization_matrix()))
      elif self.params.viewer.add_user_vector_hkl_op not in [None, ""]:
        hklop = re.sub(unwantedchars, "", self.params.viewer.add_user_vector_hkl_op)
        rt = sgtbx.rt_mx(symbol=hklop, r_den=12, t_den=144)
        self.viewer.symops.append( rt ) #
        (cartvec, a, label, order) = self.viewer.GetVectorAndAngleFromRotationMx( rt.r() )
        if label:
          label = "%s-fold_%s" %(str(int(roundoff(2*math.pi/a, 0))), self.params.viewer.user_label)
          self.mprint("Rotation axis, %s, added" %label)
        if label =="" or order==0:
          self.mprint("Cannot compute a rotation axis from %s" %self.params.viewer.add_user_vector_hkl_op)
          return
      # NOTE(review): the parenthesis below groups the whole conjunction before
      # "in [None, ""]" — the result happens to equal the intended per-operand
      # test (False is not in [None, ""]), but the grouping is misleading.
      if (self.params.viewer.add_user_vector_hkl in [None, "", "()"] \
       and self.params.viewer.add_user_vector_abc in [None, "", "()"] \
       and self.params.viewer.add_user_vector_hkl_op) in [None, ""]:
        self.mprint("No vector was specified")
      self.uservectors.append( (ln, label, order, cartvec, hklop, str(hklvec), str(abcvec) ))
      self.list_vectors()
    except Exception as e:
      raise Sorry( str(e))
    self.params.viewer.add_user_vector_hkl_op = ""
    self.params.viewer.add_user_vector_hkl = ""
    self.params.viewer.add_user_vector_abc = ""
  def AddUserVector(self, hkl_op="", abc="", hkl="", label=""):
    """
    Vector can be specified as a rotation operator, say "-h-k,k,-l" subject to spacegroup constraints,
    as a fractional vector in real space or as a fractional vector in reciprocal space. If
    specified as a rotation operator the derived vector is the implicit rotation axis.
    """
    self.params.viewer.user_label = label
    self.params.viewer.add_user_vector_hkl_op = str(hkl_op)
    self.params.viewer.add_user_vector_abc = str(abc)
    self.params.viewer.add_user_vector_hkl = str(hkl)
    self.update_settings()
  def ShowRotationAxes(self, val):
    """GUI API: toggle display of symmetry rotation axes."""
    self.params.viewer.show_symmetry_rotation_axes = val
    self.update_settings()
  def ShowVector(self, i, val=True):
    """GUI API: show or hide vector number *i*."""
    self.params.viewer.show_vector = str([i, val])
    self.update_settings()
  def ShowUnitCell(self, val):
    """GUI API: toggle display of the real-space unit cell."""
    self.params.show_real_space_unit_cell = val
    self.update_settings()
  def ShowReciprocalUnitCell(self, val):
    """GUI API: toggle display of the reciprocal-space unit cell."""
    self.params.show_reciprocal_unit_cell = val
    self.update_settings()
  def SetClipPlane(self, use=True, hkldist=0.0, clipwidth=2.0):
    """GUI API: enable a clip plane at *hkldist* with thickness *clipwidth*;
    disabling sets clipwidth to None (no clipping).
    """
    if use:
      self.params.clip_plane.hkldist = hkldist
      self.params.clip_plane.clipwidth = clipwidth
      self.params.slice_mode = False
      self.params.inbrowser = True
    else:
      self.params.clip_plane.clipwidth = None
    self.update_settings()
def SinglePlaneOfReflections(self, use=True, axis="h", slice_index=0 ):
if use:
viewer.slice_axis = axis
viewer.is_parallel = False
viewer.slice_mode = True
viewer.inbrowser = False
viewer.fixorientation = "reflection_slice"
viewer.slice_index = slice_index
else:
viewer.slice_mode = False
viewer.inbrowser = True
viewer.fixorientation = "None"
self.update_settings()
def OrientVector(self, vecnr, is_parallel, val=True):
viewer.fixorientation = "None"
if val:
viewer.is_parallel = is_parallel
viewer.fixorientation = "vector"
viewer.show_vector = '[%d, True]' %vecnr
self.update_settings()
  def AnimateRotateAroundVector(self, vecnr, speed):
    """GUI API: continuously rotate the view around vector *vecnr* at *speed*."""
    self.params.clip_plane.animate_rotation_around_vector = str([vecnr, speed])
    self.update_settings()
  def RotateAroundVector(self, vecnr, dgr):
    """GUI API: rotate the view *dgr* degrees around vector *vecnr*."""
    self.params.clip_plane.angle_around_vector = str([vecnr, dgr])
    self.update_settings()
  def ShowHKL(self, hkl):
    """GUI API: highlight the reflection with miller index *hkl*."""
    self.params.viewer.show_hkl = str(hkl)
    self.update_settings()
  def SetMouseSpeed(self, trackspeed):
    """GUI API: set the NGL mouse/trackball sensitivity."""
    self.params.NGL.mouse_sensitivity = trackspeed
    self.update_settings()
  def GetMouseSpeed(self):
    """Ask the viewer to refresh the mouse sensitivity value, then return it."""
    self.viewer.GetMouseSpeed()
    return self.params.NGL.mouse_sensitivity
  def GetSpaceGroupChoices(self):
    """
    return array of strings with available subgroups of the space group

    Logs the NOREFLDATA message when no miller array is loaded; returns []
    when no choices have been computed yet.
    """
    if (self.viewer.miller_array is None) :
      self.mprint( NOREFLDATA)
    if self.spacegroup_choices:
      return [e.symbol_and_number() for e in self.spacegroup_choices]
    return []
  def SaveImageName(self, fname):
    """GUI API: render the current view to the image file *fname*."""
    self.viewer.MakeImage(fname)
  def SendCurrentPhilValues(self):
    """Send all current phil parameter values (path -> value) to the GUI, plus
    the effective nth-power radii scale when a scene is displayed.
    """
    philstrvalsdict = {}
    for e in self.currentphil.all_definitions():
      philstrvalsdict[e.path] = e.object.extract()
    mydict = { "current_phil_strings": philstrvalsdict }
    self.SendInfoToGUI(mydict)
    if self.viewer.params.viewer.scene_id is not None:
      self.SendInfoToGUI({ "used_nth_power_scale_radii": self.viewer.HKLscene_from_dict().nth_power_scale_radii })
  def GetHtmlURL(self):
    """Return the URL of the viewer's generated html page."""
    return self.viewer.url
  def GetHtmlstring(self):
    """Return the viewer's generated html page as a string."""
    return self.viewer.htmlstr
  def GetArrayInfotpls(self):
    """
    return array of tuples with information on each miller array
    """
    return self.viewer.array_info_format_tpl
def GetSceneDataLabels(self):
return [ e[3][0] for e in myHKLview.viewer.hkl_scenes_infos ]
  def GetHklScenesInfos(self):
    """
    return array of strings with information on each processed miller array
    which may have been expanded with anomalous reflections or truncated to non-anomalous reflections
    as to match the currently selected miller array
    """
    return self.viewer.hkl_scenes_infos
  def GetBinInfo(self):
    """
    return array of number of hkls and bin boundaries of the bins the current miller array data has been sorted into.
    Useful when deciding which bin of reflections to make transparent
    """
    return self.viewer.binstrs
def SendInfoToGUI(self, infodict, binary=True):
if self.guiSocketPort:
m = str(infodict).encode("utf-8")
if not binary:
self.guisocket.send( m )
else:
if type(m) is not bytes:
m = bytes(m)
bindict = zlib.compress( m )
self.guisocket.send( bindict )
masterphilstr = """
openfilename = None
.type = path
use_provided_miller_arrays = False
.type = bool
savefilename = None
.type = path
save_image_name = None
.type = path
merge_data = False
.type = bool
miller_array_operations = ''
.type = str
spacegroup_choice = 0
.type = int
using_space_subgroup = False
.type = bool
mouse_moved = False
.type = bool
real_space_unit_cell_scale_fraction = None
.type = float
reciprocal_unit_cell_scale_fraction = None
.type = float
clip_plane {
angle_around_vector = \"[0,0]\"
.type = str
animate_rotation_around_vector = \"[0,0]\"
.type = str
hkldist = 0.0
.type = float
clipwidth = None
.type = float
fractional_vector = reciprocal *realspace
.type = choice
bequiet = False
.type = bool
}
%s
scene_bin_thresholds = ''
.type = str
binner_idx = 0
.type = int
nbins = 1
.type = int(value_min=1, value_max=40)
shape_primitive = *'spheres' 'points'
.type = choice
viewer {
scene_id = None
.type = int
ncolourlabels = 6
.type = int
show_symmetry_rotation_axes = False
.type = bool
show_vector = ''
.type = str
add_user_vector_hkl_op = ""
.type = str
add_user_vector_abc = ""
.type = str
add_user_vector_hkl = ""
.type = str
user_label = ""
.type = str
show_hkl = ""
.type = str
is_parallel = False
.type = bool
fixorientation = vector reflection_slice *None
.type = choice
angle_around_XHKL_vector = 0.0
.type = float
angle_around_YHKL_vector = 0.0
.type = float
angle_around_ZHKL_vector = 0.0
.type = float
%s
}
NGL {
%s
}
action = *is_running is_terminating reset_view
.type = choice
tabulate_miller_array_ids = "[]"
.type = str
""" %(ArrayInfo.arrayinfo_phil_str, display.philstr, view_3d.ngl_philstr)
def run():
  """
  utility function for passing keyword arguments more directly to HKLViewFrame()

  Parses sys.argv: key=value pairs become keyword arguments, any bare argument
  that is an existing file becomes hklin, and a single-quoted argument (e.g. a
  browser path containing spaces) is re-joined before splitting.
  """
  #time.sleep(10) # enough for attaching debugger
  # dirty hack for parsing a file path with spaces of a browser if not using default
  args = sys.argv[1:]
  sargs = " ".join(args)
  qchar = "'"
  if sargs.find("'") > -1:
    # locate the opening and closing quote of the quoted argument
    quote1 = sargs.find(qchar)
    if sargs[ quote1 + 1:].find(qchar) < 0:
      raise Sorry("Missing quote in arguments")
    quote2 = sargs[ quote1 + 1:].find(qchar) + quote1 + 1
    space1 = sargs[ :quote1].rfind(" ")
    # extract the whole "key='a path with spaces'" token and re-append it
    arg = sargs[space1 +1: quote2 +1]
    sargs2 = sargs.replace(arg,"")
    args = sargs2.split(" ")
    arg = arg.replace("'","")
    arg = arg.replace('"',"")
    arg = arg.replace('\\', '/') # webbrowser module wants browser paths having unix forward slashes
    args.append(arg)
  kwargs = dict(arg.split('=') for arg in args if '=' in arg)
  #check if any argument is a filename
  for arg in args:
    # if so add it as a keyword argument
    if os.path.isfile(arg) and '=' not in arg:
      kwargs['hklin'] = arg
  myHKLview = HKLViewFrame(**kwargs)
  return myHKLview
# Script entry point: build an HKLViewFrame from the command-line arguments.
if __name__ == '__main__':
  run()
|
serialproxy.py | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import socket
from eventlet import patcher
from nova import exception
from nova.i18n import _
from compute_hyperv.nova import constants
# Note(lpetrut): Eventlet greenpipes are not supported on Windows. The named
# pipe handlers implemented in os-win use Windows API calls which can block
# the whole thread. In order to avoid this, those workers run in separate
# 'native' threads.
#
# As this proxy communicates with those workers via queues, the serial console
# proxy workers have to run in 'native' threads as well.
threading = patcher.original('threading')
def handle_socket_errors(func):
    """Decorator for SerialProxy methods: swallow socket.error raised by the
    wrapped method and mark the client as disconnected instead (the wrapper
    then returns None).
    """
    @functools.wraps(func)
    def _guarded(self, *args, **kwargs):
        try:
            result = func(self, *args, **kwargs)
        except socket.error:
            self._client_connected.clear()
        else:
            return result
    return _guarded
class SerialProxy(threading.Thread):
    """Daemon thread exposing an instance's serial console over TCP.

    Listens on (addr, port); for each accepted client it spawns two worker
    threads pumping bytes between the socket and the input/output queues
    wired to the instance's serial port.

    Modernized: the deprecated ``setDaemon()``/``isSet()`` calls were replaced
    with the ``daemon`` attribute and ``is_set()`` (available since Python 2.6,
    deprecated aliases since Python 3.10).
    """

    def __init__(self, instance_name, addr, port, input_queue,
                 output_queue, client_connected):
        super(SerialProxy, self).__init__()
        self.daemon = True
        self._instance_name = instance_name
        self._addr = addr
        self._port = port
        self._conn = None
        self._input_queue = input_queue
        self._output_queue = output_queue
        # threading.Event shared with the workers; cleared on disconnect.
        self._client_connected = client_connected
        self._stopped = threading.Event()

    def _setup_socket(self):
        # Create the socket before the try block: if creation itself fails
        # there is nothing to close yet (the previous code would have raised
        # an AttributeError on self._sock while handling that error).
        self._sock = socket.socket(socket.AF_INET,
                                   socket.SOCK_STREAM)
        try:
            self._sock.setsockopt(socket.SOL_SOCKET,
                                  socket.SO_REUSEADDR,
                                  1)
            self._sock.bind((self._addr, self._port))
            self._sock.listen(1)
        except socket.error as err:
            self._sock.close()
            msg = (_('Failed to initialize serial proxy on '
                     '%(addr)s:%(port)s, handling connections '
                     'to instance %(instance_name)s. Error: %(error)s') %
                   {'addr': self._addr,
                    'port': self._port,
                    'instance_name': self._instance_name,
                    'error': err})
            raise exception.NovaException(msg)

    def stop(self):
        """Stop the accept loop and close the client and listening sockets."""
        self._stopped.set()
        self._client_connected.clear()
        if self._conn:
            self._conn.shutdown(socket.SHUT_RDWR)
            self._conn.close()
        self._sock.close()

    def run(self):
        """Thread body: accept client connections until stop() is called."""
        self._setup_socket()
        while not self._stopped.is_set():
            self._accept_conn()

    @handle_socket_errors
    def _accept_conn(self):
        """Serve one client: spawn reader/writer workers and wait for them."""
        self._conn, client_addr = self._sock.accept()
        self._client_connected.set()
        workers = []
        for job in [self._get_data, self._send_data]:
            worker = threading.Thread(target=job)
            worker.daemon = True
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker_running = (worker.is_alive() and
                              worker is not threading.current_thread())
            if worker_running:
                worker.join()
        self._conn.close()
        self._conn = None

    @handle_socket_errors
    def _get_data(self):
        """Pump bytes from the client socket into the input queue; an empty
        recv means the client disconnected."""
        while self._client_connected.is_set():
            data = self._conn.recv(constants.SERIAL_CONSOLE_BUFFER_SIZE)
            if not data:
                self._client_connected.clear()
                return
            self._input_queue.put(data)

    @handle_socket_errors
    def _send_data(self):
        """Pump bytes from the output queue to the client socket."""
        while self._client_connected.is_set():
            data = self._output_queue.get_burst()
            if data:
                self._conn.sendall(data)
|
pgrep_v2.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Process, Value, Queue, Semaphore
import argparse
import re
import os
import sys
q = Queue()          # queue of file names that have been processed
sem = Semaphore(1)   # guards updates to the shared counter `n`
n = Value('i', 0)    # shared counter: total number of matching lines

def pgrep(list_files, text_pattern):
    """Return the number of lines where the text appears.

    Args:
        list_files(list): The files that are going to be used.
        text_pattern(list): The text patterns to look for.

    Returns:
        The shared multiprocessing.Value whose ``value`` accumulates the
        number of lines in which a pattern appears.

    Raise:
        SystemExit: If any of the input files doesn't exist.
    """
    num_linhas = 0
    for file in list_files:
        if not os.path.isfile(file):
            # Fix: the original wrote `print('...') % file`, which applies
            # `%` to print()'s return value (None) and raises TypeError
            # under Python 3.
            print('O Ficheiro %s não existe' % file)
            sys.exit(0)
        q.put(file)
        # Fix: read the file once.  The original called f.readlines()
        # inside the pattern loop, so every pattern after the first saw
        # an exhausted file and could never match.
        with open(file, 'r') as f:
            linhas = f.readlines()
        for text in (text_pattern or []):
            for linha in linhas:
                if re.search(text, linha) is not None:
                    # counts the number of lines in which the pattern appears
                    num_linhas += 1
    # Fix: serialize updates to the shared counter -- `sem` existed but was
    # never used, so concurrent workers could lose increments.
    with sem:
        n.value += num_linhas
    return n
if __name__ == '__main__':
    # Command-line arguments
    parser = argparse.ArgumentParser(prog='pgrep', description='Search files for matching words')
    # -p: optional worker-process count; with a bare `-p` the const True is
    # appended, so args.process[0] may be True (int(True) == 1 below).
    parser.add_argument('-p', metavar='n', nargs='?', action='append', dest='process', const=True)
    parser.add_argument('ficheiros', nargs='*')
    parser.add_argument('-t', nargs='*', dest="text")
    args = parser.parse_args()
    # Argument validation: prompt until at least one file name is supplied
    p = False
    process_list = list()
    list_files = [i for i in args.ficheiros]
    while len(list_files) == 0:
        files = input("Indique o/(s) nome/(s) do/(s) ficheiro/(s): ")
        list_files = files.split(' ')
    if args.process is None:  # if number of processes not given
        newP = Process(target=pgrep, args=(list_files, args.text))
        newP.start()
    else:
        p = True
        for i in range(int(args.process[0])):
            newP = Process(target=pgrep, args=(list_files, args.text))
            process_list.append(newP)
            newP.start()
    # Wait for the spawned workers (no-op when -p was not given; note the
    # single unjoined process in that branch).
    for p in process_list:
        p.join()
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
from test import test_support
import asyncore
import socket
import select
import time
import gc
import os
import errno
import pprint
import urllib, urlparse
import traceback
import weakref
import functools
import platform
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
# Skip the whole test module gracefully when the ssl module is unavailable.
ssl = test_support.import_module("ssl")

HOST = test_support.HOST
# Certificate paths; presumably filled in by the test-module setup code
# outside this excerpt -- TODO confirm.
CERTFILE = None
SVN_PYTHON_ORG_ROOT_CERT = None
def handle_error(prefix):
    """Write the current exception's traceback, prefixed, in verbose runs."""
    if test_support.verbose:
        parts = traceback.format_exception(*sys.exc_info())
        sys.stdout.write(prefix + ' '.join(parts))
class BasicTests(unittest.TestCase):
    """Smoke test for the legacy sslwrap_simple() API (Python 2 only)."""

    def test_sslwrap_simple(self):
        # A crude test for the legacy API
        try:
            ssl.sslwrap_simple(socket.socket(socket.AF_INET))
        except IOError, e:
            if e.errno == 32:  # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise
        try:
            # Same call on the raw _sock object underneath the socket wrapper.
            ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
        except IOError, e:
            if e.errno == 32:  # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    """Decorator: skip *func* on Debian/Ubuntu builds whose patched OpenSSL
    refuses SSLv2, which would otherwise make the SSLv2 tests fail."""
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        # We need to access the lower-level wrapper in order to create an
        # implicit SSL context without trying to connect or listen.
        try:
            import _ssl
        except ImportError:
            # The returned function won't get executed, just ignore the error
            pass
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                s = socket.socket(socket.AF_INET)
                _ssl.sslwrap(s._sock, 0, None, None,
                             ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None)
            except ssl.SSLError as e:
                # Only skip on the specific patched-OpenSSL signature.
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')
                    and 'Invalid SSL protocol variant specified' in str(e)):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        # No SSLv2 support compiled in: nothing to guard against.
        return func
class BasicSocketTests(unittest.TestCase):
    """Tests of the ssl module that need no live TLS peer (mostly)."""

    def test_constants(self):
        # Touch the constants; a missing one raises AttributeError.
        #ssl.PROTOCOL_SSLv2
        ssl.PROTOCOL_SSLv23
        ssl.PROTOCOL_SSLv3
        ssl.PROTOCOL_TLSv1
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED

    def test_random(self):
        # RAND_status / RAND_add smoke tests; RAND_egd must reject an int.
        v = ssl.RAND_status()
        if test_support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))
        try:
            ssl.RAND_egd(1)
        except TypeError:
            pass
        else:
            print "didn't raise TypeError"
        ssl.RAND_add("this is a random string", 75.0)

    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE, False)
        if test_support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                         )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        # NOTE(review): NOKIACERT is defined outside this excerpt.
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if test_support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                         )

    def test_DER_to_PEM(self):
        # Round-trip PEM -> DER -> PEM and verify stability and framing.
        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)

    def test_openssl_version(self):
        # Sanity-check the three OpenSSL version exports and their coherence.
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, (int, long))
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 2.0
        self.assertLess(n, 0x20000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 2)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 26)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by OpenSSL, the format might change
        self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                        (s, t))

    def test_ciphers(self):
        # Needs the 'network' resource; exercises the ciphers= argument.
        if not test_support.is_resource_enabled('network'):
            return
        remote = ("svn.python.org", 443)
        with test_support.transient_internet(remote[0]):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="ALL")
            s.connect(remote)
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")
            s.connect(remote)
            # Error checking occurs when connecting, because the SSL context
            # isn't created before.
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
            with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
                s.connect(remote)

    @test_support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        del ss
        self.assertEqual(wr(), None)

    def test_wrapped_unconnected(self):
        # The _delegate_methods in socket.py are correctly delegated to by an
        # unconnected SSLSocket, so they will raise a socket.error rather than
        # something unexpected like TypeError.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        self.assertRaises(socket.error, ss.recv, 1)
        self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
        self.assertRaises(socket.error, ss.recvfrom, 1)
        self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
        self.assertRaises(socket.error, ss.send, b'x')
        self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
class NetworkedTests(unittest.TestCase):
    """Tests that talk to real remote hosts, guarded by transient_internet()."""

    def test_connect(self):
        with test_support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            s.connect(("svn.python.org", 443))
            c = s.getpeercert()
            if c:
                self.fail("Peer cert %s shouldn't be here!")
            s.close()

            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            try:
                s.connect(("svn.python.org", 443))
            except ssl.SSLError:
                pass
            finally:
                s.close()

            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with test_support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with test_support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLError as err:
                        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                            select.select([s], [], [], 5.0)
                        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                            select.select([], [s], [], 5.0)
                        else:
                            raise
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with test_support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        with test_support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLError, err:
                    if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                        select.select([s], [], [])
                    elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                        select.select([], [s], [])
                    else:
                        raise
            s.close()
            if test_support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        with test_support.transient_internet("svn.python.org"):
            pem = ssl.get_server_certificate(("svn.python.org", 443))
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")

            try:
                # Verification against an unrelated CA must fail.
                pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE)
            except ssl.SSLError:
                #should fail
                pass
            else:
                self.fail("Got server certificate %s for svn.python.org!" % pem)

            pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")
            if test_support.verbose:
                sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # NOTE: https://sha256.tbs-internet.com is another possible test host
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with test_support.transient_internet("sha256.tbs-internet.com"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=sha256_cert,)
            try:
                s.connect(remote)
                if test_support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()
# The thread-based servers below are only defined when threading exists.
try:
    import threading
except ImportError:
    _have_threads = False
else:
    _have_threads = True
class ThreadedEchoServer(threading.Thread):
    """Echo server in its own thread; one ConnectionHandler thread per
    accepted connection.  Understands the STARTTLS/ENDTLS test protocol."""

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock):
            self.server = server
            self.running = False
            self.sock = connsock
            self.sock.setblocking(1)
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def show_conn_details(self):
            # Dump peer cert and cipher details when the server is chatty.
            if self.server.certreqs == ssl.CERT_REQUIRED:
                cert = self.sslconn.getpeercert()
                if test_support.verbose and self.server.chatty:
                    sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                cert_binary = self.sslconn.getpeercert(True)
                if test_support.verbose and self.server.chatty:
                    sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
            cipher = self.sslconn.cipher()
            if test_support.verbose and self.server.chatty:
                sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")

        def wrap_conn(self):
            # Wrap the plain socket in SSL; report failure to the server.
            try:
                self.sslconn = ssl.wrap_socket(self.sock, server_side=True,
                                               certfile=self.server.certificate,
                                               ssl_version=self.server.protocol,
                                               ca_certs=self.server.cacerts,
                                               cert_reqs=self.server.certreqs,
                                               ciphers=self.server.ciphers)
            except ssl.SSLError as e:
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server:  bad connection attempt from " +
                                 str(self.sock.getpeername()) + ":\n")
                self.close()
                self.running = False
                self.server.stop()
                return False
            else:
                return True

        def read(self):
            # Read from the SSL wrapper when present, else the raw socket.
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock._sock.close()

        def run(self):
            self.running = True
            if not self.server.starttls_server:
                if isinstance(self.sock, ssl.SSLSocket):
                    self.sslconn = self.sock
                elif not self.wrap_conn():
                    return
                self.show_conn_details()
            while self.running:
                try:
                    msg = self.read()
                    if not msg:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif msg.strip() == 'over':
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif self.server.starttls_server and msg.strip() == 'STARTTLS':
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write("OK\n")
                        if not self.wrap_conn():
                            return
                    elif self.server.starttls_server and self.sslconn and msg.strip() == 'ENDTLS':
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write("OK\n")
                        self.sslconn.unwrap()
                        self.sslconn = None
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    else:
                        # Default: echo the message back lower-cased.
                        if (test_support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %s (%s), sending back %s (%s)...\n"
                                             % (repr(msg), ctype, repr(msg.lower()), ctype))
                        self.write(msg.lower())
                except ssl.SSLError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 wrap_accepting_socket=False, ciphers=None):
        if ssl_version is None:
            ssl_version = ssl.PROTOCOL_TLSv1
        if certreqs is None:
            certreqs = ssl.CERT_NONE
        self.certificate = certificate
        self.protocol = ssl_version
        self.certreqs = certreqs
        self.cacerts = cacerts
        self.ciphers = ciphers
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.flag = None
        if wrap_accepting_socket:
            self.sock = ssl.wrap_socket(self.sock, server_side=True,
                                        certfile=self.certificate,
                                        cert_reqs = self.certreqs,
                                        ca_certs = self.cacerts,
                                        ssl_version = self.protocol,
                                        ciphers = self.ciphers)
            if test_support.verbose and self.chatty:
                sys.stdout.write(' server:  wrapped server socket as %s\n' % str(self.sock))
        self.port = test_support.bind_port(self.sock)
        self.active = False
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Accept loop with a short timeout so stop() is noticed promptly.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if test_support.verbose and self.chatty:
                    sys.stdout.write(' server:  new connection from '
                                     + str(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    """Echo server driven by asyncore in a background thread."""

    class EchoServer(asyncore.dispatcher):

        class ConnectionHandler(asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                asyncore.dispatcher_with_send.__init__(self, conn)
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                self._ssl_accepting = True

            def readable(self):
                # Drain data buffered inside the SSL layer before selecting.
                if isinstance(self.socket, ssl.SSLSocket):
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                try:
                    self.socket.do_handshake()
                except ssl.SSLError, err:
                    if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                       ssl.SSL_ERROR_WANT_WRITE):
                        return
                    elif err.args[0] == ssl.SSL_ERROR_EOF:
                        return self.handle_close()
                    raise
                except socket.error, err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if data and data.strip() != 'over':
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if test_support.verbose:
                    sys.stdout.write(" server:  closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = test_support.bind_port(self.socket)
            self.listen(5)

        def handle_accept(self):
            sock_obj, addr = self.accept()
            if test_support.verbose:
                sys.stdout.write(" server:  new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if test_support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if test_support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if test_support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            asyncore.loop(0.05)

    def stop(self):
        self.active = False
        self.server.close()
class SocketServerHTTPSServer(threading.Thread):
    """HTTPS file server in a background thread, rooted at CERTFILE's dir."""

    class HTTPSServer(HTTPServer):

        def __init__(self, server_address, RequestHandlerClass, certfile):
            HTTPServer.__init__(self, server_address, RequestHandlerClass)
            # we assume the certfile contains both private key and certificate
            self.certfile = certfile
            self.allow_reuse_address = True

        def __str__(self):
            return ('<%s %s:%s>' %
                    (self.__class__.__name__,
                     self.server_name,
                     self.server_port))

        def get_request(self):
            # override this to wrap socket with SSL
            sock, addr = self.socket.accept()
            sslconn = ssl.wrap_socket(sock, server_side=True,
                                      certfile=self.certfile)
            return sslconn, addr

    class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
        # need to override translate_path to get a known root,
        # instead of using os.curdir, since the test could be
        # run from anywhere
        server_version = "TestHTTPS/1.0"
        root = None

        def translate_path(self, path):
            """Translate a /-separated PATH to the local filename syntax.

            Components that mean special things to the local file system
            (e.g. drive or directory names) are ignored.  (XXX They should
            probably be diagnosed.)
            """
            # abandon query parameters
            path = urlparse.urlparse(path)[2]
            path = os.path.normpath(urllib.unquote(path))
            words = path.split('/')
            words = filter(None, words)
            path = self.root
            for word in words:
                drive, word = os.path.splitdrive(word)
                head, word = os.path.split(word)
                if word in self.root: continue
                path = os.path.join(path, word)
            return path

        def log_message(self, format, *args):
            # we override this to suppress logging unless "verbose"
            if test_support.verbose:
                sys.stdout.write(" server (%s:%d %s):\n   [%s] %s\n" %
                                 (self.server.server_address,
                                  self.server.server_port,
                                  self.request.cipher(),
                                  self.log_date_time_string(),
                                  format%args))

    def __init__(self, certfile):
        self.flag = None
        self.RootedHTTPRequestHandler.root = os.path.split(CERTFILE)[0]
        self.server = self.HTTPSServer(
            (HOST, 0), self.RootedHTTPRequestHandler, certfile)
        self.port = self.server.server_port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        if self.flag:
            self.flag.set()
        self.server.serve_forever(0.05)

    def stop(self):
        self.server.shutdown()
def bad_cert_test(certfile):
    """
    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with the given client certificate fails.
    """
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False)
    with server:
        try:
            s = ssl.wrap_socket(socket.socket(),
                                certfile=certfile,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
        except ssl.SSLError, x:
            # Expected outcome: the bad client cert is rejected.
            if test_support.verbose:
                sys.stdout.write("\nSSLError is %s\n" % x[1])
        except socket.error, x:
            # Also acceptable: the server may just drop the connection.
            if test_support.verbose:
                sys.stdout.write("\nsocket.error is %s\n" % x[1])
        else:
            raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(certfile, protocol, certreqs, cacertsfile,
                       client_certfile, client_protocol=None, indata="FOO\n",
                       ciphers=None, chatty=True, connectionchatty=False,
                       wrap_accepting_socket=False):
    """
    Launch a server, connect a client to it and try various reads
    and writes.
    """
    server = ThreadedEchoServer(certfile,
                                certreqs=certreqs,
                                ssl_version=protocol,
                                cacerts=cacertsfile,
                                ciphers=ciphers,
                                chatty=chatty,
                                connectionchatty=connectionchatty,
                                wrap_accepting_socket=wrap_accepting_socket)
    with server:
        # try to connect
        if client_protocol is None:
            client_protocol = protocol
        s = ssl.wrap_socket(socket.socket(),
                            certfile=client_certfile,
                            ca_certs=cacertsfile,
                            ciphers=ciphers,
                            cert_reqs=certreqs,
                            ssl_version=client_protocol)
        s.connect((HOST, server.port))
        # Send as str, bytearray and memoryview; server echoes lower-cased.
        for arg in [indata, bytearray(indata), memoryview(indata)]:
            if connectionchatty:
                if test_support.verbose:
                    sys.stdout.write(
                        " client:  sending %s...\n" % (repr(arg)))
            s.write(arg)
            outdata = s.read()
            if connectionchatty:
                if test_support.verbose:
                    sys.stdout.write(" client:  read %s\n" % repr(outdata))
            if outdata != indata.lower():
                raise AssertionError(
                    "bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
                    % (outdata[:min(len(outdata),20)], len(outdata),
                       indata[:min(len(indata),20)].lower(), len(indata)))
        s.write("over\n")
        if connectionchatty:
            if test_support.verbose:
                sys.stdout.write(" client:  closing connection.\n")
        s.close()
def try_protocol_combo(server_protocol,
                       client_protocol,
                       expect_success,
                       certsreqs=None):
    """Run server_params_test with the given protocol pair and assert that
    it succeeds or fails as *expect_success* says."""
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if test_support.verbose:
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    try:
        # NOTE: we must enable "ALL" ciphers, otherwise an SSLv23 client
        # will send an SSLv3 hello (rather than SSLv2) starting from
        # OpenSSL 1.0.0 (see issue #8322).
        server_params_test(CERTFILE, server_protocol, certsreqs,
                           CERTFILE, CERTFILE, client_protocol,
                           ciphers="ALL", chatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except socket.error as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
    def test_rude_shutdown(self):
        """A brutal shutdown of an SSL server should raise an IOError
        in the client when attempting handshake.
        """
        listener_ready = threading.Event()
        listener_gone = threading.Event()
        s = socket.socket()
        port = test_support.bind_port(s, HOST)

        # `listener` runs in a thread.  It sits in an accept() until
        # the main thread connects.  Then it rudely closes the socket,
        # and sets Event `listener_gone` to let the main thread know
        # the socket is gone.
        def listener():
            s.listen(5)
            listener_ready.set()
            s.accept()
            s.close()
            listener_gone.set()

        def connector():
            listener_ready.wait()
            c = socket.socket()
            c.connect((HOST, port))
            listener_gone.wait()
            # XXX why is it necessary?
            test_support.gc_collect()
            try:
                ssl_sock = ssl.wrap_socket(c)
            except IOError:
                pass
            else:
                self.fail('connecting to closed SSL socket should have failed')

        t = threading.Thread(target=listener)
        t.start()
        try:
            connector()
        finally:
            t.join()
    @skip_if_broken_ubuntu_ssl
    def test_echo(self):
        """Basic test of an SSL client connecting to a server"""
        if test_support.verbose:
            sys.stdout.write("\n")
        server_params_test(CERTFILE, ssl.PROTOCOL_TLSv1, ssl.CERT_NONE,
                           CERTFILE, CERTFILE, ssl.PROTOCOL_TLSv1,
                           chatty=True, connectionchatty=True)
    def test_getpeercert(self):
        """The client must be able to retrieve and inspect the server cert."""
        if test_support.verbose:
            sys.stdout.write("\n")
        # NOTE(review): s2 appears unused -- looks like leftover code.
        s2 = socket.socket()
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_SSLv23,
                                    cacerts=CERTFILE,
                                    chatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_REQUIRED,
                                ssl_version=ssl.PROTOCOL_SSLv23)
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
            cipher = s.cipher()
            if test_support.verbose:
                sys.stdout.write(pprint.pformat(cert) + '\n')
                sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
            if 'subject' not in cert:
                self.fail("No subject field in certificate: %s." %
                          pprint.pformat(cert))
            if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
                self.fail(
                    "Missing or invalid 'organizationName' field in certificate subject; "
                    "should be 'Python Software Foundation'.")
            s.close()
    def test_empty_cert(self):
        """Connecting with an empty cert file"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "nullcert.pem"))
    def test_malformed_cert(self):
        """Connecting with a badly formatted certificate (syntax error)"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "badcert.pem"))
    def test_nonexisting_cert(self):
        """Connecting with a non-existing cert file"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "wrongcert.pem"))
    def test_malformed_key(self):
        """Connecting with a badly formatted key (syntax error)"""
        bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
                                   "badkey.pem"))
    @skip_if_broken_ubuntu_ssl
    def test_protocol_sslv2(self):
        """Connecting to an SSLv2 server with various client options"""
        if test_support.verbose:
            sys.stdout.write("\n")
        if not hasattr(ssl, 'PROTOCOL_SSLv2'):
            self.skipTest("PROTOCOL_SSLv2 needed")
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True)
        # SSLv3/TLSv1 clients must not connect to an SSLv2-only server.
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
    """Connecting to an SSLv23 server with various client options"""
    if test_support.verbose:
        sys.stdout.write("\n")
    client_protocols = (ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1)
    # An auto-negotiating server accepts every client protocol, first with
    # the default cert requirement, then with OPTIONAL and REQUIRED.
    for client_protocol in client_protocols:
        try_protocol_combo(ssl.PROTOCOL_SSLv23, client_protocol, True)
    for certreq in (ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
        for client_protocol in client_protocols:
            try_protocol_combo(ssl.PROTOCOL_SSLv23, client_protocol, True, certreq)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv3(self):
    """Connecting to an SSLv3 server with various client options"""
    if test_support.verbose:
        sys.stdout.write("\n")
    # Matching SSLv3 client succeeds under every cert requirement.
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
    for certreq in (ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, certreq)
    # Mismatched protocol versions must fail.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
    """Connecting to a TLSv1 server with various client options"""
    if test_support.verbose:
        sys.stdout.write("\n")
    # Matching TLSv1 client succeeds under every cert requirement.
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
    for certreq in (ssl.CERT_OPTIONAL, ssl.CERT_REQUIRED):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, certreq)
    # Mismatched protocol versions must fail.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
    try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
def test_starttls(self):
    """Switching from clear text to encrypted and back again."""
    # The ThreadedEchoServer peer speaks the pseudo-protocol used below:
    # "STARTTLS" asks it to wrap the connection, "ENDTLS" to unwrap it
    # again, and "over" terminates the session.  Message order matters.
    msgs = ("msg 1", "MSG 2", "STARTTLS", "MSG 3", "msg 4", "ENDTLS", "msg 5", "msg 6")
    server = ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_TLSv1,
                                starttls_server=True,
                                chatty=True,
                                connectionchatty=True)
    # Tracks whether the connection is currently TLS-wrapped.
    wrapped = False
    with server:
        s = socket.socket()
        s.setblocking(1)
        s.connect((HOST, server.port))
        if test_support.verbose:
            sys.stdout.write("\n")
        for indata in msgs:
            if test_support.verbose:
                sys.stdout.write(
                    " client: sending %s...\n" % repr(indata))
            # Use the TLS object while wrapped, the raw socket otherwise.
            if wrapped:
                conn.write(indata)
                outdata = conn.read()
            else:
                s.send(indata)
                outdata = s.recv(1024)
            if (indata == "STARTTLS" and
                outdata.strip().lower().startswith("ok")):
                # STARTTLS ok, switch to secure mode
                if test_support.verbose:
                    sys.stdout.write(
                        " client: read %s from server, starting TLS...\n"
                        % repr(outdata))
                conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                wrapped = True
            elif (indata == "ENDTLS" and
                outdata.strip().lower().startswith("ok")):
                # ENDTLS ok, switch back to clear text
                if test_support.verbose:
                    sys.stdout.write(
                        " client: read %s from server, ending TLS...\n"
                        % repr(outdata))
                s = conn.unwrap()
                wrapped = False
            else:
                if test_support.verbose:
                    sys.stdout.write(
                        " client: read %s from server\n" % repr(outdata))
        if test_support.verbose:
            sys.stdout.write(" client: closing connection.\n")
        # End the session over whichever channel is currently active.
        if wrapped:
            conn.write("over\n")
        else:
            s.send("over\n")
        s.close()
def test_socketserver(self):
    """Using a SocketServer to create and manage SSL connections."""
    server = SocketServerHTTPSServer(CERTFILE)
    flag = threading.Event()
    server.start(flag)
    # wait for it to start
    flag.wait()
    # try to connect
    try:
        if test_support.verbose:
            sys.stdout.write('\n')
        # Read the certificate file locally so it can be compared with what
        # the HTTPS server sends back when asked for the same path.
        with open(CERTFILE, 'rb') as f:
            d1 = f.read()
        d2 = ''
        # now fetch the same data from the HTTPS server
        url = 'https://127.0.0.1:%d/%s' % (
            server.port, os.path.split(CERTFILE)[1])
        with test_support.check_py3k_warnings():
            f = urllib.urlopen(url)
        dlen = f.info().getheader("content-length")
        if dlen and (int(dlen) > 0):
            d2 = f.read(int(dlen))
            if test_support.verbose:
                sys.stdout.write(
                    " client: read %d bytes from remote server '%s'\n"
                    % (len(d2), server))
        f.close()
        # The bytes served over HTTPS must match the on-disk file.
        self.assertEqual(d1, d2)
    finally:
        # Always shut the server thread down, even on failure.
        server.stop()
        server.join()
def test_wrapped_accept(self):
    """Check the accept() method on SSL sockets."""
    if test_support.verbose:
        sys.stdout.write("\n")
    # Wrap the *listening* socket (wrap_accepting_socket) instead of each
    # accepted connection; the handshake parameters stay the same.
    options = dict(chatty=True, connectionchatty=True,
                   wrap_accepting_socket=True)
    server_params_test(CERTFILE, ssl.PROTOCOL_SSLv23, ssl.CERT_REQUIRED,
                       CERTFILE, CERTFILE, ssl.PROTOCOL_SSLv23, **options)
def test_asyncore_server(self):
    """Check the example asyncore integration."""
    indata = "TEST MESSAGE of mixed case\n"
    if test_support.verbose:
        sys.stdout.write("\n")
    server = AsyncoreEchoServer(CERTFILE)
    with server:
        s = ssl.wrap_socket(socket.socket())
        s.connect(('127.0.0.1', server.port))
        if test_support.verbose:
            sys.stdout.write(
                " client: sending %s...\n" % (repr(indata)))
        s.write(indata)
        outdata = s.read()
        if test_support.verbose:
            sys.stdout.write(" client: read %s\n" % repr(outdata))
        # The echo server lower-cases whatever it receives.
        if outdata != indata.lower():
            self.fail(
                "bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
                % (outdata[:min(len(outdata),20)], len(outdata),
                   indata[:min(len(indata),20)].lower(), len(indata)))
        # "over" tells the peer to close its side of the session.
        s.write("over\n")
        if test_support.verbose:
            sys.stdout.write(" client: closing connection.\n")
        s.close()
def test_recv_send(self):
    """Test recv(), send() and friends."""
    # Exercises the full socket-API surface of an SSLSocket: plain
    # send/recv must work, while the address-taking variants
    # (sendto/recvfrom/recvfrom_into) must raise ValueError mentioning
    # the method name.
    if test_support.verbose:
        sys.stdout.write("\n")
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1,
                                cacerts=CERTFILE,
                                chatty=True,
                                connectionchatty=False)
    with server:
        s = ssl.wrap_socket(socket.socket(),
                            server_side=False,
                            certfile=CERTFILE,
                            ca_certs=CERTFILE,
                            cert_reqs=ssl.CERT_NONE,
                            ssl_version=ssl.PROTOCOL_TLSv1)
        s.connect((HOST, server.port))
        # helper methods for standardising recv* method signatures
        def _recv_into():
            b = bytearray("\0"*100)
            count = s.recv_into(b)
            return b[:count]
        def _recvfrom_into():
            b = bytearray("\0"*100)
            count, addr = s.recvfrom_into(b)
            return b[:count]
        # (name, method, whether to expect success, *args)
        send_methods = [
            ('send', s.send, True, []),
            ('sendto', s.sendto, False, ["some.address"]),
            ('sendall', s.sendall, True, []),
        ]
        recv_methods = [
            ('recv', s.recv, True, []),
            ('recvfrom', s.recvfrom, False, ["some.address"]),
            ('recv_into', _recv_into, True, []),
            ('recvfrom_into', _recvfrom_into, False, []),
        ]
        data_prefix = u"PREFIX_"
        # Each send method gets a unique payload; the echo server returns
        # it lower-cased so a mismatch pinpoints the failing method.
        for meth_name, send_meth, expect_success, args in send_methods:
            indata = data_prefix + meth_name
            try:
                send_meth(indata.encode('ASCII', 'strict'), *args)
                outdata = s.read()
                outdata = outdata.decode('ASCII', 'strict')
                if outdata != indata.lower():
                    self.fail(
                        "While sending with <<%s>> bad data "
                        "<<%r>> (%d) received; "
                        "expected <<%r>> (%d)\n" % (
                            meth_name, outdata[:20], len(outdata),
                            indata[:20], len(indata)
                        )
                    )
            except ValueError as e:
                if expect_success:
                    self.fail(
                        "Failed to send with method <<%s>>; "
                        "expected to succeed.\n" % (meth_name,)
                    )
                if not str(e).startswith(meth_name):
                    self.fail(
                        "Method <<%s>> failed with unexpected "
                        "exception message: %s\n" % (
                            meth_name, e
                        )
                    )
        # Same pattern on the receive side.
        for meth_name, recv_meth, expect_success, args in recv_methods:
            indata = data_prefix + meth_name
            try:
                s.send(indata.encode('ASCII', 'strict'))
                outdata = recv_meth(*args)
                outdata = outdata.decode('ASCII', 'strict')
                if outdata != indata.lower():
                    self.fail(
                        "While receiving with <<%s>> bad data "
                        "<<%r>> (%d) received; "
                        "expected <<%r>> (%d)\n" % (
                            meth_name, outdata[:20], len(outdata),
                            indata[:20], len(indata)
                        )
                    )
            except ValueError as e:
                if expect_success:
                    self.fail(
                        "Failed to receive with method <<%s>>; "
                        "expected to succeed.\n" % (meth_name,)
                    )
                if not str(e).startswith(meth_name):
                    self.fail(
                        "Method <<%s>> failed with unexpected "
                        "exception message: %s\n" % (
                            meth_name, e
                        )
                    )
                # consume data
                s.read()
        s.write("over\n".encode("ASCII", "strict"))
        s.close()
def test_handshake_timeout(self):
    # Issue #5103: SSL handshake must respect the socket timeout
    server = socket.socket(socket.AF_INET)
    host = "127.0.0.1"
    port = test_support.bind_port(server)
    started = threading.Event()
    finish = False
    def serve():
        # Accept raw TCP connections but never speak TLS, so the client's
        # handshake can only end by timing out.
        server.listen(5)
        started.set()
        conns = []
        while not finish:
            r, w, e = select.select([server], [], [], 0.1)
            if server in r:
                # Let the socket hang around rather than having
                # it closed by garbage collection.
                conns.append(server.accept()[0])
    t = threading.Thread(target=serve)
    t.start()
    started.wait()
    try:
        # Case 1: connect first, then wrap — handshake happens inside
        # wrap_socket and must honour the 0.2s timeout.
        try:
            c = socket.socket(socket.AF_INET)
            c.settimeout(0.2)
            c.connect((host, port))
            # Will attempt handshake and time out
            self.assertRaisesRegexp(ssl.SSLError, "timed out",
                                    ssl.wrap_socket, c)
        finally:
            c.close()
        # Case 2: wrap first, then connect — handshake happens inside
        # SSLSocket.connect and must also time out.
        try:
            c = socket.socket(socket.AF_INET)
            c.settimeout(0.2)
            c = ssl.wrap_socket(c)
            # Will attempt handshake and time out
            self.assertRaisesRegexp(ssl.SSLError, "timed out",
                                    c.connect, (host, port))
        finally:
            c.close()
    finally:
        # Tell the server loop to exit and reclaim the listening socket.
        finish = True
        t.join()
        server.close()
def test_default_ciphers(self):
    """A client restricted to weak ciphers cannot reach a default server."""
    server = ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_SSLv23,
                                chatty=False)
    with server:
        sock = socket.socket()
        try:
            # Force a set of weak ciphers on our client socket
            try:
                s = ssl.wrap_socket(sock,
                                    ssl_version=ssl.PROTOCOL_SSLv23,
                                    ciphers="DES")
            except ssl.SSLError:
                self.skipTest("no DES cipher available")
            with self.assertRaises((OSError, ssl.SSLError)):
                s.connect((HOST, server.port))
        finally:
            sock.close()
    # The server must have recorded the failed negotiation.
    self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_main(verbose=False):
    """Locate the certificate fixtures and run the selected test classes."""
    global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, NOKIACERT
    CERTFILE = test_support.findfile("keycert.pem")
    SVN_PYTHON_ORG_ROOT_CERT = test_support.findfile(
        "https_svn_python_org_root.pem")
    NOKIACERT = test_support.findfile("nokia.pem")
    # All three fixture files must be readable before anything runs.
    for certfile in (CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, NOKIACERT):
        if not os.path.exists(certfile):
            raise test_support.TestFailed("Can't read certificate files!")
    tests = [BasicTests, BasicSocketTests]
    if test_support.is_resource_enabled('network'):
        tests.append(NetworkedTests)
    if _have_threads:
        thread_info = test_support.threading_setup()
        if thread_info and test_support.is_resource_enabled('network'):
            tests.append(ThreadedTests)
    try:
        test_support.run_unittest(*tests)
    finally:
        if _have_threads:
            test_support.threading_cleanup(*thread_info)
# Allow running this test module directly: ``python test_ssl.py``.
if __name__ == "__main__":
    test_main()
|
slam_demo.py | #!/usr/bin/env python3.7
import socket
import time
import math
import queue
import struct
import threading
from hokuyo.driver import hokuyo
from hokuyo.tools import hokuyo_socket
from roboclaw.motorcontrol import MotorControl
# Network endpoints: this script serves packed scan data to a client on
# `server_port` and reads the Hokuyo lidar stream from `lidar_host`.
server_host = 'phantom-edison'
server_port = 60000
lidar_host = 'phantom-zynq'
lidar_port = 12345
# Serial device of the RoboClaw motor controller.
roboclaw_tty = '/dev/ttyMFD1'
# Wall-clock helpers in milliseconds / microseconds.
current_milli_time = lambda: int(round(time.time() * 1000))
current_micro_time = lambda: int(round(time.time() * 1000000))
# Straight-segment length (2.0 m) and cruise speed (0.15 m/s) in encoder pulses.
dist = MotorControl.metres_to_pulses(2.0)
speed = MotorControl.metres_to_pulses(0.15)
# Differential-drive geometry in metres: wheel separation and turn radius.
axle = 0.197
radius = 0.505
# Full-circle arc lengths (pulses) for the robot centre and each wheel.
arc_c = int(MotorControl.metres_to_pulses(radius * math.pi * 2))
arc_l = int(MotorControl.metres_to_pulses((radius + axle/2) * math.pi * 2))
arc_r = int(MotorControl.metres_to_pulses((radius - axle/2) * math.pi * 2))
# Per-wheel speed ratios so both wheels complete their arcs simultaneously.
ratio_l = arc_l / arc_c
ratio_r = arc_r / arc_c
speed_l = int(speed * ratio_l)
speed_r = int(speed * ratio_r)
if __name__ == '__main__':
    # Wait for the SLAM client to connect before touching any hardware.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((server_host, server_port))
    server.listen(0)
    print('Listening for connections on port {}...'.format(server_port))
    conn, addr = server.accept()
    print('Connection from', addr)
    # Connect to the Hokuyo lidar and prepare its background scan thread.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((lidar_host, lidar_port))
    laser = hokuyo.Hokuyo(hokuyo_socket.Socket(s))
    scanning_thread = threading.Thread(target=laser.scanning_distances_loop)
    # Bring the motor controller up in a known (stopped) state.
    motors = MotorControl(roboclaw_tty, False)
    motors.stop_all()
    battery = motors.read_batt_voltage()
    print('*********************')
    print('Battery: {}V'.format(battery))
    print('*********************')
    sensor_data = queue.Queue()
    motors.reset_encoders()
    laser.reset()
    scanning_thread.start()
    time.sleep(1.0)
    start_time = current_micro_time()
    laser.enable_scanning(True)
    # Queue the drive pattern (two straights, two arcs) into the RoboClaw's
    # command buffers; execution proceeds while scans are collected below.
    motors.drive_both_speed_distance(speed, dist)
    motors.drive_speed_distance(speed_l, speed_r, arc_l, arc_r)
    motors.drive_both_speed_distance(speed, dist)
    motors.drive_speed_distance(speed_l, speed_r, arc_l, arc_r)
    motors.stop_all_buffered()
    start_s = time.time()
    num_scans = 0
    print()
    print('Gathering scan data...')
    # Collect (timestamp, encoders, scan) tuples while the buffered motion
    # commands drain; each lidar scan is paired with the odometry read-out.
    while not motors.buffers_empty():
        scan = []
        while len(scan) == 0:
            scan = laser.get_scan_distances()
        timestamp = current_micro_time() - start_time
        encoders = motors.read_encoders()
        sensor_data.put((timestamp, encoders, scan))
        print('.', end='', flush=True)
        num_scans += 1
    end_s = time.time()
    laser.enable_scanning(False)
    motors.wait_for_empty_buffers()
    motors.stop_all()
    print()
    total_s = end_s - start_s
    print('{} scans in {:.2f}s - {:.2f} scans/s - {:.2f}ms/scan'.format(num_scans, total_s, num_scans/total_s, total_s*1000.0/num_scans))
    print()
    # // struct containing complete sensor scan sent from robot server (Edison board)
    # #define ROBOT_LIDAR_SCAN_SIZE 682
    # typedef struct {
    #     uint16_t status;      // status of robot
    #     uint32_t timestamp;
    #     int32_t q1, q2;       // odometry distance for left and right wheels respectively
    #     int16_t d[ROBOT_LIDAR_SCAN_SIZE];
    # } sensor_data_t;
    packer = struct.Struct('H I 2i 682h')
    # Every packed record is both streamed to the client and logged to disk.
    f = open("scan.dat", "wb")
    start_s = time.time()
    print('Sending scan data for {} scans...'.format(num_scans))
    print('|', ' '*(num_scans-2), '|', sep='')
    while not sensor_data.empty():
        data = sensor_data.get()
        # Fields: status=0, timestamp, both encoder counts, then 682 ranges.
        flat_data = [0, data[0]] + list(data[1]) + data[2]
        packed_data = packer.pack(*flat_data)
        conn.sendall(packed_data)
        f.write(packed_data)
        print('.', end='', flush=True)
    end_s = time.time()
    print()
    total_s = end_s - start_s
    print('{} scans in {:.2f}s - {:.2f} scans/s'.format(num_scans, total_s, num_scans/total_s))
    print()
    print('Closing connection...')
    conn.close()
    f.close()
    laser.terminate()
|
detect_motor_test.py | #!/usr/bin/env python
#!coding=utf-8
import rospy
import numpy as np
import PIL.Image as pilimage
from sensor_msgs.msg import CompressedImage
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
from cv_bridge import CvBridge, CvBridgeError
import cv2
import time
from yolo import YOLO
from sensor_msgs.msg import Joy
from std_msgs.msg import String
from geometry_msgs.msg import Twist
from tf.transformations import *
from math import pi
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Header
from sensor_msgs.msg import JointState
from threading import Thread
import threading
# Shared joint-position cache updated by RV2_motorjointstate_callback.
# NOTE(review): a module-level ``global`` statement is a no-op; the name only
# comes into existence when the callback first assigns it.
global RV2_motor1_joint
# Instantiate the detector once at import time (loads the network weights).
yolo = YOLO()
class image_converter:
    """Subscribes to the compressed mid-camera stream, runs YOLO detection
    and records the horizontal offset (``delta_x``) of a detected person
    from the image centre (x = 320) in a module-level global."""

    def __init__(self):
        # Create the cv_bridge and subscribe to the compressed image topic.
        global delta_x
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/mid_camera/color/image_raw/compressed", CompressedImage, self.callback)

    def callback(self, data):
        # Convert the ROS image message into an OpenCV (BGR) image.
        global delta_x
        try:
            cv_image = self.bridge.compressed_imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            # BUG FIX: previously printed the literal string 'e' instead of
            # the exception.  Also skip the frame: cv_image is undefined here.
            print(e)
            return
        # BGR -> RGB, then OpenCV array -> PIL image for the detector.
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        cv_image = pilimage.fromarray(np.uint8(cv_image))
        # Run YOLO: returns the annotated image, bounding boxes and labels.
        cv_image, bbox_list, label_list = yolo.detect_image(cv_image)
        # PIL image -> OpenCV array, RGB -> BGR, then display the result.
        cv_image = np.array(cv_image)
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
        cv2.imshow("Image window", cv_image)
        cv2.waitKey(3)
        if type(label_list) != int:  # detector returns int 1 when nothing was found
            num_of_obj = len(label_list)
            # Compute the tracked person's offset from the image centre.
            for i in range(num_of_obj):
                if 'person' in label_list[i]:
                    object_center = (bbox_list[i][1] + bbox_list[i][3]) * 0.5
                    delta_x = 320 - object_center
                    # Return value is ignored by rospy; kept for parity with
                    # the original behaviour (stop at the first person).
                    return delta_x
                elif 'bed' in label_list[i]:
                    print("yyy")
        else:
            print('yolo未识别到任何物体')
def judge_bed():
    """Thread target: start the YOLO image subscriber, which keeps the
    module-level ``delta_x`` offset up to date."""
    global delta_x
    image_converter()
def motor1_move():
    """Continuously steer motor1 so the tracked person stays centred.

    Reads the module globals ``delta_x`` (pixel offset from image centre,
    written by the YOLO callback) and ``RV2_motor1_joint`` (current joint
    position); publishes JointState velocity commands on
    ``command_vel_pub_m``.  Runs until rospy shuts down.
    """
    time.sleep(2)  # give the detection thread time to publish a first delta_x
    global command_vel_pub_m, delta_x, RV2_motor1_joint

    def _motor1_cmd(velocity):
        # Build (without publishing) a single-joint velocity command for
        # motor1; deduplicates the four identical builders of the original.
        msg = JointState()
        msg.header = Header()
        msg.header.stamp = rospy.Time.now()
        msg.header.frame_id = "bulldog"
        msg.name = ["motor1"]
        msg.velocity = [velocity]
        return msg

    while not rospy.is_shutdown():
        print(delta_x)
        if -1.5 < RV2_motor1_joint < 1.5:
            if delta_x > 80:
                # Target is left of centre: turn left, speed ~ offset.
                print("a")
                motor_vel = _motor1_cmd((delta_x - 70) * 0.003)
                print(motor_vel)
                command_vel_pub_m.publish(motor_vel)
                time.sleep(2)
            elif delta_x < -80:
                # Target is right of centre: turn right.
                print("b")
                command_vel_pub_m.publish(_motor1_cmd((delta_x + 70) * 0.003))
                time.sleep(2)
            elif -80 < delta_x < 80:
                # Close enough to centre: stop the joint.
                command_vel_pub_m.publish(_motor1_cmd(0))
                time.sleep(0.5)
        else:
            # Joint outside its safe range: command a stop.
            command_vel_pub_m.publish(_motor1_cmd(0))
            time.sleep(0.5)
        time.sleep(1)
def RV2_motorjointstate_callback(data):
    """Cache the first joint position from /joint_states_motor in the
    module-level global read by motor1_move()."""
    global RV2_motor1_joint
    position = data.position[0]
    RV2_motor1_joint = position
    print(RV2_motor1_joint)
if __name__ == '__main__':
    try:
        # Initialise the ROS node.
        rospy.init_node("cv_bridge_test")
        rospy.loginfo("Starting cv_bridge_test node")
        global command_vel_pub_m, delta_x
        # Publisher for motor1 velocity commands.
        command_vel_pub_m = rospy.Publisher('/motor_control/input/velocity', JointState, queue_size = 100, latch=True)
        # Subscribe to the torso joint-state topic.
        rospy.Subscriber('/joint_states_motor',JointState,RV2_motorjointstate_callback)
        # Worker thread running the YOLO detection subscriber.
        t_judge_bed = threading.Thread(target = judge_bed)
        t_judge_bed.start()
        time.sleep(2)
        # Worker thread driving the torso motor towards the target.
        t_motor1 = threading.Thread(target = motor1_move)
        t_motor1.start()
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down cv_bridge_test node.")
        cv2.destroyAllWindows()
|
util.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import decimal
import functools
import hmac
import json
import os
import re
import stat
import sys
import threading
import time
import traceback
import urllib
import urllib.request, urllib.parse, urllib.error
import queue
from collections import defaultdict
from datetime import datetime
from decimal import Decimal

from .i18n import _
def inv_dict(d):
    """Return a copy of *d* with keys and values swapped."""
    return dict((v, k) for k, v in d.items())

base_units = {'DEI':8, 'mDEI':5, 'bits':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['DEI', 'mDEI', 'bits', 'sat'] # list(dict) does not guarantee order

def decimal_point_to_base_unit_name(dp: int) -> str:
    """Map a decimal-point count to its unit name, e.g. 8 -> "DEI"."""
    try:
        return base_units_inverse[dp]
    except KeyError:
        raise Exception('Unknown base unit')

def base_unit_name_to_decimal_point(unit_name: str) -> int:
    """Map a unit name to its decimal-point count, e.g. "DEI" -> 8."""
    try:
        return base_units[unit_name]
    except KeyError:
        raise Exception('Unknown base unit')
def normalize_version(v):
    """Parse a dotted version string into a list of ints, ignoring trailing
    ".0" groups so that "1.2.0" compares equal to "1.2"."""
    stripped = re.sub(r'(\.0+)*$', '', v)
    return list(map(int, stripped.split(".")))
class NotEnoughFunds(Exception):
    """Wallet balance cannot cover the requested amount."""

class NoDynamicFeeEstimates(Exception):
    """No server-provided fee estimates are currently available."""
    def __str__(self):
        return _('Dynamic fee estimates not available')

class InvalidPassword(Exception):
    """The supplied wallet password did not match."""
    def __str__(self):
        return _("Incorrect password")

class FileImportFailed(Exception):
    """Importing data from a file failed; *message* carries the detail."""
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        return _("Failed to import from file.") + "\n" + self.message

class FileExportFailed(Exception):
    """Exporting data to a file failed; *message* carries the detail."""
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        return _("Failed to export to file.") + "\n" + self.message

class TimeoutException(Exception):
    """An operation exceeded its deadline; a localized default message is
    used when none is supplied."""
    def __init__(self, message=''):
        self.message = str(message)
    def __str__(self):
        if not self.message:
            return _("Operation timed out.")
        return self.message

class WalletFileException(Exception):
    """The wallet file is unreadable or malformed."""

class BitcoinException(Exception):
    """Generic Bitcoin-level error."""

# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
    '''An exception that is suppressed from the user'''
    pass
class Satoshis(object):
    """Thin wrapper marking an integer as an amount in satoshis."""
    def __new__(cls, value):
        instance = super(Satoshis, cls).__new__(cls)
        instance.value = value
        return instance
    def __repr__(self):
        return 'Satoshis(%d)'%self.value
    def __str__(self):
        # Rendered via the module-level satoshi formatter.
        return format_satoshis(self.value) + " DEI"
class Fiat(object):
    """A fiat amount (Decimal, possibly NaN) tagged with its currency code."""
    def __new__(cls, value, ccy):
        instance = super(Fiat, cls).__new__(cls)
        instance.ccy = ccy
        instance.value = value
        return instance
    def __repr__(self):
        return 'Fiat(%s)'% self.__str__()
    def __str__(self):
        # NaN means "no exchange-rate data"; otherwise two decimals + code.
        if self.value.is_nan():
            return _('No Data')
        return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
    """JSON encoder that understands Electrum's Transaction/Satoshis/Fiat
    wrappers plus Decimal, datetime and set."""
    def default(self, obj):
        from .transaction import Transaction
        if isinstance(obj, Transaction):
            return obj.as_dict()
        # These three all serialize via their string form.
        if isinstance(obj, (Satoshis, Fiat, Decimal)):
            return str(obj)
        if isinstance(obj, datetime):
            # Drop microsecond tail: "YYYY-MM-DD HH:MM:SS.mmm".
            return obj.isoformat(' ')[:-3]
        if isinstance(obj, set):
            return list(obj)
        return super(MyEncoder, self).default(obj)
class PrintError(object):
    """Mixin giving subclasses tagged logging helpers; each line is
    prefixed with "[ClassName]" via the module-level print functions."""
    def diagnostic_name(self):
        return self.__class__.__name__
    def print_error(self, *msg):
        # Only prints when the module-level --verbose flag is enabled.
        tag = "[%s]" % self.diagnostic_name()
        print_error(tag, *msg)
    def print_stderr(self, *msg):
        tag = "[%s]" % self.diagnostic_name()
        print_stderr(tag, *msg)
    def print_msg(self, *msg):
        tag = "[%s]" % self.diagnostic_name()
        print_msg(tag, *msg)
class ThreadJob(PrintError):
    """Base class for periodic work scheduled on a daemon thread's main
    loop; run() executes in that thread's context.
    """
    def run(self):
        """Invoked periodically by the owning thread; default is a no-op."""
        pass
class DebugMem(ThreadJob):
    '''A handy class for debugging GC memory leaks'''
    def __init__(self, classes, interval=30):
        # next_time=0 forces a scan on the first run(); *interval* is the
        # minimum number of seconds between scans.
        self.next_time = 0
        self.classes = classes
        self.interval = interval
    def mem_stats(self):
        import gc
        self.print_error("Start memscan")
        gc.collect()
        objmap = defaultdict(list)
        # Bucket every live object by the first watched classes it matches.
        for obj in gc.get_objects():
            for class_ in self.classes:
                if isinstance(obj, class_):
                    objmap[class_].append(obj)
        for class_, instances in objmap.items():
            self.print_error("%s: %d" % (class_.__name__, len(instances)))
        self.print_error("Finish memscan")
    def run(self):
        # Throttle scans to one per interval.
        if time.time() > self.next_time:
            self.mem_stats()
            self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
    """ daemon thread that terminates cleanly """
    def __init__(self):
        threading.Thread.__init__(self)
        # Remember the spawning thread: is_running() also checks that the
        # parent is still alive, so orphaned daemons wind down by themselves.
        self.parent_thread = threading.currentThread()
        self.running = False
        self.running_lock = threading.Lock()
        self.job_lock = threading.Lock()
        self.jobs = []
    def add_jobs(self, jobs):
        # Register additional ThreadJob instances for periodic execution.
        with self.job_lock:
            self.jobs.extend(jobs)
    def run_jobs(self):
        # Don't let a throwing job disrupt the thread, future runs of
        # itself, or other jobs. This is useful protection against
        # malformed or malicious server responses
        with self.job_lock:
            for job in self.jobs:
                try:
                    job.run()
                except Exception as e:
                    traceback.print_exc(file=sys.stderr)
    def remove_jobs(self, jobs):
        # Unregister jobs; raises ValueError if a job was never added.
        with self.job_lock:
            for job in jobs:
                self.jobs.remove(job)
    def start(self):
        with self.running_lock:
            self.running = True
        return threading.Thread.start(self)
    def is_running(self):
        with self.running_lock:
            return self.running and self.parent_thread.is_alive()
    def stop(self):
        # Ask the thread to exit; the subclass run loop polls is_running().
        with self.running_lock:
            self.running = False
    def on_stop(self):
        # Called by subclasses when their run loop exits; detaches the JNI
        # thread on Android so the VM can shut down cleanly.
        if 'ANDROID_DATA' in os.environ:
            import jnius
            jnius.detach()
            self.print_error("jnius detach")
        self.print_error("stopped")
# TODO: disable
is_verbose = True

def set_verbosity(b):
    """Globally enable or disable verbose (stderr) logging."""
    global is_verbose
    is_verbose = b

def print_error(*args):
    """Write *args* to stderr, but only when verbose mode is on."""
    if is_verbose:
        print_stderr(*args)

def print_stderr(*args):
    """Write *args*, space-separated, to stderr and flush."""
    sys.stderr.write(" ".join(str(item) for item in args) + "\n")
    sys.stderr.flush()

def print_msg(*args):
    """Write *args*, space-separated, to stdout and flush."""
    sys.stdout.write(" ".join(str(item) for item in args) + "\n")
    sys.stdout.flush()
def json_encode(obj):
    """Pretty-print *obj* as sorted, indented JSON via MyEncoder, falling
    back to repr() when the object is not serializable."""
    try:
        return json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
    except TypeError:
        return repr(obj)
def json_decode(x):
    """Parse a JSON string, decoding floats as Decimal for exactness.

    Callers pass arbitrary values, so anything unparseable is returned
    unchanged rather than raising.  The bare ``except:`` of the original
    is narrowed to ``except Exception`` so that system-exiting exceptions
    (KeyboardInterrupt, SystemExit) are no longer swallowed.
    """
    try:
        return json.loads(x, parse_float=Decimal)
    except Exception:
        return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
    """Return True if the two strings are equal, False otherwise."""
    # Comparison time does not depend on where the inputs first differ.
    a = to_bytes(val1, 'utf8')
    b = to_bytes(val2, 'utf8')
    return hmac.compare_digest(a, b)
# decorator that prints execution time
def profiler(func):
    """Decorator printing the wall-clock execution time of *func* per call.

    Improvements over the original: uses functools.wraps so the decorated
    function keeps its name/docstring, and a named wrapper instead of a
    lambda for readable tracebacks.  (Requires the module-level ``time``
    and ``functools`` imports.)
    """
    @functools.wraps(func)
    def wrapper(*args, **kw_args):
        t0 = time.time()
        result = func(*args, **kw_args)
        print_error("[profiler]", func.__name__, "%.4f" % (time.time() - t0))
        return result
    return wrapper
def android_ext_dir():
    """Path of the shared external storage directory (Android only)."""
    import jnius
    env = jnius.autoclass('android.os.Environment')
    return env.getExternalStorageDirectory().getPath()

def android_data_dir():
    """App-private data directory inside the Android sandbox."""
    import jnius
    PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
    return PythonActivity.mActivity.getFilesDir().getPath() + '/data'

def android_headers_dir():
    """External directory holding the blockchain headers file; created on
    first use."""
    d = android_ext_dir() + '/org.electrum.electrum'
    if not os.path.exists(d):
        os.mkdir(d)
    return d

def android_check_data_dir():
    """ if needed, move old directory to sandbox """
    ext_dir = android_ext_dir()
    data_dir = android_data_dir()
    old_electrum_dir = ext_dir + '/electrum'
    if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
        import shutil
        # Keep the large headers file on external storage: move it aside
        # first so it is not swept into the sandbox with the rest.
        new_headers_path = android_headers_dir() + '/blockchain_headers'
        old_headers_path = old_electrum_dir + '/blockchain_headers'
        if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
            print_error("Moving headers file to", new_headers_path)
            shutil.move(old_headers_path, new_headers_path)
        print_error("Moving data to", data_dir)
        shutil.move(old_electrum_dir, data_dir)
    return data_dir
def get_headers_dir(config):
    """Headers location: the external Android dir on Android, otherwise
    the configured data path."""
    if 'ANDROID_DATA' in os.environ:
        return android_headers_dir()
    return config.path
def assert_datadir_available(config_path):
    """Raise FileNotFoundError if the datadir vanished while running."""
    if not os.path.exists(config_path):
        raise FileNotFoundError(
            'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
            'Should be at {}'.format(config_path))
def assert_file_in_datadir_available(path, config_path):
    """Raise FileNotFoundError if *path* is missing, blaming the datadir
    first when the whole directory is gone."""
    if os.path.exists(path):
        return
    # Distinguish "datadir gone" from "file gone within an intact datadir".
    assert_datadir_available(config_path)
    raise FileNotFoundError(
        'Cannot find file but datadir is there.' + '\n' +
        'Should be at {}'.format(path))
def assert_bytes(*args):
    """
    porting helper, assert args type

    Raises AssertionError (after printing the offending types) when any
    argument is not bytes/bytearray.  The bare ``except:`` of the original
    is narrowed to AssertionError — the only exception the body can raise —
    so unrelated errors and KeyboardInterrupt are no longer intercepted.
    """
    try:
        for x in args:
            assert isinstance(x, (bytes, bytearray))
    except AssertionError:
        print('assert bytes failed', list(map(type, args)))
        raise
def assert_str(*args):
    """
    porting helper, assert args type
    """
    # Every argument must already be text (str), not bytes.
    assert all(isinstance(x, str) for x in args)
def to_string(x, enc):
    """Decode a bytes-like *x* using *enc*; pass str through unchanged;
    raise TypeError for anything else."""
    if isinstance(x, str):
        return x
    if isinstance(x, (bytes, bytearray)):
        return x.decode(enc)
    raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
    """
    cast string to bytes() like object, but for python2 support it's bytearray copy
    """
    if isinstance(something, str):
        return something.encode(encoding)
    if isinstance(something, bytes):
        return something
    if isinstance(something, bytearray):
        return bytes(something)
    raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex   # hex str -> bytes
hfu = binascii.hexlify  # bytes -> lowercase hex, as ASCII bytes

def bh2u(x):
    """
    str with hex representation of a bytes-like object

    binascii.hexlify emits LOWERCASE hex; the original docstring example
    incorrectly showed '01020A'.

    >>> x = bytes((1, 2, 10))
    >>> bh2u(x)
    '01020a'

    :param x: bytes
    :rtype: str
    """
    return hfu(x).decode('ascii')
def user_dir():
    """Return the per-user Electrum data directory, or None when no home
    directory can be determined from the environment."""
    if 'ANDROID_DATA' in os.environ:
        return android_check_data_dir()
    if os.name == 'posix':
        return os.path.join(os.environ["HOME"], ".electrum_dei")
    for env_var in ("APPDATA", "LOCALAPPDATA"):
        if env_var in os.environ:
            return os.path.join(os.environ[env_var], "Electrum_dei")
    # No usable home directory found in the environment variables.
    return None
def is_valid_email(s):
    """Loose syntactic check: exactly one '@' with a dotted domain part."""
    return re.match(r"[^@]+@[^@]+\.[^@]+", s) is not None
def format_satoshis_plain(x, decimal_point = 8):
    """Display a satoshi amount scaled. Always uses a '.' as a decimal
    point and has no thousands separator"""
    value = Decimal(x) / pow(10, decimal_point)
    # Trim trailing zeros, then a dangling decimal point.
    return "{:.8f}".format(value).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
    """Render a satoshi amount as a human-readable decimal string.

    num_zeros    -- minimum digits kept after the decimal separator
    decimal_point -- power of ten used to scale x down
    precision    -- digits to format before trimming (defaults to decimal_point)
    is_diff      -- prefix positive amounts with '+'
    whitespaces  -- right-align into a fixed-width (15-char) column
    Returns 'unknown' when x is None.  Uses the locale's decimal separator.
    """
    from locale import localeconv
    if x is None:
        return 'unknown'
    if precision is None:
        precision = decimal_point
    fmt = ".0" + str(precision) if precision > 0 else ""
    if is_diff:
        fmt = '+' + fmt
    scaled = x / pow (10, decimal_point)
    trimmed = ("{:" + fmt + "f}").format(scaled).rstrip('0')
    integer_part, fract_part = trimmed.split(".")
    dp = localeconv()['decimal_point']
    if len(fract_part) < num_zeros:
        fract_part += "0" * (num_zeros - len(fract_part))
    result = integer_part + dp + fract_part
    if whitespaces:
        result += " " * (decimal_point - len(fract_part))
        result = " " * (15 - len(result)) + result
    return result
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)

def format_fee_satoshis(fee, num_zeros=0):
    """Format a fee rate (sat/byte) at FEERATE_PRECISION decimal places."""
    return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)

def quantize_feerate(fee):
    """Strip sat/byte fee rate of excess precision."""
    if fee is None:
        return None
    # Half-down rounding at the fee-rate quantum (0.1 sat/byte).
    return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
    """Convert a POSIX timestamp to a local datetime; None maps to None."""
    return None if timestamp is None else datetime.fromtimestamp(timestamp)
def format_time(timestamp):
    """Human-readable local time ("YYYY-MM-DD HH:MM:SS.mmm"), or a
    localized "Unknown" when the timestamp is missing."""
    date = timestamp_to_datetime(timestamp)
    if date:
        return date.isoformat(' ')[:-3]
    return _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
    """Describe how far *from_date* (POSIX timestamp) lies from
    *since_date* (default: now), e.g. "3 minutes ago" or "in 1 day"."""
    if from_date is None:
        return "Unknown"
    from_date = datetime.fromtimestamp(from_date)
    if since_date is None:
        since_date = datetime.now(target_tz)
    description = time_difference(from_date - since_date, include_seconds)
    if from_date < since_date:
        return description + " ago"
    return "in " + description
def time_difference(distance_in_time, include_seconds):
    """Describe a timedelta in rough English ("about 1 hour", "3 days"...).

    The sign of the delta is ignored; include_seconds enables sub-minute
    granularity ("less than 10 seconds", "half a minute", ...).
    """
    seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
    minutes = int(round(seconds/60))
    if minutes <= 1:
        if include_seconds:
            for remainder in [5, 10, 20]:
                if seconds < remainder:
                    return "less than %s seconds" % remainder
            if seconds < 40:
                return "half a minute"
            if seconds < 60:
                return "less than a minute"
            return "1 minute"
        return "less than a minute" if minutes == 0 else "1 minute"
    if minutes < 45:
        return "%s minutes" % minutes
    if minutes < 90:
        return "about 1 hour"
    if minutes < 1440:
        return "about %d hours" % (round(minutes / 60.0))
    if minutes < 2880:
        return "1 day"
    if minutes < 43220:
        return "%d days" % (round(minutes / 1440))
    if minutes < 86400:
        return "about 1 month"
    if minutes < 525600:
        return "%d months" % (round(minutes / 43200))
    if minutes < 1051200:
        return "about 1 year"
    return "over %d years" % (round(minutes / 525600))
# Block explorers by network: name -> (base URL, path prefix per item kind).
# Kinds match block_explorer_URL(): 'tx' for transactions, 'addr' for addresses.
mainnet_block_explorers = {
    'Deimos.Network': ('https://deimos.network/',
                       {'tx': 'transactions/', 'addr': 'addresses/'}),
    'system default': ('https://deimos.network/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
    'Blocktrail.com': ('https://www.blocktrail.com/tBTC/',
                       {'tx': 'tx/', 'addr': 'address/'}),
    # 'blockchain://<genesis hash>/' is handled by the platform's URI dispatcher.
    'system default': ('blockchain://000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943/',
                       {'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
    """Explorer map for the active network (testnet or mainnet)."""
    from . import constants
    if constants.net.TESTNET:
        return testnet_block_explorers
    return mainnet_block_explorers
def block_explorer(config):
    """Name of the configured block explorer (defaults to Deimos.Network)."""
    return config.get('block_explorer', 'Deimos.Network')
def block_explorer_tuple(config):
    """(base_url, kind_map) for the configured explorer, or None if unknown."""
    name = block_explorer(config)
    return block_explorer_info().get(name)
def block_explorer_URL(config, kind, item):
    """Full explorer URL for *item* of *kind* ('tx'/'addr'), or None."""
    explorer = block_explorer_tuple(config)
    if not explorer:
        return None
    prefix = explorer[1].get(kind)
    if not prefix:
        return None
    return explorer[0] + prefix + item
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
    """Parse a 'bitcoin:' URI (or bare address) into a dict of fields.

    Returns keys such as 'address', 'amount' (satoshis), 'message'/'memo',
    'time', 'exp', 'sig' and 'r'.  When on_pr is given and the URI carries
    payment-request data, a daemon thread builds/fetches the request and
    invokes on_pr(request).
    Raises Exception for malformed addresses/URIs or duplicate query keys.
    """
    from . import bitcoin
    from .bitcoin import COIN
    if ':' not in uri:
        if not bitcoin.is_address(uri):
            raise Exception("Not a bitcoin address")
        return {'address': uri}
    u = urllib.parse.urlparse(uri)
    if u.scheme != 'bitcoin':
        raise Exception("Not a bitcoin URI")
    address = u.path
    # python for android fails to parse query
    if address.find('?') > 0:
        address, query = u.path.split('?')
        pq = urllib.parse.parse_qs(query)
    else:
        pq = urllib.parse.parse_qs(u.query)
    for k, v in pq.items():
        if len(v) != 1:
            raise Exception('Duplicate Key', k)
    out = {k: v[0] for k, v in pq.items()}
    if address:
        if not bitcoin.is_address(address):
            raise Exception("Invalid bitcoin address:" + address)
        out['address'] = address
    if 'amount' in out:
        am = out['amount']
        # Raw string: the original pattern triggered an invalid-escape warning.
        m = re.match(r'([0-9\.]+)X([0-9])', am)
        if m:
            # 'NNNXk' notation: value times 10**(k-8).
            k = int(m.group(2)) - 8
            amount = Decimal(m.group(1)) * pow(Decimal(10), k)
        else:
            amount = Decimal(am) * COIN
        out['amount'] = int(amount)
    if 'message' in out:
        # Mirror the value under 'memo' for callers expecting either key
        # (the original also contained a redundant self-assignment, removed).
        out['memo'] = out['message']
    if 'time' in out:
        out['time'] = int(out['time'])
    if 'exp' in out:
        out['exp'] = int(out['exp'])
    if 'sig' in out:
        out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if on_pr and (r or (name and sig)):
        def get_payment_request_thread():
            from . import paymentrequest as pr
            if name and sig:
                s = pr.serialize_request(out).SerializeToString()
                request = pr.PaymentRequest(s)
            else:
                request = pr.get_payment_request(r)
            if on_pr:
                on_pr(request)
        t = threading.Thread(target=get_payment_request_thread)
        # 'daemon' attribute replaces the deprecated setDaemon() call.
        t.daemon = True
        t.start()
    return out
def create_URI(addr, amount, message):
    """Build a BIP21-style 'bitcoin:' URI; empty string for invalid addresses."""
    from . import bitcoin
    if not bitcoin.is_address(addr):
        return ""
    params = []
    if amount:
        params.append('amount=%s' % format_satoshis_plain(amount))
    if message:
        params.append('message=%s' % urllib.parse.quote(message))
    parts = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr,
                                     params='', query='&'.join(params),
                                     fragment='')
    return urllib.parse.urlunparse(parts)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
    """input() replacement that writes the prompt to stdout explicitly."""
    if prompt:
        sys.stdout.write(prompt)
    return builtin_raw_input()

import builtins
# Keep a reference to the real input() and monkey-patch the builtin so every
# caller (including libraries) picks up the stdout-writing variant.
# NOTE: builtin_raw_input is assigned after raw_input is defined, but the name
# is only resolved when raw_input is actually called, so the order is safe.
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
    """Split one newline-terminated JSON object off the *message* bytes.

    Returns (obj, rest): obj is the decoded value, or None when no complete
    line is buffered yet or the line is not valid UTF-8/JSON; rest is the
    unconsumed remainder of the buffer.
    """
    # TODO: check \r\n pattern
    n = message.find(b'\n')
    if n == -1:
        return None, message
    try:
        j = json.loads(message[0:n].decode('utf8'))
    except (UnicodeDecodeError, ValueError):
        # Narrowed from a bare 'except:'; json.JSONDecodeError is a ValueError.
        j = None
    return j, message[n+1:]
class timeout(Exception):
    """Raised by the pipe classes when no complete message arrives in time."""
    pass
import socket
import json
import ssl
import time
class SocketPipe:
    """Newline-delimited JSON message pipe over a (possibly SSL) socket."""

    def __init__(self, socket):
        # NOTE: the parameter shadows the stdlib `socket` module within __init__.
        self.socket = socket
        self.message = b''  # bytes received but not yet parsed into a message
        self.set_timeout(0.1)
        self.recv_time = time.time()  # time of the last successful recv

    def set_timeout(self, t):
        """Set the underlying socket timeout in seconds."""
        self.socket.settimeout(t)

    def idle_time(self):
        """Seconds elapsed since data was last received."""
        return time.time() - self.recv_time

    def get(self):
        """Return the next parsed JSON message.

        Raises `timeout` when no complete message is currently available;
        returns None when the remote side closed the connection.
        """
        while True:
            response, self.message = parse_json(self.message)
            if response is not None:
                return response
            try:
                data = self.socket.recv(1024)
            except socket.timeout:
                raise timeout
            except ssl.SSLError:
                raise timeout
            except socket.error as err:
                # NOTE(review): errno 60 is ETIMEDOUT on macOS/BSD — presumably
                # intended as a timeout; confirm behaviour on other platforms.
                if err.errno == 60:
                    raise timeout
                elif err.errno in [11, 35, 10035]:
                    # EAGAIN/EWOULDBLOCK (Linux, BSD, Windows): back off briefly.
                    print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
                    time.sleep(0.2)
                    raise timeout
                else:
                    print_error("pipe: socket error", err)
                    data = b''
            except:
                # Best-effort: any other failure is treated as connection loss.
                traceback.print_exc(file=sys.stderr)
                data = b''
            if not data:  # Connection closed remotely
                return None
            self.message += data
            self.recv_time = time.time()

    def send(self, request):
        """Serialize one request as a JSON line and send it."""
        out = json.dumps(request) + '\n'
        out = out.encode('utf8')
        self._send(out)

    def send_all(self, requests):
        """Serialize and send several requests in a single write."""
        out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
        self._send(out)

    def _send(self, out):
        # Keep writing until the whole buffer is flushed; transient SSL errors
        # are logged and retried after a short sleep.
        while out:
            try:
                sent = self.socket.send(out)
                out = out[sent:]
            except ssl.SSLError as e:
                print_error("SSLError:", e)
                time.sleep(0.1)
                continue
class QueuePipe:
    """In-process message pipe mirroring SocketPipe's API, backed by queues."""

    def __init__(self, send_queue=None, get_queue=None):
        # Fresh queues are created for any endpoint not supplied.
        self.send_queue = send_queue if send_queue else queue.Queue()
        self.get_queue = get_queue if get_queue else queue.Queue()
        self.set_timeout(0.1)

    def get(self):
        """Return the next incoming message, or raise `timeout` when empty."""
        try:
            return self.get_queue.get(timeout=self.timeout)
        except queue.Empty:
            raise timeout

    def get_all(self):
        """Drain and return every currently queued incoming message."""
        drained = []
        while True:
            try:
                drained.append(self.get_queue.get_nowait())
            except queue.Empty:
                return drained

    def set_timeout(self, t):
        """Set how long get() blocks before raising `timeout`."""
        self.timeout = t

    def send(self, request):
        """Queue one outgoing request."""
        self.send_queue.put(request)

    def send_all(self, requests):
        """Queue several outgoing requests in order."""
        for request in requests:
            self.send(request)
def setup_thread_excepthook():
    """
    Workaround for `sys.excepthook` thread bug from:
    http://bugs.python.org/issue1230540
    Call once from the main thread before creating any threads.
    """
    original_init = threading.Thread.__init__

    def patched_init(self, *args, **kwargs):
        original_init(self, *args, **kwargs)
        original_run = self.run

        def run_with_hook(*run_args, **run_kwargs):
            # Route uncaught thread exceptions through sys.excepthook.
            try:
                original_run(*run_args, **run_kwargs)
            except Exception:
                sys.excepthook(*sys.exc_info())

        self.run = run_with_hook

    threading.Thread.__init__ = patched_init
def versiontuple(v):
    """Turn a dotted version string such as '3.2.1' into a tuple of ints."""
    return tuple(int(part) for part in v.split("."))
def import_meta(path, validater, load_meta):
    """Load JSON metadata from *path*, validate it, and pass it to *load_meta*.

    validater: callable that checks/normalizes the decoded JSON.
    load_meta: callable receiving the validated data.
    Raises FileImportFailed on invalid JSON or any other failure.
    """
    try:
        with open(path, 'r', encoding='utf-8') as f:
            d = validater(json.loads(f.read()))
            load_meta(d)
    #backwards compatibility for JSONDecodeError
    except ValueError:
        traceback.print_exc(file=sys.stderr)
        raise FileImportFailed(_("Invalid JSON code."))
    except BaseException as e:
        # Consistency fix: this branch printed to stdout while the one above
        # (and export_meta) use stderr; diagnostics now all go to stderr.
        traceback.print_exc(file=sys.stderr)
        raise FileImportFailed(e)
def export_meta(meta, fileName):
    """Write *meta* to *fileName* as pretty-printed, key-sorted JSON.

    Raises FileExportFailed (wrapping the original error) on I/O failure.
    """
    try:
        with open(fileName, 'w+', encoding='utf-8') as fp:
            json.dump(meta, fp, indent=4, sort_keys=True)
    except (IOError, os.error) as exc:
        traceback.print_exc(file=sys.stderr)
        raise FileExportFailed(exc)
def make_dir(path, allow_symlink=True):
    """Create *path* with owner-only permissions if it does not yet exist."""
    if os.path.exists(path):
        return
    if not allow_symlink and os.path.islink(path):
        raise Exception('Dangling link: ' + path)
    os.mkdir(path)
    # Restrict the new directory to the owner (0700).
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
mysql_test.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import logging
import os
import threading
import unittest
import uuid
import warnings
from absl import app
from absl import flags
from absl.testing import absltest
from future import builtins
import mock
import MySQLdb # TODO(hanuszczak): This should be imported conditionally.
from MySQLdb.constants import CR as mysql_conn_errors
from grr_response_server.databases import db_test_mixin
from grr_response_server.databases import mysql
from grr_response_server.databases import mysql_utils
from grr.test_lib import stats_test_lib
from grr.test_lib import test_lib
# Command-line flag: optional file that receives a log of queries that do not
# use an index (for slow-query analysis during tests).
flags.DEFINE_string(
    "slow_query_log", "",
    "Filename. If given, generates a log of all queries not using an index.")
def _GetEnvironOrSkip(key):
value = os.environ.get(key)
if value is None:
raise unittest.SkipTest("'%s' variable is not set" % key)
return value
class MySQLDatabaseProviderMixin(db_test_mixin.DatabaseSetupMixin):
  """Creates a throwaway MySQL test database from MYSQL_TEST_* env vars."""

  @classmethod
  def _CreateDatabase(cls):
    """Connect to the test MySQL server and create a fresh database.

    Returns a (connection, cleanup_fn) pair; cleanup_fn drops the database
    and closes the connection.  Skips the test when the MYSQL_TEST_* env
    variables are not set.
    """
    user = _GetEnvironOrSkip("MYSQL_TEST_USER")
    host = _GetEnvironOrSkip("MYSQL_TEST_HOST")
    port = _GetEnvironOrSkip("MYSQL_TEST_PORT")
    password = _GetEnvironOrSkip("MYSQL_TEST_PASS")
    # Use dash character in database name to break queries that do not quote it.
    # Bug fix: '[-10]' kept a single character of the UUID, making name
    # collisions between concurrent test runs likely; '[-10:]' keeps the
    # last ten characters as intended.
    database = "test-{}".format(builtins.str(uuid.uuid4())[-10:])
    conn = mysql.MysqlDB(
        host=host, port=port, user=user, password=password, database=database)
    logging.info("Created test database: %s", database)

    def _Drop(cursor):
      cursor.execute("DROP DATABASE `{}`".format(database))

    def Fin():
      conn._RunInTransaction(_Drop)
      conn.Close()

    return conn, Fin
  # pylint: enable=unreachable

  def CreateDatabase(self):
    return self.__class__._CreateDatabase()

  def CreateBlobStore(self):
    # Optimization: Since BlobStore and Database share the underlying MysqlDB
    # instance, there is no need to actually setup and destroy the BlobStore.
    # DatabaseTestMixin's setUp and tearDown do this implicitly for every test.
    return self.db.delegate, None
class MysqlTestBase(MySQLDatabaseProviderMixin):
  """Convenience base class for MySQL-backed test suites."""
  pass
class TestMysqlDB(stats_test_lib.StatsTestMixin,
                  db_test_mixin.DatabaseTestMixin, MysqlTestBase,
                  absltest.TestCase):
  """Test the mysql.MysqlDB class.

  Most of the tests in this suite are general blackbox tests of the db.Database
  interface brought in by the db_test.DatabaseTestMixin.
  """

  def testIsRetryable(self):
    """Only transient MySQL operational errors should be considered retryable."""
    self.assertFalse(mysql._IsRetryable(Exception("Some general error.")))
    self.assertFalse(
        mysql._IsRetryable(
            MySQLdb.OperationalError(
                1416, "Cannot get geometry object from data...")))
    self.assertTrue(
        mysql._IsRetryable(
            MySQLdb.OperationalError(
                1205, "Lock wait timeout exceeded; try restarting...")))
    self.assertTrue(
        mysql._IsRetryable(
            MySQLdb.OperationalError(
                1213,
                "Deadlock found when trying to get lock; try restarting...")))
    self.assertTrue(
        mysql._IsRetryable(
            MySQLdb.OperationalError(
                1637, "Too many active concurrent transactions")))

  def AddUser(self, connection, user, password):
    """Insert one row into grr_users on *connection*."""
    cursor = connection.cursor()
    cursor.execute(
        "INSERT INTO grr_users (username, username_hash, password) "
        "VALUES (%s, %s, %s)", (user, mysql_utils.Hash(user), bytes(password)))
    cursor.close()

  def ListUsers(self, connection):
    """Return all (username, password) rows from grr_users."""
    cursor = connection.cursor()
    cursor.execute("SELECT username, password FROM grr_users")
    ret = cursor.fetchall()
    cursor.close()
    return ret

  def testRunInTransaction(self):
    """A write committed in one transaction is visible to a readonly one."""

    def AddUserFn(con):
      self.AddUser(con, "AzureDiamond", "hunter2")

    self.db.delegate._RunInTransaction(AddUserFn)
    users = self.db.delegate._RunInTransaction(self.ListUsers, readonly=True)
    self.assertEqual(users, ((u"AzureDiamond", "hunter2"),))

  @mock.patch.object(mysql, "_SleepWithBackoff")
  def testRunInTransactionDeadlock(self, sleep_with_backoff_fn):
    """A deadlock error should be retried."""

    def AddUserFn1(con):
      self.AddUser(con, "user1", "pw1")

    def AddUserFn2(con):
      self.AddUser(con, "user2", "pw2")

    self.db.delegate._RunInTransaction(AddUserFn1)
    self.db.delegate._RunInTransaction(AddUserFn2)
    # We'll start two transactions which read/modify rows in different orders.
    # This should force (at least) one to fail with a deadlock, which should be
    # retried.
    t1_halfway = threading.Event()
    t2_halfway = threading.Event()
    # Number of times each transaction is attempted.
    counts = [0, 0]

    def Transaction1(connection):
      counts[0] += 1
      cursor = connection.cursor()
      cursor.execute(
          "SELECT password FROM grr_users WHERE username = 'user1' FOR UPDATE")
      t1_halfway.set()
      self.assertTrue(t2_halfway.wait(5))
      cursor.execute("UPDATE grr_users SET password = 'pw2-updated' "
                     "WHERE username = 'user2'")
      cursor.close()

    def Transaction2(connection):
      counts[1] += 1
      cursor = connection.cursor()
      cursor.execute(
          "SELECT password FROM grr_users WHERE username = 'user2' FOR UPDATE")
      t2_halfway.set()
      self.assertTrue(t1_halfway.wait(5))
      cursor.execute("UPDATE grr_users SET password = 'pw1-updated' "
                     "WHERE username = 'user1'")
      cursor.close()

    thread_1 = threading.Thread(
        target=lambda: self.db.delegate._RunInTransaction(Transaction1))
    thread_2 = threading.Thread(
        target=lambda: self.db.delegate._RunInTransaction(Transaction2))
    thread_1.start()
    thread_2.start()
    thread_1.join()
    thread_2.join()
    # Both transaction should have succeeded
    users = self.db.delegate._RunInTransaction(self.ListUsers, readonly=True)
    self.assertEqual(users,
                     ((u"user1", "pw1-updated"), (u"user2", "pw2-updated")))
    # At least one should have been retried.
    self.assertGreater(sum(counts), 2)
    self.assertGreater(sleep_with_backoff_fn.call_count, 0)

  def testSuccessfulCallsAreCorrectlyAccounted(self):
    """Each successful db call should bump the latency-metric counter once."""
    with self.assertStatsCounterDelta(
        1, "db_request_latency", fields=["ReadGRRUsers"]):
      self.db.ReadGRRUsers()

  def testMaxAllowedPacketSettingIsOverriddenWhenTooLow(self):
    """A new connection should raise a too-low max_allowed_packet value."""

    def SetMaxAllowedPacket(conn):
      with contextlib.closing(conn.cursor()) as cursor:
        mysql._SetGlobalVariable("max_allowed_packet", 20 << 10, cursor)

    def GetMaxAllowedPacket(conn):
      with contextlib.closing(conn.cursor()) as cursor:
        return mysql._ReadVariable("max_allowed_packet", cursor)

    self.db.delegate._RunInTransaction(SetMaxAllowedPacket)
    # Initialize a new connection. This should fix the "max_allowed_packet"
    # setting.
    db = mysql.MysqlDB(
        host=self._testdb.hostname(),
        port=self._testdb.port(),
        user=self._testdb.username(),
        password=self._testdb.password(),
        database=self._testdb.dbname())
    db.Close()
    self.assertEqual(
        self.db.delegate._RunInTransaction(GetMaxAllowedPacket),
        builtins.str(mysql.MAX_PACKET_SIZE))

  def testMeaningfulErrorWhenNotEnoughPermissionsToOverrideGlobalVariable(self):
    """Failing to raise max_allowed_packet should surface a specific error."""

    def SetMaxAllowedPacket(conn):
      with contextlib.closing(conn.cursor()) as cursor:
        mysql._SetGlobalVariable("max_allowed_packet", 20 << 10, cursor)

    self.db.delegate._RunInTransaction(SetMaxAllowedPacket)
    # MaxAllowedPacketSettingTooLowError will be raised since
    # _SetGlobalVariable call will fail (via the mock). This way
    # we mimick the situation when _SetGlobalVariable fails due to
    # the lack of permissions.
    with mock.patch.object(
        mysql,
        "_SetGlobalVariable",
        side_effect=MySQLdb.OperationalError("SUPER privileges required")):
      with self.assertRaises(mysql.MaxAllowedPacketSettingTooLowError):
        mysql.MysqlDB(
            host=self._testdb.hostname(),
            port=self._testdb.port(),
            user=self._testdb.username(),
            password=self._testdb.password(),
            database=self._testdb.dbname())

  @mock.patch.object(mysql, "_SleepWithBackoff")
  @mock.patch.object(mysql, "_MAX_RETRY_COUNT", 2)
  def testRetryOnServerGoneNoRollback(self, sleep_with_backoff_fn):
    """'Server has gone away' should drop pooled connections, not roll back."""
    expected_error_msg = "MySQL server has gone away"
    connections = []

    def RaiseServerGoneError(connection):
      # Wrap methods of the connection so we can check whether they get
      # called later.
      real_rollback_fn = connection.rollback
      real_close_fn = connection.close
      connection.rollback = mock.Mock(wraps=real_rollback_fn)
      connection.close = mock.Mock(wraps=real_close_fn)
      connections.append(connection)
      raise MySQLdb.OperationalError(mysql_conn_errors.SERVER_GONE_ERROR,
                                     expected_error_msg)

    with mock.patch.object(self.db.delegate, "_max_pool_size", 6):
      with self.assertRaises(MySQLdb.OperationalError) as context:
        self.db.delegate._RunInTransaction(RaiseServerGoneError)
    self.assertIn(expected_error_msg, builtins.str(context.exception))
    self.assertFalse(sleep_with_backoff_fn.called)
    # We expect all connections in the pool to be removed.
    self.assertLen(connections, 7)
    for connection in connections:
      self.assertFalse(connection.rollback.called)
      self.assertTrue(connection.close.called)

  @mock.patch.object(mysql, "_SleepWithBackoff")
  @mock.patch.object(mysql, "_MAX_RETRY_COUNT", 2)
  def testDoNotRetryPermanentErrors(self, sleep_with_backoff_fn):
    """Permanent operational errors must fail fast with a rollback."""
    expected_error_msg = "Permanent error: Not implemented"
    connections = []

    def RaisePermanentError(connection):
      # Wrap methods of the connection so we can check whether they get
      # called later.
      real_rollback_fn = connection.rollback
      real_close_fn = connection.close
      connection.rollback = mock.Mock(wraps=real_rollback_fn)
      connection.close = mock.Mock(wraps=real_close_fn)
      connections.append(connection)
      raise MySQLdb.OperationalError(mysql_conn_errors.NOT_IMPLEMENTED,
                                     expected_error_msg)

    with self.assertRaises(MySQLdb.OperationalError) as context:
      self.db.delegate._RunInTransaction(RaisePermanentError)
    self.assertIn(expected_error_msg, builtins.str(context.exception))
    self.assertFalse(sleep_with_backoff_fn.called)
    self.assertLen(connections, 1)
    self.assertTrue(connections[0].rollback.called)
    self.assertTrue(connections[0].close.called)
if __name__ == "__main__":
  # Delegate to GRR's test entry point via absl's app runner.
  app.run(test_lib.main)
|
ctf_run.py | #!/usr/bin/python3
"""CTF challenges runner
This script builds, compiles, and optionally deploys to docker-compose all the
challenges located in the current working directory.
Challenge directories are expected to include a `challenge.yml` file defining
the different challenge attributes, such as its name, flag, score, etc.
What this script does:
- updates the `src/flag.txt` file if it exists based on the flag specified in
`challenge.yml`
- run the `src/Makefile`, if it exists
- build the Dockerfile and deploy it via docker-compose if
`docker-compose.yml` exists
This script is written and maintained by Iver.
"""
import argparse
import json
import os
import re
import sys
import threading
import yaml
import yaml
dry_run = False
def build(challenge):
    """Build one challenge: refresh its flag, compile, and (re)deploy docker."""
    print()
    print(f"## Running {challenge}")
    flag = ''
    pub_port = 0
    with open(f"{challenge}/challenge.yml") as config_file:
        challenge_config = yaml.safe_load(config_file)
        flag = challenge_config['flags'][0]
    print("FLAGS:", flag)
    # Keep src/flag.txt in sync with the flag declared in challenge.yml.
    if os.path.exists(f"{challenge}/src/flag.txt"):
        print(f"Updating flag: {challenge}")
        with open(f"{challenge}/src/flag.txt", "w") as flag_file:
            flag_file.write(flag)
    # Compile if a Makefile exists, then publish the binary under bin/.
    if os.path.exists(f"{challenge}/src/Makefile"):
        print(f"Building code: {challenge}")
        os_run(f"make -C ./{challenge}/src")
        if os.path.exists(f"{challenge}/src/{challenge}"):
            os_run(f"mkdir -pv ./{challenge}/bin")
            os_run(f"cp ./{challenge}/src/{challenge} ./{challenge}/bin/program")
    # Rebuild and restart the containers when a compose file is present.
    if os.path.exists(f"{challenge}/docker-compose.yml"):
        print(f"Recreating Docker container(s): {challenge}")
        os_run(f"sudo -E docker-compose -f {challenge}/docker-compose.yml up --build -d")
        for port in get_docker_compose_ports(f"{challenge}/docker-compose.yml"):
            print(f"PORT: {port} (from {challenge}/docker-compose.yml)")
def get_docker_compose_ports(filename):
    """Yield the host-side ports published by a docker-compose file."""
    with open(filename) as f:
        compose = yaml.safe_load(f)
    for service in compose['services'].values():
        if 'ports' not in service:
            continue
        for mapping in service['ports']:
            # 'host:container' — keep only the host side.
            yield mapping.split(':', 1)[0]
def get_challenges():
    """Names of non-hidden CWD subdirectories that contain a challenge.yml."""
    found = []
    for name in os.listdir("."):
        if name.startswith('.'):
            continue
        if os.path.isdir(f"./{name}") and os.path.exists(f"./{name}/challenge.yml"):
            found.append(name)
    return found
def os_run(cmd):
    """Echo and execute *cmd* via the shell; raise on a non-zero exit.

    Honours the module-level dry_run flag (print only, execute nothing).
    """
    if dry_run:
        print(f"dry-run: $ {cmd}")
        return
    print(f"$ {cmd}")
    exitcode = os.system(cmd)
    if exitcode != 0:
        raise RuntimeError(f"Non-zero exit code: {exitcode}: {cmd}")
def run_challanges_builds(challanges, parallel):
    """Build every challenge in *challanges*, optionally in parallel threads.

    Bug fix: the original iterated the module-global `challenges` and silently
    ignored its parameter; it only worked because the caller happened to pass
    that same global.  The parameter is now used (name kept for compatibility).
    """
    if parallel:
        jobs = [threading.Thread(target=build, args=(challenge,))
                for challenge in challanges]
        for job in jobs:
            job.start()
        for job in jobs:
            job.join()
    else:
        for challenge in challanges:
            build(challenge)
if __name__ == "__main__":
    # Discover challenges before parsing args so they can serve as choices.
    challenges = get_challenges()
    parser = argparse.ArgumentParser()
    parser.add_argument("--dry-run", action="store_true", help="only validate")
    parser.add_argument("--parallel", action="store_true", help="run all challenges at the same time")
    parser.add_argument("chall", nargs="+", choices=["all", *challenges], help="challenge names to run. specify 'all' for all")
    args = parser.parse_args()
    # Module-level flag consulted by os_run().
    dry_run = args.dry_run
    all_challenges = "all" in args.chall
    if all_challenges and len(args.chall) > 1:
        print("Cannot specify 'all' and specific challanges")
        exit(1)
    if not all_challenges:
        # Narrow to the explicitly requested subset, preserving discovery order.
        challenges = [x for x in challenges if x in args.chall]
    print(f"{challenges=}")
    if args.dry_run:
        print("Running a dry-run")
    run_challanges_builds(challenges, args.parallel)
    print()
    print("Synchronized successfully!")
|
network.py |
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import queue
import os
import stat
import errno
import random
import re
import select
from collections import defaultdict
import threading
import socket
import json
import socks
from . import util
from . import bitcoin
from .bitcoin import *
from .interface import Connection, Interface
from . import blockchain
from .version import ELECTRUM_VERSION, PROTOCOL_VERSION
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """Parse a server list (as returned by server.peers.subscribe) into a dict.

    Each item is [ip, host, features]; features like 's50002'/'t50001' set
    ports per protocol, 'vX.Y' the version and 'pNN' the pruning level.
    Returns {host: {protocol: port, 'pruning': level, 'version': version}}.

    Fixes: regexes are now raw strings (the originals produced invalid-escape
    warnings) and an unused function-scope import of PROTOCOL_VERSION was
    removed.
    """
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                if re.match(r"[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    if port == '':
                        port = bitcoin.NetworkConstants.DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match(r"v(.?)+", v):
                    version = v[1:]
                elif re.match(r"p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '':
                        pruning_level = '0'
        if out:
            out['pruning'] = pruning_level
            out['version'] = version
            servers[host] = out
    return servers
def filter_version(servers):
    """Keep only servers whose protocol version is at least ours."""
    def is_recent(version):
        try:
            return util.normalize_version(version) >= util.normalize_version(PROTOCOL_VERSION)
        except Exception:
            # Missing/garbled version strings are treated as too old.
            return False
    result = {}
    for server, info in servers.items():
        if is_recent(info.get('version')):
            result[server] = info
    return result
def filter_protocol(hostmap, protocol='s'):
    '''Filters the hostmap for those implementing protocol.
    The result is a list in serialized form.'''
    return [serialize_server(host, portmap.get(protocol), protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
    """Pick one random eligible server, or None when none qualifies.

    hostmap: host -> portmap dict; defaults to the network's built-in servers.
    protocol: 's' (SSL) or 't' (TCP).
    exclude_set: serialized servers to skip.  Bug fix: the original used the
    mutable default `exclude_set=set()`; a None sentinel avoids any risk of
    cross-call sharing.
    """
    if hostmap is None:
        hostmap = bitcoin.NetworkConstants.DEFAULT_SERVERS
    if exclude_set is None:
        exclude_set = set()
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
from .simple_config import SimpleConfig
# Supported proxy modes, in the order used by set_proxy()'s socks mapping.
proxy_modes = ['socks4', 'socks5', 'http']

def serialize_proxy(p):
    """Flatten a proxy dict into 'mode:host:port:user:password', or None."""
    if not isinstance(p, dict):
        return None
    fields = [p.get('mode'), p.get('host'), p.get('port'),
              p.get('user', ''), p.get('password', '')]
    return ':'.join(fields)

def deserialize_proxy(s):
    """Parse a serialized proxy string back into a dict (None for 'none').

    Missing fields fall back to socks5/localhost and the mode's default port.
    """
    if not isinstance(s, str):
        return None
    if s.lower() == 'none':
        return None
    proxy = {"mode": "socks5", "host": "localhost"}
    args = s.split(':')
    n = 0
    # Leading mode token is optional; only consume it when recognized.
    if proxy_modes.count(args[n]) == 1:
        proxy["mode"] = args[n]
        n += 1
    if len(args) > n:
        proxy["host"] = args[n]
        n += 1
    if len(args) > n:
        proxy["port"] = args[n]
        n += 1
    else:
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if len(args) > n:
        proxy["user"] = args[n]
        n += 1
    if len(args) > n:
        proxy["password"] = args[n]
    return proxy
def deserialize_server(server_str):
    """Split 'host:port:protocol'; validates protocol and numeric port."""
    parts = str(server_str).split(':')
    host, port, protocol = parts
    assert protocol in 'st'
    int(port)  # Throw if cannot be converted to int
    return host, port, protocol
def serialize_server(host, port, protocol):
    """Join host, port and protocol into the canonical 'host:port:protocol'."""
    joined = ':'.join([host, port, protocol])
    return str(joined)
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
    """Initialise network state and kick off the initial connections.

    config: dict or SimpleConfig; a plain dict is wrapped in SimpleConfig.
    """
    if config is None:
        config = {}  # Do not use mutables as default values!
    util.DaemonThread.__init__(self)
    self.config = SimpleConfig(config) if isinstance(config, dict) else config
    # 'oneserver' mode disables the pool of redundant connections.
    self.num_server = 10 if not self.config.get('oneserver') else 0
    self.blockchains = blockchain.read_blockchains(self.config)
    self.print_error("blockchains", self.blockchains.keys())
    self.blockchain_index = config.get('blockchain_index', 0)
    if self.blockchain_index not in self.blockchains.keys():
        self.blockchain_index = 0
    # Server for addresses and transactions
    self.default_server = self.config.get('server')
    # Sanitize default server
    try:
        deserialize_server(self.default_server)
    except:
        self.default_server = None
    if not self.default_server:
        self.default_server = pick_random_server()
    self.lock = threading.Lock()
    self.pending_sends = []
    self.message_id = 0  # monotonically increasing RPC id, see queue_request()
    self.debug = False
    self.irc_servers = {}  # returned by interface (list from irc)
    self.recent_servers = self.read_recent_servers()
    self.banner = ''
    self.donation_address = ''
    self.relay_fee = None
    # callbacks passed with subscriptions
    self.subscriptions = defaultdict(list)
    self.sub_cache = {}
    # callbacks set by the GUI
    self.callbacks = defaultdict(list)
    # Private directory for server certificates (owner-only permissions).
    dir_path = os.path.join( self.config.path, 'certs')
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
        os.chmod(dir_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
    # subscriptions and requests
    self.subscribed_addresses = set()
    self.h2addr = {}
    # Requests from client we've not seen a response to
    self.unanswered_requests = {}
    # retry times
    self.server_retry_time = time.time()
    self.nodes_retry_time = time.time()
    # kick off the network. interface is the main server we are currently
    # communicating with. interfaces is the set of servers we are connecting
    # to or have an ongoing connection with
    self.interface = None
    self.interfaces = {}
    self.auto_connect = self.config.get('auto_connect', True)
    self.connecting = set()
    self.socket_queue = queue.Queue()
    self.start_network(deserialize_server(self.default_server)[2],
                       deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
    """Subscribe *callback* to each event name in *events*."""
    with self.lock:
        for event_name in events:
            self.callbacks[event_name].append(callback)
def unregister_callback(self, callback):
    """Remove *callback* from every event it was registered for."""
    with self.lock:
        for registered in self.callbacks.values():
            if callback in registered:
                registered.remove(callback)
def trigger_callback(self, event, *args):
    """Invoke every callback registered for *event*, outside the lock."""
    with self.lock:
        # Copy so callbacks may (un)register without mutating the iterated list.
        callbacks = self.callbacks[event][:]
    # Idiom fix: the original built a throwaway list via a comprehension
    # purely for its side effects; a plain loop expresses the intent.
    for callback in callbacks:
        callback(event, *args)
def read_recent_servers(self):
    """Best-effort load of the recently used server list; [] on any problem."""
    if not self.config.path:
        return []
    path = os.path.join(self.config.path, "recent_servers")
    try:
        with open(path, "r") as f:
            data = f.read()
            return json.loads(data)
    except (OSError, ValueError):
        # Narrowed from a bare 'except:': missing, unreadable or corrupt
        # file simply means there are no recent servers.
        return []
def save_recent_servers(self):
    """Best-effort persist of the recently used server list."""
    if not self.config.path:
        return
    path = os.path.join(self.config.path, "recent_servers")
    s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
    try:
        with open(path, "w") as f:
            f.write(s)
    except OSError:
        # Narrowed from a bare 'except:'; persistence is optional, so
        # write failures are deliberately ignored.
        pass
def get_server_height(self):
    """Chain tip height reported by the main server (0 when disconnected)."""
    if self.interface:
        return self.interface.tip
    return 0
def server_is_lagging(self):
    """True when the main server's tip trails our local chain by >1 block."""
    sh = self.get_server_height()
    if not sh:
        self.print_error('no height for main interface')
        return True
    lh = self.get_local_height()
    lagging = (lh - sh) > 1
    if lagging:
        self.print_error('%s is lagging (%d vs %d)' % (self.default_server, sh, lh))
    return lagging
def set_status(self, status):
    """Record the connection status and notify registered listeners."""
    self.connection_status = status
    self.notify('status')
def is_connected(self):
    """True when a main interface is currently assigned."""
    if self.interface is None:
        return False
    return True
def is_connecting(self):
    """True while a connection to the default server is being established."""
    return self.connection_status == 'connecting'
def is_up_to_date(self):
    """True when every queued request has received a response."""
    return not self.unanswered_requests
def queue_request(self, method, params, interface=None):
    """Queue an RPC on *interface* (default: main) and return its message id.

    All requests must flow through here so message ids remain unique.
    """
    if interface is None:
        interface = self.interface
    msg_id = self.message_id
    self.message_id += 1
    if self.debug:
        self.print_error(interface.host, "-->", method, params, msg_id)
    interface.queue_request(method, params, msg_id)
    return msg_id
def send_subscriptions(self):
    """Re-send all standing requests and subscriptions after (re)connecting."""
    self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
    # The response cache belongs to the previous connection.
    self.sub_cache.clear()
    # Resend unanswered requests
    requests = self.unanswered_requests.values()
    self.unanswered_requests = {}
    for request in requests:
        message_id = self.queue_request(request[0], request[1])
        self.unanswered_requests[message_id] = request
    self.queue_request('server.banner', [])
    self.queue_request('server.donation_address', [])
    self.queue_request('server.peers.subscribe', [])
    self.request_fee_estimates()
    self.queue_request('blockchain.relayfee', [])
    if self.interface.ping_required():
        params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
        self.queue_request('server.version', params, self.interface)
    # Re-establish every address subscription on the new connection.
    for h in self.subscribed_addresses:
        self.queue_request('blockchain.scripthash.subscribe', [h])
def request_fee_estimates(self):
    """Query the server for a fee estimate at each configured target."""
    self.config.requested_fee_estimates()
    for target in bitcoin.FEE_TARGETS:
        self.queue_request('blockchain.estimatefee', [target])
def get_status_value(self, key):
    """Current value associated with a notification *key*."""
    if key == 'status':
        result = self.connection_status
    elif key == 'banner':
        result = self.banner
    elif key == 'fee':
        result = self.config.fee_estimates
    elif key == 'updated':
        result = (self.get_local_height(), self.get_server_height())
    elif key == 'servers':
        result = self.get_servers()
    elif key == 'interfaces':
        result = self.get_interfaces()
    # An unknown key raises UnboundLocalError, matching historical behaviour.
    return result
def notify(self, key):
    """Fire the callback for *key*; 'status'/'updated' carry no payload."""
    if key in ('status', 'updated'):
        self.trigger_callback(key)
    else:
        self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
    """(host, port, protocol, proxy, auto_connect) for the default server."""
    server_parts = deserialize_server(self.default_server)
    return server_parts + (self.proxy, self.auto_connect)
def get_donation_address(self):
    """Server-announced donation address, or None while disconnected."""
    if not self.is_connected():
        return None
    return self.donation_address
def get_interfaces(self):
    """Names of the servers with an interface in connected state."""
    return [server for server in self.interfaces]
def get_servers(self):
    """Return a {host: {protocol: port}} map of servers we could connect to.

    Starts from a *copy* of the hard-coded defaults — the original updated
    bitcoin.NetworkConstants.DEFAULT_SERVERS in place, so IRC/recent
    servers leaked into the shared module-level table across calls.
    """
    out = dict(bitcoin.NetworkConstants.DEFAULT_SERVERS)
    if self.irc_servers:
        out.update(filter_version(self.irc_servers.copy()))
    else:
        for s in self.recent_servers:
            try:
                host, port, protocol = deserialize_server(s)
            except Exception:
                # narrowed from a bare `except:`; skip malformed entries only
                continue
            if host not in out:
                out[host] = {protocol: port}
    return out
def start_interface(self, server):
    """Kick off a background connection attempt to *server*.

    No-op when a connection already exists or is pending.  The Connection
    object reports its result through self.socket_queue (consumed in
    maintain_sockets), so the instance itself does not need to be kept;
    the original bound it to an unused local.
    """
    if server in self.interfaces or server in self.connecting:
        return
    if server == self.default_server:
        self.print_error("connecting to %s as new interface" % server)
        self.set_status('connecting')
    self.connecting.add(server)
    Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
    """Try to open one more interface to a randomly chosen eligible server."""
    exclude = self.disconnected_servers | set(self.interfaces)
    choice = pick_random_server(self.get_servers(), self.protocol, exclude)
    if choice:
        self.start_interface(choice)
def start_interfaces(self):
    """Connect to the default server plus enough random peers to reach num_server."""
    self.start_interface(self.default_server)
    extra = self.num_server - 1
    for _ in range(extra):
        self.start_random_interface()
def set_proxy(self, proxy):
    """Install or remove a SOCKS proxy by monkey-patching the socket module.

    proxy: dict with 'mode', 'host', 'port' and optional 'user'/'password',
    or None/falsy to restore the original socket implementation.
    """
    self.proxy = proxy
    # Store these somewhere so we can un-monkey-patch
    if not hasattr(socket, "_socketobject"):
        socket._socketobject = socket.socket
        socket._getaddrinfo = socket.getaddrinfo
    if proxy:
        self.print_error('setting proxy', proxy)
        # socks proxy modes are 1-based; proxy_modes is the 0-based name list
        proxy_mode = proxy_modes.index(proxy["mode"]) + 1
        socks.setdefaultproxy(proxy_mode,
                              proxy["host"],
                              int(proxy["port"]),
                              # socks.py seems to want either None or a non-empty string
                              username=(proxy.get("user", "") or None),
                              password=(proxy.get("password", "") or None))
        socket.socket = socks.socksocket
        # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
        socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
    else:
        socket.socket = socket._socketobject
        socket.getaddrinfo = socket._getaddrinfo
def start_network(self, protocol, proxy):
    """Bring the network up: record settings, install proxy, open interfaces.

    Preconditions: no interface may currently be open or pending.
    """
    assert not self.interface and not self.interfaces
    assert not self.connecting and self.socket_queue.empty()
    self.print_error('starting network')
    self.protocol = protocol
    self.disconnected_servers = set()
    self.set_proxy(proxy)
    self.start_interfaces()
def stop_network(self):
    """Tear down every interface and reset connection bookkeeping."""
    self.print_error("stopping network")
    for iface in list(self.interfaces.values()):
        self.close_interface(iface)
    if self.interface:
        self.close_interface(self.interface)
    assert self.interface is None
    assert not self.interfaces
    self.connecting = set()
    # Fresh queue: drop results of any connection attempts still in flight.
    self.socket_queue = queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
    """Apply new connection settings, restarting or switching as needed.

    Invalid parameters abort silently, as do changes the config backend
    rejects.  A proxy or protocol change restarts the whole network; a
    server-only change just switches interfaces.
    """
    proxy_str = serialize_proxy(proxy)
    server = serialize_server(host, port, protocol)
    # sanitize parameters: a malformed server string or proxy dict aborts
    try:
        deserialize_server(serialize_server(host, port, protocol))
        if proxy:
            proxy_modes.index(proxy["mode"]) + 1
            int(proxy['port'])
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate
        return
    self.config.set_key('auto_connect', auto_connect, False)
    self.config.set_key("proxy", proxy_str, False)
    self.config.set_key("server", server, True)
    # abort if changes were not allowed by config
    if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
        return
    self.auto_connect = auto_connect
    if self.proxy != proxy or self.protocol != protocol:
        # Restart the network defaulting to the given server
        self.stop_network()
        self.default_server = server
        self.start_network(protocol, proxy)
    elif self.default_server != server:
        self.switch_to_interface(server)
    else:
        self.switch_lagging_interface()
        self.notify('updated')
def switch_to_random_interface(self):
    """Switch to a random connected server other than the current one."""
    candidates = [s for s in self.get_interfaces() if s != self.default_server]
    if candidates:
        self.switch_to_interface(random.choice(candidates))
def switch_lagging_interface(self):
    """If auto_connect is on and our server lags, hop to an up-to-date peer."""
    if not (self.auto_connect and self.server_is_lagging()):
        return
    # Candidates must have the correct header at our local height, not
    # merely a greater height.
    header = self.blockchain().read_header(self.get_local_height())
    candidates = [server for server, iface in self.interfaces.items()
                  if iface.tip_header == header]
    if candidates:
        self.switch_to_interface(random.choice(candidates))
def switch_to_interface(self, server):
    """Make *server* our main interface.

    If no connection to it exists yet, spawn one; the actual switch then
    happens on receipt of the connection notification.  No-op if it is
    already the main interface.
    """
    self.default_server = server
    if server not in self.interfaces:
        self.interface = None
        self.start_interface(server)
        return
    candidate = self.interfaces[server]
    if self.interface == candidate:
        return
    self.print_error("switching to", server)
    # stop any current interface in order to terminate subscriptions
    # fixme: we don't want to close headers sub
    #self.close_interface(self.interface)
    self.interface = candidate
    self.send_subscriptions()
    self.set_status('connected')
    self.notify('updated')
def close_interface(self, interface):
    """Close *interface* and drop it from our tables (safe to pass None)."""
    if not interface:
        return
    self.interfaces.pop(interface.server, None)
    if interface.server == self.default_server:
        self.interface = None
    #Yang
    self.print_error("close interface to %s " % interface.server)
    interface.close()
def add_recent_server(self, server):
    """Move *server* to the front of the recent-server MRU list (max 20)."""
    current = self.recent_servers
    if server in current:
        current.remove(server)
    current.insert(0, server)
    self.recent_servers = current[:20]
    self.save_recent_servers()
def process_response(self, interface, response, callbacks):
    """Dispatch one canonical-form response.

    First handles responses the Network object itself consumes (server
    metadata, peer list, fees, header/chunk downloads), then forwards the
    response unchanged to every client callback.
    """
    if self.debug:
        self.print_error("<--", response)
    error = response.get('error')
    result = response.get('result')
    method = response.get('method')
    params = response.get('params')
    # We handle some responses; return the rest to the client.
    if method == 'server.version':
        interface.server_version = result
    elif method == 'blockchain.headers.subscribe':
        if error is None:
            self.on_notify_header(interface, result)
    elif method == 'server.peers.subscribe':
        if error is None:
            self.irc_servers = parse_servers(result)
            self.notify('servers')
    elif method == 'server.banner':
        if error is None:
            self.banner = result
            self.notify('banner')
    elif method == 'server.donation_address':
        if error is None:
            self.donation_address = result
    elif method == 'blockchain.estimatefee':
        if error is None and result > 0:
            i = params[0]
            # server reports BTC/kB as a float; store integer satoshis
            fee = int(result*COIN)
            self.config.update_fee_estimates(i, fee)
            self.print_error("fee_estimates[%d]" % i, fee)
            self.notify('fee')
    elif method == 'blockchain.relayfee':
        if error is None:
            self.relay_fee = int(result * COIN)
            self.print_error("relayfee", self.relay_fee)
    elif method == 'blockchain.block.get_chunk':
        self.on_get_chunk(interface, response)
    elif method == 'blockchain.block.get_header':
        self.on_get_header(interface, response)
    for callback in callbacks:
        callback(response)
def get_index(self, method, params):
    """Hashable key for the subscription/cache tables: 'method[:param0]'."""
    suffix = ':' + str(params[0]) if params else ''
    return str(method) + suffix
def process_responses(self, interface):
    """Drain *interface*'s response queue, canonicalize, and dispatch.

    Responses paired with a request are matched either to a one-shot
    client request (unanswered_requests) or to subscription callbacks.
    Unsolicited messages (request is None) are assumed to be subscription
    notifications and are rewritten into request/response shape before
    dispatching via process_response().
    """
    responses = interface.get_responses()
    for request, response in responses:
        if request:
            method, params, message_id = request
            k = self.get_index(method, params)
            # client requests go through self.send() with a
            # callback, are only sent to the current interface,
            # and are placed in the unanswered_requests dictionary
            client_req = self.unanswered_requests.pop(message_id, None)
            if client_req:
                assert interface == self.interface
                callbacks = [client_req[2]]
            else:
                # fixme: will only work for subscriptions
                k = self.get_index(method, params)
                callbacks = self.subscriptions.get(k, [])
            # Copy the request method and params to the response
            response['method'] = method
            response['params'] = params
            # Only once we've received a response to an addr subscription
            # add it to the list; avoids double-sends on reconnection
            if method == 'blockchain.scripthash.subscribe':
                self.subscribed_addresses.add(params[0])
        else:
            if not response:  # Closed remotely / misbehaving
                self.connection_down(interface.server)
                break
            # Rewrite response shape to match subscription request response
            method = response.get('method')
            params = response.get('params')
            k = self.get_index(method, params)
            if method == 'blockchain.headers.subscribe':
                response['result'] = params[0]
                response['params'] = []
            elif method == 'blockchain.scripthash.subscribe':
                response['params'] = [params[0]]  # addr
                response['result'] = params[1]
            callbacks = self.subscriptions.get(k, [])
        # update cache if it's a subscription
        if method.endswith('.subscribe'):
            self.sub_cache[k] = response
        # Response is now in canonical form
        self.process_response(interface, response, callbacks)
def addr_to_scripthash(self, addr):
    """Convert an address to its scripthash, recording the reverse mapping."""
    scripthash = bitcoin.address_to_scripthash(addr)
    self.h2addr.setdefault(scripthash, addr)
    return scripthash
def overload_cb(self, callback):
    """Wrap *callback* so scripthash params are translated back to addresses."""
    def translated(response):
        patched = response.copy()
        scripthash = patched.pop('params')[0]
        patched['params'] = [self.h2addr[scripthash]]
        callback(patched)
    return translated
def subscribe_to_addresses(self, addresses, callback):
    """Subscribe to status changes of *addresses* (sent as scripthashes)."""
    messages = [('blockchain.scripthash.subscribe', [self.addr_to_scripthash(addr)])
                for addr in addresses]
    self.send(messages, self.overload_cb(callback))
def request_address_history(self, address, callback):
    """Fetch the full history of *address* via its scripthash."""
    scripthash = self.addr_to_scripthash(address)
    request = ('blockchain.scripthash.get_history', [scripthash])
    self.send([request], self.overload_cb(callback))
def send(self, messages, callback):
    """Queue *messages* (a list of (method, params) tuples) for sending.

    Actual transmission happens in process_pending_sends on the network
    thread; this only records the batch under the lock.
    """
    batch = list(messages)
    with self.lock:
        self.pending_sends.append((batch, callback))
def process_pending_sends(self):
    """Flush queued sends: register subscriptions, answer from cache, or queue.

    Subscription requests record their callback and may be satisfied
    immediately from sub_cache; everything else is queued on the current
    interface and tracked in unanswered_requests.
    """
    # Requests needs connectivity. If we don't have an interface,
    # we cannot process them.
    if not self.interface:
        return
    with self.lock:
        sends = self.pending_sends
        self.pending_sends = []
    for messages, callback in sends:
        for method, params in messages:
            r = None
            if method.endswith('.subscribe'):
                k = self.get_index(method, params)
                # add callback to list
                l = self.subscriptions.get(k, [])
                if callback not in l:
                    l.append(callback)
                self.subscriptions[k] = l
                # check cached response for subscriptions
                r = self.sub_cache.get(k)
            if r is not None:
                util.print_error("cache hit", k)
                callback(r)
            else:
                message_id = self.queue_request(method, params)
                self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
    """Remove *callback* from every subscription list so it can be GC'd.

    We cannot unsubscribe server-side, so later notifications for it are
    reported (harmlessly) as unexpected by the response processing.
    """
    with self.lock:
        for callbacks in self.subscriptions.values():
            if callback in callbacks:
                callbacks.remove(callback)
def connection_down(self, server):
    """Record that *server* went down (or never came up) and clean up.

    Whether a connection actually existed is distinguished by membership
    in self.interfaces.
    """
    self.print_error("connection_down " + server)
    self.disconnected_servers.add(server)
    if server == self.default_server:
        self.set_status('disconnected')
    if server in self.interfaces:
        self.close_interface(self.interfaces[server])
        self.notify('interfaces')
    # A chain being caught up through this server must find a new source.
    for chain in self.blockchains.values():
        if chain.catch_up == server:
            chain.catch_up = None
def new_interface(self, server, socket):
    """Wrap a freshly connected *socket* in an Interface and register it.

    Subscribes to headers immediately; if this is the default server, the
    main-interface switch happens right away.
    """
    # todo: get tip first, then decide which checkpoint to use.
    self.add_recent_server(server)
    interface = Interface(server, socket)
    interface.blockchain = None
    interface.tip_header = None
    interface.tip = 0
    interface.mode = 'default'   # see on_get_header for the mode machine
    interface.request = None
    self.interfaces[server] = interface
    self.queue_request('blockchain.headers.subscribe', [], interface)
    if server == self.default_server:
        self.switch_to_interface(server)
    #self.notify('interfaces')
def maintain_sockets(self):
    '''Socket maintenance.'''
    # Responses to connection attempts?
    while not self.socket_queue.empty():
        server, socket = self.socket_queue.get()
        if server in self.connecting:
            self.connecting.remove(server)
        if socket:
            self.new_interface(server, socket)
        else:
            self.connection_down(server)
    # Send pings and shut down stale interfaces
    # must use copy of values
    for interface in list(self.interfaces.values()):
        if interface.has_timed_out():
            self.connection_down(interface.server)
        elif interface.ping_required():
            params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
            self.queue_request('server.version', params, interface)
    now = time.time()
    # nodes: top up the connection pool and periodically retry failures
    if len(self.interfaces) + len(self.connecting) < self.num_server:
        self.start_random_interface()
    if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
        self.print_error('network: retrying connections')
        self.disconnected_servers = set([])
        self.nodes_retry_time = now
    # main interface: reconnect / switch as needed
    if not self.is_connected():
        if self.auto_connect:
            if not self.is_connecting():
                self.switch_to_random_interface()
        else:
            if self.default_server in self.disconnected_servers:
                if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                    self.disconnected_servers.remove(self.default_server)
                    self.server_retry_time = now
            else:
                self.switch_to_interface(self.default_server)
    else:
        if self.config.is_fee_estimates_update_required():
            self.request_fee_estimates()
def request_chunk(self, interface, idx):
    """Ask *interface* for header chunk *idx* and mark the request pending."""
    interface.print_error("requesting chunk %d" % idx)
    params = [idx]
    self.queue_request('blockchain.block.get_chunk', params, interface)
    interface.request = idx
    interface.req_time = time.time()
def on_get_chunk(self, interface, response):
    '''Handle receiving a chunk of block headers'''
    error = response.get('error')
    result = response.get('result')
    params = response.get('params')
    if result is None or params is None or error is not None:
        interface.print_error(error or 'bad response')
        return
    # Ignore unsolicited chunks
    index = params[0]
    if interface.request != index:
        return
    connect = interface.blockchain.connect_chunk(index, result)
    # A chunk that fails to connect means the server is on a bad chain.
    if not connect:
        self.connection_down(interface.server)
        return
    # If not finished, get the next chunk; otherwise leave catch-up mode.
    if interface.blockchain.height() < interface.tip:
        self.request_chunk(interface, index+1)
    else:
        interface.request = None
        interface.mode = 'default'
        interface.print_error('catch up done', interface.blockchain.height())
        interface.blockchain.catch_up = None
    self.notify('updated')
def request_header(self, interface, height):
    """Ask *interface* for the single header at *height* and mark it pending."""
    interface.print_error("requesting header %d" % height)
    #Yang:
    # NOTE(review): this uses the module-level print_error, not interface's.
    print_error("request header interface " + str(interface.mode) + " height " + str(height))
    self.queue_request('blockchain.block.get_header', [height], interface)
    interface.request = height
    interface.req_time = time.time()
def on_get_header(self, interface, response):
    '''Handle receiving a single block header.

    Drives the interface's chain-sync state machine:
      - 'backward': step back (exponentially) until we find a header on a
        known chain, establishing a good/bad bracket;
      - 'binary': binary-search the bracket down to the exact fork point,
        then join, reorg, fork, or enter catch-up;
      - 'catch_up': extend the chain header by header (or by chunk).
    '''
    print_error("Handle receiving a single block header")
    header = response.get('result')
    if not header:
        interface.print_error(response)
        self.connection_down(interface.server)
        return
    height = header.get('block_height')
    if interface.request != height:
        interface.print_error("unsolicited header", interface.request, height)
        self.connection_down(interface.server)
        return
    chain = blockchain.check_header(header)
    if interface.mode == 'backward':
        if chain:
            # Found a known header: switch to binary search for fork point.
            interface.print_error("binary search")
            interface.mode = 'binary'
            interface.blockchain = chain
            interface.good = height
            next_height = (interface.bad + interface.good) // 2
        else:
            if height == 0:
                # Even the genesis header doesn't match: bad server.
                interface.print_error("hight 0")
                self.connection_down(interface.server)
                next_height = None
            else:
                # Step back twice as far as we've come so far.
                interface.bad = height
                interface.bad_header = header
                delta = interface.tip - height
                next_height = max(0, interface.tip - 2 * delta)
                interface.print_error("next_height" + str(next_height))
    elif interface.mode == 'binary':
        if chain:
            interface.good = height
            interface.blockchain = chain
        else:
            interface.bad = height
            interface.bad_header = header
        if interface.bad != interface.good + 1:
            # Bracket not yet tight: keep bisecting.
            next_height = (interface.bad + interface.good) // 2
        elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
            interface.print_error("binary")
            self.connection_down(interface.server)
            next_height = None
        else:
            # Fork point found at interface.bad.
            branch = self.blockchains.get(interface.bad)
            if branch is not None:
                if branch.check_header(interface.bad_header):
                    interface.print_error('joining chain', interface.bad)
                    next_height = None
                elif branch.parent().check_header(header):
                    interface.print_error('reorg', interface.bad, interface.tip)
                    interface.blockchain = branch.parent()
                    next_height = None
                else:
                    interface.print_error('checkpoint conflicts with existing fork', branch.path())
                    #Yang
                    # Conflicting fork: truncate it and rebuild from here.
                    branch.write('', 0)
                    branch.save_header(interface.bad_header)
                    interface.mode = 'catch_up'
                    interface.blockchain = branch
                    next_height = interface.bad + 1
                    interface.blockchain.catch_up = interface.server
            else:
                bh = interface.blockchain.height()
                next_height = None
                if bh > interface.good:
                    if not interface.blockchain.check_header(interface.bad_header):
                        # Start a brand new fork at the bad height.
                        b = interface.blockchain.fork(interface.bad_header)
                        self.blockchains[interface.bad] = b
                        interface.blockchain = b
                        interface.print_error("new chain", b.checkpoint)
                        interface.mode = 'catch_up'
                        next_height = interface.bad + 1
                        interface.blockchain.catch_up = interface.server
                else:
                    assert bh == interface.good
                    if interface.blockchain.catch_up is None and bh < interface.tip:
                        interface.print_error("catching up from %d" % (bh + 1))
                        interface.mode = 'catch_up'
                        next_height = bh + 1
                        interface.blockchain.catch_up = interface.server
            self.notify('updated')
    elif interface.mode == 'catch_up':
        can_connect = interface.blockchain.can_connect(header)
        if can_connect:
            interface.blockchain.save_header(header)
            next_height = height + 1 if height < interface.tip else None
        else:
            # go back
            interface.print_error("cannot connect", height)
            interface.mode = 'backward'
            interface.bad = height
            interface.bad_header = header
            next_height = height - 1
        if next_height is None:
            # exit catch_up state
            interface.print_error('catch up done', interface.blockchain.height())
            interface.blockchain.catch_up = None
            self.switch_lagging_interface()
            self.notify('updated')
    else:
        raise BaseException(interface.mode)
    # If not finished, get the next header
    if next_height:
        interface.print_error("requesting header or chunk %d" % next_height + interface.mode)
        if interface.mode == 'catch_up' and interface.tip > next_height + 50:
            self.request_chunk(interface, next_height // 2016)
            # Yang: comment request_chunk cos oversize
            #self.request_header(interface, next_height)
        else:
            self.request_header(interface, next_height)
    else:
        interface.mode = 'default'
        interface.request = None
        self.notify('updated')
    # refresh network dialog
    self.notify('interfaces')
def maintain_requests(self):
    """Time out header/chunk requests that got no reply within 20 seconds.

    request_chunk/request_header stamp ``interface.req_time``; the
    original checked ``interface.request_time``, which is never assigned
    in this module — so the timeout either raised AttributeError or read
    a stale value instead of firing.
    """
    for interface in list(self.interfaces.values()):
        if interface.request and time.time() - interface.req_time > 20:
            interface.print_error("blockchain request timed out")
            self.connection_down(interface.server)
def wait_on_sockets(self):
    """Select over all interfaces; send pending requests, read responses."""
    # Python docs say Windows doesn't like empty selects.
    # Sleep to prevent busy looping
    if not self.interfaces:
        time.sleep(0.1)
        return
    rin = [i for i in self.interfaces.values()]
    # only interfaces with queued requests are select()ed for writing
    win = [i for i in self.interfaces.values() if i.num_requests()]
    try:
        rout, wout, xout = select.select(rin, win, [], 0.1)
    except socket.error as e:
        # TODO: py3, get code from e
        code = None
        if code == errno.EINTR:
            return
        raise
    assert not xout
    for interface in wout:
        interface.send_requests()
    for interface in rout:
        self.process_responses(interface)
def init_headers_file(self):
    """Bootstrap the headers file, downloading it in the background if needed.

    Sets self.downloading_headers; run() waits on that flag before
    entering its main loop.
    """
    b = self.blockchains[0]
    if b.get_hash(0) == bitcoin.NetworkConstants.GENESIS:
        # headers file already starts at the right genesis: nothing to do
        self.downloading_headers = False
        return
    filename = b.path()

    def download_thread():
        try:
            import urllib.request, socket
            socket.setdefaulttimeout(30)
            self.print_error("downloading ", bitcoin.NetworkConstants.HEADERS_URL)
            # download to a temp name, then rename atomically
            urllib.request.urlretrieve(bitcoin.NetworkConstants.HEADERS_URL, filename + '.tmp')
            os.rename(filename + '.tmp', filename)
            self.print_error("done.")
        except Exception:
            # best-effort: fall back to an empty headers file
            self.print_error("download failed. creating file", filename)
            open(filename, 'wb+').close()
        b = self.blockchains[0]
        with b.lock: b.update_size()
        self.downloading_headers = False

    self.downloading_headers = True
    t = threading.Thread(target=download_thread)
    t.daemon = True
    t.start()
def run(self):
    """Network-thread main loop: maintain sockets, pump I/O, run jobs."""
    self.init_headers_file()
    # Block until any initial headers download completes.
    while self.is_running() and self.downloading_headers:
        time.sleep(1)
    while self.is_running():
        self.maintain_sockets()
        self.wait_on_sockets()
        self.maintain_requests()
        self.run_jobs()  # Synchronizer and Verifier
        self.process_pending_sends()
    self.stop_network()
    self.on_stop()
def on_notify_header(self, interface, header):
    """Handle a headers-subscription notification (the server's new tip).

    If the header fits a known chain we may just switch; otherwise the
    interface enters 'backward' mode to locate the fork point, or
    'catch_up' when we have no chain data at all.
    """
    height = header.get('block_height')
    if not height:
        return
    interface.tip_header = header
    interface.tip = height
    if interface.mode != 'default':
        # a sync state machine is already running on this interface
        return
    b = blockchain.check_header(header)
    if b:
        interface.blockchain = b
        self.switch_lagging_interface()
        self.notify('updated')
        self.notify('interfaces')
        return
    b = blockchain.can_connect(header)
    if b:
        interface.blockchain = b
        b.save_header(header)
        self.switch_lagging_interface()
        self.notify('updated')
        self.notify('interfaces')
        return
    tip = max([x.height() for x in self.blockchains.values()])
    if tip >= 0:
        # unknown header: search backwards for a common ancestor
        interface.mode = 'backward'
        interface.bad = height
        interface.bad_header = header
        self.request_header(interface, min(tip, height - 1))
    else:
        chain = self.blockchains[0]
        if chain.catch_up is None:
            # NOTE(review): elsewhere catch_up is set to interface.server (a
            # string); here it stores the interface object — confirm intended.
            chain.catch_up = interface
            interface.mode = 'catch_up'
            interface.blockchain = chain
            self.request_header(interface, 0)
def blockchain(self):
    """Return the chain the main interface follows (updates blockchain_index)."""
    iface = self.interface
    if iface and iface.blockchain is not None:
        self.blockchain_index = iface.blockchain.checkpoint
    return self.blockchains[self.blockchain_index]
def get_blockchains(self):
    """Map checkpoint -> interfaces currently following that chain (non-empty only)."""
    result = {}
    connected = list(self.interfaces.values())
    for checkpoint, chain in self.blockchains.items():
        followers = [iface for iface in connected if iface.blockchain == chain]
        if followers:
            result[checkpoint] = followers
    return result
def follow_chain(self, index):
    """Make the fork with checkpoint *index* the chain we follow.

    Switches to an interface already on that chain, then re-applies the
    connection parameters so the choice takes effect/persists.
    Raises if no fork with that checkpoint exists.
    """
    blockchain = self.blockchains.get(index)
    if blockchain:
        self.blockchain_index = index
        self.config.set_key('blockchain_index', index)
        for i in self.interfaces.values():
            if i.blockchain == blockchain:
                self.switch_to_interface(i.server)
                break
    else:
        raise BaseException('blockchain not found', index)
    if self.interface:
        server = self.interface.server
        # keep proxy/auto_connect, but take host/port/protocol from the
        # interface we just switched to
        host, port, protocol, proxy, auto_connect = self.get_parameters()
        host, port, protocol = server.split(':')
        self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
    """Height of the chain we are currently following."""
    chain = self.blockchain()
    return chain.height()
def synchronous_get(self, request, timeout=30):
    """Send one *request* and block until its response arrives.

    Raises BaseException on timeout or on a server-reported error.
    """
    answer = queue.Queue()
    self.send([request], answer.put)
    try:
        response = answer.get(True, timeout)
    except queue.Empty:
        raise BaseException('Server did not answer')
    error = response.get('error')
    if error:
        raise BaseException(error)
    return response.get('result')
def broadcast(self, tx, timeout=30):
    """Broadcast *tx*; return (True, txid) on success or (False, message)."""
    txid = tx.txid()
    request = ('blockchain.transaction.broadcast', [str(tx)])
    try:
        answer = self.synchronous_get(request, timeout)
    except BaseException as e:
        return False, "error: " + str(e)
    # the server echoes the txid back on success
    if answer != txid:
        return False, "error: " + answer
    return True, answer
|
executorselenium.py | import json
import os
import socket
import threading
import time
import traceback
import urlparse
import uuid
from .base import (CallbackHandler,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
extra_timeout,
strip_server)
from .protocol import (BaseProtocolPart,
TestharnessProtocolPart,
Protocol,
SelectorProtocolPart,
ClickProtocolPart,
SendKeysProtocolPart,
ActionSequenceProtocolPart,
TestDriverProtocolPart)
from ..testrunner import Stop
# Directory containing this module (used to locate the bundled JS helpers).
here = os.path.join(os.path.split(__file__)[0])
# Selenium names are imported lazily by do_delayed_imports() so this module
# can be imported even when selenium is not installed.
webdriver = None
exceptions = None
RemoteConnection = None
Command = None
def do_delayed_imports():
    """Import selenium lazily, publishing the names at module scope."""
    global webdriver, exceptions, RemoteConnection, Command
    from selenium import webdriver
    from selenium.common import exceptions
    from selenium.webdriver.remote.remote_connection import RemoteConnection
    from selenium.webdriver.remote.command import Command
class SeleniumBaseProtocolPart(BaseProtocolPart):
    """Basic script execution / window handling over Selenium WebDriver."""

    def setup(self):
        # Share the parent protocol's webdriver session.
        self.webdriver = self.parent.webdriver

    def execute_script(self, script, async=False):
        # NOTE(review): `async` became a reserved word in Python 3.7; this
        # module is Python 2 era (see the urlparse import) — rename on port.
        method = self.webdriver.execute_async_script if async else self.webdriver.execute_script
        return method(script)

    def set_timeout(self, timeout):
        # NOTE(review): selenium's set_script_timeout takes seconds; the
        # * 1000 here looks like a leftover from a milliseconds API — confirm.
        self.webdriver.set_script_timeout(timeout * 1000)

    @property
    def current_window(self):
        return self.webdriver.current_window_handle

    def set_window(self, handle):
        self.webdriver.switch_to_window(handle)

    def wait(self):
        # Block until the session dies; an empty async script parks the
        # event loop until it times out or the connection drops.
        while True:
            try:
                self.webdriver.execute_async_script("")
            except exceptions.TimeoutException:
                pass
            except (socket.timeout, exceptions.NoSuchWindowException,
                    exceptions.ErrorInResponseException, IOError):
                break
            except Exception as e:
                self.logger.error(traceback.format_exc(e))
                break
class SeleniumTestharnessProtocolPart(TestharnessProtocolPart):
    """Management of the testharness runner window and test windows."""

    def setup(self):
        self.webdriver = self.parent.webdriver
        self.runner_handle = None
        with open(os.path.join(here, "runner.js")) as f:
            self.runner_script = f.read()

    def load_runner(self, url_protocol):
        """(Re)load the testharness runner page in the runner window."""
        if self.runner_handle:
            self.webdriver.switch_to_window(self.runner_handle)
        url = urlparse.urljoin(self.parent.executor.server_url(url_protocol),
                               "/testharness_runner.html")
        self.logger.debug("Loading %s" % url)
        self.webdriver.get(url)
        self.runner_handle = self.webdriver.current_window_handle
        format_map = {"title": threading.current_thread().name.replace("'", '"')}
        self.parent.base.execute_script(self.runner_script % format_map)

    def close_old_windows(self):
        """Close every window except the runner; return the runner handle."""
        handles = [item for item in self.webdriver.window_handles if item != self.runner_handle]
        for handle in handles:
            try:
                self.webdriver.switch_to_window(handle)
                self.webdriver.close()
            except exceptions.NoSuchWindowException:
                pass
        self.webdriver.switch_to_window(self.runner_handle)
        return self.runner_handle

    def get_test_window(self, window_id, parent, timeout=5):
        """Find the test window amongst all the open windows.
        This is assumed to be either the named window or the one after the parent in the list of
        window handles
        :param window_id: The DOM name of the Window
        :param parent: The handle of the runner window
        :param timeout: The time in seconds to wait for the window to appear. This is because in
        some implementations there's a race between calling window.open and the
        window being added to the list of WebDriver accessible windows."""
        test_window = None
        end_time = time.time() + timeout
        while time.time() < end_time:
            try:
                # Try using the JSON serialization of the WindowProxy object,
                # it's in Level 1 but nothing supports it yet
                win_s = self.webdriver.execute_script("return window['%s'];" % window_id)
                win_obj = json.loads(win_s)
                test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
            except Exception:
                pass
            if test_window is None:
                # Fall back to diffing the window-handle list.
                after = self.webdriver.window_handles
                if len(after) == 2:
                    test_window = next(iter(set(after) - set([parent])))
                elif after[0] == parent and len(after) > 2:
                    # Hope the first one here is the test window
                    test_window = after[1]
            if test_window is not None:
                assert test_window != parent
                return test_window
            time.sleep(0.1)
        raise Exception("unable to find test window")
class SeleniumSelectorProtocolPart(SelectorProtocolPart):
    """CSS selector queries backed by Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def elements_by_selector(self, selector):
        """Return all DOM elements matching *selector*."""
        found = self.webdriver.find_elements_by_css_selector(selector)
        return found
class SeleniumClickProtocolPart(ClickProtocolPart):
    """Element clicking backed by Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def element(self, element):
        """Click *element* and return whatever the driver returns."""
        result = element.click()
        return result
class SeleniumSendKeysProtocolPart(SendKeysProtocolPart):
    """Keyboard input backed by Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def send_keys(self, element, keys):
        """Type *keys* into *element*."""
        result = element.send_keys(keys)
        return result
class SeleniumActionSequenceProtocolPart(ActionSequenceProtocolPart):
    """WebDriver actions API (pointer/key sequences) backed by Selenium."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def send_actions(self, actions):
        """Replay a W3C action sequence through the remote end."""
        payload = {"actions": actions}
        self.webdriver.execute(Command.W3C_ACTIONS, payload)
class SeleniumTestDriverProtocolPart(TestDriverProtocolPart):
    """Deliver testdriver.js responses to the page via window.postMessage."""

    def setup(self):
        self.webdriver = self.parent.webdriver

    def send_message(self, message_type, status, message=None):
        """Post a testdriver response message into the current window."""
        payload = {"type": "testdriver-%s" % str(message_type),
                   "status": str(status)}
        if message:
            payload["message"] = str(message)
        script = "window.postMessage(%s, '*')" % json.dumps(payload)
        self.webdriver.execute_script(script)
class SeleniumProtocol(Protocol):
    """Protocol implementation backed by Selenium's Remote WebDriver."""

    implements = [SeleniumBaseProtocolPart,
                  SeleniumTestharnessProtocolPart,
                  SeleniumSelectorProtocolPart,
                  SeleniumClickProtocolPart,
                  SeleniumSendKeysProtocolPart,
                  SeleniumTestDriverProtocolPart,
                  SeleniumActionSequenceProtocolPart]

    def __init__(self, executor, browser, capabilities, **kwargs):
        do_delayed_imports()
        super(SeleniumProtocol, self).__init__(executor, browser)
        self.capabilities = capabilities
        self.url = browser.webdriver_url
        self.webdriver = None

    def connect(self):
        """Connect to browser via Selenium's WebDriver implementation."""
        self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
        self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
                                                                            resolve_ip=False),
                                          desired_capabilities=self.capabilities)

    def after_conect(self):
        # NOTE(review): typo of after_connect; dead code — the real hook is
        # defined below. Kept for compatibility in case anything calls it.
        pass

    def teardown(self):
        self.logger.debug("Hanging up on Selenium session")
        try:
            self.webdriver.quit()
        except Exception:
            pass
        del self.webdriver

    def is_alive(self):
        try:
            # Get a simple property over the connection
            self.webdriver.current_window_handle
        # TODO what exception?
        except (socket.timeout, exceptions.ErrorInResponseException):
            return False
        return True

    def after_connect(self):
        # Load the testharness runner page once the session is up.
        self.testharness.load_runner(self.executor.last_environment["protocol"])
class SeleniumRun(object):
    """Run func(protocol, url, timeout) on a worker thread with a harness timeout.

    run() returns (success, data) or Stop when the WebDriver connection is
    already gone.
    """

    def __init__(self, func, protocol, url, timeout):
        self.func = func
        self.result = None
        self.protocol = protocol
        self.url = url
        self.timeout = timeout
        self.result_flag = threading.Event()

    def run(self):
        timeout = self.timeout
        try:
            self.protocol.base.set_timeout(timeout + extra_timeout)
        except exceptions.ErrorInResponseException:
            # SeleniumRun has no logger of its own; the original accessed the
            # nonexistent self.logger here, raising AttributeError instead of
            # reporting the lost connection.  Use the protocol's logger.
            self.protocol.logger.error("Lost WebDriver connection")
            return Stop
        executor = threading.Thread(target=self._run)
        executor.start()
        flag = self.result_flag.wait(timeout + 2 * extra_timeout)
        if self.result is None:
            if flag:
                # flag is True unless we timeout; this *shouldn't* happen, but
                # it can if self._run fails to set self.result due to raising
                self.result = False, ("INTERNAL-ERROR", "self._run didn't set a result")
            else:
                self.result = False, ("EXTERNAL-TIMEOUT", None)
        return self.result

    def _run(self):
        try:
            self.result = True, self.func(self.protocol, self.url, self.timeout)
        except exceptions.TimeoutException:
            self.result = False, ("EXTERNAL-TIMEOUT", None)
        except (socket.timeout, exceptions.ErrorInResponseException):
            # connection-level failures are treated as a browser crash
            self.result = False, ("CRASH", None)
        except Exception as e:
            message = str(getattr(e, "message", ""))
            if message:
                message += "\n"
            message += traceback.format_exc(e)
            self.result = False, ("INTERNAL-ERROR", message)
        finally:
            self.result_flag.set()
class SeleniumTestharnessExecutor(TestharnessExecutor):
    """Selenium-based executor for testharness.js tests."""

    supports_testdriver = True

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 close_after_done=True, capabilities=None, debug_info=None,
                 **kwargs):
        """Selenium-based executor for testharness.js tests"""
        TestharnessExecutor.__init__(self, browser, server_config,
                                     timeout_multiplier=timeout_multiplier,
                                     debug_info=debug_info)
        self.protocol = SeleniumProtocol(self, browser, capabilities)
        with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
            self.script_resume = f.read()
        self.close_after_done = close_after_done
        # unique DOM name for the test window (see do_testharness)
        self.window_id = str(uuid.uuid4())

    def is_alive(self):
        return self.protocol.is_alive()

    def on_environment_change(self, new_environment):
        # a protocol (http/https) change requires reloading the runner page
        if new_environment["protocol"] != self.last_environment["protocol"]:
            self.protocol.testharness.load_runner(new_environment["protocol"])

    def do_test(self, test):
        """Run one test with a harness-level timeout; convert the raw result."""
        url = self.test_url(test)
        success, data = SeleniumRun(self.do_testharness,
                                    self.protocol,
                                    url,
                                    test.timeout * self.timeout_multiplier).run()
        if success:
            return self.convert_result(test, data)
        return (test.result_cls(*data), [])

    def do_testharness(self, protocol, url, timeout):
        """Open a fresh test window, load *url*, and pump testdriver callbacks."""
        format_map = {"url": strip_server(url)}
        parent_window = protocol.testharness.close_old_windows()
        # Now start the test harness
        protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
        test_window = protocol.testharness.get_test_window(self.window_id, parent_window,
                                                           timeout=5*self.timeout_multiplier)
        self.protocol.base.set_window(test_window)
        protocol.webdriver.get(url)
        handler = CallbackHandler(self.logger, protocol, test_window)
        while True:
            # NOTE(review): `async` keyword argument is Python <3.7 only.
            result = protocol.base.execute_script(
                self.script_resume % format_map, async=True)
            done, rv = handler(result)
            if done:
                break
        return rv
class SeleniumRefTestExecutor(RefTestExecutor):
    """Run reftests (rendering/screenshot comparisons) via Selenium WebDriver."""

    def __init__(self, browser, server_config, timeout_multiplier=1,
                 screenshot_cache=None, close_after_done=True,
                 debug_info=None, capabilities=None, **kwargs):
        """Selenium WebDriver-based executor for reftests"""
        RefTestExecutor.__init__(self,
                                 browser,
                                 server_config,
                                 screenshot_cache=screenshot_cache,
                                 timeout_multiplier=timeout_multiplier,
                                 debug_info=debug_info)
        self.protocol = SeleniumProtocol(self, browser,
                                         capabilities=capabilities)
        self.implementation = RefTestImplementation(self)
        self.close_after_done = close_after_done
        self.has_window = False
        # JS snippet that resolves once the page's reftest-wait is over.
        with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
            self.wait_script = f.read()

    def is_alive(self):
        return self.protocol.is_alive()

    def do_test(self, test):
        self.logger.info("Test requires OS-level window focus")
        # Size the *content* area to 600x600 by measuring the window chrome
        # (outer minus inner dimensions) and compensating for it.
        width_offset, height_offset = self.protocol.webdriver.execute_script(
            """return [window.outerWidth - window.innerWidth,
                       window.outerHeight - window.innerHeight];"""
        )
        self.protocol.webdriver.set_window_rect(0, 0, 600 + width_offset, 600 + height_offset)
        result = self.implementation.run_test(test)
        return self.convert_result(test, result)

    def screenshot(self, test, viewport_size, dpi):
        # https://github.com/w3c/wptrunner/issues/166
        assert viewport_size is None
        assert dpi is None
        return SeleniumRun(self._screenshot,
                           self.protocol,
                           self.test_url(test),
                           test.timeout).run()

    def _screenshot(self, protocol, url, timeout):
        """Load *url*, wait for reftest readiness, return a base64 PNG."""
        webdriver = protocol.webdriver
        webdriver.get(url)
        webdriver.execute_async_script(self.wait_script)
        screenshot = webdriver.get_screenshot_as_base64()
        # strip off the data:img/png, part of the url
        if screenshot.startswith("data:image/png;base64,"):
            screenshot = screenshot.split(",", 1)[1]
        return screenshot
|
__init__.py | import functools
import threading
import schedule
import time
from fHDHR.tools import checkattr
class Scheduler():
    """
    fHDHR Scheduling events system.

    Wraps the third-party `schedule` module: jobs are tagged with a
    human-readable name (first tag), run on a background thread, and
    exposed to the web UI via the list_* helpers. Attribute access not
    defined here is proxied to the `schedule` module via __getattr__.
    """

    def __init__(self, settings, logger, db):
        self.config = settings
        self.logger = logger
        self.db = db
        # Module-level scheduler object; proxied via __getattr__
        # (e.g. self.jobs, self.every, self.cancel_job).
        self.schedule = schedule

    def fhdhr_self_add(self, fhdhr):
        """Late-bind the main fHDHR object (not available at __init__ time)."""
        self.fhdhr = fhdhr

    @property
    def enabled_jobs(self):
        """Names (first tag) of every currently scheduled job."""
        return [job["name"] for job in self.list_jobs]

    @property
    def unscheduled_jobs(self):
        """Describe jobs that could be scheduled but currently are not."""
        unscheduled_job_items = []
        enabled_jobs = self.enabled_jobs

        # Channel scans: one potential job per origin.
        origin_methods = self.fhdhr.origins.list_origins
        for origin_name in origin_methods:
            scan_tag = self.fhdhr.origins.get_origin_property(origin_name, "scan_tag")
            if scan_tag not in enabled_jobs:
                chanscan_interval = self.fhdhr.origins.get_origin_property(origin_name, "chanscan_interval")
                unscheduled_job_items.append({
                    "name": scan_tag,
                    "type": "Channel Scan",
                    "interval": self.fhdhr.time.humanized_time(chanscan_interval),
                    "interval_epoch": chanscan_interval
                    })

        # EPG updates: one potential job per EPG method.
        epg_methods = self.fhdhr.device.epg.valid_epg_methods
        for epg_method in epg_methods:
            if "%s EPG Update" % epg_method not in enabled_jobs:
                frequency_seconds = self.fhdhr.device.epg.epg_handling[epg_method].update_frequency
                unscheduled_job_items.append({
                    "name": "%s EPG Update" % epg_method,
                    "type": "EPG Update",
                    "interval": self.fhdhr.time.humanized_time(frequency_seconds),
                    "interval_epoch": frequency_seconds
                    })

        # Periodic version check.
        if "Versions Update" not in enabled_jobs:
            frequency_seconds = self.fhdhr.config.dict["fhdhr"]["versions_check_interval"]
            unscheduled_job_items.append({
                "name": "Versions Update",
                "type": "Versions Update",
                "interval": self.fhdhr.time.humanized_time(frequency_seconds),
                "interval_epoch": frequency_seconds
                })

        # SSDP keep-alive announcements.
        ssdp_methods = list(self.fhdhr.device.ssdp.ssdp_handling.keys())
        for ssdp_method in ssdp_methods:
            if "%s SSDP Alive" % ssdp_method not in enabled_jobs:
                frequency_seconds = self.fhdhr.device.ssdp.ssdp_handling[ssdp_method].max_age
                unscheduled_job_items.append({
                    "name": "%s SSDP Alive" % ssdp_method,
                    "type": "SSDP Alive",
                    "interval": self.fhdhr.time.humanized_time(frequency_seconds),
                    "interval_epoch": frequency_seconds
                    })

        return unscheduled_job_items

    # This decorator can be applied to any job function
    def job_wrapper(self, func):
        """Wrap *func* so each run is logged with its name and duration."""
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            job_name = func.__name__
            start_timestamp = time.time()
            self.logger.debug('Running job: %s' % job_name)
            result = func(*args, **kwargs)
            total_time = self.fhdhr.time.humanized_time(time.time() - start_timestamp)
            self.logger.debug('Job %s completed in %s' % (job_name, total_time))
            return result
        return wrapper

    def get_scheduled_time(self, jobtag):
        """Get last and next run info for a tag"""
        jobsdict = {
            "name": None,
            "last_run": None,
            "next_run": None
            }
        joblist = self.jobs
        for job_item in joblist:
            if len(list(job_item.tags)):
                if jobtag in list(job_item.tags):
                    jobsdict.update({
                        "name": list(job_item.tags)[0],
                        "last_run": job_item.last_run,
                        "next_run": job_item.next_run
                        })
        return jobsdict

    def remove(self, remtag):
        """Cancel every scheduled job carrying tag *remtag*."""
        joblist = self.jobs
        for job_item in joblist:
            if len(list(job_item.tags)):
                if remtag in list(job_item.tags):
                    self.schedule.cancel_job(job_item)

    @property
    def list_tags(self):
        """All tags across every scheduled job."""
        tagslist = []
        joblist = self.jobs
        for job_item in joblist:
            if len(list(job_item.tags)):
                tagslist.extend(list(job_item.tags))
        return tagslist

    @property
    def list_jobs(self):
        """Name/last_run/next_run dicts for every tagged job."""
        jobsdicts = []
        joblist = self.jobs
        for job_item in joblist:
            if len(list(job_item.tags)):
                jobsdicts.append({
                    "name": list(job_item.tags)[0],
                    "last_run": job_item.last_run,
                    "next_run": job_item.next_run
                    })
        return jobsdicts

    @property
    def list_jobs_humanized(self):
        """list_jobs with run times rendered as human-readable offsets."""
        jobsdicts = self.list_jobs
        formatted_jobsdicts = []
        nowtime = time.time()
        for job_dict in jobsdicts:
            job_dict_copy = job_dict.copy()
            for run_item in ["last_run", "next_run"]:
                if job_dict_copy[run_item]:
                    job_dict_copy[run_item] = job_dict_copy[run_item].timestamp()
                    if job_dict_copy[run_item] > nowtime:
                        job_dict_copy[run_item] = self.fhdhr.time.humanized_time(job_dict_copy[run_item] - nowtime)
                    else:
                        job_dict_copy[run_item] = self.fhdhr.time.humanized_time(nowtime - job_dict_copy[run_item])
                else:
                    job_dict_copy[run_item] = "Never"
            formatted_jobsdicts.append(job_dict_copy)
        return formatted_jobsdicts

    def run_from_tag(self, runtag):
        """Immediately run every job carrying tag *runtag*."""
        joblist = self.jobs
        for job_item in joblist:
            if len(list(job_item.tags)):
                if runtag in list(job_item.tags):
                    self.logger.debug("Job %s was triggered to run." % list(job_item.tags)[0])
                    job_item.run()

    def run(self):
        """
        Run all scheduled tasks.
        """
        # Start a thread to run the events
        t = threading.Thread(target=self.thread_worker, args=())
        t.start()

    def thread_worker(self):
        """Background loop: fire due jobs roughly once per second."""
        while True:
            self.schedule.run_pending()
            time.sleep(1)

    def startup_tasks(self):
        """Run the configured on-startup jobs once, in a fixed order."""
        self.fhdhr.logger.noob("Running Startup Tasks.")
        tags_list = self.list_tags
        self.startup_versions_update(tags_list)
        self.startup_channel_scan(tags_list)
        self.startup_epg_update(tags_list)
        self.startup_ssdp_alive(tags_list)
        self.fhdhr.logger.noob("Startup Tasks Complete.")
        return "Success"

    def startup_epg_update(self, tags_list):
        """Trigger startup EPG updates for methods configured to do so."""
        for epg_method in self.fhdhr.device.epg.epg_methods:
            updateepg = self.fhdhr.device.epg.epg_handling[epg_method].epg_update_on_start
            if updateepg:
                if ("%s EPG Update" % epg_method) in tags_list:
                    self.fhdhr.scheduler.run_from_tag("%s EPG Update" % epg_method)

    def startup_channel_scan(self, tags_list):
        """Trigger startup channel scans for origins configured to do so."""
        for origin_name in self.fhdhr.origins.list_origins:
            updatechannels = self.fhdhr.origins.get_origin_property(origin_name, "chanscan_on_start")
            if updatechannels:
                if ("%s Channel Scan" % origin_name) in tags_list:
                    self.fhdhr.scheduler.run_from_tag("%s Channel Scan" % origin_name)

    def startup_versions_update(self, tags_list):
        """Trigger the version check at startup if it is scheduled."""
        if "Versions Update" in tags_list:
            self.fhdhr.scheduler.run_from_tag("Versions Update")

    def startup_ssdp_alive(self, tags_list):
        """Trigger an SSDP alive announcement per scheduled SSDP method."""
        ssdp_methods = list(self.fhdhr.device.ssdp.ssdp_handling.keys())
        for ssdp_method in ssdp_methods:
            if "%s SSDP Alive" % ssdp_method in tags_list:
                # Bug fix: the tag was previously the literal string
                # "%s SSDP Alive" (missing `% ssdp_method`), so the
                # trigger never matched a real job.
                self.fhdhr.scheduler.run_from_tag("%s SSDP Alive" % ssdp_method)

    def __getattr__(self, name):
        """
        Quick and dirty shortcuts. Will only get called for undefined attributes.
        """
        if checkattr(self.schedule, name):
            # getattr instead of eval: same result, no string evaluation.
            return getattr(self.schedule, name)
        # Previously this fell through and implicitly returned None for
        # unknown names; raising restores the standard attribute protocol.
        raise AttributeError(name)
|
schedule.py | import time
from multiprocessing import Process
import asyncio
import aiohttp
from aiohttp import ClientProxyConnectionError as ProxyConnectionError, ServerDisconnectedError, ClientResponseError, \
ClientConnectorError
from proxypool.db import RedisClient
from proxypool.error import ResourceDepletionError
from proxypool.getter import FreeProxyGetter
from proxypool.setting import *
from asyncio import TimeoutError
class ValidityTester(object):
    """Asynchronously probe raw proxies and push the working ones to Redis."""

    # URL fetched through each candidate proxy to test it.
    test_api = TEST_API

    def __init__(self):
        self._raw_proxies = None
        self._usable_proxies = []

    def set_raw_proxies(self, proxies):
        """Install the batch of proxies to test and open a Redis connection."""
        self._raw_proxies = proxies
        self._conn = RedisClient()

    async def test_single_proxy(self, proxy):
        """
        text one proxy, if valid, put them to usable_proxies.
        """
        try:
            async with aiohttp.ClientSession() as session:
                try:
                    if isinstance(proxy, bytes):
                        proxy = proxy.decode('utf-8')
                    real_proxy = 'http://' + proxy
                    print('Testing', proxy)
                    async with session.get(self.test_api, proxy=real_proxy, timeout=get_proxy_timeout) as response:
                        if response.status == 200:
                            # Proxy works: enqueue it back into Redis.
                            self._conn.put(proxy)
                            print('Valid proxy', proxy)
                except (ProxyConnectionError, TimeoutError, ValueError):
                    print('Invalid proxy', proxy)
        except Exception as s:
            # NOTE(review): broad catch — unexpected errors are only
            # printed and the proxy is silently dropped.
            print(s)

    def test(self):
        """
        aio test all proxies.
        """
        print('ValidityTester is working')
        try:
            loop = asyncio.get_event_loop()
            tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
            # asyncio.wait raises ValueError on an empty task list,
            # which is what the except below handles.
            loop.run_until_complete(asyncio.wait(tasks))
        except ValueError:
            print('Async Error')
class PoolAdder(object):
    """
    add proxy to pool
    """

    def __init__(self, threshold):
        self._threshold = threshold
        self._conn = RedisClient()
        self._tester = ValidityTester()
        self._crawler = FreeProxyGetter()

    def is_over_threshold(self):
        """Return True once the Redis pool holds at least the threshold."""
        return self._conn.queue_len >= self._threshold

    def add_to_queue(self):
        """Crawl, validate, and enqueue proxies until the pool is full."""
        print('PoolAdder is working')
        proxy_count = 0
        while not self.is_over_threshold():
            for crawl_index in range(self._crawler.__CrawlFuncCount__):
                crawl_func = self._crawler.__CrawlFunc__[crawl_index]
                raw_proxies = self._crawler.get_raw_proxies(crawl_func)
                # test crawled proxies
                self._tester.set_raw_proxies(raw_proxies)
                self._tester.test()
                proxy_count += len(raw_proxies)
                if self.is_over_threshold():
                    print('IP is enough, waiting to be used')
                    break
            if proxy_count == 0:
                # No crawler yielded anything at all: give up.
                raise ResourceDepletionError
class Schedule(object):
    """Top-level orchestration: keeps the proxy pool tested and filled."""

    @staticmethod
    def valid_proxy(cycle=VALID_CHECK_CYCLE):
        """
        Get half of proxies which in redis
        """
        # Runs forever: each cycle re-tests the oldest half of the pool.
        conn = RedisClient()
        tester = ValidityTester()
        while True:
            print('Refreshing ip')
            count = int(0.5 * conn.queue_len)
            if count == 0:
                # Pool is empty; wait for check_pool() to refill it.
                print('Waiting for adding')
                time.sleep(cycle)
                continue
            raw_proxies = conn.get(count)
            tester.set_raw_proxies(raw_proxies)
            tester.test()
            time.sleep(cycle)

    @staticmethod
    def check_pool(lower_threshold=POOL_LOWER_THRESHOLD,
                   upper_threshold=POOL_UPPER_THRESHOLD,
                   cycle=POOL_LEN_CHECK_CYCLE):
        """
        If the number of proxies less than lower_threshold, add proxy
        """
        conn = RedisClient()
        adder = PoolAdder(upper_threshold)
        while True:
            if conn.queue_len < lower_threshold:
                adder.add_to_queue()
            time.sleep(cycle)

    def run(self):
        """Start the tester and the pool-filler in separate processes."""
        print('Ip processing running')
        valid_process = Process(target=Schedule.valid_proxy)
        check_process = Process(target=Schedule.check_pool)
        valid_process.start()
        check_process.start()
|
subproc_env_vec.py | # Inspired from OpenAI Baselines
import numpy as np
from multiprocessing import Process, Pipe
from rl.common.vec_env import VecEnv, CloudpickleWrapper
from rl.common.tile_images import tile_images
def worker(remote, parent_remote, env_fn_wrapper):
    """Child-process loop: build an env and serve commands from `remote`."""
    parent_remote.close()  # this end belongs to the parent process
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            # Auto-reset finished episodes so the parent always receives
            # a valid next observation.
            if done:
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'render':
            remote.send(env.render(mode='rgb_array'))
        elif cmd == 'seed':
            remote.send(env.seed(data))
        elif cmd == 'close':
            remote.close()
            break
        else:
            raise NotImplementedError
class SubprocVecEnv(VecEnv):
    """Vectorized env running each sub-environment in its own process."""

    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        # One pipe per env: the parent keeps `remotes`, workers get `work_remotes`.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        # Close the worker ends in the parent; the children hold them open.
        for remote in self.work_remotes:
            remote.close()
        # Query one worker for the spaces shared by all envs.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)

    def step_async(self, actions):
        """Dispatch one action to every worker without waiting for results."""
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True

    def step_wait(self):
        """Collect the results of a preceding step_async()."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos

    def reset(self):
        """Reset all envs and return the stacked initial observations."""
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def reset_task(self):
        # NOTE(review): the worker() dispatch above has no 'reset_task'
        # branch, so this raises NotImplementedError in the child — confirm
        # whether any caller actually uses this method.
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])

    def close(self):
        """Shut down all workers; safe to call more than once."""
        if self.closed:
            return
        if self.waiting:
            # Drain in-flight step results so workers aren't blocked in send().
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True

    def render(self, mode='human'):
        raise NotImplementedError('Render is not implemented for Synchronous Environment')

    def seed(self, i):
        """Seed worker k with i + k. Replies are not drained here."""
        rank = i
        for remote in self.remotes:
            remote.send(('seed', rank))
            rank += 1
|
test_socketserver.py | """
Test suite for socketserver.
"""
import contextlib
import io
import os
import select
import signal
import socket
import tempfile
import threading
import unittest
import socketserver
import test.support
from test.support import reap_children, reap_threads, verbose
from test.support import socket_helper
test.support.requires("network")
TEST_STR = b"hello world\n"
HOST = socket_helper.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
'requires Unix sockets')
HAVE_FORKING = hasattr(os, "fork")
requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
    """Call signal.alarm when it exists (i.e. not on Windows)."""
    alarm = getattr(signal, 'alarm', None)
    if alarm is not None:
        alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select


def receive(sock, n, timeout=test.support.SHORT_TIMEOUT):
    """Read up to *n* bytes from *sock*, raising if nothing arrives in time."""
    readable = _real_select([sock], [], [], timeout)[0]
    if sock not in readable:
        raise RuntimeError("timed out on %r" % (sock,))
    return sock.recv(n)
# Forking Unix-socket server variants only exist where both features are
# available (i.e. not on Windows).
if HAVE_UNIX_SOCKETS and HAVE_FORKING:
    class ForkingUnixStreamServer(socketserver.ForkingMixIn,
                                  socketserver.UnixStreamServer):
        pass

    class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
                                    socketserver.UnixDatagramServer):
        pass
@contextlib.contextmanager
def simple_subprocess(testcase):
    """Tests that a custom child process is not waited on (Issue 1540386)"""
    # Fork a child that exits immediately with a distinctive code.
    pid = os.fork()
    if pid == 0:
        # Don't raise an exception; it would be caught by the test harness.
        os._exit(72)
    try:
        yield None
    except:
        raise
    finally:
        # The child must still be waitable here — i.e. the server code under
        # test must not have reaped it.
        test.support.wait_process(pid, exitcode=72)
class SocketServerTest(unittest.TestCase):
    """Test all socket servers."""

    def setUp(self):
        signal_alarm(60)  # Kill deadlocks after 60 seconds.
        self.port_seed = 0
        self.test_files = []

    def tearDown(self):
        signal_alarm(0)  # Didn't deadlock.
        reap_children()
        # Remove any Unix-socket files created by pickaddr().
        for fn in self.test_files:
            try:
                os.remove(fn)
            except OSError:
                pass
        self.test_files[:] = []

    def pickaddr(self, proto):
        """Return a fresh, unused server address for the address family."""
        if proto == socket.AF_INET:
            return (HOST, 0)
        else:
            # XXX: We need a way to tell AF_UNIX to pick its own name
            # like AF_INET provides port==0.
            dir = None
            fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
            self.test_files.append(fn)
            return fn

    def make_server(self, addr, svrcls, hdlrbase):
        """Build a server of *svrcls* whose handler echoes one line back."""
        class MyServer(svrcls):
            def handle_error(self, request, client_address):
                self.close_request(request)
                raise

        class MyHandler(hdlrbase):
            def handle(self):
                # Echo a single line back to the client.
                line = self.rfile.readline()
                self.wfile.write(line)

        if verbose: print("creating server")
        try:
            server = MyServer(addr, MyHandler)
        except PermissionError as e:
            # Issue 29184: cannot bind() a Unix socket on Android.
            self.skipTest('Cannot create server (%s, %s): %s' %
                          (svrcls, addr, e))
        self.assertEqual(server.server_address, server.socket.getsockname())
        return server

    @reap_threads
    def run_server(self, svrcls, hdlrbase, testfunc):
        """Serve in a background thread and run *testfunc* three times."""
        server = self.make_server(self.pickaddr(svrcls.address_family),
                                  svrcls, hdlrbase)
        # We had the OS pick a port, so pull the real address out of
        # the server.
        addr = server.server_address
        if verbose:
            print("ADDR =", addr)
            print("CLASS =", svrcls)

        t = threading.Thread(
            name='%s serving' % svrcls,
            target=server.serve_forever,
            # Short poll interval to make the test finish quickly.
            # Time between requests is short enough that we won't wake
            # up spuriously too many times.
            kwargs={'poll_interval':0.01})
        t.daemon = True  # In case this function raises.
        t.start()
        if verbose: print("server running")
        for i in range(3):
            if verbose: print("test client", i)
            testfunc(svrcls.address_family, addr)
        if verbose: print("waiting for server")
        server.shutdown()
        t.join()
        server.server_close()
        # server_close() must have closed the listening socket.
        self.assertEqual(-1, server.socket.fileno())
        if HAVE_FORKING and isinstance(server, socketserver.ForkingMixIn):
            # bpo-31151: Check that ForkingMixIn.server_close() waits until
            # all children completed
            self.assertFalse(server.active_children)
        if verbose: print("done")

    def stream_examine(self, proto, addr):
        """Client side: send TEST_STR over a stream socket, expect an echo."""
        with socket.socket(proto, socket.SOCK_STREAM) as s:
            s.connect(addr)
            s.sendall(TEST_STR)
            # Keep reading until a full line (or EOF) has arrived.
            buf = data = receive(s, 100)
            while data and b'\n' not in buf:
                data = receive(s, 100)
                buf += data
            self.assertEqual(buf, TEST_STR)

    def dgram_examine(self, proto, addr):
        """Client side: send TEST_STR over a datagram socket, expect an echo."""
        with socket.socket(proto, socket.SOCK_DGRAM) as s:
            if HAVE_UNIX_SOCKETS and proto == socket.AF_UNIX:
                # AF_UNIX datagram clients must bind to receive replies.
                s.bind(self.pickaddr(proto))
            s.sendto(TEST_STR, addr)
            buf = data = receive(s, 100)
            while data and b'\n' not in buf:
                data = receive(s, 100)
                buf += data
            self.assertEqual(buf, TEST_STR)

    def test_TCPServer(self):
        self.run_server(socketserver.TCPServer,
                        socketserver.StreamRequestHandler,
                        self.stream_examine)

    def test_ThreadingTCPServer(self):
        self.run_server(socketserver.ThreadingTCPServer,
                        socketserver.StreamRequestHandler,
                        self.stream_examine)

    @requires_forking
    def test_ForkingTCPServer(self):
        with simple_subprocess(self):
            self.run_server(socketserver.ForkingTCPServer,
                            socketserver.StreamRequestHandler,
                            self.stream_examine)

    @requires_unix_sockets
    def test_UnixStreamServer(self):
        self.run_server(socketserver.UnixStreamServer,
                        socketserver.StreamRequestHandler,
                        self.stream_examine)

    @requires_unix_sockets
    def test_ThreadingUnixStreamServer(self):
        self.run_server(socketserver.ThreadingUnixStreamServer,
                        socketserver.StreamRequestHandler,
                        self.stream_examine)

    @requires_unix_sockets
    @requires_forking
    def test_ForkingUnixStreamServer(self):
        with simple_subprocess(self):
            self.run_server(ForkingUnixStreamServer,
                            socketserver.StreamRequestHandler,
                            self.stream_examine)

    def test_UDPServer(self):
        self.run_server(socketserver.UDPServer,
                        socketserver.DatagramRequestHandler,
                        self.dgram_examine)

    def test_ThreadingUDPServer(self):
        self.run_server(socketserver.ThreadingUDPServer,
                        socketserver.DatagramRequestHandler,
                        self.dgram_examine)

    @requires_forking
    def test_ForkingUDPServer(self):
        with simple_subprocess(self):
            self.run_server(socketserver.ForkingUDPServer,
                            socketserver.DatagramRequestHandler,
                            self.dgram_examine)

    @requires_unix_sockets
    def test_UnixDatagramServer(self):
        self.run_server(socketserver.UnixDatagramServer,
                        socketserver.DatagramRequestHandler,
                        self.dgram_examine)

    @requires_unix_sockets
    def test_ThreadingUnixDatagramServer(self):
        self.run_server(socketserver.ThreadingUnixDatagramServer,
                        socketserver.DatagramRequestHandler,
                        self.dgram_examine)

    @requires_unix_sockets
    @requires_forking
    def test_ForkingUnixDatagramServer(self):
        self.run_server(ForkingUnixDatagramServer,
                        socketserver.DatagramRequestHandler,
                        self.dgram_examine)

    @reap_threads
    def test_shutdown(self):
        # Issue #2302: shutdown() should always succeed in making an
        # other thread leave serve_forever().
        class MyServer(socketserver.TCPServer):
            pass

        class MyHandler(socketserver.StreamRequestHandler):
            pass

        threads = []
        for i in range(20):
            s = MyServer((HOST, 0), MyHandler)
            t = threading.Thread(
                name='MyServer serving',
                target=s.serve_forever,
                kwargs={'poll_interval':0.01})
            t.daemon = True  # In case this function raises.
            threads.append((t, s))
        for t, s in threads:
            t.start()
            s.shutdown()
        for t, s in threads:
            t.join()
            s.server_close()

    def test_close_immediately(self):
        # Closing a server that never served must not raise.
        class MyServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
            pass

        server = MyServer((HOST, 0), lambda: None)
        server.server_close()

    def test_tcpserver_bind_leak(self):
        # Issue #22435: the server socket wouldn't be closed if bind()/listen()
        # failed.
        # Create many servers for which bind() will fail, to see if this result
        # in FD exhaustion.
        for i in range(1024):
            with self.assertRaises(OverflowError):
                socketserver.TCPServer((HOST, -1),
                                       socketserver.StreamRequestHandler)

    def test_context_manager(self):
        # Exiting the with-block must close the listening socket.
        with socketserver.TCPServer((HOST, 0),
                                    socketserver.StreamRequestHandler) as server:
            pass
        self.assertEqual(-1, server.socket.fileno())
class ErrorHandlerTest(unittest.TestCase):
    """Test that the servers pass normal exceptions from the handler to
    handle_error(), and that exiting exceptions like SystemExit and
    KeyboardInterrupt are not passed."""

    def tearDown(self):
        # Each *ErrorTestServer appends to TESTFN; remove it between tests.
        test.support.unlink(test.support.TESTFN)

    def test_sync_handled(self):
        BaseErrorTestServer(ValueError)
        self.check_result(handled=True)

    def test_sync_not_handled(self):
        with self.assertRaises(SystemExit):
            BaseErrorTestServer(SystemExit)
        self.check_result(handled=False)

    def test_threading_handled(self):
        ThreadingErrorTestServer(ValueError)
        self.check_result(handled=True)

    def test_threading_not_handled(self):
        ThreadingErrorTestServer(SystemExit)
        self.check_result(handled=False)

    @requires_forking
    def test_forking_handled(self):
        ForkingErrorTestServer(ValueError)
        self.check_result(handled=True)

    @requires_forking
    def test_forking_not_handled(self):
        ForkingErrorTestServer(SystemExit)
        self.check_result(handled=False)

    def check_result(self, handled):
        """Assert the handler ran and, iff *handled*, handle_error ran too."""
        with open(test.support.TESTFN) as log:
            expected = 'Handler called\n' + 'Error handled\n' * handled
            self.assertEqual(log.read(), expected)
class BaseErrorTestServer(socketserver.TCPServer):
    """Server that serves exactly one request whose handler raises
    the exception passed to the constructor."""

    def __init__(self, exception):
        self.exception = exception
        super().__init__((HOST, 0), BadHandler)
        # Connect once so handle_request() below has a request to accept.
        with socket.create_connection(self.server_address):
            pass
        try:
            self.handle_request()
        finally:
            self.server_close()
        self.wait_done()

    def handle_error(self, request, client_address):
        # Record that the error reached the handle_error() hook.
        with open(test.support.TESTFN, 'a') as log:
            log.write('Error handled\n')

    def wait_done(self):
        # Synchronous server: request is finished when handle_request returns.
        pass
class BadHandler(socketserver.BaseRequestHandler):
    """Handler that logs it ran, then raises the server's configured exception."""

    def handle(self):
        with open(test.support.TESTFN, 'a') as log:
            log.write('Handler called\n')
        raise self.server.exception('Test error')
class ThreadingErrorTestServer(socketserver.ThreadingMixIn,
        BaseErrorTestServer):
    """Threaded variant: wait_done() blocks until the request thread finishes."""

    def __init__(self, *pos, **kw):
        self.done = threading.Event()
        super().__init__(*pos, **kw)

    def shutdown_request(self, *pos, **kw):
        super().shutdown_request(*pos, **kw)
        # Signal wait_done() that the request thread completed cleanup.
        self.done.set()

    def wait_done(self):
        self.done.wait()
# Forking variant; only defined where os.fork() exists.
if HAVE_FORKING:
    class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
        pass
class SocketWriterTest(unittest.TestCase):
    """Tests for the wfile object exposed by StreamRequestHandler."""

    def test_basics(self):
        # wfile should be a buffered writer wrapping the request socket.
        class Handler(socketserver.StreamRequestHandler):
            def handle(self):
                self.server.wfile = self.wfile
                self.server.wfile_fileno = self.wfile.fileno()
                self.server.request_fileno = self.request.fileno()

        server = socketserver.TCPServer((HOST, 0), Handler)
        self.addCleanup(server.server_close)
        s = socket.socket(
            server.address_family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        with s:
            s.connect(server.server_address)
        server.handle_request()
        self.assertIsInstance(server.wfile, io.BufferedIOBase)
        self.assertEqual(server.wfile_fileno, server.request_fileno)

    def test_write(self):
        # Test that wfile.write() sends data immediately, and that it does
        # not truncate sends when interrupted by a Unix signal
        pthread_kill = test.support.get_attribute(signal, 'pthread_kill')

        class Handler(socketserver.StreamRequestHandler):
            def handle(self):
                self.server.sent1 = self.wfile.write(b'write data\n')
                # Should be sent immediately, without requiring flush()
                self.server.received = self.rfile.readline()
                big_chunk = b'\0' * test.support.SOCK_MAX_SIZE
                self.server.sent2 = self.wfile.write(big_chunk)

        server = socketserver.TCPServer((HOST, 0), Handler)
        self.addCleanup(server.server_close)
        interrupted = threading.Event()

        def signal_handler(signum, frame):
            interrupted.set()

        # Temporarily install a SIGUSR1 handler so the send() gets EINTR.
        original = signal.signal(signal.SIGUSR1, signal_handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, original)
        response1 = None
        received2 = None
        main_thread = threading.get_ident()

        def run_client():
            s = socket.socket(server.address_family, socket.SOCK_STREAM,
                              socket.IPPROTO_TCP)
            with s, s.makefile('rb') as reader:
                s.connect(server.server_address)
                nonlocal response1
                response1 = reader.readline()
                s.sendall(b'client response\n')

                reader.read(100)
                # The main thread should now be blocking in a send() syscall.
                # But in theory, it could get interrupted by other signals,
                # and then retried. So keep sending the signal in a loop, in
                # case an earlier signal happens to be delivered at an
                # inconvenient moment.
                while True:
                    pthread_kill(main_thread, signal.SIGUSR1)
                    if interrupted.wait(timeout=float(1)):
                        break
                nonlocal received2
                received2 = len(reader.read())

        background = threading.Thread(target=run_client)
        background.start()
        server.handle_request()
        background.join()
        self.assertEqual(server.sent1, len(response1))
        self.assertEqual(response1, b'write data\n')
        self.assertEqual(server.received, b'client response\n')
        # The interrupted big write must still be sent in full.
        self.assertEqual(server.sent2, test.support.SOCK_MAX_SIZE)
        self.assertEqual(received2, test.support.SOCK_MAX_SIZE - 100)
class MiscTestCase(unittest.TestCase):
    """Assorted behavioral checks on the socketserver module."""

    def test_all(self):
        # objects defined in the module should be in __all__
        expected = []
        for name in dir(socketserver):
            if not name.startswith('_'):
                mod_object = getattr(socketserver, name)
                if getattr(mod_object, '__module__', None) == 'socketserver':
                    expected.append(name)
        self.assertCountEqual(socketserver.__all__, expected)

    def test_shutdown_request_called_if_verify_request_false(self):
        # Issue #26309: BaseServer should call shutdown_request even if
        # verify_request is False
        class MyServer(socketserver.TCPServer):
            def verify_request(self, request, client_address):
                return False

            shutdown_called = 0

            def shutdown_request(self, request):
                self.shutdown_called += 1
                socketserver.TCPServer.shutdown_request(self, request)

        server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
        s = socket.socket(server.address_family, socket.SOCK_STREAM)
        s.connect(server.server_address)
        s.close()
        server.handle_request()
        self.assertEqual(server.shutdown_called, 1)
        server.server_close()

    def test_threads_reaped(self):
        """
        In #37193, users reported a memory leak
        due to the saving of every request thread. Ensure that
        not all threads are kept forever.
        """
        class MyServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
            pass

        server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
        for n in range(10):
            with socket.create_connection(server.server_address):
                server.handle_request()
        self.assertLess(len(server._threads), 10)
        server.server_close()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
utils.py | import re
import requests
import xml.etree.ElementTree as ET
from datetime import datetime, date
import threading
from decimal import Decimal
from django.core.cache import caches
from django.core.exceptions import (PermissionDenied,
ObjectDoesNotExist)
from django.db.models import Q
from .iss_simple_main import (search as moex_search,
specification as moex_specification,
history as moex_history,
NoSecurityMoex)
from .models import Security
from .utils_valute import get_valute_curse as g_v_c
from .utils_yfinance import (search_in_yfinance,
NoSecurityYFinance,
get_security_yfinance,
get_history_by_secid)
# Re-export the currency-rate helper from utils_valute under its public name.
get_valute_curse = g_v_c
def get_securities_in_portfolios_by_user(user):
    """Return the Security objects held in any of the user's portfolios."""
    holdings = user.securities.all().prefetch_related('security')
    return [holding.security for holding in holdings]
def get_followed_securities_by_user(user, exclude_portfolios=True):
    """Return securities the user follows, optionally minus portfolio holdings."""
    followed = user.security_followed.all()
    if not exclude_portfolios:
        return followed
    owned = user.securities.all().values('security')
    return followed.exclude(id__in=owned)
def upload_history(security):
    """Fetch the full price history for *security*, cache it briefly, return it."""
    history = security.get_history(None, None, format_result='str')
    caches['default'].add('security_history_by_id' + str(security.id),
                          history, timeout=30)
    return history
def staff_only(function):
    """View decorator: allow only staff users, else raise PermissionDenied.

    Fix: wrap with functools.wraps so the decorated view keeps its
    __name__/__doc__ metadata, which URL resolution and debugging rely on.
    """
    import functools

    @functools.wraps(function)
    def _inner(request, *args, **kwargs):
        if not request.user.is_staff:
            raise PermissionDenied
        return function(request, *args, **kwargs)
    return _inner
def upload_search_moex_to_cache(query):
    """Cache MOEX search results for *query*, minus already-known secids."""
    # not used
    cache = caches['default']
    result = moex_search(query)
    securities = Security.objects.all()
    secids = [i.secid for i in securities]
    # delete securities if exist in base
    res = {i: result[i] for i in result if i not in secids}
    cache.add('moex_search_' + query,
              res, timeout=24 * 60 * 60)
def prepare_new_security_by_secid_on_moex(secid):
    """Build (and cache for an hour) an unsaved Security from MOEX data.

    Propagates NoSecurityMoex from moex_specification when MOEX does not
    know the secid.

    Fixes: `security_type` (and `engine`/`market`) were previously left
    unbound when no pattern/board matched, causing a NameError; they now
    default to None.
    """
    description, boards = moex_specification(secid)
    cache_key = 'moex_secid_' + description["SECID"]
    cached = caches['default'].get(cache_key)
    if cached:
        return cached

    # Locate engine/market of the primary trading board.
    data = boards['data']
    board = description['primary_boardid']
    engine = market = None  # stays None if the primary board is absent - TODO confirm MOEX guarantees
    for row in data:
        if row[1] == board:
            engine = row[7]
            market = row[5]
            break

    # Map the MOEX TYPE string to our security_type. Order matters:
    # 'etf_ppif' must be tested before the more generic 'ppif'.
    security_type = None
    for pattern, type_name in (
            ('bond', 'bond'),
            ('etf_ppif', 'etf_ppif'),
            ('ppif', 'ppif'),
            ('share', 'share'),
            ('futures', 'futures'),
            ('index', 'index'),
            ('depositary_receipt', 'depositary_receipt')):
        if re.search(pattern, description["TYPE"]):
            security_type = type_name
            break

    regnumber = get_value(description, "REGNUMBER")
    isin = get_value(description, "ISIN")
    facevalue = get_value(description, "FACEVALUE", 0)
    issuesize = get_value(description, "ISSUESIZE", 0)
    initialfacevalue = get_value(description, "INITIALFACEVALUE", 0)
    matdate = get_value(description, "MATDATE")
    coupondate = get_value(description, "COUPONDATE")
    couponfrequency = get_value(description, "COUPONFREQUENCY")
    couponpercent = get_value(description, "COUPONPERCENT")
    couponvalue = get_value(description, "COUPONVALUE")
    faceunit = get_value(description, "FACEUNIT")
    main_board_faceunit = get_value(description, "MAINBOARDFACEUNIT")
    url = 'https://www.moex.com/ru/issue.aspx?code=' + description["SECID"]
    parce_url = 'http://iss.moex.com/iss/history/engines/' + \
        '{}/markets/{}/'.format(engine, market) + \
        'boards/{}/securities/{}.json'.format(board, description["SECID"])
    today_price, last_update, accint, change_price_percent = upload_moex_history(
        parce_url, description["SECID"], security_type, facevalue)
    newitem = Security(fullname=description["NAME"],
                       shortname=description["SHORTNAME"],
                       name=description["SHORTNAME"],
                       regnumber=regnumber,
                       secid=description["SECID"],
                       isin=isin,
                       facevalue=facevalue,
                       initialfacevalue=initialfacevalue,
                       matdate=matdate,
                       security_type=security_type,
                       url=url,
                       emitent=description['emitent'],
                       board=board,
                       engine=engine,
                       market=market,
                       parce_url=parce_url,
                       coupondate=coupondate,
                       couponfrequency=couponfrequency,
                       couponpercent=couponpercent,
                       couponvalue=couponvalue,
                       accint=accint,
                       faceunit=faceunit,
                       issuesize=issuesize,
                       main_board_faceunit=main_board_faceunit,
                       oldest_date=datetime.now().date(),
                       today_price=today_price,
                       last_update=last_update,
                       change_price_percent=change_price_percent,
                       source='moex')
    caches['default'].add(cache_key, newitem, timeout=60 * 60)
    return newitem
def upload_history_yfinance_to_cache(secid):
    """Fetch the yfinance price history for *secid* and cache it for one hour."""
    cache_key = 'yfinance_security_history_secid_' + secid
    caches['default'].add(cache_key, get_history_by_secid(secid),
                          timeout=60 * 60)
def prepare_new_security_by_secid_yfinance(secid):
    """Return a (possibly cached) unsaved Security built from yfinance data.

    The object is cached for one hour under 'yfinance_secid_<secid>'. A
    background thread is started to cache the price history so a later
    ajax request can read it without blocking.
    """
    if not caches['default'].get('yfinance_secid_' + secid):
        new = get_security_yfinance(secid)
        newitem = Security(
            source='yfinance',
            last_update=datetime.now().date(),
            oldest_date=datetime.now().date(),
            **new)
        caches['default'].add('yfinance_secid_' + secid,
                              newitem, timeout=60 * 60)
    else:
        newitem = caches['default'].get('yfinance_secid_' + secid)
    if not caches['default'].get('yfinance_security_history_secid_' + secid):
        # Cache the security's historical data in the background so a
        # follow-up ajax request can load it quickly.
        t = threading.Thread(target=upload_history_yfinance_to_cache, args=(secid,))
        t.start()
        # end of the history-caching block
    return newitem
def prepare_new_security_by_secid(secid):
    """Prepare a security by SECID, trying MOEX first and yfinance second.

    Returns None when neither source can provide the security.
    """
    sources = (
        (prepare_new_security_by_secid_on_moex, NoSecurityMoex),
        (prepare_new_security_by_secid_yfinance, NoSecurityYFinance),
    )
    for build, missing_exc in sources:
        try:
            item = build(secid)
        except missing_exc:
            item = None
        if item is not None:
            return item
    return None
def get_value(dictionary, key, default=None):
    """Fetch *key* from *dictionary*, returning *default* when it is absent.

    Date fields (MATDATE, COUPONDATE) are parsed from '%Y-%m-%d' strings
    into date objects.
    """
    try:
        value = dictionary[key]
    except KeyError:
        return default
    if key in ("MATDATE", "COUPONDATE"):
        return datetime.strptime(value, '%Y-%m-%d').date()
    return value
def upload_moex_history(parce_url, secid, security_type, facevalue):
    """Download price history from MOEX, cache it, and summarise today's state.

    Returns a tuple (today_price, last_update_date, accint, change_percent).
    For bonds the CLOSE quote (a percentage of face value) is converted to
    an absolute price using *facevalue*. The per-day close prices are cached
    for one hour under 'moex_security_history_secid<secid>'.
    """
    security_history = moex_history(parce_url)
    if security_type == 'bond':
        for i in security_history:
            try:
                # Bond quotes come as a percentage of face value; convert to
                # an absolute price. Rows whose CLOSE cannot be parsed are
                # left unchanged (best-effort conversion).
                security_history[i]['CLOSE'] = str(
                    float(security_history[i]['CLOSE']) * float(facevalue
                    ) / 100)
            except Exception:
                pass
    # History keys are '%d.%m.%Y' date strings; sort newest first.
    days = sorted(
        security_history,
        key=lambda i: datetime.strptime(i, '%d.%m.%Y').date(),
        reverse=True)
    result_history = {i: security_history[i]['CLOSE'] for i in days}
    caches['default'].add('moex_security_history_secid' + secid,
                          result_history, timeout=60 * 60)
    today_price = security_history[days[0]]['CLOSE']
    try:
        # Percentage change versus the previous trading day, rounded to 2dp.
        previos_price = security_history[days[1]]['CLOSE']
        change_price_percent = (float(today_price) - float(previos_price))\
            / float(previos_price) * 100
        change_price_percent = float("{0:.2f}".format(change_price_percent))
    except Exception:
        # Fewer than two days of history or a non-numeric quote: report 0.
        change_price_percent = 0
    try:
        # ACCINT is only present in some rows (bonds); None otherwise.
        accint = security_history[days[0]]['ACCINT']
    except KeyError:
        accint = None
    return today_price,\
        datetime.strptime(days[0], '%d.%m.%Y').date(),\
        accint,\
        change_price_percent
def security_search_in_db(query):
    """Case-insensitive substring search over the local Security table.

    Matches against name, code, fullname, regnumber, secid, isin and
    emitent; results are ordered newest first (by last_update, then id).
    """
    return Security.objects.filter(
        Q(name__icontains=query) |
        Q(code__icontains=query) |
        Q(fullname__icontains=query) |
        Q(regnumber__icontains=query) |
        Q(secid__icontains=query) |
        Q(isin__icontains=query) |
        Q(emitent__icontains=query)
    ).order_by('-last_update', '-id')
def security_search_in_moex(query):
    """Search MOEX and yfinance for securities not yet present in the DB.

    Results are cached per query for 24 hours. Only supported security
    types (bond, etf_ppif, ppif, share, futures, index, depositary_receipt)
    are kept, and hits whose SECID already exists locally are dropped.
    """
    if not caches['default'].get('moex_search_' + query):
        result = moex_search(query)
        securities = Security.objects.all()
        secids = [i.secid for i in securities]
        if result:
            # Keep only supported types, then drop securities already in the DB.
            res = {
                i: result[i] for i in result if re.search(
                    r'bond|etf_ppif|ppif|share|futures|index|depositary_receipt',
                    result[i]['type']
                )
            }
            res = {i: res[i] for i in res if i not in secids}
        else:
            res = dict()
        result_yfinance = search_in_yfinance(query)
        if result_yfinance:
            if query not in secids:
                res[result_yfinance['name']] = result_yfinance
        if res:
            caches['default'].add('moex_search_' + query,
                                  res, timeout=24 * 60 * 60)
    else:
        res = caches['default'].get('moex_search_' + query)
    return res
def get_new_security_type(security_type):
    """Map a raw MOEX type string to one of our canonical security types.

    Returns None when no known type is found. 'etf_ppif' is tested before
    'ppif' because the latter is a substring of the former.
    """
    known_types = ('bond', 'etf_ppif', 'ppif', 'share',
                   'futures', 'index', 'depositary_receipt')
    for candidate in known_types:
        if re.search(candidate, security_type):
            return candidate
    return None
class NewSearchSecurity:
    """Lightweight holder for a single security found by a search query.

    Instances carry search hits (from MOEX or yfinance) through the cache
    until a full Security object is created from them.
    """

    def __init__(self,
                 secid,
                 isin,
                 shortname,
                 name,
                 emitent,
                 source,
                 security_type,
                 query: str = None,
                 **kwargs):
        # Exchange security identifier (ticker / SECID).
        self.secid = secid
        self.isin = isin
        self.shortname = shortname
        self.name = name
        self.emitent = emitent
        # Data source: 'moex' or 'yfinance'.
        self.source = source
        self.security_type = security_type
        # The search string that produced this hit; used later to
        # invalidate the cached search result.
        self.query = query
def search_new_securities_api(query):
    """Search MOEX and yfinance for securities not yet stored in the DB.

    Returns a list of NewSearchSecurity objects, cached per query for two
    hours. Hits whose SECID already exists in the Security table are
    excluded; SECID comparison is done upper-cased.
    """
    if not caches['default'].get('moex_search_api_' + query):
        result_moex = moex_search(query)
        securities = Security.objects.all()
        secids = [i.secid.upper() for i in securities if i.secid]
        if result_moex:
            # Keep only supported types, then drop securities already in the DB.
            temp = {
                i: result_moex[i] for i in result_moex if re.search(
                    r'bond|etf_ppif|ppif|share|futures|index|depositary_receipt',
                    result_moex[i]['type']
                )
            }
            temp = {i: temp[i] for i in temp if i not in secids}
            result = [
                NewSearchSecurity(
                    secid=i,
                    isin=temp[i]['isin'],
                    shortname=temp[i]['shortname'],
                    name=temp[i]['name'],
                    emitent=temp[i]['emitent'],
                    source='moex',
                    security_type=get_new_security_type(temp[i]['type']),
                    query=query
                ) for i in temp
            ]
        else:
            result = list()
        result_yfinance = search_in_yfinance(query)
        if result_yfinance:
            if query.upper() not in secids:
                result.append(
                    NewSearchSecurity(
                        secid=query.upper(),
                        isin=result_yfinance['isin'],
                        shortname=result_yfinance['shortname'],
                        source='yfinance',
                        name=result_yfinance['name'],
                        emitent=result_yfinance['emitent'],
                        security_type='share',
                        query=query
                    )
                )
        if result:
            caches['default'].add('moex_search_api_' + query,
                                  result, timeout=2 * 60 * 60)
    else:
        result = caches['default'].get('moex_search_api_' + query)
    return result
def add_search_securities_to_cache(securities):
    """Cache each found security under its ISIN so it can be fetched later.

    Entries live for 24 hours; entries already in the cache are left
    untouched. (The original used an inverted ``if ...: pass / else:``
    branch; this is the direct form with identical behavior.)
    """
    for security in securities:
        key = 'new_security_' + security.isin
        if not caches['default'].get(key):
            caches['default'].add(key, security, timeout=24 * 60 * 60)
def prepare_new_security_api(isin):
    """Build a new Security from a previously cached search result.

    Returns None when the ISIN is not in the cache or the cached item has
    an unsupported source. (Previously an unknown source crashed with
    UnboundLocalError because ``security`` was never assigned.)
    """
    security_item = caches['default'].get('new_security_' + isin)
    if not security_item:
        return None
    if security_item.source == 'moex':
        return prepare_new_security_by_secid_on_moex(security_item.secid)
    if security_item.source == 'yfinance':
        return prepare_new_security_by_secid_yfinance(security_item.secid)
    # Unknown source: treat as "no security" instead of raising.
    return None
def delete_search_query_from_cache(isin):
    """Invalidate the cached search-API result that produced this ISIN entry."""
    security_item = caches['default'].get('new_security_' + isin)
    if security_item:
        caches['default'].delete('moex_search_api_' + security_item.query)
    # When the ISIN is not cached there is nothing to invalidate.
def get_new_security_history_from_moex(secid):
    """Return the MOEX close-price history for a not-yet-saved security.

    Returns ``{'status': 'no_secid_security'}`` when the security itself is
    not in the cache; otherwise ``{'status': 'ok', 'result_history': {...},
    'currency': ...}`` with '%d.%m.%Y' date keys ordered newest first.
    """
    newitem = caches['default'].get('moex_secid_' + secid)
    if not newitem:
        return {'status': 'no_secid_security'}
    result = caches['default'].get('moex_security_history_secid' + secid)
    if result is None:
        raw = moex_history(newitem.parce_url)
        # Bug fix: the old code did ``{i: i['CLOSE'] for i in raw}``, which
        # indexes the *date-string key* and raises TypeError. We need the
        # CLOSE field of each day's row, matching upload_moex_history().
        result = {day: raw[day]['CLOSE'] for day in raw}
    days = sorted(
        result,
        key=lambda day: datetime.strptime(day, '%d.%m.%Y').date(),
        reverse=True)
    result_history = {day: result[day] for day in days}
    return {'status': 'ok',
            'result_history': result_history,
            'currency': newitem.get_main_board_faceunit_display()}
def get_new_security_history_from_yfinance(secid):
    """Return the yfinance price history for a not-yet-saved security.

    Returns ``{'status': 'no_secid_security'}`` when the security is not in
    the cache; otherwise ``{'status': 'ok', 'result_history': {...},
    'currency': ...}`` with '%d.%m.%Y' keys (newest first) and prices
    formatted to two decimals.
    """
    if caches['default'].get('yfinance_secid_' + secid):
        newitem = caches['default'].get('yfinance_secid_' + secid)
    else:
        return {'status': 'no_secid_security'}
    if not caches['default'].get(
            'yfinance_security_history_secid_' + secid):
        # History not cached yet: fetch it synchronously this time.
        upload_history_yfinance_to_cache(secid)
    result = caches['default'].get(
        'yfinance_security_history_secid_' + secid
    )
    # Keys are date objects (strftime below requires that); sort newest first.
    days = sorted(
        result,
        key=lambda i: i,
        reverse=True
    )
    result_history = {
        datetime.strftime(i, '%d.%m.%Y'): float("{0:.2f}".format(result[i]))
        for i in days
    }
    return {'status': 'ok',
            'result_history': result_history,
            'currency': newitem.get_main_board_faceunit_display()}
def get_new_security_history(secid):
    """Return price history from MOEX or yfinance, whichever answers first."""
    for fetch in (get_new_security_history_from_moex,
                  get_new_security_history_from_yfinance):
        history = fetch(secid)
        if history['status'] == 'ok':
            return history
    # Neither source reported status 'ok'.
    return {'status': 'no_secid_security'}
def get_new_security_history_api(isin):
    """Return the price history dict for a cached search result, or None.

    Looks up the NewSearchSecurity cached under the ISIN and dispatches to
    the matching source. (Previously an unknown source crashed with
    UnboundLocalError because ``history`` was never assigned.)
    """
    security_item = caches['default'].get('new_security_' + isin)
    if not security_item:
        return None
    if security_item.source == 'moex':
        history = get_new_security_history_from_moex(security_item.secid)
    elif security_item.source == 'yfinance':
        history = get_new_security_history_from_yfinance(security_item.secid)
    else:
        # Unknown source: nothing to fetch.
        return None
    if history['status'] == 'ok':
        return history['result_history']
    return None
def get_security_in_db_history_from_moex(security, date_since, date_until):
    """Return the security's history as {'dd.mm.yyyy': price}, newest first.

    Results are cached for 12 hours per security id. The history returned
    by get_history() may be keyed either by 'dd.mm.yyyy' strings or by date
    objects; the TypeError fallback handles the date-object case and
    normalises the output to formatted strings and two-decimal floats.
    """
    cache = caches['default']
    security_history = cache.get('security_history_by_id' + str(security.id))
    if not security_history:
        security_history = security.get_history(date_since,
                                                date_until,
                                                format_result='str')
        caches['default'].add('security_history_by_id' + str(security.id),
                              security_history, timeout=12 * 60 * 60)
    try:
        days = sorted(
            security_history,
            key=lambda i: datetime.strptime(i, '%d.%m.%Y').date(),
            reverse=True)
        result_history = {i: security_history[i] for i in days}
    except TypeError:
        # Keys are date objects, not strings: sort directly and format.
        days = sorted(
            security_history,
            key=lambda i: i,
            reverse=True)
        result_history = {
            datetime.strftime(i, '%d.%m.%Y'): float("{0:.2f}".format(
                security_history[i]))
            for i in days}
    return result_history
def get_or_prepare_new_security_by_secid(secid):
    """Return a cached pending security for SECID, building one if needed.

    Checks the MOEX cache first, then the yfinance cache, and only when
    both miss does it prepare a fresh security.
    """
    cached = caches['default'].get('moex_secid_' + secid)
    if cached:
        return cached
    cached = caches['default'].get('yfinance_secid_' + secid)
    if cached:
        return cached
    return prepare_new_security_by_secid(secid)
def get_today_price_by_secid(secid, day=None, ignore_bond_nkd=False):
    """Return the current (or a given day's) price for a security by SECID.

    Prices quoted in a currency other than 'SUR' are converted with
    get_valute_curse(); for bonds the accrued coupon interest (accint) plus
    a one-day coupon share is added unless ignore_bond_nkd is set.
    Raises NoSecurityMoex when the security cannot be found anywhere.
    """
    try:
        security = Security.objects.get(secid=secid)
        if day:
            # NOTE(review): get_security_in_db_history_from_moex() takes
            # (security, date_since, date_until); this one-argument call
            # raises TypeError at runtime — confirm the intended date
            # range and pass it explicitly.
            history = get_security_in_db_history_from_moex(security)
            today_price = history[day]
        else:
            today_price = security.today_price
    except ObjectDoesNotExist:
        security = get_or_prepare_new_security_by_secid(secid)
        if not security:
            raise NoSecurityMoex
        if day:
            # History keys are 'dd.mm.yyyy' strings; re-key by datetime so
            # the caller-provided `day` (presumably a datetime — confirm)
            # can be used for lookup.
            history = get_new_security_history(
                secid)['result_history']
            temp = {datetime.strptime(
                i, '%d.%m.%Y'): history[i] for i in history}
            today_price = temp[day]
        else:
            today_price = security.today_price
    if security.main_board_faceunit != 'SUR':
        # Convert a foreign-currency quote at that day's exchange rate.
        valute = security.main_board_faceunit
        if day:
            today_price *= Decimal(get_valute_curse(valute, day))
        else:
            today_price *= Decimal(
                get_valute_curse(valute, datetime.now().date())
            )
    if security.security_type == 'bond' and not ignore_bond_nkd:
        # couponvalue * couponfrequency / 365 — presumably one day's worth
        # of coupon accrual; verify against the coupon model.
        today_nkd = (float(security.couponvalue) *
                     float(security.couponfrequency) / 365
                     )
        today_price = float(today_price) + (float(security.accint) +  # NKD
                                            today_nkd  # today NKD
                                            )
    return today_price
def get_security_by_secid(secid, return_from_db_flag=False):
    """Look up a security by SECID in the DB, falling back to remote sources.

    When return_from_db_flag is True the result is a (security, from_db)
    tuple so the caller can tell whether the security already existed in
    the database.
    """
    try:
        security = Security.objects.get(secid=secid)
        found_in_db = True
    except ObjectDoesNotExist:
        security = get_or_prepare_new_security_by_secid(secid)
        found_in_db = False
    if return_from_db_flag:
        return security, found_in_db
    return security
def get_security_in_db_by_id(id):
    """Return the Security with the given primary key, or None when absent."""
    try:
        return Security.objects.get(id=id)
    except ObjectDoesNotExist:
        return None
|
gtest_parallel.py | # Copyright 2022 PingCAP, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2013 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
from functools import total_ordering
import gzip
import io
import json
import multiprocessing
import optparse
import os
import re
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
if sys.version_info.major >= 3:
long = int
import _pickle as cPickle
import _thread as thread
else:
import cPickle
import thread
from pickle import HIGHEST_PROTOCOL as PICKLE_HIGHEST_PROTOCOL
if sys.platform == 'win32':
import msvcrt
else:
import fcntl
# An object that catches SIGINT sent to the Python process and notices
# if processes passed to wait() die by SIGINT (we need to look for
# both of those cases, because pressing Ctrl+C can result in either
# the main process or one of the subprocesses getting the signal).
#
# Before a SIGINT is seen, wait(p) will simply call p.wait() and
# return the result. Once a SIGINT has been seen (in the main process
# or a subprocess, including the one the current call is waiting for),
# wait(p) will call p.terminate() and raise ProcessWasInterrupted.
class SigintHandler(object):
    """Coordinates Ctrl+C handling between this process and its children.

    Before a SIGINT is seen, wait(p) simply calls p.wait() and returns the
    result. Once a SIGINT has been observed (delivered to this process, or
    inferred from a child's exit code), wait(p) terminates the process and
    raises ProcessWasInterrupted.
    """

    class ProcessWasInterrupted(Exception):
        # Raised by wait() after a SIGINT has been observed.
        pass

    # Child exit codes that indicate death by SIGINT.
    sigint_returncodes = {-signal.SIGINT,  # Unix
                          -1073741510,  # Windows
                          }

    def __init__(self):
        self.__lock = threading.Lock()
        # Live subprocesses to terminate when a SIGINT arrives.
        self.__processes = set()
        self.__got_sigint = False
        signal.signal(signal.SIGINT, lambda signal_num,
                      frame: self.interrupt())

    def __on_sigint(self):
        # Caller must hold self.__lock.
        self.__got_sigint = True
        while self.__processes:
            try:
                self.__processes.pop().terminate()
            except OSError:
                # The process already exited.
                pass

    def interrupt(self):
        """Mark SIGINT as seen and terminate all registered subprocesses."""
        with self.__lock:
            self.__on_sigint()

    def got_sigint(self):
        """Return True once a SIGINT has been observed."""
        with self.__lock:
            return self.__got_sigint

    def wait(self, p):
        """Wait for subprocess *p*; raise ProcessWasInterrupted on SIGINT."""
        with self.__lock:
            if self.__got_sigint:
                p.terminate()
            self.__processes.add(p)
        code = p.wait()
        with self.__lock:
            self.__processes.discard(p)
            if code in self.sigint_returncodes:
                self.__on_sigint()
            if self.__got_sigint:
                raise self.ProcessWasInterrupted
        return code


# Module-level singleton used by Task.run().
sigint_handler = SigintHandler()
# Return the width of the terminal, or None if it couldn't be
# determined (e.g. because we're not being run interactively).
def term_width(out):
    """Return the column width of the terminal behind *out*.

    Returns None when *out* is not a tty or the width cannot be determined
    (e.g. we're not being run interactively).
    """
    if not out.isatty():
        return None
    try:
        proc = subprocess.Popen(["stty", "size"],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout_data, stderr_data = proc.communicate()
        if proc.returncode != 0 or stderr_data:
            return None
        # `stty size` prints "<rows> <cols>".
        return int(stdout_data.split()[1])
    except (IndexError, OSError, ValueError):
        return None
# Output transient and permanent lines of text. If several transient
# lines are written in sequence, the new will overwrite the old. We
# use this to ensure that lots of unimportant info (tests passing)
# won't drown out important info (tests failing).
class Outputter(object):
    """Writes transient (overwritable) and permanent lines to a stream.

    On a tty, consecutive transient lines rewrite each other in place so
    unimportant progress output (tests passing) does not drown out the
    important output (tests failing).
    """

    def __init__(self, out_file):
        self.__out_file = out_file
        self.__previous_line_was_transient = False
        # Terminal width, or None when out_file is not a tty.
        self.__width = term_width(out_file)

    def transient_line(self, msg):
        """Write a line the next transient line may overwrite."""
        if self.__width is None:
            self.__out_file.write(msg + "\n")
            self.__out_file.flush()
            return
        self.__out_file.write("\r" + msg[:self.__width].ljust(self.__width))
        self.__previous_line_was_transient = True

    def flush_transient_output(self):
        """Commit the pending transient line so it is not overwritten."""
        if self.__previous_line_was_transient:
            self.__out_file.write("\n")
            self.__previous_line_was_transient = False

    def permanent_line(self, msg):
        """Write a line that will never be overwritten."""
        self.flush_transient_output()
        self.__out_file.write(msg + "\n")
        if self.__width is None:
            self.__out_file.flush()
def get_save_file_path():
    """Return path to file for saving transient data."""
    home = os.path.expanduser('~')
    if sys.platform == 'win32':
        cache_path = os.environ.get(
            'LOCALAPPDATA', os.path.join(home, 'AppData', 'Local'))
    else:
        # The xdg module is not standard, so consult the environment directly.
        cache_path = os.environ.get(
            'XDG_CACHE_HOME', os.path.join(home, '.cache'))
    if os.path.isdir(cache_path):
        return os.path.join(cache_path, 'gtest-parallel')
    sys.stderr.write('Directory {} does not exist'.format(cache_path))
    return os.path.join(home, '.gtest-parallel-times')
@total_ordering
class Task(object):
    """Stores information about a task (single execution of a test).

    This class stores information about the test to be executed (gtest binary and
    test name), and its result (log file, exit code and runtime).

    Each task is uniquely identified by the gtest binary, the test name and an
    execution number that increases each time the test is executed.

    Additionaly we store the last execution time, so that next time the test is
    executed, the slowest tests are run first.
    """

    def __init__(self, test_binary, test_name, test_command, execution_number,
                 last_execution_time, output_dir):
        self.test_name = test_name
        self.output_dir = output_dir
        self.test_binary = test_binary
        self.test_command = test_command
        self.execution_number = execution_number
        self.last_execution_time = last_execution_time
        # Filled in by run().
        self.exit_code = None
        self.runtime_ms = None
        # (binary, name) identifies the test; the task id also carries the
        # execution number so retries are distinguishable.
        self.test_id = (test_binary, test_name)
        self.task_id = (test_binary, test_name, self.execution_number)
        self.log_file = Task._logname(self.output_dir, self.test_binary,
                                      test_name, self.execution_number)

    def __sorting_key(self):
        # Unseen or failing tests (both missing execution time) take precedence over
        # execution time. Tests are greater (seen as slower) when missing times so
        # that they are executed first.
        return (1 if self.last_execution_time is None else 0,
                self.last_execution_time)

    def __eq__(self, other):
        return self.__sorting_key() == other.__sorting_key()

    def __ne__(self, other):
        return not (self == other)

    def __lt__(self, other):
        return self.__sorting_key() < other.__sorting_key()

    @staticmethod
    def _normalize(string):
        # Replace every character that is unsafe in a file name.
        return re.sub('[^A-Za-z0-9]', '_', string)

    @staticmethod
    def _logname(output_dir, test_binary, test_name, execution_number):
        """Return the log file path for this execution."""
        # Store logs to temporary files if there is no output_dir.
        if output_dir is None:
            (log_handle, log_name) = tempfile.mkstemp(prefix='gtest_parallel_',
                                                      suffix=".log")
            os.close(log_handle)
            return log_name
        log_name = '%s-%s-%d.log' % (Task._normalize(os.path.basename(test_binary)),
                                     Task._normalize(test_name), execution_number)
        return os.path.join(output_dir, log_name)

    def run(self):
        """Execute the test, recording exit code, runtime and log output."""
        begin = time.time()
        with open(self.log_file, 'w') as log:
            task = subprocess.Popen(self.test_command, stdout=log, stderr=log)
            try:
                self.exit_code = sigint_handler.wait(task)
            except sigint_handler.ProcessWasInterrupted:
                thread.exit()
        self.runtime_ms = int(1000 * (time.time() - begin))
        # None marks a failed run so this test sorts first next time.
        self.last_execution_time = None if self.exit_code else self.runtime_ms
class TaskManager(object):
    """Executes the tasks and stores the passed, failed and interrupted tasks.

    When a task is run, this class keeps track if it passed, failed or was
    interrupted. After a task finishes it calls the relevant functions of the
    Logger, TestResults and TestTimes classes, and in case of failure, retries the
    test as specified by the --retry_failed flag.
    """

    def __init__(self, times, logger, test_results, task_factory, times_to_retry,
                 initial_execution_number):
        self.times = times
        self.logger = logger
        self.test_results = test_results
        self.task_factory = task_factory
        self.times_to_retry = times_to_retry
        self.initial_execution_number = initial_execution_number
        # Exit code of the last failing task; 0 when everything passed.
        self.global_exit_code = 0
        self.passed = []
        self.failed = []
        # task_id -> task, for tasks currently running.
        self.started = {}
        # test_id -> next execution number to hand out for retries.
        self.execution_number = {}
        self.lock = threading.Lock()

    def __get_next_execution_number(self, test_id):
        # Atomically reserve the next execution number for this test.
        with self.lock:
            next_execution_number = self.execution_number.setdefault(
                test_id, self.initial_execution_number)
            self.execution_number[test_id] += 1
        return next_execution_number

    def __register_start(self, task):
        with self.lock:
            self.started[task.task_id] = task

    def __register_exit(self, task):
        # Log the result and record its runtime, then update bookkeeping.
        self.logger.log_exit(task)
        self.times.record_test_time(task.test_binary, task.test_name,
                                    task.last_execution_time)
        if self.test_results:
            self.test_results.log(task.test_name, task.runtime_ms,
                                  "PASS" if task.exit_code == 0 else "FAIL")
        with self.lock:
            self.started.pop(task.task_id)
            if task.exit_code == 0:
                self.passed.append(task)
            else:
                self.failed.append(task)

    def run_task(self, task):
        """Run the task, retrying failures up to times_to_retry times."""
        for try_number in range(self.times_to_retry + 1):
            self.__register_start(task)
            task.run()
            self.__register_exit(task)
            if task.exit_code == 0:
                break
            if try_number < self.times_to_retry:
                execution_number = self.__get_next_execution_number(
                    task.test_id)
                # We need create a new Task instance. Each task represents a single test
                # execution, with its own runtime, exit code and log file.
                task = self.task_factory(task.test_binary, task.test_name,
                                         task.test_command, execution_number,
                                         task.last_execution_time, task.output_dir)
        with self.lock:
            if task.exit_code != 0:
                self.global_exit_code = task.exit_code
class FilterFormat(object):
    """Formats progress and results for the console and manages log files.

    Fix: integer comparisons previously used identity (``is`` /
    ``is not``) — e.g. ``e.errno is not errno.ENOENT`` and
    ``i is num_tries - 1`` — which is implementation-defined outside
    CPython's small-int cache; they now use ``!=`` / ``==``.
    """

    def __init__(self, output_dir):
        if sys.stdout.isatty():
            # stdout needs to be unbuffered since the output is interactive.
            if isinstance(sys.stdout, io.TextIOWrapper):
                # workaround for https://bugs.python.org/issue17404
                sys.stdout = io.TextIOWrapper(sys.stdout.detach(),
                                              line_buffering=True,
                                              write_through=True,
                                              newline='\n')
            else:
                sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
        self.output_dir = output_dir
        self.total_tasks = 0
        self.finished_tasks = 0
        self.out = Outputter(sys.stdout)
        self.stdout_lock = threading.Lock()

    def move_to(self, destination_dir, tasks):
        """Move the tasks' log files into output_dir/destination_dir."""
        if self.output_dir is None:
            return
        destination_dir = os.path.join(self.output_dir, destination_dir)
        os.makedirs(destination_dir)
        for task in tasks:
            shutil.move(task.log_file, destination_dir)

    def print_tests(self, message, tasks, print_try_number):
        """Print a summary list of tasks with their runtimes."""
        self.out.permanent_line("%s (%s/%s):" %
                                (message, len(tasks), self.total_tasks))
        for task in sorted(tasks):
            runtime_ms = 'Interrupted'
            if task.runtime_ms is not None:
                runtime_ms = '%d ms' % task.runtime_ms
            self.out.permanent_line("%11s: %s %s%s" % (
                runtime_ms, task.test_binary, task.test_name,
                (" (try #%d)" % task.execution_number) if print_try_number else ""))

    def log_exit(self, task):
        """Report a finished task; dump its log when it failed."""
        with self.stdout_lock:
            self.finished_tasks += 1
            self.out.transient_line("[%d/%d] %s (%d ms)"
                                    % (self.finished_tasks, self.total_tasks,
                                       task.test_name, task.runtime_ms))
            if task.exit_code != 0:
                with open(task.log_file) as f:
                    for line in f.readlines():
                        self.out.permanent_line(line.rstrip())
                self.out.permanent_line(
                    "[%d/%d] %s returned/aborted with exit code %d (%d ms)"
                    % (self.finished_tasks, self.total_tasks, task.test_name,
                       task.exit_code, task.runtime_ms))
        if self.output_dir is None:
            # Try to remove the file 100 times (sleeping for 0.1 second in between).
            # This is a workaround for a process handle seemingly holding on to the
            # file for too long inside os.subprocess. This workaround is in place
            # until we figure out a minimal repro to report upstream (or a better
            # suspect) to prevent os.remove exceptions.
            num_tries = 100
            for i in range(num_tries):
                try:
                    os.remove(task.log_file)
                except OSError as e:
                    # Fix: value comparison, not identity (`is`), for ints.
                    if e.errno != errno.ENOENT:
                        if i == num_tries - 1:
                            self.out.permanent_line(
                                'Could not remove temporary log file: ' + str(e))
                        else:
                            time.sleep(0.1)
                            continue
                break

    def log_tasks(self, total_tasks):
        """Register additional tasks and refresh the progress line."""
        self.total_tasks += total_tasks
        self.out.transient_line("[0/%d] Running tests..." % self.total_tasks)

    def summarize(self, passed_tasks, failed_tasks, interrupted_tasks):
        """Print per-test pass/fail/interrupt statistics for unstable tests."""
        stats = {}

        def add_stats(stats, task, idx):
            task_key = (task.test_binary, task.test_name)
            if task_key not in stats:
                # (passed, failed, interrupted) task_key is added as tie breaker to get
                # alphabetic sorting on equally-stable tests
                stats[task_key] = [0, 0, 0, task_key]
            stats[task_key][idx] += 1

        for task in passed_tasks:
            add_stats(stats, task, 0)
        for task in failed_tasks:
            add_stats(stats, task, 1)
        for task in interrupted_tasks:
            add_stats(stats, task, 2)

        self.out.permanent_line("SUMMARY:")
        for task_key in sorted(stats, key=stats.__getitem__):
            (num_passed, num_failed, num_interrupted, _) = stats[task_key]
            (test_binary, task_name) = task_key
            total_runs = num_passed + num_failed + num_interrupted
            if num_passed == total_runs:
                # Fully stable test: nothing interesting to report.
                continue
            self.out.permanent_line(
                " %s %s passed %d / %d times%s." %
                (test_binary, task_name, num_passed, total_runs,
                 "" if num_interrupted == 0 else (" (%d interrupted)" % num_interrupted)))

    def flush(self):
        """Terminate any pending transient output line."""
        self.out.flush_transient_output()
class CollectTestResults(object):
    """Accumulates per-test results and dumps them in Chromium's JSON format."""

    def __init__(self, json_dump_filepath):
        self.test_results_lock = threading.Lock()
        self.json_dump_file = open(json_dump_filepath, 'w')
        self.test_results = {
            "interrupted": False,
            "path_delimiter": ".",
            # Third version of the file format. See the link in the flag description
            # for details.
            "version": 3,
            "seconds_since_epoch": int(time.time()),
            "num_failures_by_type": {
                "PASS": 0,
                "FAIL": 0,
            },
            "tests": {},
        }

    def log(self, test, runtime_ms, actual_result):
        """Record one execution of *test* with its runtime and PASS/FAIL result."""
        with self.test_results_lock:
            self.test_results['num_failures_by_type'][actual_result] += 1
            # Walk/create the nested dict path for the dotted test name.
            node = self.test_results['tests']
            for part in test.split('.'):
                node = node.setdefault(part, {})
            if not node:
                # First invocation of this test.
                node['actual'] = actual_result
                node['times'] = [runtime_ms]
                node['time'] = runtime_ms
                node['expected'] = 'PASS'
            else:
                node['actual'] += ' ' + actual_result
                node['times'].append(runtime_ms)

    def dump_to_file_and_close(self):
        """Serialize the accumulated results as JSON and close the dump file."""
        json.dump(self.test_results, self.json_dump_file)
        self.json_dump_file.close()
# Record of test runtimes. Has built-in locking.
class TestTimes(object):
    """Persistent record of test runtimes. Has built-in locking."""

    class LockedFile(object):
        """Context manager: open a file and hold an exclusive lock on it."""

        def __init__(self, filename, mode):
            self._filename = filename
            self._mode = mode
            self._fo = None

        def __enter__(self):
            self._fo = open(self._filename, self._mode)
            # Regardless of opening mode we always seek to the beginning of file.
            # This simplifies code working with LockedFile and also ensures that
            # we lock (and unlock below) always the same region in file on win32.
            self._fo.seek(0)
            try:
                if sys.platform == 'win32':
                    # We are locking here fixed location in file to use it as
                    # an exclusive lock on entire file.
                    msvcrt.locking(self._fo.fileno(), msvcrt.LK_LOCK, 1)
                else:
                    fcntl.flock(self._fo.fileno(), fcntl.LOCK_EX)
            except IOError:
                self._fo.close()
                raise
            return self._fo

        def __exit__(self, exc_type, exc_value, traceback):
            # Flush any buffered data to disk. This is needed to prevent race
            # condition which happens from the moment of releasing file lock
            # till closing the file.
            self._fo.flush()
            try:
                if sys.platform == 'win32':
                    self._fo.seek(0)
                    msvcrt.locking(self._fo.fileno(), msvcrt.LK_UNLCK, 1)
                else:
                    fcntl.flock(self._fo.fileno(), fcntl.LOCK_UN)
            finally:
                self._fo.close()
            return exc_value is None

    def __init__(self, save_file):
        "Create new object seeded with saved test times from the given file."
        self.__times = {}  # (test binary, test name) -> runtime in ms
        # Protects calls to record_test_time(); other calls are not
        # expected to be made concurrently.
        self.__lock = threading.Lock()
        try:
            with TestTimes.LockedFile(save_file, 'rb') as fd:
                times = TestTimes.__read_test_times_file(fd)
        except IOError:
            # We couldn't obtain the lock.
            return
        # Discard saved times if the format isn't right.
        if type(times) is not dict:
            return
        for ((test_binary, test_name), runtime) in times.items():
            if (type(test_binary) is not str or type(test_name) is not str
                    or type(runtime) not in {int, long, type(None)}):
                return
        self.__times = times

    def get_test_time(self, binary, testname):
        """Return the last duration for the given test as an integer number of
        milliseconds, or None if the test failed or if there's no record for it."""
        return self.__times.get((binary, testname), None)

    def record_test_time(self, binary, testname, runtime_ms):
        """Record that the given test ran in the specified number of
        milliseconds. If the test failed, runtime_ms should be None."""
        with self.__lock:
            self.__times[(binary, testname)] = runtime_ms

    def write_to_file(self, save_file):
        "Write all the times to file."
        try:
            with TestTimes.LockedFile(save_file, 'a+b') as fd:
                times = TestTimes.__read_test_times_file(fd)
                if times is None:
                    times = self.__times
                else:
                    # Merge our times over whatever another process saved.
                    times.update(self.__times)
                # We erase data from file while still holding a lock to it. This
                # way reading old test times and appending new ones are atomic
                # for external viewer.
                fd.seek(0)
                fd.truncate()
                with gzip.GzipFile(fileobj=fd, mode='wb') as gzf:
                    cPickle.dump(times, gzf, PICKLE_HIGHEST_PROTOCOL)
        except IOError:
            pass  # ignore errors---saving the times isn't that important

    @staticmethod
    def __read_test_times_file(fd):
        # Returns the unpickled times dict, or None when the file is
        # missing, unreadable or malformed.
        try:
            with gzip.GzipFile(fileobj=fd, mode='rb') as gzf:
                times = cPickle.load(gzf)
        except Exception:
            # File doesn't exist, isn't readable, is malformed---whatever.
            # Just ignore it.
            return None
        else:
            return times
def find_tests(binaries, additional_args, options, times):
    """Enumerate tests in the given gtest binaries and build Task objects.

    Queries each binary with --gtest_list_tests, applies filter/shard/repeat
    options, and returns tasks sorted slowest-first so long-running tests
    start early and the worker pool stays busy.

    Fix: --gtest_break_on_failure previously appended
    '--gtest_catch_exceptions' (an apparent copy-paste from the branch
    above); it now appends '--gtest_break_on_failure'.
    """
    test_count = 0
    tasks = []
    for test_binary in binaries:
        command = [test_binary] + additional_args
        if options.gtest_also_run_disabled_tests:
            command += ['--gtest_also_run_disabled_tests']
        list_command = command + ['--gtest_list_tests']
        if options.gtest_filter != '':
            list_command += ['--gtest_filter=' + options.gtest_filter]
        try:
            test_list = subprocess.check_output(list_command,
                                                stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            sys.exit("%s: %s\n%s" % (test_binary, str(e), e.output))
        try:
            test_list = test_list.split('\n')
        except TypeError:
            # subprocess.check_output() returns bytes in python3
            test_list = test_list.decode(sys.stdout.encoding).split('\n')
        command += ['--gtest_color=' + options.gtest_color]
        if options.gtest_catch_exceptions:
            command += ['--gtest_catch_exceptions={}'.format(
                options.gtest_catch_exceptions)]
        if options.gtest_break_on_failure:
            # Bug fix: used to append '--gtest_catch_exceptions' here.
            command += ['--gtest_break_on_failure']
        test_group = ''
        for line in test_list:
            if not line.strip():
                continue
            if line[0] != " ":
                # Remove comments for typed tests and strip whitespace.
                test_group = line.split('#')[0].strip()
                continue
            # Remove comments for parameterized tests and strip whitespace.
            line = line.split('#')[0].strip()
            if not line:
                continue
            test_name = test_group + line
            if not options.gtest_also_run_disabled_tests and 'DISABLED_' in test_name:
                continue
            # Skip PRE_ tests which are used by Chromium.
            if '.PRE_' in test_name:
                continue
            last_execution_time = times.get_test_time(test_binary, test_name)
            if options.failed and last_execution_time is not None:
                continue
            test_command = command + ['--gtest_filter=' + test_name]
            if (test_count - options.shard_index) % options.shard_count == 0:
                for execution_number in range(options.repeat):
                    tasks.append(Task(test_binary, test_name, test_command,
                                      execution_number + 1, last_execution_time,
                                      options.output_dir))
            test_count += 1
    # Sort the tasks to run the slowest tests first, so that faster ones can be
    # finished in parallel.
    return sorted(tasks, reverse=True)
def execute_tasks(tasks, pool_size, task_manager,
                  timeout, serialize_test_cases):
    """Run tasks on pool_size daemon threads, optionally serializing cases.

    When serialize_test_cases is true, at most one test from each test case
    (the part of the test name before the first '.') runs at a time.
    *timeout*, when given, is started before the workers and cancelled in
    the finally block.
    """
    class WorkerFn(object):
        def __init__(self, tasks, running_groups):
            self.tasks = tasks
            # Set of test cases currently running, or None when
            # serialization is disabled.
            self.running_groups = running_groups
            self.task_lock = threading.Lock()

        def __call__(self):
            while True:
                with self.task_lock:
                    # Pick the first task whose test case is not running.
                    for task_id in range(len(self.tasks)):
                        task = self.tasks[task_id]
                        if self.running_groups is not None:
                            test_group = task.test_name.split('.')[0]
                            if test_group in self.running_groups:
                                # Try to find other non-running test group.
                                continue
                            else:
                                self.running_groups.add(test_group)
                        del self.tasks[task_id]
                        break
                    else:
                        # Either there is no tasks left or number or remaining test
                        # cases (groups) is less than number or running threads.
                        return
                task_manager.run_task(task)
                if self.running_groups is not None:
                    with self.task_lock:
                        self.running_groups.remove(test_group)

    def start_daemon(func):
        # Daemon threads so a hung test cannot keep the process alive.
        t = threading.Thread(target=func)
        t.daemon = True
        t.start()
        return t

    try:
        if timeout:
            timeout.start()
        running_groups = set() if serialize_test_cases else None
        worker_fn = WorkerFn(tasks, running_groups)
        workers = [start_daemon(worker_fn) for _ in range(pool_size)]
        for worker in workers:
            worker.join()
    finally:
        if timeout:
            timeout.cancel()
def default_options_parser():
  """Build and return the optparse parser for all command-line options.

  Returns:
    An optparse.OptionParser configured with gtest-parallel's flags.
  """
  parser = optparse.OptionParser(
      usage='usage: %prog [options] binary [binary ...] -- [additional args]')
  # NOTE(review): "will results" in the help text below is a typo in the
  # user-visible string; left unchanged here.
  parser.add_option('-d', '--output_dir', type='string', default=None,
                    help='Output directory for test logs. Logs will be '
                         'available under gtest-parallel-logs/, so '
                         '--output_dir=/tmp will results in all logs being '
                         'available under /tmp/gtest-parallel-logs/.')
  parser.add_option('-r', '--repeat', type='int', default=1,
                    help='Number of times to execute all the tests.')
  parser.add_option('--retry_failed', type='int', default=0,
                    help='Number of times to repeat failed tests.')
  parser.add_option('--failed', action='store_true', default=False,
                    help='run only failed and new tests')
  parser.add_option('-w', '--workers', type='int',
                    default=multiprocessing.cpu_count(),
                    help='number of workers to spawn')
  parser.add_option('--gtest_color', type='string', default='yes',
                    help='color output')
  parser.add_option('--gtest_filter', type='string', default='',
                    help='test filter')
  parser.add_option('--gtest_also_run_disabled_tests', action='store_true',
                    default=False, help='run disabled tests too')
  parser.add_option('--print_test_times', action='store_true', default=False,
                    help='list the run time of each test at the end of '
                         'execution')
  parser.add_option('--shard_count', type='int', default=1,
                    help='total number of shards (for sharding test execution '
                         'between multiple machines)')
  parser.add_option('--shard_index', type='int', default=0,
                    help='zero-indexed number identifying this shard (for '
                         'sharding test execution between multiple machines)')
  parser.add_option('--dump_json_test_results', type='string', default=None,
                    help='Saves the results of the tests as a JSON machine-'
                         'readable file. The format of the file is specified '
                         'at '
                         'https://www.chromium.org/developers/the-json-test-results-format')
  parser.add_option('--timeout', type='int', default=None,
                    help='Interrupt all remaining processes after the given '
                         'time (in seconds).')
  parser.add_option('--serialize_test_cases', action='store_true',
                    default=False, help='Do not run tests from the same test '
                    'case in parallel.')
  # NOTE(review): unlike the other numeric flags, no type='int' is given
  # here, so an explicit --gtest_catch_exceptions=N arrives as a string
  # while the default stays the int 0 — confirm downstream handling.
  parser.add_option('--gtest_catch_exceptions', default=0,
                    help='Do not report exceptions as test failures.')
  parser.add_option('--gtest_break_on_failure', action='store_true',
                    default=False,
                    help='Turn assertion failures into debugger '
                         'break-points.')
  return parser
def main():
  """Parse options, build the task list, run it, and summarize results.

  Returns:
    The task manager's global exit code on a normal run, or -SIGINT when the
    run was interrupted by Ctrl+C.
  """
  # Remove additional arguments (anything after --).
  additional_args = []

  for i in range(len(sys.argv)):
    if sys.argv[i] == '--':
      additional_args = sys.argv[i + 1:]
      sys.argv = sys.argv[:i]
      break

  parser = default_options_parser()
  (options, binaries) = parser.parse_args()

  if (options.output_dir is not None and
      not os.path.isdir(options.output_dir)):
    parser.error('--output_dir value must be an existing directory, '
                 'current value is "%s"' % options.output_dir)

  # Append gtest-parallel-logs to log output, this is to avoid deleting user
  # data if an user passes a directory where files are already present. If a
  # user specifies --output_dir=Docs/, we'll create Docs/gtest-parallel-logs
  # and clean that directory out on startup, instead of nuking Docs/.
  if options.output_dir:
    options.output_dir = os.path.join(options.output_dir,
                                      'gtest-parallel-logs')

  if binaries == []:
    parser.print_usage()
    sys.exit(1)

  if options.shard_count < 1:
    parser.error("Invalid number of shards: %d. Must be at least 1." %
                 options.shard_count)
  if not (0 <= options.shard_index < options.shard_count):
    parser.error("Invalid shard index: %d. Must be between 0 and %d "
                 "(less than the number of shards)." %
                 (options.shard_index, options.shard_count - 1))

  # Check that all test binaries have an unique basename. That way we can
  # ensure the logs are saved to unique files even when two different binaries
  # have common tests.
  unique_binaries = set(os.path.basename(binary) for binary in binaries)
  assert len(unique_binaries) == len(binaries), (
      "All test binaries must have an unique basename.")

  if options.output_dir:
    # Remove files from old test runs.
    if os.path.isdir(options.output_dir):
      shutil.rmtree(options.output_dir)
    # Create directory for test log output.
    try:
      os.makedirs(options.output_dir)
    except OSError as e:
      # Ignore errors if this directory already exists.
      if e.errno != errno.EEXIST or not os.path.isdir(options.output_dir):
        raise e

  timeout = None
  if options.timeout is not None:
    # Timer fires sigint_handler.interrupt, which stops the run the same way
    # a user Ctrl+C would.
    timeout = threading.Timer(options.timeout, sigint_handler.interrupt)

  test_results = None
  if options.dump_json_test_results is not None:
    test_results = CollectTestResults(options.dump_json_test_results)

  save_file = get_save_file_path()

  times = TestTimes(save_file)
  logger = FilterFormat(options.output_dir)

  task_manager = TaskManager(times, logger, test_results, Task,
                             options.retry_failed, options.repeat + 1)

  tasks = find_tests(binaries, additional_args, options, times)
  logger.log_tasks(len(tasks))
  execute_tasks(tasks, options.workers, task_manager,
                timeout, options.serialize_test_cases)

  print_try_number = options.retry_failed > 0 or options.repeat > 1
  if task_manager.passed:
    logger.move_to('passed', task_manager.passed)
    if options.print_test_times:
      logger.print_tests(
          'PASSED TESTS', task_manager.passed, print_try_number)

  if task_manager.failed:
    logger.print_tests(
        'FAILED TESTS', task_manager.failed, print_try_number)
    logger.move_to('failed', task_manager.failed)

  if task_manager.started:
    # Tasks still in 'started' after execution were interrupted mid-run.
    logger.print_tests(
        'INTERRUPTED TESTS', task_manager.started.values(), print_try_number)
    logger.move_to('interrupted', task_manager.started.values())

  if options.repeat > 1 and (task_manager.failed or task_manager.started):
    logger.summarize(task_manager.passed, task_manager.failed,
                     task_manager.started.values())

  logger.flush()
  times.write_to_file(save_file)
  if test_results:
    test_results.dump_to_file_and_close()

  if sigint_handler.got_sigint():
    return -signal.SIGINT

  return task_manager.global_exit_code
if __name__ == "__main__":
  # Propagate main()'s return value as the process exit code.
  sys.exit(main())
|
relay.py | from asyncio.runners import run
from threading import Thread, Lock
from arduino import STORE_FILE as arduino_remember_cache, RememberedDeviceIsNotConnectedException, get_arduino_serial_connection
import socketio
import serial
import json
import time
io = socketio.Client()

try:
    arduino = get_arduino_serial_connection(baudrate=115200)
except RememberedDeviceIsNotConnectedException as e:
    print(f"[ERROR] {str(e)}")
    print(f" connect the arduino or remove the '{arduino_remember_cache}' file")
    # Bug fix: execution previously fell through here with `arduino` left
    # undefined, which later surfaced as a confusing NameError inside
    # serial_thread_worker(). Exit immediately with a failure status instead.
    raise SystemExit(1)

# Shared key sent to the relay server to authenticate this client.
# NOTE(review): hardcoded credential in source — consider loading it from an
# environment variable or config file.
IOT_KEY = "*2138192AHKHSBANM%^#@!@#^%&$%"

# True while the serial reader thread should keep polling the arduino.
running: bool = False
# Background thread created by start_serial_thread(); None until started.
serial_thread: Thread = None
def serial_thread_worker():
    """Poll the arduino's serial line and relay readings to the server.

    Runs on a background thread until the module-level `running` flag is
    cleared. Each line read from the arduino is expected to be a JSON object
    containing "temperature" and "humidity" fields, which are forwarded to
    the server as an "update" event.
    """
    global running, arduino
    print("serial thread running")
    while running:
        # errors="replace" keeps serial line noise from killing the thread
        # with an uncaught UnicodeDecodeError.
        data = arduino.readline().decode("utf-8", errors="replace").strip()
        if data:
            try:
                json_data = json.loads(data)
                io.emit("update", {
                    "temperature": json_data["temperature"],
                    "humidity": json_data["humidity"]
                })
            except json.JSONDecodeError:
                print("[ERROR] Couldn't parse data from arduino")
                print("incoming data: '%s'" % data)
            except KeyError as err:
                # Bug fix: valid JSON missing an expected field previously
                # raised an uncaught KeyError that silently killed the thread.
                print("[ERROR] Arduino data missing field: %s" % err)
        time.sleep(2)
    print("serial threading stopping")
def start_serial_thread():
    """Spawn the background thread that forwards serial data to the server."""
    global running, serial_thread
    running = True
    worker = Thread(target=serial_thread_worker)
    serial_thread = worker
    worker.start()
def stop_serial_thread():
    """Signal the worker loop to exit and wait for the thread to finish."""
    global running
    running = False
    # Join only if the thread was ever started (serial_thread is None before
    # the first start_serial_thread() call).
    if serial_thread is not None:
        serial_thread.join()
@io.event
def connect():
    # Fired by python-socketio when the client connects; identify this relay
    # to the server with the shared key and a human-readable location.
    print("[socketio] connected to server")
    io.emit("upgrade", {
        "UPGRADE-KEY": IOT_KEY,
        "location": "Srijan's Home"
    })
@io.on("upgrade-success")
def upgrade_success(data):
    # The server accepted our "upgrade" handshake: log the payload and begin
    # relaying serial data.
    print(data)
    start_serial_thread()
@io.on("upgrade-error")
def upgrade_error(data):
    # The server rejected the "upgrade" handshake; log and carry on (the
    # serial thread is never started in this case).
    print(data)
@io.on("update-error")
def update_error(data):
    # The server could not process an "update" event we emitted.
    print("update error", data)
@io.event
def disconnect():
    """Stop the serial relay thread when the server connection drops.

    Bug fix: this handler was previously named ``disconnected``.
    ``@io.event`` registers the handler under the function's own name, and
    python-socketio's reserved client event is ``disconnect`` — so the old
    handler never fired and the serial thread kept running after a
    disconnect.
    """
    print("[socketio] disconnected")
    stop_serial_thread()
def main():
    """Connect to the relay server and block until the connection ends."""
    io.connect("https://weather-station-relay-server.herokuapp.com/")
    # Blocks the main thread; event handlers above do the actual work.
    io.wait()
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl+C: stop the serial worker and close the socket before exiting.
        print()
        stop_serial_thread()
        if io.connected:
            io.disconnect()
        exit(0)
|
better_logging.py | import thread
import threading
import time
import io
import Queue
import atexit
import ctypes
import datetime
# Handle to glibc, used to issue raw syscalls (see __get_thread_id).
# Linux-only by construction.
__libc = ctypes.cdll.LoadLibrary('libc.so.6')

# Numeric level -> display name.
__log_levels_ints = {
    0: "DEBUG",
    1: "INFO",
    2: "WARN",
    3: "ERROR",
    4: "FATAL"
}

# Display name -> numeric level (inverse of __log_levels_ints).
__log_levels_strings = {
    "DEBUG": 0,
    "INFO": 1,
    "WARN": 2,
    "ERROR": 3,
    "FATAL": 4
}

# Cross-thread queue of pending (level, thread_id, sub_system, message)
# tuples consumed by the writer thread.
__logging_queue = Queue.Queue()
# Path of the log file; set exactly once by the Controller.
__logging_filename = None
# Writer state: 0 = not yet enabled, 1 = running, 2 = shutting down.
__logging_enabled = 0
# The singleton Controller instance, once created.
__logging_controller = None
def __get_thread_id():
    # Raw gettid: returns the kernel thread id of the calling thread.
    # NOTE(review): syscall number 186 is architecture-specific (x86-64
    # Linux) — confirm this module only runs on that platform.
    return __libc.syscall(186)
def __drain_logging_queue():
    """Writer-thread loop: drain queued messages into the log file.

    Blocks until a Controller supplies a filename, then appends formatted
    lines until shutdown (``__logging_enabled == 2``) AND the queue is empty.
    """
    global __logging_enabled
    global __logging_filename
    global __logging_queue
    global __controller_default_level
    # Wait for a Controller to set the filename (or for shutdown).
    # NOTE(review): if shutdown (enabled == 2) occurs before a filename is
    # ever set, io.open(None) below would raise and kill this thread —
    # presumably a Controller is always created first; confirm.
    while not __logging_filename and not __logging_enabled == 2:
        time.sleep(0.25)
    with io.open(__logging_filename, mode='a+', encoding='utf-8') as log_file:
        # Keep draining while enabled; after shutdown is requested, finish
        # whatever is still queued before closing the file.
        while __logging_enabled == 1 or not __logging_queue.empty():
            try:
                # Tuple layout: (level, thread_id, sub_system, message).
                log_this = __logging_queue.get(timeout=0.25)
                # Controller-level filter, applied at write time.
                if log_this[0] < __controller_default_level:
                    continue
                cur_time = datetime.datetime.now().strftime(
                    "%a %b %d %Y %H:%M:%S.%f")
                # Format: LEVEL timestamp [thread-id-hex] (subsystem) message
                log_file.write(u'{3: <5} {0} [{1:x}] ({2}) {4}\n'.format(
                    cur_time,
                    log_this[1],
                    log_this[2],
                    __log_levels_ints[log_this[0]],
                    log_this[3]))
            except Exception:
                # Swallows Queue.Empty from the timed get (expected while
                # idle); note this also hides genuine write errors.
                pass
# Start the background writer thread at import time; it blocks inside
# __drain_logging_queue until a Controller supplies a filename.
__logging_thread = threading.Thread(target=__drain_logging_queue)
__logging_thread.setDaemon(True)
__logging_thread.start()


def __stop_logging():
    """Ask the writer thread to drain remaining messages, then wait for it."""
    global __logging_enabled
    global __logging_thread
    # 2 = shutting down: the writer finishes the queue and returns.
    __logging_enabled = 2
    __logging_thread.join()


# Flush and stop the writer thread on interpreter exit.
atexit.register(__stop_logging)

# Minimum level actually written by the writer thread; overwritten by the
# Controller from its default_level argument.
__controller_default_level = 0
class Controller(object):
    """Singleton that configures the module-level logging machinery.

    Exactly one Controller may exist per process; it sets the log filename,
    enables the writer thread, and fixes the global minimum log level.
    """

    def __init__(self, filename, default_level="INFO"):
        # globals()['__name'] lookups are used throughout because inside a
        # class body a bare ``__name`` would be mangled to
        # ``_Controller__name`` and miss the module-level globals.
        if globals()['__logging_filename'] is None:
            globals()['__logging_filename'] = filename
        if not globals()['__logging_enabled']:
            globals()['__logging_enabled'] = 1
        self.thread_id = globals()['__get_thread_id']()
        if globals()['__logging_controller'] is None:
            globals()['__logging_controller'] = self
        else:
            raise RuntimeError("There can only be one logging controller")
        # NOTE(review): this enabled-check duplicates the one a few lines
        # above and can never trigger after it — appears redundant.
        if not globals()['__logging_enabled']:
            globals()['__logging_enabled'] = 1
        # Messages below this level are dropped by the writer thread.
        globals()['__controller_default_level'] = (
            globals()['__log_levels_strings'][default_level])
class Logger(object):
    """Per-subsystem handle that enqueues messages for the writer thread."""

    def __init__(self, sub_system, default_level="INFO"):
        # globals()['__name'] bypasses class-body name mangling of
        # double-underscore module globals (same trick as Controller).
        if globals()['__logging_controller'] is None:
            raise RuntimeError(
                "A logger instance cannot exist without a controller")
        # NOTE: the thread id is captured once, at construction time — every
        # message from this Logger is tagged with it, even when log() is
        # called from a different thread.
        self.thread_id = globals()['__get_thread_id']()
        self.sub_system = sub_system
        # Messages below this level are dropped client-side in log().
        self.default_level = globals()['__log_levels_strings'][default_level]

    def log(self, level, message):
        """Enqueue *message* at numeric *level* unless filtered out."""
        if level < self.default_level:
            return
        globals()['__logging_queue'].put((level,
                                          self.thread_id,
                                          self.sub_system,
                                          message))

    # Convenience wrappers, one per level (see __log_levels_ints).
    def debug(self, message):
        self.log(0, message)

    def info(self, message):
        self.log(1, message)

    def warn(self, message):
        self.log(2, message)

    def error(self, message):
        self.log(3, message)

    def fatal(self, message):
        self.log(4, message)
|
Day5.py | import os
import time
import threading
import pyautogui
print("Integration Testing (Day 5) ...")
# Build the Java project, then run from its classes directory.
os.system("mvn compile")
os.chdir("./target/classes")
# Paths are relative to target/classes after the chdir above. The leading and
# trailing spaces in validAcc/master are significant: runJava concatenates
# its arguments directly with no separators.
validAcc = " ../../ValidAccList.txt "
transSumDir = "../../TransactionFiles/"
master = " ../../Master.txt "
def runJava(session, arg1, arg2):
    """Launch the Quinterac Java app with the given session and arguments.

    The arguments are concatenated directly (no added separators), so any
    required spacing must be embedded in the argument strings themselves.
    """
    command = "java main/Quinterac %s%s%s" % (session, arg1, arg2)
    os.system(command)
# Session 1: start the frontend (writing TransSum1.txt), then drive it with
# simulated keystrokes to delete account 1234567.
t = threading.Thread(target=runJava, args=("frontend", validAcc, transSumDir+"TransSum1.txt",))
t.start()
time.sleep(1)  # important for race conditions
pyautogui.write("login")
pyautogui.press("enter")
pyautogui.write("agent")
pyautogui.press("enter")
pyautogui.write("deleteacct")
pyautogui.press("enter")
pyautogui.write("1234567")
pyautogui.press("enter")
# "qwerty" — presumably the account name for the delete; confirm against
# the frontend's deleteacct prompt sequence.
pyautogui.write("qwerty")
pyautogui.press("enter")
pyautogui.write("logout")
pyautogui.press("enter")
# Session 2: same flow against TransSum2.txt, deleting account 1234568.
t = threading.Thread(target=runJava, args=("frontend", validAcc, transSumDir+"TransSum2.txt",))
t.start()
time.sleep(1)  # important for race conditions
pyautogui.write("login")
pyautogui.press("enter")
pyautogui.write("agent")
pyautogui.press("enter")
pyautogui.write("deleteacct")
pyautogui.press("enter")
pyautogui.write("1234568")
pyautogui.press("enter")
pyautogui.write("abcde")
pyautogui.press("enter")
pyautogui.write("logout")
pyautogui.press("enter")
# Session 3: same flow against TransSum3.txt, deleting account 1234569.
t = threading.Thread(target=runJava, args=("frontend", validAcc, transSumDir+"TransSum3.txt",))
t.start()
time.sleep(1)  # important for race conditions
pyautogui.write("login")
pyautogui.press("enter")
pyautogui.write("agent")
pyautogui.press("enter")
pyautogui.write("deleteacct")
pyautogui.press("enter")
pyautogui.write("1234569")
pyautogui.press("enter")
pyautogui.write("uwxyz")
pyautogui.press("enter")
pyautogui.write("logout")
pyautogui.press("enter")
# Finally run the backend against the master file and the merged transaction
# summary; the longer sleep gives it time to finish before the script exits.
t = threading.Thread(target=runJava, args=("backend", master, "../../MergeTransSum.txt",))
t.start()
time.sleep(5)  # important for race conditions
|
dumping_callback_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tfdbg v2 dumping callback."""
import collections
import os
import shutil
import socket
import tempfile
import threading
from absl.testing import parameterized
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import debug_events_reader
from tensorflow.python.debug.lib import dumping_callback
from tensorflow.python.debug.lib import dumping_callback_test_lib
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
# Dumped stack traces and source-file lists are keyed by (host, file path);
# the tests below compare against these two values.
_host_name = socket.gethostname()
_current_file_full_path = os.path.abspath(__file__)
class DumpingCallbackTest(
dumping_callback_test_lib.DumpingCallbackTestBase, parameterized.TestCase):
def setUp(self):
super(DumpingCallbackTest, self).setUp()
self.dump_root = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.dump_root):
shutil.rmtree(self.dump_root, ignore_errors=True)
dumping_callback.disable_dump_debug_info()
super(DumpingCallbackTest, self).tearDown()
def _verifyStackFrames(self, stack_frames):
"""Verify the correctness of the stack frames.
Currently, it simply asserts that the current file is found in the stack
frames.
TODO(cais): Perhaps implement a stricter check later.
Args:
stack_frames: The stack frames to verify.
"""
self.assertTrue([
frame for frame in stack_frames if frame[0] == _current_file_full_path])
def _expectedDefaultDeviceName(self):
gpu_name = test_util.gpu_device_name()
if gpu_name:
return "/job:localhost/replica:0/task:0" + gpu_name
else:
return "/job:localhost/replica:0/task:0/device:CPU:0"
  def testInvalidTensorDebugModeCausesError(self):
    """An unknown tensor_debug_mode is rejected with a descriptive error."""
    with self.assertRaisesRegex(
        ValueError, r"Invalid value in tensor_debug_mode \(\'NONSENSICAL\'\).*"
        r"Valid options.*NO_TENSOR.*"):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_debug_mode="NONSENSICAL")
  # NOTE(review): "FulHealth" below is a typo in the generated test-case
  # name (cf. "FullHealth" in the other parameter lists); left unchanged
  # since renaming would change test selection by name.
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
      ("FulHealth", "FULL_HEALTH"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testEnableDumpDebugInfoLogsTensorDebugModeAsStringName(self,
                                                             tensor_debug_mode):
    """enable_dump_debug_info logs the mode by its string name, once."""
    log_messages = []

    def fake_logging_info(*args):
      # Capture the positional args of each tf_logging.info() call.
      log_messages.append(args)

    with test.mock.patch.object(
        tf_logging, "info", side_effect=fake_logging_info):
      dumping_callback.enable_dump_debug_info(
          self.dump_root, tensor_debug_mode=tensor_debug_mode)
    self.assertLen(log_messages, 1)
    self.assertIn(self.dump_root, log_messages[0])
    self.assertIn(tensor_debug_mode, log_messages[0])
  def testDisablingTracingCallbackWithoutEnablingFirstIsTolerated(self):
    """disable_dump_debug_info() is a no-op when dumping was never enabled."""
    dumping_callback.disable_dump_debug_info()
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullHealth", "FULL_HEALTH"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testPureEagerOpExecution(self, tensor_debug_mode):
    """Test dumping data from eager op execution: float32."""
    x = constant_op.constant(10.0)
    zero = constant_op.constant(0.0)
    one = constant_op.constant(1.0)
    two = constant_op.constant(2.0)
    three = constant_op.constant(3.0)
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    # Use Collatz conjecture as a test case.
    while x > one:
      if math_ops.equal(x % two, zero):
        x = x / two
      else:
        x = x * three + one

    writer.FlushNonExecutionFiles()
    self._readAndCheckMetadataFile()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      # Before FlushExecutionFiles() is called, the .execution file should be
      # empty.
      self.assertFalse(reader.executions())

      # After the flushing, the .execution file should hold the appropriate
      # contents.
      writer.FlushExecutionFiles()
      reader.update()
      executions = reader.executions()
      prev_wall_time = 1
      executed_op_types = []
      tensor_values = collections.defaultdict(lambda: [])
      for execution in executions:
        # Wall times must be monotonically non-decreasing.
        self.assertGreaterEqual(execution.wall_time, prev_wall_time)
        prev_wall_time = execution.wall_time
        executed_op_types.append(execution.op_type)
        # Check the device name.
        if execution.op_type in ("AddV2", "Mul", "RealDiv"):
          self.assertLen(execution.output_tensor_device_ids, 1)
          self.assertEqual(
              reader.device_name_by_id(execution.output_tensor_device_ids[0]),
              self._expectedDefaultDeviceName(),
              "Unexpected device name from eager op %s" % execution.op_type)

        # No graph IDs should have been logged for eager op executions.
        self.assertFalse(execution.graph_id)
        self.assertTrue(execution.input_tensor_ids)
        self.assertTrue(execution.output_tensor_ids)
        self.assertEqual(
            debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
            tensor_debug_mode)
        if tensor_debug_mode == "NO_TENSOR":
          # Due to the NO_TENSOR tensor debug mode, tensor_protos ought to
          # be empty.
          self.assertFalse(execution.debug_tensor_values)
        elif tensor_debug_mode == "CURT_HEALTH":
          self.assertLen(execution.debug_tensor_values, 1)
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: 0 means there is no inf or nan.
            self.assertAllClose(execution.debug_tensor_values, [[-1.0, 0.0]])
        elif tensor_debug_mode == "CONCISE_HEALTH":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: each scalar tensor has 1 element.
            # Remaining elements: no -inf, inf or nan in these
            self.assertAllClose(
                execution.debug_tensor_values, [[-1, 1, 0, 0, 0]])
        elif tensor_debug_mode == "FULL_HEALTH":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # Elements: [
            #   -1 is the unset tensor_id for eager op execution,
            #   device ID (set to -1 for now),
            #   dtype, rank, element_count,
            #   neg_inf_count, pos_inf_count, nan_count
            #   neg_finite_count, zero_count, pos_finite_count]
            self.assertAllClose(
                execution.debug_tensor_values,
                [[-1, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1]])
        elif tensor_debug_mode == "SHAPE":
          if execution.op_type in ("AddV2", "Mul", "RealDiv"):
            # 1st element: -1 is the unset tensor_id for eager op execution.
            # 2nd element: dtype enum value (float32).
            # 3rd element: rank (scalar).
            # 4th element: element count (4).
            # Remaining elements: shape at fixed length (6).
            self.assertAllClose(execution.debug_tensor_values,
                                [[-1, 1, 0, 1, 0, 0, 0, 0, 0, 0]])
        elif tensor_debug_mode == "FULL_TENSOR":
          tensor_values[execution.op_type].append(
              reader.execution_to_tensor_values(execution)[0])

        host_name, stack_frames = reader.read_execution_stack_trace(execution)
        self.assertEqual(host_name, _host_name)
        self._verifyStackFrames(stack_frames)

      if tensor_debug_mode == "FULL_TENSOR":
        # Full tensor values recorded along the Collatz trajectory of 10.
        self.assertAllClose(tensor_values["Greater"], [1, 1, 1, 1, 1, 1, 0])
        self.assertAllClose(tensor_values["RealDiv"], [5, 8, 4, 2, 1])
        self.assertAllClose(tensor_values["Mul"], [15])
        self.assertAllClose(tensor_values["AddV2"], [16])

      # Ops are expected in the exact order produced by the Collatz loop.
      self.assertEqual(
          executed_op_types,
          [
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 10 --> 5
              "Greater",
              "FloorMod",
              "Equal",
              "Mul",
              "AddV2",  # 5 --> 16
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 16 --> 8
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 8 --> 4
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 4 --> 2
              "Greater",
              "FloorMod",
              "Equal",
              "RealDiv",  # 2 --> 1
              "Greater"
          ])

      # Due to the pure eager op execution, the .graph file and the
      # .graph_execution_traces file ought to be empty.
      self.assertFalse(reader.outermost_graphs())
      self.assertEqual(reader.num_graph_execution_traces(), 0)
  @parameterized.named_parameters(
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("FullHealth", "FULL_HEALTH"),
      ("Shape", "SHAPE"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testModesSummarizingBadNumericalValue(self, tensor_debug_mode):
    """Summarizing modes correctly flag infs/nans produced by a division."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def func(x, y):
      return (x + y) / (x - y)

    x = np.array([-3, -1, 0, 0, 1, 1, 1, 2], dtype=np.float16)
    y = np.array([2, -1, 0, 0, 1, 1, 1, 3], dtype=np.float16)
    # x - y = [-5, 0, 0, 0, 0, 0, 0, -1]
    # (x + y) / (x - y) = [0.2, -inf, nan, nan, inf, inf, inf, -5].
    self.evaluate(func(x, y))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces
                           if trace.op_type != "Const"]
      self.assertCountEqual(
          executed_op_types,
          ["Placeholder", "Placeholder", "AddV2", "Sub", "RealDiv"])
      if tensor_debug_mode == "CURT_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: indicates if there is any inf or nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1])
          else:
            self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
      elif tensor_debug_mode == "CONCISE_HEALTH":
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: element count (8).
          # Remaining 3 elements: The counts of -inf, inf and nan.
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 1, 3, 2])
          else:
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, 8, 0, 0, 0])
      elif tensor_debug_mode == "FULL_HEALTH":
        for trace in graph_exec_traces:
          # Elements: [
          #   -1 is the unset tensor_id for eager op execution,
          #   device ID (set to -1 for now),
          #   dtype, rank, element_count,
          #   neg_inf_count, pos_inf_count, nan_count
          #   neg_finite_count, zero_count, pos_finite_count]
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          if trace.op_type == "RealDiv":
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, -1, 19, 1, 8, 1, 3, 2, 1, 0, 1])
          elif trace.op_type == "Sub":
            self.assertAllClose(trace.debug_tensor_value,
                                [tensor_id, -1, 19, 1, 8, 0, 0, 0, 2, 6, 0])
      else:  # SHAPE.
        for trace in graph_exec_traces:
          # 1st element: tensor_id, should be >= 0.
          # 2nd element: dtype enum value (float16 = 19).
          # 3rd element: rank (1)
          # 4th element: element count (8).
          # Remaining elements: shape at fixed length (6).
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          self.assertGreaterEqual(tensor_id, 0)
          self.assertAllClose(trace.debug_tensor_value,
                              [tensor_id, 19, 1, 8, 8, 0, 0, 0, 0, 0])
  @parameterized.named_parameters(
      ("CurtHealth", "CURT_HEALTH"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testConstTensorsAreCaptured(self, tensor_debug_mode):
    """Const ops created inside a tf.function appear in the graph traces."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def times_two_plus_three(x):
      return x * constant_op.constant(2.0) + constant_op.constant(3.0)

    self.assertAllEqual(
        self.evaluate(times_two_plus_three(10.0)), 23.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      const_traces = [trace for trace in reader.graph_execution_traces()
                      if trace.op_type == "Const"]
      # At least the three constants above (10.0, 2.0, 3.0) must be traced.
      self.assertGreaterEqual(len(const_traces), 3)
      if tensor_debug_mode == "CURT_HEALTH":
        # Under CURT_HEALTH, each debug tensor value has the form
        # [tensor_id, has_inf_or_nan].
        self.assertLen(const_traces[0].debug_tensor_value, 2)
        self.assertEqual(const_traces[0].debug_tensor_value[1], 0)
        self.assertLen(const_traces[1].debug_tensor_value, 2)
        self.assertEqual(const_traces[1].debug_tensor_value[1], 0)
        self.assertLen(const_traces[2].debug_tensor_value, 2)
        self.assertEqual(const_traces[2].debug_tensor_value[1], 0)
      else:  # FULL_TENSOR.
        const_tensor_values = [
            reader.graph_execution_trace_to_tensor_value(const_trace)
            for const_trace in const_traces]
        # Avoid making assertion on the particular order of the debug tensors
        # for the three Consts because it may be indeterminate.
        self.assertIn(10.0, const_tensor_values)
        self.assertIn(2.0, const_tensor_values)
        self.assertIn(3.0, const_tensor_values)
  @parameterized.named_parameters(
      ("Shape", "SHAPE"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testBooleanTensors(self, tensor_debug_mode):
    """SHAPE mode records dtype/rank/shape summaries for boolean tensors."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)

    @def_function.function
    def func(x, y):
      return math_ops.logical_not(math_ops.logical_and(x, y))

    x = np.array([[False, False], [True, True]], dtype=np.bool_)
    y = np.array([[False, True], [False, True]], dtype=np.bool_)
    self.assertAllEqual(
        self.evaluate(func(x, y)), [[True, True], [True, False]])

    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces
                           if trace.op_type != "Const"]
      self.assertEqual(
          executed_op_types,
          ["Placeholder", "Placeholder", "LogicalAnd", "LogicalNot"])
      for trace in graph_exec_traces:
        tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
        self.assertGreaterEqual(tensor_id, 0)
        # 1st element: tensor_id, should be >= 0.
        # 2nd element: dtype enum value (bool).
        # 3rd element: rank (2).
        # 4th element: element count (4).
        # Remaining elements: shape at fixed length.
        self.assertAllClose(
            trace.debug_tensor_value, [tensor_id, 10, 2, 4, 2, 2, 0, 0, 0, 0])
  def testListingSourceFiles(self):
    """The dump's source-file list includes (host, path) of this test file."""
    writer = dumping_callback.enable_dump_debug_info(self.dump_root)
    # Run a simple eager execution event, so that the source files are dumped.
    self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      source_file_list = reader.source_file_list()
      # Every entry is a (host_name, file_path) pair.
      self.assertIsInstance(source_file_list, tuple)
      for item in source_file_list:
        self.assertIsInstance(item, tuple)
        self.assertLen(item, 2)
      self.assertIn((_host_name, _current_file_full_path), source_file_list)
  def testReadingSourceLines(self):
    """Dumped source lines for this file match its on-disk contents."""
    writer = dumping_callback.enable_dump_debug_info(self.dump_root)
    # Run a simple eager execution event, so that the source-file contents are
    # dumped.
    self.assertAllClose(math_ops.truediv(7.0, 1.0 / 6.0), 42.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      with open(_current_file_full_path, "rt") as f:
        file_lines = f.read().split("\n")
      self.assertEqual(
          reader.source_lines(_host_name, _current_file_full_path), file_lines)
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("FullHealth", "FULL_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testNestedFunctionExecutionWithoutControlFlow(self, tensor_debug_mode):
    """Dump and read back a tf.function that calls another tf.function.

    For each tensor-debug mode, verifies: the outer/inner graph relationship,
    device names (eager mode only), the graph-building op digests, the graph-ID
    stack of each executed op, and the per-mode `debug_tensor_value` contents.
    """
    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)
    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))
    # sin(1 + log(2 + 3)).
    self.assertAllClose(sin1p_log_sum(x, y), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      outermost_graphs = reader.outermost_graphs()
      self.assertLen(outermost_graphs, 1)
      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, so doesn't get logged to the
        # .execution file.
        executions = reader.executions()
        self.assertLen(executions, 1)
        self.assertIn("sin1p_log_sum", executions[0].op_type)
        # Get the executed graph and verify its identity and inner graph.
        graph = reader.graph_by_id(executions[0].graph_id)
        self.assertEqual(graph.name, "sin1p_log_sum")
        self.assertLen(graph.inner_graph_ids, 1)
        inner_graph = reader.graph_by_id(graph.inner_graph_ids[0])
        self.assertEqual(inner_graph.name, "log_sum")
        # Check device names.
        self.assertLen(executions[0].output_tensor_device_ids, 1)
        self.assertEqual(
            reader.device_name_by_id(executions[0].output_tensor_device_ids[0]),
            self._expectedDefaultDeviceName())
        self.assertIn(self._expectedDefaultDeviceName(),
                      set(reader.device_name_map().values()))
      # Verify the recorded graph-building history.
      placeholder_op_digests = reader.graph_op_digests(op_type="Placeholder")
      add_op_digests = reader.graph_op_digests(op_type="AddV2")
      self.assertLen(add_op_digests, 2)
      self.assertEqual(
          reader.graph_by_id(add_op_digests[0].graph_id).name, "log_sum")
      self.assertEqual(
          reader.graph_by_id(add_op_digests[1].graph_id).name, "sin1p_log_sum")
      log_op_digests = reader.graph_op_digests(op_type="Log")
      self.assertLen(log_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(log_op_digests[0].graph_id).name, "log_sum")
      sin_op_digests = reader.graph_op_digests(op_type="Sin")
      self.assertLen(sin_op_digests, 1)
      self.assertEqual(
          reader.graph_by_id(sin_op_digests[0].graph_id).name, "sin1p_log_sum")
      # Verify the output tensor IDs and the stack traces.
      for op_digest in add_op_digests + log_op_digests + sin_op_digests:
        # These are all single-output ops.
        self.assertLen(op_digest.output_tensor_ids, 1)
        self.assertGreaterEqual(op_digest.output_tensor_ids[0], 0)
        _, stack_frames = reader.read_graph_op_creation_stack_trace(op_digest)
        self._verifyStackFrames(stack_frames)
      graph_exec_traces = [trace for trace in reader.graph_execution_traces()
                           if trace.op_type != "Const"]
      executed_op_types = [digest.op_type for digest in graph_exec_traces]
      self.assertEqual(
          executed_op_types,
          ["Placeholder", "Placeholder", "Placeholder", "Placeholder",
           "AddV2", "Log", "AddV2", "Sin"])
      placeholder_traces = graph_exec_traces[:4]
      non_placeholder_traces = graph_exec_traces[4:]
      # Verify the graph ID stack of each op.
      # The outer function's 1st Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[0].graph_ids[-1]).name,
          "sin1p_log_sum")
      # The outer function's 2nd Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[1].graph_ids[-1]).name,
          "sin1p_log_sum")
      # The inner function's 1st Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[2].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[2].graph_ids[-2]).name,
          "sin1p_log_sum")
      # The inner function's 2nd Placeholder.
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[3].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(placeholder_traces[3].graph_ids[-2]).name,
          "sin1p_log_sum")
      # 1st AddV2 op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[0].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[0].graph_ids[-2]).name,
          "sin1p_log_sum")
      # Log op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[1].graph_ids[-1]).name,
          "log_sum")
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[1].graph_ids[-2]).name,
          "sin1p_log_sum")
      # 2nd AddV2 op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[2].graph_ids[-1]).name,
          "sin1p_log_sum")
      # Sin op.
      self.assertEqual(
          reader.graph_by_id(non_placeholder_traces[3].graph_ids[-1]).name,
          "sin1p_log_sum")
      if tensor_debug_mode == "NO_TENSOR":
        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
        # to be an empty float32 tensor, so debug_tensor_value is None.
        for trace in graph_exec_traces:
          self.assertIsNone(trace.debug_tensor_value)
      elif tensor_debug_mode == "CURT_HEALTH":
        # Test the association between graph exec and prior graph building.
        # In each case, the 1st element of debug_tensor_value is the ID of the
        # symbolic tensor and the 2nd element is a zero indicating there is no
        # inf or nan.
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 1st AddV2 op.
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # Log op.
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # 2nd AddV2 op.
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 0.0])
        self.assertAllClose(  # Sin op.
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 0.0])
      elif tensor_debug_mode == "CONCISE_HEALTH":
        # 1st element: tensor_id.
        # 2nd element: element count. Remaining elements: all zero because there
        # is no -inf, inf or nan.
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0], 1., 0., 0., 0.])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0], 1., 0., 0., 0.])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0], 1., 0., 0., 0.])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0], 1., 0., 0., 0.])
        # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Log op.
        self.assertAllClose(
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
        # Sin op.
        self.assertAllClose(
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1.0, 0.0, 0.0, 0.0])
      elif tensor_debug_mode == "FULL_HEALTH":
        # Elements: [
        #   tensor_id (the symbolic tensor's ID from graph building),
        #   device ID (set to -1 for now),
        #   dtype, rank, element_count,
        #   neg_inf_count, pos_inf_count, nan_count
        #   neg_finite_count, zero_count, pos_finite_count]
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # Log op.
        self.assertAllClose(
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
        # Sin op.
        self.assertAllClose(
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0],
             -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
      elif tensor_debug_mode == "SHAPE":
        # 1st element: tensor_id.
        # 2nd element: dtype (float32).
        # 3rd element: rank (scalar).
        # 4th element: element count (1).
        # Remaining elements: shape padded to fixed length (6).
        self.assertAllClose(  # 1st outer placeholder.
            placeholder_traces[0].debug_tensor_value,
            [placeholder_op_digests[0].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        self.assertAllClose(  # 2nd outer placeholder.
            placeholder_traces[1].debug_tensor_value,
            [placeholder_op_digests[1].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        self.assertAllClose(  # 1st inner placeholder.
            placeholder_traces[2].debug_tensor_value,
            [placeholder_op_digests[2].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        self.assertAllClose(  # 2nd inner placeholder.
            placeholder_traces[3].debug_tensor_value,
            [placeholder_op_digests[3].output_tensor_ids[0],
             1, 0, 1, 0, 0, 0, 0, 0, 0])
        # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[0].debug_tensor_value,
            [add_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Log op.
        self.assertAllClose(
            non_placeholder_traces[1].debug_tensor_value,
            [log_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_traces[2].debug_tensor_value,
            [add_op_digests[1].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
        # Sin op.
        self.assertAllClose(
            non_placeholder_traces[3].debug_tensor_value,
            [sin_op_digests[0].output_tensor_ids[0], 1, 0, 1, 0, 0, 0, 0, 0, 0])
      else:  # FULL_TENSOR.
        placeholder_full_tensor_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in placeholder_traces]
        self.assertAllClose(placeholder_full_tensor_values[0], x)  # Input x.
        self.assertAllClose(placeholder_full_tensor_values[1], y)  # Input y.
        self.assertAllClose(placeholder_full_tensor_values[2], x)  # Input x.
        self.assertAllClose(placeholder_full_tensor_values[3], y)  # Input y.
        non_placeholder_full_tensor_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in non_placeholder_traces]
        self.assertAllClose(
            non_placeholder_full_tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(
            non_placeholder_full_tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            non_placeholder_full_tensor_values[2],
            np.log(5.0) + 1.0)  # 2nd AddV2 op.
        self.assertAllClose(
            non_placeholder_full_tensor_values[3],
            np.sin(np.log(5.0) + 1.0))  # Sin op.
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testGraphOpConsumingRelationIsCaptured(self, tensor_debug_mode):
    """The op-consumer wiring of the dumped graphs should be recorded.

    Checks `graph.get_op_consumers()` against the known data flow of the
    functions below, including a multi-output op (Unique).
    """
    x = constant_op.constant([2.0, 2.0])
    y = constant_op.constant([3.0, 3.0])
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)
    @def_function.function
    def maxindex_sin1p_log_sum(x, y):
      _, indices = array_ops.unique(math_ops.sin(1.0 + log_sum(x, y)))
      return math_ops.reduce_max(indices)
    maxindex = maxindex_sin1p_log_sum(x, y)
    # x and y have identical elements, so there is one unique value and the
    # max index is 0.
    self.assertAllEqual(maxindex, 0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      traces = reader.graph_execution_traces()
      add_traces = [trace for trace in traces if trace.op_type == "AddV2"]
      log_traces = [trace for trace in traces if trace.op_type == "Log"]
      sin_traces = [trace for trace in traces if trace.op_type == "Sin"]
      unique_traces = [trace for trace in traces if trace.op_type == "Unique"]
      max_traces = [trace for trace in traces if trace.op_type == "Max"]
      self.assertLen(add_traces, 2)
      self.assertLen(log_traces, 1)
      self.assertLen(sin_traces, 1)
      self.assertLen(unique_traces, 2)  # The Unique op outputs two tensors.
      self.assertLen(max_traces, 1)
      graph = reader.graph_by_id(add_traces[0].graph_id)
      # The first AddV2 op is consumed by the Log op.
      self.assertEqual(
          graph.get_op_consumers(add_traces[0].op_name),
          [(0, log_traces[0].op_name, 0)])
      graph = reader.graph_by_id(add_traces[1].graph_id)
      # The second AddV2 op is consumed by the Sin op.
      self.assertEqual(
          graph.get_op_consumers(add_traces[1].op_name),
          [(0, sin_traces[0].op_name, 0)])
      # The last Sin op is consumed by the Unique op.
      self.assertEqual(
          graph.get_op_consumers(sin_traces[0].op_name),
          [(0, unique_traces[0].op_name, 0)])
      # The Unique op's 2nd output tensor is consumed by the Max op.
      self.assertEqual(
          graph.get_op_consumers(unique_traces[0].op_name),
          [(1, max_traces[0].op_name, 0)])
def testCapturingExecutedGraphIdsOfTwoCompilationsOfSameFunction(self):
"""Test correct executed IDs of two FuncGraphs from the same Py function."""
x_float32 = constant_op.constant(np.array(3.5, dtype=np.float32))
x_float64 = constant_op.constant(np.array(4.5, dtype=np.float64))
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def ceil_times_two(x):
return math_ops.ceil(x) * 2.0
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
self.assertAllClose(ceil_times_two(x_float32), 8.0)
self.assertAllClose(ceil_times_two(x_float64), 10.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 4)
for execution in executions:
self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
executed_graph_ids = [execution.graph_id for execution in executions]
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
for executed_graph_id in executed_graph_ids:
self.assertEqual(
reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
def testCapturingExecutedGraphIdsOfDuplicateFunctionNames(self):
"""Two FuncGraphs compiled from Python functions with identical names."""
x = constant_op.constant(np.array(3.5, dtype=np.float32))
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
class TestClass(object):
@def_function.function
def ceil_times_two(self, x):
return math_ops.ceil(x) * 2.0
# The `ceil_times_two` method of the two objects will be compiled
# into separate FuncGraphs.
test_object_1 = TestClass()
test_object_2 = TestClass()
# Four executions, with two different FuncGraphs, which should lead
# to two unique executed graph IDs (see assertion below).
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_1.ceil_times_two(x), 8.0)
self.assertAllClose(test_object_2.ceil_times_two(x), 8.0)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 4)
for execution in executions:
self.assertStartsWith(execution.op_type, "__inference_ceil_times_two_")
executed_graph_ids = [execution.graph_id for execution in executions]
self.assertEqual(executed_graph_ids[0], executed_graph_ids[2])
self.assertEqual(executed_graph_ids[1], executed_graph_ids[3])
self.assertNotEqual(executed_graph_ids[0], executed_graph_ids[1])
self.assertNotEqual(executed_graph_ids[2], executed_graph_ids[3])
for executed_graph_id in executed_graph_ids:
self.assertEqual(
reader.graph_by_id(executed_graph_id).name, "ceil_times_two")
  @parameterized.named_parameters(
      ("AddV2", "AddV2"),
      ("Log", "Log"),
      ("AddV2AndLog", "(AddV2|Log)"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testOpRegex(self, op_regex):
    """Only ops whose type matches `op_regex` should get execution traces.

    The graph-building history (op digests) is recorded regardless of the
    regex; the filter applies to execution traces only.
    """
    x = constant_op.constant(2.0)
    y = constant_op.constant(3.0)
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        op_regex=op_regex)
    @def_function.function
    def log_sum(x, y):
      return math_ops.log(x + y)
    @def_function.function
    def sin1p_log_sum(x, y):
      return math_ops.sin(1.0 + log_sum(x, y))
    # sin(1 + log(2 + 3)).
    self.assertAllClose(
        self.evaluate(sin1p_log_sum(x, y)), np.sin(1.0 + np.log(5.0)))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_op_digests = reader.graph_op_digests()
      op_types = [digest.op_type for digest in graph_op_digests]
      # All op types appear in the graph-building records, regardless of
      # op_regex.
      self.assertIn("AddV2", op_types)
      self.assertIn("Log", op_types)
      self.assertIn("Sin", op_types)
      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests]
      if op_regex == "AddV2":
        self.assertEqual(executed_op_types, ["AddV2", "AddV2"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(
            tensor_values[1], np.log(5.0) + 1.0)  # 2nd AddV2 op.
      elif op_regex == "Log":
        self.assertEqual(executed_op_types, ["Log"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], np.log(5.0))  # Log op.
      else:  # "(AddV2|Log)"
        self.assertEqual(executed_op_types, ["AddV2", "Log", "AddV2"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], 5.0)  # 1st AddV2 op.
        self.assertAllClose(tensor_values[1], np.log(5.0))  # Log op.
        self.assertAllClose(
            tensor_values[2], np.log(5.0) + 1.0)  # 2nd AddV2 op.
def testIncorrectTensorDTypeArgFormatLeadsToError(self):
with self.assertRaisesRegex(
ValueError, r".*expected.*list.*tuple.*callable.*but received.*\{\}"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes=dict())
with self.assertRaisesRegex(
ValueError, r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(self.dump_root,
tensor_dtypes="float32")
with self.assertRaisesRegex(
ValueError, r".*expected.*list.*tuple.*callable.*but received.*"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_dtypes=dtypes.float32)
with self.assertRaises(TypeError):
dumping_callback.enable_dump_debug_info(self.dump_root, tensor_dtypes=[
lambda dtype: dtype.is_floating, lambda dtype: dtype.is_integer])
  @parameterized.named_parameters(
      ("float", [dtypes.float32], None),
      ("float_only_sum", ["float32"], "Sum"),
      ("float_no_sum", (dtypes.float32,), "(?!Sum)"),
      ("int", [dtypes.int32], None),
      ("int_via_lambda", lambda dtype: dtype.is_integer, None),
      ("exclude_Sum", None, "(?!Sum)"),
      ("All", None, None),
  )
  @test_util.run_in_graph_and_eager_modes
  def testTensorDTypesAndOpRegexFilters(self,
                                        tensor_dtypes,
                                        op_regex):
    """`tensor_dtypes` and `op_regex` filters should combine conjunctively."""
    xs = constant_op.constant([2., 6., 8., 1., 2.], dtype=dtypes.float32)
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode="FULL_TENSOR",
        tensor_dtypes=tensor_dtypes,
        op_regex=op_regex)
    @def_function.function
    def unique_sum(xs):
      """Sum over the unique values, for testing."""
      unique_xs, indices = array_ops.unique(xs)
      return math_ops.reduce_sum(unique_xs), indices
    y, indices = self.evaluate(unique_sum(xs))
    self.assertAllClose(y, 17.)  # 2 + 6 + 8 + 1.
    self.assertAllEqual(indices, [0, 1, 2, 3, 0])
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_exec_digests = reader.graph_execution_traces(digest=True)
      executed_op_types = [digest.op_type for digest in graph_exec_digests
                           if digest.op_type not in ("Const", "Placeholder")]
      tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
                       for digest in graph_exec_digests
                       if digest.op_type not in ("Const", "Placeholder")]
      # Each branch below corresponds to one of the parameterized
      # (tensor_dtypes, op_regex) combinations above.
      if tensor_dtypes == [dtypes.float32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique", "Sum"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllClose(tensor_values[1], 17.)  # Sum.
      elif tensor_dtypes == ["float32"] and op_regex == "Sum":
        self.assertEqual(executed_op_types, ["Sum"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], 17.)  # Sum.
      elif tensor_dtypes == (dtypes.float32,) and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
      elif tensor_dtypes == [dtypes.int32] and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif callable(tensor_dtypes) and not op_regex:
        self.assertEqual(executed_op_types, ["Unique"])
        self.assertLen(tensor_values, 1)
        self.assertAllEqual(
            tensor_values[0], [0, 1, 2, 3, 0])  # Unique indices.
      elif not tensor_dtypes and op_regex == "(?!Sum)":
        self.assertEqual(executed_op_types, ["Unique", "Unique"])
        self.assertLen(tensor_values, 2)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
      else:  # "All".
        self.assertEqual(executed_op_types, ["Unique", "Unique", "Sum"])
        self.assertLen(tensor_values, 3)
        self.assertAllClose(tensor_values[0], [2, 6, 8, 1])  # Unique values.
        self.assertAllEqual(
            tensor_values[1], [0, 1, 2, 3, 0])  # Unique indices.
        self.assertAllClose(tensor_values[2], 17)  # Sum.
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("FullTensor", "FULL_TENSOR"),
  )
  @test_util.run_in_graph_and_eager_modes
  def testFunctionExecutionWithControlFlow(self, tensor_debug_mode):
    """Dump and read back a tf.function that contains a while loop."""
    x = constant_op.constant(0.5, dtype=dtypes.float32)
    times = constant_op.constant(4, dtype=dtypes.int32)
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    @def_function.function
    def iterative_doubling(x, times):
      i = constant_op.constant(0, dtype=dtypes.int32)
      while i < times:
        x = x * 2.0
        i += 1
      return x
    # 0.5 * 2 ** 4 = 8.
    self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 8.0)
    writer.FlushNonExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      graph_op_digests = reader.graph_op_digests()
      op_types = [digest.op_type for digest in graph_op_digests]
      self.assertIn("Less", op_types)
      self.assertIn("Mul", op_types)
      self.assertIn("AddV2", op_types)
      # Before FlushExecutionFiles() is called, the .execution and
      # .graph_execution_traces files should be both empty.
      self.assertEqual(reader.num_executions(), 0)
      self.assertEqual(reader.num_graph_execution_traces(), 0)
      # TODO(cais): Backport execution instrumentation to tf.Session.
      writer.FlushExecutionFiles()
      # After the flushing, the .execution file should hold the appropriate
      # contents.
      reader.update()
      if context.executing_eagerly():
        # NOTE(b/142486213): Execution of the TF function happens with
        # Session.run() in v1 graph mode, hence it doesn't get logged to the
        # .execution file.
        executions = reader.executions()
        self.assertLen(executions, 1)
        executed_op_types = [execution.op_type for execution in executions]
        self.assertIn("iterative_doubling", executions[0].op_type)
        execution = executions[0]
        # Two inputs (x, times) and one output.
        self.assertLen(execution.input_tensor_ids, 2)
        self.assertLen(execution.output_tensor_ids, 1)
        self.assertEqual(
            debug_event_pb2.TensorDebugMode.keys()[execution.tensor_debug_mode],
            tensor_debug_mode)
        if tensor_debug_mode == "FULL_TENSOR":
          tensor_values = reader.execution_to_tensor_values(execution)
          self.assertAllClose(tensor_values, [8.0])
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces
                           if trace.op_type != "Const"]
      if tensor_debug_mode != "CURT_HEALTH":
        # Less outputs a boolean tensor, which is not tracked under CURT_HEALTH.
        # The Less op should have been executed 5 times.
        self.assertEqual(executed_op_types.count("Less"), 5)
        # The last executed op should be Less.
        self.assertEqual(executed_op_types[-1], "Less")
        # AddV2 produces an int tensor, which is not tracked under CURT_HEALTH.
        # The AddV2 op should have been run, but we refrain from asserting on
        # how many times it's executed.
        self.assertIn("AddV2", executed_op_types)
        for trace in graph_exec_traces:
          self.assertEqual(trace.output_slot, 0)
      # The Mul op should have been executed 4 times.
      self.assertEqual(executed_op_types.count("Mul"), 4)
      tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
                       for trace in graph_exec_traces]
      if tensor_debug_mode == "NO_TENSOR":
        # Under the default NO_TENSOR tensor-debug mode, the tensor_proto ought
        # to be an empty float32 tensor.
        for tensor_value in tensor_values:
          self.assertAllEqual(tensor_value, [])
      elif tensor_debug_mode == "CURT_HEALTH":
        for trace in graph_exec_traces:
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          # 1st element: tensor_id; 2nd element: 0 indicating no inf or nan.
          self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0.0])
      elif tensor_debug_mode == "FULL_TENSOR":
        less_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in graph_exec_traces if trace.op_type == "Less"]
        self.assertAllEqual(less_values, [True, True, True, True, False])
        mul_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in graph_exec_traces if trace.op_type == "Mul"]
        self.assertAllClose(mul_values, [1.0, 2.0, 4.0, 8.0])
def testCallingEnableTracingTwiceWithTheSameDumpRootIsIdempotent(self):
x = constant_op.constant([10.0, 12.0, 10.0])
dumping_callback.enable_dump_debug_info(self.dump_root)
writer = dumping_callback.enable_dump_debug_info(self.dump_root)
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 2)
for execution in executions:
self.assertGreater(execution.wall_time, 0)
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
_, stack_frames = reader.read_execution_stack_trace(execution)
self._verifyStackFrames(stack_frames)
def testCallingEnableTracingTwiceWithDifferentDumpRootsOverwrites(self):
x = constant_op.constant([10.0, 12.0, 10.0])
dumping_callback.enable_dump_debug_info(self.dump_root)
new_dump_root = self.dump_root + "_new_dump_root"
writer = dumping_callback.enable_dump_debug_info(new_dump_root)
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(new_dump_root) as reader:
reader.update()
executions = reader.executions()
self.assertLen(executions, 2)
for execution in executions:
self.assertGreater(execution.wall_time, 0)
self.assertEqual(execution.op_type, "Unique")
self.assertEqual(execution.num_outputs, 2)
_, stack_frames = reader.read_execution_stack_trace(execution)
self._verifyStackFrames(stack_frames)
with debug_events_reader.DebugDataReader(
self.dump_root) as old_dump_root_reader:
old_dump_root_reader.update()
# The old dump root shouldn't have been written to.
self.assertEqual(old_dump_root_reader.num_executions(), 0)
self.assertFalse(old_dump_root_reader.outermost_graphs())
def testCallingEnableRepeatedlyWithDifferentTensorDebugMode(self):
"""Assert calling enable_dump_debug_info() with two tensor-debug modes.
It should lead to overwriting of the previously-configured mode.
"""
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="NO_TENSOR")
@def_function.function
def add_1_divide_by_2(x):
return (x + 1.0) / 2.0
self.assertAllClose(add_1_divide_by_2(constant_op.constant(4.0)), 2.5)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
graph_exec_digests = reader.graph_execution_traces(digest=True)
tensor_values = [reader.graph_execution_trace_to_tensor_value(digest)
for digest in graph_exec_digests]
for tensor_value in tensor_values:
# Under NO_TENSOR mode, each tensor is summarized as an empty float32
# array.
self.assertAllEqual(tensor_value, [])
with self.assertRaisesRegex(
ValueError, r"already.*NO_TENSOR.*FULL_TENSOR.*not be honored"):
dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode="FULL_TENSOR")
@parameterized.named_parameters(
("NoTensor", "NO_TENSOR"),
("FullTensor", "FULL_TENSOR"),
)
def testDisableTracingWorks(self, tensor_debug_mode):
x = constant_op.constant([10.0, 12.0, 10.0])
writer = dumping_callback.enable_dump_debug_info(
self.dump_root, tensor_debug_mode=tensor_debug_mode)
dumping_callback.disable_dump_debug_info()
for _ in range(2):
array_ops.unique(x)
writer.FlushNonExecutionFiles()
writer.FlushExecutionFiles()
with debug_events_reader.DebugDataReader(self.dump_root) as reader:
reader.update()
self.assertEqual(reader.num_executions(), 0)
self.assertEqual(reader.num_graph_execution_traces(), 0)
self.assertFalse(reader.outermost_graphs())
  @parameterized.named_parameters(
      ("NoTensor", "NO_TENSOR"),
      ("CurtHealth", "CURT_HEALTH"),
      ("ConciseHealth", "CONCISE_HEALTH"),
      ("FullHealth", "FULL_HEALTH"),
      ("Shape", "SHAPE"),
      ("FullTensor", "FULL_TENSOR"),
  )
  def testMultiThreadedExecutionWithSameSetting(self, tensor_debug_mode):
    """Dumping from multiple threads using the same setting."""
    writer = dumping_callback.enable_dump_debug_info(
        self.dump_root, tensor_debug_mode=tensor_debug_mode)
    x = variables.Variable(10.0, dtype=dtypes.float32)
    y = variables.Variable(3.0, dtype=dtypes.float32)
    @def_function.function
    def increase_x():
      return x.assign_add(y * 2.0)
    # One call on the main thread, then one per worker thread below.
    increase_x()
    num_threads = 3
    threads = []
    for _ in range(num_threads):
      threads.append(threading.Thread(target=increase_x))
    for thread in threads:
      thread.start()
    for thread in threads:
      thread.join()
    # 10 --> 16 --> 22 --> 28 --> 34.
    self.assertAllClose(x.read_value(), 34.0)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()
    with debug_events_reader.DebugDataReader(self.dump_root) as reader:
      reader.update()
      exec_digests = reader.executions(digest=True)
      prev_wall_time = 1
      # Wall times of the execution digests should be non-decreasing.
      for exec_digest in exec_digests:
        self.assertGreaterEqual(exec_digest.wall_time, prev_wall_time)
        prev_wall_time = exec_digest.wall_time
      graph_exec_traces = reader.graph_execution_traces()
      executed_op_types = [trace.op_type for trace in graph_exec_traces]
      # One Mul / two ReadVariableOps per call: initial call + each thread.
      self.assertEqual(executed_op_types.count("Mul"), 1 + num_threads)
      self.assertEqual(
          executed_op_types.count("ReadVariableOp"), 2 * (1 + num_threads))
      for trace in graph_exec_traces:
        # These are all single-output tensors.
        self.assertEqual(trace.output_slot, 0)
      tensor_values = [reader.graph_execution_trace_to_tensor_value(trace)
                       for trace in graph_exec_traces]
      if tensor_debug_mode == "NO_TENSOR":
        # NO_TENSOR mode summarizes each tensor as an empty array.
        for tensor_value in tensor_values:
          self.assertAllEqual(tensor_value, [])
      elif tensor_debug_mode == "CURT_HEALTH":
        for trace in graph_exec_traces:
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          # 1st element: tensor ID; 2nd element: 0 indicating no inf or nan.
          self.assertAllClose(trace.debug_tensor_value, [tensor_id, 0])
      elif tensor_debug_mode == "CONCISE_HEALTH":
        for trace in graph_exec_traces:
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          # 1st element: tensor ID.
          # 2nd element: element count. Remaining elements: all zero because there
          # is no -inf, inf or nan.
          self.assertAllClose(trace.debug_tensor_value, [tensor_id, 1, 0, 0, 0])
      elif tensor_debug_mode == "FULL_HEALTH":
        for trace in graph_exec_traces:
          tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
          # Elements: [
          #   tensor_id (the symbolic tensor's ID from graph building),
          #   device ID (set to -1 for now),
          #   dtype, rank, element_count,
          #   neg_inf_count, pos_inf_count, nan_count
          #   neg_finite_count, zero_count, pos_finite_count]
          self.assertAllClose(
              trace.debug_tensor_value,
              [tensor_id, -1, 1, 0, 1, 0, 0, 0, 0, 0, 1])
      elif tensor_debug_mode == "SHAPE":
        for trace in graph_exec_traces:
          if trace.op_type == "Mul":
            tensor_id = reader.graph_execution_trace_to_tensor_id(trace)
            mul_value = reader.graph_execution_trace_to_tensor_value(trace)
            # 1st element: tensor_id, should be >= 0.
            # 2nd element: dtype enum value (float32).
            # 3rd element: rank.
            # 4th element: element count.
            self.assertAllClose(mul_value, [tensor_id, 1, 0, 1, 0, 0, 0, 0, 0, 0])
      elif tensor_debug_mode == "FULL_TENSOR":
        mul_values = [
            reader.graph_execution_trace_to_tensor_value(trace)
            for trace in graph_exec_traces if trace.op_type == "Mul"]
        # y * 2.0 == 6.0 in every one of the four calls.
        self.assertAllClose(mul_values, [6.0, 6.0, 6.0, 6.0])
def testMultiThreadedDumpingWithDifferentSettings(self):
  """Two threads dump to separate roots; disabling on one thread is global.

  The main thread dumps to dump_root_1 while a sub-thread dumps to
  dump_root_2.  The sub-thread calls disable_dump_debug_info() before its
  Neg op runs, so no Neg execution should appear in dump_root_2's records,
  while dump_root_1 still captures the main thread's Pow and Neg ops.
  """
  gpu_name = test_util.gpu_device_name()
  if gpu_name:
    self.skipTest("b/153671240: test is flaky on GPUs")
  dump_root_1 = os.path.join(self.dump_root, "dump_root_1")
  dump_root_2 = os.path.join(self.dump_root, "dump_root_2")
  v1 = variables.Variable(10.0, dtype=dtypes.float32)
  v2 = variables.Variable(3.0, dtype=dtypes.float32)

  def add_negative_v1_squared_to_itself():
    # Runs on the main thread with dumping enabled for the whole loop.
    writer = dumping_callback.enable_dump_debug_info(
        dump_root_1, tensor_debug_mode="FULL_TENSOR")
    # Run in a loop to facilitate interleaving between threads.
    for _ in range(3):
      v1.assign_add(-(v1 ** 2.0))
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

  def add_negative_v2_squared_to_itself():
    # Runs on the sub-thread; dumping is disabled partway through.
    writer = dumping_callback.enable_dump_debug_info(
        dump_root_2, tensor_debug_mode="FULL_TENSOR")
    v2_squared = v2 ** 2.0
    # Since dumping is disabled before the Neg op is called, no tensor data
    # should be dumped from the op, but this shouldn't affect the dumping of
    # the tensor data from the Neg op in `add_negative_v1_squared_to_itself`.
    # Both behavior is checked below.
    dumping_callback.disable_dump_debug_info()
    negative_v2_squared = -v2_squared
    v2.assign_add(negative_v2_squared)
    writer.FlushNonExecutionFiles()
    writer.FlushExecutionFiles()

  # v2 is mutated on a sub-thread.
  sub_thread = threading.Thread(target=add_negative_v2_squared_to_itself)
  sub_thread.start()
  add_negative_v1_squared_to_itself()  # v1 is mutated on the main thread.
  sub_thread.join()
  # 10 - 10 * 10 = -90.
  # -90 - (-90 * -90) = -8190.
  # -8190 - (-8190 * -8190) = -67084290.
  self.assertAllClose(v1.read_value(), -67084290.0)
  self.assertAllClose(v2.read_value(), -6.0)

  with debug_events_reader.DebugDataReader(dump_root_1) as reader:
    reader.update()
    exec_digests = reader.executions(digest=True)
    # Pow digests come from v1 ** 2.0; Neg digests from the unary minus.
    v1_squared_values = [
        reader.execution_to_tensor_values(digest)
        for digest in exec_digests if digest.op_type == "Pow"]
    negative_v1_squared_values = [
        reader.execution_to_tensor_values(digest)
        for digest in exec_digests if digest.op_type == "Neg"]
    self.assertAllClose(v1_squared_values, [[100.0], [8100.0], [67076100.0]])
    self.assertAllClose(
        negative_v1_squared_values, [[-100.0], [-8100.0], [-67076100.0]])

  with debug_events_reader.DebugDataReader(dump_root_2) as reader:
    reader.update()
    exec_digests = reader.executions(digest=True)
    executed_op_types = [digest.op_type for digest in exec_digests]
    # The Neg op ran after disable_dump_debug_info(), so it must be absent.
    self.assertNotIn("Neg", executed_op_types)
    v2_squared_values = [
        reader.execution_to_tensor_values(digest)
        for digest in exec_digests if digest.op_type == "Pow"]
    self.assertAllClose(v2_squared_values, [[9.0]])
@test_util.run_in_graph_and_eager_modes
def testNestedContextIsCapturedByGraphOpCreationHistory(self):
  """Ops in a while-loop's cond vs. body get different innermost graph IDs."""
  x = constant_op.constant(2.0, dtype=dtypes.float32)
  times = constant_op.constant(4, dtype=dtypes.int32)
  writer = dumping_callback.enable_dump_debug_info(
      self.dump_root, tensor_debug_mode="NO_TENSOR")

  @def_function.function
  def iterative_doubling(x, times):
    i = constant_op.constant(0, dtype=dtypes.int32)
    while i < times:
      x = x * 2.0 - 1.0
      i += 1
    return x

  # 2 * 2 - 1 = 3; 3 * 2 - 1 = 5; 5 * 2 - 1 = 9; 9 * 2 - 1 = 17.
  self.assertAllClose(self.evaluate(iterative_doubling(x, times)), 17.0)
  writer.FlushNonExecutionFiles()
  writer.FlushExecutionFiles()
  with debug_events_reader.DebugDataReader(self.dump_root) as reader:
    reader.update()
    # Less comes from `i < times`; Mul/Sub from `x * 2.0 - 1.0`.
    less_op_digest = reader.graph_op_digests(op_type="Less")[-1]
    mul_op_digest = reader.graph_op_digests(op_type="Mul")[-1]
    sub_op_digest = reader.graph_op_digests(op_type="Sub")[-1]
    # The Less op is from the while-loop cond context and hence should have
    # a different innermost context ID from the mul and sub ops, which are
    # both from the while-loop body context.
    self.assertNotEqual(less_op_digest.graph_id, mul_op_digest.graph_id)
    self.assertNotEqual(less_op_digest.graph_id, sub_op_digest.graph_id)
    # The Mul and Sub ops are from the same innermost context.
    self.assertEqual(mul_op_digest.graph_id, sub_op_digest.graph_id)
@parameterized.named_parameters(
    ("NoTensor", "NO_TENSOR"),
    ("Shape", "SHAPE"),
    ("FullTensor", "FULL_TENSOR"),
)
@test_util.run_in_graph_and_eager_modes
def testGraphInputTracingWorksWithConstAndPlaceholderTensors(
    self, tensor_debug_mode):
  """Graph-op digests record the input tensor names of each traced op.

  (x + 4.0) / x with x == 2.0 produces a Placeholder (the function arg),
  a Const (4.0), an AddV2 and a RealDiv; each binary op's `input_names`
  must point at the `<producer>:0` outputs of the earlier ops.
  """
  x = constant_op.constant(2.0)
  writer = dumping_callback.enable_dump_debug_info(
      self.dump_root, tensor_debug_mode=tensor_debug_mode)

  @def_function.function
  def func(x):
    return (x + constant_op.constant(4.0)) / x

  self.assertAllClose(self.evaluate(func(x)), 3.0)
  writer.FlushNonExecutionFiles()
  writer.FlushExecutionFiles()
  with debug_events_reader.DebugDataReader(self.dump_root) as reader:
    reader.update()
    graph_op_digests = reader.graph_op_digests()
    placeholder_op_name = None
    const_op_name = None
    add_op_name = None
    div_op_name = None
    for op_digest in graph_op_digests:
      if op_digest.op_type == "Placeholder":
        placeholder_op_name = op_digest.op_name
      elif op_digest.op_type == "Const":
        const_op_name = op_digest.op_name
      elif op_digest.op_type == "AddV2":
        add_op_name = op_digest.op_name
        self.assertLen(op_digest.input_names, 2)
        self.assertEqual(op_digest.input_names[0], placeholder_op_name + ":0")
        self.assertEqual(op_digest.input_names[1], const_op_name + ":0")
      elif op_digest.op_type == "RealDiv":
        # Bug fix: previously this stored the digest object itself
        # (`div_op_name = op_digest`), unlike every sibling branch which
        # stores the op *name*.  The truthiness assertion below passed
        # either way, masking the inconsistency.
        div_op_name = op_digest.op_name
        self.assertLen(op_digest.input_names, 2)
        self.assertEqual(op_digest.input_names[0], add_op_name + ":0")
        self.assertEqual(op_digest.input_names[1], placeholder_op_name + ":0")
    self.assertTrue(add_op_name)
    self.assertTrue(div_op_name)
if __name__ == "__main__":
  # The debug-dump tests above require eager execution to be switched on
  # before the test runner starts.
  ops.enable_eager_execution()
  googletest.main()
|
exchange_rate.py | from datetime import datetime
import inspect
import requests
import sys
from threading import Thread
import time
import csv
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {}
'''
{'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
'''
class ExchangeBase(PrintError):
    """Base class for fiat exchange-rate providers.

    Subclasses implement get_rates(ccy) and optionally historical_rates /
    history_ccys.  Quote and history fetches run on daemon worker threads;
    the on_quotes / on_history callbacks fire when new data arrives.
    """

    def __init__(self, on_quotes, on_history):
        self.history = {}   # ccy -> {'YYYY-MM-DD': rate}
        self.quotes = {}    # ccy -> spot rate
        self.on_quotes = on_quotes
        self.on_history = on_history

    def get_json(self, site, get_string):
        """GET https://<site><get_string> and return the decoded JSON body."""
        # APIs must have https
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent': 'Electrum'})
        return response.json()

    def get_csv(self, site, get_string):
        """GET https://<site><get_string> and return the rows as dicts."""
        url = ''.join(['https://', site, get_string])
        response = requests.request('GET', url, headers={'User-Agent': 'Electrum'})
        reader = csv.DictReader(response.content.decode().split('\n'))
        return list(reader)

    def name(self):
        """Provider name as shown in config/UI (the class name)."""
        return self.__class__.__name__

    def update_safe(self, ccy):
        """Fetch spot quotes for ccy; best-effort, never propagates errors."""
        try:
            self.print_error("getting fx quotes for", ccy)
            self.quotes = self.get_rates(ccy)
            self.print_error("received fx quotes")
        except Exception as e:
            # Narrowed from BaseException: a worker thread should swallow
            # network/API failures, not SystemExit/KeyboardInterrupt.
            self.print_error("failed fx quotes:", e)
        self.on_quotes()

    def update(self, ccy):
        """Kick off an asynchronous spot-quote refresh."""
        t = Thread(target=self.update_safe, args=(ccy,))
        # `daemon = True` replaces the deprecated setDaemon(True).
        t.daemon = True
        t.start()

    def get_historical_rates_safe(self, ccy):
        """Fetch the historical rate table for ccy; best-effort."""
        try:
            self.print_error("requesting fx history for", ccy)
            self.history[ccy] = self.historical_rates(ccy)
            self.print_error("received fx history for", ccy)
            self.on_history()
        except Exception as e:
            self.print_error("failed fx history:", e)

    def get_historical_rates(self, ccy):
        """Return cached history for ccy; start a background fetch if absent."""
        result = self.history.get(ccy)
        if not result and ccy in self.history_ccys():
            t = Thread(target=self.get_historical_rates_safe, args=(ccy,))
            t.daemon = True
            t.start()
        return result

    def history_ccys(self):
        """Currencies with historical data; none by default."""
        return []

    def historical_rate(self, ccy, d_t):
        """Rate for ccy on the date of d_t, or None if unknown."""
        return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'))

    def get_currencies(self):
        """Probe get_rates('') and return the 3/4-letter codes it quotes."""
        rates = self.get_rates('')
        return sorted([str(a) for (a, b) in rates.items()
                       if b is not None and len(a) in [3, 4]])
class CryptoCompare(ExchangeBase):
    """ZCL/USD spot rate from CryptoCompare.

    NOTE(review): the `ccy` argument is ignored — the query is hard-wired
    to fsym=ZCL / tsyms=USD and only a USD quote is ever returned.
    """
    def get_rates(self, ccy):
        json = self.get_json('min-api.cryptocompare.com',
                             "/data/pricehistorical?fsym=ZCL&tsyms=USD")
        return {'USD': Decimal(json['ZCL']['USD'])}
class CoinMarketCap(ExchangeBase):
    """ZCL spot rate from the CoinMarketCap v1 ticker API (USD only)."""
    def get_rates(self, ccy):
        # Bug fix: the '%s' placeholder was never substituted, so the literal
        # string "convert=%s" was sent to the API on every request.
        json = self.get_json('api.coinmarketcap.com',
                             "/v1/ticker/zclassic?convert=%s" % ccy)
        # Only the USD price is consumed, regardless of `ccy`.
        return {'USD': Decimal(json[0]['price_usd'])}
def dictinvert(d):
    """Invert a mapping of key -> iterable-of-values.

    Returns a dict mapping each value back to the list of keys whose
    value-list contained it, preserving encounter order.
    """
    inverted = {}
    for key, values in d.items():
        for value in values:
            inverted.setdefault(value, []).append(key)
    return inverted
def get_exchanges_and_currencies():
    """Return {exchange class name: [supported currency codes]}.

    Reads the cached currencies.json next to this module when present;
    otherwise probes every ExchangeBase subclass over the network and
    rewrites the cache.  Raises nothing for individual exchange failures.
    """
    import os, json
    path = os.path.join(os.path.dirname(__file__), 'currencies.json')
    try:
        with open(path, 'r') as f:
            return json.loads(f.read())
    except (OSError, ValueError):
        # Narrowed from a bare `except:`: only "cache missing/unreadable"
        # (OSError) or "cache corrupt" (ValueError from json) fall through
        # to the rebuild below.
        pass
    d = {}
    is_exchange = lambda obj: (inspect.isclass(obj)
                               and issubclass(obj, ExchangeBase)
                               and obj != ExchangeBase)
    exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
    for name, klass in exchanges.items():
        exchange = klass(None, None)
        try:
            d[name] = exchange.get_currencies()
        except Exception:
            # A network/API failure for one exchange must not abort the rest.
            continue
    with open(path, 'w') as f:
        f.write(json.dumps(d, indent=4, sort_keys=True))
    return d
# Built once at import time; may hit the network if currencies.json is absent.
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
    """Map each currency code to the exchanges that quote it.

    With history=True (the default), only currencies with historical-rate
    support at each exchange are considered.
    """
    if not history:
        return dictinvert(CURRENCIES)
    hist_support = {}
    for name in CURRENCIES.keys():
        exchange_class = globals()[name]
        hist_support[name] = exchange_class(None, None).history_ccys()
    return dictinvert(hist_support)
class FxThread(ThreadJob):
    """Periodic job that keeps fiat exchange-rate quotes up to date.

    Polls the configured exchange for spot quotes (and optionally
    historical rates) and exposes formatting helpers used by the UI.
    Configuration lives under the keys 'use_exchange_rate', 'use_exchange',
    'currency', 'history_rates' and 'fiat_address'.
    """

    def __init__(self, config, network):
        self.config = config
        self.network = network
        self.ccy = self.get_currency()
        # Set by history_rate() when a spot quote had to stand in for a
        # missing historical rate.
        self.history_used_spot = False
        self.ccy_combo = None
        self.hist_checkbox = None
        # Also initializes self.exchange and self.timeout (0 => refresh now).
        self.set_exchange(self.config_exchange())

    def get_currencies(self, h):
        """Sorted currency codes; h limits to exchanges with history data."""
        d = get_exchanges_by_ccy(h)
        return sorted(d.keys())

    def get_exchanges_by_ccy(self, ccy, h):
        """Exchange names quoting ccy (history-capable ones if h)."""
        d = get_exchanges_by_ccy(h)
        return d.get(ccy, [])

    def ccy_amount_str(self, amount, commas):
        """Format a fiat amount with per-currency precision (default 2)."""
        prec = CCY_PRECISIONS.get(self.ccy, 2)
        fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
        return fmt_str.format(round(amount, prec))

    def run(self):
        # This runs from the plugins thread which catches exceptions
        if self.is_enabled():
            # timeout == 0 signals "currency/exchange just changed".
            if self.timeout ==0 and self.show_history():
                self.exchange.get_historical_rates(self.ccy)
            if self.timeout <= time.time():
                # Re-poll spot quotes at most every 150 seconds.
                self.timeout = time.time() + 150
                self.exchange.update(self.ccy)

    def is_enabled(self):
        """Whether fiat conversion is enabled in the config."""
        return bool(self.config.get('use_exchange_rate'))

    def set_enabled(self, b):
        return self.config.set_key('use_exchange_rate', bool(b))

    def get_history_config(self):
        return bool(self.config.get('history_rates'))

    def set_history_config(self, b):
        self.config.set_key('history_rates', bool(b))

    def get_fiat_address_config(self):
        return bool(self.config.get('fiat_address'))

    def set_fiat_address_config(self, b):
        self.config.set_key('fiat_address', bool(b))

    def get_currency(self):
        # Use when dynamic fetching is needed
        return self.config.get('currency', 'USD')

    def config_exchange(self):
        """Name of the configured exchange (default CoinMarketCap)."""
        return self.config.get('use_exchange', 'CoinMarketCap')

    def show_history(self):
        """True when historical fiat values can and should be displayed."""
        return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()

    def set_currency(self, ccy):
        self.ccy = ccy
        self.config.set_key('currency', ccy, True)
        self.timeout = 0 # Because self.ccy changes
        self.on_quotes()

    def set_exchange(self, name):
        """Switch provider by class name; falls back to CoinMarketCap."""
        class_ = globals().get(name, CoinMarketCap)
        self.print_error("using exchange", name)
        if self.config_exchange() != name:
            self.config.set_key('use_exchange', name, True)
        self.exchange = class_(self.on_quotes, self.on_history)
        # A new exchange means new fx quotes, initially empty. Force
        # a quote refresh
        self.timeout = 0

    def on_quotes(self):
        self.network.trigger_callback('on_quotes')

    def on_history(self):
        self.network.trigger_callback('on_history')

    def exchange_rate(self):
        '''Returns None, or the exchange rate as a Decimal'''
        rate = self.exchange.quotes.get(self.ccy)
        if rate:
            return Decimal(rate)

    def format_amount_and_units(self, btc_balance):
        """'<fiat value> <ccy>' for a satoshi balance, or '' with no rate."""
        rate = self.exchange_rate()
        return '' if rate is None else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)

    def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
        """Status-bar text for one base unit, or a no-rate placeholder."""
        rate = self.exchange_rate()
        return _(" (No exchange rate available)") if rate is None else " 1 %s=%s %s" % (base_unit,
            self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)

    def value_str(self, satoshis, rate):
        """Fiat string for a satoshi amount at the given rate."""
        if satoshis is None:  # Can happen with incomplete history
            return _("Unknown")
        if rate:
            value = Decimal(satoshis) / COIN * Decimal(rate)
            return "%s" % (self.ccy_amount_str(value, True))
        return _("No data")

    def history_rate(self, d_t):
        """Historical rate for datetime d_t, falling back to spot quotes."""
        rate = self.exchange.historical_rate(self.ccy, d_t)
        # Frequently there is no rate for today, until tomorrow :)
        # Use spot quotes in that case
        if rate is None and (datetime.today().date() - d_t.date()).days <= 2:
            rate = self.exchange.quotes.get(self.ccy)
            self.history_used_spot = True
        return rate

    def historical_value_str(self, satoshis, d_t):
        """Fiat string for a satoshi amount valued at datetime d_t."""
        rate = self.history_rate(d_t)
        return self.value_str(satoshis, rate)
|
test_core.py | from datetime import timedelta
from datetime import timedelta
from functools import partial
import itertools
import json
import operator
from operator import add
import os
import sys
from time import sleep, time

import pytest
from tornado import gen
from tornado.ioloop import IOLoop
from tornado.queues import Queue

import streamz as sz
from streamz import RefCounter, Stream
from streamz.sources import sink_to_file
from streamz.utils_test import (inc, double, gen_test, tmpfile, captured_logger,  # noqa: F401
                                clean, await_for, metadata, wait_for)  # noqa: F401
from distributed.utils_test import loop  # noqa: F401
def test_basic():
    """Two map branches off one source; scan accumulates on one of them."""
    stream = Stream()
    incremented = stream.map(inc)
    doubled = stream.map(double)
    running_sum = incremented.scan(add)
    sums = running_sum.sink_to_list()
    doubles = doubled.sink_to_list()
    for value in range(4):
        stream.emit(value)
    assert sums == [1, 3, 6, 10]
    assert doubles == [0, 2, 4, 6]
def test_no_output():
    # emit() on a stream with no asynchronous downstream work returns None.
    assert Stream().emit(1) is None
def test_scan():
    """scan with returns_state=True: the callable returns (state, emitted)."""
    source = Stream()

    def accumulate(state, item):
        total = state + item
        return total, total

    results = source.scan(accumulate, returns_state=True).sink_to_list()
    for item in range(3):
        source.emit(item)
    assert results == [0, 1, 3]
def test_kwargs():
source = Stream()
def f(acc, x, y=None):
acc = acc + x + y
return acc
L = source.scan(f, y=10).sink_to_list()
for i in range(3):
source.emit(i)
assert L == [0, 11, 23]
def test_filter():
source = Stream()
L = source.filter(lambda x: x % 2 == 0).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [0, 2, 4, 6, 8]
def test_filter_args():
source = Stream()
L = source.filter(lambda x, n: x % n == 0, 2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [0, 2, 4, 6, 8]
def test_filter_kwargs():
source = Stream()
L = source.filter(lambda x, n=1: x % n == 0, n=2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [0, 2, 4, 6, 8]
def test_filter_none():
source = Stream()
L = source.filter(None).sink_to_list()
for i in range(10):
source.emit(i % 3)
assert L == [1, 2, 1, 2, 1, 2]
def test_map():
def add(x=0, y=0):
return x + y
source = Stream()
L = source.map(add, y=10).sink_to_list()
source.emit(1)
assert L[0] == 11
def test_map_args():
source = Stream()
L = source.map(operator.add, 10).sink_to_list()
source.emit(1)
assert L == [11]
def test_starmap():
def add(x=0, y=0):
return x + y
source = Stream()
L = source.starmap(add).sink_to_list()
source.emit((1, 10))
assert L[0] == 11
def test_remove():
source = Stream()
L = source.remove(lambda x: x % 2 == 0).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [1, 3, 5, 7, 9]
def test_partition():
    """partition(2) groups consecutive elements into pairs."""
    source = Stream()
    observed = source.partition(2).sink_to_list()
    for value in range(10):
        source.emit(value)
    assert observed == [(0, 1), (2, 3), (4, 5), (6, 7), (8, 9)]
@pytest.mark.parametrize(
"n,key,keep,elements,exp_result",
[
(3, sz.identity, "first", [1, 2, 1, 3, 1, 3, 3, 2], [(1, 2, 3), (1, 3, 2)]),
(3, sz.identity, "last", [1, 2, 1, 3, 1, 3, 3, 2], [(2, 1, 3), (1, 3, 2)]),
(
3,
len,
"last",
["f", "fo", "f", "foo", "f", "foo", "foo", "fo"],
[("fo", "f", "foo"), ("f", "foo", "fo")],
),
(
2,
"id",
"first",
[{"id": 0, "foo": "bar"}, {"id": 0, "foo": "baz"}, {"id": 1, "foo": "bat"}],
[({"id": 0, "foo": "bar"}, {"id": 1, "foo": "bat"})],
),
(
2,
"id",
"last",
[{"id": 0, "foo": "bar"}, {"id": 0, "foo": "baz"}, {"id": 1, "foo": "bat"}],
[({"id": 0, "foo": "baz"}, {"id": 1, "foo": "bat"})],
),
]
)
def test_partition_unique(n, key, keep, elements, exp_result):
source = Stream()
L = source.partition_unique(n, key, keep).sink_to_list()
for ele in elements:
source.emit(ele)
assert L == exp_result
def test_partition_timeout():
source = Stream()
L = source.partition(10, timeout=0.01).sink_to_list()
for i in range(5):
source.emit(i)
sleep(0.1)
assert L == [(0, 1, 2, 3, 4)]
def test_partition_timeout_cancel():
source = Stream()
L = source.partition(3, timeout=0.1).sink_to_list()
for i in range(3):
source.emit(i)
sleep(0.09)
source.emit(3)
sleep(0.02)
assert L == [(0, 1, 2)]
sleep(0.09)
assert L == [(0, 1, 2), (3,)]
def test_partition_key():
source = Stream()
L = source.partition(2, key=0).sink_to_list()
for i in range(4):
source.emit((i % 2, i))
assert L == [((0, 0), (0, 2)), ((1, 1), (1, 3))]
def test_partition_key_callable():
source = Stream()
L = source.partition(2, key=lambda x: x % 2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [(0, 2), (1, 3), (4, 6), (5, 7)]
def test_partition_size_one():
source = Stream()
source.partition(1, timeout=.01).sink(lambda x: None)
for i in range(10):
source.emit(i)
def test_sliding_window():
source = Stream()
L = source.sliding_window(2).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [(0, ), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
(5, 6), (6, 7), (7, 8), (8, 9)]
L = source.sliding_window(2, return_partial=False).sink_to_list()
for i in range(10):
source.emit(i)
assert L == [(0, 1), (1, 2), (2, 3), (3, 4), (4, 5),
(5, 6), (6, 7), (7, 8), (8, 9)]
def test_sliding_window_ref_counts():
source = Stream()
_ = source.sliding_window(2)
r_prev = RefCounter()
source.emit(-2)
source.emit(-1, metadata=[{'ref': r_prev}])
for i in range(10):
r = RefCounter()
assert r_prev.count == 1
source.emit(i, metadata=[{'ref': r}])
assert r_prev.count == 0
assert r.count == 1
r_prev = r
def test_sliding_window_metadata():
source = Stream()
L = metadata(source.sliding_window(2)).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
source.emit(2, metadata=[{'v': 2}])
source.emit(3, metadata=[{'v': 3}])
assert L == [
[{'v': 1}], # First emit, because 0 has no metadata
[{'v': 1}, {'v': 2}], # Second emit
[{'v': 2}, {'v': 3}] # Third emit
]
@gen_test()
def test_backpressure():
q = Queue(maxsize=2)
source = Stream(asynchronous=True)
source.map(inc).scan(add, start=0).sink(q.put)
@gen.coroutine
def read_from_q():
while True:
yield q.get()
yield gen.sleep(0.1)
IOLoop.current().add_callback(read_from_q)
start = time()
for i in range(5):
yield source.emit(i)
end = time()
assert end - start >= 0.2
@gen_test()
def test_timed_window_unique():
tests = [
(0.05, sz.identity, "first", [1, 2, 1, 3, 1, 3, 3, 2], [(1, 2, 3)]),
(0.05, sz.identity, "last", [1, 2, 1, 3, 1, 3, 3, 2], [(1, 3, 2)]),
(
0.05,
len,
"last",
["f", "fo", "f", "foo", "f", "foo", "foo", "fo"],
[("f", "foo", "fo")],
),
(
0.05,
"id",
"first",
[{"id": 0, "foo": "bar"}, {"id": 1, "foo": "bat"}, {"id": 0, "foo": "baz"}],
[({"id": 0, "foo": "bar"}, {"id": 1, "foo": "bat"})],
),
(
0.05,
"id",
"last",
[{"id": 0, "foo": "bar"}, {"id": 1, "foo": "bat"}, {"id": 0, "foo": "baz"}],
[({"id": 1, "foo": "bat"}, {"id": 0, "foo": "baz"})],
),
]
for interval, key, keep, elements, exp_result in tests:
source = Stream(asynchronous=True)
a = source.timed_window_unique(interval, key, keep)
assert a.loop is IOLoop.current()
L = a.sink_to_list()
for ele in elements:
yield source.emit(ele)
yield gen.sleep(a.interval)
assert L
assert all(wi in elements for window in L for wi in window)
assert sum(1 for window in L for _ in window) <= len(elements)
assert L == exp_result
yield gen.sleep(a.interval)
assert not L[-1]
@gen_test()
def test_timed_window():
source = Stream(asynchronous=True)
a = source.timed_window(0.01)
assert a.loop is IOLoop.current()
L = a.sink_to_list()
for i in range(10):
yield source.emit(i)
yield gen.sleep(0.004)
yield gen.sleep(a.interval)
assert L
assert sum(L, []) == list(range(10))
assert all(len(x) <= 3 for x in L)
assert any(len(x) >= 2 for x in L)
yield gen.sleep(0.1)
assert not L[-1]
@gen_test()
def test_timed_window_ref_counts():
source = Stream(asynchronous=True)
_ = source.timed_window(0.01)
ref1 = RefCounter()
assert str(ref1) == "<RefCounter count=0>"
source.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
yield gen.sleep(0.05)
ref2 = RefCounter()
source.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
def test_mixed_async():
s1 = Stream(asynchronous=False)
with pytest.raises(ValueError):
Stream(asynchronous=True, upstream=s1)
@gen_test()
def test_timed_window_metadata():
source = Stream()
L = metadata(source.timed_window(0.06)).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
yield gen.sleep(0.1)
source.emit(2, metadata=[{'v': 2}])
source.emit(3, metadata=[{'v': 3}])
yield gen.sleep(0.1)
assert L == [
[{'v': 1}], # first emit because 0 has no metadata
[{'v': 2}, {'v': 3}] # second emit
]
def test_timed_window_timedelta(clean): # noqa: F811
pytest.importorskip('pandas')
source = Stream(asynchronous=True)
a = source.timed_window('10ms')
assert a.interval == 0.010
@gen_test()
def test_timed_window_backpressure():
q = Queue(maxsize=1)
source = Stream(asynchronous=True)
source.timed_window(0.01).sink(q.put)
@gen.coroutine
def read_from_q():
while True:
yield q.get()
yield gen.sleep(0.1)
IOLoop.current().add_callback(read_from_q)
start = time()
for i in range(5):
yield source.emit(i)
yield gen.sleep(0.01)
stop = time()
assert stop - start > 0.2
def test_sink_to_file():
with tmpfile() as fn:
source = Stream()
with sink_to_file(fn, source) as f:
source.emit('a')
source.emit('b')
with open(fn) as f:
data = f.read()
assert data == 'a\nb\n'
@gen_test()
def test_counter():
counter = itertools.count()
source = Stream.from_periodic(lambda: next(counter), 0.001, asynchronous=True,
start=True)
L = source.sink_to_list()
yield gen.sleep(0.05)
assert L
@gen_test()
def test_rate_limit():
source = Stream(asynchronous=True)
L = source.rate_limit(0.05).sink_to_list()
start = time()
for i in range(5):
yield source.emit(i)
stop = time()
assert stop - start > 0.2
assert len(L) == 5
@gen_test()
def test_delay():
source = Stream(asynchronous=True)
L = source.delay(0.02).sink_to_list()
for i in range(5):
yield source.emit(i)
assert not L
yield gen.sleep(0.04)
assert len(L) < 5
yield gen.sleep(0.1)
assert len(L) == 5
@gen_test()
def test_delay_ref_counts():
source = Stream(asynchronous=True)
_ = source.delay(0.01)
refs = []
for i in range(5):
r = RefCounter()
refs.append(r)
source.emit(i, metadata=[{'ref': r}])
assert all(r.count == 1 for r in refs)
yield gen.sleep(0.05)
assert all(r.count == 0 for r in refs)
@gen_test()
def test_buffer():
source = Stream(asynchronous=True)
L = source.map(inc).buffer(10).map(inc).rate_limit(0.05).sink_to_list()
start = time()
for i in range(10):
yield source.emit(i)
stop = time()
assert stop - start < 0.01
assert not L
start = time()
for i in range(5):
yield source.emit(i)
stop = time()
assert L
assert stop - start > 0.04
@gen_test()
def test_buffer_ref_counts():
source = Stream(asynchronous=True)
_ = source.buffer(5)
refs = []
for i in range(5):
r = RefCounter()
refs.append(r)
source.emit(i, metadata=[{'ref': r}])
assert all(r.count == 1 for r in refs)
yield gen.sleep(0.05)
assert all(r.count == 0 for r in refs)
def test_zip():
a = Stream()
b = Stream()
c = sz.zip(a, b)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
b.emit('b')
assert L == [(1, 'a'), (2, 'b')]
d = Stream()
# test zip from the object itself
# zip 3 streams together
e = a.zip(b, d)
L2 = e.sink_to_list()
a.emit(1)
b.emit(2)
d.emit(3)
assert L2 == [(1, 2, 3)]
def test_zip_literals():
a = Stream()
b = Stream()
c = sz.zip(a, 123, b)
L = c.sink_to_list()
a.emit(1)
b.emit(2)
assert L == [(1, 123, 2)]
a.emit(4)
b.emit(5)
assert L == [(1, 123, 2),
(4, 123, 5)]
def test_zip_same():
a = Stream()
b = a.zip(a)
L = b.sink_to_list()
a.emit(1)
a.emit(2)
assert L == [(1, 1), (2, 2)]
def test_combine_latest():
a = Stream()
b = Stream()
c = a.combine_latest(b)
d = a.combine_latest(b, emit_on=[a, b])
L = c.sink_to_list()
L2 = d.sink_to_list()
a.emit(1)
a.emit(2)
b.emit('a')
a.emit(3)
b.emit('b')
assert L == [(2, 'a'), (3, 'a'), (3, 'b')]
assert L2 == [(2, 'a'), (3, 'a'), (3, 'b')]
def test_combine_latest_emit_on():
a = Stream()
b = Stream()
c = a.combine_latest(b, emit_on=a)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(2, 'a'), (3, 'a'), (4, 'b')]
def test_combine_latest_emit_on_stream():
a = Stream()
b = Stream()
c = a.combine_latest(b, emit_on=0)
L = c.sink_to_list()
a.emit(1)
b.emit('a')
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(2, 'a'), (3, 'a'), (4, 'b')]
def test_combine_latest_ref_counts():
a = Stream()
b = Stream()
_ = a.combine_latest(b)
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
# The new value kicks out the old value
ref2 = RefCounter()
a.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
# The value on stream a is still retained and the value on stream b is new
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref2.count == 1
assert ref3.count == 1
def test_combine_latest_metadata():
a = Stream()
b = Stream()
L = metadata(a.combine_latest(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
b.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # first emit when 2 is introduced
[{'v': 1}], # 3 has no metadata but it replaces the value on 'b'
[{'v': 1}, {'v': 4}] # 4 replaces the value without metadata on 'b'
]
@gen_test()
def test_zip_timeout():
a = Stream(asynchronous=True)
b = Stream(asynchronous=True)
c = sz.zip(a, b, maxsize=2)
L = c.sink_to_list()
a.emit(1)
a.emit(2)
future = a.emit(3)
with pytest.raises(gen.TimeoutError):
yield gen.with_timeout(timedelta(seconds=0.01), future)
b.emit('a')
yield future
assert L == [(1, 'a')]
def test_zip_ref_counts():
a = Stream()
b = Stream()
_ = a.zip(b)
# The first value in a becomes buffered
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
# The second value in a also becomes buffered
ref2 = RefCounter()
a.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 1
assert ref2.count == 1
# All emitted values are removed from the buffer
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref1.count == 0
assert ref2.count == 1 # still in the buffer
assert ref3.count == 0
def test_zip_metadata():
a = Stream()
b = Stream()
L = metadata(a.zip(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
a.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # first emit when 2 is introduced
[{'v': 4}] # second emit when 4 is introduced, and 3 has no metadata
]
def test_frequencies():
source = Stream()
L = source.frequencies().sink_to_list()
source.emit('a')
source.emit('b')
source.emit('a')
assert L[-1] == {'a': 2, 'b': 1}
def test_flatten():
source = Stream()
L = source.flatten().sink_to_list()
source.emit([1, 2, 3])
source.emit([4, 5])
source.emit([6, 7, 8])
assert L == [1, 2, 3, 4, 5, 6, 7, 8]
def test_unique():
    """unique() drops values that have already been emitted."""
    source = Stream()
    seen = source.unique().sink_to_list()
    for value in (1, 2, 1):
        source.emit(value)
    assert seen == [1, 2]
def test_unique_key():
source = Stream()
L = source.unique(key=lambda x: x % 2, maxsize=1).sink_to_list()
source.emit(1)
source.emit(2)
source.emit(4)
source.emit(6)
source.emit(3)
assert L == [1, 2, 3]
def test_unique_metadata():
source = Stream()
L = metadata(source.unique()).flatten().sink_to_list()
for i in range(5):
source.emit(i, metadata=[{'v': i}])
assert L == [{'v': i} for i in range(5)]
def test_unique_history():
source = Stream()
s = source.unique(maxsize=2)
s2 = source.unique(maxsize=2, hashable=False)
L = s.sink_to_list()
L2 = s2.sink_to_list()
source.emit(1)
source.emit(2)
source.emit(1)
source.emit(2)
source.emit(1)
source.emit(2)
assert L == [1, 2]
assert L == L2
source.emit(3)
source.emit(2)
assert L == [1, 2, 3]
assert L == L2
source.emit(1)
assert L == [1, 2, 3, 1]
assert L == L2
# update 2 position
source.emit(2)
# knock out 1
source.emit(3)
# update 2 position
source.emit(2)
assert L == [1, 2, 3, 1, 3]
assert L == L2
def test_unique_history_dict():
source = Stream()
s = source.unique(maxsize=2, hashable=False)
L = s.sink_to_list()
a = {'hi': 'world'}
b = {'hi': 'bar'}
c = {'foo': 'bar'}
source.emit(a)
source.emit(b)
source.emit(a)
source.emit(b)
source.emit(a)
source.emit(b)
assert L == [a, b]
source.emit(c)
source.emit(b)
assert L == [a, b, c]
source.emit(a)
assert L == [a, b, c, a]
def test_union():
a = Stream()
b = Stream()
c = Stream()
L = a.union(b, c).sink_to_list()
a.emit(1)
assert L == [1]
b.emit(2)
assert L == [1, 2]
a.emit(3)
assert L == [1, 2, 3]
c.emit(4)
assert L == [1, 2, 3, 4]
def test_pluck():
a = Stream()
L = a.pluck(1).sink_to_list()
a.emit([1, 2, 3])
assert L == [2]
a.emit([4, 5, 6, 7, 8, 9])
assert L == [2, 5]
with pytest.raises(IndexError):
a.emit([1])
def test_pluck_list():
a = Stream()
L = a.pluck([0, 2]).sink_to_list()
a.emit([1, 2, 3])
assert L == [(1, 3)]
a.emit([4, 5, 6, 7, 8, 9])
assert L == [(1, 3), (4, 6)]
with pytest.raises(IndexError):
a.emit([1])
def test_collect():
source1 = Stream()
source2 = Stream()
collector = source1.collect()
L = collector.sink_to_list()
source2.sink(collector.flush)
source1.emit(1)
source1.emit(2)
assert L == []
source2.emit('anything') # flushes collector
assert L == [(1, 2)]
source2.emit('anything')
assert L == [(1, 2), ()]
source1.emit(3)
assert L == [(1, 2), ()]
source2.emit('anything')
assert L == [(1, 2), (), (3,)]
def test_collect_ref_counts():
source = Stream()
collector = source.collect()
refs = []
for i in range(10):
r = RefCounter()
refs.append(r)
source.emit(i, metadata=[{'ref': r}])
assert all(r.count == 1 for r in refs)
collector.flush()
assert all(r.count == 0 for r in refs)
def test_collect_metadata():
source = Stream()
collector = source.collect()
L = metadata(collector).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
source.emit(2, metadata=[{'v': 2}])
collector.flush()
source.emit(3, metadata=[{'v': 3}])
source.emit(4, metadata=[{'v': 4}])
collector.flush()
assert L == [
[{'v': 1}, {'v': 2}], # Flush 0-2, but 0 has no metadata
[{'v': 3}, {'v': 4}] # Flush the rest
]
def test_map_str():
def add(x=0, y=0):
return x + y
source = Stream()
s = source.map(add, y=10)
assert str(s) == '<map: add>'
def test_no_ipywidget_repr(monkeypatch, capsys):
pytest.importorskip("ipywidgets")
import ipywidgets
source = Stream()
# works by side-affect of display()
source._ipython_display_()
assert "Output()" in capsys.readouterr().out
def get(*_, **__):
raise ImportError
monkeypatch.setattr(ipywidgets.Output, "__init__", get)
out = source._ipython_display_()
assert "Stream" in capsys.readouterr().out
def test_filter_str():
def iseven(x):
return x % 2 == 0
source = Stream()
s = source.filter(iseven)
assert str(s) == '<filter: iseven>'
def test_timed_window_str(clean): # noqa: F811
source = Stream()
s = source.timed_window(.05)
assert str(s) == '<timed_window: 0.05>'
def test_partition_str():
source = Stream()
s = source.partition(2)
assert str(s) == '<partition: 2>'
def test_partition_ref_counts():
source = Stream()
_ = source.partition(2)
for i in range(10):
r = RefCounter()
source.emit(i, metadata=[{'ref': r}])
if i % 2 == 0:
assert r.count == 1
else:
assert r.count == 0
def test_partition_metadata():
source = Stream()
L = metadata(source.partition(2)).sink_to_list()
source.emit(0)
source.emit(1, metadata=[{'v': 1}])
source.emit(2, metadata=[{'v': 2}])
source.emit(3, metadata=[{'v': 3}])
assert L == [
[{'v': 1}], # first emit when 1 is introduced. 0 has no metadata
[{'v': 2}, {'v': 3}] # second emit
]
def test_stream_name_str():
source = Stream(stream_name='this is not a stream')
assert str(source) == '<this is not a stream; Stream>'
def test_zip_latest():
a = Stream()
b = Stream()
c = a.zip_latest(b)
d = a.combine_latest(b, emit_on=a)
L = c.sink_to_list()
L2 = d.sink_to_list()
a.emit(1)
a.emit(2)
b.emit('a')
b.emit('b')
a.emit(3)
assert L == [(1, 'a'), (2, 'a'), (3, 'b')]
assert L2 == [(3, 'b')]
def test_zip_latest_reverse():
a = Stream()
b = Stream()
c = a.zip_latest(b)
L = c.sink_to_list()
b.emit('a')
a.emit(1)
a.emit(2)
a.emit(3)
b.emit('b')
a.emit(4)
assert L == [(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')]
def test_triple_zip_latest():
from streamz.core import Stream
s1 = Stream()
s2 = Stream()
s3 = Stream()
s_simple = s1.zip_latest(s2, s3)
L_simple = s_simple.sink_to_list()
s1.emit(1)
s2.emit('I')
s2.emit("II")
s1.emit(2)
s2.emit("III")
s3.emit('a')
s3.emit('b')
s1.emit(3)
assert L_simple == [(1, 'III', 'a'), (2, 'III', 'a'), (3, 'III', 'b')]
def test_zip_latest_ref_counts():
a = Stream()
b = Stream()
_ = a.zip_latest(b)
ref1 = RefCounter()
a.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1 # Retained until stream b has a value
# The lossless stream is never retained if all upstreams have a value
ref2 = RefCounter()
b.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
# Kick out the stream b value and verify it has zero references
ref3 = RefCounter()
b.emit(3, metadata=[{'ref': ref3}])
assert ref2.count == 0
assert ref3.count == 1
# Verify the lossless value is not retained, but the lossy value is
ref4 = RefCounter()
a.emit(3, metadata=[{'ref': ref4}])
assert ref3.count == 1
assert ref4.count == 0
def test_zip_latest_metadata():
a = Stream()
b = Stream()
L = metadata(a.zip_latest(b)).sink_to_list()
a.emit(1, metadata=[{'v': 1}])
b.emit(2, metadata=[{'v': 2}])
a.emit(3)
b.emit(4, metadata=[{'v': 4}])
assert L == [
[{'v': 1}, {'v': 2}], # the first emit when 2 is introduced
[{'v': 2}] # 3 has no metadata
]
def test_connect():
source_downstream = Stream()
# connect assumes this default behaviour
# of stream initialization
assert not source_downstream.downstreams
assert source_downstream.upstreams == []
# initialize the second stream to connect to
source_upstream = Stream()
sout = source_downstream.map(lambda x : x + 1)
L = list()
sout = sout.map(L.append)
source_upstream.connect(source_downstream)
source_upstream.emit(2)
source_upstream.emit(4)
assert L == [3, 5]
def test_multi_connect():
source0 = Stream()
source1 = Stream()
source_downstream = source0.union(source1)
# connect assumes this default behaviour
# of stream initialization
assert not source_downstream.downstreams
# initialize the second stream to connect to
source_upstream = Stream()
sout = source_downstream.map(lambda x : x + 1)
L = list()
sout = sout.map(L.append)
source_upstream.connect(source_downstream)
source_upstream.emit(2)
source_upstream.emit(4)
assert L == [3, 5]
def test_disconnect():
source = Stream()
upstream = Stream()
L = upstream.sink_to_list()
source.emit(1)
assert L == []
source.connect(upstream)
source.emit(2)
source.emit(3)
assert L == [2, 3]
source.disconnect(upstream)
source.emit(4)
assert L == [2, 3]
def test_gc():
    """A downstream node is detached once its last reference is collected.

    Bug fix: the original wait loop had no timeout in its condition, so a
    regression (node never detached) would hang the test run forever; the
    loop is now bounded and followed by an explicit assertion.
    """
    source = Stream()
    L = []
    node = source.map(L.append)
    source.emit(1)
    assert L == [1]

    del node
    import gc
    gc.collect()
    # Bounded wait: detachment may happen asynchronously via weakref callbacks.
    deadline = time() + 1
    while source.downstreams and time() < deadline:
        sleep(0.01)
    assert not source.downstreams

    # With no live downstream, emits must no longer reach the old sink.
    source.emit(2)
    assert L == [1]
@gen_test()
def test_from_file():
    """from_textfile emits existing lines on start() and then tails the file.

    Bug fix: the tail-read wait loop was unbounded and its 2-second budget
    was only asserted after the loop exited, so a regression hung the suite
    instead of failing; the timeout check now runs inside the loop.
    """
    with tmpfile() as fn:
        with open(fn, 'wt') as f:
            f.write('{"x": 1, "y": 2}\n')
            f.write('{"x": 2, "y": 2}\n')
            f.write('{"x": 3, "y": 2}\n')
            f.flush()

            source = Stream.from_textfile(fn, poll_interval=0.010,
                                          asynchronous=True, start=False)
            L = source.map(json.loads).pluck('x').sink_to_list()

            # Nothing flows until start() is called.
            assert L == []

            source.start()
            yield await_for(lambda: len(L) == 3, timeout=5)

            assert L == [1, 2, 3]

            f.write('{"x": 4, "y": 2}\n')
            f.write('{"x": 5, "y": 2}\n')
            f.flush()
            start = time()
            while L != [1, 2, 3, 4, 5]:
                yield gen.sleep(0.01)
                assert time() < start + 2  # reads within 2s
@gen_test()
def test_from_file_end():
with tmpfile() as fn:
with open(fn, 'wt') as f:
f.write('data1\n')
f.flush()
source = Stream.from_textfile(fn, poll_interval=0.010,
start=False, from_end=True)
out = source.sink_to_list()
source.start()
assert out == []
yield await_for(lambda: source.started, 2, period=0.02)
f.write('data2\n')
f.flush()
yield await_for(lambda: out == ['data2\n'], timeout=5, period=0.1)
@gen_test()
def test_filenames():
with tmpfile() as fn:
os.mkdir(fn)
with open(os.path.join(fn, 'a'), 'w'):
pass
with open(os.path.join(fn, 'b'), 'w'):
pass
source = Stream.filenames(fn, asynchronous=True)
L = source.sink_to_list()
source.start()
while len(L) < 2:
yield gen.sleep(0.01)
assert L == [os.path.join(fn, x) for x in ['a', 'b']]
with open(os.path.join(fn, 'c'), 'w'):
pass
while len(L) < 3:
yield gen.sleep(0.01)
assert L == [os.path.join(fn, x) for x in ['a', 'b', 'c']]
def test_docstrings():
for s in [Stream, Stream()]:
assert 'every element' in s.map.__doc__
assert s.map.__name__ == 'map'
assert 'predicate' in s.filter.__doc__
assert s.filter.__name__ == 'filter'
def test_subclass():
class NewStream(Stream):
pass
@NewStream.register_api()
class foo(NewStream):
pass
assert hasattr(NewStream, 'map')
assert hasattr(NewStream(), 'map')
assert hasattr(NewStream, 'foo')
assert hasattr(NewStream(), 'foo')
assert not hasattr(Stream, 'foo')
assert not hasattr(Stream(), 'foo')
@gen_test()
def test_latest():
source = Stream(asynchronous=True)
L = []
@gen.coroutine
def slow_write(x):
yield gen.sleep(0.050)
L.append(x)
s = source.map(inc).latest().map(slow_write) # noqa: F841
source.emit(1)
yield gen.sleep(0.010)
source.emit(2)
source.emit(3)
start = time()
while len(L) < 2:
yield gen.sleep(0.01)
assert time() < start + 3
assert L == [2, 4]
yield gen.sleep(0.060)
assert L == [2, 4]
def test_latest_ref_counts():
source = Stream()
_ = source.latest()
ref1 = RefCounter()
source.emit(1, metadata=[{'ref': ref1}])
assert ref1.count == 1
ref2 = RefCounter()
source.emit(2, metadata=[{'ref': ref2}])
assert ref1.count == 0
assert ref2.count == 1
def test_destroy():
source = Stream()
s = source.map(inc)
L = s.sink_to_list()
source.emit(1)
assert L == [2]
s.destroy()
assert not list(source.downstreams)
assert not s.upstreams
source.emit(2)
assert L == [2]
def dont_test_stream_kwargs(clean):  # noqa: F811
    ''' Test the good and bad kwargs for the stream
    Currently just stream_name

    Bug fix: the original unconditionally executed ``del sout`` after the
    pytest.raises block, but ``sout`` had already been deleted and the failing
    constructor never rebound it, so that line raised NameError whenever this
    test was enabled.
    '''
    test_name = "some test name"
    sin = Stream(stream_name=test_name)
    sin2 = Stream()

    assert sin.name == test_name
    # when not defined, should be None
    assert sin2.name is None

    # add new core methods here, initialized
    # these should be functions, use partial to partially initialize them
    # (if they require more arguments)
    streams = [
        # some filter kwargs, so we comment them out
        partial(sin.map, lambda x: x),
        partial(sin.accumulate, lambda x1, x2: x1),
        partial(sin.filter, lambda x: True),
        partial(sin.partition, 2),
        partial(sin.sliding_window, 2),
        partial(sin.timed_window, .01),
        partial(sin.rate_limit, .01),
        partial(sin.delay, .02),
        partial(sin.buffer, 2),
        partial(sin.zip, sin2),
        partial(sin.combine_latest, sin2),
        sin.frequencies,
        sin.flatten,
        sin.unique,
        sin.union,
        partial(sin.pluck, 0),
        sin.collect,
    ]

    good_kwargs = dict(stream_name=test_name)
    bad_kwargs = dict(foo="bar")
    for s in streams:
        # try good kwargs
        sout = s(**good_kwargs)
        assert sout.name == test_name
        del sout

        # bad kwargs must be rejected; sout stays unbound afterwards, so no
        # further del is needed (or possible) here
        with pytest.raises(TypeError):
            sout = s(**bad_kwargs)  # noqa: F841
        sin.emit(1)
        # need a second emit for accumulate
        sin.emit(1)

    # verify that sout is properly deleted each time by emitting once into sin
    # and not getting TypeError
    # garbage collect and then try
    import gc
    gc.collect()
    sin.emit(1)
@pytest.fixture
def thread(loop):  # noqa: F811
    """Run *loop* in a daemon thread and block until the loop is running."""
    from threading import Thread, Event
    worker = Thread(target=loop.start)
    worker.daemon = True
    worker.start()
    # A callback scheduled on the loop only fires once it is live.
    started = Event()
    loop.add_callback(started.set)
    started.wait()
    return worker
def test_percolate_loop_information(clean): # noqa: F811
source = Stream()
assert not source.loop
s = source.timed_window(0.5)
assert source.loop is s.loop
def test_separate_thread_without_time(loop, thread): # noqa: F811
assert thread.is_alive()
source = Stream(loop=loop)
L = source.map(inc).sink_to_list()
for i in range(10):
source.emit(i)
assert L[-1] == i + 1
def test_separate_thread_with_time(clean): # noqa: F811
L = []
@gen.coroutine
def slow_write(x):
yield gen.sleep(0.1)
L.append(x)
source = Stream(asynchronous=False)
source.map(inc).sink(slow_write)
start = time()
source.emit(1)
stop = time()
assert stop - start > 0.1
assert L == [2]
def test_execution_order():
L = []
for i in range(5):
s = Stream()
b = s.pluck(1)
a = s.pluck(0)
li = a.combine_latest(b, emit_on=a).sink_to_list()
z = [(1, 'red'), (2, 'blue'), (3, 'green')]
for zz in z:
s.emit(zz)
L.append((li, ))
for ll in L:
assert ll == L[0]
L2 = []
for i in range(5):
s = Stream()
a = s.pluck(0)
b = s.pluck(1)
li = a.combine_latest(b, emit_on=a).sink_to_list()
z = [(1, 'red'), (2, 'blue'), (3, 'green')]
for zz in z:
s.emit(zz)
L2.append((li,))
for ll, ll2 in zip(L, L2):
assert ll2 == L2[0]
assert ll != ll2
@gen_test()
def test_map_errors_log():
a = Stream(asynchronous=True)
b = a.delay(0.001).map(lambda x: 1 / x) # noqa: F841
with captured_logger('streamz') as logger:
a._emit(0)
yield gen.sleep(0.1)
out = logger.getvalue()
assert 'ZeroDivisionError' in out
def test_map_errors_raises():
a = Stream()
b = a.map(lambda x: 1 / x) # noqa: F841
with pytest.raises(ZeroDivisionError):
a.emit(0)
@gen_test()
def test_accumulate_errors_log():
a = Stream(asynchronous=True)
b = a.delay(0.001).accumulate(lambda x, y: x / y, with_state=True) # noqa: F841
with captured_logger('streamz') as logger:
a._emit(1)
a._emit(0)
yield gen.sleep(0.1)
out = logger.getvalue()
assert 'ZeroDivisionError' in out
def test_accumulate_errors_raises():
    """Synchronous accumulate propagates the accumulator's exception to emit()."""
    source = Stream()
    acc = source.accumulate(lambda total, item: total / item, with_state=True)  # noqa: F841
    with pytest.raises(ZeroDivisionError):
        source.emit(1)
        source.emit(0)
@gen_test()
def test_sync_in_event_loop():
a = Stream()
assert not a.asynchronous
L = a.timed_window(0.01).sink_to_list()
sleep(0.05)
assert L
assert a.loop
assert a.loop is not IOLoop.current()
def test_share_common_ioloop(clean): # noqa: F811
a = Stream()
b = Stream()
aa = a.timed_window(0.01)
bb = b.timed_window(0.01)
assert aa.loop is bb.loop
@pytest.mark.parametrize('data', [
[[], [0, 1, 2, 3, 4, 5]],
[[None, None, None], [0, 1, 2, 3, 4, 5]],
[[1, None, None], [1, 2, 3, 4, 5]],
[[None, 4, None], [0, 1, 2, 3]],
[[None, 4, 2], [0, 2]],
[[3, 1, None], []]
])
def test_slice(data):
pars, expected = data
a = Stream()
b = a.slice(*pars)
out = b.sink_to_list()
for i in range(6):
a.emit(i)
assert out == expected
def test_slice_err():
    """A negative end index is rejected with ValueError."""
    with pytest.raises(ValueError):
        Stream().slice(end=-1)
def test_start():
    """Calling start() on a downstream node propagates to the source."""
    flag = []

    class MySource(Stream):
        def start(self):
            flag.append(True)

    pipeline = MySource().map(inc)
    pipeline.start()
    assert flag == [True]
def test_connect_zip():
a = Stream()
b = Stream()
c = Stream()
x = a.zip(b)
L = x.sink_to_list()
c.connect(x)
a.emit(1)
b.emit(1)
assert not L
c.emit(1)
assert L == [(1, 1, 1)]
def test_disconnect_zip():
a = Stream()
b = Stream()
c = Stream()
x = a.zip(b, c)
L = x.sink_to_list()
b.disconnect(x)
a.emit(1)
b.emit(1)
assert not L
c.emit(1)
assert L == [(1, 1)]
def test_connect_combine_latest():
a = Stream()
b = Stream()
c = Stream()
x = a.combine_latest(b, emit_on=a)
L = x.sink_to_list()
c.connect(x)
b.emit(1)
c.emit(1)
a.emit(1)
assert L == [(1, 1, 1)]
def test_connect_discombine_latest():
a = Stream()
b = Stream()
c = Stream()
x = a.combine_latest(b, c, emit_on=a)
L = x.sink_to_list()
c.disconnect(x)
b.emit(1)
c.emit(1)
a.emit(1)
assert L == [(1, 1)]
if sys.version_info >= (3, 5):
from streamz.tests.py3_test_core import * # noqa
def test_buffer_after_partition():
    # Smoke test: constructing buffer() after partition() must not raise.
    Stream().partition(1).buffer(1)
def test_buffer_after_timed_window():
    # Smoke test: constructing buffer() after timed_window() must not raise.
    Stream().timed_window(1).buffer(1)
def test_buffer_after_sliding_window():
    # Smoke test: constructing buffer() after sliding_window() must not raise.
    Stream().sliding_window(1).buffer(1)
def test_backpressure_connect_empty_stream():
    """Backpressure flows through connect(): a rate-limited sink slows the source."""
    @Stream.register_api()
    class from_list(Stream):
        # Toy source that emits items popped from a Python list.
        def __init__(self, source, **kwargs):
            self.source = source
            super().__init__(ensure_io_loop=True, **kwargs)
        def start(self):
            self.stopped = False
            self.loop.add_callback(self.run)
        @gen.coroutine
        def run(self):
            # Each _emit yields until downstream accepts the value, which is
            # what lets rate_limit below throttle this producer.
            while not self.stopped and len(self.source) > 0:
                yield self._emit(self.source.pop(0))
    source_list = [0, 1, 2, 3, 4]
    source = Stream.from_list(source_list)
    sout = Stream()
    L = sout.rate_limit(1).sink_to_list()
    source.connect(sout)
    source.start()
    wait_for(lambda: L == [0], 0.01)
    # The throttled pipeline must not have drained the source yet.
    assert len(source_list) > 0
|
benchmark_djangocache.py | """Benchmark diskcache.DjangoCache
$ export PYTHONPATH=/Users/grantj/repos/python-diskcache
$ python tests/benchmark_djangocache.py > tests/timings_djangocache.txt
"""
from __future__ import print_function
import collections as co
import multiprocessing as mp
import os
import random
import shutil
import sys
import time
import warnings
if sys.hexversion < 0x03000000:
range = xrange
import cPickle as pickle
else:
import pickle
from utils import display
PROCS = 8
OPS = int(1e5)
RANGE = int(1.1e3)
WARMUP = int(1e3)
def setup():
    """Point Django at the benchmark settings module and initialise it.

    Safe to call repeatedly: setdefault leaves an existing value alone.
    """
    import django
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.settings')
    django.setup()
def worker(num, name):
    """Benchmark get/set/delete operations against the named Django cache.

    Performs OPS random operations (~90% get, ~9% set, ~1% delete) on keys
    drawn from RANGE, records per-action latencies after the WARMUP period,
    and pickles the timings to ``output-<num>.pkl`` for dispatch() to merge.

    Bug fix: miss detection used ``result == False`` (flake8 E712); an
    identity check is the intended comparison for a boolean API result.
    """
    setup()
    from django.core.cache import caches
    obj = caches[name]
    random.seed(num)
    timings = co.defaultdict(list)
    time.sleep(0.01)  # Let other processes start.
    for count in range(OPS):
        key = str(random.randrange(RANGE)).encode('utf-8')
        value = str(count).encode('utf-8') * random.randrange(1, 100)
        choice = random.random()
        if choice < 0.900:
            start = time.time()
            result = obj.get(key)
            end = time.time()
            miss = result is None
            action = 'get'
        elif choice < 0.990:
            start = time.time()
            result = obj.set(key, value)
            end = time.time()
            miss = result is False
            action = 'set'
        else:
            start = time.time()
            result = obj.delete(key)
            end = time.time()
            miss = result is False
            action = 'delete'
        if count > WARMUP:
            delta = end - start
            timings[action].append(delta)
            if miss:
                timings[action + '-miss'].append(delta)
    with open('output-%d.pkl' % num, 'wb') as writer:
        pickle.dump(timings, writer, protocol=pickle.HIGHEST_PROTOCOL)
def prepare(name):
    """Pre-populate the named cache with one entry per key in RANGE.

    Bug fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to Exception. The loop variable no longer
    shadows itself by reusing ``key`` for both the int and the bytes form.
    """
    setup()
    from django.core.cache import caches
    obj = caches[name]
    for num in range(RANGE):
        key = str(num).encode('utf-8')
        obj.set(key, key)
    try:
        obj.close()
    except Exception:
        # Not every cache backend implements close(); best-effort cleanup.
        pass
def dispatch():
    """Run the full benchmark for every configured cache backend.

    For each backend: wipe the scratch directory, warm the cache in a child
    process, fan out PROCS worker processes, then merge their pickled
    per-action timings and display the aggregate table.
    """
    setup()
    from django.core.cache import caches
    for name in ['locmem', 'memcached', 'redis', 'diskcache', 'filebased']:
        shutil.rmtree('tmp', ignore_errors=True)
        # Warm the cache in a separate process so each backend starts fresh.
        preparer = mp.Process(target=prepare, args=(name,))
        preparer.start()
        preparer.join()
        processes = [
            mp.Process(target=worker, args=(value, name))
            for value in range(PROCS)
        ]
        for process in processes:
            process.start()
        for process in processes:
            process.join()
        # Merge the per-process timing pickles, removing each file once read.
        timings = co.defaultdict(list)
        for num in range(PROCS):
            filename = 'output-%d.pkl' % num
            with open(filename, 'rb') as reader:
                output = pickle.load(reader)
            for key in output:
                timings[key].extend(output[key])
            os.remove(filename)
        display(name, timings)
if __name__ == '__main__':
    import argparse
    # Allow overriding the benchmark knobs (module-level constants) from the
    # command line before dispatching the runs.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        '-p', '--processes', type=int, default=PROCS,
        help='Number of processes to start',
    )
    parser.add_argument(
        '-n', '--operations', type=float, default=OPS,
        help='Number of operations to perform',
    )
    parser.add_argument(
        '-r', '--range', type=int, default=RANGE,
        help='Range of keys',
    )
    parser.add_argument(
        '-w', '--warmup', type=float, default=WARMUP,
        help='Number of warmup operations before timings',
    )
    args = parser.parse_args()

    # Floats are accepted (e.g. 1e5) and truncated to ints here.
    PROCS = int(args.processes)
    OPS = int(args.operations)
    RANGE = int(args.range)
    WARMUP = int(args.warmup)

    dispatch()
|
test_client_integration.py | # -*- coding: utf-8 -*-
import pytest
from client import MultibotClient
from random import randint
from os import path
from json import loads
from requests import head
from requests.exceptions import RequestException
from multiprocessing.pool import ThreadPool
from threading import Thread
# Set global config values for the integration test
TEST_CONFIG_PATH = path.join(path.dirname(path.realpath(__file__)), 'config')
BOT_ID = 'fakebotid'
USER_ID = 'fakeid'
CHANNEL = 'abcd'
DUMMYBOTS = {
'available': False,
'tested': False
}
def check_dummybots():
    """
    Checks the availability of dummybots and sets the global flag. Runs once
    per test session.

    Improvements: the config file is read through a context manager (the
    handle previously leaked if the JSON failed to parse), the ThreadPool is
    closed and joined instead of leaking its threads, and the manual
    early-return loop is replaced with all().
    """
    global DUMMYBOTS
    if DUMMYBOTS['tested']:
        return
    DUMMYBOTS['tested'] = True

    # Load bot configuration
    with open(path.join(TEST_CONFIG_PATH, 'bots.json'), 'r') as fp:
        bot_connections = loads(fp.read())

    # Check the connection to dummybots concurrently
    def probe(bot_url):
        # HEAD-probe one bot; None signals unreachable or a non-OK status.
        try:
            r = head('{}/askmeanything?q=test'.format(bot_url), timeout=5)
            assert r.ok
            return r
        except (RequestException, AssertionError):
            return None

    urls = [connection['url'] for connection in bot_connections]
    pool = ThreadPool(processes=3)
    try:
        bot_available = pool.map(probe, urls)
    finally:
        pool.close()
        pool.join()

    # Every bot must respond for the live tests to be allowed to run.
    DUMMYBOTS['available'] = all(result is not None for result in bot_available)
@pytest.fixture(params=[True, False], ids=['patch_ask', 'no_patch_ask'])
def patch_bot_ask(request, mocker):
    """
    Patches for the bot ask method to either send real requests or generate a
    mock response for testing. If connection tests to bots fail, mark tests
    that use the real requests as xfail.
    """
    if request.param:
        # Fake a successful bot answer so no real HTTP traffic occurs.
        mocker.patch(
            'client.services.BotConnection.ask',
            return_value=(200, 'response'))
    else:
        # Require a flag to run the dummybot live tests
        # NOTE(review): pytest.config was removed in pytest 5.0; this relies
        # on an older pytest — confirm the pinned version or migrate to
        # request.config.getoption.
        if not pytest.config.getoption('--dummybot'):
            pytest.skip('need --dummybot option to run')
        else:
            check_dummybots()
            if not DUMMYBOTS.get('available'):
                pytest.xfail('one or more dummybot servers are unreachable')
@pytest.fixture()
def client(mocker, monkeypatch, patch_bot_ask):
    """
    Create and patches for the multibot client.

    Yields a dict containing the running client, the live rtm_read event
    list (append to it to feed events in), and every mock object so tests
    can inspect call counts and arguments. Stops the client thread on
    teardown.
    """
    # Create a combined api response object for user id and bot id
    mock_api_call = {
        'ok': True,
        'user_id': USER_ID,
        'user': {
            'profile': {
                'bot_id': BOT_ID
            }
        }
    }
    # Patch the slackclient to not do real requests to Slack
    m_api = mocker.patch(
        'client.multibot_client.SlackClient.api_call',
        return_value=mock_api_call)
    m_rtm_c = mocker.patch(
        'client.multibot_client.SlackClient.rtm_connect', return_value=True)
    # rtm_read returns this shared list object, so appending to it later
    # injects events into the running client.
    events_input = []
    m_rtm_r = mocker.patch(
        'client.multibot_client.SlackClient.rtm_read',
        return_value=events_input)
    m_rtm_s = mocker.patch('slackclient.SlackClient.rtm_send_message')
    # Create the multibot client with the config path set to the integration
    # test config folder
    c = MultibotClient(config_path=TEST_CONFIG_PATH)
    # Enable websockets
    monkeypatch.setattr(c.slack_client.server, 'websocket', True)
    # Run the client on a separate thread
    t = Thread(target=c.start, args=(), daemon=True)
    # Return all patch objects for checking
    return_object = {
        'client': c,
        'events_input': events_input,
        'api_call': m_api,
        'rtm_connect': m_rtm_c,
        'rtm_read': m_rtm_r,
        'rtm_send_message': m_rtm_s
    }
    t.start()
    yield return_object
    # Cleanup and end thread
    c.close()
    t.join(timeout=5)
class TestClientIntegration(object):
    """End-to-end tests driving the client through mocked Slack RTM events."""

    def query_bot(self, client, query):
        """
        Ask the currently set bot a question. Checks that the outputs and
        function calls are as expected.

        Returns the message text the client sent back to the channel.
        """
        read_count = client.get('rtm_read').call_count
        send_count = client.get('rtm_send_message').call_count
        # Inject an @-mention event as if it arrived from Slack's RTM API.
        client.get('events_input').append({
            'type':
            'message',
            'text':
            '<@{}> {}'.format(USER_ID, query),
            'channel':
            CHANNEL
        })
        has_event = client.get('client').events.get('message').wait(timeout=3)
        assert has_event
        # Drain the injected events so they are not re-read next poll.
        del client.get('events_input')[:]
        has_event = client.get('client').events.get('send').wait(timeout=3)
        assert has_event
        client.get('client').events.get('send').clear()
        assert client.get('rtm_read').call_count - read_count >= 1
        assert client.get('rtm_send_message').call_count - send_count == 1
        args, kwargs = client.get('rtm_send_message').call_args
        assert kwargs.get('channel') == CHANNEL
        return kwargs.get('message')

    def random_string(self, start=0, end=9000):
        """
        Generate a string based on a random number.
        """
        return str(randint(start, end))

    def test_simple_chat(self, client):
        """
        Test whether a simple chat to one bot goes as expected.
        """
        self.query_bot(client, 'list')
        assert self.query_bot(client, 'list') == ('1. Interesting Facts\n'
                                                  '2. Strange Facts\n'
                                                  '3. Unusual Facts')
        assert self.query_bot(
            client, 'start_session Interesting Facts'
        ) == 'You are now chatting with Interesting Facts.'
        # Try a unicode query this time
        assert self.query_bot(
            client, 'Ṱ̺̺̕h̼͓̲̦̳̘̲e͇̣̰̦̬͎ ̢̼̻̱̘h͚͎͙̜̣̲ͅi̦̲̣̰̤v̻͍e̺̭̳̪̰-m̢iͅn̖̺̞̲̯̰d̵̼̟͙̩̼̘̳.̨̹͈̣').find('Interesting Facts') == 0
        assert self.query_bot(
            client,
            'end_session') == 'Chat session with Interesting Facts ended.'

    def test_multibot_chat(self, client):
        """
        Test whether a chat switching between multiple bots goes as expected.
        """
        assert self.query_bot(client, 'list'), ('1. Interesting Facts\n'
                                                '2. Strange Facts\n'
                                                '3. Unusual Facts')
        # Test connecting to, chatting with, and disconnecting from all bot
        # connections
        for i, connection in enumerate(client.get('client').bot_connections):
            assert self.query_bot(client, 'start_session {}'.format(
                connection['name'])) == 'You are now chatting with {}.'.format(
                    connection['name'])
            assert self.query_bot(
                client, self.random_string()).find(connection['name']) == 0
            assert self.query_bot(
                client, 'end_session') == 'Chat session with {} ended.'.format(
                    connection['name'])

    def test_invalid_chat(self, client):
        """
        Test whether a chat with invalid commands being sent goes as expected.
        """
        # Try to chat when not connected to a bot
        rand = self.random_string()
        assert self.query_bot(client, rand) == (
            'You are currently not connected to any bot. '
            'Connect to a bot with \'start_session <bot_name>\' or '
            'type \'list\' for a list of available bots.')
        # Try to end the session without being in a session
        assert self.query_bot(
            client,
            'end_session') == 'You are currently not in an active session.'
        # Try to start a new session whilst a session is currently in place
        assert self.query_bot(client, 'start_session Unusual Facts'
                              ) == 'You are now chatting with Unusual Facts.'
        assert self.query_bot(
            client, 'start_session Strange Facts'
        ) == 'You are already in a chat session with Unusual Facts!'
|
index.py | # this is a modified version of the original index.py.
# Copyright (c) Alex Ellis 2017. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
from flask import Flask, request, current_app, request, Response
from function import handler
from waitress import serve
import os, threading
import uuid
from functools import wraps
from werkzeug.exceptions import HTTPException, InternalServerError
import requests
app = Flask(__name__)
tasks = {}
def flask_async(f):
    """
    This decorator transforms a sync route to asynchronous by running it in a background thread.

    The wrapped route returns 202 immediately with a task id; the real work
    runs on a Thread against a fresh request context built from the original
    WSGI environ. If an X-Callback-Url header was supplied, the result body
    is POSTed there when the work finishes.

    NOTE(review): entries in the module-level ``tasks`` dict are never
    removed, so a long-lived service accumulates them indefinitely — confirm
    whether a reaper/expiry is expected elsewhere.
    """
    @wraps(f)
    def wrapped(*args, **kwargs):
        def task(app, environ, callback_url):
            # Create a request context similar to that of the original request
            with app.request_context(environ):
                try:
                    # Run the route function and record the response
                    tasks[task_id]['result'] = f(*args, **kwargs)
                    if callback_url is not None:
                        requests.post(callback_url, data=tasks[task_id]['result'][0].get_data(as_text=True))
                except HTTPException as e:
                    tasks[task_id]['result'] = current_app.handle_http_exception(e)
                except Exception as e:
                    # The function raised an exception, so we set a 500 error
                    tasks[task_id]['result'] = InternalServerError()
                    if current_app.debug:
                        # We want to find out if something happened so reraise
                        raise
        # Assign an id to the asynchronous task
        task_id = uuid.uuid4().hex
        # Record the task, and then launch it
        tasks[task_id] = {'task': threading.Thread(
            target=task, args=(current_app._get_current_object(), request.environ, request.headers.get('X-Callback-Url')))}
        tasks[task_id]['task'].start()
        # Return a 202 response, with an id that the client can use to obtain task status
        return {'task_id': task_id}, 202
    return wrapped
def is_true(val):
    """Return True when *val* is a truthy flag string: "true" (any case) or "1".

    Stand-in for distutils.util.strtobool(), which raises on unrecognised
    values; here any other string simply yields False.

    Fix: the original relied on `and`/`or` operator precedence
    (`len(val) > 0 and val.lower() == "true" or val == "1"`), which is
    correct only by accident of binding and easy to misread; rewritten as an
    explicit membership test with identical behavior.
    """
    return val.lower() in ("true", "1")
@app.before_request
def fix_transfer_encoding():
    """
    Sets the "wsgi.input_terminated" environment flag, thus enabling
    Werkzeug to pass chunked requests as streams. The gunicorn server
    should set this, but it's not yet been implemented.
    """
    if request.headers.get("Transfer-Encoding", None) == u"chunked":
        request.environ["wsgi.input_terminated"] = True
@app.route("/function/ptas", defaults={"path": ""}, methods=["POST", "GET", "OPTIONS"])
def main_route(path):
    """Synchronous entry point: hand the request body to the function handler."""
    # RAW_BODY=true delivers the body as bytes instead of decoded text.
    want_text = not is_true(os.getenv("RAW_BODY", "false"))
    return handler.handle(request.get_data(as_text=want_text))
@app.route("/async-function/ptas", defaults={"path": ""}, methods=["POST", "GET", "OPTIONS"])
@flask_async
def main_route_async(path):
    """Asynchronous entry point: same as main_route, run in a background thread."""
    # RAW_BODY=true delivers the body as bytes instead of decoded text.
    want_text = not is_true(os.getenv("RAW_BODY", "false"))
    return handler.handle(request.get_data(as_text=want_text))
if __name__ == '__main__':
serve(app, host='0.0.0.0', port=5000)
|
test_application.py | # GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pylint: disable-msg=C0301
#pylint: disable-msg=F0401
#pylint: disable-msg=W0142
"""Tests for application.py"""
import sys
import os
import unittest
import time
#import pprint
#import pdb
import warnings
from threading import Thread
import ctypes
import mock
import six
sys.path.append(".")
from pywinauto import Desktop
from pywinauto.windows import application, win32defines
from pywinauto.controls import hwndwrapper
from pywinauto.windows.application import Application
from pywinauto.base_application import WindowSpecification # noqa: E402
from pywinauto.windows.application import process_module
from pywinauto.windows.application import process_get_modules
from pywinauto.windows.application import ProcessNotFoundError
from pywinauto.windows.application import AppStartError
from pywinauto.windows.application import AppNotConnected
from pywinauto.controls.common_controls import TrackbarWrapper
from pywinauto import findwindows
from pywinauto import findbestmatch
from pywinauto.timings import Timings
from pywinauto.timings import TimeoutError
from pywinauto.timings import WaitUntil
from pywinauto.timings import always_wait_until
from pywinauto.timings import always_wait_until_passes
from pywinauto.timings import timestamp # noqa: E402
from pywinauto.sysinfo import is_x64_Python
from pywinauto.sysinfo import is_x64_OS
from pywinauto.sysinfo import UIA_support
#application.set_timing(1, .01, 1, .01, .05, 0, 0, .1, 0, .01)
# About dialog may take some time to load
# so make sure that we wait for it.
Timings.window_find_timeout = 5
def _notepad_exe():
    """Return the notepad.exe path whose bitness matches this Python."""
    # A 32-bit Python on a 64-bit OS must use the SysWOW64 copy.
    if not is_x64_Python() and is_x64_OS():
        return r"C:\Windows\SysWOW64\notepad.exe"
    return r"C:\Windows\System32\notepad.exe"
mfc_samples_folder_32 = mfc_samples_folder = os.path.join(
os.path.dirname(__file__), r"..\..\apps\MFC_samples")
if is_x64_Python():
mfc_samples_folder = os.path.join(mfc_samples_folder, 'x64')
class ApplicationWarningTestCases(unittest.TestCase):
    """Unit tests for warnings in the application.Application class"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        # Force Display User and Deprecation warnings every time
        # Python 3.3 + nose/unittest tries really hard to suppress them
        for warning in (UserWarning, PendingDeprecationWarning):
            warnings.simplefilter('always', warning)
        # Pick sample binaries so that sample_exe_inverted_bitness always
        # mismatches the running Python's bitness, to provoke the warning.
        if is_x64_Python():
            self.sample_exe = os.path.join(mfc_samples_folder,
                                           "CmnCtrl1.exe")
            self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder_32,
                                                            "CmnCtrl1.exe")
        else:
            self.sample_exe = os.path.join(mfc_samples_folder_32, "CmnCtrl1.exe")
            self.sample_exe_inverted_bitness = os.path.join(mfc_samples_folder,
                                                            "x64",
                                                            "CmnCtrl1.exe")

    def testStartWarning3264(self):
        # Starting an app of mismatched bitness should emit a UserWarning.
        if not is_x64_OS():
            self.defaultTestResult()
            return
        warnings.filterwarnings('always', category=UserWarning, append=True)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            app = Application().start(self.sample_exe_inverted_bitness)
            app.kill()
            assert len(w) >= 1
            assert issubclass(w[-1].category, UserWarning)
            assert "64-bit" in str(w[-1].message)

    def testConnectWarning3264(self):
        # Connecting to a running app of mismatched bitness should warn too.
        if not is_x64_OS():
            self.defaultTestResult()
            return
        app = Application().start(self.sample_exe_inverted_bitness)
        # Appveyor mysteries...
        self.assertEqual(app.is_process_running(), True)
        with mock.patch("warnings.warn") as mockWarn:
            Application().connect(pid=app.process)
            app.kill()
            args, kw = mockWarn.call_args
            assert len(args) == 2
            assert "64-bit" in args[0]
            assert args[1].__name__ == 'UserWarning'
class ApplicationWin32KillTestCases(unittest.TestCase):
    """Unit tests for method Application.kill() with backend='win32'"""

    # Subclasses override this to rerun every test with another backend.
    backend = 'win32'

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        self.sample_exe = os.path.join(mfc_samples_folder, 'RowList.exe')
        self.app = Application(backend=self.backend).start(self.sample_exe)
        self.target_process = self.app.process

    def tearDown(self):
        # Hard kill in case a test left the application running.
        self.app.kill(soft=False)

    def test_kill_hard(self):
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)

    def test_kill_soft(self):
        self.assertTrue(self.app.kill(soft=True))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)

    def test_already_killed_hard(self):
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
        self.assertTrue(self.app.kill(soft=False))  # already killed, returned True anyway

    def test_already_killed_soft(self):
        self.assertTrue(self.app.kill(soft=False))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
        self.assertTrue(self.app.kill(soft=True))  # already killed, returned True anyway

    def test_kill_soft_with_modal_subdialog(self):
        """Kill the app with modal subdialog to cover win.force_close() call"""
        self.app.RowListSampleApplication.menu_select('Help->About RowList...')
        # Locate the modal dialog with backend-specific lookup first, so the
        # soft kill has to force-close it.
        if self.backend == 'win32':
            self.app.window(name='About RowList').wait('visible')
        elif self.backend == 'uia':
            self.app.RowListSampleApplication.by(name='About RowList').wait('visible')
        else:
            raise NotImplementedError('test_kill_soft_with_modal_subdialog: ' \
                'backend "{}" is not supported'.format(self.backend))
        self.assertTrue(self.app.kill(soft=True))
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=self.target_process)
        self.assertTrue(self.app.kill(soft=True))  # already killed, returned True anyway
if UIA_support:
    # Re-run the whole win32 kill() suite against the UIA backend simply by
    # overriding the 'backend' class attribute.
    class ApplicationUiaKillTestCases(ApplicationWin32KillTestCases):

        """Unit tests for method Application.kill() with backend='uia'"""

        backend = 'uia'
        # the same test methods run here
# These scenarios are only meaningful when the test runner itself is NOT
# elevated: AdminTestCases connects a non-admin Python to an elevated app,
# NonAdminTestCases checks that two non-admin processes interact silently.
if ctypes.windll.shell32.IsUserAnAdmin() == 0:

    class AdminTestCases(ApplicationWarningTestCases):

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            super(AdminTestCases, self).setUp()
            # start the sample elevated (UAC 'Run as administrator') via powershell
            cmd = 'powershell -Command "Start-Process {} -Verb RunAs"'.format(self.sample_exe)
            # wait_for_idle=False: we cannot wait on the elevated process handle
            self.app = Application().start(cmd, wait_for_idle=False)

        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()
            super(AdminTestCases, self).tearDown()

        def test_non_admin_warning(self):
            """connect() from a non-admin process to an elevated app must warn"""
            warnings.filterwarnings('always', category=UserWarning, append=True)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.app = Application().connect(name="Common Controls Sample", timeout=20)
                assert len(w) >= 1
                assert issubclass(w[-1].category, UserWarning)
                assert "process has no rights" in str(w[-1].message)

        def test_non_admin_click(self):
            """actions on an elevated app from a non-admin process must raise"""
            self.app = Application().connect(name="Common Controls Sample", timeout=20)
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.OK.click()
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.OK.click_input()
            with self.assertRaises(RuntimeError):
                self.app.CommonControlsSample.TVS_HASBUTTON.check()

    class NonAdminTestCases(ApplicationWarningTestCases):

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            super(NonAdminTestCases, self).setUp()
            # start the sample at the same (non-admin) integrity level
            self.app = Application().start(self.sample_exe)

        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()
            super(NonAdminTestCases, self).tearDown()

        def test_both_non_admin(self):
            """no warning expected when both processes are non-admin"""
            warnings.filterwarnings('always', category=UserWarning, append=True)
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter("always")
                self.app = Application().connect(name="Common Controls Sample", timeout=5)
                assert len(w) == 0

        def test_both_non_admin_click(self):
            """actions must succeed when both processes are non-admin"""
            self.app = Application().connect(name="Common Controls Sample", timeout=5)
            self.app.CommonControlsSample.TVS_HASBUTTON.check()
            self.assertEqual(self.app.CommonControlsSample.TVS_HASBUTTON.is_checked(), True)
            self.app.CommonControlsSample.OK.click()
            self.app.CommonControlsSample.wait_not('visible')
class ApplicationTestCases(unittest.TestCase):

    """Unit tests for the application.Application class"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        # silence all warnings for the duration of a test; restored in tearDown
        self.prev_warn = warnings.showwarning
        def no_warnings(*args, **kwargs): pass
        warnings.showwarning = no_warnings
        # choose the notepad binary matching the Python/OS bitness combination
        if is_x64_Python() or not is_x64_OS():
            self.notepad_subpath = r"system32\notepad.exe"
        else:
            self.notepad_subpath = r"SysWOW64\notepad.exe"

    def tearDown(self):
        """Close the application after tests"""
        #self.dlg.SendMessage(win32defines.WM_CLOSE)
        warnings.showwarning = self.prev_warn

    def test__init__(self):
        """Verify that Application instance is initialized or not"""
        self.assertRaises(ValueError, Application, backend='unregistered')

    def test__iter__(self):
        """Verify that Application instance is not iterable"""
        app = Application()
        app.start(_notepad_exe())
        with self.assertRaises(NotImplementedError):
            for a in app:
                pass
        app.kill()

    def test_not_connected(self):
        """Verify that it raises when the app is not connected"""
        self.assertRaises (AppNotConnected, Application().__getattribute__, 'Hiya')
        self.assertRaises (AppNotConnected, Application().__getitem__, 'Hiya')
        self.assertRaises (AppNotConnected, Application().window_, name='Hiya')
        self.assertRaises (AppNotConnected, Application().top_window_,)

    def test_start_problem(self):
        """Verify start_ raises on unknown command"""
        self.assertRaises (AppStartError, Application().start, 'Hiya')

    def test_start(self):
        """test start() works correctly"""
        app = Application()
        self.assertEqual(app.process, None)
        app.start(_notepad_exe())
        self.assertNotEqual(app.process, None)
        self.assertEqual(app.UntitledNotepad.process_id(), app.process)
        # the launched binary must be exactly the expected notepad path
        notepadpath = os.path.join(os.environ['systemroot'], self.notepad_subpath)
        self.assertEqual(str(process_module(app.process)).lower(), str(notepadpath).lower())
        app.UntitledNotepad.menu_select("File->Exit")

    def testStart_bug01(self):
        """On SourceForge forum AppStartError forgot to include %s for application name"""
        app = Application()
        self.assertEqual(app.process, None)
        application.app_start_timeout = 1
        app_name = r"I am not * and Application!/\.exe"
        try:
            app.start(app_name)
        except AppStartError as e:
            # the failing application name must appear in the error message
            self.assertEqual(app_name in str(e), True)

    # def testset_timing(self):
    #    """Test that set_timing sets the timing correctly"""
    #    prev_timing = (
    #        application.window_find_timeout,
    #        application.window_retry_interval,
    #        application.app_start_timeout,
    #        application.exists_timeout,
    #        application.exists_retry_interval,
    #        hwndwrapper.delay_after_click,
    #        hwndwrapper.delay_after_menuselect,
    #        hwndwrapper.delay_after_sendkeys_key,
    #        hwndwrapper.delay_after_button_click,
    #        hwndwrapper.delay_before_after_close_click,
    #        )
    #    set_timing(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
    #
    #    self.assertEqual(
    #        (
    #        application.window_find_timeout,
    #        application.window_retry_interval,
    #        application.app_start_timeout,
    #        application.exists_timeout,
    #        application.exists_retry_interval,
    #        hwndwrapper.delay_after_click,
    #        hwndwrapper.delay_after_menuselect,
    #        hwndwrapper.delay_after_sendkeys_key,
    #        hwndwrapper.delay_after_button_click,
    #        hwndwrapper.delay_before_after_close_click,
    #        ), (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) )
    #
    #    set_timing(*prev_timing)

    def test_connect_path(self):
        """Test that connect_() works with a path"""
        app1 = Application()
        app1.start(_notepad_exe())

        # relative path should resolve against the Windows directory
        app_conn = Application()
        app_conn.connect(path=self.notepad_subpath)
        self.assertEqual(app1.process, app_conn.process)

        # absolute path must work as well
        app_conn = Application()
        if is_x64_Python() or not is_x64_OS():
            app_conn.connect(path=r"c:\windows\system32\notepad.exe")
        else:
            app_conn.connect(path=r"c:\windows\syswow64\notepad.exe")
        self.assertEqual(app1.process, app_conn.process)

        accessible_modules = process_get_modules()
        accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
        self.assertEqual('notepad.exe' in accessible_process_names, True)

        app_conn.UntitledNotepad.menu_select('File->Exit')

    def test_connect_path_timeout(self):
        """Test that connect_() works with a path with timeout"""
        app1 = Application()
        # launch notepad from a background thread after 2s so that
        # connect(timeout=3) has to wait for the process to appear
        def delayed_launch():
            time.sleep(2)
            app1.start(_notepad_exe())
        thread = Thread(target=delayed_launch)
        thread.start()
        app_conn = Application()
        app_conn.connect(path=_notepad_exe(), timeout=3)
        self.assertEqual(app1.process, app_conn.process)

        accessible_modules = process_get_modules()
        accessible_process_names = [os.path.basename(name.lower()) for process, name, cmdline in accessible_modules]
        self.assertEqual('notepad.exe' in accessible_process_names, True)

        app1.UntitledNotepad.menu_select('File->Exit')

    def test_connect_path_timeout_problem(self):
        """Test that connect_() raise error when no process start"""
        app1 = Application()
        # notepad starts after 1s but connect() only waits 0.5s -> must raise
        def delayed_launch():
            time.sleep(1)
            app1.start(_notepad_exe())
        thread = Thread(target=delayed_launch)
        thread.start()
        self.assertRaises(ProcessNotFoundError, Application().connect, path=_notepad_exe(), timeout=0.5)
        # give the thread time to actually start notepad before closing it
        time.sleep(0.7)
        app1.UntitledNotepad.menu_select('File->Exit')

    def test_connect_process_timeout_failed(self):
        """Test that connect_(pid=...) raise error when set timeout"""
        app1 = Application()
        app1.start(_notepad_exe())
        # pid 0 never becomes a valid target, so the timeout must expire
        self.assertRaises(ProcessNotFoundError, Application().connect, pid=0, timeout=0.5)
        app1.UntitledNotepad.menu_select('File->Exit')

    # def test_Connect(self):
    #    """Test that connect_() works with a path"""
    #    app1 = Application()
    #    app1.start_("notepad.exe")
    #
    #    app_conn = Application()
    #    app_conn.connect_(path = r"system32\notepad.exe")
    #    self.assertEqual(app1.process, app_conn.process)
    #
    #    app_conn = Application()
    #    app_conn.connect_(path = r"c:\windows\system32\notepad.exe")
    #    self.assertEqual(app1.process, app_conn.process)
    #
    #    app_conn.UntitledNotepad.menu_select('File->Exit')

    def test_connect_process(self):
        """Test that connect_() works with a process"""
        app1 = Application()
        app1.start(_notepad_exe())

        app_conn = Application()
        app_conn.connect(pid=app1.process)
        self.assertEqual(app1.process, app_conn.process)

        app_conn.UntitledNotepad.menu_select('File->Exit')

    def test_connect_handle(self):
        """Test that connect_() works with a handle"""
        app1 = Application()
        app1.start(_notepad_exe())
        handle = app1.UntitledNotepad.handle

        app_conn = Application()
        app_conn.connect(handle=handle)
        self.assertEqual(app1.process, app_conn.process)

        app_conn.UntitledNotepad.menu_select('File->Exit')

    def test_connect_windowspec(self):
        """Test that connect_() works with a windowspec"""
        app1 = Application()
        app1.start(_notepad_exe())
        #unused var: handle = app1.UntitledNotepad.handle

        app_conn = Application()
        try:
            app_conn.connect(name="Untitled - Notepad")
        except findwindows.WindowAmbiguousError:
            # more than one matching window: fall back to the first handle
            wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
            app_conn.connect(handle = wins[0].handle)
        except findwindows.ElementNotFoundError:
            # window not up yet: wait for it, then connect by handle
            WaitUntil(30, 0.5, lambda: len(findwindows.find_elements(active_only=True, name="Untitled - Notepad")) > 0)
            wins = findwindows.find_elements(active_only=True, name="Untitled - Notepad")
            app_conn.connect(handle = wins[0].handle)

        self.assertEqual(app1.process, app_conn.process)

        app_conn.UntitledNotepad.menu_select('File->Exit')

    def test_connect_raises(self):
        """Test that connect_() raises with invalid input"""
        # try an argument that does not exist
        self.assertRaises (
            KeyError,
            Application().connect, **{'not_arg': 23})

        # no keyword at all is also invalid
        self.assertRaises (
            RuntimeError,
            Application().connect)

        # try to pass an invalid process
        self.assertRaises (
            ProcessNotFoundError,
            Application().connect, **{'pid': 0})

        # try to pass an invalid handle
        self.assertRaises(
            RuntimeError,
            Application().connect, **{'handle' : 0})

        # try to pass an invalid path
        self.assertRaises(
            ProcessNotFoundError,
            Application().connect, **{'path': "no app here", 'timeout': 0.0})

    def test_top_window(self):
        """Test that top_window_() works correctly"""
        Timings.window_find_timeout = 5
        app = Application()
        self.assertRaises(AppNotConnected, app.top_window_)

        app.start(_notepad_exe())
        self.assertEqual(app.UntitledNotepad.handle, app.top_window_().handle)

        # the modal About box becomes the new top window
        app.UntitledNotepad.menu_select("Help->About Notepad")
        self.assertEqual(app.AboutNotepad.handle, app.top_window_().handle)

        app.AboutNotepad.Ok.Click()
        app.UntitledNotepad.menu_select("File->Exit")
        # once all windows are gone top_window_() has nothing to return
        app.UntitledNotepad.wait_not('exists')
        self.assertRaises(RuntimeError, app.top_window_)

    def test_active_window(self):
        """Test that active_() works correctly"""
        app = Application()
        self.assertRaises(AppNotConnected, app.active_)
        self.assertRaises(AppNotConnected, app.is64bit)

        app.start(_notepad_exe())
        app.UntitledNotepad.wait('ready')
        self.assertEqual(app.active_().handle, app.UntitledNotepad.handle)

        app.UntitledNotepad.menu_select("File->Exit")
        app.UntitledNotepad.wait_not('exists')
        self.assertRaises(RuntimeError, app.active_)

    def test_cpu_usage(self):
        """Verify that cpu_usage() works correctly"""
        app = Application()
        self.assertRaises(AppNotConnected, app.cpu_usage)
        app.start(_notepad_exe())
        # any live process reports a percentage in [0, 100]
        self.assertEqual(0.0 <= app.cpu_usage() <= 100.0, True)
        app.UntitledNotepad.menu_select("File->Exit")
        app.UntitledNotepad.wait_not('exists')

    def test_wait_cpu_usage_lower(self):
        """Test that wait_cpu_usage_lower() works correctly"""
        if is_x64_Python() != is_x64_OS():
            # NOTE(review): silently skipped on mixed bitness; consider
            # self.skipTest() so the skip is visible in the report
            return None

        Application().Start(r'explorer.exe')

        def _cabinetwclass_exist():
            "Verify if at least one active 'CabinetWClass' window is created"
            l = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')
            return (len(l) > 0)

        WaitUntil(40, 0.5, _cabinetwclass_exist)
        handle = findwindows.find_elements(active_only = True, class_name = 'CabinetWClass')[-1].handle
        window = WindowSpecification({'handle': handle, 'backend': 'win32', })
        explorer = Application().Connect(pid = window.process_id())

        try:
            explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
            window.AddressBandRoot.ClickInput()
            window.TypeKeys(r'Control Panel\Programs\Programs and Features', with_spaces=True, set_foreground=True)
            window.TypeKeys(r'{ENTER}', set_foreground = False)
            WaitUntil(40, 0.5, lambda: len(findwindows.find_elements(active_only=True,
                                                                     name='Programs and Features',
                                                                     class_name='CabinetWClass')) > 0)
            explorer.WaitCPUUsageLower(threshold = 1.5, timeout = 60, usage_interval = 2)
            installed_programs = window.FolderView.texts()[1:]
            programs_list = ','.join(installed_programs)
            if ('Microsoft' not in programs_list) and ('Python' not in programs_list):
                # dump a screenshot and the program list to aid CI debugging
                hwndwrapper.ImageGrab.grab().save(r'explorer_screenshot.jpg')
                hwndwrapper.ActionLogger().log('\ninstalled_programs:\n')
                for prog in installed_programs:
                    hwndwrapper.ActionLogger().log(prog)
            self.assertEqual(('Microsoft' in programs_list) or ('Python' in programs_list), True)
        finally:
            window.Close(2.0)

    if UIA_support:
        def test_wait_cpu_usage_lower_uia(self):
            """Test that wait_cpu_usage_lower() works correctly for UIA"""
            app = Application(backend='uia')
            app.start('notepad.exe')
            try:
                app.wait_cpu_usage_lower(threshold = 1.5, timeout = 30, usage_interval = 2)
            finally:
                app.kill()

            # with a mocked, permanently-high cpu_usage the wait must time out
            app.cpu_usage = mock.Mock(return_value=10)
            self.assertRaises(
                RuntimeError, app.wait_cpu_usage_lower,
                threshold = 9.0, timeout = 5, usage_interval = 0.5
            )

    # def test_wait_for_idle_exception(self):
    #     """Test that method start() raises an exception when wait for idle failed"""
    #     app = Application()
    #     self.assertRaises(Exception, app.start, 'cmd.exe')
    #     # TODO: test and fix the case when cmd.exe can't be killed by app.kill()

    def test_windows(self):
        """Test that windows_() works correctly"""
        Timings.window_find_timeout = 5
        app = Application()
        self.assertRaises(AppNotConnected, app.windows_, **{'title' : 'not connected'})

        app.start('notepad.exe')
        self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})

        notepad_handle = app.UntitledNotepad.handle
        self.assertEqual(app.windows(visible=True), [notepad_handle])

        # the modal About box disables the main window, hence enabled=None
        app.UntitledNotepad.menu_select("Help->About Notepad")
        aboutnotepad_handle = app.AboutNotepad.handle
        self.assertEqual(
            app.windows(visible=True, enabled=None),
            [aboutnotepad_handle, notepad_handle])

        app.AboutNotepad.OK.Click()
        app.UntitledNotepad.menu_select("File->Exit")

    def test_window(self):
        """Test that window_() works correctly"""
        app = Application()
        self.assertRaises(AppNotConnected, app.window_, **{'title' : 'not connected'})

        app.start(_notepad_exe())
        self.assertRaises(ValueError, app.windows_, **{'backend' : 'uia'})

        # every lookup strategy must resolve to the very same window
        title = app.window(name="Untitled - Notepad")
        title_re = app.window(name_re="Untitled[ -]+Notepad")
        classname = app.window(class_name="Notepad")
        classname_re = app.window(class_name_re="Not..ad")
        handle = app.window(handle=title.handle)
        bestmatch = app.window(best_match="Untiotled Notepad")

        self.assertNotEqual(title.handle, None)
        self.assertNotEqual(title.handle, 0)

        self.assertEqual(title.handle, title_re.handle)
        self.assertEqual(title.handle, classname.handle)
        self.assertEqual(title.handle, classname_re.handle)
        self.assertEqual(title.handle, handle.handle)
        self.assertEqual(title.handle, bestmatch.handle)

        app.UntitledNotepad.menu_select("File->Exit")

    def test_getitem(self):
        """Test that __getitem__() works correctly"""
        Timings.window_find_timeout = 5
        app = Application()
        app.start(_notepad_exe())

        self.assertRaises(Exception, app['blahblah'])

        self.assertRaises(
            findbestmatch.MatchError,
            app['blahblah']['not here'].__getitem__, 'handle')

        # fuzzy best-match lookup must resolve to the real main window
        self.assertEqual(
            app[u'Unt\xeftledNotepad'].handle,
            app.window(name="Untitled - Notepad").handle)

        app.UntitledNotepad.menu_select("Help->About Notepad")
        self.assertEqual(
            app['AboutNotepad'].handle,
            app.window(name="About Notepad").handle)

        app.AboutNotepad.Ok.Click()
        app.UntitledNotepad.menu_select("File->Exit")

    def test_getattribute(self):
        """Test that __getattribute__() works correctly"""
        Timings.window_find_timeout = 5
        app = Application()
        app.start(_notepad_exe())

        self.assertRaises(
            findbestmatch.MatchError,
            app.blahblah.__getattribute__, 'handle')

        self.assertEqual(
            app.UntitledNotepad.handle,
            app.window(name="Untitled - Notepad").handle)

        app.UntitledNotepad.menu_select("Help->About Notepad")

        # I think it's OK that this no longer raises a matcherror
        # just because the window is not enabled - doesn't mean you
        # should not be able to access it at all!
        #self.assertRaises(findbestmatch.MatchError,
        #    app.Notepad.__getattribute__, 'handle')

        self.assertEqual(
            app.AboutNotepad.handle,
            app.window(name="About Notepad").handle)

        app.AboutNotepad.Ok.Click()
        app.UntitledNotepad.menu_select("File->Exit")

    def test_kill(self):
        """test killing the application"""
        app = Application()
        app.start(_notepad_exe())

        app.UntitledNotepad.Edit.type_keys("hello")
        app.UntitledNotepad.menu_select("File->Print...")

        #app.Print.FindPrinter.Click() # Vasily: (Win7 x64) "Find Printer" dialog is from splwow64.exe process
        #app.FindPrinters.Stop.Click()

        app.kill()
        # after kill() any window lookup on the dead app must fail
        self.assertRaises(AttributeError, app.UntitledNotepad.Edit)

    def test_process_is_running(self):
        """Tests process is running and wait for exit function"""
        app = Application()
        app.start(_notepad_exe())
        app.UntitledNotepad.wait("ready")
        self.assertTrue(app.is_process_running())
        # the app is alive, so waiting for exit must time out
        self.assertRaises(TimeoutError, lambda: app.wait_for_process_exit(timeout=5, retry_interval=1))

        app.kill()

        app.wait_for_process_exit()
        self.assertFalse(app.is_process_running())

    def test_should_return_not_running_if_not_started(self):
        """Tests that works on new instance
        is_process_running/wait_for_process_exit can be called on not started/disconnected instance
        """
        app = Application()
        app.wait_for_process_exit(timeout=10, retry_interval=1)
        self.assertFalse(app.is_process_running())

    class TestInheritedApp(Application):

        """Our inherited version of class"""

        def test_method(self):
            """This method should be called without any issues"""
            return self is not None

    def test_application_inheritance(self):
        """Test that Application class can be inherited and has it's own methods"""
        app = ApplicationTestCases.TestInheritedApp()
        self.assertTrue(app.test_method())

    def test_non_magic_application(self):
        """Verify attribute lookup can be restricted with allow_magic_lookup=False"""
        app = Application()
        self.assertEqual(app.allow_magic_lookup, True)
        app_no_magic = Application(allow_magic_lookup=False)
        self.assertEqual(app_no_magic.allow_magic_lookup, False)

        app_no_magic.start(_notepad_exe())
        # explicit window()/by() lookups still work without magic lookup
        window = app_no_magic.window(best_match="UntitledNotepad")
        dlg = window.by(best_match="Edit")
        dlg.draw_outline()
        # ... but attribute-style magic lookup must raise
        with self.assertRaises(AttributeError):
            app_no_magic.UntitledNotepad
        with self.assertRaises(AttributeError):
            window.Edit

        app_no_magic.kill()
        app_no_magic.wait_for_process_exit()
class WindowSpecificationTestCases(unittest.TestCase):
"""Unit tests for the application.Application class"""
def setUp(self):
"""Set some data and ensure the application is in the state we want"""
Timings.defaults()
self.app = Application().start("Notepad")
self.dlgspec = self.app.UntitledNotepad
self.ctrlspec = self.app.UntitledNotepad.Edit
def tearDown(self):
"""Close the application after tests"""
# close the application
#self.app.UntitledNotepad.menu_select("File->Exit")
self.app.kill()
def test__init__(self):
"""Test creating a new spec by hand"""
wspec = WindowSpecification(
dict(
best_match=u"UntitledNotepad",
app=self.app)
)
self.assertEqual(
wspec.window_text(),
u"Untitled - Notepad")
self.assertEqual(self.dlgspec.app, self.app)
self.assertEqual(self.ctrlspec.app, self.app)
self.assertEqual(wspec.app, self.app)
def test__init__both_keywords(self):
"""Test creating a new spec with ambiguity by process and app simultaneously"""
self.assertRaises(KeyError, WindowSpecification,
dict(best_match=u"UntitledNotepad", app=self.app, pid=self.app.process)
)
def test__call__(self):
"""Test that __call__() correctly raises an error"""
self.assertRaises(AttributeError, self.dlgspec)
self.assertRaises(AttributeError, self.ctrlspec)
# no best_match!
wspec = WindowSpecification(
dict(name=u"blah", app=self.app)
)
self.assertRaises(AttributeError, wspec)
def test_wrapper_object(self):
"""Test that we can get a control"""
self.assertEqual(True, isinstance(self.dlgspec, WindowSpecification))
self.assertEqual(
True,
isinstance(self.dlgspec.find(), hwndwrapper.HwndWrapper)
)
def test_window(self):
"""test specifying a sub window of an existing specification"""
sub_spec = self.dlgspec.by(class_name ="Edit")
sub_spec_legacy = self.dlgspec.window(class_name = "Edit")
self.assertEqual(True, isinstance(sub_spec, WindowSpecification))
self.assertEqual(sub_spec.class_name(), "Edit")
self.assertEqual(sub_spec_legacy.class_name(), "Edit")
def test__getitem__(self):
"""test item access of a windowspec"""
self.assertEqual(
True,
isinstance(self.dlgspec['Edit'], WindowSpecification)
)
self.assertEqual(self.dlgspec['Edit'].class_name(), "Edit")
self.assertRaises(AttributeError, self.ctrlspec.__getitem__, 'edit')
def test_getattr(self):
"""Test getting attributes works correctly"""
self.assertEqual(
True,
isinstance(self.dlgspec.Edit, WindowSpecification)
)
self.assertEqual(self.dlgspec.Edit.class_name(), "Edit")
# check that getting a dialog attribute works correctly
self.assertEqual(
"Notepad",
self.dlgspec.class_name())
# Check handling 'parent' as a WindowSpecification
spec = self.ctrlspec.by(parent=self.dlgspec, visible=True)
self.assertEqual(spec.class_name(), "Edit")
def test_non_magic_getattr(self):
ws = WindowSpecification(dict(best_match="Notepad"))
self.assertEqual(ws.allow_magic_lookup, True)
ws_no_magic = WindowSpecification(dict(best_match="Notepad"), allow_magic_lookup=False)
self.assertEqual(ws_no_magic.allow_magic_lookup, False)
dlg = ws_no_magic.by(best_match="Edit")
has_focus = dlg.has_keyboard_focus()
self.assertIn(has_focus, (True, False))
with self.assertRaises(AttributeError):
ws_no_magic.Edit
def test_exists(self):
"""Check that windows exist"""
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, self.dlgspec.exists(0))
self.assertEqual(True, self.ctrlspec.exists())
# TODO: test a control that is not visible but exists
#self.assertEqual(True, self.app.DefaultIME.exists())
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=.1))
self.assertEqual(True, timestamp() - start < .3)
start = timestamp()
self.assertEqual(False, self.app.BlahBlah.exists(timeout=3))
self.assertEqual(True, 2.7 < timestamp() - start < 3.3)
def test_exists_timing(self):
"""test the timing of the exists method"""
# try ones that should be found immediately
start = timestamp()
self.assertEqual(True, self.dlgspec.exists())
self.assertEqual(True, timestamp() - start < .3)
start = timestamp()
self.assertEqual(True, self.ctrlspec.exists())
self.assertEqual(True, timestamp() - start < .3)
# try one that should not be found
start = timestamp()
self.assertEqual(True, self.dlgspec.exists(.5))
timedif = timestamp() - start
self.assertEqual(True, .49 > timedif < .6)
def test_wait(self):
"""test the functionality and timing of the wait method"""
allowable_error = .2
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("enaBleD "))
time_taken = (timestamp() - start)
if not 0 <= time_taken < (0 + 2 * allowable_error):
self.assertEqual(.02, time_taken)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" exiSTS"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" VISIBLE "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait(" ready enabled"))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("visible exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("exists "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
start = timestamp()
self.assertEqual(self.dlgspec.find(), self.dlgspec.wait("actIve "))
self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.wait, "Invalid_criteria")
def test_wait_non_existing(self):
"""test timing of the wait method for non-existing element"""
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'exists')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_invisible(self):
"""test timing of the wait method for non-existing element and existing invisible one"""
# TODO: re-use an MFC sample for this test
allowable_error = .2
start = timestamp()
self.assertRaises(TimeoutError, self.app.BlahBlah.wait, 'visible')
expected = Timings.window_find_timeout
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
# make sure Status Bar is not visible
status_bar_menu = self.app.UntitledNotepad.menu().item('&View').sub_menu().item('&Status Bar')
if status_bar_menu.is_checked():
status_bar_menu.select()
# check that existing invisible control is still found with 'exists' criterion
status_bar_spec = self.app.UntitledNotepad.by(class_name="msctls_statusbar32", visible=None)
self.assertEqual('StatusBar', status_bar_spec.wait('exists').friendly_class_name())
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'exists visible')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, status_bar_spec.wait, 'visible exists')
self.assertEqual(True, expected - allowable_error <= (timestamp() - start) < expected + allowable_error)
def test_wait_not(self):
"""
Test that wait not fails for all the following
* raises and error when criteria not met
* timing is close to the timeout value
"""
allowable_error = .16
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "enaBleD ", .1, .05)
taken = timestamp() - start
if .1 < (taken) > .1 + allowable_error:
self.assertEqual(.12, taken)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " exiSTS", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " VISIBLE ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, " ready enabled", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "visible exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "exists ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
start = timestamp()
self.assertRaises(TimeoutError, self.dlgspec.wait_not, "actIve ", .1, .05)
self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
self.assertRaises(SyntaxError, self.dlgspec.wait_not, "Invalid_criteria")
# def test_wait_ready(self):
# """Make sure the friendly class is set correctly"""
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitReady(.1, .05))
#
# # it it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotReady(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotReady, .1, .05)
#
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
#
# def testWaitEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitEnabled(.1, .05))
#
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
#
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
#
# def testWaitNotEnabled(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotEnabled, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
#
# def testWaitVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitVisible(.1, .05))
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(0, timestamp() - start)
# #self.assertEqual(True, 0 <= (timestamp() - start) < 0 + allowable_error)
#
# def testWaitNotVisible(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotVisible, .1, .05)
# # it it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertEqual(self.dlgspec.ctrl_(), self.dlgspec.WaitExists(.1, .05))
#
# # it it didn't finish in the allocated time then raise an error
# # we assertEqual to something that we know is not right - to get a
# # better error report
# if not 0 <= (timestamp() - start) < 0 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
#
# def testWaitNotExists(self):
# "Make sure the friendly class is set correctly"
#
# allowable_error = .02
#
# start = timestamp()
# self.assertRaises(RuntimeError, self.dlgspec.WaitNotExists, .1, .05)
# if not .1 <= (timestamp() - start) < .1 + allowable_error:
# self.assertEqual(.1, timestamp() - start)
# #self.assertEqual(True, .1 <= (timestamp() - start) < .1 + allowable_error)
def test_depth(self):
    """Test that descendants() with depth works correctly"""
    # Open the Font dialog so we have a window with a multi-level control tree.
    self.dlgspec.menu_select("Format -> Font")
    # A deeper traversal must return a different (larger) set of controls
    # than a shallow one, otherwise the depth argument is being ignored.
    self.assertNotEqual(
        len(self.app['Font'].descendants(depth=1)),
        len(self.app['Font'].descendants(depth=2)))
def test_dump_tree(self):
    """Make sure dump_tree() doesn't crash"""
    # Smoke test only: dump both a dialog spec and a control spec; any
    # exception fails the test.
    self.dlgspec.dump_tree()
    self.ctrlspec.dump_tree()
def test_dump_tree_file_output(self):
    """Make sure dump_tree() creates correct file"""
    output_filename = "test_dump_tree.txt"
    # First: dump the whole dialog tree and check some known controls appear.
    self.dlgspec.dump_tree(filename=output_filename)
    if os.path.isfile(output_filename):
        with open(output_filename, "r") as test_log_file:
            content = str(test_log_file.readlines())
        self.assertTrue("'Untitled - NotepadEdit'" in content
                        and "'Edit'" in content)
        self.assertTrue("child_window(class_name=\"msctls_statusbar32\"" in content)
        os.remove(output_filename)
    else:
        self.fail("dump_tree can't create a file")
    # Second: the same for a single-control spec.
    self.ctrlspec.dump_tree(filename=output_filename)
    if os.path.isfile(output_filename):
        with open(output_filename, "r") as test_log_file:
            content = str(test_log_file.readlines())
        self.assertTrue("child_window(class_name=\"Edit\")" in content)
        os.remove(output_filename)
    else:
        self.fail("dump_tree can't create a file")
def test_find_elements_re(self):
    """Test for bug #90: A crash in 'find_elements' when called with 'title_re' argument"""
    self.dlgspec.wait('visible')
    # Regression check: regex-based lookup must not raise and must match Notepad.
    windows = findwindows.find_elements(name_re="Untitled - Notepad")
    self.assertTrue(len(windows) >= 1)
if UIA_support:
    class UIAWindowSpecificationTestCases(unittest.TestCase):
        """Unit tests for the application.Application class with UIA backend"""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.defaults()
            self.app = Application(backend="uia").start(_notepad_exe())
            self.dlgspec = self.app.UntitledNotepad

        def tearDown(self):
            """Close the application after tests"""
            self.app.kill()

        def test_child_window_depth(self):
            """Test that child_window() with depth works correctly"""
            # TODO fix same elements at different tree levels on win32 backend
            self.dlgspec.menu_select("Format -> Font")
            font = self.dlgspec.by(name="Font")
            # depth=1 is too shallow to reach the list box, so the lookup
            # must fail...
            with self.assertRaises(findbestmatch.MatchError):
                font.by(best_match="ListBox0", depth=1).find()
            # ...while depth=2 reaches it without error.
            font.by(best_match="ListBox0", depth=2).find()
class WaitUntilDecoratorTests(unittest.TestCase):
    """Unit tests for always_wait_until and always_wait_until_passes decorators"""

    def test_always_wait_until_decorator_success(self):
        """Test always_wait_until_decorator success"""
        # A probe that is immediately truthy should pass straight through.
        @always_wait_until(4, 2)
        def probe():
            return True
        self.assertTrue(probe())

    def test_always_wait_until_decorator_failure(self):
        """Test wait_until_decorator failure"""
        # A probe that never becomes truthy must time out.
        @always_wait_until(4, 2)
        def probe():
            return False
        self.assertRaises(TimeoutError, probe)

    def test_always_wait_until_passes_decorator_success(self):
        """Test always_wait_until_passes_decorator success"""
        # A probe that does not raise should succeed on the first attempt.
        @always_wait_until_passes(4, 2)
        def probe():
            return True
        self.assertTrue(probe())

    def test_always_wait_until_passes_decorator_failure(self):
        """Test always_wait_until_passes_decorator failure"""
        # A probe that always raises must eventually surface as TimeoutError.
        @always_wait_until_passes(4, 2)
        def probe():
            raise Exception("Unexpected Error in foo")
        self.assertRaises(TimeoutError, probe)
class MultiLevelWindowSpecificationTests(unittest.TestCase):
    """Unit tests for multi-level (3+) WindowSpecification objects"""
    # The whole method set is chosen at class-definition time: UIA variants
    # when UIA support is available, win32 equivalents otherwise.
    if UIA_support:
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.slow()
            self.app = Application(backend='uia').start(os.path.join(mfc_samples_folder, u"RowList.exe"))
            self.dlg = self.app.RowListSampleApplication

        def tearDown(self):
            """Close the application after tests"""
            self.dlg.CloseButton.click()
            self.dlg.wait_not('visible')

        def test_3level_specification(self):
            """Test that controls can be accessed by 3 levels of attributes"""
            self.dlg.Toolbar.About.click()
            self.dlg.AboutRowList.OK.click()
            #self.dlg.AboutRowList.wait_not('visible') # XXX: it takes more than 50 seconds!
    else: # Win32
        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.defaults()
            self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
            self.dlg = self.app.CommonControlsSample

        def tearDown(self):
            """Close the application after tests"""
            self.dlg.SendMessage(win32defines.WM_CLOSE)

        def test_4level_specification(self):
            """Test that controls can be accessed by 4 levels of attributes"""
            self.assertEqual(self.dlg.CPagerCtrl.Pager.Toolbar.button_count(), 12)
if UIA_support:
    class DesktopUiaWindowSpecificationTests(unittest.TestCase):
        """Unit tests for Desktop(backend='uia') object"""

        def setUp(self):
            """Set some data and ensure the application is in the state we want"""
            Timings.slow()
            # Browse the MFC samples folder in Explorer so known windows exist.
            self.app = Application().start('explorer.exe "' + mfc_samples_folder_32 + '"')
            self.desktop = Desktop(backend='uia')
            self.desktop_no_magic = Desktop(backend='uia', allow_magic_lookup=False)

        def tearDown(self):
            """Close the application after tests"""
            self.desktop.MFC_samplesDialog.close()
            self.desktop.MFC_samplesDialog.wait_not('exists')

        def test_folder_list(self):
            """Test that ListViewWrapper returns correct files list in explorer.exe"""
            files_list = self.desktop.MFC_samplesDialog.Shell_Folder_View.Items_View.find()
            self.assertEqual([item.window_text() for item in files_list.get_items()],
                             [u'x64', u'BCDialogMenu.exe', u'CmnCtrl1.exe', u'CmnCtrl2.exe', u'CmnCtrl3.exe',
                              u'CtrlTest.exe', u'mfc100u.dll', u'NewControls.exe', u'RebarTest.exe', u'RowList.exe', u'TrayMenu.exe'])
            self.assertEqual(files_list.item('RebarTest.exe').window_text(), 'RebarTest.exe')

        def test_set_backend_to_window_uia(self):
            """Set backend to method window(), except exception ValueError"""
            # Re-specifying any backend on a Desktop-bound spec is an error.
            with self.assertRaises(ValueError):
                self.desktop.window(backend='uia', name='MFC_samplesDialog')
            with self.assertRaises(ValueError):
                self.desktop.window(backend='win32', name='MFC_samplesDialog')

        def test_get_list_of_windows_uia(self):
            """Test that method .windows() returns a non-empty list of windows"""
            dlgs = self.desktop.windows()
            self.assertTrue(len(dlgs) > 1)

        def test_set_backend_to_windows_uia(self):
            """Set backend to method .windows(), except exception ValueError"""
            with self.assertRaises(ValueError):
                self.desktop.windows(backend='win32')
            with self.assertRaises(ValueError):
                self.desktop.windows(backend='uia')

        def test_only_visible_windows_uia(self):
            """Set visible=True to method .windows()"""
            dlgs = self.desktop.windows(visible=True)
            self.assertTrue(all([win.is_visible() for win in dlgs]))

        def test_only_enable_windows_uia(self):
            """Set enable_only to the method windows"""
            dlgs = self.desktop.windows(enabled=True)
            self.assertTrue(all([win.is_enabled() for win in dlgs]))

        def test_non_magic_desktop(self):
            """Attribute-style (magic) lookup must be off when allow_magic_lookup=False"""
            from pywinauto.controls.uiawrapper import UIAWrapper
            self.assertEqual(self.desktop.allow_magic_lookup, True)
            self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
            # Explicit lookups still work...
            dlgs = self.desktop_no_magic.windows()
            self.assertTrue(len(dlgs) > 1)
            window = self.desktop_no_magic.window(name="MFC_samples")
            self.assertEqual(window.allow_magic_lookup, False)
            dlg = window.by(class_name="ShellTabWindowClass").find()
            self.assertIsInstance(dlg, UIAWrapper)
            has_focus = dlg.has_keyboard_focus()
            self.assertIn(has_focus, (True, False))
            # ...but attribute access must raise instead of searching.
            with self.assertRaises(AttributeError):
                self.desktop_no_magic.MFC_samples
            with self.assertRaises(AttributeError):
                window.ShellTabWindowClass
class DesktopWin32WindowSpecificationTests(unittest.TestCase):
    """Unit tests for Desktop(backend='win32') object"""

    def setUp(self):
        """Set some data and ensure the application is in the state we want"""
        Timings.defaults()
        self.app = Application(backend='win32').start(os.path.join(mfc_samples_folder, u"CmnCtrl3.exe"))
        self.desktop = Desktop(backend='win32')
        self.desktop_no_magic = Desktop(backend='win32', allow_magic_lookup=False)
        self.window_title = 'Common Controls Sample'

    def tearDown(self):
        """Close the application after tests"""
        self.desktop.window(name=self.window_title, pid=self.app.process).SendMessage(win32defines.WM_CLOSE)

    def test_simple_access_through_desktop(self):
        """Test that controls can be accessed by 4 levels of attributes"""
        dlg = self.desktop.window(name=self.window_title, pid=self.app.process)
        self.assertEqual(dlg.Pager.Toolbar.button_count(), 12)

    def test_set_backend_to_window_win32(self):
        """Set backend to method window(), except exception ValueError"""
        # Re-specifying any backend on a Desktop-bound spec is an error.
        with self.assertRaises(ValueError):
            self.desktop.window(backend='uia', name=self.window_title, pid=self.app.process)
        with self.assertRaises(ValueError):
            self.desktop.window(backend='win32', name=self.window_title, pid=self.app.process)

    def test_get_list_of_windows_win32(self):
        """Test that method .windows() returns a non-empty list of windows"""
        dlgs = self.desktop.windows()
        self.assertTrue(len(dlgs) > 1)
        window_titles = [win_obj.window_text() for win_obj in dlgs]
        self.assertTrue(self.window_title in window_titles)

    def test_set_backend_to_windows_win32(self):
        """Set backend to method windows, except exception ValueError"""
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='win32')
        with self.assertRaises(ValueError):
            self.desktop.windows(backend='uia')

    def test_only_visible_windows_win32(self):
        """Set visible=True to method .windows()"""
        dlgs = self.desktop.windows(visible=True)
        self.assertTrue(all([win.is_visible() for win in dlgs]))

    def test_only_enable_windows_win32(self):
        """Set enable_only to the method windows"""
        dlgs = self.desktop.windows(enabled=True)
        self.assertTrue(all([win.is_enabled() for win in dlgs]))

    def test_from_point_win32(self):
        """Test method Desktop(backend='win32').from_point(x, y)"""
        combo = self.app.Common_Controls_Sample.ComboBox.find()
        x, y = combo.rectangle().mid_point()
        combo_from_point = self.desktop.from_point(x, y)
        self.assertEqual(combo, combo_from_point)

    def test_top_from_point_win32(self):
        """Test method Desktop(backend='win32').top_from_point(x, y)"""
        combo = self.app.Common_Controls_Sample.ComboBox.find()
        dlg = self.app.Common_Controls_Sample.find()
        x, y = combo.rectangle().mid_point()
        # top_from_point should resolve to the top-level dialog, not the combo.
        dlg_from_point = self.desktop.top_from_point(x, y)
        self.assertEqual(dlg, dlg_from_point)

    def test_non_magic_desktop(self):
        """Attribute-style (magic) lookup must be off when allow_magic_lookup=False"""
        self.assertEqual(self.desktop.allow_magic_lookup, True)
        self.assertEqual(self.desktop_no_magic.allow_magic_lookup, False)
        window = self.desktop_no_magic.window(name=self.window_title, pid=self.app.process)
        self.assertEqual(window.allow_magic_lookup, False)
        # Explicit lookups still work...
        dlg = window.by(class_name="msctls_trackbar32").find()
        self.assertIsInstance(dlg, TrackbarWrapper)
        pos = dlg.get_position()
        self.assertIsInstance(pos, six.integer_types)
        # ...but attribute access must raise instead of searching.
        with self.assertRaises(AttributeError):
            getattr(self.desktop_no_magic, self.window_title.replace(" ", "_"))
        with self.assertRaises(AttributeError):
            window.msctls_trackbar32
if __name__ == "__main__":
    # Run every TestCase in this module when executed directly.
    unittest.main()
|
main.py |
import tensorflow as tf
import threading
import time
from tensorflow.python.client import timeline
import Actor
import GUI
from Learner import Learner
from Displayer import DISPLAYER
import settings
class Sess(tf.Session):
    """tf.Session that traces every run() with fixed RunOptions/RunMetadata.

    The metadata object accumulates step stats across all run() calls, so a
    Chrome timeline can be exported after the session is closed.
    """

    def __init__(self, options, meta, *args, **kwargs):
        """Remember the tracing options/metadata, then defer to tf.Session."""
        super().__init__(*args, **kwargs)
        self.op = options
        self.meta = meta

    def run(self, *args, **kwargs):
        """Forward to tf.Session.run, always injecting the stored tracing args."""
        return super().run(*args, options=self.op, run_metadata=self.meta, **kwargs)
if __name__ == '__main__':
    tf.reset_default_graph()
    # Trace every session.run() so a Chrome timeline can be written at exit.
    options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    meta = tf.RunMetadata()
    config = tf.ConfigProto(log_device_placement=True,
                            device_count={"CPU": 10, "GPU": 1},
                            inter_op_parallelism_threads=10)
    with Sess(options, meta, config=config) as sess:
        # One actor per CPU device; the learner gets the GPU.
        workers = []
        for i in range(settings.NB_ACTORS):
            with tf.device("/device:CPU:"+str(i)):
                workers.append(Actor.Actor(sess, i + 1))
        print("Initializing learner...")
        with tf.device("/device:GPU:0"):
            learner = Learner(sess, *workers[0].get_env_features())
        print("Learner initialized !\n")
        if settings.LOAD:
            learner.load()
        # Actors and the learner each run in their own thread.
        threads = []
        for i in range(settings.NB_ACTORS):
            thread = threading.Thread(target=workers[i].run)
            threads.append(thread)
        threads.append(threading.Thread(target=learner.run))
        if settings.GUI:
            GUI_thread = threading.Thread(target=GUI.main)
            GUI_thread.start()
        sess.run(tf.global_variables_initializer())
        for t in threads:
            t.start()
        print("Running...")
        # Main thread idles until an actor requests shutdown or Ctrl-C.
        try:
            while not Actor.STOP_REQUESTED:
                time.sleep(1)
        except KeyboardInterrupt:
            Actor.request_stop()
        for t in threads:
            t.join()
        learner.save()
        DISPLAYER.disp()
        DISPLAYER.disp_q()
    # Session closed; dump the accumulated step stats as a Chrome trace.
    f_t = timeline.Timeline(meta.step_stats)
    chrome_trace = f_t.generate_chrome_trace_format()
    with open("timeline.json", 'w') as f:
        f.write(chrome_trace)
    if settings.GUI:
        GUI_thread.join()
|
TN_utils.py | # Individual Cases Text Parser
# From Tamil Nadu Bulletins
from __future__ import annotations
import multiprocessing
import sys
import pathlib
import argparse
import os
import re
import json
import pdfplumber
# TypedDict only exists in typing from 3.8 onwards; Dict/List/Optional are
# available in typing on every supported version, so import them
# unconditionally.  (The original else-branch imported only TypedDict,
# leaving Dict/List/Optional undefined on Python < 3.8.)
if sys.version_info >= (3, 8):
    from typing import TypedDict, Dict, List, Optional
else:
    from typing import Dict, List, Optional
    from typing_extensions import TypedDict
# Resolve paths relative to this file so the parser works from any CWD.
_path_this_dir = pathlib.Path(__file__).parent
_path_to_examples = _path_this_dir.joinpath("./case_examples")
# Seconds allowed per case before the extraction subprocess is killed.
_set_timeout = 10
# Text chunks matching any of these patterns are bulletin boilerplate,
# not individual case records.
_stop_words = [
    r".*24\*7.*",
    r".*Passengers entered.*",
    r".*Death\s+in.*"
]
# Split markers used to trim trailing boilerplate off a partial case that
# is carried over to the next page.
_stop_splits = [
    r"24\*7",
    r"Passengers entered",
    r"Death\s+in"
]
def read_text(test_text: str, category: str = None, date: str = None) -> Optional[CaseInfo]:
    """Parse one free-text case description into a CaseInfo.

    The regex-based extraction can hang on unexpected input, so it runs in
    a separate process that is killed after _set_timeout seconds.

    Args:
        test_text: Raw text of a single case.
        category: Optional category carried over from the bulletin section.
        date: Optional bulletin date.

    Returns:
        The parsed CaseInfo, or None if extraction timed out before a
        result was produced.
    """
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    p = multiprocessing.Process(target=CaseInfo.extract, args=(test_text, category, date, return_dict))
    p.start()
    p.join(_set_timeout)
    if p.is_alive():
        p.kill()
        p.join()  # reap the killed worker so it does not linger as a zombie
    # On timeout the worker never stored a result; the original indexed
    # return_dict["result"] and raised KeyError here. Return None instead.
    return return_dict.get("result")
def read_file(filename: str, date: str = None) -> List[CaseInfo]:
    """Parse all case records from a .txt or .pdf bulletin file.

    For PDFs, case text can span page boundaries; `last_match` carries the
    trailing partial case from one page into the next.
    """
    result = list()
    if filename.endswith('.txt'):
        # Plain text: the whole file is treated as a single case.
        result.append(read_text(open(filename).read()))
    elif filename.endswith('.pdf'):
        with pdfplumber.open(filename) as pdf:
            last_match = ""
            category = None
            for i, page in enumerate(pdf.pages):
                text = page.extract_text()
                if date is None:
                    # Find date from bulletin if not provided through arguments
                    template = re.compile(r".*Media\s+Bulletin\s+(?P<date>((?!\n).)+)\n.*$", re.DOTALL)
                    matches = re.match(template, text)
                    if matches:
                        date = matches.groupdict()["date"]
                if "death case" in text.lower():
                    # Prepend any partial case carried over from the previous page.
                    text = last_match + text
                    last_match = ""
                    # Sections are delimited by "Death in <category>" headings.
                    category_match = re.split(r"Death\s+in", text, flags=re.IGNORECASE)
                    for item in category_match:
                        new_category = re.split(r'death\scase', item, flags=re.IGNORECASE)[0]
                        new_category = " ".join(new_category.split())
                        # An empty or serial-number-only heading means the
                        # previous category continues.
                        if not new_category or "no." in new_category.lower():
                            new_category = category
                        # Individual cases are delimited by "Death Case".
                        case_match = re.split(r"Death\s+Case", item, flags=re.IGNORECASE)
                        for ii in case_match:
                            ii = " ".join(ii.split(" "))
                            if not any([re.match(s, ii, flags=re.IGNORECASE|re.DOTALL) for s in _stop_words]):
                                # NOTE(review): bare except silently drops
                                # unparseable cases by design (best effort).
                                try: result.append(read_text(ii, category=new_category, date=date))
                                except: pass
                            else:
                                # Boilerplate chunk: keep only the part before
                                # the boilerplate as a possible partial case.
                                last_match = ii
                                for s in _stop_splits:
                                    last_match = re.split(s, last_match, flags=re.IGNORECASE)[0]
                        category = new_category
                else:
                    # Page with no cases: flush any carried-over partial case.
                    if last_match:
                        try: result.append(read_text(last_match, category=category, date=date))
                        except: pass
                    last_match = None
    return result
def process_data(cls, data: Dict):
    """Populate an instance of *cls* from a regex groupdict *data*.

    For each annotated field, prefer the field type's own extract()
    classmethod (e.g. TestInfo.extract); otherwise coerce the raw string
    through the type constructor.

    NOTE(review): this uses eval() both on annotation strings and on
    captured text — acceptable only because the input comes from our own
    bulletins; never feed untrusted data through here.
    """
    response = cls()
    for key in cls.__annotations__:
        # Annotations are lazy strings (from __future__ import annotations),
        # so resolve the name to the actual class before introspecting it.
        custom_enforcer_function = getattr(eval(cls.__annotations__[key].__forward_arg__), "extract", None)
        response[key] = None
        if callable(custom_enforcer_function):
            response[key] = custom_enforcer_function(data[key])
        else:
            if data[key]:
                # Fallback: build e.g. int("42") / str("...") from the match.
                response[key] = eval("{}(\"{}\")".format(cls.__annotations__[key].__forward_arg__, data[key].strip()))
    return response
class TestInfo(TypedDict):
    """COVID test outcome parsed from a '...having/outcome of...' phrase."""
    date: str
    details: str

    @classmethod
    def process(cls, data: Dict) -> TestInfo:
        return process_data(cls, data)

    @classmethod
    def extract(cls, text: str) -> TestInfo:
        """Extract test details and date; returns None when nothing matches."""
        if not text:
            return None
        input_text = text.strip()
        # details = everything after 'having'/'outcome of' up to ' on ';
        # date = whatever follows ' on ' (optional).
        regex = r'\D*(having|outcome of)\s*(?P<details>((?!\s+on\s+).)+)(\s*on\s+(?P<date>((?!\.$|\.\s+).)+))*.*$'
        template = re.compile(regex, re.IGNORECASE|re.DOTALL)
        matches = re.match(template, input_text)
        if matches:
            matches = matches.groupdict()
            return cls.process(data = matches)
class SymptomInfo(TypedDict):
    """Symptom description and its duration in days."""
    days: int
    details: str

    @classmethod
    def process(cls, data: Dict) -> SymptomInfo:
        return process_data(cls, data)

    @classmethod
    def extract(cls, text: str) -> SymptomInfo:
        """Extract symptom details and optional 'for N day(s)' duration."""
        if not text:
            return None
        input_text = text.strip()
        # details = everything before ' for'; days = the optional number in
        # 'for N day(s)'.
        regex = r'(?P<details>((?!\s+for).)+)(\s*for\s+(?P<days>\d+)\s+day(s)*)*.*$'
        template = re.compile(regex, re.IGNORECASE|re.DOTALL)
        matches = re.match(template, input_text)
        if matches:
            matches = matches.groupdict()
            return cls.process(data = matches)
class AdmissionInfo(TypedDict):
    """Hospital admission details parsed from an 'admitted on ...' phrase."""
    symptoms: SymptomInfo
    location: str
    date: str
    time: str

    @classmethod
    def process(cls, data: Dict) -> AdmissionInfo:
        return process_data(cls, data)

    @classmethod
    def extract(cls, text: str) -> AdmissionInfo:
        """Extract admission date/time, facility and presenting symptoms."""
        if not text:
            return None
        input_text = text.strip()
        # The facility can appear either before or after the symptom list,
        # hence two alternative location groups merged below.
        regex = r'.*admitted\s+on\s+(?P<date>((?!\s).)+)(\s*at\s+(?P<time>((?!in|with).)+)\s+)*(\s*(in a|in|at a)\s+(?P<location_1>((?!with).)+)\s+)*(\s*with complaints of\s+(?P<symptoms>((?!in a\s+|at\s+|$).)+)\s*)*(\s*(in a|in|at a)\s+(?P<location_2>((?!\s+and|$).)+)\s*)*.*$'
        template = re.compile(regex, re.IGNORECASE|re.DOTALL)
        matches = re.match(template, input_text)
        if matches:
            matches = matches.groupdict()
            # Collapse the two possible capture positions into one field.
            matches["location"] = matches["location_1"] or matches["location_2"]
            return cls.process(data = matches)
class DeathInfo(TypedDict):
    """Death date, time and cause parsed from a 'died on ...' phrase."""
    cause: str
    date: str
    time: str

    @classmethod
    def process(cls, data: Dict) -> DeathInfo:
        return process_data(cls, data)

    @classmethod
    def extract(cls, text: str) -> DeathInfo:
        """Extract death date, optional time and optional 'due to' cause."""
        if not text:
            return None
        input_text = text.strip()
        derived_dict = dict()
        regex = r'.*died\s+(on|in)\s+(?P<date>((?!\s).)+)(\s+at\s+(?P<time>((?!due|$).)+)\s+)*(\s*due\s+to(?P<cause>.+))*.*$'
        template = re.compile(regex, re.IGNORECASE|re.DOTALL)
        matches = re.match(template, input_text)
        if matches:
            matches = matches.groupdict()
            derived_dict = {**derived_dict, **matches}
            return cls.process(data = derived_dict)
class CaseInfo(TypedDict):
    """Structured record for one death case parsed from a bulletin."""
    case_id: int
    category: str
    date: str
    age: int
    gender: str
    location: str
    comorbidity: str
    test: TestInfo
    admission: AdmissionInfo
    death: DeathInfo
    raw_data: str

    @classmethod
    def process(cls, data: Dict) -> CaseInfo:
        return process_data(cls, data)

    @classmethod
    def extract(cls, text: str, category: str, date: str, return_dict: Dict = None) -> Dict:
        """Parse *text* into a CaseInfo and store it under return_dict["result"].

        Designed to run in a worker process (see read_text): the parsed case
        is written into the shared *return_dict* which is also returned.

        Fix: the original crashed with TypeError when called directly with
        the default return_dict=None; a plain dict is substituted now.
        """
        if return_dict is None:
            return_dict = {}
        input_text = text.strip()
        if not input_text.endswith("."):
            input_text += "."
        derived_dict = {
            "category" : category,
            "date": date,
        }
        # First pass: split off a leading serial number from the raw record.
        regex = r'\D*((?P<case_id>\d+)*\D*\n+)*(?P<raw_data>.*)$'
        template = re.compile(regex, re.IGNORECASE|re.DOTALL)
        matches = re.match(template, input_text)
        if matches:
            matches = matches.groupdict()
            matches["raw_data"] = " ".join(matches["raw_data"].split())
            derived_dict = {**derived_dict, **matches}
        # Second pass: pull age/gender/location and the nested sub-records
        # out of the normalized sentence.
        regex = r'\D*(?P<age>\d+) year[s]* old (?P<gender>\D+) from (?P<location>((?!having|with|admitted).)*)(?P<test_info_1>having((?!with|admitted).)*)*(?P<comorbidity>with((?!admitted).)*)*(?P<admission>admitted((?!died|\.\s+|\.$).)*)*(?P<test_info_2>((?!died).)+)*(?P<death>died\s*(on|in).*)*\..*$'
        template = re.compile(regex, re.IGNORECASE)
        matches = re.match(template, derived_dict["raw_data"])
        if matches:
            matches = matches.groupdict()
            # The test info can be captured in either of two positions.
            matches["test"] = matches["test_info_1"] or matches["test_info_2"]
            derived_dict = {**derived_dict, **matches}
        return_dict["result"] = cls.process(data = derived_dict)
        return return_dict
if __name__ == '__main__':
    # CLI entry point: parse a single file or every file in a directory and
    # write the combined results to data.json.
    parser = argparse.ArgumentParser(description='Parser for Tamil Nadu case data.')
    parser.add_argument('--file', type=str, help='File containing data.')
    parser.add_argument('--dir', type=str, help='Directory containing files containing data.')
    args = parser.parse_args()
    data = None
    if args.file:
        data = read_file(args.file)
    elif args.dir:
        _path_to_examples = args.dir
        data = list()
        list_of_test_files = [f.path for f in os.scandir(_path_to_examples)]
        # enumerate() instead of list.index(): index() is an O(n) scan per
        # iteration and reports the wrong position for duplicate paths.
        for idx, test_file in enumerate(list_of_test_files):
            print(f'Reading file {test_file} {idx}/{len(list_of_test_files)}')
            try:
                new_data = read_file(test_file)
                if new_data:
                    data += new_data
            except Exception as e:
                # Best effort: report the failing file and keep going.
                print(e)
    with open("data.json", "w") as out_file:
        out_file.write(json.dumps(data, indent=4))
|
test_client.py | from __future__ import annotations
import asyncio
import functools
import gc
import inspect
import logging
import os
import pickle
import random
import subprocess
import sys
import threading
import traceback
import types
import warnings
import weakref
import zipfile
from collections import deque
from collections.abc import Generator
from contextlib import contextmanager, suppress
from functools import partial
from operator import add
from threading import Semaphore
from time import sleep
from typing import Any
import psutil
import pytest
import yaml
from tlz import concat, first, identity, isdistinct, merge, pluck, valmap
import dask
import dask.bag as db
from dask import delayed
from dask.optimization import SubgraphCallable
from dask.utils import parse_timedelta, stringify, tmpfile
from distributed import (
CancelledError,
Executor,
LocalCluster,
Nanny,
TimeoutError,
Worker,
fire_and_forget,
get_client,
get_worker,
performance_report,
profile,
secede,
)
from distributed.client import (
Client,
Future,
_get_global_client,
as_completed,
default_client,
ensure_default_client,
futures_of,
get_task_metadata,
temp_default_client,
tokenize,
wait,
)
from distributed.comm import CommClosedError
from distributed.compatibility import LINUX, WINDOWS
from distributed.core import Status
from distributed.metrics import time
from distributed.objects import HasWhat, WhoHas
from distributed.scheduler import (
COMPILED,
CollectTaskMetaDataPlugin,
KilledWorker,
Scheduler,
)
from distributed.sizeof import sizeof
from distributed.utils import is_valid_xml, mp_context, sync, tmp_text
from distributed.utils_test import (
TaskStateMetadataPlugin,
_UnhashableCallable,
async_wait_for,
asyncinc,
captured_logger,
cluster,
dec,
div,
double,
gen_cluster,
gen_test,
geninc,
get_cert,
inc,
map_varying,
nodebug,
popen,
pristine_loop,
randominc,
save_sys_modules,
slowadd,
slowdec,
slowinc,
throws,
tls_only_security,
varying,
wait_for,
)
# All tests in this module run in the "ci1" CI partition.
pytestmark = pytest.mark.ci1


@gen_cluster(client=True)
async def test_submit(c, s, a, b):
    """submit() returns a pending Future bound to this client; results chain."""
    x = c.submit(inc, 10, key="x")
    assert not x.done()
    assert isinstance(x, Future)
    assert x.client is c
    result = await x
    assert result == 11
    assert x.done()
    y = c.submit(inc, 20, key="y")
    # Futures can be used directly as arguments to later submissions.
    z = c.submit(add, x, y)
    result = await z
    assert result == 11 + 21
    s.validate_state()


@gen_cluster(client=True)
async def test_map(c, s, a, b):
    """map() over iterables, over futures, and with keyword arguments."""
    L1 = c.map(inc, range(5))
    assert len(L1) == 5
    assert isdistinct(x.key for x in L1)
    assert all(isinstance(x, Future) for x in L1)
    result = await L1[0]
    assert result == inc(0)
    assert len(s.tasks) == 5
    # Mapping over futures composes the computations.
    L2 = c.map(inc, L1)
    result = await L2[1]
    assert result == inc(inc(1))
    assert len(s.tasks) == 10
    # assert L1[0].key in s.tasks[L2[0].key]
    total = c.submit(sum, L2)
    result = await total
    assert result == sum(map(inc, map(inc, range(5))))
    L3 = c.map(add, L1, L2)
    result = await L3[1]
    assert result == inc(1) + inc(inc(1))
    # Mismatched lengths: map stops at the shorter iterable, like zip().
    L4 = c.map(add, range(3), range(4))
    results = await c.gather(L4)
    assert results == list(map(add, range(3), range(4)))

    def f(x, y=10):
        return x + y

    # Keyword arguments may be plain values...
    L5 = c.map(f, range(5), y=5)
    results = await c.gather(L5)
    assert results == list(range(5, 10))
    # ...or futures resolved before the call.
    y = c.submit(f, 10)
    L6 = c.map(f, range(5), y=y)
    results = await c.gather(L6)
    assert results == list(range(20, 25))
    s.validate_state()


@gen_cluster(client=True)
async def test_map_empty(c, s, a, b):
    """map() over an empty iterable yields no futures and gathers to []."""
    L1 = c.map(inc, [], pure=False)
    assert len(L1) == 0
    results = await c.gather(L1)
    assert results == []


@gen_cluster(client=True)
async def test_map_keynames(c, s, a, b):
    """The key= argument controls future key prefixes or exact key names."""
    futures = c.map(inc, range(4), key="INC")
    assert all(f.key.startswith("INC") for f in futures)
    assert isdistinct(f.key for f in futures)
    # The same prefix over different inputs must still produce distinct keys.
    futures2 = c.map(inc, [5, 6, 7, 8], key="INC")
    assert [f.key for f in futures] != [f.key for f in futures2]
    # A list of keys is used verbatim.
    keys = ["inc-1", "inc-2", "inc-3", "inc-4"]
    futures = c.map(inc, range(4), key=keys)
    assert [f.key for f in futures] == keys
@gen_cluster(client=True)
async def test_map_retries(c, s, a, b):
    """retries= re-runs failing tasks; failures beyond the budget surface."""
    # Each inner list is one task's successive outcomes (exception = failure).
    args = [
        [ZeroDivisionError("one"), 2, 3],
        [4, 5, 6],
        [ZeroDivisionError("seven"), ZeroDivisionError("eight"), 9],
    ]
    # Two retries is enough for every task to eventually succeed.
    x, y, z = c.map(*map_varying(args), retries=2)
    assert await x == 2
    assert await y == 4
    assert await z == 9
    # One retry: z fails twice, so its second error is raised.
    x, y, z = c.map(*map_varying(args), retries=1, pure=False)
    assert await x == 2
    assert await y == 4
    with pytest.raises(ZeroDivisionError, match="eight"):
        await z
    # No retries: first failures are raised immediately.
    x, y, z = c.map(*map_varying(args), retries=0, pure=False)
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 4
    with pytest.raises(ZeroDivisionError, match="seven"):
        await z


@gen_cluster(client=True)
async def test_map_batch_size(c, s, a, b):
    """batch_size= submits tasks in chunks without changing the results."""
    result = c.map(inc, range(100), batch_size=10)
    result = await c.gather(result)
    assert result == list(range(1, 101))
    result = c.map(add, range(100), range(100), batch_size=10)
    result = await c.gather(result)
    assert result == list(range(0, 200, 2))
    # mismatch shape
    result = c.map(add, range(100, 200), range(10), batch_size=2)
    result = await c.gather(result)
    assert result == list(range(100, 120, 2))


@gen_cluster(client=True)
async def test_custom_key_with_batches(c, s, a, b):
    """Test of <https://github.com/dask/distributed/issues/4588>"""
    # Explicit per-task keys must survive batched submission.
    futs = c.map(
        lambda x: x**2,
        range(10),
        batch_size=5,
        key=[str(x) for x in range(10)],
    )
    assert len(futs) == 10
    await wait(futs)


@gen_cluster(client=True)
async def test_compute_retries(c, s, a, b):
    """compute(retries=N) applies the retry budget to delayed computations."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    # Sanity check for varying() use
    x = c.compute(delayed(varying(args))())
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    # Same retries for all
    x = c.compute(delayed(varying(args))(), retries=1)
    with pytest.raises(ZeroDivisionError, match="two"):
        await x
    x = c.compute(delayed(varying(args))(), retries=2)
    assert await x == 3
    args.append(4)
    x = c.compute(delayed(varying(args))(), retries=2)
    assert await x == 3
@gen_cluster(client=True)
async def test_compute_retries_annotations(c, s, a, b):
    """dask.annotate(retries=...) sets per-future retry budgets for compute()."""
    # Per-future retries
    xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
    yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
    zargs = [80, 90, 100]
    with dask.annotate(retries=2):
        x = delayed(varying(xargs))()
        y = delayed(varying(yargs))()
    x, y = c.compute([x, y], optimize_graph=False)
    gc.collect()
    # x succeeds on the third attempt; y exhausts its retries.
    assert await x == 30
    with pytest.raises(ZeroDivisionError, match="five"):
        await y
    # Only y and z are annotated here; x has no retries and fails at once.
    x = delayed(varying(xargs))()
    with dask.annotate(retries=2):
        y = delayed(varying(yargs))()
        z = delayed(varying(zargs))()
    x, y, z = c.compute([x, y, z], optimize_graph=False)
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 70
    assert await z == 80


def test_retries_get(c):
    """The synchronous compute(retries=...) path honors the retry budget too."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    x = delayed(varying(args))()
    assert x.compute(retries=5) == 3
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    x = delayed(varying(args))()
    with pytest.raises(ZeroDivisionError):
        x.compute()


@gen_cluster(client=True)
async def test_compute_persisted_retries(c, s, a, b):
    """retries= works when computing an already-persisted collection."""
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    # Sanity check
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x)
    with pytest.raises(ZeroDivisionError, match="one"):
        await fut
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=1)
    with pytest.raises(ZeroDivisionError, match="two"):
        await fut
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=2)
    assert await fut == 3
    args.append(4)
    x = c.persist(delayed(varying(args))())
    fut = c.compute(x, retries=3)
    assert await fut == 3
@gen_cluster(client=True)
async def test_persist_retries(c, s, a, b):
    """persist(retries=N) applies the retry budget at persist time."""
    # Same retries for all
    args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 3]
    x = c.persist(delayed(varying(args))(), retries=1)
    x = c.compute(x)
    with pytest.raises(ZeroDivisionError, match="two"):
        await x
    x = c.persist(delayed(varying(args))(), retries=2)
    x = c.compute(x)
    assert await x == 3


@gen_cluster(client=True)
async def test_persist_retries_annotations(c, s, a, b):
    """dask.annotate(retries=...) sets per-key retry budgets for persist()."""
    # Per-key retries
    xargs = [ZeroDivisionError("one"), ZeroDivisionError("two"), 30, 40]
    yargs = [ZeroDivisionError("five"), ZeroDivisionError("six"), 70]
    zargs = [80, 90, 100]
    # Only y and z are annotated; x fails on its first error.
    x = delayed(varying(xargs))()
    with dask.annotate(retries=2):
        y = delayed(varying(yargs))()
        z = delayed(varying(zargs))()
    x, y, z = c.persist([x, y, z], optimize_graph=False)
    x, y, z = c.compute([x, y, z])
    with pytest.raises(ZeroDivisionError, match="one"):
        await x
    assert await y == 70
    assert await z == 80


@gen_cluster(client=True)
async def test_retries_dask_array(c, s, a, b):
    """retries= is accepted for dask.array computations as well."""
    da = pytest.importorskip("dask.array")
    x = da.ones((10, 10), chunks=(3, 3))
    future = c.compute(x.sum(), retries=2)
    y = await future
    assert y == 100
@gen_cluster(client=True)
async def test_future_repr(c, s, a, b):
    """repr()/HTML repr of a Future show its key, status and result type."""
    pd = pytest.importorskip("pandas")
    x = c.submit(inc, 10)
    y = c.submit(pd.DataFrame, {"x": [1, 2, 3]})
    await x
    await y
    for func in [repr, lambda x: x._repr_html_()]:
        assert str(x.key) in func(x)
        assert str(x.status) in func(x)
        assert str(x.status) in repr(c.futures[x.key])
        assert "int" in func(x)
        assert "pandas" in func(y)
        assert "DataFrame" in func(y)


@gen_cluster(client=True)
async def test_future_tuple_repr(c, s, a, b):
    """Futures whose key is a tuple render every tuple element in the repr."""
    da = pytest.importorskip("dask.array")
    y = da.arange(10, chunks=(5,)).persist()
    f = futures_of(y)[0]
    for func in [repr, lambda x: x._repr_html_()]:
        for k in f.key:
            assert str(k) in func(f)


@gen_cluster(client=True)
async def test_Future_exception(c, s, a, b):
    """exception() returns the task's exception, or None on success."""
    x = c.submit(div, 1, 0)
    result = await x.exception()
    assert isinstance(result, ZeroDivisionError)
    x = c.submit(div, 1, 1)
    result = await x.exception()
    assert result is None


def test_Future_exception_sync(c):
    """Synchronous variant of test_Future_exception."""
    x = c.submit(div, 1, 0)
    assert isinstance(x.exception(), ZeroDivisionError)
    x = c.submit(div, 1, 1)
    assert x.exception() is None
@gen_cluster(client=True)
async def test_Future_release(c, s, a, b):
    # Released Futures should be removed timely from the Client
    # Case 1: a finished future.
    x = c.submit(div, 1, 1)
    await x
    x.release()
    await asyncio.sleep(0)
    assert not c.futures
    # Case 2: a still-running future.
    x = c.submit(slowinc, 1, delay=0.5)
    x.release()
    await asyncio.sleep(0)
    assert not c.futures
    # Case 3: an errored future.
    x = c.submit(div, 1, 0)
    await x.exception()
    x.release()
    await asyncio.sleep(0)
    assert not c.futures


def test_Future_release_sync(c):
    # Released Futures should be removed timely from the Client
    x = c.submit(div, 1, 1)
    x.result()
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)
    x = c.submit(slowinc, 1, delay=0.8)
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)
    x = c.submit(div, 1, 0)
    x.exception()
    x.release()
    wait_for(lambda: not c.futures, timeout=0.3)


def test_short_tracebacks(loop, c):
    """Remote exceptions should carry short tracebacks, not the full machinery."""
    tblib = pytest.importorskip("tblib")
    future = c.submit(div, 1, 0)
    try:
        future.result()
    except Exception:
        _, _, tb = sys.exc_info()
    # Walk the serialized traceback and count its frames.
    tb = tblib.Traceback(tb).to_dict()
    n = 0
    while tb is not None:
        n += 1
        tb = tb["tb_next"]
    assert n < 5
@gen_cluster(client=True)
async def test_map_naming(c, s, a, b):
    """Pure map calls deduplicate by key; pure=False forces distinct tasks."""
    L1 = c.map(inc, range(5))
    L2 = c.map(inc, range(5))
    assert [x.key for x in L1] == [x.key for x in L2]
    # Identical pure inputs collapse onto one task state...
    L3 = c.map(inc, [1, 1, 1, 1])
    assert len({x._state for x in L3}) == 1
    # ...unless purity is disabled.
    L4 = c.map(inc, [1, 1, 1, 1], pure=False)
    assert len({x._state for x in L4}) == 4


@gen_cluster(client=True)
async def test_submit_naming(c, s, a, b):
    """Pure submit calls share state; pure=False produces a fresh key."""
    a = c.submit(inc, 1)
    b = c.submit(inc, 1)
    assert a._state is b._state
    c = c.submit(inc, 1, pure=False)
    assert c.key != a.key


@gen_cluster(client=True)
async def test_exceptions(c, s, a, b):
    """A failed task raises on await, and the client keeps working after it."""
    x = c.submit(div, 1, 2)
    result = await x
    assert result == 1 / 2
    x = c.submit(div, 1, 0)
    with pytest.raises(ZeroDivisionError):
        await x
    x = c.submit(div, 10, 2)  # continues to operate
    result = await x
    assert result == 10 / 2


@gen_cluster()
async def test_gc(s, a, b):
    """Deleting a future releases its data from the scheduler."""
    c = await Client(s.address, asynchronous=True)
    x = c.submit(inc, 10)
    await x
    assert s.tasks[x.key].who_has
    x.__del__()
    await async_wait_for(
        lambda: x.key not in s.tasks or not s.tasks[x.key].who_has, timeout=0.3
    )
    await c.close()


def test_thread(c):
    """result(timeout=...) raises TimeoutError but the future stays usable."""
    x = c.submit(inc, 1)
    assert x.result() == 2
    x = c.submit(slowinc, 1, delay=0.3)
    with pytest.raises(TimeoutError):
        x.result(timeout="10 ms")
    assert x.result() == 2


def test_sync_exceptions(c):
    """Synchronous result() raises the task's exception; later tasks still run."""
    x = c.submit(div, 10, 2)
    assert x.result() == 5
    y = c.submit(div, 10, 0)
    try:
        y.result()
        assert False
    except ZeroDivisionError:
        pass
    z = c.submit(div, 10, 5)
    assert z.result() == 2
@gen_cluster(client=True)
async def test_gather(c, s, a, b):
x = c.submit(inc, 10)
y = c.submit(inc, x)
result = await c.gather(x)
assert result == 11
result = await c.gather([x])
assert result == [11]
result = await c.gather({"x": x, "y": [y]})
assert result == {"x": 11, "y": [12]}
@gen_cluster(client=True)
async def test_gather_mismatched_client(c, s, a, b):
c2 = await Client(s.address, asynchronous=True)
x = c.submit(inc, 10)
y = c2.submit(inc, 5)
with pytest.raises(ValueError, match="Futures created by another client"):
await c.gather([x, y])
@gen_cluster(client=True)
async def test_gather_lost(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, 1, workers=b.address)
await a.close()
with pytest.raises(Exception):
await c.gather([x, y])
def test_gather_sync(c):
x = c.submit(inc, 1)
assert c.gather(x) == 2
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
c.gather([x, y])
[xx] = c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True)
async def test_gather_strict(c, s, a, b):
x = c.submit(div, 2, 1)
y = c.submit(div, 1, 0)
with pytest.raises(ZeroDivisionError):
await c.gather([x, y])
[xx] = await c.gather([x, y], errors="skip")
assert xx == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_gather_skip(c, s, a):
x = c.submit(div, 1, 0, priority=10)
y = c.submit(slowinc, 1, delay=0.5)
with captured_logger(logging.getLogger("distributed.scheduler")) as sched:
with captured_logger(logging.getLogger("distributed.client")) as client:
L = await c.gather([x, y], errors="skip")
assert L == [2]
assert not client.getvalue()
assert not sched.getvalue()
@gen_cluster(client=True)
async def test_limit_concurrent_gathering(c, s, a, b):
futures = c.map(inc, range(100))
await c.gather(futures)
assert len(a.outgoing_transfer_log) + len(b.outgoing_transfer_log) < 100
@gen_cluster(client=True)
async def test_get(c, s, a, b):
future = c.get({"x": (inc, 1)}, "x", sync=False)
assert isinstance(future, Future)
result = await future
assert result == 2
futures = c.get({"x": (inc, 1)}, ["x"], sync=False)
assert isinstance(futures[0], Future)
result = await c.gather(futures)
assert result == [2]
futures = c.get({}, [], sync=False)
result = await c.gather(futures)
assert result == []
result = await c.get(
{("x", 1): (inc, 1), ("x", 2): (inc, ("x", 1))}, ("x", 2), sync=False
)
assert result == 3
def test_get_sync(c):
assert c.get({"x": (inc, 1)}, "x") == 2
def test_no_future_references(c):
from weakref import WeakSet
ws = WeakSet()
futures = c.map(inc, range(10))
ws.update(futures)
del futures
import gc
gc.collect()
start = time()
while list(ws):
sleep(0.01)
assert time() < start + 30
def test_get_sync_optimize_graph_passes_through(c):
bag = db.range(10, npartitions=3).map(inc)
dask.compute(bag.sum(), optimize_graph=False)
@gen_cluster(client=True)
async def test_gather_errors(c, s, a, b):
def f(a, b):
raise TypeError
def g(a, b):
raise AttributeError
future_f = c.submit(f, 1, 2)
future_g = c.submit(g, 1, 2)
with pytest.raises(TypeError):
await c.gather(future_f)
with pytest.raises(AttributeError):
await c.gather(future_g)
await a.close()
@gen_cluster(client=True)
async def test_wait(c, s, a, b):
x = c.submit(inc, 1)
y = c.submit(inc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z])
assert done == {x, y, z}
assert not_done == set()
assert x.status == y.status == "finished"
@gen_cluster(client=True)
async def test_wait_first_completed(c, s, a, b):
x = c.submit(slowinc, 1)
y = c.submit(slowinc, 1)
z = c.submit(inc, 2)
done, not_done = await wait([x, y, z], return_when="FIRST_COMPLETED")
assert done == {z}
assert not_done == {x, y}
assert z.status == "finished"
assert x.status == "pending"
assert y.status == "pending"
@gen_cluster(client=True)
async def test_wait_timeout(c, s, a, b):
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
await wait(future, timeout=0.01)
def test_wait_sync(c):
x = c.submit(inc, 1)
y = c.submit(inc, 2)
done, not_done = wait([x, y])
assert done == {x, y}
assert not_done == set()
assert x.status == y.status == "finished"
future = c.submit(sleep, 0.3)
with pytest.raises(TimeoutError):
wait(future, timeout=0.01)
def test_wait_informative_error_for_timeouts(c):
    """Passing a Future as wait()'s second positional argument (``timeout``)
    must raise with a message pointing at the timeout/list misuse.

    The original try/except silently passed when no exception was raised;
    ``pytest.raises`` makes the expectation explicit.
    """
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    with pytest.raises(Exception) as info:
        wait(x, y)
    assert "timeout" in str(info.value)
    assert "list" in str(info.value)
@gen_cluster(client=True)
async def test_garbage_collection(c, s, a, b):
    """Refcounts drop on __del__; unreferenced keys leave ``c.futures``."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    # x and y are pure submissions of the same task, so they share one key.
    assert c.refcount[x.key] == 2
    x.__del__()
    await asyncio.sleep(0)
    assert c.refcount[x.key] == 1
    z = c.submit(inc, y)
    y.__del__()
    await asyncio.sleep(0)
    result = await z
    assert result == 3
    ykey = y.key
    y.__del__()
    await asyncio.sleep(0)
    assert ykey not in c.futures
@gen_cluster(client=True)
async def test_garbage_collection_with_scatter(c, s, a, b):
    """Scattered data is released once its future is garbage collected."""
    [future] = await c.scatter([1])
    assert future.key in c.futures
    assert future.status == "finished"
    assert s.who_wants[future.key] == {c.id}
    key = future.key
    assert c.refcount[key] == 1
    future.__del__()
    await asyncio.sleep(0)
    assert c.refcount[key] == 0
    while key in s.tasks and s.tasks[key].who_has:
        await asyncio.sleep(0.1)
@gen_cluster(client=True)
async def test_recompute_released_key(c, s, a, b):
    """A key released everywhere can be recomputed with the same result."""
    x = c.submit(inc, 100)
    result1 = await x
    xkey = x.key
    del x
    import gc
    gc.collect()
    await asyncio.sleep(0)
    assert c.refcount[xkey] == 0
    # 1 second batching needs a second action to trigger
    # NOTE(review): `and` binds tighter than `or` here, i.e. this reads
    # (xkey in s.tasks and who_has) or in a.data or in b.data — presumably
    # intentional, but worth confirming.
    while xkey in s.tasks and s.tasks[xkey].who_has or xkey in a.data or xkey in b.data:
        await asyncio.sleep(0.1)
    x = c.submit(inc, 100)
    assert x.key in c.futures
    result2 = await x
    assert result1 == result2
@pytest.mark.slow
@gen_cluster(client=True)
async def test_long_tasks_dont_trigger_timeout(c, s, a, b):
    """A multi-second task completes without tripping client timeouts."""
    from time import sleep
    x = c.submit(sleep, 3)
    await x
@pytest.mark.skip
@gen_cluster(client=True)
async def test_missing_data_heals(c, s, a, b):
    """Deleted intermediate results are recomputed on demand (skipped)."""
    a.validate = False
    b.validate = False
    x = c.submit(inc, 1)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([x, y, z])
    # Secretly delete y's key
    if y.key in a.data:
        del a.data[y.key]
        a.release_key(y.key)
    if y.key in b.data:
        del b.data[y.key]
        b.release_key(y.key)
    await asyncio.sleep(0)
    w = c.submit(add, y, z)
    result = await w
    assert result == 3 + 4
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_missing_data(c, s, a, b):
    """gather recomputes keys whose data vanished from workers (skipped)."""
    a.validate = False
    b.validate = False
    x, y, z = c.map(inc, range(3))
    await wait([x, y, z])  # everything computed
    for f in [x, y]:
        for w in [a, b]:
            if f.key in w.data:
                del w.data[f.key]
                await asyncio.sleep(0)
                w.release_key(f.key)
    xx, yy, zz = await c.gather([x, y, z])
    assert (xx, yy, zz) == (1, 2, 3)
@pytest.mark.skip
@gen_cluster(client=True)
async def test_gather_robust_to_nested_missing_data(c, s, a, b):
    """gather recomputes whole missing dependency chains (skipped)."""
    a.validate = False
    b.validate = False
    w = c.submit(inc, 1)
    x = c.submit(inc, w)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([z])
    for worker in [a, b]:
        for datum in [y, z]:
            if datum.key in worker.data:
                del worker.data[datum.key]
                await asyncio.sleep(0)
                worker.release_key(datum.key)
    result = await c.gather([z])
    assert result == [inc(inc(inc(inc(1))))]
@gen_cluster(client=True)
async def test_tokenize_on_futures(c, s, a, b):
    """Future tokens depend on the key, not on the future's runtime state."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 1)
    tok = tokenize(x)
    assert tokenize(x) == tokenize(x)
    assert tokenize(x) == tokenize(y)
    c.futures[x.key].finish()
    assert tok == tokenize(y)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_submit(c, s, a, b):
    """Host restrictions on submit pin tasks to the requested hosts."""
    x = c.submit(inc, 1, workers={a.ip})
    y = c.submit(inc, x, workers={b.ip})
    await wait([x, y])
    assert s.host_restrictions[x.key] == {a.ip}
    assert x.key in a.data
    assert s.host_restrictions[y.key] == {b.ip}
    assert y.key in b.data
@gen_cluster(client=True)
async def test_restrictions_ip_port(c, s, a, b):
    """Full-address restrictions pin tasks to specific workers."""
    x = c.submit(inc, 1, workers={a.address})
    y = c.submit(inc, x, workers={b.address})
    await wait([x, y])
    assert s.worker_restrictions[x.key] == {a.address}
    assert x.key in a.data
    assert s.worker_restrictions[y.key] == {b.address}
    assert y.key in b.data
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_map(c, s, a, b):
    """Host restrictions on map apply to every produced future."""
    L = c.map(inc, range(5), workers={a.ip})
    await wait(L)
    assert set(a.data) == {x.key for x in L}
    assert not b.data
    for x in L:
        assert s.host_restrictions[x.key] == {a.ip}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_restrictions_get(c, s, a, b):
    """workers= on Client.get restricts the whole graph to one host."""
    dsk = {"x": 1, "y": (inc, "x"), "z": (inc, "y")}
    futures = c.get(dsk, ["y", "z"], workers=a.ip, sync=False)
    result = await c.gather(futures)
    assert result == [2, 3]
    assert "y" in a.data
    assert "z" in a.data
    assert len(b.data) == 0
@gen_cluster(client=True)
async def test_restrictions_get_annotate(c, s, a, b):
    """dask.annotate(workers=...) routes individual layers to workers."""
    x = 1
    with dask.annotate(workers=a.address):
        y = delayed(inc)(x)
    with dask.annotate(workers=b.address):
        z = delayed(inc)(y)
    futures = c.get(z.__dask_graph__(), [y.key, z.key], sync=False)
    result = await c.gather(futures)
    assert result == [2, 3]
    assert y.key in a.data
    assert z.key in b.data
@gen_cluster(client=True)
async def dont_test_bad_restrictions_raise_exception(c, s, a, b):
    """Impossible restrictions should error mentioning the bad address.

    Name prefixed ``dont_`` so pytest does not collect it.
    """
    z = c.submit(inc, 2, workers={"bad-address"})
    try:
        await z
        assert False
    except ValueError as e:
        assert "bad-address" in str(e)
        assert z.key in str(e)
@gen_cluster(client=True)
async def test_remove_worker(c, s, a, b):
    """Results remain gatherable after a worker shuts down."""
    L = c.map(inc, range(20))
    await wait(L)
    await b.close()
    assert b.address not in s.workers
    result = await c.gather(L)
    assert result == list(map(inc, range(20)))
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_errors_dont_block(c, s, w):
    """Failing tasks do not prevent sibling tasks from completing."""
    L = [c.submit(inc, 1), c.submit(throws, 1), c.submit(inc, 2), c.submit(throws, 2)]
    while not (L[0].status == L[2].status == "finished"):
        await asyncio.sleep(0.01)
    result = await c.gather([L[0], L[2]])
    assert result == [2, 3]
@gen_cluster(client=True)
async def test_submit_quotes(c, s, a, b):
    """Lists in args/kwargs are passed as data, not mistaken for task specs."""
    def assert_list(x, z=[]):
        return isinstance(x, list) and isinstance(z, list)
    x = c.submit(assert_list, [1, 2, 3])
    result = await x
    assert result
    x = c.submit(assert_list, [1, 2, 3], z=[4, 5, 6])
    result = await x
    assert result
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(assert_list, [x, y])
    result = await z
    assert result
@gen_cluster(client=True)
async def test_map_quotes(c, s, a, b):
    """Same as test_submit_quotes but for Client.map."""
    def assert_list(x, z=[]):
        return isinstance(x, list) and isinstance(z, list)
    L = c.map(assert_list, [[1, 2, 3], [4]])
    result = await c.gather(L)
    assert all(result)
    L = c.map(assert_list, [[1, 2, 3], [4]], z=[10])
    result = await c.gather(L)
    assert all(result)
    L = c.map(assert_list, [[1, 2, 3], [4]], [[]] * 3)
    result = await c.gather(L)
    assert all(result)
@gen_cluster()
async def test_two_consecutive_clients_share_results(s, a, b):
    """Pure tasks are shared between clients via the scheduler."""
    c = await Client(s.address, asynchronous=True)
    x = c.submit(random.randint, 0, 1000, pure=True)
    xx = await x
    f = await Client(s.address, asynchronous=True)
    y = f.submit(random.randint, 0, 1000, pure=True)
    yy = await y
    assert xx == yy
    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test_submit_then_get_with_Future(c, s, a, b):
    """Futures may be embedded as values inside graphs passed to get()."""
    x = c.submit(slowinc, 1)
    dsk = {"y": (inc, x)}
    result = await c.get(dsk, "y", sync=False)
    assert result == 3
@gen_cluster(client=True)
async def test_aliases(c, s, a, b):
    """A graph value that is itself a Future acts as an alias."""
    x = c.submit(inc, 1)
    dsk = {"y": x}
    result = await c.get(dsk, "y", sync=False)
    assert result == 2
@gen_cluster(client=True)
async def test_aliases_2(c, s, a, b):
    """Chained key aliases resolve the same as with dask.get."""
    dsk_keys = [
        ({"x": (inc, 1), "y": "x", "z": "x", "w": (add, "y", "z")}, ["y", "w"]),
        ({"x": "y", "y": 1}, ["x"]),
        ({"x": 1, "y": "x", "z": "y", "w": (inc, "z")}, ["w"]),
    ]
    for dsk, keys in dsk_keys:
        result = await c.gather(c.get(dsk, keys, sync=False))
        assert list(result) == list(dask.get(dsk, keys))
        await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
    """scatter dicts and lists; scattered futures interoperate with submit."""
    d = await c.scatter({"y": 20})
    assert isinstance(d["y"], Future)
    assert a.data.get("y") == 20 or b.data.get("y") == 20
    y_who_has = s.get_who_has(keys=["y"])["y"]
    assert a.address in y_who_has or b.address in y_who_has
    assert s.get_nbytes(summary=False) == {"y": sizeof(20)}
    yy = await c.gather([d["y"]])
    assert yy == [20]
    [x] = await c.scatter([10])
    assert isinstance(x, Future)
    assert a.data.get(x.key) == 10 or b.data.get(x.key) == 10
    xx = await c.gather([x])
    x_who_has = s.get_who_has(keys=[x.key])[x.key]
    assert s.tasks[x.key].who_has
    assert (
        s.workers[a.address] in s.tasks[x.key].who_has
        or s.workers[b.address] in s.tasks[x.key].who_has
    )
    assert s.get_nbytes(summary=False) == {"y": sizeof(20), x.key: sizeof(10)}
    assert xx == [10]
    z = c.submit(add, x, d["y"])  # submit works on Future
    result = await z
    assert result == 10 + 20
    result = await c.gather([z, x])
    assert result == [30, 10]
@gen_cluster(client=True)
async def test_scatter_types(c, s, a, b):
    """scatter preserves the container type of its argument."""
    d = await c.scatter({"x": 1})
    assert isinstance(d, dict)
    assert list(d) == ["x"]
    for seq in [[1], (1,), {1}, frozenset([1])]:
        L = await c.scatter(seq)
        assert isinstance(L, type(seq))
        assert len(L) == 1
        s.validate_state()
    seq = await c.scatter(range(5))
    assert isinstance(seq, list)
    assert len(seq) == 5
    s.validate_state()
@gen_cluster(client=True)
async def test_scatter_non_list(c, s, a, b):
    """Scattering a bare scalar yields a single Future."""
    x = await c.scatter(1)
    assert isinstance(x, Future)
    result = await x
    assert result == 1
@gen_cluster(client=True)
async def test_scatter_tokenize_local(c, s, a, b):
    """Custom normalize_token implementations are honored during scatter."""
    from dask.base import normalize_token
    class MyObj:
        pass
    L = []
    @normalize_token.register(MyObj)
    def f(x):
        L.append(x)
        return "x"
    obj = MyObj()
    future = await c.scatter(obj)
    assert L and L[0] is obj
@gen_cluster(client=True)
async def test_scatter_singletons(c, s, a, b):
    """Scalars, numpy arrays, and DataFrames round-trip through scatter."""
    np = pytest.importorskip("numpy")
    pd = pytest.importorskip("pandas")
    for x in [1, np.ones(5), pd.DataFrame({"x": [1, 2, 3]})]:
        future = await c.scatter(x)
        result = await future
        assert str(result) == str(x)
@gen_cluster(client=True)
async def test_scatter_typename(c, s, a, b):
    """Scatter keys are prefixed with the value's type name."""
    future = await c.scatter(123)
    assert future.key.startswith("int")
@gen_cluster(client=True)
async def test_scatter_hash(c, s, a, b):
    """hash=True (default) deduplicates; hash=False yields fresh keys."""
    x = await c.scatter(123)
    y = await c.scatter(123)
    assert x.key == y.key
    z = await c.scatter(123, hash=False)
    assert z.key != y.key
@gen_cluster(client=True)
async def test_scatter_hash_2(c, s, a, b):
    """Scattering equal values twice in lists produces the same key."""
    [a] = await c.scatter([1])
    [b] = await c.scatter([1])
    assert a.key == b.key
    s.validate_state()
@gen_cluster(client=True)
async def test_get_releases_data(c, s, a, b):
    """Keys computed by get() are released once the client drops them."""
    await c.gather(c.get({"x": (inc, 1)}, ["x"], sync=False))
    import gc
    gc.collect()
    while c.refcount["x"]:
        await asyncio.sleep(0.01)
def test_current(s, a, b):
    """Client.current() tracks the active client; raises when there is none."""
    with Client(s["address"]) as c:
        assert Client.current() is c
    with pytest.raises(ValueError):
        Client.current()
    with Client(s["address"]) as c:
        assert Client.current() is c
def test_global_clients(loop):
    """The global/default client follows nested Client context managers."""
    assert _get_global_client() is None
    with pytest.raises(ValueError):
        default_client()
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert _get_global_client() is c
            assert default_client() is c
            with Client(s["address"], loop=loop) as f:
                assert _get_global_client() is f
                assert default_client() is f
                assert default_client(c) is c
                assert default_client(f) is f
    assert _get_global_client() is None
@gen_cluster(client=True)
async def test_exception_on_exception(c, s, a, b):
    """Errors propagate transitively through dependent futures."""
    x = c.submit(lambda: 1 / 0)
    y = c.submit(inc, x)
    with pytest.raises(ZeroDivisionError):
        await y
    z = c.submit(inc, y)
    with pytest.raises(ZeroDivisionError):
        await z
@gen_cluster(client=True)
async def test_get_task_prefix_states(c, s, a, b):
    """Scheduler reports per-prefix state counts; empty once released."""
    x = await c.submit(inc, 1)
    res = s.get_task_prefix_states()
    data = {
        "inc": {
            "erred": 0,
            "memory": 1,
            "processing": 0,
            "released": 0,
            "waiting": 0,
        }
    }
    assert res == data
    del x
    while s.get_task_prefix_states() == data:
        await asyncio.sleep(0.01)
    res = s.get_task_prefix_states()
    assert res == {}
@gen_cluster(client=True)
async def test_get_nbytes(c, s, a, b):
    """get_nbytes reports sizes of scattered and computed keys."""
    [x] = await c.scatter([1])
    assert s.get_nbytes(summary=False) == {x.key: sizeof(1)}
    y = c.submit(inc, x)
    await y
    assert s.get_nbytes(summary=False) == {x.key: sizeof(1), y.key: sizeof(2)}
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_nbytes_determines_worker(c, s, a, b):
    """Tasks run near their largest input to minimize data transfer."""
    x = c.submit(identity, 1, workers=[a.ip])
    y = c.submit(identity, tuple(range(100)), workers=[b.ip])
    await c.gather([x, y])
    z = c.submit(lambda x, y: None, x, y)
    await z
    assert s.tasks[z.key].who_has == {s.workers[b.address]}
@gen_cluster(client=True)
async def test_if_intermediates_clear_on_error(c, s, a, b):
    """No task data lingers on the scheduler after a failed compute."""
    x = delayed(div, pure=True)(1, 0)
    y = delayed(div, pure=True)(1, 2)
    z = delayed(add, pure=True)(x, y)
    f = c.compute(z)
    with pytest.raises(ZeroDivisionError):
        await f
    s.validate_state()
    assert not any(ts.who_has for ts in s.tasks.values())
@gen_cluster(
    client=True, config={"distributed.scheduler.default-task-durations": {"f": "1ms"}}
)
async def test_pragmatic_move_small_data_to_large_data(c, s, a, b):
    """The scheduler moves the small 'total' to the big lists, not vice versa."""
    np = pytest.importorskip("numpy")
    lists = c.map(np.ones, [10000] * 10, pure=False)
    sums = c.map(np.sum, lists)
    total = c.submit(sum, sums)
    def f(x, y):
        return None
    results = c.map(f, lists, [total] * 10)
    await wait([total])
    await wait(results)
    assert (
        sum(
            s.tasks[r.key].who_has.issubset(s.tasks[l.key].who_has)
            for l, r in zip(lists, results)
        )
        >= 9
    )
@gen_cluster(client=True)
async def test_get_with_non_list_key(c, s, a, b):
    """get() accepts tuple and non-string scalar keys."""
    dsk = {("x", 0): (inc, 1), 5: (inc, 2)}
    x = await c.get(dsk, ("x", 0), sync=False)
    y = await c.get(dsk, 5, sync=False)
    assert x == 2
    assert y == 3
@gen_cluster(client=True)
async def test_get_with_error(c, s, a, b):
    """Errors inside a graph propagate out of async get()."""
    dsk = {"x": (div, 1, 0), "y": (inc, "x")}
    with pytest.raises(ZeroDivisionError):
        await c.get(dsk, "y", sync=False)
def test_get_with_error_sync(c):
    """Errors inside a graph propagate out of sync get()."""
    dsk = {"x": (div, 1, 0), "y": (inc, "x")}
    with pytest.raises(ZeroDivisionError):
        c.get(dsk, "y")
@gen_cluster(client=True)
async def test_directed_scatter(c, s, a, b):
    """workers= targets scatter at specific workers, by address or name."""
    await c.scatter([1, 2, 3], workers=[a.address])
    assert len(a.data) == 3
    assert not b.data
    await c.scatter([4, 5], workers=[b.name])
    assert len(b.data) == 2
def test_directed_scatter_sync(c, s, a, b, loop):
    """Sync variant of directed scatter, verified via scheduler.has_what."""
    futures = c.scatter([1, 2, 3], workers=[b["address"]])
    has_what = sync(loop, c.scheduler.has_what)
    assert len(has_what[b["address"]]) == len(futures)
    assert len(has_what[a["address"]]) == 0
@gen_cluster(client=True)
async def test_scatter_direct(c, s, a, b):
    """direct=True bypasses the scheduler for data movement."""
    future = await c.scatter(123, direct=True)
    assert future.key in a.data or future.key in b.data
    assert s.tasks[future.key].who_has
    assert future.status == "finished"
    result = await future
    assert result == 123
    assert not s.counters["op"].components[0]["scatter"]
    result = await future
    assert not s.counters["op"].components[0]["gather"]
    result = await c.gather(future)
    assert not s.counters["op"].components[0]["gather"]
@gen_cluster()
async def test_scatter_direct_2(s, a, b):
    """Client heartbeats keep updating scheduler's last_seen."""
    c = await Client(s.address, asynchronous=True, heartbeat_interval=10)
    last = s.clients[c.id].last_seen
    while s.clients[c.id].last_seen == last:
        await asyncio.sleep(0.10)
    await c.close()
@gen_cluster(client=True)
async def test_scatter_direct_numpy(c, s, a, b):
    """Direct scatter round-trips a numpy array without scheduler traffic."""
    np = pytest.importorskip("numpy")
    x = np.ones(5)
    future = await c.scatter(x, direct=True)
    result = await future
    assert np.allclose(x, result)
    assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True)
async def test_scatter_direct_broadcast(c, s, a, b):
    """broadcast=True places the data on every worker."""
    future2 = await c.scatter(456, direct=True, broadcast=True)
    assert future2.key in a.data
    assert future2.key in b.data
    assert s.tasks[future2.key].who_has == {s.workers[a.address], s.workers[b.address]}
    result = await future2
    assert result == 456
    assert not s.counters["op"].components[0]["scatter"]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_balanced(c, s, *workers):
    """Direct scatter spreads items across distinct workers."""
    futures = await c.scatter([1, 2, 3], direct=True)
    assert sorted(len(w.data) for w in workers) == [0, 1, 1, 1]
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_scatter_direct_broadcast_target(c, s, *workers):
    """Direct scatter with ``workers=`` targets those workers; with
    ``broadcast=True`` every targeted worker receives every key.

    The final check used to be ``assert (<genexpr>)``, which is always
    truthy and therefore verified nothing; it is now ``assert all(...)``,
    and who_has membership is checked against WorkerState objects
    (``s.workers[addr]``) as done elsewhere in this file.
    """
    futures = await c.scatter([123, 456], direct=True, workers=workers[0].address)
    assert futures[0].key in workers[0].data
    assert futures[1].key in workers[0].data
    futures = await c.scatter(
        [123, 456],
        direct=True,
        broadcast=True,
        workers=[w.address for w in workers[:3]],
    )
    assert all(
        f.key in w.data and s.workers[w.address] in s.tasks[f.key].who_has
        for f in futures
        for w in workers[:3]
    )
@gen_cluster(client=True, nthreads=[])
async def test_scatter_direct_empty(c, s):
    """Direct scatter with no workers available fails fast."""
    with pytest.raises((ValueError, TimeoutError)):
        await c.scatter(123, direct=True, timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 5)
async def test_scatter_direct_spread_evenly(c, s, *workers):
    """Repeated direct scatters end up touching every worker."""
    futures = []
    for i in range(10):
        future = await c.scatter(i, direct=True)
        futures.append(future)
    assert all(w.data for w in workers)
@pytest.mark.parametrize("direct", [True, False])
@pytest.mark.parametrize("broadcast", [True, False])
def test_scatter_gather_sync(c, direct, broadcast):
    """Sync scatter/gather works for all direct/broadcast combinations."""
    futures = c.scatter([1, 2, 3], direct=direct, broadcast=broadcast)
    results = c.gather(futures, direct=direct)
    assert results == [1, 2, 3]
    delayed(inc)(1).compute(direct=direct)
@gen_cluster(client=True)
async def test_gather_direct(c, s, a, b):
    """direct=True gather fetches straight from workers."""
    futures = await c.scatter([1, 2, 3])
    data = await c.gather(futures, direct=True)
    assert data == [1, 2, 3]
@gen_cluster(client=True)
async def test_many_submits_spread_evenly(c, s, a, b):
    """Many submissions are balanced over both workers."""
    L = [c.submit(inc, i) for i in range(10)]
    await wait(L)
    assert a.data and b.data
@gen_cluster(client=True)
async def test_traceback(c, s, a, b):
    """Future.traceback() returns the remote traceback object."""
    x = c.submit(div, 1, 0)
    tb = await x.traceback()
    assert any("x / y" in line for line in pluck(3, traceback.extract_tb(tb)))
@gen_cluster(client=True)
async def test_get_traceback(c, s, a, b):
    """Exceptions from get() carry the remote traceback."""
    try:
        await c.get({"x": (div, 1, 0)}, "x", sync=False)
    except ZeroDivisionError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        L = traceback.format_tb(exc_traceback)
        assert any("x / y" in line for line in L)
@gen_cluster(client=True)
async def test_gather_traceback(c, s, a, b):
    """Exceptions from gather() carry the remote traceback."""
    x = c.submit(div, 1, 0)
    try:
        await c.gather(x)
    except ZeroDivisionError:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        L = traceback.format_tb(exc_traceback)
        assert any("x / y" in line for line in L)
def test_traceback_sync(c):
    """Sync traceback(): present for errors, grows along dependency chains,
    and is None for successful futures."""
    x = c.submit(div, 1, 0)
    tb = x.traceback()
    assert any(
        "x / y" in line
        for line in concat(traceback.extract_tb(tb))
        if isinstance(line, str)
    )
    y = c.submit(inc, x)
    tb2 = y.traceback()
    assert set(pluck(3, traceback.extract_tb(tb2))).issuperset(
        set(pluck(3, traceback.extract_tb(tb)))
    )
    z = c.submit(div, 1, 2)
    tb = z.traceback()
    assert tb is None
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
    """upload_file ships a module to workers; re-upload replaces it."""
    def g():
        import myfile
        return myfile.f()
    with save_sys_modules():
        for value in [123, 456]:
            with tmp_text("myfile.py", f"def f():\n return {value}") as fn:
                await c.upload_file(fn)
                x = c.submit(g, pure=False)
                result = await x
                assert result == value
@gen_cluster(client=True)
async def test_upload_file_refresh_delayed(c, s, a, b):
    """Uploaded modules are importable from delayed computations too."""
    with save_sys_modules():
        for value in [123, 456]:
            with tmp_text("myfile.py", f"def f():\n return {value}") as fn:
                await c.upload_file(fn)
                sys.path.append(os.path.dirname(fn))
                from myfile import f
                b = delayed(f)()
                bb = c.compute(b, sync=False)
                result = await c.gather(bb)
                assert result == value
@gen_cluster(client=True)
async def test_upload_file_no_extension(c, s, a, b):
    """Files without a .py/.zip extension upload without error."""
    with tmp_text("myfile", "") as fn:
        await c.upload_file(fn)
@gen_cluster(client=True)
async def test_upload_file_zip(c, s, a, b):
    """Zipped modules are extracted and importable on workers."""
    def g():
        import myfile
        return myfile.f()
    with save_sys_modules():
        try:
            for value in [123, 456]:
                with tmp_text(
                    "myfile.py", f"def f():\n return {value}"
                ) as fn_my_file:
                    with zipfile.ZipFile("myfile.zip", "w") as z:
                        z.write(fn_my_file, arcname=os.path.basename(fn_my_file))
                    await c.upload_file("myfile.zip")
                    x = c.submit(g, pure=False)
                    result = await x
                    assert result == value
        finally:
            if os.path.exists("myfile.zip"):
                os.remove("myfile.zip")
@gen_cluster(client=True)
async def test_upload_file_egg(c, s, a, b):
    """A multi-package .egg uploads and both top-level packages import."""
    def g():
        import package_1
        import package_2
        return package_1.a, package_2.b
    # c.upload_file tells each worker to
    # - put this file in their local_directory
    # - modify their sys.path to include it
    # we don't care about the local_directory
    # but we do care about restoring the path
    with save_sys_modules():
        for value in [123, 456]:
            with tmpfile() as dirname:
                os.mkdir(dirname)
                with open(os.path.join(dirname, "setup.py"), "w") as f:
                    f.write("from setuptools import setup, find_packages\n")
                    f.write(
                        'setup(name="my_package", packages=find_packages(), version="{}")\n'.format(
                            value
                        )
                    )
                # test a package with an underscore in the name
                package_1 = os.path.join(dirname, "package_1")
                os.mkdir(package_1)
                with open(os.path.join(package_1, "__init__.py"), "w") as f:
                    f.write(f"a = {value}\n")
                # test multiple top-level packages
                package_2 = os.path.join(dirname, "package_2")
                os.mkdir(package_2)
                with open(os.path.join(package_2, "__init__.py"), "w") as f:
                    f.write(f"b = {value}\n")
                # compile these into an egg
                subprocess.check_call(
                    [sys.executable, "setup.py", "bdist_egg"], cwd=dirname
                )
                egg_root = os.path.join(dirname, "dist")
                # first file ending with '.egg'
                egg_name = [
                    fname for fname in os.listdir(egg_root) if fname.endswith(".egg")
                ][0]
                egg_path = os.path.join(egg_root, egg_name)
                await c.upload_file(egg_path)
                os.remove(egg_path)
                x = c.submit(g, pure=False)
                result = await x
                assert result == (value, value)
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
    """_upload_large_file places files in each worker's local_directory."""
    assert a.local_directory
    assert b.local_directory
    with tmp_text("myfile", "abc") as fn:
        with tmp_text("myfile2", "def") as fn2:
            await c._upload_large_file(fn, remote_filename="x")
            await c._upload_large_file(fn2)
            for w in [a, b]:
                assert os.path.exists(os.path.join(w.local_directory, "x"))
                assert os.path.exists(os.path.join(w.local_directory, "myfile2"))
                with open(os.path.join(w.local_directory, "x")) as f:
                    assert f.read() == "abc"
                with open(os.path.join(w.local_directory, "myfile2")) as f:
                    assert f.read() == "def"
def test_upload_file_sync(c):
    """Sync upload_file makes the module importable on workers."""
    def g():
        import myfile
        return myfile.x
    with tmp_text("myfile.py", "x = 123") as fn:
        c.upload_file(fn)
        x = c.submit(g)
        assert x.result() == 123
@gen_cluster(client=True)
async def test_upload_file_exception(c, s, a, b):
    """Uploading a file with a syntax error raises on the client."""
    with tmp_text("myfile.py", "syntax-error!") as fn:
        with pytest.raises(SyntaxError):
            await c.upload_file(fn)
def test_upload_file_exception_sync(c):
    """Sync variant of the syntax-error upload test."""
    with tmp_text("myfile.py", "syntax-error!") as fn:
        with pytest.raises(SyntaxError):
            c.upload_file(fn)
@gen_cluster(client=True, nthreads=[])
async def test_upload_file_new_worker(c, s):
    """Workers joining after upload_file still receive the file."""
    def g():
        import myfile
        return myfile.x
    with tmp_text("myfile.py", "x = 123") as fn:
        await c.upload_file(fn)
        async with Worker(s.address):
            x = await c.submit(g)
            assert x == 123
@pytest.mark.skip
@gen_cluster()
async def test_multiple_clients(s, a, b):
    """Futures from different clients can be combined (skipped)."""
    a = await Client(s.address, asynchronous=True)
    b = await Client(s.address, asynchronous=True)
    x = a.submit(inc, 1)
    y = b.submit(inc, 2)
    assert x.client is a
    assert y.client is b
    xx = await x
    yy = await y
    assert xx == 2
    assert yy == 3
    z = a.submit(add, x, y)
    assert z.client is a
    zz = await z
    assert zz == 5
    await a.close()
    await b.close()
@gen_cluster(client=True)
async def test_async_compute(c, s, a, b):
    """compute(sync=False) returns futures; plain values pass through."""
    from dask.delayed import delayed
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    [yy, zz, aa] = c.compute([y, z, 3], sync=False)
    assert isinstance(yy, Future)
    assert isinstance(zz, Future)
    assert aa == 3
    result = await c.gather([yy, zz])
    assert result == [2, 0]
    assert isinstance(c.compute(y), Future)
    assert isinstance(c.compute([y]), (tuple, list))
@gen_cluster(client=True)
async def test_async_compute_with_scatter(c, s, a, b):
    """Scattered futures can feed into delayed graphs."""
    d = await c.scatter({("x", 1): 1, ("y", 1): 2})
    x, y = d[("x", 1)], d[("y", 1)]
    from dask.delayed import delayed
    z = delayed(add)(delayed(inc)(x), delayed(inc)(y))
    zz = c.compute(z)
    [result] = await c.gather([zz])
    assert result == 2 + 3
def test_sync_compute(c):
    """Synchronous compute returns concrete results."""
    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    yy, zz = c.compute([y, z], sync=True)
    assert (yy, zz) == (2, 0)
@gen_cluster(client=True)
async def test_remote_scatter_gather(c, s, a, b):
    """Scattered values land on some worker and gather back intact."""
    x, y, z = await c.scatter([1, 2, 3])
    assert x.key in a.data or x.key in b.data
    assert y.key in a.data or y.key in b.data
    assert z.key in a.data or z.key in b.data
    xx, yy, zz = await c.gather([x, y, z])
    assert (xx, yy, zz) == (1, 2, 3)
@gen_cluster(client=True)
async def test_remote_submit_on_Future(c, s, a, b):
    """Futures can be passed as arguments to subsequent submissions."""
    x = c.submit(lambda x: x + 1, 1)
    y = c.submit(lambda x: x + 1, x)
    result = await y
    assert result == 3
def test_start_is_idempotent(c):
    """Calling Client.start repeatedly is harmless."""
    c.start()
    c.start()
    c.start()
    x = c.submit(inc, 1)
    assert x.result() == 2
@gen_cluster(client=True)
async def test_client_with_scheduler(c, s, a, b):
    """End-to-end smoke test: submit, scatter, gather, get."""
    assert s.nthreads == {a.address: a.nthreads, b.address: b.nthreads}
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y)
    result = await x
    assert result == 1 + 1
    result = await z
    assert result == 1 + 1 + 1 + 2
    A, B, C = await c.scatter([1, 2, 3])
    AA, BB, xx = await c.gather([A, B, x])
    assert (AA, BB, xx) == (1, 2, 2)
    result = await c.get({"x": (inc, 1), "y": (add, "x", 10)}, "y", sync=False)
    assert result == 12
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_allow_restrictions(c, s, a, b):
    """``allow_other_workers=True`` records loose restrictions; satisfiable
    ones are still honored, unsatisfiable ones fall back to any worker, and
    invalid argument combinations raise.
    """
    aws = s.workers[a.address]
    # Fixed copy-paste bug: this previously read s.workers[a.address].
    bws = s.workers[b.address]
    x = c.submit(inc, 1, workers=a.ip)
    await x
    assert s.tasks[x.key].who_has == {aws}
    assert not s.loose_restrictions
    x = c.submit(inc, 2, workers=a.ip, allow_other_workers=True)
    await x
    assert s.tasks[x.key].who_has == {aws}
    assert x.key in s.loose_restrictions
    L = c.map(inc, range(3, 13), workers=a.ip, allow_other_workers=True)
    await wait(L)
    assert all(s.tasks[f.key].who_has == {aws} for f in L)
    assert {f.key for f in L}.issubset(s.loose_restrictions)
    # Unsatisfiable host: loose restrictions let the task run anywhere.
    x = c.submit(inc, 15, workers="127.0.0.3", allow_other_workers=True)
    await x
    assert s.tasks[x.key].who_has
    assert x.key in s.loose_restrictions
    L = c.map(inc, range(15, 25), workers="127.0.0.3", allow_other_workers=True)
    await wait(L)
    assert all(s.tasks[f.key].who_has for f in L)
    assert {f.key for f in L}.issubset(s.loose_restrictions)
    # allow_other_workers without workers= is meaningless.
    with pytest.raises(ValueError):
        c.submit(inc, 1, allow_other_workers=True)
    with pytest.raises(ValueError):
        c.map(inc, [1], allow_other_workers=True)
    # allow_other_workers must be a bool.
    with pytest.raises(TypeError):
        c.submit(inc, 20, workers="127.0.0.1", allow_other_workers="Hello!")
    with pytest.raises(TypeError):
        c.map(inc, [20], workers="127.0.0.1", allow_other_workers="Hello!")
def test_bad_address():
    """Connecting to unreachable addresses fails with a 'connect' OSError."""
    with pytest.raises(OSError, match="connect"):
        Client("123.123.123.123:1234", timeout=0.1)
    with pytest.raises(OSError, match="connect"):
        Client("127.0.0.1:1234", timeout=0.1)
def test_informative_error_on_cluster_type():
    """Passing a cluster class (not instance/address) gives a clear TypeError."""
    with pytest.raises(TypeError) as exc_info:
        Client(LocalCluster)
    assert "Scheduler address must be a string or a Cluster instance" in str(
        exc_info.value
    )
@gen_cluster(client=True)
async def test_long_error(c, s, a, b):
    """Very long exception messages and traceback lines are truncated
    before reaching the client.

    The original try/except silently passed when no error was raised;
    ``pytest.raises`` makes the expectation explicit.
    """
    def bad(x):
        raise ValueError("a" * 100000)
    x = c.submit(bad, 10)
    with pytest.raises(ValueError) as info:
        await x
    assert len(str(info.value)) < 100000
    tb = await x.traceback()
    assert all(
        len(line) < 100000
        for line in concat(traceback.extract_tb(tb))
        if isinstance(line, str)
    )
@gen_cluster(client=True)
async def test_map_on_futures_with_kwargs(c, s, a, b):
    """Keyword arguments pass through map/submit when inputs are futures."""
    def f(x, y=10):
        return x + y
    futures = c.map(inc, range(10))
    futures2 = c.map(f, futures, y=20)
    results = await c.gather(futures2)
    assert results == [i + 1 + 20 for i in range(10)]
    future = c.submit(inc, 100)
    future2 = c.submit(f, future, y=200)
    result = await future2
    assert result == 100 + 1 + 200
class BadlySerializedObject:
    """Object that pickles fine but always fails to unpickle.

    Deserialization raises ``TypeError("hello!")``; tests assert that this
    error surfaces to the client.
    """

    def __getstate__(self):
        # Serialization succeeds with a trivial state.
        return 1

    def __setstate__(self, state):
        # Deserialization always blows up.
        raise TypeError("hello!")
class FatallySerializedObject:
    """Object whose deserialization kills the interpreter.

    Unpickling an instance prints a message and exits the process, simulating
    a worker dying while deserializing task input.
    """

    def __getstate__(self):
        # Serialization succeeds with a trivial state.
        return 1

    def __setstate__(self, state):
        print("This should never have been deserialized, closing")
        import sys

        sys.exit(0)
@gen_cluster(client=True)
async def test_badly_serialized_input(c, s, a, b):
    """A task whose input fails to deserialize errors out without poisoning
    the rest of the cluster's work."""
    o = BadlySerializedObject()

    future = c.submit(inc, o)
    futures = c.map(inc, range(10))

    # Unrelated work still completes.
    L = await c.gather(futures)
    assert list(L) == list(map(inc, range(10)))
    assert future.status == "error"

    with pytest.raises(Exception) as info:
        await future

    # The TypeError raised in __setstate__ surfaces to the client.
    assert "hello!" in str(info.value)
@pytest.mark.skip
@gen_test()
async def test_badly_serialized_input_stderr(capsys, c):
    """A deserialization error on a worker is reported on stderr.

    NOTE(review): currently skipped; the wait loop has no timeout and would
    hang forever if the message never appears.
    """
    o = BadlySerializedObject()
    future = c.submit(inc, o)

    while True:
        sleep(0.01)
        out, err = capsys.readouterr()
        if "hello!" in err:
            break

    assert future.status == "error"
def test_repr(loop):
    """str/repr/_repr_html_ of a connected Client show the scheduler address,
    thread count, and total memory; after closing they say so."""
    funcs = [str, repr, lambda x: x._repr_html_()]
    with cluster(nworkers=3, worker_kwargs={"memory_limit": "2 GiB"}) as (s, [a, b, c]):
        with Client(s["address"], loop=loop) as c:
            for func in funcs:
                text = func(c)
                assert c.scheduler.address in text
                assert "threads=3" in text or "Total threads: </strong>" in text
                # 3 workers x 2 GiB
                assert "6.00 GiB" in text
                if "<table" not in text:
                    assert len(text) < 80

        # Client is closed here; reprs must reflect the disconnect.
        for func in funcs:
            text = func(c)
            assert "No scheduler connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
    """Smoke test: HTML repr works on an asynchronous client."""
    c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
    """Smoke test: HTML repr works when workers have no memory limit."""
    c._repr_html_()
@gen_test()
async def test_repr_localcluster():
    """HTML repr of a client attached to a LocalCluster shows the scheduler
    address and is well-formed XML."""
    cluster = await LocalCluster(
        processes=False, dashboard_address=":0", asynchronous=True
    )
    client = await Client(cluster, asynchronous=True)
    try:
        text = client._repr_html_()
        assert cluster.scheduler.address in text
        assert is_valid_xml(client._repr_html_())
    finally:
        await client.close()
        await cluster.close()
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
    """Releasing keys removes tasks from the scheduler only once nothing
    depends on them any more."""
    x = c.submit(inc, 1, retries=2)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)

    await wait([x, y, z])
    assert not s.waiting_data.get(x.key)
    assert not s.waiting_data.get(y.key)

    assert set(s.tasks) == {x.key, y.key, z.key}

    # x is still needed by z, so releasing it alone keeps it alive.
    s.client_releases_keys(keys=[x.key], client=c.id)
    assert x.key in s.tasks
    # Releasing z drops both z and the now-unreferenced x.
    s.client_releases_keys(keys=[z.key], client=c.id)
    assert x.key not in s.tasks
    assert z.key not in s.tasks
    assert not s.tasks[y.key].dependents
    s.client_releases_keys(keys=[y.key], client=c.id)
    assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
    """Releasing keys in a diamond-shaped dependency graph removes exactly the
    tasks with no remaining dependents, and their data leaves the workers."""
    a, b, c, d = await e.scatter(list(range(4)))
    ab = e.submit(add, a, b)
    cd = e.submit(add, c, d)
    ac = e.submit(add, a, c)
    acab = e.submit(add, ac, ab)

    await wait([a, b, c, d, ab, ac, cd, acab])

    assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}

    # ab is still needed by acab, so nothing is forgotten yet.
    s.client_releases_keys(keys=[ab.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}

    # b was only needed by ab; once b's client reference goes, b follows ab.
    s.client_releases_keys(keys=[b.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}

    s.client_releases_keys(keys=[acab.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
    assert b.key not in s.tasks

    # Forgotten data is eventually evicted from worker memory.
    while b.key in A.data or b.key in B.data:
        await asyncio.sleep(0.01)

    s.client_releases_keys(keys=[ac.key], client=e.id)
    assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
    """Releasing a computation while its tasks are still running forgets the
    released subgraph without corrupting scheduler state."""
    delayed2 = partial(delayed, pure=True)
    a, b, c, d = (delayed2(slowinc)(i) for i in range(4))
    ab = delayed2(slowadd)(a, b, dask_key_name="ab")
    cd = delayed2(slowadd)(c, d, dask_key_name="cd")
    ac = delayed2(slowadd)(a, c, dask_key_name="ac")
    acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")

    x, y = e.compute([ac, acab])
    s.validate_state()

    # Let some tasks start; state must remain valid throughout.
    for i in range(5):
        await asyncio.sleep(0.01)
        s.validate_state()

    s.client_releases_keys(keys=[y.key], client=e.id)
    s.validate_state()

    # Everything reachable only from y is gone.
    for k in [acab.key, ab.key, b.key]:
        assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
    """Exception and blame records persist while any blamed task is still
    referenced, and are cleared only when the last one is released."""
    x = c.submit(div, 1, 0)
    y = c.submit(inc, x)
    z = c.submit(inc, y)
    await wait([y])

    # x raised; y and z are blamed on x's failure.
    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key in s.exceptions_blame

    s.client_releases_keys(keys=[z.key], client=c.id)

    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key not in s.exceptions_blame

    s.client_releases_keys(keys=[x.key], client=c.id)

    # y still references x's failure, so the records survive.
    assert x.key in s.exceptions
    assert x.key in s.exceptions_blame
    assert y.key in s.exceptions_blame
    assert z.key not in s.exceptions_blame

    s.client_releases_keys(keys=[y.key], client=c.id)

    assert x.key not in s.exceptions
    assert x.key not in s.exceptions_blame
    assert y.key not in s.exceptions_blame
    assert z.key not in s.exceptions_blame
def test_repr_sync(c):
    """str/repr of a synchronous client include address and worker info."""
    s = str(c)
    r = repr(c)
    assert c.scheduler.address in s
    assert c.scheduler.address in r
    assert str(2) in s  # nworkers
    assert "cores" in s or "threads" in s
@gen_cluster(client=True)
async def test_waiting_data(c, s, a, b):
    """Once dependents finish, completed tasks have no waiting_data entries."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)

    await wait([x, y, z])

    assert not s.waiting_data.get(x.key)
    assert not s.waiting_data.get(y.key)
@gen_cluster()
async def test_multi_client(s, a, b):
    """Two clients sharing a key: the scheduler tracks per-client wants and
    cleans up each client's interest as it disconnects."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)

    assert set(s.client_comms) == {c.id, f.id}

    x = c.submit(inc, 1)
    y = f.submit(inc, 2)
    y2 = c.submit(inc, 2)
    # Identical call from both clients deduplicates to the same key.
    assert y.key == y2.key

    await wait([x, y])

    assert s.wants_what == {
        c.id: {x.key, y.key},
        f.id: {y.key},
        "fire-and-forget": set(),
    }
    assert s.who_wants == {x.key: {c.id}, y.key: {c.id, f.id}}

    await c.close()

    while c.id in s.wants_what:
        await asyncio.sleep(0.01)

    assert c.id not in s.wants_what
    assert c.id not in s.who_wants[y.key]
    assert x.key not in s.who_wants

    await f.close()

    # With no clients left, all tasks are eventually forgotten.
    while s.tasks:
        await asyncio.sleep(0.01)
def long_running_client_connection(address):
    """Subprocess target: open a client, compute one task, then idle.

    Used to simulate a client whose process is killed while still holding
    a connection to the scheduler.
    """
    with pristine_loop():
        c = Client(address)
        x = c.submit(lambda x: x + 1, 10)
        x.result()
        sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
    """Killing a client process drops its tasks from the scheduler."""
    proc = mp_context.Process(target=long_running_client_connection, args=(s.address,))
    proc.daemon = True
    proc.start()

    while not s.tasks:
        await asyncio.sleep(0.01)

    proc.terminate()

    # The scheduler notices the dead connection and forgets its work.
    while s.tasks:
        await asyncio.sleep(0.01)
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
    """Data shared between two clients is released only after every client's
    future referencing it has been garbage-collected."""
    c = await Client(s.address, asynchronous=True)

    f = await Client(s.address, asynchronous=True)

    x = c.submit(inc, 1)
    y = f.submit(inc, 2)
    y2 = c.submit(inc, 2)
    assert y.key == y2.key
    await wait([x, y])

    # Explicitly trigger future finalization (simulates GC).
    x.__del__()
    while x.key in a.data or x.key in b.data:
        await asyncio.sleep(0.01)

    assert s.wants_what == {c.id: {y.key}, f.id: {y.key}, "fire-and-forget": set()}
    assert s.who_wants == {y.key: {c.id, f.id}}

    y.__del__()
    while x.key in s.wants_what[f.id]:
        await asyncio.sleep(0.01)

    await asyncio.sleep(0.1)
    # c (via y2) still wants y, so the data must survive f's release.
    assert y.key in a.data or y.key in b.data
    assert s.wants_what == {c.id: {y.key}, f.id: set(), "fire-and-forget": set()}
    assert s.who_wants == {y.key: {c.id}}

    y2.__del__()
    while y.key in a.data or y.key in b.data:
        await asyncio.sleep(0.01)

    assert not any(v for v in s.wants_what.values())
    assert not s.who_wants

    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test__broadcast(c, s, a, b):
    """scatter(broadcast=True) copies every value to every worker."""
    x, y = await c.scatter([1, 2], broadcast=True)
    assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test__broadcast_integer(c, s, *workers):
    """scatter(broadcast=n) replicates each value to exactly n workers."""
    x, y = await c.scatter([1, 2], broadcast=2)
    assert len(s.tasks[x.key].who_has) == 2
    assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True)
async def test__broadcast_dict(c, s, a, b):
    """scatter of a dict with broadcast=True copies it to every worker."""
    d = await c.scatter({"x": 1}, broadcast=True)
    assert a.data == b.data == {"x": 1}
def test_broadcast(c, s, a, b):
    """Synchronous scatter(broadcast=True), with and without a workers= subset."""
    x, y = c.scatter([1, 2], broadcast=True)

    has_what = sync(c.loop, c.scheduler.has_what)

    assert {k: set(v) for k, v in has_what.items()} == {
        a["address"]: {x.key, y.key},
        b["address"]: {x.key, y.key},
    }

    # Restricting broadcast to one worker only places data there.
    [z] = c.scatter([3], broadcast=True, workers=[a["address"]])

    has_what = sync(c.loop, c.scheduler.has_what)
    assert {k: set(v) for k, v in has_what.items()} == {
        a["address"]: {x.key, y.key, z.key},
        b["address"]: {x.key, y.key},
    }
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
    """Scheduler can proxy an RPC message to a specific worker."""
    msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
    assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test_cancel(c, s, a, b):
    """Cancelling a future also cancels its dependents and clears all tasks."""
    x = c.submit(slowinc, 1)
    y = c.submit(slowinc, x)

    while y.key not in s.tasks:
        await asyncio.sleep(0.01)

    await c.cancel([x])

    assert x.cancelled()
    assert "cancel" in str(x)
    s.validate_state()

    # y depended on x, so it becomes cancelled too.
    while not y.cancelled():
        await asyncio.sleep(0.01)

    assert not s.tasks
    s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
    """Cancellation works for tuple-shaped keys."""
    x = c.submit(inc, 1, key=("x", 0, 1))
    await x
    await c.cancel(x)
    with pytest.raises(CancelledError):
        await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
    """One client's cancel does not cancel another client's identical future."""
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)

    x = c.submit(slowinc, 1)
    y = f.submit(slowinc, 1)

    # Same computation deduplicates to a shared key.
    assert x.key == y.key

    await c.cancel([x])

    assert x.cancelled()
    assert not y.cancelled()

    while y.key not in s.tasks:
        await asyncio.sleep(0.01)

    out = await y
    assert out == 2

    with pytest.raises(CancelledError):
        await x

    await c.close()
    await f.close()
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
    """Cancelling a collection cancels the futures backing it; repeat and
    list-wrapped cancels are harmless."""
    L = c.map(double, [[1], [2], [3]])
    x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)

    await c.cancel(x)
    await c.cancel([x])
    assert all(f.cancelled() for f in L)
    while s.tasks:
        await asyncio.sleep(0.01)
def test_cancel_sync(c):
    """Synchronous cancel: dependents are cancelled, independent upstream
    results survive, and Future.cancel() works too."""
    x = c.submit(slowinc, 1, key="x")
    y = c.submit(slowinc, x, key="y")
    z = c.submit(slowinc, y, key="z")

    c.cancel([y])

    start = time()
    while not z.cancelled():
        sleep(0.01)
        assert time() < start + 30

    # x does not depend on y, so it still completes.
    assert x.result() == 2

    z.cancel()
    assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
    """Finished futures record and display their result type."""
    x = c.submit(inc, 1)
    await wait([x])
    assert x.type == int
    assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
    """Remote-task tracebacks exclude scheduler/worker internal frames."""
    x = c.submit(div, 1, 0)
    try:
        await x
    except Exception as e:
        f = e
        exc_type, exc_value, tb = sys.exc_info()
        while tb:
            assert "scheduler" not in tb.tb_frame.f_code.co_filename
            assert "worker" not in tb.tb_frame.f_code.co_filename
            tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_differnet_lengths(c, s, a, b):
    """map() over unequal-length iterables truncates to the shortest.

    NOTE(review): "differnet" is a typo for "different" in the test name;
    left unchanged to avoid altering the public test id.
    """
    assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
    """Creating a client installs it as dask's default scheduler (quietly),
    and closing the client restores the previous default."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert dask.base.get_scheduler() == c.get

    # No noise beyond a single line on stdout.
    out, err = capsys.readouterr()
    assert len(out.strip().split("\n")) == 1

    assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
    """persist() on delayed objects returns same-typed collections backed by
    futures, registers interest with the scheduler, and computes correctly."""
    from dask.delayed import Delayed, delayed

    x = delayed(1)
    y = delayed(inc)(x)
    z = delayed(dec)(x)
    w = delayed(add)(y, z)

    yy, ww = c.persist([y, w])
    assert type(yy) == type(y)
    assert type(ww) == type(w)
    # Persisted graphs collapse to a single (future-backed) task each.
    assert len(yy.dask) == 1
    assert len(ww.dask) == 1
    assert len(w.dask) > 1
    assert y.__dask_keys__() == yy.__dask_keys__()
    assert w.__dask_keys__() == ww.__dask_keys__()

    while y.key not in s.tasks and w.key not in s.tasks:
        await asyncio.sleep(0.01)

    assert s.who_wants[y.key] == {c.id}
    assert s.who_wants[w.key] == {c.id}

    yyf, wwf = c.compute([yy, ww])
    yyy, www = await c.gather([yyf, wwf])
    assert yyy == inc(1)
    assert www == add(inc(1), dec(1))

    # persist on a single object / a list mirrors the input shape.
    assert isinstance(c.persist(y), Delayed)
    assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test__persist(c, s, a, b):
    """persist() on a dask array keeps keys but swaps tasks for futures,
    and the persisted array computes to the same values."""
    pytest.importorskip("dask.array")
    import dask.array as da

    x = da.ones((10, 10), chunks=(5, 10))
    y = 2 * (x + 1)
    assert len(y.dask) == 6
    yy = c.persist(y)

    # Original graph untouched; persisted graph is one future per chunk.
    assert len(y.dask) == 6
    assert len(yy.dask) == 2
    assert all(isinstance(v, Future) for v in yy.dask.values())
    assert yy.__dask_keys__() == y.__dask_keys__()

    g, h = c.compute([y, yy])

    gg, hh = await c.gather([g, h])
    assert (gg == hh).all()
def test_persist(c):
    """Synchronous variant of test__persist: persisted array equals original."""
    pytest.importorskip("dask.array")
    import dask.array as da

    x = da.ones((10, 10), chunks=(5, 10))
    y = 2 * (x + 1)
    assert len(y.dask) == 6
    yy = c.persist(y)
    assert len(y.dask) == 6
    assert len(yy.dask) == 2
    assert all(isinstance(v, Future) for v in yy.dask.values())
    assert yy.__dask_keys__() == y.__dask_keys__()

    zz = yy.compute()
    z = y.compute()
    assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
    """A deeply recursive failure produces a bounded, picklable traceback."""
    from distributed.protocol.pickle import dumps

    def deep(n):
        if n == 0:
            1 / 0
        else:
            return deep(n - 1)

    x = c.submit(deep, 200)
    await wait([x])
    assert len(dumps(c.futures[x.key].traceback)) < 10000
    assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
    """wait() accepts a dask collection and waits on its backing futures."""
    L = c.map(double, [[1], [2], [3]])
    x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)

    await wait(x)
    assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
    """futures_of() finds futures in scalars, nested containers, dicts,
    dask collections, and SubgraphCallables."""
    x, y, z = c.map(inc, [1, 2, 3])

    assert set(futures_of(0)) == set()
    assert set(futures_of(x)) == {x}
    assert set(futures_of([x, y, z])) == {x, y, z}
    assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
    assert set(futures_of({"x": x, "y": [y]})) == {x, y}

    b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
    assert set(futures_of(b)) == {x, y, z}

    sg = SubgraphCallable(
        {"x": x, "y": y, "z": z, "out": (add, (add, (add, x, y), z), "in")},
        "out",
        ("in",),
    )
    assert set(futures_of(sg)) == {x, y, z}
def test_futures_of_class():
    """A class object (not an instance) contains no futures."""
    da = pytest.importorskip("dask.array")

    assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
    """Using a cancelled future as input raises CancelledError everywhere it
    can appear (await, get, submit args/kwargs, map kwargs)."""
    x = c.submit(inc, 1)
    await c.cancel([x])

    with pytest.raises(CancelledError):
        await x

    with pytest.raises(CancelledError):
        await c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)

    with pytest.raises(CancelledError):
        c.submit(inc, x)

    with pytest.raises(CancelledError):
        c.submit(add, 1, y=x)

    with pytest.raises(CancelledError):
        c.map(add, [1], y=x)

    # The dependent task never reaches the scheduler.
    assert "y" not in s.tasks
@pytest.mark.skip
@gen_cluster(nthreads=[("127.0.0.1", 1)], client=True)
async def test_dont_delete_recomputed_results(c, s, w):
    """Recomputed data survives the delete cycle triggered by the first
    future's garbage collection (currently skipped)."""
    x = c.submit(inc, 1)  # compute first time
    await wait([x])
    x.__del__()  # trigger garbage collection
    await asyncio.sleep(0)
    xx = c.submit(inc, 1)  # compute second time

    start = time()
    while xx.key not in w.data:  # data shows up
        await asyncio.sleep(0.01)
        assert time() < start + 1

    while time() < start + (s.delete_interval + 100) / 1000:  # and stays
        assert xx.key in w.data
        await asyncio.sleep(0.01)
@gen_cluster(nthreads=[], client=True)
async def test_fatally_serialized_input(c, s):
    """A task whose input kills the deserializing process still registers on
    the scheduler (no workers, so nothing actually dies here)."""
    o = FatallySerializedObject()

    future = c.submit(inc, o)

    while not s.tasks:
        await asyncio.sleep(0.01)
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
    """Sequentially submitted tasks land on different workers (skipped)."""
    x = c.submit(inc, 1)
    await wait(x)

    y = c.submit(inc, 2)
    await wait(y)

    assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
    """Client.run executes a function on all, some, or no workers."""
    results = await c.run(inc, 1)
    assert results == {a.address: 2, b.address: 2}

    results = await c.run(inc, 1, workers=[a.address])
    assert results == {a.address: 2}

    # Explicit empty worker list runs nowhere.
    results = await c.run(inc, 1, workers=[])
    assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
    """run/run_on_scheduler round-trip common picklable container types."""
    futures = c.map(inc, range(10))
    await wait(futures)

    def func():
        return {}, set(), [], (), 1, "hello", b"100"

    results = await c.run_on_scheduler(func)
    assert results == func()

    results = await c.run(func)
    assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
    """Synchronous Client.run forwards positional and keyword arguments."""
    def func(x, y=10):
        return x + y

    result = c.run(func, 1, y=2)
    assert result == {a["address"]: 3, b["address"]: 3}

    result = c.run(func, 1, y=2, workers=[a["address"]])
    assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
    """Client.run awaits coroutine functions and propagates their errors."""
    results = await c.run(geninc, 1, delay=0.05)
    assert results == {a.address: 2, b.address: 2}

    results = await c.run(geninc, 1, delay=0.05, workers=[a.address])
    assert results == {a.address: 2}

    results = await c.run(geninc, 1, workers=[])
    assert results == {}

    with pytest.raises(RuntimeError, match="hello"):
        await c.run(throws, 1)

    results = await c.run(asyncinc, 2, delay=0.01)
    assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
    """Synchronous run of a coroutine; wait=False returns immediately."""
    result = c.run(geninc, 2, delay=0.01)
    assert result == {a["address"]: 3, b["address"]: 3}

    result = c.run(geninc, 2, workers=[a["address"]])
    assert result == {a["address"]: 3}

    t1 = time()
    result = c.run(geninc, 2, delay=10, wait=False)
    t2 = time()
    # Fire-and-forget: no result, and no waiting on the 10s delay.
    assert result is None
    assert t2 - t1 <= 1.0
@gen_cluster(client=True)
async def test_run_coroutine_deprecated(c, s, a, b):
    """run_coroutine is deprecated in favour of Client.run but still works."""
    async def foo():
        return "bar"

    with pytest.warns(FutureWarning, match="Client.run "):
        results = await c.run_coroutine(foo)
    assert results == {a.address: "bar", b.address: "bar"}
@gen_cluster(client=True)
async def test_run_exception(c, s, a, b):
    """Client.run honours on_error={'raise','return','ignore'} and rejects
    invalid values."""
    class MyError(Exception):
        pass

    def raise_exception(dask_worker, addr):
        # Fail only on the worker whose address matches.
        if addr == dask_worker.address:
            raise MyError("informative message")
        return 123

    with pytest.raises(MyError, match="informative message"):
        await c.run(raise_exception, addr=a.address)
    with pytest.raises(MyError, match="informative message"):
        await c.run(raise_exception, addr=a.address, on_error="raise")
    with pytest.raises(ValueError, match="on_error must be"):
        await c.run(raise_exception, addr=a.address, on_error="invalid")

    out = await c.run(raise_exception, addr=a.address, on_error="return")
    assert isinstance(out[a.address], MyError)
    assert out[b.address] == 123

    out = await c.run(raise_exception, addr=a.address, on_error="ignore")
    assert out == {b.address: 123}
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_run_rpc_error(c, s, a, b):
    """on_error also applies to RPC/connection failures, not only to
    exceptions raised by the user function."""
    a.stop()

    with pytest.raises(OSError, match="Timed out trying to connect"):
        await c.run(inc, 1)
    with pytest.raises(OSError, match="Timed out trying to connect"):
        await c.run(inc, 1, on_error="raise")

    out = await c.run(inc, 1, on_error="return")
    assert isinstance(out[a.address], OSError)
    assert out[b.address] == 2

    out = await c.run(inc, 1, on_error="ignore")
    assert out == {b.address: 2}
def test_diagnostic_ui(loop):
    """Diagnostic queries nthreads/who_has/has_what accept no argument, a
    list, or a single address/future."""
    with cluster() as (s, [a, b]):
        a_addr = a["address"]
        b_addr = b["address"]
        with Client(s["address"], loop=loop) as c:
            d = c.nthreads()
            assert d == {a_addr: 1, b_addr: 1}

            d = c.nthreads([a_addr])
            assert d == {a_addr: 1}
            d = c.nthreads(a_addr)
            assert d == {a_addr: 1}
            d = c.nthreads(a["address"])
            assert d == {a_addr: 1}

            x = c.submit(inc, 1)
            y = c.submit(inc, 2)
            z = c.submit(inc, 3)
            wait([x, y, z])
            d = c.who_has()
            assert set(d) == {x.key, y.key, z.key}
            assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
            assert all(d.values())

            d = c.who_has([x, y])
            assert set(d) == {x.key, y.key}

            d = c.who_has(x)
            assert set(d) == {x.key}

            d = c.has_what()
            assert set(d) == {a_addr, b_addr}
            assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)

            d = c.has_what([a_addr])
            assert set(d) == {a_addr}

            d = c.has_what(a_addr)
            assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
    """Client.nbytes reports per-key and per-prefix byte totals (sync)."""
    incs = c.map(inc, [1, 2, 3])
    doubles = c.map(double, [1, 2, 3])
    wait(incs + doubles)

    assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
    assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
    """Scheduler.get_nbytes reports per-key and per-prefix byte totals."""
    incs = c.map(inc, [1, 2, 3])
    doubles = c.map(double, [1, 2, 3])
    await wait(incs + doubles)

    assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
    assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True, nthreads=[])
async def test_worker_aliases(c, s):
    """workers= accepts worker names (strings or ints), not just addresses."""
    a = Worker(s.address, name="alice")
    b = Worker(s.address, name="bob")
    w = Worker(s.address, name=3)
    await asyncio.gather(a, b, w)

    L = c.map(inc, range(10), workers="alice")
    future = await c.scatter(123, workers=3)
    await wait(L)
    assert len(a.data) == 10
    assert len(b.data) == 0
    assert dict(w.data) == {future.key: 123}

    # Aliases work as scalars, in lists, and as strings.
    for i, alias in enumerate([3, [3], "alice"]):
        result = await c.submit(lambda x: x + 1, i, workers=alias)
        assert result == i + 1

    await asyncio.gather(a.close(), b.close(), w.close())
def test_persist_get_sync(c):
    """A delayed graph built on top of a persisted intermediate computes
    correctly (sync)."""
    x, y = delayed(1), delayed(2)
    xx = delayed(add)(x, x)
    yy = delayed(add)(y, y)
    xxyy = delayed(add)(xx, yy)

    xxyy2 = c.persist(xxyy)
    xxyy3 = delayed(add)(xxyy2, 10)

    assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b):
    """get() and repeated compute() on a graph containing a persisted
    intermediate all yield the same result."""
    x, y = delayed(1), delayed(2)
    xx = delayed(add)(x, x)
    yy = delayed(add)(y, y)
    xxyy = delayed(add)(xx, yy)

    xxyy2 = c.persist(xxyy)
    xxyy3 = delayed(add)(xxyy2, 10)

    await asyncio.sleep(0.5)
    result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
    assert result[0] == ((1 + 1) + (2 + 2)) + 10

    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10

    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10

    result = await c.compute(xxyy3)
    assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
    """Opening and closing clients does not leak file descriptors."""
    with cluster() as (s, [a, b]):
        proc = psutil.Process()
        with Client(s["address"], loop=loop) as c:  # first client to start loop
            before = proc.num_fds()  # measure
            for i in range(4):
                with Client(s["address"], loop=loop):  # start more clients
                    pass
            start = time()
            while proc.num_fds() > before:
                sleep(0.01)
                assert time() < start + 10, (before, proc.num_fds())
@gen_cluster()
async def test_startup_close_startup(s, a, b):
    """A second client can connect after the first one has closed."""
    c = await Client(s.address, asynchronous=True)
    await c.close()

    c = await Client(s.address, asynchronous=True)
    await c.close()
def test_startup_close_startup_sync(loop):
    """Repeated synchronous connect/close cycles against one cluster work."""
    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            sleep(0.1)
        with Client(s["address"]) as c:
            pass
        with Client(s["address"]) as c:
            pass
        sleep(0.1)
        with Client(s["address"]) as c:
            pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
    """An exception that itself fails to pickle still surfaces its message."""
    def f():
        class BadlySerializedException(Exception):
            def __reduce__(self):
                raise TypeError()

        raise BadlySerializedException("hello world")

    x = c.submit(f)
    with pytest.raises(Exception, match="hello world"):
        await x
# Set rebalance() to work predictably on small amounts of managed memory. By default, it
# uses optimistic memory, which would only be possible to test by allocating very large
# amounts of managed memory, so that they would hide variations in unmanaged memory.
REBALANCE_MANAGED_CONFIG = {
    "distributed.worker.memory.rebalance.measure": "managed",
    "distributed.worker.memory.rebalance.sender-min": 0,
    "distributed.worker.memory.rebalance.sender-recipient-gap": 0,
}
@gen_cluster(client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance(c, s, a, b):
    """Test Client.rebalance(). These are just to test the Client wrapper around
    Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
    """
    futures = await c.scatter(range(100), workers=[a.address])
    assert len(a.data) == 100
    assert len(b.data) == 0
    await c.rebalance()
    # Data evens out 50/50 across the two workers.
    assert len(a.data) == 50
    assert len(b.data) == 50
@gen_cluster(nthreads=[("", 1)] * 3, client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance_workers_and_keys(client, s, a, b, c):
    """Test Client.rebalance(). These are just to test the Client wrapper around
    Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
    """
    futures = await client.scatter(range(100), workers=[a.address])
    assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)

    # Passing empty iterables is not the same as omitting the arguments
    await client.rebalance([])
    await client.rebalance(workers=[])
    assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)

    # Limit rebalancing to two arbitrary keys and two arbitrary workers.
    await client.rebalance([futures[3], futures[7]], [a.address, b.address])
    assert (len(a.data), len(b.data), len(c.data)) == (98, 2, 0)

    with pytest.raises(KeyError):
        await client.rebalance(workers=["notexist"])
def test_rebalance_sync():
    """Synchronous Client.rebalance() evens data across in-process workers."""
    with dask.config.set(REBALANCE_MANAGED_CONFIG):
        # In-process workers so the test can inspect worker.data directly.
        with Client(n_workers=2, processes=False, dashboard_address=":0") as c:
            s = c.cluster.scheduler
            a = c.cluster.workers[0]
            b = c.cluster.workers[1]
            futures = c.scatter(range(100), workers=[a.address])

            assert len(a.data) == 100
            assert len(b.data) == 0
            c.rebalance()
            assert len(a.data) == 50
            assert len(b.data) == 50
@gen_cluster(client=True)
async def test_rebalance_unprepared(c, s, a, b):
    """Client.rebalance() internally waits for unfinished futures"""
    futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    # Let the futures reach the scheduler
    await asyncio.sleep(0.1)
    # We didn't wait enough for futures to complete. However, Client.rebalance() will
    # block until all futures are completed before invoking Scheduler.rebalance().
    await c.rebalance(futures)
    s.validate_state()
@gen_cluster(client=True)
async def test_rebalance_raises_on_explicit_missing_data(c, s, a, b):
    """rebalance() raises KeyError if explicitly listed futures disappear"""
    # A future fabricated in "memory" state that the scheduler never saw.
    f = Future("x", client=c, state="memory")
    with pytest.raises(KeyError, match="Could not rebalance keys:"):
        await c.rebalance(futures=[f])
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
    """A finished future loses its 'finished' status when its only holder dies."""
    x = c.submit(inc, 1, workers=[a.address])
    await x
    await a.close()

    while x.status == "finished":
        await asyncio.sleep(0.01)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
    """A task restricted to a lost worker sits in 'no-worker' until a new
    worker arrives, then runs to completion."""
    x = c.submit(inc, 1, workers=[a.ip])
    await x

    await a.close()
    while x.status == "finished":
        await asyncio.sleep(0.01)

    assert s.tasks[x.key] in s.unrunnable
    assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}

    w = await Worker(s.address, loop=s.loop)

    while x.status != "finished":
        await asyncio.sleep(0.01)

    assert s.tasks[x.key] not in s.unrunnable
    result = await x
    assert result == 2
    await w.close()
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
    """Tasks submitted with no workers run once a worker joins."""
    futures = c.map(inc, range(10))
    n = await Nanny(s.address, nthreads=2, loop=s.loop)
    await c.gather(futures)
    await n.close()
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_workers_register_indirect_data(c, s, a, b):
    """Data fetched as a dependency is registered with the scheduler as a
    second replica on the fetching worker."""
    [x] = await c.scatter([1], workers=a.address)
    y = c.submit(inc, x, workers=b.ip)
    await y

    assert b.data[x.key] == 1
    assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
    assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
    s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
    """Submitting with a cancelled future as input raises CancelledError."""
    x = c.submit(inc, 1)
    await x

    await c.cancel(x)

    with pytest.raises(CancelledError):
        c.submit(inc, x)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate(c, s, *workers):
    """Scheduler.replicate(n=5) copies each key to exactly 5 of 10 workers."""
    [a, b] = await c.scatter([1, 2])
    await s.replicate(keys=[a.key, b.key], n=5)
    s.validate_state()

    assert len(s.tasks[a.key].who_has) == 5
    assert len(s.tasks[b.key].who_has) == 5

    assert sum(a.key in w.data for w in workers) == 5
    assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True)
async def test_replicate_tuple_keys(c, s, a, b):
    """replicate() and rebalance() handle tuple-shaped keys."""
    x = delayed(inc)(1, dask_key_name=("x", 1))
    f = c.persist(x)
    await c.replicate(f, n=5)
    s.validate_state()
    assert a.data and b.data

    await c.rebalance(f)
    s.validate_state()
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_workers(c, s, *workers):
    """replicate() honours the workers= subset, can shrink replica counts
    (n=1), and n=None means 'all workers'."""
    [a, b] = await c.scatter([1, 2], workers=[workers[0].address])
    await s.replicate(
        keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
    )

    # All replicas live within the allowed first five workers.
    assert len(s.tasks[a.key].who_has) == 5
    assert len(s.tasks[b.key].who_has) == 5

    assert sum(a.key in w.data for w in workers[:5]) == 5
    assert sum(b.key in w.data for w in workers[:5]) == 5
    assert sum(a.key in w.data for w in workers[5:]) == 0
    assert sum(b.key in w.data for w in workers[5:]) == 0

    # Shrinking back down to one replica per key.
    await s.replicate(keys=[a.key, b.key], n=1)

    assert len(s.tasks[a.key].who_has) == 1
    assert len(s.tasks[b.key].who_has) == 1
    assert sum(a.key in w.data for w in workers) == 1
    assert sum(b.key in w.data for w in workers) == 1

    s.validate_state()

    await s.replicate(keys=[a.key, b.key], n=None)  # all
    assert len(s.tasks[a.key].who_has) == 10
    assert len(s.tasks[b.key].who_has) == 10
    s.validate_state()

    await s.replicate(
        keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
    )
    # One replica within the subset; replicas outside it are untouched.
    assert sum(a.key in w.data for w in workers[:5]) == 1
    assert sum(b.key in w.data for w in workers[:5]) == 1
    assert sum(a.key in w.data for w in workers[5:]) == 5
    assert sum(b.key in w.data for w in workers[5:]) == 5
    s.validate_state()
class CountSerialization:
    """Records how many times an instance has been deserialized.

    ``n`` starts at 0; every unpickle restores the previous count plus one,
    so after a chain of serialize/deserialize hops ``n`` equals the depth.
    """

    def __init__(self):
        self.n = 0

    def __getstate__(self):
        return self.n

    def __setstate__(self, n):
        self.n = n + 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_replicate_tree_branching(c, s, *workers):
    """Replication fans out tree-wise: some copies are made from replicas,
    not all from the original (serialization count > 1 somewhere)."""
    obj = CountSerialization()
    [future] = await c.scatter([obj])
    await s.replicate(keys=[future.key], n=10)

    max_count = max(w.data[future.key].n for w in workers)
    assert max_count > 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 10)
async def test_client_replicate(c, s, *workers):
    """Client.replicate grows and shrinks replica counts; default n means
    all workers."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    await c.replicate([x, y], n=5)

    assert len(s.tasks[x.key].who_has) == 5
    assert len(s.tasks[y.key].who_has) == 5

    await c.replicate([x, y], n=3)

    assert len(s.tasks[x.key].who_has) == 3
    assert len(s.tasks[y.key].who_has) == 3

    await c.replicate([x, y])
    s.validate_state()

    assert len(s.tasks[x.key].who_has) == 10
    assert len(s.tasks[y.key].who_has) == 10
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 1), ("127.0.0.2", 1), ("127.0.0.2", 1)],
)
async def test_client_replicate_host(client, s, a, b, c):
    """workers= given as a bare host string replicates to all workers on
    that host."""
    aws = s.workers[a.address]
    bws = s.workers[b.address]
    cws = s.workers[c.address]

    x = client.submit(inc, 1, workers="127.0.0.2")
    await wait([x])
    assert s.tasks[x.key].who_has == {bws} or s.tasks[x.key].who_has == {cws}

    await client.replicate([x], workers=["127.0.0.2"])
    assert s.tasks[x.key].who_has == {bws, cws}

    await client.replicate([x], workers=["127.0.0.1"])
    assert s.tasks[x.key].who_has == {aws, bws, cws}
def test_client_replicate_sync(c):
    """Synchronous replicate; n=0 is rejected with ValueError."""
    x = c.submit(inc, 1)
    y = c.submit(inc, 2)
    c.replicate([x, y], n=2)

    who_has = c.who_has()
    assert len(who_has[x.key]) == len(who_has[y.key]) == 2

    with pytest.raises(ValueError):
        c.replicate([x], n=0)

    assert y.result() == 3
@pytest.mark.skipif(WINDOWS, reason="Windows timer too coarse-grained")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 1)
async def test_task_load_adapts_quickly(c, s, a):
    """Per-prefix duration estimates track recent task timings."""
    future = c.submit(slowinc, 1, delay=0.2)  # slow
    await wait(future)
    assert 0.15 < s.task_prefixes["slowinc"].duration_average < 0.4

    futures = c.map(slowinc, range(10), delay=0)  # very fast
    await wait(futures)

    # Average drops quickly after the fast batch.
    assert 0 < s.task_prefixes["slowinc"].duration_average < 0.1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_after_fast_functions(c, s, a, b):
    """After fast warm-up tasks, subsequent work spreads over both workers."""
    x = c.submit(inc, 1, workers=a.address)  # very fast
    y = c.submit(inc, 2, workers=b.address)  # very fast
    await wait([x, y])

    futures = c.map(inc, range(2, 11))
    await wait(futures)
    assert any(f.key in a.data for f in futures)
    assert any(f.key in b.data for f in futures)

    # assert abs(len(a.data) - len(b.data)) <= 3
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_even_load_on_startup(c, s, a, b):
    """Two initial tasks on a fresh two-worker cluster go one to each."""
    x, y = c.map(inc, [1, 2])
    await wait([x, y])
    assert len(a.data) == len(b.data) == 1
@pytest.mark.skip
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_contiguous_load(c, s, a, b):
    """(Currently skipped) Adjacent tasks should be co-located on the
    same worker so each worker holds a contiguous pair."""
    w, x, y, z = c.map(inc, [1, 2, 3, 4])
    await wait([w, x, y, z])

    groups = [set(a.data), set(b.data)]
    assert {w.key, x.key} in groups
    assert {y.key, z.key} in groups
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit(c, s, *workers):
    """Four submitted tasks on four single-thread workers land one each."""
    L = [c.submit(slowinc, i) for i in range(4)]
    await wait(L)
    for w in workers:
        assert len(w.data) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_balanced_with_submit_and_resident_data(c, s, *workers):
    """With a broadcast value already on every worker, four impure tasks
    still balance one per worker (each worker ends with broadcast + result)."""
    [x] = await c.scatter([10], broadcast=True)
    L = [c.submit(slowinc, x, pure=False) for i in range(4)]
    await wait(L)
    for w in workers:
        assert len(w.data) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores(c, s, a, b):
    """While work is queued, every worker thread should have at least one
    task in its processing set (cores stay saturated)."""
    for delay in [0, 0.01, 0.1]:
        futures = c.map(slowinc, range(100), delay=delay)
        futures = c.map(slowinc, futures, delay=delay / 10)

        while not s.tasks:
            # NOTE(review): this guard is inside ``while not s.tasks``, so
            # ``s.tasks`` is always falsy here and the assertion below never
            # executes — confirm whether the loop condition was intended to
            # be ``while s.tasks`` (i.e. check saturation while work runs).
            if s.tasks:
                assert all(
                    len(p) >= 20
                    for w in s.workers.values()
                    for p in w.processing.values()
                )
            await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 20)] * 2)
async def test_scheduler_saturates_cores_random(c, s, a, b):
    """Same saturation check as above but with random task durations.

    NOTE(review): the loop variable ``delay`` is never used (``c.map`` is
    called with ``scale=0.1`` only), and the ``if s.tasks`` branch inside
    ``while not s.tasks`` is unreachable — confirm intended loop condition.
    """
    for delay in [0, 0.01, 0.1]:
        futures = c.map(randominc, range(100), scale=0.1)
        while not s.tasks:
            if s.tasks:
                assert all(
                    len(p) >= 20
                    for w in s.workers.values()
                    for p in w.processing.values()
                )
            await asyncio.sleep(0.01)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4)
async def test_cancel_clears_processing(c, s, *workers):
    """Cancelling an in-flight task clears it from every worker's
    processing set and leaves the scheduler state valid.

    Fix: removed an unused ``da = pytest.importorskip("dask.array")``
    line — it silently skipped this test on installs without dask.array
    even though the test never touches dask.array.
    """
    x = c.submit(slowinc, 1, delay=0.2)
    # Wait until the scheduler has actually received the task
    while not s.tasks:
        await asyncio.sleep(0.01)

    await c.cancel(x)

    # All workers must drain their processing sets after the cancel
    while any(v for w in s.workers.values() for v in w.processing):
        await asyncio.sleep(0.01)
    s.validate_state()
def test_default_get():
    """``set_as_default`` controls whether a Client installs itself as the
    global dask scheduler (and injects the ``shuffle`` config); closing or
    exiting the context must restore the previous scheduler."""
    with cluster() as (s, [a, b]):
        pre_get = dask.base.get_scheduler()
        # No client yet -> no distributed shuffle config
        pytest.raises(KeyError, dask.config.get, "shuffle")
        with Client(s["address"], set_as_default=True) as c:
            assert dask.base.get_scheduler() == c.get
            assert dask.config.get("shuffle") == "tasks"

        # Exiting the context restores the previous scheduler/config
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")

        c = Client(s["address"], set_as_default=False)
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")
        c.close()

        c = Client(s["address"], set_as_default=True)
        assert dask.config.get("shuffle") == "tasks"
        assert dask.base.get_scheduler() == c.get
        c.close()
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")

        # set_as_default defaults to True
        with Client(s["address"]) as c:
            assert dask.base.get_scheduler() == c.get

        with Client(s["address"], set_as_default=False) as c:
            assert dask.base.get_scheduler() != c.get
        assert dask.base.get_scheduler() != c.get

        # Nested defaults unwind like a stack
        with Client(s["address"], set_as_default=True) as c1:
            assert dask.base.get_scheduler() == c1.get

            with Client(s["address"], set_as_default=True) as c2:
                assert dask.base.get_scheduler() == c2.get
            assert dask.base.get_scheduler() == c1.get
        assert dask.base.get_scheduler() == pre_get
        pytest.raises(KeyError, dask.config.get, "shuffle")
@gen_cluster(client=True)
async def test_ensure_default_client(c, s, a, b):
    """ensure_default_client() promotes a non-default client to be the
    process-wide default client."""
    assert c is default_client()

    async with Client(s.address, set_as_default=False, asynchronous=True) as c2:
        assert c is default_client()
        assert c2 is not default_client()
        ensure_default_client(c2)
        assert c is not default_client()
        assert c2 is default_client()
def test_ensure_default_get_deprecated():
    """Importing the deprecated alias ``ensure_default_get`` warns and
    resolves to ``ensure_default_client``."""
    with pytest.warns(FutureWarning, match="`ensure_default_get` is deprecated"):
        from distributed.client import ensure_default_get

    assert ensure_default_get is ensure_default_client
@gen_cluster()
async def test_set_as_default(s, a, b):
    """default_client() raises with no default client, tracks the most
    recent ``set_as_default=True`` client, and survives a dropped
    scheduler comm on a non-default client."""
    with pytest.raises(ValueError):
        default_client()

    async with Client(s.address, set_as_default=False, asynchronous=True) as c1:
        # set_as_default=False does not register a default
        with pytest.raises(ValueError):
            default_client()
        async with Client(s.address, set_as_default=True, asynchronous=True) as c2:
            assert default_client() is c2
            async with Client(s.address, set_as_default=True, asynchronous=True) as c3:
                assert default_client() is c3
                async with Client(
                    s.address, set_as_default=False, asynchronous=True
                ) as c4:
                    assert default_client() is c3

                    # Drop c4's scheduler connection and wait for it to
                    # reconnect; the default must remain c3 throughout.
                    await c4.scheduler_comm.close()
                    while c4.status != "running":
                        await asyncio.sleep(0.01)
                    assert default_client() is c3

    # All clients closed -> no default again
    with pytest.raises(ValueError):
        default_client()
@gen_cluster(client=True)
async def test_get_processing(c, s, a, b):
    """Client.processing() mirrors the scheduler's processing mapping and
    supports filtering by worker address."""
    processing = await c.processing()
    assert processing == valmap(tuple, s.processing)

    futures = c.map(
        slowinc, range(10), delay=0.1, workers=[a.address], allow_other_workers=True
    )

    await asyncio.sleep(0.2)

    x = await c.processing()
    assert set(x) == {a.address, b.address}

    x = await c.processing(workers=[a.address])
    assert isinstance(x[a.address], (list, tuple))
@gen_cluster(client=True)
async def test_get_foo(c, s, a, b):
    """Scheduler RPC endpoints (ncores, has_what, nbytes, who_has) agree
    with the scheduler's internal state, with and without filters."""
    futures = c.map(inc, range(10))
    await wait(futures)

    x = await c.scheduler.ncores()
    assert x == s.nthreads

    x = await c.scheduler.ncores(workers=[a.address])
    assert x == {a.address: s.nthreads[a.address]}

    x = await c.scheduler.has_what()
    assert valmap(sorted, x) == valmap(sorted, s.has_what)

    x = await c.scheduler.has_what(workers=[a.address])
    assert valmap(sorted, x) == {a.address: sorted(s.has_what[a.address])}

    x = await c.scheduler.nbytes(summary=False)
    assert x == s.get_nbytes(summary=False)

    x = await c.scheduler.nbytes(keys=[futures[0].key], summary=False)
    assert x == {futures[0].key: s.tasks[futures[0].key].nbytes}

    x = await c.scheduler.who_has()
    assert valmap(sorted, x) == valmap(sorted, s.who_has)

    x = await c.scheduler.who_has(keys=[futures[0].key])
    assert valmap(sorted, x) == {futures[0].key: sorted(s.who_has[futures[0].key])}
def assert_dict_key_equal(expected, actual):
    """Assert that two mappings have identical key sets and that, for every
    key, the values compare equal when coerced to lists (so any iterable
    value types are accepted)."""
    assert set(expected.keys()) == set(actual.keys())
    for key, actual_value in actual.items():
        assert list(expected[key]) == list(actual_value)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_get_foo_lost_keys(c, s, u, v, w):
    """has_what / who_has behavior when workers die: dead workers drop out
    of has_what, and lost keys appear empty (or are forgotten entirely
    when not recomputable)."""
    x = c.submit(inc, 1, workers=[u.address])
    y = await c.scatter(3, workers=[v.address])
    await wait([x, y])

    ua, va, wa = u.address, v.address, w.address

    d = await c.scheduler.has_what()
    assert_dict_key_equal(d, {ua: [x.key], va: [y.key], wa: []})
    d = await c.scheduler.has_what(workers=[ua, va])
    assert_dict_key_equal(d, {ua: [x.key], va: [y.key]})
    d = await c.scheduler.who_has()
    assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})
    d = await c.scheduler.who_has(keys=[x.key, y.key])
    assert_dict_key_equal(d, {x.key: [ua], y.key: [va]})

    await u.close()
    await v.close()

    d = await c.scheduler.has_what()
    assert_dict_key_equal(d, {wa: []})
    d = await c.scheduler.has_what(workers=[ua, va])
    assert_dict_key_equal(d, {ua: [], va: []})
    # The scattered key cannot be recomputed so it is forgotten
    d = await c.scheduler.who_has()
    assert_dict_key_equal(d, {x.key: []})
    # ... but when passed explicitly, it is included in the result
    d = await c.scheduler.who_has(keys=[x.key, y.key])
    assert_dict_key_equal(d, {x.key: [], y.key: []})
@pytest.mark.slow
@gen_cluster(
    client=True, Worker=Nanny, clean_kwargs={"threads": False, "processes": False}
)
async def test_bad_tasks_fail(c, s, a, b):
    """A task that kills its worker process (sys.exit) surfaces as
    KilledWorker, is logged by the scheduler, and records the last worker."""
    f = c.submit(sys.exit, 0)
    with captured_logger(logging.getLogger("distributed.scheduler")) as logger:
        with pytest.raises(KilledWorker) as info:
            await f

    text = logger.getvalue()
    assert f.key in text

    assert info.value.last_worker.nanny in {a.address, b.address}
    await asyncio.gather(a.close(), b.close())
def test_get_processing_sync(c, s, a, b):
    """Synchronous Client.processing(): empty at rest, and filterable by a
    single address or a list of addresses."""
    processing = c.processing()
    assert not any(v for v in processing.values())

    futures = c.map(
        slowinc, range(10), delay=0.1, workers=[a["address"]], allow_other_workers=False
    )

    sleep(0.2)

    aa = a["address"]
    bb = b["address"]
    processing = c.processing()

    # Both a bare address and a list of addresses are accepted filters
    assert set(c.processing(aa)) == {aa}
    assert set(c.processing([aa])) == {aa}

    c.cancel(futures)
def test_close_idempotent(c):
    """Closing a client more than once must be a harmless no-op."""
    for _ in range(3):
        c.close()
@nodebug
def test_get_returns_early(c):
    """Client.get() returns as soon as one task errors (it does not wait
    for the slow sibling), releases its futures, but keeps futures the
    caller already holds."""
    start = time()
    with suppress(RuntimeError):
        result = c.get({"x": (throws, 1), "y": (sleep, 1)}, ["x", "y"])
    assert time() < start + 0.5
    # Futures should be released and forgotten
    wait_for(lambda: not c.futures, timeout=0.1)
    wait_for(lambda: not any(c.processing().values()), timeout=3)

    x = c.submit(inc, 1)
    x.result()

    with suppress(RuntimeError):
        result = c.get({"x": (throws, 1), x.key: (inc, 1)}, ["x", x.key])
    # x is still held by the caller, so it must not be forgotten
    assert x.key in c.futures
@pytest.mark.slow
@gen_cluster(Worker=Nanny, client=True, timeout=60)
async def test_Client_clears_references_after_restart(c, s, a, b):
    """Client.restart() drops refcounts for pre-restart futures, and
    garbage-collecting such a future does not resurrect its entry."""
    x = c.submit(inc, 1)
    assert x.key in c.refcount

    await c.restart()
    assert x.key not in c.refcount

    key = x.key
    del x
    import gc

    gc.collect()
    await asyncio.sleep(0)

    assert key not in c.refcount
@gen_cluster(Worker=Nanny, client=True)
async def test_restart_timeout_is_logged(c, s, a, b):
    """A restart that exceeds its timeout logs a clear timeout message."""
    with captured_logger(logging.getLogger("distributed.client")) as logger:
        await c.restart(timeout="0.5s")
    text = logger.getvalue()
    assert "Restart timed out after 0.50 seconds" in text
def test_get_stops_work_after_error(c):
    """After get() raises, the remaining in-flight work is cancelled
    promptly rather than running to completion."""
    with pytest.raises(RuntimeError):
        c.get({"x": (throws, 1), "y": (sleep, 1.5)}, ["x", "y"])

    start = time()
    while any(c.processing().values()):
        sleep(0.01)
        assert time() < start + 0.5
def test_as_completed_list(c):
    """as_completed over a list of futures eventually yields every future,
    and gathering them returns the expected results."""
    futures = c.map(inc, range(5))
    completed = list(as_completed(futures))
    assert set(c.gather(completed)) == {1, 2, 3, 4, 5}
def test_as_completed_results(c):
    """as_completed(with_results=True) yields (future, result) pairs."""
    seq = c.map(inc, range(5))
    seq2 = list(as_completed(seq, with_results=True))
    assert set(pluck(1, seq2)) == {1, 2, 3, 4, 5}
    assert set(pluck(0, seq2)) == set(seq)
@pytest.mark.parametrize("with_results", [True, False])
def test_as_completed_batches(c, with_results):
    """as_completed(...).batches() yields non-overlapping batches that
    together cover every future (optionally with results)."""
    n = 50
    futures = c.map(slowinc, range(n), delay=0.01)
    out = []
    for batch in as_completed(futures, with_results=with_results).batches():
        assert isinstance(batch, (tuple, list))
        sleep(0.05)
        out.extend(batch)

    assert len(out) == n
    if with_results:
        assert set(pluck(1, out)) == set(range(1, n + 1))
    else:
        assert set(out) == set(futures)
def test_as_completed_next_batch(c):
    """next_batch(block=False) may return empty; blocking next_batch
    drains the iterator until it is empty."""
    futures = c.map(slowinc, range(2), delay=0.1)
    ac = as_completed(futures)
    assert not ac.is_empty()
    assert ac.next_batch(block=False) == []
    assert set(ac.next_batch(block=True)).issubset(futures)
    while not ac.is_empty():
        assert set(ac.next_batch(block=True)).issubset(futures)
    assert ac.is_empty()
    assert not ac.has_ready()
@gen_cluster(nthreads=[])
async def test_status(s):
    """Client.status transitions from 'running' to 'closed'."""
    c = await Client(s.address, asynchronous=True)
    assert c.status == "running"
    x = c.submit(inc, 1)

    await c.close()
    assert c.status == "closed"
@gen_cluster(client=True)
async def test_async_whowhat(c, s, a, b):
    """Async who_has()/has_what() return the typed WhoHas/HasWhat mappings
    with tuple values."""
    [x] = await c.scatter([1], workers=a.address)
    who_has = await c.who_has()
    has_what = await c.has_what()
    assert type(who_has) is WhoHas
    assert type(has_what) is HasWhat
    assert who_has == {x.key: (a.address,)}
    assert has_what == {a.address: (x.key,), b.address: ()}
def test_client_repr_html(c):
    """Sync who_has()/has_what() return the typed WhoHas/HasWhat mappings.

    NOTE(review): despite the name, this test never invokes the client's
    HTML repr (e.g. ``_repr_html_``) — confirm whether the WhoHas/HasWhat
    types are what feed the HTML repr, or whether coverage is missing.
    """
    x = c.submit(inc, 1)
    who_has = c.who_has()
    has_what = c.has_what()
    assert type(who_has) is WhoHas
    assert type(has_what) is HasWhat
@gen_cluster(client=True)
async def test_persist_optimize_graph(c, s, a, b):
    """optimize_graph=True fuses away intermediate keys before submission;
    optimize_graph=False keeps them, for both persist and compute."""
    i = 10
    for method in [c.persist, c.compute]:
        b = db.range(i, npartitions=2)
        i += 1
        b2 = b.map(inc)
        b3 = b2.map(inc)

        b4 = method(b3, optimize_graph=False)
        await wait(b4)

        # Unoptimized: intermediate b3 keys reach the scheduler
        assert set(map(stringify, b3.__dask_keys__())).issubset(s.tasks)

        b = db.range(i, npartitions=2)
        i += 1
        b2 = b.map(inc)
        b3 = b2.map(inc)

        b4 = method(b3, optimize_graph=True)
        await wait(b4)

        # Optimized: intermediate b2 keys are fused away
        assert not any(stringify(k) in s.tasks for k in b2.__dask_keys__())
@gen_cluster(client=True, nthreads=[])
async def test_scatter_raises_if_no_workers(c, s):
    """Scatter with no workers available times out with TimeoutError."""
    with pytest.raises(TimeoutError):
        await c.scatter(1, timeout=0.5)
@pytest.mark.slow
def test_reconnect(loop):
    """End-to-end reconnect: a client whose scheduler dies transitions to
    'connecting' (cancelling its futures), reconnects when a scheduler
    reappears on the same port, and cancels again after the final death."""
    w = Worker("127.0.0.1", 9393, loop=loop)
    loop.add_callback(w.start)

    scheduler_cli = [
        "dask-scheduler",
        "--host",
        "127.0.0.1",
        "--port",
        "9393",
        "--no-dashboard",
    ]
    with popen(scheduler_cli):
        c = Client("127.0.0.1:9393", loop=loop)
        c.wait_for_workers(1, timeout=10)
        x = c.submit(inc, 1)
        assert x.result(timeout=10) == 2

    # Scheduler process is gone: client should notice and start reconnecting
    start = time()
    while c.status != "connecting":
        assert time() < start + 10
        sleep(0.01)

    assert x.status == "cancelled"
    with pytest.raises(CancelledError):
        x.result(timeout=10)

    # Bring a scheduler back on the same port; client should recover
    with popen(scheduler_cli):
        start = time()
        while c.status != "running":
            sleep(0.1)
            assert time() < start + 10
        start = time()
        while len(c.nthreads()) != 1:
            sleep(0.05)
            assert time() < start + 10

        x = c.submit(inc, 1)
        assert x.result(timeout=10) == 2

    # Scheduler gone again: result() may first fail with a comm error
    # before the future settles into cancelled.
    start = time()
    while True:
        assert time() < start + 10
        try:
            x.result(timeout=10)
            assert False
        except CommClosedError:
            continue
        except CancelledError:
            break

    sync(loop, w.close, timeout=1)
    c.close()
class UnhandledException(Exception):
    """Raised by catch_unhandled_exceptions() when the event loop reported
    an exception that no task handled."""

    pass
@contextmanager
def catch_unhandled_exceptions() -> Generator[None, None, None]:
    """Context manager that installs a temporary asyncio exception handler
    and, on exit, re-raises any unhandled loop exception captured while
    the block ran (wrapped in UnhandledException, chained to the cause).

    Must be used from within a running event loop.
    """
    loop = asyncio.get_running_loop()
    ctx: dict[str, Any] | None = None
    old_handler = loop.get_exception_handler()

    # Decorator form registers the function as the loop's handler; it only
    # records the context so the exception can be re-raised on the caller's
    # stack rather than swallowed by the loop.
    @loop.set_exception_handler
    def _(loop: object, context: dict[str, Any]) -> None:
        nonlocal ctx
        ctx = context

    try:
        yield
    finally:
        # Always restore the previous handler before re-raising
        loop.set_exception_handler(old_handler)
        if ctx:
            raise UnhandledException(ctx["message"]) from ctx.get("exception")
@gen_cluster(client=True, nthreads=[], client_kwargs={"timeout": 0.5})
async def test_reconnect_timeout(c, s):
    """When the scheduler is gone for good, the client gives up
    reconnecting within its timeout, closes, and logs the failure —
    without leaking an unhandled loop exception."""
    with catch_unhandled_exceptions(), captured_logger(
        logging.getLogger("distributed.client")
    ) as logger:
        await s.close()
        while c.status != "closed":
            await c._update_scheduler_info()
            await asyncio.sleep(0.05)
    text = logger.getvalue()
    assert "Failed to reconnect" in text
@pytest.mark.avoid_ci(reason="hangs on github actions ubuntu-latest CI")
@pytest.mark.slow
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.parametrize("worker,count,repeat", [(Worker, 100, 5), (Nanny, 10, 20)])
def test_open_close_many_workers(loop, worker, count, repeat):
    """Stress test: repeatedly start and stop many workers concurrently and
    verify that file descriptors are not leaked afterwards."""
    proc = psutil.Process()

    with cluster(nworkers=0, active_rpc_timeout=2) as (s, _):
        gc.collect()
        before = proc.num_fds()
        done = Semaphore(0)
        running = weakref.WeakKeyDictionary()
        workers = set()
        status = True

        async def start_worker(sleep, duration, repeat=1):
            # Each call starts, runs, and tears down a worker `repeat` times,
            # then signals completion via the semaphore.
            for i in range(repeat):
                await asyncio.sleep(sleep)
                if not status:
                    return
                w = worker(s["address"], loop=loop)
                running[w] = None
                await w
                workers.add(w)
                addr = w.worker_address
                running[w] = addr
                await asyncio.sleep(duration)
                await w.close()
                del w
                await asyncio.sleep(0)
            done.release()

        for i in range(count):
            loop.add_callback(
                start_worker, random.random() / 5, random.random() / 5, repeat=repeat
            )

        with Client(s["address"], loop=loop) as c:
            sleep(1)

            for i in range(count):
                done.acquire(timeout=5)
                gc.collect()
                if not running:
                    break

            start = time()
            while c.nthreads():
                sleep(0.2)
                assert time() < start + 10

            while len(workers) < count * repeat:
                sleep(0.2)

            status = False

            # Force-close any stragglers before checking for leaks
            [c.sync(w.close) for w in list(workers)]
            for w in workers:
                assert w.status == Status.closed

        start = time()
        while proc.num_fds() > before:
            print("fds:", before, proc.num_fds())
            sleep(0.1)
            if time() > start + 10:
                if worker == Worker:  # this is an esoteric case
                    print("File descriptors did not clean up")
                    break
                else:
                    raise ValueError("File descriptors did not clean up")
@gen_cluster()
async def test_idempotence(s, a, b):
    """Submitting an identical task (same key) from a second client must
    not cause additional scheduler transitions — for successes, errors,
    and simultaneous submissions alike.

    NOTE(review): the locals ``a`` and ``b`` below shadow the worker
    parameters of the same names; the workers are not used after that
    point, but renaming the futures would be clearer.
    """
    c = await Client(s.address, asynchronous=True)
    f = await Client(s.address, asynchronous=True)

    # Submit
    x = c.submit(inc, 1)
    await x
    log = list(s.transition_log)

    len_single_submit = len(log)  # see last assert

    y = f.submit(inc, 1)
    assert x.key == y.key
    await y
    await asyncio.sleep(0.1)
    log2 = list(s.transition_log)
    assert log == log2

    # Error
    a = c.submit(div, 1, 0)
    await wait(a)
    assert a.status == "error"
    log = list(s.transition_log)

    b = f.submit(div, 1, 0)
    assert a.key == b.key
    await wait(b)
    await asyncio.sleep(0.1)
    log2 = list(s.transition_log)
    assert log == log2

    s.transition_log.clear()
    # Simultaneous Submit
    d = c.submit(inc, 2)
    e = c.submit(inc, 2)
    await wait([d, e])

    assert len(s.transition_log) == len_single_submit

    await c.close()
    await f.close()
def test_scheduler_info(c):
    """scheduler_info() returns a dict with a worker map and start time."""
    meta = c.scheduler_info()
    assert isinstance(meta, dict)
    worker_map = meta["workers"]
    assert len(worker_map) == 2
    assert isinstance(meta["started"], float)
def test_write_scheduler_file(c):
    """write_scheduler_file() produces a file usable to connect a second
    client, and a second write on the same client raises ValueError."""
    info = c.scheduler_info()
    with tmpfile("json") as scheduler_file:
        c.write_scheduler_file(scheduler_file)
        with Client(scheduler_file=scheduler_file) as c2:
            info2 = c2.scheduler_info()
            assert c.scheduler.address == c2.scheduler.address

        # test that a ValueError is raised if the scheduler_file
        # attribute is already set
        with pytest.raises(ValueError):
            c.write_scheduler_file(scheduler_file)
def test_get_versions_sync(c):
    """get_versions() reports scheduler, client, and per-worker version
    info; check=True must not raise; packages=[...] includes extra
    package versions.

    Fix: the original loop ``for k, v in v["workers"].items()`` rebound
    ``v`` — shadowing the outer results mapping mid-iteration — and used
    ``.items()`` while only the values were needed.
    """
    requests = pytest.importorskip("requests")
    v = c.get_versions()
    assert v["scheduler"] is not None
    assert v["client"] is not None
    assert len(v["workers"]) == 2
    for worker_versions in v["workers"].values():
        assert worker_versions is not None

    c.get_versions(check=True)
    # smoke test for versions
    # that this does not raise

    v = c.get_versions(packages=["requests"])
    assert v["client"]["packages"]["requests"] == requests.__version__
@gen_cluster(client=True)
async def test_get_versions_async(c, s, a, b):
    """Async get_versions(check=True) returns the three expected sections."""
    v = await c.get_versions(check=True)
    assert v.keys() == {"scheduler", "client", "workers"}
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_get_versions_rpc_error(c, s, a, b):
    """get_versions() tolerates an unreachable worker: the result simply
    omits it rather than raising."""
    a.stop()
    v = await c.get_versions()
    assert v.keys() == {"scheduler", "client", "workers"}
    assert v["workers"].keys() == {b.address}
def test_threaded_get_within_distributed(c):
    """Local dask schedulers (sync, multiprocessing, threaded) can run
    inside a task executing on a distributed worker."""
    import dask.multiprocessing

    for get in [dask.local.get_sync, dask.multiprocessing.get, dask.threaded.get]:

        def f():
            return get({"x": (lambda: 1,)}, "x")

        future = c.submit(f)
        assert future.result() == 1
@gen_cluster(client=True)
async def test_lose_scattered_data(c, s, a, b):
    """Scattered data has no recipe: losing its only worker cancels the
    future and removes the key from the scheduler."""
    [x] = await c.scatter([1], workers=a.address)

    await a.close()
    await asyncio.sleep(0.1)

    assert x.status == "cancelled"
    assert x.key not in s.tasks
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_partially_lose_scattered_data(e, s, a, b, c):
    """If a scattered key was replicated, losing one replica's worker
    leaves the future finished and the key in memory."""
    x = await e.scatter(1, workers=a.address)
    await e.replicate(x, n=2)

    await a.close()
    await asyncio.sleep(0.1)

    assert x.status == "finished"
    assert s.get_task_status(keys=[x.key]) == {x.key: "memory"}
@gen_cluster(client=True)
async def test_scatter_compute_lose(c, s, a, b):
    """A computation depending on lost scattered data is cancelled, while
    independent computed results survive."""
    [x] = await c.scatter([[1, 2, 3, 4]], workers=a.address)
    y = c.submit(inc, 1, workers=b.address)

    z = c.submit(slowadd, x, y, delay=0.2)
    await asyncio.sleep(0.1)

    await a.close()

    with pytest.raises(CancelledError):
        await wait(z)

    assert x.status == "cancelled"
    assert y.status == "finished"
    assert z.status == "cancelled"
@gen_cluster(client=True)
async def test_scatter_compute_store_lose(c, s, a, b):
    """
    Create irreplaceable data on one machine,
    cause a dependent computation to occur on another and complete

    Kill the machine with the irreplaceable data.  What happens to the complete
    result?  How about after it GCs and tries to come back?
    """
    x = await c.scatter(1, workers=a.address)
    xx = c.submit(inc, x, workers=a.address)
    y = c.submit(inc, 1)

    z = c.submit(slowadd, xx, y, delay=0.2, workers=b.address)
    await wait(z)

    await a.close()

    # Scattered x is lost with worker a
    while x.status == "finished":
        await asyncio.sleep(0.01)

    # assert xx.status == 'finished'
    assert y.status == "finished"
    assert z.status == "finished"

    zz = c.submit(inc, z)
    await wait(zz)

    zkey = z.key
    del z

    while s.get_task_status(keys=[zkey]) != {zkey: "released"}:
        await asyncio.sleep(0.01)

    xxkey = xx.key
    del xx

    # NOTE(review): this waits only while ALL three conditions hold; if
    # the intent was to wait until all keys are gone, the connectives
    # should likely be ``or`` (or the conditions negated) — confirm.
    while x.key in s.tasks and zkey not in s.tasks and xxkey not in s.tasks:
        await asyncio.sleep(0.01)
@gen_cluster(client=True)
async def test_scatter_compute_store_lose_processing(c, s, a, b):
    """
    Create irreplaceable data on one machine,
    cause a dependent computation to occur on another and complete

    Kill the machine with the irreplaceable data.  What happens to the complete
    result?  How about after it GCs and tries to come back?
    """
    [x] = await c.scatter([1], workers=a.address)

    y = c.submit(slowinc, x, delay=0.2)
    z = c.submit(inc, y)
    await asyncio.sleep(0.1)
    await a.close()

    # Losing the scattered input cancels the whole dependent chain
    while x.status == "finished":
        await asyncio.sleep(0.01)

    assert y.status == "cancelled"
    assert z.status == "cancelled"
@gen_cluster()
async def test_serialize_future(s, a, b):
    """A pickled future deserializes against whichever client is current
    (via as_current or temp_default_client) and still resolves."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)

    future = c1.submit(lambda: 1)
    result = await future

    for ci in (c1, c2):
        for ctxman in ci.as_current, lambda: temp_default_client(ci):
            with ctxman():
                future2 = pickle.loads(pickle.dumps(future))
                assert future2.client is ci
                assert stringify(future2.key) in ci.futures
                result2 = await future2
                assert result == result2

    await c1.close()
    await c2.close()
@gen_cluster()
async def test_temp_default_client(s, a, b):
    """temp_default_client() overrides default_client() inside its block,
    but an explicitly passed client always wins."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)

    with temp_default_client(c1):
        assert default_client() is c1
        assert default_client(c2) is c2

    with temp_default_client(c2):
        assert default_client() is c2
        assert default_client(c1) is c1

    await c1.close()
    await c2.close()
@gen_cluster(client=True)
async def test_as_current(c, s, a, b):
    """Client.current() honors nested as_current() contexts;
    allow_global=False ignores the global default client."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)

    with temp_default_client(c):
        assert Client.current() is c
        with pytest.raises(ValueError):
            Client.current(allow_global=False)
        with c1.as_current():
            assert Client.current() is c1
            assert Client.current(allow_global=True) is c1
        with c2.as_current():
            assert Client.current() is c2
            assert Client.current(allow_global=True) is c2

    await c1.close()
    await c2.close()
def test_as_current_is_thread_local(s):
    """as_current() state is thread-local: two threads each see their own
    client while both are inside the context manager simultaneously.

    The four locks choreograph the two threads so that the inner asserts
    are guaranteed to run while both contexts are active.
    """
    l1 = threading.Lock()
    l2 = threading.Lock()
    l3 = threading.Lock()
    l4 = threading.Lock()
    l1.acquire()
    l2.acquire()
    l3.acquire()
    l4.acquire()

    def run1():
        with Client(s["address"]) as c:
            with c.as_current():
                l1.acquire()
                l2.release()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    l3.acquire()
                    l4.release()

    def run2():
        with Client(s["address"]) as c:
            with c.as_current():
                l1.release()
                l2.acquire()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    l3.release()
                    l4.acquire()

    t1 = threading.Thread(target=run1)
    t2 = threading.Thread(target=run2)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
@gen_cluster()
async def test_as_current_is_task_local(s, a, b):
    """as_current() state is task-local in asyncio: two concurrent tasks
    each see their own client (async analogue of the thread-local test;
    the four locks synchronize the two tasks)."""
    l1 = asyncio.Lock()
    l2 = asyncio.Lock()
    l3 = asyncio.Lock()
    l4 = asyncio.Lock()
    await l1.acquire()
    await l2.acquire()
    await l3.acquire()
    await l4.acquire()

    async def run1():
        async with Client(s.address, asynchronous=True) as c:
            with c.as_current():
                await l1.acquire()
                l2.release()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    await l3.acquire()
                    l4.release()

    async def run2():
        async with Client(s.address, asynchronous=True) as c:
            with c.as_current():
                l1.release()
                await l2.acquire()
                try:
                    # This line runs only when both run1 and run2 are inside the
                    # context manager
                    assert Client.current(allow_global=False) is c
                finally:
                    l3.release()
                    await l4.acquire()

    await asyncio.gather(run1(), run2())
@nodebug  # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate(e, s, a, b, c):
    """dask.annotate(workers=..., allow_other_workers=...) placement hints
    are honored by persist (with graph optimization disabled)."""
    with dask.annotate(workers=a.address, allow_other_workers=False):
        L1 = [delayed(inc)(i) for i in range(4)]
    with dask.annotate(workers=b.address, allow_other_workers=False):
        total = delayed(sum)(L1)
    with dask.annotate(workers=c.address, allow_other_workers=True):
        L2 = [delayed(add)(i, total) for i in L1]
    with dask.annotate(workers=b.address, allow_other_workers=True):
        total2 = delayed(sum)(L2)

    # TODO: once annotations are faithfully forwarded upon graph optimization,
    # we shouldn't need to disable that here.
    out = e.persist(L1 + L2 + [total, total2], optimize_graph=False)
    await wait(out)

    assert all(v.key in a.data for v in L1)
    assert total.key in b.data

    assert s.loose_restrictions == {total2.key} | {v.key for v in L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers_annotate2(e, s, a, b, c):
    """A callable ``workers=`` annotation resolves per key at submission
    time without mutating the collections' own layer annotations."""
    def key_to_worker(key):
        return a.address

    L1 = [delayed(inc)(i) for i in range(4)]
    for x in L1:
        assert all(layer.annotations is None for layer in x.dask.layers.values())

    with dask.annotate(workers=key_to_worker):
        out = e.persist(L1, optimize_graph=False)
        await wait(out)

    # Annotation context must not leak into the collections themselves
    for x in L1:
        assert all(layer.annotations is None for layer in x.dask.layers.values())

    for v in L1:
        assert s.worker_restrictions[v.key] == {a.address}
@nodebug  # test timing is fragile
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_persist_workers(e, s, a, b, c):
    """persist(workers=[...], allow_other_workers=True) restricts every
    key to the given workers and marks all restrictions as loose."""
    L1 = [delayed(inc)(i) for i in range(4)]
    total = delayed(sum)(L1)
    L2 = [delayed(add)(i, total) for i in L1]
    total2 = delayed(sum)(L2)

    out = e.persist(
        L1 + L2 + [total, total2],
        workers=[a.address, b.address],
        allow_other_workers=True,
    )

    await wait(out)

    for v in L1 + L2 + [total, total2]:
        assert s.worker_restrictions[v.key] == {a.address, b.address}
    assert not any(c.address in r for r in s.worker_restrictions)

    assert s.loose_restrictions == {total.key, total2.key} | {v.key for v in L1 + L2}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers_annotate(e, s, a, b, c):
    """Worker annotations are honored by compute: restrictions land on the
    right workers, and only allow_other_workers annotations are loose."""
    with dask.annotate(workers=a.address, allow_other_workers=True):
        L1 = [delayed(inc)(i) for i in range(4)]
    with dask.annotate(workers=b.address, allow_other_workers=True):
        total = delayed(sum)(L1)
    with dask.annotate(workers=[c.address]):
        L2 = [delayed(add)(i, total) for i in L1]

    # TODO: once annotations are faithfully forwarded upon graph optimization,
    # we shouldn't need to disable that here.
    out = e.compute(L1 + L2 + [total], optimize_graph=False)

    await wait(out)
    for v in L1:
        assert s.worker_restrictions[v.key] == {a.address}
    for v in L2:
        assert s.worker_restrictions[v.key] == {c.address}
    assert s.worker_restrictions[total.key] == {b.address}

    assert s.loose_restrictions == {total.key} | {v.key for v in L1}
@gen_cluster(nthreads=[("127.0.0.1", 1)] * 3, client=True)
async def test_compute_workers(e, s, a, b, c):
    """compute(workers=[...], allow_other_workers=True) restricts every
    key to the listed workers with loose restrictions throughout."""
    L1 = [delayed(inc)(i) for i in range(4)]
    total = delayed(sum)(L1)
    L2 = [delayed(add)(i, total) for i in L1]

    out = e.compute(
        L1 + L2 + [total],
        workers=[a.address, b.address],
        allow_other_workers=True,
    )

    await wait(out)
    for v in L1 + L2 + [total]:
        assert s.worker_restrictions[v.key] == {a.address, b.address}
    assert not any(c.address in r for r in s.worker_restrictions)

    assert s.loose_restrictions == {total.key} | {v.key for v in L1 + L2}
@gen_cluster(client=True)
async def test_compute_nested_containers(c, s, a, b):
    """compute() traverses nested containers (dicts/lists) and computes
    embedded dask collections while leaving plain values intact."""
    da = pytest.importorskip("dask.array")
    np = pytest.importorskip("numpy")
    x = da.ones(10, chunks=(5,)) + 1

    future = c.compute({"x": [x], "y": 123})
    result = await future

    assert isinstance(result, dict)
    assert (result["x"][0] == np.ones(10) + 1).all()
    assert result["y"] == 123
@gen_cluster(client=True)
async def test_scatter_type(c, s, a, b):
    """Futures returned by scatter carry the scattered value's type."""
    [future] = await c.scatter([1])
    assert future.type == int

    d = await c.scatter({"x": 1.0})
    assert d["x"].type == float
@gen_cluster(client=True)
async def test_retire_workers_2(c, s, a, b):
    """Retiring a worker moves its data to a surviving worker and removes
    the retired worker from the scheduler."""
    [x] = await c.scatter([1], workers=a.address)

    await s.retire_workers(workers=[a.address])
    assert b.data == {x.key: 1}
    assert s.who_has == {x.key: {b.address}}
    assert s.has_what == {b.address: {x.key}}

    assert a.address not in s.workers
@gen_cluster(client=True, nthreads=[("", 1)] * 10)
async def test_retire_many_workers(c, s, *workers):
    """Retiring 7 of 10 workers keeps all scattered data available on the
    remaining 3 and leaves every future in memory."""
    futures = await c.scatter(list(range(100)))

    await s.retire_workers(workers=[w.address for w in workers[:7]])

    results = await c.gather(futures)
    assert results == list(range(100))

    while len(s.workers) != 3:
        await asyncio.sleep(0.01)

    assert len(s.has_what) == len(s.nthreads) == 3

    assert all(future.done() for future in futures)
    assert all(s.tasks[future.key].state == "memory" for future in futures)
    assert await c.gather(futures) == list(range(100))
# Don't count how many task landed on each worker.
# Normally, tasks would be distributed evenly over the surviving workers. However,
# here all workers share the same process memory, so you'll get an unintuitive
# distribution of tasks if for any reason one transfer take longer than 2 seconds
# and as a consequence the Active Memory Manager ends up running for two iterations.
# This is something that will happen more frequently on low-powered CI machines.
# See test_active_memory_manager.py for tests that robustly verify the statistical
# distribution of tasks after worker retirement.
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 3)] * 2,
    config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_weight_occupancy_against_data_movement(c, s, a, b):
    """Scheduling weighs data-transfer cost against occupancy: tasks lean
    toward the worker holding the large argument, but not exclusively."""
    await s.extensions["stealing"].stop()

    def f(x, y=0, z=0):
        sleep(0.01)
        return x

    # Large y on a, small z on b
    y = await c.scatter([[1, 2, 3, 4]], workers=[a.address])
    z = await c.scatter([1], workers=[b.address])

    futures = c.map(f, [1, 2, 3, 4], y=y, z=z)

    await wait(futures)

    assert sum(f.key in a.data for f in futures) >= 2
    assert sum(f.key in b.data for f in futures) >= 1
@gen_cluster(
    client=True,
    nthreads=[("127.0.0.1", 1), ("127.0.0.1", 10)],
    config={"distributed.scheduler.default-task-durations": {"f": "10ms"}},
)
async def test_distribute_tasks_by_nthreads(c, s, a, b):
    """With uneven worker thread counts, the 10-thread worker receives
    proportionally more tasks than the 1-thread worker."""
    await s.extensions["stealing"].stop()

    def f(x, y=0):
        sleep(0.01)
        return x

    y = await c.scatter([1], broadcast=True)

    futures = c.map(f, range(20), y=y)
    await wait(futures)

    assert len(b.data) > 2 * len(a.data)
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_add_done_callback(c, s, a, b):
    """add_done_callback fires for success, error, and slow futures, and
    fires immediately when attached to an already-finished future."""
    S = set()

    def f(future):
        # Chained callback: registering g from within f must also work
        future.add_done_callback(g)

    def g(future):
        S.add((future.key, future.status))

    u = c.submit(inc, 1, key="u")
    v = c.submit(throws, "hello", key="v")
    w = c.submit(slowinc, 2, delay=0.3, key="w")
    x = c.submit(inc, 3, key="x")
    u.add_done_callback(f)
    v.add_done_callback(f)
    w.add_done_callback(f)

    await wait((u, v, w, x))

    # x finishes before its callback is attached; it must still fire
    x.add_done_callback(f)

    while len(S) < 4:
        await asyncio.sleep(0.01)

    assert S == {(f.key, f.status) for f in (u, v, w, x)}
@gen_cluster(client=True)
async def test_normalize_collection(c, s, a, b):
    """normalize_collection replaces already-persisted sub-graphs with
    Futures, shrinking the graph."""
    x = delayed(inc)(1)
    y = delayed(inc)(x)
    z = delayed(inc)(y)

    yy = c.persist(y)

    zz = c.normalize_collection(z)
    assert len(z.dask) == len(y.dask) + 1

    assert isinstance(zz.dask[y.key], Future)
    assert len(zz.dask) < len(z.dask)
@gen_cluster(client=True)
async def test_normalize_collection_dask_array(c, s, a, b):
    """normalize_collection on a dask array substitutes persisted chunks
    with Futures without mutating the input graph, and computing the
    normalized collection matches the original."""
    da = pytest.importorskip("dask.array")

    x = da.ones(10, chunks=(5,))
    y = x + 1
    yy = c.persist(y)

    z = y.sum()
    zdsk = dict(z.dask)
    zz = c.normalize_collection(z)
    assert z.dask == zdsk  # do not mutate input

    assert len(z.dask) > len(zz.dask)
    assert any(isinstance(v, Future) for v in zz.dask.values())

    for k, v in yy.dask.items():
        assert zz.dask[k].key == v.key

    result1 = await c.compute(z)
    result2 = await c.compute(zz)
    assert result1 == result2
@pytest.mark.slow
def test_normalize_collection_with_released_futures(c):
    """normalize_collection must cope with futures being released
    concurrently (regression test for a race at compute time)."""
    da = pytest.importorskip("dask.array")

    x = da.arange(2**20, chunks=2**10)
    y = x.persist()
    wait(y)
    sol = y.sum().compute()

    # Start releasing futures
    del y
    # Try to reuse futures. Previously this was a race condition,
    # and the call to `.compute()` would error out due to missing
    # futures on the scheduler at compute time.
    normalized = c.normalize_collection(x)
    res = normalized.sum().compute()
    assert res == sol
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
@gen_cluster(client=True)
async def test_auto_normalize_collection(c, s, a, b):
    """With the _optimize_insert_futures optimization active, recomputing an
    already-persisted collection should reuse its futures and finish fast."""
    da = pytest.importorskip("dask.array")

    x = da.ones(10, chunks=5)
    assert len(x.dask) == 2

    with dask.config.set(optimizations=[c._optimize_insert_futures]):
        # each slowinc block takes ~1s, so a re-computation that does NOT
        # reuse the persisted futures would blow the < 1s assertions below
        y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
        yy = c.persist(y)
        await wait(yy)

        start = time()
        future = c.compute(y.sum())
        await future
        end = time()
        assert end - start < 1

        start = time()
        z = c.persist(y + 1)
        await wait(z)
        end = time()
        assert end - start < 1
@pytest.mark.xfail(reason="https://github.com/dask/distributed/issues/4404")
def test_auto_normalize_collection_sync(c):
    """Synchronous variant of test_auto_normalize_collection."""
    da = pytest.importorskip("dask.array")
    x = da.ones(10, chunks=5)

    y = x.map_blocks(slowinc, delay=1, dtype=x.dtype)
    yy = c.persist(y)
    wait(yy)

    with dask.config.set(optimizations=[c._optimize_insert_futures]):
        # reusing persisted futures should make this much faster than 10 x 1s
        start = time()
        y.sum().compute()
        end = time()
        assert end - start < 1
def assert_no_data_loss(scheduler):
    """Fail if the scheduler ever released an in-memory key while simultaneously
    recommending that the very same key go back to ``waiting`` (data loss)."""
    for entry in scheduler.transition_log:
        key, begin, end, recommendations, _ = entry
        if (begin, end) != ("memory", "released"):
            continue
        for rec_key, rec_state in recommendations.items():
            assert not (rec_key == key and rec_state == "waiting")
@gen_cluster(client=True)
async def test_interleave_computations(c, s, a, b):
    """Dependent task chains (x -> y -> z) should progress roughly in lockstep
    rather than completing all xs before any ys."""
    import distributed

    # NOTE(review): looks like leftover debugging — stashes the scheduler on the
    # distributed module and is never cleaned up; confirm before removing
    distributed.g = s
    xs = [delayed(slowinc)(i, delay=0.02) for i in range(30)]
    ys = [delayed(slowdec)(x, delay=0.02) for x in xs]
    zs = [delayed(slowadd)(x, y, delay=0.02) for x, y in zip(xs, ys)]

    total = delayed(sum)(zs)

    future = c.compute(total)

    done = ("memory", "released")

    await asyncio.sleep(0.1)

    x_keys = [x.key for x in xs]
    y_keys = [y.key for y in ys]
    z_keys = [z.key for z in zs]
    while not s.tasks or any(w.processing for w in s.workers.values()):
        await asyncio.sleep(0.05)
        x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
        y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
        z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
        # downstream can never be ahead, and upstream may only lead by a margin
        assert x_done >= y_done >= z_done
        assert x_done < y_done + 10
        assert y_done < z_done + 10

    assert_no_data_loss(s)
@pytest.mark.skip(reason="Now prefer first-in-first-out")
@gen_cluster(client=True)
async def test_interleave_computations_map(c, s, a, b):
    """Same interleaving property as test_interleave_computations, via Client.map."""
    xs = c.map(slowinc, range(30), delay=0.02)
    ys = c.map(slowdec, xs, delay=0.02)
    zs = c.map(slowadd, xs, ys, delay=0.02)

    done = ("memory", "released")

    x_keys = [x.key for x in xs]
    y_keys = [y.key for y in ys]
    z_keys = [z.key for z in zs]
    while not s.tasks or any(w.processing for w in s.workers.values()):
        await asyncio.sleep(0.05)
        x_done = sum(state in done for state in s.get_task_status(keys=x_keys).values())
        y_done = sum(state in done for state in s.get_task_status(keys=y_keys).values())
        z_done = sum(state in done for state in s.get_task_status(keys=z_keys).values())
        assert x_done >= y_done >= z_done
        assert x_done < y_done + 10
        assert y_done < z_done + 10
@gen_cluster(client=True)
async def test_scatter_dict_workers(c, s, a, b):
    """Scattering a dict with a workers= list lands the data on one of them."""
    await c.scatter({"a": 10}, workers=[a.address, b.address])
    assert "a" in a.data or "a" in b.data
@pytest.mark.slow
@gen_test()
async def test_client_timeout():
    """`await Client(...)` keeps retrying for 10 seconds if it can't find the Scheduler
    straight away
    """
    with dask.config.set({"distributed.comm.timeouts.connect": "10s"}):
        c = Client("127.0.0.1:57484", asynchronous=True)
        # start connecting before the scheduler exists; it must keep retrying
        client_start_fut = asyncio.ensure_future(c)
        await asyncio.sleep(2)

        async with Scheduler(port=57484, dashboard_address=":0"):
            await client_start_fut
            assert await c.run_on_scheduler(lambda: 123) == 123
            await c.close()
@gen_cluster(client=True)
async def test_submit_list_kwargs(c, s, a, b):
    """A list of futures passed as a keyword argument is resolved to values."""
    futures = await c.scatter([1, 2, 3])

    def f(L=None):
        return sum(L)

    future = c.submit(f, L=futures)
    result = await future
    assert result == 1 + 2 + 3
@gen_cluster(client=True)
async def test_map_list_kwargs(c, s, a, b):
    """A list of futures passed as a keyword argument to Client.map is resolved."""
    futures = await c.scatter([1, 2, 3])

    def f(i, L=None):
        return i + sum(L)

    futures = c.map(f, range(10), L=futures)
    results = await c.gather(futures)
    assert results == [i + 6 for i in range(10)]
@gen_cluster(client=True)
async def test_dont_clear_waiting_data(c, s, a, b):
    """Data a pending task depends on must survive release of its client handle."""
    x = await c.scatter(1)
    y = c.submit(slowinc, x, delay=0.5)
    while y.key not in s.tasks:
        await asyncio.sleep(0.01)
    key = x.key
    # drop the only client-side reference while y still needs x's data
    del x
    for i in range(5):
        assert s.waiting_data[key]
        await asyncio.sleep(0)
@gen_cluster(client=True)
async def test_recreate_error_delayed(c, s, a, b):
    """The errored sub-task of a delayed graph can be rebuilt and re-raised locally."""
    x0 = delayed(dec)(2)
    y0 = delayed(dec)(1)
    x = delayed(div)(1, x0)  # div(1, dec(2)) == div(1, 0) -> ZeroDivisionError
    y = delayed(div)(1, y0)
    # NOTE(review): sum(x, y) means sum(iterable=x, start=y); irrelevant here
    # because x errors first, but confirm it wasn't meant to be sum([x, y])
    tot = delayed(sum)(x, y)
    f = c.compute(tot)

    assert f.status == "pending"

    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert f.status == "error"
    assert function.__name__ == "div"
    assert args == (1, 0)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_futures(c, s, a, b):
    """Same as test_recreate_error_delayed, built from plain futures."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 1)
    x = c.submit(div, 1, x0)  # div(1, 0) -> ZeroDivisionError
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, x, y)
    f = c.compute(tot)

    assert f.status == "pending"

    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert f.status == "error"
    assert function.__name__ == "div"
    assert args == (1, 0)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_collection(c, s, a, b):
    """Errored tasks inside bag and dataframe collections can be recreated locally."""
    # NOTE: this rebinding shadows the worker fixture `b` with a dask bag
    b = db.range(10, npartitions=4)
    b = b.map(lambda x: 1 / x)  # 1/0 in the first partition
    b = b.persist()
    f = c.compute(b)

    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ZeroDivisionError):
        function(*args, **kwargs)

    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd

    df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)

    def make_err(x):
        # because pandas would happily work with NaN
        if x == 0:
            raise ValueError
        return x

    df2 = df.a.map(make_err)
    f = c.compute(df2)
    error_f = await c._get_errored_future(f)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ValueError):
        function(*args, **kwargs)

    # with persist
    df3 = c.persist(df2)
    error_f = await c._get_errored_future(df3)
    function, args, kwargs = await c._get_components_from_future(error_f)
    with pytest.raises(ValueError):
        function(*args, **kwargs)
@gen_cluster(client=True)
async def test_recreate_error_array(c, s, a, b):
    """Recreating an errored array task ships the real ndarray arguments back."""
    da = pytest.importorskip("dask.array")
    pytest.importorskip("scipy")
    # inverting a singular (all-zero) matrix errors during compute
    z = (da.linalg.inv(da.zeros((10, 10), chunks=10)) + 1).sum()
    zz = z.persist()
    error_f = await c._get_errored_future(zz)
    function, args, kwargs = await c._get_components_from_future(error_f)
    assert "0.,0.,0." in str(args).replace(" ", "")  # args contain actual arrays
def test_recreate_error_sync(c):
    """recreate_error_locally re-raises the remote exception in this process."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 1)
    x = c.submit(div, 1, x0)  # div(1, 0) -> ZeroDivisionError
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, x, y)
    f = c.compute(tot)

    with pytest.raises(ZeroDivisionError):
        c.recreate_error_locally(f)
    assert f.status == "error"
def test_recreate_error_not_error(c):
    """recreate_error_locally on a successful future raises a clear ValueError."""
    f = c.submit(dec, 2)
    with pytest.raises(ValueError, match="No errored futures passed"):
        c.recreate_error_locally(f)
@gen_cluster(client=True)
async def test_recreate_task_delayed(c, s, a, b):
    """A finished delayed task's function/args can be fetched and re-run locally."""
    x0 = delayed(dec)(2)
    y0 = delayed(dec)(2)
    x = delayed(div)(1, x0)
    y = delayed(div)(1, y0)
    tot = delayed(sum)([x, y])
    f = c.compute(tot)

    assert f.status == "pending"

    function, args, kwargs = await c._get_components_from_future(f)
    assert f.status == "finished"
    assert function.__name__ == "sum"
    assert args == ([1, 1],)
    assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_futures(c, s, a, b):
    """Same as test_recreate_task_delayed, built from plain futures."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 2)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, [x, y])
    f = c.compute(tot)

    assert f.status == "pending"

    function, args, kwargs = await c._get_components_from_future(f)
    assert f.status == "finished"
    assert function.__name__ == "sum"
    assert args == ([1, 1],)
    assert function(*args, **kwargs) == 2
@gen_cluster(client=True)
async def test_recreate_task_collection(c, s, a, b):
    """Tasks from bag and dataframe collections can be rebuilt and re-run locally."""
    # NOTE: this rebinding shadows the worker fixture `b` with a dask bag
    b = db.range(10, npartitions=4)
    b = b.map(lambda x: int(3628800 / (x + 1)))
    b = b.persist()
    f = c.compute(b)

    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs) == [
        3628800,
        1814400,
        1209600,
        907200,
        725760,
        604800,
        518400,
        453600,
        403200,
        362880,
    ]

    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd

    df = dd.from_pandas(pd.DataFrame({"a": [0, 1, 2, 3, 4]}), chunksize=2)

    df2 = df.a.map(lambda x: x + 1)
    f = c.compute(df2)
    function, args, kwargs = await c._get_components_from_future(f)
    expected = pd.DataFrame({"a": [1, 2, 3, 4, 5]})["a"]
    assert function(*args, **kwargs).equals(expected)

    # with persist
    df3 = c.persist(df2)
    # recreate_task_locally only works with futures
    with pytest.raises(AttributeError):
        function, args, kwargs = await c._get_components_from_future(df3)

    f = c.compute(df3)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs).equals(expected)
@gen_cluster(client=True)
async def test_recreate_task_array(c, s, a, b):
    """A dask.array task can be rebuilt and re-executed locally."""
    da = pytest.importorskip("dask.array")
    z = (da.zeros((10, 10), chunks=10) + 1).sum()
    f = c.compute(z)
    function, args, kwargs = await c._get_components_from_future(f)
    assert function(*args, **kwargs) == 100
def test_recreate_task_sync(c):
    """recreate_task_locally re-runs the task in this process and returns its value."""
    x0 = c.submit(dec, 2)
    y0 = c.submit(dec, 2)
    x = c.submit(div, 1, x0)
    y = c.submit(div, 1, y0)
    tot = c.submit(sum, [x, y])
    f = c.compute(tot)

    assert c.recreate_task_locally(f) == 2
@gen_cluster(client=True)
async def test_retire_workers(c, s, a, b):
    """Client.retire_workers with close_workers=True removes and shuts the worker."""
    assert set(s.workers) == {a.address, b.address}
    await c.retire_workers(workers=[a.address], close_workers=True)
    assert set(s.workers) == {b.address}

    while a.status != Status.closed:
        await asyncio.sleep(0.01)
class MyException(Exception):
    """Marker exception used by the (de)serialization robustness tests below."""
@gen_cluster(client=True)
async def test_robust_unserializable(c, s, a, b):
    """An argument that fails to pickle raises locally and leaves the cluster healthy."""
    class Foo:
        def __getstate__(self):
            raise MyException()

    with pytest.raises(MyException):
        future = c.submit(identity, Foo())

    # the cluster must keep working afterwards
    futures = c.map(inc, range(10))
    results = await c.gather(futures)

    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable(c, s, a, b):
    """An argument that fails to unpickle on the worker errors the future only."""
    class Foo:
        def __getstate__(self):
            return 1

        def __setstate__(self, state):
            raise MyException("hello")

    future = c.submit(identity, Foo())
    with pytest.raises(MyException):
        await future

    # the cluster must keep working afterwards
    futures = c.map(inc, range(10))
    results = await c.gather(futures)

    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_robust_undeserializable_function(c, s, a, b):
    """A callable that fails to unpickle on the worker errors the future only."""
    class Foo:
        def __getstate__(self):
            return 1

        def __setstate__(self, state):
            raise MyException("hello")

        def __call__(self, *args):
            return 1

    future = c.submit(Foo(), 1)
    with pytest.raises(MyException):
        await future

    # the cluster must keep working afterwards
    futures = c.map(inc, range(10))
    results = await c.gather(futures)

    assert results == list(map(inc, range(10)))
    assert a.data and b.data
@gen_cluster(client=True)
async def test_fire_and_forget(c, s, a, b):
    """fire_and_forget runs a task with no client reference, then the scheduler
    forgets it while keeping tasks that still have interested clients."""
    future = c.submit(slowinc, 1, delay=0.1)
    import distributed

    def f(x):
        # observable side effect so we can tell the task ran
        distributed.foo = 123

    try:
        fire_and_forget(c.submit(f, future))

        while not hasattr(distributed, "foo"):
            await asyncio.sleep(0.01)
        assert distributed.foo == 123
    finally:
        del distributed.foo

    # only the task someone still wants should remain
    while len(s.tasks) > 1:
        await asyncio.sleep(0.01)

    assert set(s.who_wants) == {future.key}
    assert set(s.tasks) == {future.key}
@gen_cluster(client=True)
async def test_fire_and_forget_err(c, s, a, b):
    """A fire-and-forget task that errors is forgotten promptly by the scheduler."""
    fire_and_forget(c.submit(div, 1, 0))
    await asyncio.sleep(0.1)

    # erred task should clear out quickly
    start = time()
    while s.tasks:
        await asyncio.sleep(0.01)
    assert time() < start + 1
def test_quiet_client_close(loop):
    """Closing a client mid-computation should not spam the distributed logger."""
    with captured_logger(logging.getLogger("distributed")) as logger:
        with Client(
            loop=loop,
            processes=False,
            dashboard_address=":0",
            threads_per_worker=4,
        ) as c:
            futures = c.map(slowinc, range(1000), delay=0.01)
            sleep(0.200)  # stop part-way
        sleep(0.1)  # let things settle

    out = logger.getvalue()
    lines = out.strip().split("\n")
    assert len(lines) <= 2
    for line in lines:
        # only benign messages are tolerated
        assert (
            not line
            or "Reconnecting" in line
            or "garbage" in line
            or set(line) == {"-"}
        ), line
@pytest.mark.slow
def test_quiet_client_close_when_cluster_is_closed_before_client(loop):
    """Closing the cluster before its client must not log CancelledError noise."""
    with captured_logger(logging.getLogger("tornado.application")) as logger:
        cluster = LocalCluster(loop=loop, n_workers=1, dashboard_address=":0")
        client = Client(cluster, loop=loop)
        cluster.close()
        client.close()

    out = logger.getvalue()
    assert "CancelledError" not in out
@gen_cluster()
async def test_close(s, a, b):
    """Client.close removes the client's interest and its tasks from the scheduler."""
    c = await Client(s.address, asynchronous=True)
    future = c.submit(inc, 1)
    await wait(future)
    assert c.id in s.wants_what
    await c.close()

    while c.id in s.wants_what or s.tasks:
        await asyncio.sleep(0.01)
def test_threadsafe(c):
    """Client.submit/gather are safe to use concurrently from many threads."""
    def f(_):
        d = deque(maxlen=50)
        for i in range(100):
            future = c.submit(inc, random.randint(0, 100))
            d.append(future)
            sleep(0.001)
        c.gather(list(d))
        total = c.submit(sum, list(d))
        return total.result()

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor(20) as e:
        results = list(e.map(f, range(20)))
        assert results and all(results)
        del results
@pytest.mark.slow
def test_threadsafe_get(c):
    """Collection .compute() is safe to call concurrently from many threads."""
    da = pytest.importorskip("dask.array")
    x = da.arange(100, chunks=(10,))

    def f(_):
        total = 0
        for i in range(20):
            total += (x + random.randint(0, 20)).sum().compute()
            sleep(0.001)
        return total

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor(30) as e:
        results = list(e.map(f, range(30)))
        assert results and all(results)
@pytest.mark.slow
def test_threadsafe_compute(c):
    """Client.compute is safe to call concurrently from many threads.

    Mirrors test_threadsafe_get but goes through ``c.compute``.  The executor
    is now managed with a ``with`` block so its worker threads are joined on
    exit; the original created the pool without ever shutting it down, unlike
    the sibling tests above.
    """
    da = pytest.importorskip("dask.array")
    x = da.arange(100, chunks=(10,))

    def f(_):
        total = 0
        for i in range(20):
            future = c.compute((x + random.randint(0, 20)).sum())
            total += future.result()
            sleep(0.001)
        return total

    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor(30) as e:
        results = list(e.map(f, range(30)))
    assert results and all(results)
@gen_cluster(client=True)
async def test_identity(c, s, a, b):
    """Client/Worker/Scheduler ids carry their role name as a prefix."""
    assert c.id.lower().startswith("client")
    assert a.id.lower().startswith("worker")
    assert b.id.lower().startswith("worker")
    assert s.id.lower().startswith("scheduler")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 4)] * 2)
async def test_get_client(c, s, a, b):
    """get_client returns the running client in the main process and a working
    synchronous client from inside a worker task."""
    assert get_client() is c
    assert c.asynchronous

    def f(x):
        import distributed

        client = get_client()
        assert not client.asynchronous
        assert client is distributed.tmp_client

        future = client.submit(inc, x)
        return future.result()

    import distributed

    distributed.tmp_client = c
    try:
        futures = c.map(f, range(5))
        results = await c.gather(futures)
        assert results == list(map(inc, range(5)))
    finally:
        del distributed.tmp_client
def test_get_client_no_cluster():
    """get_client with no running client or address raises a clear ValueError."""
    # Clean up any global workers added by other tests. This test requires that
    # there are no global workers.
    Worker._instances.clear()

    msg = "No global client found and no address provided"
    with pytest.raises(ValueError, match=rf"^{msg}$"):
        get_client()
@gen_cluster(client=True)
async def test_serialize_collections(c, s, a, b):
    """A persisted dask collection can be passed as a task argument."""
    da = pytest.importorskip("dask.array")
    x = da.arange(10, chunks=(5,)).persist()

    def f(x):
        assert isinstance(x, da.Array)
        return x.sum().compute()

    future = c.submit(f, x)
    result = await future
    assert result == sum(range(10))
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 1)
async def test_secede_simple(c, s, a):
    """A task may secede from the thread pool and submit work of its own."""
    def f():
        client = get_client()
        secede()
        return client.submit(inc, 1).result()

    result = await c.submit(f)
    assert result == 2
@gen_cluster(client=True)
async def test_secede_balances(c, s, a, b):
    """Ensure that tasks scheduled from a seceded thread can be scheduled
    elsewhere"""
    def f(x):
        client = get_client()
        secede()
        futures = client.map(inc, range(10), pure=False)
        total = client.submit(sum, futures).result()
        return total

    # pin the outer tasks to worker a; the inner work should still spread
    futures = c.map(f, range(10), workers=[a.address])
    results = await c.gather(futures)
    # We dispatch 10 tasks and every task generates 11 more tasks
    # 10 * 11 + 10
    assert a.executed_count + b.executed_count == 120
    assert a.executed_count >= 10
    assert b.executed_count > 0

    assert results == [sum(map(inc, range(10)))] * 10
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_long_running_not_in_occupancy(c, s, a):
    """A seceded (long-running) task must stop counting toward worker occupancy."""
    # https://github.com/dask/distributed/issues/5332
    from distributed import Lock

    l = Lock()
    await l.acquire()

    def long_running(lock):
        sleep(0.1)
        secede()
        lock.acquire()

    f = c.submit(long_running, l)
    while f.key not in s.tasks:
        await asyncio.sleep(0.01)
    # before any measurement the scheduler charges the configured default
    assert s.workers[a.address].occupancy == parse_timedelta(
        dask.config.get("distributed.scheduler.unknown-task-duration")
    )

    # occupancy drops to zero once the task secedes
    while s.workers[a.address].occupancy:
        await asyncio.sleep(0.01)
    await a.heartbeat()

    ts = s.tasks[f.key]
    ws = s.workers[a.address]
    s.set_duration_estimate(ts, ws)
    assert s.workers[a.address].occupancy == 0

    s.reevaluate_occupancy(0)
    assert s.workers[a.address].occupancy == 0
    await l.release()
    await f
@gen_cluster(client=True)
async def test_sub_submit_priority(c, s, a, b):
    """Tasks submitted from inside a running task get higher scheduling priority."""
    def func():
        client = get_client()
        f = client.submit(slowinc, 1, delay=0.5, key="slowinc")
        client.gather(f)

    future = c.submit(func, key="f")
    while len(s.tasks) != 2:
        await asyncio.sleep(0.001)
    # lower values schedule first
    assert s.tasks["f"].priority > s.tasks["slowinc"].priority, (
        s.tasks["f"].priority,
        s.tasks["slowinc"].priority,
    )
def test_get_client_sync(c, s, a, b):
    """get_worker and get_client inside tasks both point at this scheduler."""
    results = c.run(lambda: get_worker().scheduler.address)
    assert results == {w["address"]: s["address"] for w in [a, b]}

    results = c.run(lambda: get_client().scheduler.address)
    assert results == {w["address"]: s["address"] for w in [a, b]}
@gen_cluster(client=True)
async def test_serialize_collections_of_futures(c, s, a, b):
    """A persisted dataframe can be scattered and round-tripped through a future."""
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")
    from dask.dataframe.utils import assert_eq

    df = pd.DataFrame({"x": [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    future = await c.scatter(ddf)

    ddf2 = await future
    df2 = await c.compute(ddf2)

    assert_eq(df, df2)
def test_serialize_collections_of_futures_sync(c):
    """Synchronous variant of test_serialize_collections_of_futures."""
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")
    from dask.dataframe.utils import assert_eq

    df = pd.DataFrame({"x": [1, 2, 3]})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    future = c.scatter(ddf)

    result = future.result()
    assert_eq(result.compute(), df)

    assert future.type == dd.DataFrame
    assert c.submit(lambda x, y: assert_eq(x.compute(), y), future, df).result()
def _dynamic_workload(x, delay=0.01):
if delay == "random":
sleep(random.random() / 2)
else:
sleep(delay)
if x > 4:
return 4
secede()
client = get_client()
futures = client.map(
_dynamic_workload, [x + i + 1 for i in range(2)], pure=False, delay=delay
)
total = client.submit(sum, futures)
return total.result()
def test_dynamic_workloads_sync(c):
    """The recursive _dynamic_workload tree sums its leaves to 52."""
    future = c.submit(_dynamic_workload, 0, delay=0.02)
    assert future.result(timeout=20) == 52
@pytest.mark.slow
def test_dynamic_workloads_sync_random(c):
    """Same as test_dynamic_workloads_sync, with randomized task durations."""
    future = c.submit(_dynamic_workload, 0, delay="random")
    assert future.result(timeout=20) == 52
@pytest.mark.xfail(COMPILED, reason="Fails with cythonized scheduler")
@gen_cluster(client=True)
async def test_bytes_keys(c, s, a, b):
    """bytes task keys survive the round trip through scheduler and workers."""
    key = b"inc-123"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is bytes
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
@gen_cluster(client=True)
async def test_unicode_ascii_keys(c, s, a, b):
    """ASCII str task keys survive the round trip through scheduler and workers."""
    uni_type = str
    key = "inc-123"
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is uni_type
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2
@gen_cluster(client=True)
async def test_unicode_keys(c, s, a, b):
    """Non-ASCII str keys work for submit, dependent tasks, and scatter."""
    uni_type = str
    key = "inc-123\u03bc"  # contains a Greek mu
    future = c.submit(inc, 1, key=key)
    result = await future
    assert type(future.key) is uni_type
    assert set(s.tasks) == {key}
    assert key in a.data or key in b.data
    assert result == 2

    future2 = c.submit(inc, future)
    result2 = await future2
    assert result2 == 3

    future3 = await c.scatter({"data-123": 123})
    result3 = await future3["data-123"]
    assert result3 == 123
def test_use_synchronous_client_in_async_context(loop, c):
    """A synchronous client still works when awaited inside a coroutine via sync()."""
    async def f():
        x = await c.scatter(123)
        y = c.submit(inc, x)
        z = await c.gather(y)
        return z

    z = sync(loop, f)
    assert z == 124
def test_quiet_quit_when_cluster_leaves(loop_in_thread):
    """Closing the cluster out from under a client must not log comm errors."""
    loop = loop_in_thread
    with LocalCluster(loop=loop, dashboard_address=":0", silence_logs=False) as cluster:
        with captured_logger("distributed.comm") as sio:
            with Client(cluster, loop=loop) as client:
                futures = client.map(lambda x: x + 1, range(10))
                sleep(0.05)
                cluster.close()
                sleep(0.05)

        text = sio.getvalue()
        assert not text
def test_warn_executor(loop, s, a, b):
    """The deprecated Executor alias warns, pointing users at Client."""
    with warnings.catch_warnings(record=True) as record:
        with Executor(s["address"], loop=loop) as c:
            pass

    assert any("Client" in str(r.message) for r in record)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_future(c, s, a, b):
    """call_stack for a specific future reports only that task, on its worker."""
    x = c.submit(slowdec, 1, delay=0.5)
    future = c.submit(slowinc, 1, delay=0.5)
    await asyncio.sleep(0.1)
    # querying by future object and by key must agree
    results = await asyncio.gather(
        c.call_stack(future), c.call_stack(keys=[future.key])
    )
    assert all(list(first(result.values())) == [future.key] for result in results)
    assert results[0] == results[1]
    result = results[0]
    # figure out which worker is actually executing the future
    ts = a.tasks.get(future.key)
    if ts is not None and ts.state == "executing":
        w = a
    else:
        w = b
    assert list(result) == [w.address]
    assert list(result[w.address]) == [future.key]
    assert "slowinc" in str(result)
    assert "slowdec" not in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_all(c, s, a, b):
    """call_stack with no arguments reports every currently executing task."""
    future = c.submit(slowinc, 1, delay=0.8)
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.01)
    result = await c.call_stack()
    w = a if a.executing_count else b
    assert list(result) == [w.address]
    assert list(result[w.address]) == [future.key]
    assert "slowinc" in str(result)
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections(c, s, a, b):
    """call_stack accepts a dask collection and reports its running tasks."""
    da = pytest.importorskip("dask.array")
    x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.001)
    result = await c.call_stack(x)
    assert result
@gen_cluster([("127.0.0.1", 4)] * 2, client=True)
async def test_call_stack_collections_all(c, s, a, b):
    """call_stack with no arguments also works while a collection is computing."""
    da = pytest.importorskip("dask.array")
    x = da.random.random(100, chunks=(10,)).map_blocks(slowinc, delay=0.5).persist()
    while not a.executing_count and not b.executing_count:
        await asyncio.sleep(0.001)
    result = await c.call_stack()
    assert result
@pytest.mark.flaky(condition=WINDOWS, reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile(c, s, a, b):
    """Client.profile honors start/stop windows and per-worker filtering."""
    futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    await wait(futures)

    # a window entirely in the future must be empty
    x = await c.profile(start=time() + 10, stop=time() + 20)
    assert not x["count"]

    # the full history matches worker a's accumulated profile counts
    x = await c.profile(start=0, stop=time())
    assert (
        x["count"]
        == sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
    )

    y = await c.profile(start=time() - 0.300, stop=time())
    assert 0 < y["count"] < x["count"]

    # worker b did no work, so filtering on it yields nothing
    assert not any(p["count"] for _, p in b.profile_history)
    result = await c.profile(workers=b.address)
    assert not result["count"]
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "100ms"})
async def test_profile_keys(c, s, a, b):
    """Per-key profiles partition the total, and unknown keys are quiet no-ops."""
    x = c.map(slowinc, range(10), delay=0.05, workers=a.address)
    y = c.map(slowdec, range(10), delay=0.05, workers=a.address)
    await wait(x + y)

    xp = await c.profile("slowinc")
    yp = await c.profile("slowdec")
    p = await c.profile()

    assert p["count"] == xp["count"] + yp["count"]

    # asking for a key that never ran returns an empty profile without logging
    with captured_logger(logging.getLogger("distributed")) as logger:
        prof = await c.profile("does-not-exist")
        assert prof == profile.create()
        out = logger.getvalue()
        assert not out
@gen_cluster()
async def test_client_with_name(s, a, b):
    """A custom client name shows up in the client id and in scheduler logs."""
    with captured_logger("distributed.scheduler") as sio:
        client = await Client(s.address, asynchronous=True, name="foo")
        assert "foo" in client.id
        await client.close()

    text = sio.getvalue()
    assert "foo" in text
@gen_cluster(client=True)
async def test_future_defaults_to_default_client(c, s, a, b):
    """A Future built from a bare key binds to the current default client."""
    x = c.submit(inc, 1)
    await wait(x)

    future = Future(x.key)
    assert future.client is c
@gen_cluster(client=True)
async def test_future_auto_inform(c, s, a, b):
    """A second client learns the state of an existing key automatically."""
    x = c.submit(inc, 1)
    await wait(x)

    client = await Client(s.address, asynchronous=True)
    future = Future(x.key, client)

    while future.status != "finished":
        await asyncio.sleep(0.01)

    await client.close()
def test_client_async_before_loop_starts():
    """An asynchronous Client can be created before its event loop is running."""
    with pristine_loop() as loop:
        client = Client(asynchronous=True, loop=loop)
        assert client.asynchronous
        client.close()
@pytest.mark.slow
@gen_cluster(client=True, Worker=Nanny, timeout=60, nthreads=[("127.0.0.1", 3)] * 2)
async def test_nested_compute(c, s, a, b):
    """Tasks may themselves call .compute(), recursively (Fibonacci via delayed)."""
    def fib(x):
        assert get_worker().get_current_task()
        if x < 2:
            return x
        # NOTE: these locals a, b, c shadow the test fixtures inside fib only
        a = delayed(fib)(x - 1)
        b = delayed(fib)(x - 2)
        c = a + b
        return c.compute()

    future = c.submit(fib, 8)
    result = await future
    assert result == 21
    assert len(s.transition_log) > 50
@gen_cluster(client=True)
async def test_task_metadata(c, s, a, b):
    """set_metadata/get_metadata support plain keys, defaults, and nested paths."""
    await c.set_metadata("x", 1)
    result = await c.get_metadata("x")
    assert result == 1

    future = c.submit(inc, 1)
    key = future.key
    await wait(future)
    await c.set_metadata(key, 123)
    result = await c.get_metadata(key)
    assert result == 123

    # metadata keyed on a task is dropped when the task is forgotten
    del future

    while key in s.tasks:
        await asyncio.sleep(0.01)

    with pytest.raises(KeyError):
        await c.get_metadata(key)

    result = await c.get_metadata(key, None)
    assert result is None

    # list-valued keys address nested dictionaries
    await c.set_metadata(["x", "a"], 1)
    result = await c.get_metadata("x")
    assert result == {"a": 1}
    await c.set_metadata(["x", "b"], 2)
    result = await c.get_metadata("x")
    assert result == {"a": 1, "b": 2}
    result = await c.get_metadata(["x", "a"])
    assert result == 1

    # deep paths create intermediate dicts, replacing scalar values
    await c.set_metadata(["x", "a", "c", "d"], 1)
    result = await c.get_metadata("x")
    assert result == {"a": {"c": {"d": 1}}, "b": 2}
@gen_cluster(client=True, Worker=Nanny)
async def test_logs(c, s, a, b):
    """Scheduler, worker, and nanny logs are retrievable and tagged by logger."""
    await wait(c.map(inc, range(5)))
    logs = await c.get_scheduler_logs(n=5)
    assert logs

    for _, msg in logs:
        assert "distributed.scheduler" in msg

    w_logs = await c.get_worker_logs(n=5)
    assert set(w_logs.keys()) == {a.worker_address, b.worker_address}
    for log in w_logs.values():
        for _, msg in log:
            assert "distributed.worker" in msg

    n_logs = await c.get_worker_logs(nanny=True)
    assert set(n_logs.keys()) == {a.worker_address, b.worker_address}
    for log in n_logs.values():
        for _, msg in log:
            assert "distributed.nanny" in msg

    # restricting to one nanny by its worker address
    n_logs = await c.get_worker_logs(nanny=True, workers=[a.worker_address])
    assert set(n_logs.keys()) == {a.worker_address}
    for log in n_logs.values():
        for _, msg in log:
            assert "distributed.nanny" in msg
@gen_cluster(client=True)
async def test_avoid_delayed_finalize(c, s, a, b):
    """Computing a single delayed value adds no extra finalize task to the graph."""
    x = delayed(inc)(1)
    future = c.compute(x)
    result = await future
    assert result == 2
    assert list(s.tasks) == [future.key] == [x.key]
@gen_cluster()
async def test_config_scheduler_address(s, a, b):
    """Client() with no address falls back to the configured scheduler-address."""
    with dask.config.set({"scheduler-address": s.address}):
        with captured_logger("distributed.client") as sio:
            c = await Client(asynchronous=True)
            assert c.scheduler.address == s.address

        # the fallback is logged so users know where they connected
        text = sio.getvalue()
        assert s.address in text

        await c.close()
@gen_cluster(client=True)
async def test_warn_when_submitting_large_values(c, s, a, b):
    """Submitting a large literal warns once, with a truncated repr of the value."""
    with warnings.catch_warnings(record=True) as record:
        future = c.submit(lambda x: x + 1, b"0" * 2000000)

    text = str(record[0].message)
    assert "2.00 MB" in text or "1.91 MiB" in text
    assert "large" in text
    assert "..." in text
    assert "'000" in text
    assert "000'" in text
    assert len(text) < 2000

    # repeated submissions of the same large value should not warn every time
    with warnings.catch_warnings(record=True) as record:
        data = b"0" * 2000000
        for i in range(10):
            future = c.submit(lambda x, y: x, data, i)

    assert len(record) < 2
@gen_cluster(client=True)
async def test_unhashable_function(c, s, a, b):
    """An unhashable callable can still be submitted and executed."""
    func = _UnhashableCallable()
    result = await c.submit(func, 1)
    assert result == 2
@gen_cluster()
async def test_client_name(s, a, b):
    """The client-name config value is embedded in the registered client name."""
    with dask.config.set({"client-name": "hello-world"}):
        c = await Client(s.address, asynchronous=True)
        assert any("hello-world" in name for name in list(s.clients))
        await c.close()
def test_client_doesnt_close_given_loop(loop_in_thread, s, a, b):
    """A caller-supplied loop survives one client's close and serves a second one."""
    with Client(s["address"], loop=loop_in_thread) as c:
        assert c.submit(inc, 1).result() == 2
    with Client(s["address"], loop=loop_in_thread) as c:
        assert c.submit(inc, 2).result() == 3
@gen_cluster(client=True, nthreads=[])
async def test_quiet_scheduler_loss(c, s):
    """Losing the scheduler should not surface BrokenPipeError in client logs."""
    c._periodic_callbacks["scheduler-info"].interval = 10
    with captured_logger(logging.getLogger("distributed.client")) as logger:
        await s.close()
        await c._update_scheduler_info()
    text = logger.getvalue()
    assert "BrokenPipeError" not in text
def test_dashboard_link(loop, monkeypatch):
    """The dashboard link template expands scheme, environment vars, and port."""
    monkeypatch.setenv("USER", "myusername")

    with cluster(scheduler_kwargs={"dashboard_address": ":12355"}) as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            with dask.config.set(
                {"distributed.dashboard.link": "{scheme}://foo-{USER}:{port}/status"}
            ):
                link = "http://foo-myusername:12355/status"
                assert link == c.dashboard_link
                # the link also appears in the HTML repr
                text = c._repr_html_()
                assert link in text
@gen_test()
async def test_dashboard_link_inproc():
    """For an in-process cluster the {host} template expands without a slash."""
    async with Client(processes=False, asynchronous=True, dashboard_address=":0") as c:
        with dask.config.set({"distributed.dashboard.link": "{host}"}):
            assert "/" not in c.dashboard_link
@gen_test()
async def test_client_timeout_2():
    """With a tiny connect timeout the client fails fast and ends up closed."""
    with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
        start = time()
        c = Client("127.0.0.1:3755", asynchronous=True)
        with pytest.raises((TimeoutError, IOError)):
            await c
        stop = time()

        assert c.status == "closed"
        await c.close()
        assert stop - start < 1
@gen_test()
async def test_client_active_bad_port():
    """Connecting to a live port that is not a scheduler times out cleanly."""
    import tornado.httpserver
    import tornado.web

    # an HTTP server that will accept the TCP connection but speak the wrong protocol
    application = tornado.web.Application([(r"/", tornado.web.RequestHandler)])
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(8080)
    with dask.config.set({"distributed.comm.timeouts.connect": "10ms"}):
        c = Client("127.0.0.1:8080", asynchronous=True)
        with pytest.raises((TimeoutError, IOError)):
            await c
        await c._close(fast=True)
    http_server.stop()
@pytest.mark.parametrize("direct", [True, False])
@gen_cluster(client=True, client_kwargs={"serializers": ["dask", "msgpack"]})
async def test_turn_off_pickle(c, s, a, b, direct):
    """With pickle disabled, plain data flows but arbitrary objects raise TypeError."""
    np = pytest.importorskip("numpy")

    assert (await c.submit(inc, 1)) == 2
    await c.submit(np.ones, 5)
    await c.scatter(1)

    # Can't send complex data
    with pytest.raises(TypeError):
        await c.scatter(inc)

    # can send complex tasks (this uses pickle regardless)
    future = c.submit(lambda x: x, inc)
    await wait(future)

    # but can't receive complex results
    with pytest.raises(TypeError):
        await c.gather(future, direct=direct)

    # Run works
    result = await c.run(lambda: 1)
    assert list(result.values()) == [1, 1]
    result = await c.run_on_scheduler(lambda: 1)
    assert result == 1

    # But not with complex return values
    with pytest.raises(TypeError):
        await c.run(lambda: inc)
    with pytest.raises(TypeError):
        await c.run_on_scheduler(lambda: inc)
@gen_cluster()
async def test_de_serialization(s, a, b):
    """A client restricted to msgpack deserialization cannot retrieve ndarrays."""
    np = pytest.importorskip("numpy")

    c = await Client(
        s.address,
        asynchronous=True,
        serializers=["msgpack", "pickle"],
        deserializers=["msgpack"],
    )
    try:
        # Can send complex data
        future = await c.scatter(np.ones(5))

        # But can not retrieve it
        with pytest.raises(TypeError):
            result = await future
    finally:
        await c.close()
@gen_cluster()
async def test_de_serialization_none(s, a, b):
    """Restricting only deserializers (serializers left default) behaves the same."""
    np = pytest.importorskip("numpy")

    c = await Client(s.address, asynchronous=True, deserializers=["msgpack"])
    try:
        # Can send complex data
        future = await c.scatter(np.ones(5))

        # But can not retrieve it
        with pytest.raises(TypeError):
            result = await future
    finally:
        await c.close()
@gen_cluster()
async def test_client_repr_closed(s, a, b):
    """_repr_html_ must not raise on an already-closed async client."""
    c = await Client(s.address, asynchronous=True)
    await c.close()
    c._repr_html_()
def test_client_repr_closed_sync(loop):
    """_repr_html_ must not raise on an already-closed sync client."""
    with Client(loop=loop, processes=False, dashboard_address=":0") as c:
        pass
    c._repr_html_()
@pytest.mark.xfail(reason="https://github.com/dask/dask/pull/6807")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_nested_prioritization(c, s, w):
    """Scheduler task priorities should respect dask.order priorities for
    tuple-keyed delayed objects."""
    x = delayed(inc)(1, dask_key_name=("a", 2))
    y = delayed(inc)(2, dask_key_name=("a", 10))

    o = dask.order.order(merge(x.__dask_graph__(), y.__dask_graph__()))

    fx, fy = c.compute([x, y])
    await wait([fx, fy])

    assert (o[x.key] < o[y.key]) == (
        s.tasks[stringify(fx.key)].priority < s.tasks[stringify(fy.key)].priority
    )
@gen_cluster(client=True)
async def test_scatter_error_cancel(c, s, a, b):
    """A future that erred while depending on (released) scattered data stays
    in "error" state rather than being cancelled."""
    # https://github.com/dask/distributed/issues/2038
    def bad_fn(x):
        raise Exception("lol")

    x = await c.scatter(1)
    y = c.submit(bad_fn, x)
    del x

    await wait(y)
    assert y.status == "error"
    await asyncio.sleep(0.1)
    assert y.status == "error"  # not cancelled
@pytest.mark.parametrize("workers_arg", [False, True])
@pytest.mark.parametrize("direct", [False, True])
@pytest.mark.parametrize("broadcast", [False, True, 10])
@gen_cluster(
    client=True,
    nthreads=[("", 1)] * 10,
    worker_kwargs={"memory_monitor_interval": "20ms"},
)
async def test_scatter_and_replicate_avoid_paused_workers(
    c, s, *workers, workers_arg, direct, broadcast
):
    """scatter/replicate should place data only on the two workers
    (indices 3 and 7) that are not memory-paused."""
    # Pause every worker except indices 3 and 7
    paused_workers = [w for i, w in enumerate(workers) if i not in (3, 7)]
    for w in paused_workers:
        w.memory_pause_fraction = 1e-15
    # Wait until the scheduler has observed every pause
    while any(s.workers[w.address].status != Status.paused for w in paused_workers):
        await asyncio.sleep(0.01)

    f = await c.scatter(
        {"x": 1},
        workers=[w.address for w in workers[1:-1]] if workers_arg else None,
        broadcast=broadcast,
        direct=direct,
    )
    if not broadcast:
        await c.replicate(f, n=10)

    expect = [i in (3, 7) for i in range(10)]
    actual = [("x" in w.data) for w in workers]
    assert actual == expect
@pytest.mark.xfail(reason="GH#5409 Dask-Default-Threads are frequently detected")
def test_no_threads_lingering():
    """Fail with diagnostics if an unusually high number of threads is still
    alive at this point of the test session.

    Uses 40 as a heuristic threshold; below it the test passes silently.
    """
    if threading.active_count() < 40:
        return
    # Snapshot the registry so it cannot mutate while we iterate/print
    active = dict(threading._active)
    print(f"==== Found {len(active)} active threads: ====")
    for t in active.values():
        print(t)
    # pytest.fail instead of a bare `assert False`: it carries a message and
    # is not stripped when Python runs with -O
    pytest.fail(f"{len(active)} threads still active")
@gen_cluster()
async def test_direct_async(s, a, b):
    """The direct_to_workers constructor flag is stored on the client."""
    c = await Client(s.address, asynchronous=True, direct_to_workers=True)
    assert c.direct_to_workers
    await c.close()

    c = await Client(s.address, asynchronous=True, direct_to_workers=False)
    assert not c.direct_to_workers
    await c.close()
def test_direct_sync(c):
    """The sync client defaults to direct_to_workers=False, while a client
    obtained on a worker via get_client reports True."""
    assert not c.direct_to_workers

    def worker_side_direct():
        return get_client().direct_to_workers

    assert c.submit(worker_side_direct).result()
@gen_cluster()
async def test_mixing_clients(s, a, b):
    """Submitting a future owned by one client through another client raises
    and must not leak futures into the second client."""
    c1 = await Client(s.address, asynchronous=True)
    c2 = await Client(s.address, asynchronous=True)

    future = c1.submit(inc, 1)
    with pytest.raises(ValueError):
        c2.submit(inc, future)

    assert not c2.futures  # Don't create Futures on second Client
    await c1.close()
    await c2.close()
@gen_cluster(client=True)
async def test_tuple_keys(c, s, a, b):
    """Delayed objects with tuple dask keys compute to the right value."""
    inner = dask.delayed(inc)(1, dask_key_name=("x", 1))
    outer = dask.delayed(inc)(inner, dask_key_name=("y", 1))
    result = await c.compute(outer)
    assert result == 3
@gen_cluster(client=True)
async def test_multiple_scatter(c, s, a, b):
    """Concurrent direct scatters succeed and the result can be awaited twice."""
    futures = await asyncio.gather(*(c.scatter(1, direct=True) for _ in range(5)))

    x = await futures[0]
    x = await futures[0]
@gen_cluster(client=True)
async def test_map_large_kwargs_in_graph(c, s, a, b):
    """A large kwarg to map is hoisted into the graph as its own task
    (100 map tasks + 1 ndarray task)."""
    np = pytest.importorskip("numpy")
    x = np.random.random(100000)
    futures = c.map(lambda a, b: a + b, range(100), b=x)
    while not s.tasks:
        await asyncio.sleep(0.01)

    assert len(s.tasks) == 101
    assert any(k.startswith("ndarray") for k in s.tasks)
@gen_cluster(client=True)
async def test_retry(c, s, a, b):
    """Future.retry re-runs the task under the (now changed) config."""
    def f():
        assert dask.config.get("foo")

    with dask.config.set(foo=False):
        future = c.submit(f)
        with pytest.raises(AssertionError):
            await future

    with dask.config.set(foo=True):
        await future.retry()
        await future
@gen_cluster(client=True)
async def test_retry_dependencies(c, s, a, b):
    """Retrying a future re-runs its erred dependencies as well."""
    def f():
        return dask.config.get("foo")

    x = c.submit(f)
    y = c.submit(inc, x)

    with pytest.raises(KeyError):
        await y

    with dask.config.set(foo=100):
        await y.retry()
        result = await y
        assert result == 101

        await y.retry()
        await x.retry()
        result = await y
        assert result == 101
@gen_cluster(client=True)
async def test_released_dependencies(c, s, a, b):
    """retry works even after the client released a dependency future."""
    def f(x):
        return dask.config.get("foo") + 1

    x = c.submit(inc, 1, key="x")
    y = c.submit(f, x, key="y")
    del x

    with pytest.raises(KeyError):
        await y

    with dask.config.set(foo=100):
        await y.retry()
        result = await y
        assert result == 101
@gen_cluster(client=True, clean_kwargs={"threads": False})
async def test_profile_bokeh(c, s, a, b):
    """Client.profile(plot=True) returns a bokeh Model and can write an HTML
    file (PermissionError is tolerated as xfail on Windows)."""
    pytest.importorskip("bokeh.plotting")
    from bokeh.model import Model

    await c.gather(c.map(slowinc, range(10), delay=0.2))
    state, figure = await c.profile(plot=True)
    assert isinstance(figure, Model)

    with tmpfile("html") as fn:
        try:
            await c.profile(filename=fn)
        except PermissionError:
            if WINDOWS:
                pytest.xfail()
        assert os.path.exists(fn)
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable(c, s, a, b):
    """Graphs may mix plain futures with (nested) SubgraphCallable nodes."""
    future = c.submit(add, 1, 2)

    subgraph = SubgraphCallable(
        {"_2": (add, "_0", "_1"), "_3": (add, future, "_2")}, "_3", ("_0", "_1")
    )
    dsk = {"a": 1, "b": 2, "c": (subgraph, "a", "b"), "d": (subgraph, "c", "b")}

    future2 = c.get(dsk, "d", sync=False)
    result = await future2
    assert result == 11

    # Nested subgraphs
    subgraph2 = SubgraphCallable(
        {
            "_2": (subgraph, "_0", "_1"),
            "_3": (subgraph, "_2", "_1"),
            "_4": (add, "_3", future2),
        },
        "_4",
        ("_0", "_1"),
    )
    dsk2 = {"e": 1, "f": 2, "g": (subgraph2, "e", "f")}

    result = await c.get(dsk2, "g", sync=False)
    assert result == 22
@gen_cluster(client=True)
async def test_get_mix_futures_and_SubgraphCallable_dask_dataframe(c, s, a, b):
    """Repeated map_partitions/astype on a persisted dataframe (which embeds
    futures into subgraphs) computes correctly."""
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd

    df = pd.DataFrame({"x": range(1, 11)})
    ddf = dd.from_pandas(df, npartitions=2).persist()
    ddf = ddf.map_partitions(lambda x: x)
    ddf["x"] = ddf["x"].astype("f8")
    ddf = ddf.map_partitions(lambda x: x)
    ddf["x"] = ddf["x"].astype("f8")
    result = await c.compute(ddf)
    assert result.equals(df.astype("f8"))
def test_direct_to_workers(s, loop):
    """With direct_to_workers=True, gathers bypass the scheduler (no "gather"
    entries in the scheduler event log)."""
    with Client(s["address"], loop=loop, direct_to_workers=True) as client:
        future = client.scatter(1)
        future.result()
        resp = client.run_on_scheduler(lambda dask_scheduler: dask_scheduler.events)
        assert "gather" not in str(resp)
@gen_cluster(client=True)
async def test_instances(c, s, a, b):
    """The weak instance registries track exactly the live client, scheduler
    and workers of this cluster."""
    assert [c] == list(Client._instances)
    assert [s] == list(Scheduler._instances)
    assert {a, b} == set(Worker._instances)
@gen_cluster(client=True)
async def test_wait_for_workers(c, s, a, b):
    """wait_for_workers blocks until n_workers join, and raises TimeoutError
    (mentioning progress and the timeout) when they never do."""
    future = asyncio.ensure_future(c.wait_for_workers(n_workers=3))
    await asyncio.sleep(0.22)  # 2 chances
    assert not future.done()

    w = await Worker(s.address)
    start = time()
    await future
    assert time() < start + 1
    await w.close()

    with pytest.raises(TimeoutError) as info:
        await c.wait_for_workers(n_workers=10, timeout="1 ms")

    assert "2/10" in str(info.value).replace(" ", "")
    assert "1 ms" in str(info.value)
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_file_descriptors_dont_leak(Worker):
    """After tearing down a scheduler/worker/client trio, the process's open
    file-descriptor count returns to its starting value."""
    pytest.importorskip("pandas")
    df = dask.datasets.timeseries(freq="10s", dtypes={"x": int, "y": float})

    proc = psutil.Process()
    before = proc.num_fds()
    async with Scheduler(dashboard_address=":0") as s:
        async with Worker(s.address), Worker(s.address), Client(
            s.address, asynchronous=True
        ):
            assert proc.num_fds() > before
            await df.sum().persist()

    start = time()
    # Descriptors are released asynchronously; poll with a 10s deadline
    while proc.num_fds() > before:
        await asyncio.sleep(0.01)
        assert time() < start + 10, (before, proc.num_fds())
@gen_test()
async def test_dashboard_link_cluster():
    """A cluster's custom dashboard_link shows up in the client HTML repr."""
    class MyCluster(LocalCluster):
        @property
        def dashboard_link(self):
            return "http://foo.com"

    async with MyCluster(
        processes=False, asynchronous=True, dashboard_address=":0"
    ) as cluster:
        async with Client(cluster, asynchronous=True) as client:
            assert "http://foo.com" in client._repr_html_()
@gen_test()
async def test_shutdown():
    """Client.shutdown closes both the scheduler and its workers."""
    async with Scheduler(dashboard_address=":0") as s:
        async with Worker(s.address) as w:
            async with Client(s.address, asynchronous=True) as c:
                await c.shutdown()

            assert s.status == Status.closed
            assert w.status == Status.closed
@pytest.mark.asyncio
async def test_shutdown_localcluster(cleanup):
    """Client.shutdown also closes the scheduler of a LocalCluster."""
    async with LocalCluster(
        n_workers=1, asynchronous=True, processes=False, dashboard_address=":0"
    ) as lc:
        async with Client(lc, asynchronous=True) as c:
            await c.shutdown()

        assert lc.scheduler.status == Status.closed
@gen_test()
async def test_config_inherited_by_subprocess():
    """dask config set in the parent is visible in process-based workers."""
    with dask.config.set(foo=100):
        async with LocalCluster(
            n_workers=1,
            asynchronous=True,
            processes=True,
            dashboard_address=":0",
        ) as lc:
            async with Client(lc, asynchronous=True) as c:
                assert await c.submit(dask.config.get, "foo") == 100
@gen_cluster(client=True)
async def test_futures_of_sorted(c, s, a, b):
    """futures_of returns futures in the same order as the collection's keys."""
    pytest.importorskip("dask.dataframe")
    df = await dask.datasets.timeseries(dtypes={"x": int}).persist()
    futures = futures_of(df)
    for k, f in zip(df.__dask_keys__(), futures):
        assert str(k) in str(f)
@pytest.mark.flaky(reruns=10, reruns_delay=5)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "10ms"})
async def test_profile_server(c, s, a, b):
    """profile(server=True) captures worker activity and profile(scheduler=True)
    captures scheduler activity; retried up to 5 times since sampling is
    inherently racy."""
    for i in range(5):
        try:
            x = c.map(slowinc, range(10), delay=0.01, workers=a.address, pure=False)
            await wait(x)
            await asyncio.gather(
                c.run(slowinc, 1, delay=0.5), c.run_on_scheduler(slowdec, 1, delay=0.5)
            )

            p = await c.profile(server=True)  # All worker servers
            assert "slowinc" in str(p)

            p = await c.profile(scheduler=True)  # Scheduler
            assert "slowdec" in str(p)
        except AssertionError:
            # Only re-raise on the final attempt
            if i == 4:
                raise
            else:
                pass
        else:
            break
@gen_cluster(client=True)
async def test_await_future(c, s, a, b):
    """Futures can be awaited directly; awaiting an erred future re-raises."""
    future = c.submit(inc, 1)

    async def f():  # flake8: noqa
        result = await future
        assert result == 2

    await f()

    future = c.submit(div, 1, 0)

    async def f():
        with pytest.raises(ZeroDivisionError):
            await future

    await f()
@gen_cluster(client=True)
async def test_as_completed_async_for(c, s, a, b):
    """as_completed supports `async for` iteration over futures."""
    futures = c.map(inc, range(10))
    ac = as_completed(futures)
    results = []

    async def f():
        async for future in ac:
            result = await future
            results.append(result)

    await f()
    assert set(results) == set(range(1, 11))
@gen_cluster(client=True)
async def test_as_completed_async_for_results(c, s, a, b):
    """as_completed(with_results=True) yields (future, result) pairs without
    triggering separate gather operations."""
    futures = c.map(inc, range(10))
    ac = as_completed(futures, with_results=True)
    results = []

    async def f():
        async for future, result in ac:
            results.append(result)

    await f()
    assert set(results) == set(range(1, 11))
    # Results arrived with completion notifications, not via gather
    assert not s.counters["op"].components[0]["gather"]
@gen_cluster(client=True)
async def test_as_completed_async_for_cancel(c, s, a, b):
    """A future cancelled mid-iteration is still yielded by as_completed."""
    x = c.submit(inc, 1)
    y = c.submit(sleep, 0.3)
    ac = as_completed([x, y])

    async def _():
        await asyncio.sleep(0.1)
        await y.cancel(asynchronous=True)

    c.loop.add_callback(_)

    L = []

    async def f():
        async for future in ac:
            L.append(future)

    await f()
    assert L == [x, y]
@gen_test()
async def test_async_with():
    """`async with Client(...)` usage closes both client and its cluster."""
    async with Client(processes=False, dashboard_address=":0", asynchronous=True) as c:
        assert await c.submit(lambda x: x + 1, 10) == 11

    assert c.status == "closed"
    assert c.cluster.status == Status.closed
def test_client_sync_with_async_def(loop):
    """Client.sync (and the sync helper) can drive a coroutine function from
    synchronous code."""
    async def ff():
        await asyncio.sleep(0.01)
        return 1

    with cluster() as (s, [a, b]):
        with Client(s["address"], loop=loop) as c:
            assert sync(loop, ff) == 1
            assert c.sync(ff) == 1
@pytest.mark.skip(reason="known intermittent failure")
@gen_cluster(client=True)
async def test_dont_hold_on_to_large_messages(c, s, a, b):
    """After persisting, the original numpy array should be garbage-collected
    (tracked via a weakref); on timeout, print referrer diagnostics."""
    np = pytest.importorskip("numpy")
    da = pytest.importorskip("dask.array")
    x = np.random.random(1000000)
    xr = weakref.ref(x)

    d = da.from_array(x, chunks=(100000,))
    d = d.persist()
    del x

    start = time()
    while xr() is not None:
        if time() > start + 5:
            # Help diagnosing
            from types import FrameType

            x = xr()
            if x is not None:
                del x
                rc = sys.getrefcount(xr())
                refs = gc.get_referrers(xr())
                print("refs to x:", rc, refs, gc.isenabled())

                frames = [r for r in refs if isinstance(r, FrameType)]
                for i, f in enumerate(frames):
                    print(
                        "frames #%d:" % i,
                        f.f_code.co_name,
                        f.f_code.co_filename,
                        sorted(f.f_locals),
                    )
            pytest.fail("array should have been destroyed")

        await asyncio.sleep(0.200)
@gen_cluster(client=True)
async def test_run_on_scheduler_async_def(c, s, a, b):
    """run_on_scheduler and run accept coroutine functions and await them."""
    async def f(dask_scheduler):
        await asyncio.sleep(0.01)
        dask_scheduler.foo = "bar"

    await c.run_on_scheduler(f)

    assert s.foo == "bar"

    async def f(dask_worker):
        await asyncio.sleep(0.01)
        dask_worker.foo = "bar"

    await c.run(f)

    assert a.foo == "bar"
    assert b.foo == "bar"
@gen_cluster(client=True)
async def test_run_on_scheduler_async_def_wait(c, s, a, b):
    """run/run_on_scheduler with wait=False fire-and-forget coroutine
    functions; effects are observed by polling."""
    async def f(dask_scheduler):
        await asyncio.sleep(0.01)
        dask_scheduler.foo = "bar"

    await c.run_on_scheduler(f, wait=False)

    while not hasattr(s, "foo"):
        await asyncio.sleep(0.01)
    assert s.foo == "bar"

    async def f(dask_worker):
        await asyncio.sleep(0.01)
        dask_worker.foo = "bar"

    await c.run(f, wait=False)

    while not hasattr(a, "foo") or not hasattr(b, "foo"):
        await asyncio.sleep(0.01)
    assert a.foo == "bar"
    assert b.foo == "bar"
@pytest.mark.skipif(WINDOWS, reason="frequently kills off the whole test suite")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 2)] * 2)
async def test_performance_report(c, s, a, b):
    """performance_report writes an HTML report that embeds the calling code
    at the requested stacklevel, and respects mode="inline"/"cdn"."""
    pytest.importorskip("bokeh")
    da = pytest.importorskip("dask.array")

    async def f(stacklevel, mode=None):
        """
        We wrap this in a function so that the assertions aren't in the
        performance report itself

        Also, we want this comment to appear
        """
        x = da.random.random((1000, 1000), chunks=(100, 100))
        with tmpfile(extension="html") as fn:
            async with performance_report(
                filename=fn, stacklevel=stacklevel, mode=mode
            ):
                await c.compute((x + x.T).sum())

            with open(fn) as f:
                data = f.read()
        return data

    # Ensure default kwarg maintains backward compatibility
    data = await f(stacklevel=1)

    assert "Also, we want this comment to appear" in data
    assert "bokeh" in data
    assert "random" in data
    assert "Dask Performance Report" in data
    assert "x = da.random" in data
    assert "Threads: 4" in data
    assert "No logs to report" in data
    assert dask.__version__ in data

    # stacklevel=2 captures code two frames back -- which in this case
    # is the testing function
    data = await f(stacklevel=2)

    assert "async def test_performance_report(c, s, a, b):" in data
    assert "Dask Performance Report" in data

    # stacklevel=0 or lower is overridden to stacklevel=1 so we don't see
    # distributed internals
    data = await f(stacklevel=0)

    assert "Also, we want this comment to appear" in data
    assert "Dask Performance Report" in data

    data = await f(stacklevel=1, mode="inline")
    assert "cdn.bokeh.org" not in data
    data = await f(stacklevel=1, mode="cdn")
    assert "cdn.bokeh.org" in data
@gen_cluster(nthreads=[])
async def test_client_gather_semaphore_loop(s):
    """The client's gather semaphore is bound to the client's event loop."""
    async with Client(s.address, asynchronous=True) as c:
        assert c._gather_semaphore._loop is c.loop.asyncio_loop
@gen_cluster(client=True)
async def test_as_completed_condition_loop(c, s, a, b):
    """as_completed's internal condition is bound to the client's loop."""
    seq = c.map(inc, range(5))
    ac = as_completed(seq)
    assert ac.condition._loop == c.loop.asyncio_loop
def test_client_connectionpool_semaphore_loop(s, a, b):
    """The rpc connection-pool semaphore is bound to the client's loop."""
    with Client(s["address"]) as c:
        assert c.rpc.semaphore._loop is c.loop.asyncio_loop
@pytest.mark.slow
@gen_cluster(nthreads=[], timeout=60)
async def test_mixed_compression(s):
    """Workers with different comm compression settings (None vs lz4) can
    still exchange data during a computation."""
    pytest.importorskip("lz4")
    da = pytest.importorskip("dask.array")
    async with Nanny(
        s.address, nthreads=1, config={"distributed.comm.compression": None}
    ):
        async with Nanny(
            s.address, nthreads=1, config={"distributed.comm.compression": "lz4"}
        ):
            async with Client(s.address, asynchronous=True) as c:
                await c.get_versions()
                x = da.ones((10000, 10000))
                y = x + x.T
                await c.compute(y.sum())
@gen_cluster(client=True)
async def test_futures_in_subgraphs(c, s, a, b):
    """Regression test of <https://github.com/dask/distributed/issues/4145>"""
    dd = pytest.importorskip("dask.dataframe")
    import pandas as pd

    ddf = dd.from_pandas(
        pd.DataFrame(
            dict(
                uid=range(50),
                enter_time=pd.date_range(
                    start="2020-01-01", end="2020-09-01", periods=50, tz="UTC"
                ),
            )
        ),
        npartitions=5,
    )

    ddf = ddf[ddf.uid.isin(range(29))].persist()
    ddf["local_time"] = ddf.enter_time.dt.tz_convert("US/Central")
    ddf["day"] = ddf.enter_time.dt.day_name()
    ddf = await c.submit(dd.categorical.categorize, ddf, columns=["day"], index=False)
@gen_cluster(client=True)
async def test_get_task_metadata(c, s, a, b):
    """get_task_metadata collects metadata and final state for tasks run
    inside its context, and removes its plugin afterwards."""
    # Populate task metadata
    await c.register_worker_plugin(TaskStateMetadataPlugin())

    async with get_task_metadata() as tasks:
        f = c.submit(slowinc, 1)
        await f

    metadata = tasks.metadata
    assert f.key in metadata
    assert metadata[f.key] == s.tasks.get(f.key).metadata

    state = tasks.state
    assert f.key in state
    assert state[f.key] == "memory"

    # The temporary scheduler plugin was cleaned up on context exit
    assert not any(isinstance(p, CollectTaskMetaDataPlugin) for p in s.plugins)
@gen_cluster(client=True)
async def test_get_task_metadata_multiple(c, s, a, b):
    """Nested get_task_metadata contexts each see only the tasks submitted
    and completed within their own scope."""
    # Populate task metadata
    await c.register_worker_plugin(TaskStateMetadataPlugin())

    # Ensure that get_task_metadata only collects metadata for
    # tasks which are submitted and completed within its context
    async with get_task_metadata() as tasks1:
        f1 = c.submit(slowinc, 1)
        await f1
        async with get_task_metadata() as tasks2:
            f2 = c.submit(slowinc, 2)
            await f2

    metadata1 = tasks1.metadata
    metadata2 = tasks2.metadata

    assert len(metadata1) == 2
    assert sorted(metadata1.keys()) == sorted([f1.key, f2.key])
    assert metadata1[f1.key] == s.tasks.get(f1.key).metadata
    assert metadata1[f2.key] == s.tasks.get(f2.key).metadata

    assert len(metadata2) == 1
    assert list(metadata2.keys()) == [f2.key]
    assert metadata2[f2.key] == s.tasks.get(f2.key).metadata
@gen_cluster(client=True)
async def test_register_worker_plugin_exception(c, s, a, b):
    """An exception in a worker plugin's setup propagates to the client."""
    class MyPlugin:
        def setup(self, worker=None):
            raise ValueError("Setup failed")

    with pytest.raises(ValueError, match="Setup failed"):
        await c.register_worker_plugin(MyPlugin())
@gen_cluster(client=True)
async def test_log_event(c, s, a, b):
    """Events can be logged from a worker task, from the scheduler, and from
    the client, and retrieved via Client.get_events."""
    # Log an event from inside a task
    def foo():
        get_worker().log_event("topic1", {"foo": "bar"})

    assert not await c.get_events("topic1")
    await c.submit(foo)
    events = await c.get_events("topic1")
    assert len(events) == 1
    assert events[0][1] == {"foo": "bar"}

    # Log an event while on the scheduler
    def log_scheduler(dask_scheduler):
        dask_scheduler.log_event("topic2", {"woo": "hoo"})

    await c.run_on_scheduler(log_scheduler)
    events = await c.get_events("topic2")
    assert len(events) == 1
    assert events[0][1] == {"woo": "hoo"}

    # Log an event from the client process
    await c.log_event("topic2", ("alice", "bob"))
    events = await c.get_events("topic2")
    assert len(events) == 2
    assert events[1][1] == ("alice", "bob")
@gen_cluster(client=True)
async def test_annotations_task_state(c, s, a, b):
    """dask.annotate annotations survive into scheduler TaskState objects."""
    da = pytest.importorskip("dask.array")

    with dask.annotate(qux="bar", priority=100):
        x = da.ones(10, chunks=(5,))

    # Fusion would merge layers and could drop annotations
    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()

    assert all(
        {"qux": "bar", "priority": 100} == ts.annotations for ts in s.tasks.values()
    )
@pytest.mark.parametrize("fn", ["compute", "persist"])
@gen_cluster(client=True)
async def test_annotations_compute_time(c, s, a, b, fn):
    """Annotations active at compute/persist time are picked up."""
    da = pytest.importorskip("dask.array")
    x = da.ones(10, chunks=(5,))

    with dask.annotate(foo="bar"):
        # Turn off optimization to avoid rewriting layers and picking up annotations
        # that way. Instead, we want `compute`/`persist` to be able to pick them up.
        fut = getattr(c, fn)(x, optimize_graph=False)

    await wait(fut)
    assert s.tasks
    assert all(ts.annotations == {"foo": "bar"} for ts in s.tasks.values())
@pytest.mark.xfail(reason="https://github.com/dask/dask/issues/7036")
@gen_cluster(client=True)
async def test_annotations_survive_optimization(c, s, a, b):
    """Layer annotations should survive dask.optimize (currently xfail)."""
    da = pytest.importorskip("dask.array")

    with dask.annotate(foo="bar"):
        x = da.ones(10, chunks=(5,))

    ann = x.__dask_graph__().layers[x.name].annotations
    assert ann is not None
    assert ann.get("foo", None) == "bar"

    (xx,) = dask.optimize(x)

    ann = xx.__dask_graph__().layers[x.name].annotations
    assert ann is not None
    assert ann.get("foo", None) == "bar"
@gen_cluster(client=True)
async def test_annotations_priorities(c, s, a, b):
    """A priority annotation is reflected in scheduler task priority tuples
    (negated in the first element) and kept in ts.annotations."""
    da = pytest.importorskip("dask.array")

    with dask.annotate(priority=15):
        x = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()

    assert all("15" in str(ts.priority) for ts in s.tasks.values())
    assert all(ts.priority[0] == -15 for ts in s.tasks.values())
    assert all({"priority": 15} == ts.annotations for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_workers(c, s, a, b):
    """A workers annotation becomes a worker restriction, so all data lands
    on the annotated worker only."""
    da = pytest.importorskip("dask.array")

    with dask.annotate(workers=[a.address]):
        x = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()

    assert all({"workers": (a.address,)} == ts.annotations for ts in s.tasks.values())
    assert all({a.address} == ts.worker_restrictions for ts in s.tasks.values())
    assert a.data
    assert not b.data
@gen_cluster(client=True)
async def test_annotations_retries(c, s, a, b):
    """A retries annotation populates TaskState.retries."""
    da = pytest.importorskip("dask.array")

    with dask.annotate(retries=2):
        x = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()

    assert all(ts.retries == 2 for ts in s.tasks.values())
    assert all(ts.annotations == {"retries": 2} for ts in s.tasks.values())
@gen_cluster(client=True)
async def test_annotations_blockwise_unpack(c, s, a, b):
    """retries annotations on an earlier blockwise layer are honored even
    when a later layer annotates retries=0 (flaky layer succeeds on retry)."""
    da = pytest.importorskip("dask.array")
    np = pytest.importorskip("numpy")
    from dask.array.utils import assert_eq

    # A flaky doubling function -- need extra args because it is called before
    # application to establish dtype/meta.
    scale = varying([ZeroDivisionError("one"), ZeroDivisionError("two"), 2, 2])

    def flaky_double(x):
        return scale() * x

    # A reliable double function.
    def reliable_double(x):
        return 2 * x

    x = da.ones(10, chunks=(5,))

    # The later annotations should not override the earlier annotations
    with dask.annotate(retries=2):
        y = x.map_blocks(flaky_double, meta=np.array((), dtype=float))
    with dask.annotate(retries=0):
        z = y.map_blocks(reliable_double, meta=np.array((), dtype=float))

    with dask.config.set(optimization__fuse__active=False):
        z = await c.compute(z)

    assert_eq(z, np.ones(10) * 4.0)
@gen_cluster(
    client=True,
    nthreads=[
        ("127.0.0.1", 1),
        ("127.0.0.1", 1, {"resources": {"GPU": 1}}),
    ],
)
async def test_annotations_resources(c, s, a, b):
    """A resources annotation becomes a resource restriction on every task."""
    da = pytest.importorskip("dask.array")

    with dask.annotate(resources={"GPU": 1}):
        x = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()

    assert all([{"GPU": 1} == ts.resource_restrictions for ts in s.tasks.values()])
    assert all([{"resources": {"GPU": 1}} == ts.annotations for ts in s.tasks.values()])
@gen_cluster(
    client=True,
    nthreads=[
        ("127.0.0.1", 1),
        ("127.0.0.1", 1, {"resources": {"GPU": 1}}),
    ],
)
async def test_annotations_resources_culled(c, s, a, b):
    """Culling a graph (selecting one output chunk) must not break
    resource-annotated layers."""
    da = pytest.importorskip("dask.array")

    x = da.ones((2, 2, 2), chunks=1)
    with dask.annotate(resources={"GPU": 1}):
        y = x.map_blocks(lambda x0: x0, meta=x._meta)

    z = y[0, 0, 0]
    (z,) = c.compute([z], optimize_graph=False)

    await z
    # it worked!
@gen_cluster(client=True)
async def test_annotations_loose_restrictions(c, s, a, b):
    """workers= with allow_other_workers=True becomes a loose host
    restriction rather than a strict worker restriction."""
    da = pytest.importorskip("dask.array")

    # Eventually fails if allow_other_workers=False
    with dask.annotate(workers=["fake"], allow_other_workers=True):
        x = da.ones(10, chunks=(5,))

    with dask.config.set(optimization__fuse__active=False):
        x = await x.persist()

    assert all(not ts.worker_restrictions for ts in s.tasks.values())
    assert all({"fake"} == ts.host_restrictions for ts in s.tasks.values())
    assert all(
        [
            {"workers": ("fake",), "allow_other_workers": True} == ts.annotations
            for ts in s.tasks.values()
        ]
    )
@gen_cluster(client=True)
async def test_workers_collection_restriction(c, s, a, b):
    """compute(workers=...) restricts a whole collection to one worker."""
    da = pytest.importorskip("dask.array")

    future = c.compute(da.arange(10), workers=a.address)
    await future

    assert a.data and not b.data
@gen_cluster(client=True, nthreads=[("127.0.0.1", 0)])
async def test_get_client_functions_spawn_clusters(c, s, a):
    """get_client inside a task returns the outer client even when the task
    spawns its own LocalCluster/Client; the outer client stays the default."""
    # see gh4565
    scheduler_addr = c.scheduler.address

    def f(x):
        with LocalCluster(
            n_workers=1,
            processes=False,
            dashboard_address=":0",
            worker_dashboard_address=":0",
        ) as cluster2:
            with Client(cluster2) as c1:
                c2 = get_client()

                c1_scheduler = c1.scheduler.address
                c2_scheduler = c2.scheduler.address
                assert c1_scheduler != c2_scheduler
                assert c2_scheduler == scheduler_addr

    await c.gather(c.map(f, range(2)))
    await a.close()

    c_default = default_client()
    assert c is c_default
def test_computation_code_walk_frames():
    """Client._get_computation_code returns the source of the calling frame,
    honoring the ignore-modules config (which must be a list)."""
    test_function_code = inspect.getsource(test_computation_code_walk_frames)
    code = Client._get_computation_code()
    assert test_function_code == code

    def nested_call():
        return Client._get_computation_code()

    assert nested_call() == inspect.getsource(nested_call)

    with pytest.raises(TypeError, match="Ignored modules must be a list"):
        with dask.config.set(
            {"distributed.diagnostics.computations.ignore-modules": "test_client"}
        ):
            code = Client._get_computation_code()

    with dask.config.set(
        {"distributed.diagnostics.computations.ignore-modules": ["test_client"]}
    ):
        import sys

        # With this module ignored, the captured code is one frame up
        upper_frame_code = inspect.getsource(sys._getframe(1))
        code = Client._get_computation_code()
        assert code == upper_frame_code
        assert nested_call() == upper_frame_code
def test_computation_object_code_dask_compute(client):
    """The scheduler's Computation records capture the source of the test
    function that triggered compute()."""
    da = pytest.importorskip("dask.array")
    x = da.ones((10, 10), chunks=(3, 3))
    future = x.sum().compute()
    y = future

    test_function_code = inspect.getsource(test_computation_object_code_dask_compute)

    def fetch_comp_code(dask_scheduler):
        computations = list(dask_scheduler.computations)
        assert len(computations) == 1
        comp = computations[0]
        assert len(comp.code) == 1
        return comp.code[0]

    code = client.run_on_scheduler(fetch_comp_code)

    assert code == test_function_code
def test_computation_object_code_not_available(client):
    """When the triggering frame's source cannot be retrieved (compute called
    from inside numpy), the recorded code is a placeholder string."""
    np = pytest.importorskip("numpy")
    pd = pytest.importorskip("pandas")
    dd = pytest.importorskip("dask.dataframe")
    df = pd.DataFrame({"a": range(10)})
    ddf = dd.from_pandas(df, npartitions=3)
    result = np.where(ddf.a > 4)

    def fetch_comp_code(dask_scheduler):
        computations = list(dask_scheduler.computations)
        assert len(computations) == 1
        comp = computations[0]
        assert len(comp.code) == 1
        return comp.code[0]

    code = client.run_on_scheduler(fetch_comp_code)

    assert code == "<Code not available>"
@gen_cluster(client=True)
async def test_computation_object_code_dask_persist(c, s, a, b):
    """persist() records this test function's source (unwrapped from the
    gen_cluster decorator) as the computation code."""
    da = pytest.importorskip("dask.array")
    x = da.ones((10, 10), chunks=(3, 3))
    future = x.sum().persist()
    await future

    test_function_code = inspect.getsource(
        test_computation_object_code_dask_persist.__wrapped__
    )
    computations = list(s.computations)
    assert len(computations) == 1
    comp = computations[0]
    assert len(comp.code) == 1
    assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_simple(c, s, a, b):
    """A single submit records this test function's source as the code."""
    def func(x):
        return x

    fut = c.submit(func, 1)
    await fut

    test_function_code = inspect.getsource(
        test_computation_object_code_client_submit_simple.__wrapped__
    )
    computations = list(s.computations)
    assert len(computations) == 1
    comp = computations[0]

    assert len(comp.code) == 1
    assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_list_comp(c, s, a, b):
    """Many submits from a list comprehension record the code only once."""
    def func(x):
        return x

    futs = [c.submit(func, x) for x in range(10)]
    await c.gather(futs)

    test_function_code = inspect.getsource(
        test_computation_object_code_client_submit_list_comp.__wrapped__
    )
    computations = list(s.computations)
    assert len(computations) == 1
    comp = computations[0]

    # Code is deduplicated
    assert len(comp.code) == 1
    assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_submit_dict_comp(c, s, a, b):
    """Many submits from a dict comprehension record the code only once."""
    def func(x):
        return x

    futs = {x: c.submit(func, x) for x in range(10)}
    await c.gather(futs)

    test_function_code = inspect.getsource(
        test_computation_object_code_client_submit_dict_comp.__wrapped__
    )
    computations = list(s.computations)
    assert len(computations) == 1
    comp = computations[0]

    # Code is deduplicated
    assert len(comp.code) == 1
    assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_map(c, s, a, b):
    """Client.compute records this test function's source as the code."""
    da = pytest.importorskip("dask.array")
    x = da.ones((10, 10), chunks=(3, 3))
    future = c.compute(x.sum(), retries=2)
    y = await future

    test_function_code = inspect.getsource(
        test_computation_object_code_client_map.__wrapped__
    )
    computations = list(s.computations)
    assert len(computations) == 1
    comp = computations[0]
    assert len(comp.code) == 1
    assert comp.code[0] == test_function_code
@gen_cluster(client=True)
async def test_computation_object_code_client_compute(c, s, a, b):
    """Same as test_computation_object_code_client_map via Client.compute."""
    da = pytest.importorskip("dask.array")
    x = da.ones((10, 10), chunks=(3, 3))
    future = c.compute(x.sum(), retries=2)
    y = await future

    test_function_code = inspect.getsource(
        test_computation_object_code_client_compute.__wrapped__
    )
    computations = list(s.computations)
    assert len(computations) == 1
    comp = computations[0]
    assert len(comp.code) == 1
    assert comp.code[0] == test_function_code
@gen_cluster(client=True, Worker=Nanny)
async def test_upload_directory(c, s, a, b, tmp_path):
    """UploadDirectory ships a local directory to all (including new)
    workers and makes its modules importable, without littering the cwd."""
    from dask.distributed import UploadDirectory

    # Be sure to exclude code coverage reports
    files_start = {f for f in os.listdir() if not f.startswith(".coverage")}

    with open(tmp_path / "foo.py", "w") as f:
        f.write("x = 123")
    with open(tmp_path / "bar.py", "w") as f:
        f.write("from foo import x")

    plugin = UploadDirectory(tmp_path, restart=True, update_path=True)
    await c.register_worker_plugin(plugin)

    [name] = a.plugins
    assert os.path.split(tmp_path)[-1] in name

    def f():
        import bar

        return bar.x

    results = await c.run(f)
    assert results[a.worker_address] == 123
    assert results[b.worker_address] == 123

    # A worker joining afterwards also receives the uploaded directory
    async with Nanny(s.address, local_directory=tmp_path / "foo", name="foo") as n:
        results = await c.run(f)
        assert results[n.worker_address] == 123

    files_end = {f for f in os.listdir() if not f.startswith(".coverage")}
    assert files_start == files_end  # no change
@gen_cluster(client=True)
async def test_exception_text(c, s, a, b):
    """Scheduler TaskState stores textual exception and traceback of a
    failed task."""
    def bad(x):
        raise Exception(x)

    future = c.submit(bad, 123)
    await wait(future)

    ts = s.tasks[future.key]

    assert isinstance(ts.exception_text, str)
    assert "123" in ts.exception_text
    assert "Exception(x)" in ts.traceback_text
    assert "bad" in ts.traceback_text
@gen_cluster(client=True)
async def test_async_task(c, s, a, b):
    """A coroutine function submitted as a task is awaited on the worker."""

    async def add_one(x):
        return x + 1

    future = c.submit(add_one, 10)
    assert (await future) == 11
@gen_cluster(client=True)
async def test_async_task_with_partial(c, s, a, b):
    """functools.partial of a coroutine function works as a task."""

    async def add_plus_one(x, y):
        return x + y + 1

    partial_task = functools.partial(add_plus_one, 1)
    assert (await c.submit(partial_task, 10)) == 12
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic(c, s, a):
    """subscribe_topic delivers events to sync and async handlers; after
    unsubscribing, events are still stored on the scheduler but no longer
    delivered to the handler."""
    log = []

    def user_event_handler(event):
        log.append(event)

    c.subscribe_topic("test-topic", user_event_handler)

    while not s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)

    a.log_event("test-topic", {"important": "event"})

    while len(log) != 1:
        await asyncio.sleep(0.01)

    time_, msg = log[0]
    assert isinstance(time_, float)
    assert msg == {"important": "event"}

    c.unsubscribe_topic("test-topic")

    while s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)

    a.log_event("test-topic", {"forget": "me"})

    while len(s.events["test-topic"]) == 1:
        await asyncio.sleep(0.01)

    # Handler was unsubscribed, so it never saw the second event
    assert len(log) == 1

    async def async_user_event_handler(event):
        log.append(event)
        await asyncio.sleep(0)

    c.subscribe_topic("test-topic", async_user_event_handler)
    while not s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)

    a.log_event("test-topic", {"async": "event"})
    while len(log) == 1:
        await asyncio.sleep(0.01)

    assert len(log) == 2
    time_, msg = log[1]
    assert isinstance(time_, float)
    assert msg == {"async": "event"}

    # Even though the middle event was not subscribed to, the scheduler still
    # knows about all and we can retrieve them
    all_events = await c.get_events(topic="test-topic")
    assert len(all_events) == 3
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_subscribe_topic_cancelled(c, s, a):
    """An async event handler still running when the client closes is cancelled."""
    event_handler_started = asyncio.Event()
    exc_info = None
    async def user_event_handler(event):
        nonlocal exc_info
        c.unsubscribe_topic("test-topic")
        event_handler_started.set()
        # Closing the client should cancel this sleep; capture the
        # CancelledError so the outer test body can assert it happened.
        with pytest.raises(asyncio.CancelledError) as exc_info:
            await asyncio.sleep(0.5)
    c.subscribe_topic("test-topic", user_event_handler)
    # Wait until the scheduler has registered the subscription.
    while not s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)
    a.log_event("test-topic", {})
    await event_handler_started.wait()
    # fast=True tears down in-flight handler tasks instead of waiting for them.
    await c._close(fast=True)
    assert exc_info is not None
@gen_cluster(client=True, nthreads=[("", 1)])
async def test_events_all_servers_use_same_channel(c, s, a):
    """Ensure that logs from all server types (scheduler, worker, nanny)
    and the clients themselves arrive"""
    log = []
    def user_event_handler(event):
        # `event` is a (timestamp, message) tuple.
        log.append(event)
    c.subscribe_topic("test-topic", user_event_handler)
    # Wait until the scheduler has registered the subscription.
    while not s.event_subscriber["test-topic"]:
        await asyncio.sleep(0.01)
    async with Nanny(s.address) as n:
        a.log_event("test-topic", "worker")
        n.log_event("test-topic", "nanny")
        s.log_event("test-topic", "scheduler")
        await c.log_event("test-topic", "client")
        # All four events must arrive on the one subscribed channel and be
        # distinct (set() deduplicates identical tuples).
        while not len(log) == 4 == len(set(log)):
            await asyncio.sleep(0.1)
@gen_cluster(client=True, nthreads=[])
async def test_events_unsubscribe_raises_if_unknown(c, s):
    """Unsubscribing from a topic that was never subscribed to raises ValueError."""
    expected = "No event handler known for topic unknown"
    with pytest.raises(ValueError, match=expected):
        c.unsubscribe_topic("unknown")
@gen_cluster(client=True)
async def test_log_event_warn(c, s, a, b):
    """Events logged to the 'warn' topic surface as warnings on the client."""

    def foo():
        topics = ["foo", "warn"]
        get_worker().log_event(topics, "Hello!")

    with pytest.warns(Warning, match="Hello!"):
        await c.submit(foo)
@gen_cluster(client=True)
async def test_log_event_warn_dask_warns(c, s, a, b):
    """dask.distributed.warn() called on a worker re-raises on the client."""
    from dask.distributed import warn

    message = "Hello!"

    def foo():
        warn(message)

    with pytest.warns(Warning, match=message):
        await c.submit(foo)
@gen_cluster(client=True, Worker=Nanny)
async def test_print(c, s, a, b, capsys):
    """dask's print() on a (nanny-hosted) worker forwards output to the client."""
    from dask.distributed import print

    def foo():
        print("Hello!", 123, sep=":")

    await c.submit(foo)
    captured = capsys.readouterr()
    assert "Hello!:123" in captured.out
@gen_cluster(client=True, Worker=Nanny)
async def test_print_non_msgpack_serializable(c, s, a, b, capsys):
    """Arguments msgpack cannot serialize are still printed (via their repr)."""
    from dask.distributed import print

    def foo():
        print(object())

    await c.submit(foo)
    captured = capsys.readouterr()
    assert "<object object at" in captured.out
def test_print_simple(capsys):
    """Outside a cluster, dask's print() behaves like the builtin."""
    from dask.distributed import print

    print("Hello!", 123, sep=":")
    captured = capsys.readouterr()
    assert "Hello!:123" in captured.out
def _verify_cluster_dump(path, _format: str, addresses: set[str]) -> dict:
    """Load a cluster-state dump written in ``_format`` and sanity-check it.

    ``path`` is the dump path *without* extension (the writer appends
    ``.msgpack.gz`` or ``.yaml``). Returns the deserialized state dict.
    """
    if _format == "msgpack":
        import gzip

        import msgpack

        with gzip.open(str(path) + ".msgpack.gz") as fd_zip:
            state = msgpack.unpack(fd_zip)
    else:
        import yaml

        with open(str(path) + ".yaml") as fd_plain:
            state = yaml.safe_load(fd_plain)

    assert isinstance(state, dict)
    # The dump always carries these three top-level sections.
    for section in ("scheduler", "workers", "versions"):
        assert section in state
    # One per-worker entry for exactly the expected addresses.
    assert state["workers"].keys() == addresses
    return state
@pytest.mark.parametrize("_format", ["msgpack", "yaml"])
def test_dump_cluster_state_sync(c, s, a, b, tmp_path, _format):
    """A synchronous client can dump cluster state in either supported format."""
    target = tmp_path / "foo"
    c.dump_cluster_state(target, format=_format)
    worker_addresses = {a["address"], b["address"]}
    _verify_cluster_dump(target, _format, worker_addresses)
@pytest.mark.parametrize("_format", ["msgpack", "yaml"])
@gen_cluster(client=True)
async def test_dump_cluster_state_async(c, s, a, b, tmp_path, _format):
    """An asynchronous client can dump cluster state in either supported format."""
    target = tmp_path / "foo"
    await c.dump_cluster_state(target, format=_format)
    _verify_cluster_dump(target, _format, {a.address, b.address})
@gen_cluster(client=True)
async def test_dump_cluster_state_json(c, s, a, b, tmp_path):
    """Only msgpack and yaml are supported dump formats; json is rejected."""
    with pytest.raises(ValueError, match="Unsupported format"):
        await c.dump_cluster_state(tmp_path / "foo", format="json")
@gen_cluster(client=True)
async def test_dump_cluster_state_exclude_default(c, s, a, b, tmp_path):
    """By default ``dump_cluster_state`` omits ``run_spec`` from every task
    dump (worker- and scheduler-side); passing ``exclude=()`` includes it."""
    futs = c.map(inc, range(10))
    while len(s.tasks) != len(futs):
        await asyncio.sleep(0.01)
    excluded_by_default = [
        "run_spec",
    ]
    filename = tmp_path / "foo"

    def check_dump(state, present):
        # Assert every task dump contains the excluded keys iff `present`,
        # and that every dumped task key is a real scheduler task.
        assert "workers" in state
        assert len(state["workers"]) == len(s.workers)
        for worker_dump in state["workers"].values():
            for k, task_dump in worker_dump["tasks"].items():
                assert all(
                    (blocked in task_dump) == present
                    for blocked in excluded_by_default
                )
                assert k in s.tasks
        assert "scheduler" in state
        assert "tasks" in state["scheduler"]
        tasks = state["scheduler"]["tasks"]
        assert len(tasks) == len(futs)
        for k, task_dump in tasks.items():
            assert all(
                (blocked in task_dump) == present
                for blocked in excluded_by_default
            )
            assert k in s.tasks

    await c.dump_cluster_state(
        filename=filename,
        format="yaml",
    )
    # BUG FIX: this previously opened f"(unknown).yaml" — a mangled path
    # template. dump_cluster_state writes to <filename>.yaml (it appends the
    # extension), as the sibling tests and _verify_cluster_dump show.
    with open(f"{filename}.yaml") as fd:
        state = yaml.safe_load(fd)
    check_dump(state, present=False)

    await c.dump_cluster_state(
        filename=filename,
        format="yaml",
        exclude=(),
    )
    with open(f"{filename}.yaml") as fd:
        state = yaml.safe_load(fd)
    check_dump(state, present=True)
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_dump_cluster_state_error(c, s, a, b, tmp_path):
    """An unreachable worker is reported as an error string in the dump while
    reachable workers still contribute full state."""
    # Stop worker `a` so connecting to it times out (200ms, configured above).
    a.stop()
    filename = tmp_path / "foo"
    await c.dump_cluster_state(filename, format="yaml")
    state = _verify_cluster_dump(filename, "yaml", {a.address, b.address})
    # The dead worker's entry is the repr of the connect failure...
    assert state["workers"][a.address] == (
        f"OSError('Timed out trying to connect to {a.address} after 0.2 s')"
    )
    # ...while the live worker produced a regular state dict, and only the
    # live worker shows up in the version information.
    assert isinstance(state["workers"][b.address], dict)
    assert state["versions"]["workers"].keys() == {b.address}
class TestClientSecurityLoader:
    """Tests for the ``distributed.client.security-loader`` config hook, which
    names a callable that supplies a Security object at connect time."""
    @contextmanager
    def config_loader(self, monkeypatch, loader):
        # Publish `loader` under a fake module name and point the
        # security-loader config at it for the duration of the context.
        module_name = "totally_fake_module_name_1"
        module = types.ModuleType(module_name)
        module.loader = loader
        with monkeypatch.context() as m:
            m.setitem(sys.modules, module_name, module)
            with dask.config.set(
                {"distributed.client.security-loader": f"{module_name}.loader"}
            ):
                yield
    @pytest.mark.asyncio
    async def test_security_loader(self, monkeypatch):
        """The configured loader supplies the client's Security object."""
        security = tls_only_security()
        async with Scheduler(
            dashboard_address=":0", protocol="tls", security=security
        ) as scheduler:
            def loader(info):
                # The loader is handed the address being connected to.
                assert info == {"address": scheduler.address}
                return security
            with self.config_loader(monkeypatch, loader):
                async with Client(scheduler.address, asynchronous=True) as client:
                    assert client.security is security
    @pytest.mark.asyncio
    async def test_security_loader_ignored_if_explicit_security_provided(
        self, monkeypatch
    ):
        """An explicit ``security=`` argument bypasses the configured loader."""
        security = tls_only_security()
        def loader(info):
            # Must never be called when security is passed explicitly.
            assert False
        async with Scheduler(
            dashboard_address=":0", protocol="tls", security=security
        ) as scheduler:
            with self.config_loader(monkeypatch, loader):
                async with Client(
                    scheduler.address, security=security, asynchronous=True
                ) as client:
                    assert client.security is security
    @pytest.mark.asyncio
    async def test_security_loader_ignored_if_returns_none(self, monkeypatch):
        """Test that if a security loader is configured, but it returns `None`,
        then the default security configuration is used"""
        ca_file = get_cert("tls-ca-cert.pem")
        keycert = get_cert("tls-key-cert.pem")
        config = {
            "distributed.comm.require-encryption": True,
            "distributed.comm.tls.ca-file": ca_file,
            "distributed.comm.tls.client.cert": keycert,
            "distributed.comm.tls.scheduler.cert": keycert,
            "distributed.comm.tls.worker.cert": keycert,
        }
        def loader(info):
            # Record the call so the test can assert the loader really ran.
            loader.called = True
            return None
        with dask.config.set(config):
            async with Scheduler(dashboard_address=":0", protocol="tls") as scheduler:
                # Smoketest to make sure config was picked up (so we're actually testing something)
                assert scheduler.security.tls_client_cert
                assert scheduler.security.tls_scheduler_cert
                with self.config_loader(monkeypatch, loader):
                    async with Client(scheduler.address, asynchronous=True) as client:
                        assert (
                            client.security.tls_client_cert
                            == scheduler.security.tls_client_cert
                        )
        assert loader.called
    @pytest.mark.asyncio
    async def test_security_loader_import_failed(self):
        """A loader path that cannot be imported raises ImportError at connect."""
        security = tls_only_security()
        with dask.config.set(
            {"distributed.client.security-loader": "totally_fake_module_name_2.loader"}
        ):
            with pytest.raises(ImportError, match="totally_fake_module_name_2.loader"):
                async with Client("tls://bad-address:8888", asynchronous=True):
                    pass
@gen_cluster(client=True, nthreads=[])
async def test_wait_for_workers_updates_info(c, s):
    """After wait_for_workers returns, the cached scheduler info lists the worker."""
    async with Worker(s.address):
        await c.wait_for_workers(1)
        assert c.scheduler_info()["workers"]
|
test_s3boto3.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import gzip
import pickle
import threading
import warnings
from datetime import datetime
from textwrap import dedent
from unittest import skipIf
from botocore.exceptions import ClientError
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.test import TestCase, override_settings
from django.utils.timezone import is_aware, utc
from storages.backends import s3boto3
try:
from django.utils.six.moves.urllib import parse as urlparse
except ImportError:
from urllib import parse as urlparse
try:
from unittest import mock
except ImportError: # Python 3.2 and below
import mock
class S3Boto3TestCase(TestCase):
    """Base test case providing an S3Boto3Storage with a mocked boto3 connection."""
    def setUp(self):
        self.storage = s3boto3.S3Boto3Storage()
        # Replace the thread-local boto3 resource with a MagicMock so tests
        # make no network calls and can assert on the boto3 API usage.
        self.storage._connections.connection = mock.MagicMock()
class S3Boto3StorageTests(S3Boto3TestCase):
def test_clean_name(self):
"""
Test the base case of _clean_name
"""
path = self.storage._clean_name("path/to/somewhere")
self.assertEqual(path, "path/to/somewhere")
def test_clean_name_normalize(self):
"""
Test the normalization of _clean_name
"""
path = self.storage._clean_name("path/to/../somewhere")
self.assertEqual(path, "path/somewhere")
def test_clean_name_trailing_slash(self):
"""
Test the _clean_name when the path has a trailing slash
"""
path = self.storage._clean_name("path/to/somewhere/")
self.assertEqual(path, "path/to/somewhere/")
    def test_clean_name_windows(self):
        """
        Test that _clean_name converts Windows-style backslash
        separators to forward slashes
        """
        path = self.storage._clean_name("path\\to\\somewhere")
        self.assertEqual(path, "path/to/somewhere")
def test_pickle_with_bucket(self):
"""
Test that the storage can be pickled with a bucket attached
"""
# Ensure the bucket has been used
self.storage.bucket
self.assertIsNotNone(self.storage._bucket)
# Can't pickle MagicMock, but you can't pickle a real Bucket object either
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
# Put the mock connection back in
new_storage._connections.connection = mock.MagicMock()
self.assertIsNone(new_storage._bucket)
new_storage.bucket
self.assertIsNotNone(new_storage._bucket)
def test_pickle_without_bucket(self):
"""
Test that the storage can be pickled, without a bucket instance
"""
# Can't pickle a threadlocal
p = pickle.dumps(self.storage)
new_storage = pickle.loads(p)
self.assertIsInstance(new_storage._connections, threading.local)
def test_storage_url_slashes(self):
"""
Test URL generation.
"""
self.storage.custom_domain = 'example.com'
# We expect no leading slashes in the path,
# and trailing slashes should be preserved.
self.assertEqual(self.storage.url(''), 'https://example.com/')
self.assertEqual(self.storage.url('path'), 'https://example.com/path')
self.assertEqual(self.storage.url('path/'), 'https://example.com/path/')
self.assertEqual(self.storage.url('path/1'), 'https://example.com/path/1')
self.assertEqual(self.storage.url('path/1/'), 'https://example.com/path/1/')
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_with_acl(self):
"""
Test saving a file with user defined ACL.
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.default_acl = 'private'
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'text/plain',
'ACL': 'private',
}
)
def test_content_type(self):
"""
Test saving a file with a None content type.
"""
name = 'test_image.jpg'
content = ContentFile('data')
content.content_type = None
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'image/jpeg',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzipped(self):
"""
Test saving a gzipped file
"""
name = 'test_storage_save.gz'
content = ContentFile("I am gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
content,
ExtraArgs={
'ContentType': 'application/octet-stream',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_storage_save_gzip_twice(self):
"""
Test saving the same file content twice with gzip enabled.
"""
# Given
self.storage.gzip = True
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
# When
self.storage.save(name, content)
self.storage.save('test_storage_save_2.css', content)
# Then
obj = self.storage.bucket.Object.return_value
obj.upload_fileobj.assert_called_with(
mock.ANY,
ExtraArgs={
'ContentType': 'text/css',
'ContentEncoding': 'gzip',
'ACL': self.storage.default_acl,
}
)
args, kwargs = obj.upload_fileobj.call_args
content = args[0]
zfile = gzip.GzipFile(mode='rb', fileobj=content)
self.assertEqual(zfile.read(), b"I should be gzip'd")
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
self.storage.gzip = True
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writïng.txt'
content = 'new content'
# Set the encryption flag used for multipart uploads
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
file.write(content)
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
# Save the internal file before closing
multipart = obj.initiate_multipart_upload.return_value
multipart.parts.all.return_value = [mock.MagicMock(e_tag='123', part_number=1)]
file.close()
multipart.Part.assert_called_with(1)
part = multipart.Part.return_value
part.upload.assert_called_with(Body=content.encode('utf-8'))
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [{'ETag': '123', 'PartNumber': 1}]})
def test_storage_open_no_write(self):
"""
Test opening file in write mode and closing without writing.
A file should be created as by obj.put(...).
"""
name = 'test_open_no_write.txt'
# Set the encryption flag used for puts
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
obj.load.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
# Set the name of the mock object
obj.key = name
# Save the internal file before closing
file.close()
obj.load.assert_called_once_with()
obj.put.assert_called_once_with(
ACL='public-read',
Body=b"",
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
def test_storage_open_no_overwrite_existing(self):
"""
Test opening an existing file in write mode and closing without writing.
"""
name = 'test_open_no_overwrite_existing.txt'
# Set the encryption flag used for puts
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
# Save the internal file before closing
file.close()
obj.load.assert_called_once_with()
obj.put.assert_not_called()
def test_storage_write_beyond_buffer_size(self):
"""
Test writing content that exceeds the buffer size
"""
name = 'test_open_for_writïng_beyond_buffer_size.txt'
# Set the encryption flag used for multipart uploads
self.storage.encryption = True
self.storage.reduced_redundancy = True
self.storage.default_acl = 'public-read'
file = self.storage.open(name, 'w')
self.storage.bucket.Object.assert_called_with(name)
obj = self.storage.bucket.Object.return_value
# Set the name of the mock object
obj.key = name
# Initiate the multipart upload
file.write('')
obj.initiate_multipart_upload.assert_called_with(
ACL='public-read',
ContentType='text/plain',
ServerSideEncryption='AES256',
StorageClass='REDUCED_REDUNDANCY'
)
multipart = obj.initiate_multipart_upload.return_value
# Write content at least twice as long as the buffer size
written_content = ''
counter = 1
while len(written_content) < 2 * file.buffer_size:
content = 'hello, aws {counter}\n'.format(counter=counter)
# Write more than just a few bytes in each iteration to keep the
# test reasonably fast
content += '*' * int(file.buffer_size / 10)
file.write(content)
written_content += content
counter += 1
# Save the internal file before closing
multipart.parts.all.return_value = [
mock.MagicMock(e_tag='123', part_number=1),
mock.MagicMock(e_tag='456', part_number=2)
]
file.close()
self.assertListEqual(
multipart.Part.call_args_list,
[mock.call(1), mock.call(2)]
)
part = multipart.Part.return_value
uploaded_content = ''.join(
args_list[1]['Body'].decode('utf-8')
for args_list in part.upload.call_args_list
)
self.assertEqual(uploaded_content, written_content)
multipart.complete.assert_called_once_with(
MultipartUpload={'Parts': [
{'ETag': '123', 'PartNumber': 1},
{'ETag': '456', 'PartNumber': 2},
]}
)
def test_auto_creating_bucket(self):
self.storage.auto_create_bucket = True
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_auto_creating_bucket_with_acl(self):
self.storage.auto_create_bucket = True
self.storage.bucket_acl = 'public-read'
Bucket = mock.MagicMock()
self.storage._connections.connection.Bucket.return_value = Bucket
self.storage._connections.connection.meta.client.meta.region_name = 'sa-east-1'
Bucket.meta.client.head_bucket.side_effect = ClientError({'Error': {},
'ResponseMetadata': {'HTTPStatusCode': 404}},
'head_bucket')
self.storage._get_or_create_bucket('testbucketname')
Bucket.create.assert_called_once_with(
ACL='public-read',
CreateBucketConfiguration={
'LocationConstraint': 'sa-east-1',
}
)
def test_storage_exists(self):
self.assertTrue(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key="file.txt",
)
def test_storage_exists_false(self):
self.storage.connection.meta.client.head_object.side_effect = ClientError(
{'Error': {'Code': '404', 'Message': 'Not Found'}},
'HeadObject',
)
self.assertFalse(self.storage.exists("file.txt"))
self.storage.connection.meta.client.head_object.assert_called_with(
Bucket=self.storage.bucket_name,
Key='file.txt',
)
def test_storage_exists_doesnt_create_bucket(self):
with mock.patch.object(self.storage, '_get_or_create_bucket') as method:
self.storage.exists('file.txt')
self.assertFalse(method.called)
def test_storage_delete(self):
self.storage.delete("path/to/file.txt")
self.storage.bucket.Object.assert_called_with('path/to/file.txt')
self.storage.bucket.Object.return_value.delete.assert_called_with()
def test_storage_listdir_base(self):
# Files:
# some/path/1.txt
# 2.txt
# other/path/3.txt
# 4.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some'},
{'Prefix': 'other'},
],
'Contents': [
{'Key': '2.txt'},
{'Key': '4.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='')
self.assertEqual(dirs, ['some', 'other'])
self.assertEqual(files, ['2.txt', '4.txt'])
def test_storage_listdir_subdir(self):
# Files:
# some/path/1.txt
# some/2.txt
pages = [
{
'CommonPrefixes': [
{'Prefix': 'some/path'},
],
'Contents': [
{'Key': 'some/2.txt'},
],
},
]
paginator = mock.MagicMock()
paginator.paginate.return_value = pages
self.storage._connections.connection.meta.client.get_paginator.return_value = paginator
dirs, files = self.storage.listdir('some/')
paginator.paginate.assert_called_with(Bucket=None, Delimiter='/', Prefix='some/')
self.assertEqual(dirs, ['path'])
self.assertEqual(files, ['2.txt'])
def test_storage_size(self):
obj = self.storage.bucket.Object.return_value
obj.content_length = 4098
name = 'file.txt'
self.assertEqual(self.storage.size(name), obj.content_length)
def test_storage_mtime(self):
# Test both USE_TZ cases
for use_tz in (True, False):
with self.settings(USE_TZ=use_tz):
self._test_storage_mtime(use_tz)
def _test_storage_mtime(self, use_tz):
obj = self.storage.bucket.Object.return_value
obj.last_modified = datetime.now(utc)
name = 'file.txt'
self.assertFalse(
is_aware(self.storage.modified_time(name)),
'Naive datetime object expected from modified_time()'
)
self.assertIs(
settings.USE_TZ,
is_aware(self.storage.get_modified_time(name)),
'{} datetime object expected from get_modified_time() when USE_TZ={}'.format(
('Naive', 'Aware')[settings.USE_TZ],
settings.USE_TZ
)
)
def test_storage_url(self):
name = 'test_storage_size.txt'
url = 'http://aws.amazon.com/%s' % name
self.storage.bucket.meta.client.generate_presigned_url.return_value = url
self.storage.bucket.name = 'bucket'
self.assertEqual(self.storage.url(name), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire,
HttpMethod=None,
)
custom_expire = 123
self.assertEqual(self.storage.url(name, expire=custom_expire), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=custom_expire,
HttpMethod=None,
)
custom_method = 'HEAD'
self.assertEqual(self.storage.url(name, http_method=custom_method), url)
self.storage.bucket.meta.client.generate_presigned_url.assert_called_with(
'get_object',
Params={'Bucket': self.storage.bucket.name, 'Key': name},
ExpiresIn=self.storage.querystring_expire,
HttpMethod=custom_method,
)
def test_storage_url_custom_domain_signed_urls(self):
key_id = 'test-key'
filename = 'file.txt'
pem = dedent(
'''\
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQCXVuwcMk+JmVSKuQ1K4dZx4Z1dEcRQgTlqvhAyljIpttXlZh2/
fD3GkJCiqfwEmo+cdNK/LFzRj/CX8Wz1z1lH2USONpG6sAkotkatCbejiItDu5y6
janGJHfuWXu6B/o9gwZylU1gIsePY3lLNk+r9QhXUO4jXw6zLJftVwKPhQIDAQAB
AoGAbpkRV9HUmoQ5al+uPSkp5HOy4s8XHpYxdbaMc8ubwSxiyJCF8OhE5RXE/Xso
N90UUox1b0xmUKfWddPzgvgTD/Ub7D6Ukf+nVWDX60tWgNxICAUHptGL3tWweaAy
H+0+vZ0TzvTt9r00vW0FzO7F8X9/Rs1ntDRLtF3RCCxdq0kCQQDHFu+t811lCvy/
67rMEKGvNsNNSTrzOrNr3PqUrCnOrzKazjFVjsKv5VzI/U+rXGYKWJsMpuCFiHZ3
DILUC09TAkEAwpm2S6MN6pzn9eY6pmhOxZ+GQGGRUkKZfC1GDxaRSRb8sKTjptYw
WSemJSxiDzdj3Po2hF0lbhkpJgUq6xnCxwJAZgHHfn5CLSJrDD7Q7/vZi/foK3JJ
BRTfl3Wa4pAvv5meuRjKyEakVBGV79lyd5+ZHNX3Y40hXunjoO3FHrZIxwJAdRzu
waxahrRxQOKSr20c4wAzWnGddIUSO9I/VHs/al5EKsbBHrnOlQkwizSfuwqZtfZ7
csNf8FeCFRiNELoLJwJAZxWBE2+8J9VW9AQ0SE7j4FyM/B8FvRhF5PLAAsw/OxHO
SxiFP7Ptdac1tm5H5zOqaqSHWphI19HNNilXKmxuCA==
-----END RSA PRIVATE KEY-----'''
).encode('ascii')
url = 'https://mock.cloudfront.net/file.txt?Expires=3600&Signature=DbqVgh3FHtttQxof214tSAVE8Nqn3Q4Ii7eR3iykbOqAPbV89HC3EB~0CWxarpLNtbfosS5LxiP5EutriM7E8uR4Gm~UVY-PFUjPcwqdnmAiKJF0EVs7koJcMR8MKDStuWfFKVUPJ8H7ORYTOrixyHBV2NOrpI6SN5UX6ctNM50_&Key-Pair-Id=test-key' # noqa
self.storage.custom_domain = "mock.cloudfront.net"
for pem_to_signer in (
s3boto3._use_cryptography_signer(),
s3boto3._use_rsa_signer()):
self.storage.cloudfront_signer = pem_to_signer(key_id, pem)
with mock.patch('storages.backends.s3boto3.datetime') as mock_datetime:
mock_datetime.utcnow.return_value = datetime.utcfromtimestamp(0)
self.assertEqual(self.storage.url(filename), url)
def test_generated_url_is_encoded(self):
self.storage.custom_domain = "mock.cloudfront.net"
filename = "whacky & filename.mp4"
url = self.storage.url(filename)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path,
"/whacky%20%26%20filename.mp4")
self.assertFalse(self.storage.bucket.meta.client.generate_presigned_url.called)
def test_special_characters(self):
self.storage.custom_domain = "mock.cloudfront.net"
name = "ãlöhâ.jpg"
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.Object.assert_called_once_with(name)
url = self.storage.url(name)
parsed_url = urlparse.urlparse(url)
self.assertEqual(parsed_url.path, "/%C3%A3l%C3%B6h%C3%A2.jpg")
def test_strip_signing_parameters(self):
expected = 'http://bucket.s3-aws-region.amazonaws.com/foo/bar'
self.assertEqual(self.storage._strip_signing_parameters(
'%s?X-Amz-Date=12345678&X-Amz-Signature=Signature' % expected), expected)
self.assertEqual(self.storage._strip_signing_parameters(
'%s?expires=12345678&signature=Signature' % expected), expected)
    @skipIf(threading is None, 'Test requires threading')
    def test_connection_threading(self):
        """Each thread must see its own boto3 connection (thread-local storage)."""
        # NOTE(review): `threading` is imported unconditionally at module top,
        # so this skip condition can never fire — presumably a Python 2
        # `dummy_threading` leftover; confirm before removing the decorator.
        connections = []
        def thread_storage_connection():
            connections.append(self.storage.connection)
        # join() inside the loop runs the two threads sequentially; that is
        # fine here since only the identity of each connection matters.
        for x in range(2):
            t = threading.Thread(target=thread_storage_connection)
            t.start()
            t.join()
        # Connection for each thread needs to be unique
        self.assertIsNot(connections[0], connections[1])
def test_location_leading_slash(self):
msg = (
"S3Boto3Storage.location cannot begin with a leading slash. "
"Found '/'. Use '' instead."
)
with self.assertRaises(ImproperlyConfigured, msg=msg):
s3boto3.S3Boto3Storage(location='/')
def test_deprecated_acl(self):
with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage(acl='private')
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
message = (
"The acl argument of S3Boto3Storage is deprecated. Use argument "
"default_acl or setting AWS_DEFAULT_ACL instead. The acl argument "
"will be removed in version 1.10."
)
assert str(w[-1].message) == message
def test_deprecated_bucket(self):
with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage(bucket='django')
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
message = (
"The bucket argument of S3Boto3Storage is deprecated. Use argument "
"bucket_name or setting AWS_STORAGE_BUCKET_NAME instead. The bucket "
"argument will be removed in version 1.10."
)
assert str(w[-1].message) == message
def test_deprecated_default_acl(self):
with warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage()
assert len(w) == 1
message = (
"The default behavior of S3Boto3Storage is insecure and will change "
"in django-storages 1.10. By default files and new buckets are saved "
"with an ACL of 'public-read' (globally publicly readable). Version 1.10 will "
"default to using the bucket's ACL. To opt into the new behavior set "
"AWS_DEFAULT_ACL = None, otherwise to silence this warning explicitly "
"set AWS_DEFAULT_ACL."
)
assert str(w[-1].message) == message
def test_deprecated_autocreate_bucket(self):
with override_settings(AWS_DEFAULT_ACL=None), warnings.catch_warnings(record=True) as w:
s3boto3.S3Boto3Storage(auto_create_bucket=True)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
message = (
"Automatic bucket creation will be removed in version 1.10. It encourages "
"using overly broad credentials with this library. Either create it before "
"manually or use one of a myriad of automatic configuration management tools. "
"Unset AWS_AUTO_CREATE_BUCKET (it defaults to False) to silence this warning."
)
assert str(w[-1].message) == message
def test_deprecated_default_acl_override_class_variable(self):
class MyStorage(s3boto3.S3Boto3Storage):
default_acl = "private"
with warnings.catch_warnings(record=True) as w:
MyStorage()
assert len(w) == 0
def test_override_settings(self):
with override_settings(AWS_LOCATION='foo1'):
storage = s3boto3.S3Boto3Storage()
self.assertEqual(storage.location, 'foo1')
with override_settings(AWS_LOCATION='foo2'):
storage = s3boto3.S3Boto3Storage()
self.assertEqual(storage.location, 'foo2')
def test_override_class_variable(self):
class MyStorage1(s3boto3.S3Boto3Storage):
location = 'foo1'
storage = MyStorage1()
self.assertEqual(storage.location, 'foo1')
class MyStorage2(s3boto3.S3Boto3Storage):
location = 'foo2'
storage = MyStorage2()
self.assertEqual(storage.location, 'foo2')
def test_override_init_argument(self):
storage = s3boto3.S3Boto3Storage(location='foo1')
self.assertEqual(storage.location, 'foo1')
storage = s3boto3.S3Boto3Storage(location='foo2')
self.assertEqual(storage.location, 'foo2')
|
weiqi_online_2.py | #!/usr/bin/python3
# 使用Python内建GUI模組tkinter
from tkinter import *
# ttk覆寫tkinter部分物件,ttk對tkinter進行了優化
from tkinter.ttk import *
# deepcopy需要用到copy模組
from MyOwnPeer2PeerNode import MyOwnPeer2PeerNode
import copy
import tkinter.messagebox
import sys
import time
import threading
sys.path.insert(0, '..') # Import the files where the modules are located
# 围棋應用物件定義
class model(Tk):
    """Main (Tk root) window holding a Go (weiqi) board and the shared game state.

    Board cells in ``self.positions`` use: -1 = off-board sentinel border,
    0 = empty, 1 = black stone, 2 = white stone.  Coordinates are addressed
    as ``positions[y][x]`` throughout.
    """

    def __init__(self, my_mode_num=19):
        """Build the board window.

        my_mode_num: board size — 9, 13 or 19 lines (default 19).
        """
        Tk.__init__(self)
        # Peer-to-peer node handle — module-level global, presumably created
        # by the startup code before this window exists (TODO confirm).
        global Node
        self.node = Node
        # Whether this side holds the "host" privileges (start/reload/etc.);
        # also a module-level global.
        self.dominance = Dominance
        # Board mode: 9, 13 or 19 lines.
        self.mode_num = my_mode_num
        # Base scaling factor for the whole window; default 1.6.
        self.size = 1.6
        # Pixel edge length of one board cell.
        self.dd = 360 * self.size / (self.mode_num - 1)
        # Correction ratio relative to the 9-line board, used to offset
        # stone images so they sit on the intersections.
        self.p = 1 if self.mode_num == 9 else (2 / 3 if self.mode_num == 13 else 4 / 9)
        # Board array with a one-cell sentinel border:
        # -1 off-board, 0 empty, 1 black, 2 white.
        self.positions = [[0 for i in range(self.mode_num + 2)] for i in range(self.mode_num + 2)]
        # Mark every border cell as off-board (-1).
        for m in range(self.mode_num + 2):
            for n in range(self.mode_num + 2):
                if m * n == 0 or m == self.mode_num + 1 or n == self.mode_num + 1:
                    self.positions[m][n] = -1
        # Keep the last three board snapshots — needed for undo ("regret")
        # and for the ko (position-repetition) rule check.
        self.last_3_positions = copy.deepcopy(self.positions)
        self.last_2_positions = copy.deepcopy(self.positions)
        self.last_1_positions = copy.deepcopy(self.positions)
        # Move history ([x, y, colour] per move) and the stones each move captured.
        self.record = []
        self.record_take = []
        # Canvas item id of the translucent "shadow" stone following the mouse.
        self.cross_last = None
        # Player to move: 0 = black, 1 = white; black plays first.
        self.present = 0
        # Game is paused until "start" is pressed.
        self.stop = True
        # Stone and indicator images, loaded from ./Pictures/ relative to the
        # working directory (file names encode colour and board size).
        self.photoW = PhotoImage(file="./Pictures/W.png")
        self.photoB = PhotoImage(file="./Pictures/B.png")
        self.photoBD = PhotoImage(file="./Pictures/" + "BD" + "-" + str(self.mode_num) + ".png")
        self.photoWD = PhotoImage(file="./Pictures/" + "WD" + "-" + str(self.mode_num) + ".png")
        self.photoBU = PhotoImage(file="./Pictures/" + "BU" + "-" + str(self.mode_num) + ".png")
        self.photoWU = PhotoImage(file="./Pictures/" + "WU" + "-" + str(self.mode_num) + ".png")
        # Index-by-colour lists used to switch black/white images
        # ("U" = hover/shadow variant, "D" = placed-stone variant).
        self.photoWBU_list = [self.photoBU, self.photoWU]
        self.photoWBD_list = [self.photoBD, self.photoWD]
        # Window geometry.
        self.geometry(str(int(600 * self.size)) + 'x' + str(int(400 * self.size)))
        # Canvas container for the whole UI.
        self.canvas_bottom = Canvas(self, bg='#369', bd=0, width=600 * self.size, height=400 * self.size)
        self.canvas_bottom.place(x=0, y=0)
        # Board background fill.
        self.canvas_bottom.create_rectangle(0 * self.size, 0 * self.size, 400 * self.size, 400 * self.size,
                                            fill='#FFD700')
        # Outer border of the grid (thick line).
        self.canvas_bottom.create_rectangle(20 * self.size, 20 * self.size, 380 * self.size, 380 * self.size, width=3)
        # Star points: draw a dot at the centre, then move copies to the
        # other positions (9/13-line boards get 4 corner stars, 19 gets 9).
        for m in ([-1, 1] if self.mode_num == 9 else ([-1, 1] if self.mode_num == 13 else [-1, 0, 1])):
            for n in ([-1, 1] if self.mode_num == 9 else ([-1, 1] if self.mode_num == 13 else [-1, 0, 1])):
                self.oringinal = self.canvas_bottom.create_oval(200 * self.size - self.size * 2,
                                                                200 * self.size - self.size * 2,
                                                                200 * self.size + self.size * 2,
                                                                200 * self.size + self.size * 2, fill='#000')
                self.canvas_bottom.move(self.oringinal,
                                        m * self.dd * (2 if self.mode_num == 9 else (3 if self.mode_num == 13 else 6)),
                                        n * self.dd * (2 if self.mode_num == 9 else (3 if self.mode_num == 13 else 6)))
        # The 13-line board additionally gets a centre star point.
        if self.mode_num == 13:
            self.oringinal = self.canvas_bottom.create_oval(200 * self.size - self.size * 2,
                                                            200 * self.size - self.size * 2,
                                                            200 * self.size + self.size * 2,
                                                            200 * self.size + self.size * 2, fill='#000')
        # Inner grid lines.
        for i in range(1, self.mode_num - 1):
            self.canvas_bottom.create_line(20 * self.size, 20 * self.size + i * self.dd, 380 * self.size,
                                           20 * self.size + i * self.dd, width=2)
            self.canvas_bottom.create_line(20 * self.size + i * self.dd, 20 * self.size, 20 * self.size + i * self.dd,
                                           380 * self.size, width=2)
        # Initial black/white indicator images on the right-hand side.
        self.pW = self.canvas_bottom.create_image(500 * self.size + 11, 65 * self.size, image=self.photoW)
        self.pB = self.canvas_bottom.create_image(500 * self.size - 11, 65 * self.size, image=self.photoB)
        # Every image gets the 'image' tag so reload() can delete them all at once.
        self.canvas_bottom.addtag_withtag('image', self.pW)
        self.canvas_bottom.addtag_withtag('image', self.pB)

    def recover(self, list_to_recover, b_or_w):
        """Put stones back on the board (used by undo/ko rollback).

        list_to_recover: list of [x, y] coordinates to restore.
        b_or_w: 0 to restore black stones, 1 for white.
        """
        if len(list_to_recover) > 0:
            for i in range(len(list_to_recover)):
                self.positions[list_to_recover[i][1]][list_to_recover[i][0]] = b_or_w + 1
                self.image_added = self.canvas_bottom.create_image(
                    20 * self.size + (list_to_recover[i][0] - 1) * self.dd + 4 * self.p,
                    20 * self.size + (list_to_recover[i][1] - 1) * self.dd - 5 * self.p,
                    image=self.photoWBD_list[b_or_w])
                self.canvas_bottom.addtag_withtag('image', self.image_added)
                # NOTE(review): concatenating str(x)+str(y) is ambiguous for
                # double-digit coordinates — (1,12) and (11,2) both yield
                # 'position112'. Kept as-is for consistency with kill()/getDown().
                self.canvas_bottom.addtag_withtag('position' + str(list_to_recover[i][0]) + str(list_to_recover[i][1]),
                                                  self.image_added)

    def get_deadlist(self, x, y):
        """Return the opponent stones captured by a stone just played at (x, y).

        Checks the four orthogonal neighbours; each opposing group with no
        liberties (per if_dead) contributes its stones to the result.
        """
        deadlist = []
        for i in [-1, 1]:
            if self.positions[y][x + i] == (2 if self.present == 0 else 1) and ([x + i, y] not in deadlist):
                killList = self.if_dead([[x + i, y]], (2 if self.present == 0 else 1), [x + i, y])
                if not killList == False:
                    deadlist += copy.deepcopy(killList)
            if self.positions[y + i][x] == (2 if self.present == 0 else 1) and ([x, y + i] not in deadlist):
                killList = self.if_dead([[x, y + i]], (2 if self.present == 0 else 1), [x, y + i])
                if not killList == False:
                    deadlist += copy.deepcopy(killList)
        return deadlist

    def if_dead(self, deadList, yourChessman, yourPosition):
        """Recursive liberty search for the group containing yourPosition.

        deadList: stones of the group visited so far (starts as [yourPosition]).
        yourChessman: colour being tested (1 black, 2 white).
        Returns False as soon as any liberty (empty neighbour) is found;
        otherwise returns the full list of the group's stones (it is dead).
        """
        # A liberty next to any visited stone means the group is alive.
        for i in [-1, 1]:
            if [yourPosition[0] + i, yourPosition[1]] not in deadList:
                if self.positions[yourPosition[1]][yourPosition[0] + i] == 0:
                    return False
            if [yourPosition[0], yourPosition[1] + i] not in deadList:
                if self.positions[yourPosition[1] + i][yourPosition[0]] == 0:
                    return False
        # Otherwise recurse into each same-colour neighbour not yet visited;
        # one live neighbour makes the whole group alive.
        if ([yourPosition[0] + 1, yourPosition[1]] not in deadList) and (
                self.positions[yourPosition[1]][yourPosition[0] + 1] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0] + 1, yourPosition[1]]], yourChessman,
                                  [yourPosition[0] + 1, yourPosition[1]])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        if ([yourPosition[0] - 1, yourPosition[1]] not in deadList) and (
                self.positions[yourPosition[1]][yourPosition[0] - 1] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0] - 1, yourPosition[1]]], yourChessman,
                                  [yourPosition[0] - 1, yourPosition[1]])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        if ([yourPosition[0], yourPosition[1] + 1] not in deadList) and (
                self.positions[yourPosition[1] + 1][yourPosition[0]] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0], yourPosition[1] + 1]], yourChessman,
                                  [yourPosition[0], yourPosition[1] + 1])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        if ([yourPosition[0], yourPosition[1] - 1] not in deadList) and (
                self.positions[yourPosition[1] - 1][yourPosition[0]] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0], yourPosition[1] - 1]], yourChessman,
                                  [yourPosition[0], yourPosition[1] - 1])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        return deadList

    def kill(self, killList):
        """Remove captured stones from the board array and the canvas."""
        if len(killList) > 0:
            for i in range(len(killList)):
                self.positions[killList[i][1]][killList[i][0]] = 0
                self.canvas_bottom.delete('position' + str(killList[i][0]) + str(killList[i][1]))

    def getDown(self, x, y):
        """Place the current player's stone at board coordinate (x, y).

        Used for moves received from the remote peer: no legality checks
        (ko/suicide) are performed here. Draws the stone, removes any
        captured groups, updates the snapshot history, redraws the
        last-move marker and switches the player to move.
        """
        self.positions[y][x] = self.present + 1
        # Reconstruct the pixel position of the intersection from (x, y).
        ex = 30 + self.dd * (x - 1)
        ey = 30 + self.dd * (y - 1)
        dx = (ex - 20 * self.size) % self.dd
        dy = (ey - 20 * self.size) % self.dd
        self.image_added = self.canvas_bottom.create_image(
            ex - dx + round(dx / self.dd) * self.dd + 4 * self.p,
            ey - dy + round(dy / self.dd) * self.dd - 5 * self.p,
            image=self.photoWBD_list[self.present])
        self.canvas_bottom.addtag_withtag('image', self.image_added)
        # Tag the stone with its position so kill() can delete it later.
        self.canvas_bottom.addtag_withtag('position' + str(x) + str(y), self.image_added)
        deadlist = self.get_deadlist(x, y)
        self.kill(deadlist)
        # Record what this move captured (empty list when nothing was taken).
        if len(deadlist) > 0:
            temp = []
            for d in deadlist:
                temp.append([d[0], d[1]])
            self.record_take.append(temp)
        else:
            self.record_take.append([])
        # Shift the snapshot history by one move.
        self.last_3_positions = copy.deepcopy(self.last_2_positions)
        self.last_2_positions = copy.deepcopy(self.last_1_positions)
        self.last_1_positions = copy.deepcopy(self.positions)
        # Delete the previous last-move marker and draw a new one.
        self.canvas_bottom.delete('image_added_sign')
        self.image_added_sign = self.canvas_bottom.create_oval(
            ex - dx + round(dx / self.dd) * self.dd + 0.5 * self.dd,
            ey - dy + round(dy / self.dd) * self.dd + 0.5 * self.dd,
            ex - dx + round(dx / self.dd) * self.dd - 0.5 * self.dd,
            ey - dy + round(dy / self.dd) * self.dd - 0.5 * self.dd, width=3, outline='#3ae')
        self.canvas_bottom.addtag_withtag('image', self.image_added_sign)
        self.canvas_bottom.addtag_withtag('image_added_sign', self.image_added_sign)
        # Switch side to move and update the right-hand indicator.
        if self.present == 0:
            self.create_pW()
            self.del_pB()
            self.present = 1
        else:
            self.create_pB()
            self.del_pW()
            self.present = 0

    def create_pW(self):
        """Show the white 'to move' indicator image."""
        self.pW = self.canvas_bottom.create_image(500 * self.size + 11, 65 * self.size, image=self.photoW)
        self.canvas_bottom.addtag_withtag('image', self.pW)

    def create_pB(self):
        """Show the black 'to move' indicator image."""
        self.pB = self.canvas_bottom.create_image(500 * self.size - 11, 65 * self.size, image=self.photoB)
        self.canvas_bottom.addtag_withtag('image', self.pB)

    def del_pW(self):
        """Hide the white 'to move' indicator image."""
        self.canvas_bottom.delete(self.pW)

    def del_pB(self):
        """Hide the black 'to move' indicator image."""
        self.canvas_bottom.delete(self.pB)
class model2(Toplevel):
    """Secondary (Toplevel) board window; a near-verbatim duplicate of the
    Tk-based board class used for the game-record replay window.

    Board cells in ``self.positions`` use: -1 = off-board sentinel border,
    0 = empty, 1 = black stone, 2 = white stone (addressed as positions[y][x]).
    NOTE(review): this duplicates ~200 lines of the sibling Tk-based class;
    consider extracting the shared board logic into a mixin.
    """

    def __init__(self, main=None, my_mode_num=19):
        """Build the board window as a child Toplevel of *main*.

        my_mode_num: board size — 9, 13 or 19 lines (default 19).
        """
        Toplevel.__init__(self, main)
        # Peer-to-peer node handle — module-level global (assumed initialised
        # before this window is created — TODO confirm).
        global Node
        self.node = Node
        # Host privileges flag (module-level global).
        self.dominance = Dominance
        # Board mode: 9, 13 or 19 lines.
        self.mode_num = my_mode_num
        # Base scaling factor; default 1.6.
        self.size = 1.6
        # Pixel edge length of one board cell.
        self.dd = 360 * self.size / (self.mode_num - 1)
        # Correction ratio relative to the 9-line board for image offsets.
        self.p = 1 if self.mode_num == 9 else (2 / 3 if self.mode_num == 13 else 4 / 9)
        # Board array with sentinel border: -1 off-board, 0 empty, 1 black, 2 white.
        self.positions = [[0 for i in range(self.mode_num + 2)] for i in range(self.mode_num + 2)]
        # Mark border cells as off-board.
        for m in range(self.mode_num + 2):
            for n in range(self.mode_num + 2):
                if m * n == 0 or m == self.mode_num + 1 or n == self.mode_num + 1:
                    self.positions[m][n] = -1
        # Last three board snapshots for undo and ko checking.
        self.last_3_positions = copy.deepcopy(self.positions)
        self.last_2_positions = copy.deepcopy(self.positions)
        self.last_1_positions = copy.deepcopy(self.positions)
        # Move history and per-move capture lists.
        self.record = []
        self.record_take = []
        # Canvas id of the mouse-following shadow stone.
        self.cross_last = None
        # Player to move: 0 = black, 1 = white.
        self.present = 0
        # Paused until the game is started.
        self.stop = True
        # Images loaded from ./Pictures/ relative to the working directory.
        self.photoW = PhotoImage(file="./Pictures/W.png")
        self.photoB = PhotoImage(file="./Pictures/B.png")
        self.photoBD = PhotoImage(file="./Pictures/" + "BD" + "-" + str(self.mode_num) + ".png")
        self.photoWD = PhotoImage(file="./Pictures/" + "WD" + "-" + str(self.mode_num) + ".png")
        self.photoBU = PhotoImage(file="./Pictures/" + "BU" + "-" + str(self.mode_num) + ".png")
        self.photoWU = PhotoImage(file="./Pictures/" + "WU" + "-" + str(self.mode_num) + ".png")
        # Colour-indexed image lists ("U" hover variant, "D" placed variant).
        self.photoWBU_list = [self.photoBU, self.photoWU]
        self.photoWBD_list = [self.photoBD, self.photoWD]
        # Window geometry.
        self.geometry(str(int(600 * self.size)) + 'x' + str(int(400 * self.size)))
        # Canvas container.
        self.canvas_bottom = Canvas(self, bg='#369', bd=0, width=600 * self.size, height=400 * self.size)
        self.canvas_bottom.place(x=0, y=0)
        # Board background fill.
        self.canvas_bottom.create_rectangle(0 * self.size, 0 * self.size, 400 * self.size, 400 * self.size,
                                            fill='#FFD700')
        # Outer border of the grid.
        self.canvas_bottom.create_rectangle(20 * self.size, 20 * self.size, 380 * self.size, 380 * self.size, width=3)
        # Star points, moved out from the centre dot.
        for m in ([-1, 1] if self.mode_num == 9 else ([-1, 1] if self.mode_num == 13 else [-1, 0, 1])):
            for n in ([-1, 1] if self.mode_num == 9 else ([-1, 1] if self.mode_num == 13 else [-1, 0, 1])):
                self.oringinal = self.canvas_bottom.create_oval(200 * self.size - self.size * 2,
                                                                200 * self.size - self.size * 2,
                                                                200 * self.size + self.size * 2,
                                                                200 * self.size + self.size * 2, fill='#000')
                self.canvas_bottom.move(self.oringinal,
                                        m * self.dd * (2 if self.mode_num == 9 else (3 if self.mode_num == 13 else 6)),
                                        n * self.dd * (2 if self.mode_num == 9 else (3 if self.mode_num == 13 else 6)))
        # Centre star point for the 13-line board.
        if self.mode_num == 13:
            self.oringinal = self.canvas_bottom.create_oval(200 * self.size - self.size * 2,
                                                            200 * self.size - self.size * 2,
                                                            200 * self.size + self.size * 2,
                                                            200 * self.size + self.size * 2, fill='#000')
        # Inner grid lines.
        for i in range(1, self.mode_num - 1):
            self.canvas_bottom.create_line(20 * self.size, 20 * self.size + i * self.dd, 380 * self.size,
                                           20 * self.size + i * self.dd, width=2)
            self.canvas_bottom.create_line(20 * self.size + i * self.dd, 20 * self.size, 20 * self.size + i * self.dd,
                                           380 * self.size, width=2)
        # Initial indicator images; tagged 'image' so reload can clear them.
        self.pW = self.canvas_bottom.create_image(500 * self.size + 11, 65 * self.size, image=self.photoW)
        self.pB = self.canvas_bottom.create_image(500 * self.size - 11, 65 * self.size, image=self.photoB)
        self.canvas_bottom.addtag_withtag('image', self.pW)
        self.canvas_bottom.addtag_withtag('image', self.pB)

    def recover(self, list_to_recover, b_or_w):
        """Put stones back on the board (undo/ko rollback).

        list_to_recover: list of [x, y] coordinates; b_or_w: 0 black, 1 white.
        """
        if len(list_to_recover) > 0:
            for i in range(len(list_to_recover)):
                self.positions[list_to_recover[i][1]][list_to_recover[i][0]] = b_or_w + 1
                self.image_added = self.canvas_bottom.create_image(
                    20 * self.size + (list_to_recover[i][0] - 1) * self.dd + 4 * self.p,
                    20 * self.size + (list_to_recover[i][1] - 1) * self.dd - 5 * self.p,
                    image=self.photoWBD_list[b_or_w])
                self.canvas_bottom.addtag_withtag('image', self.image_added)
                # NOTE(review): str(x)+str(y) tags collide for double-digit
                # coordinates, e.g. (1,12) vs (11,2) — kept for consistency.
                self.canvas_bottom.addtag_withtag('position' + str(list_to_recover[i][0]) + str(list_to_recover[i][1]),
                                                  self.image_added)

    def get_deadlist(self, x, y):
        """Return opponent stones captured by a stone just played at (x, y)."""
        deadlist = []
        for i in [-1, 1]:
            if self.positions[y][x + i] == (2 if self.present == 0 else 1) and ([x + i, y] not in deadlist):
                killList = self.if_dead([[x + i, y]], (2 if self.present == 0 else 1), [x + i, y])
                if not killList == False:
                    deadlist += copy.deepcopy(killList)
            if self.positions[y + i][x] == (2 if self.present == 0 else 1) and ([x, y + i] not in deadlist):
                killList = self.if_dead([[x, y + i]], (2 if self.present == 0 else 1), [x, y + i])
                if not killList == False:
                    deadlist += copy.deepcopy(killList)
        return deadlist

    def if_dead(self, deadList, yourChessman, yourPosition):
        """Recursive liberty search: False if the group has a liberty,
        otherwise the list of the dead group's stones."""
        # Any empty orthogonal neighbour is a liberty — group is alive.
        for i in [-1, 1]:
            if [yourPosition[0] + i, yourPosition[1]] not in deadList:
                if self.positions[yourPosition[1]][yourPosition[0] + i] == 0:
                    return False
            if [yourPosition[0], yourPosition[1] + i] not in deadList:
                if self.positions[yourPosition[1] + i][yourPosition[0]] == 0:
                    return False
        # Recurse into unvisited same-colour neighbours.
        if ([yourPosition[0] + 1, yourPosition[1]] not in deadList) and (
                self.positions[yourPosition[1]][yourPosition[0] + 1] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0] + 1, yourPosition[1]]], yourChessman,
                                  [yourPosition[0] + 1, yourPosition[1]])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        if ([yourPosition[0] - 1, yourPosition[1]] not in deadList) and (
                self.positions[yourPosition[1]][yourPosition[0] - 1] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0] - 1, yourPosition[1]]], yourChessman,
                                  [yourPosition[0] - 1, yourPosition[1]])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        if ([yourPosition[0], yourPosition[1] + 1] not in deadList) and (
                self.positions[yourPosition[1] + 1][yourPosition[0]] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0], yourPosition[1] + 1]], yourChessman,
                                  [yourPosition[0], yourPosition[1] + 1])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        if ([yourPosition[0], yourPosition[1] - 1] not in deadList) and (
                self.positions[yourPosition[1] - 1][yourPosition[0]] == yourChessman):
            midvar = self.if_dead(deadList + [[yourPosition[0], yourPosition[1] - 1]], yourChessman,
                                  [yourPosition[0], yourPosition[1] - 1])
            if not midvar:
                return False
            else:
                deadList += copy.deepcopy(midvar)
        return deadList

    def kill(self, killList):
        """Remove captured stones from the array and the canvas."""
        if len(killList) > 0:
            for i in range(len(killList)):
                self.positions[killList[i][1]][killList[i][0]] = 0
                self.canvas_bottom.delete('position' + str(killList[i][0]) + str(killList[i][1]))

    def getDown(self, x, y):
        """Place the current player's stone at (x, y) without legality checks;
        used when replaying recorded moves. Mirrors the live-game placement:
        draw, capture, shift snapshots, mark last move, switch player."""
        self.positions[y][x] = self.present + 1
        # Pixel position of the intersection.
        ex = 30 + self.dd * (x - 1)
        ey = 30 + self.dd * (y - 1)
        dx = (ex - 20 * self.size) % self.dd
        dy = (ey - 20 * self.size) % self.dd
        self.image_added = self.canvas_bottom.create_image(
            ex - dx + round(dx / self.dd) * self.dd + 4 * self.p,
            ey - dy + round(dy / self.dd) * self.dd - 5 * self.p,
            image=self.photoWBD_list[self.present])
        self.canvas_bottom.addtag_withtag('image', self.image_added)
        # Tag stone with its position so kill() can delete it.
        self.canvas_bottom.addtag_withtag('position' + str(x) + str(y), self.image_added)
        deadlist = self.get_deadlist(x, y)
        self.kill(deadlist)
        # Record captures made by this move.
        if len(deadlist) > 0:
            temp = []
            for d in deadlist:
                temp.append([d[0], d[1]])
            self.record_take.append(temp)
        else:
            self.record_take.append([])
        # Shift snapshot history.
        self.last_3_positions = copy.deepcopy(self.last_2_positions)
        self.last_2_positions = copy.deepcopy(self.last_1_positions)
        self.last_1_positions = copy.deepcopy(self.positions)
        # Replace the last-move marker.
        self.canvas_bottom.delete('image_added_sign')
        self.image_added_sign = self.canvas_bottom.create_oval(
            ex - dx + round(dx / self.dd) * self.dd + 0.5 * self.dd,
            ey - dy + round(dy / self.dd) * self.dd + 0.5 * self.dd,
            ex - dx + round(dx / self.dd) * self.dd - 0.5 * self.dd,
            ey - dy + round(dy / self.dd) * self.dd - 0.5 * self.dd, width=3, outline='#3ae')
        self.canvas_bottom.addtag_withtag('image', self.image_added_sign)
        self.canvas_bottom.addtag_withtag('image_added_sign', self.image_added_sign)
        # Switch side to move.
        if self.present == 0:
            self.create_pW()
            self.del_pB()
            self.present = 1
        else:
            self.create_pB()
            self.del_pW()
            self.present = 0

    def create_pW(self):
        """Show the white 'to move' indicator."""
        self.pW = self.canvas_bottom.create_image(500 * self.size + 11, 65 * self.size, image=self.photoW)
        self.canvas_bottom.addtag_withtag('image', self.pW)

    def create_pB(self):
        """Show the black 'to move' indicator."""
        self.pB = self.canvas_bottom.create_image(500 * self.size - 11, 65 * self.size, image=self.photoB)
        self.canvas_bottom.addtag_withtag('image', self.pB)

    def del_pW(self):
        """Hide the white 'to move' indicator."""
        self.canvas_bottom.delete(self.pW)

    def del_pB(self):
        """Hide the black 'to move' indicator."""
        self.canvas_bottom.delete(self.pB)
class Application(model):
    """Interactive networked game window: adds buttons, mouse handling,
    rule enforcement (ko / suicide) and peer-to-peer synchronisation on top
    of the board model."""

    def __init__(self):
        """Create the game window with the board size taken from the
        module-level ``mode_num`` global (defaults to a 19-line board)."""
        model.__init__(self, mode_num)
        # Action buttons down the right-hand side.
        self.startButton = Button(self, text='開始對局', command=self.start)
        self.startButton.place(x=420 * self.size, y=200 * self.size)
        self.passmeButton = Button(self, text='停一手', command=self.passme)
        self.passmeButton.place(x=420 * self.size, y=225 * self.size)
        self.regretButton = Button(self, text='悔棋', command=self.regret)
        self.regretButton.place(x=420 * self.size, y=250 * self.size)
        self.replayButton = Button(self, text='重新開始', command=self.reload)
        self.replayButton.place(x=420 * self.size, y=275 * self.size)
        self.newGameButton1 = Button(self, text=('十三' if self.mode_num == 9 else '九') + '路棋', command=self.newGame1)
        self.newGameButton1.place(x=420 * self.size, y=300 * self.size)
        self.newGameButton2 = Button(self, text=('十三' if self.mode_num == 19 else '十九') + '路棋', command=self.newGame2)
        self.newGameButton2.place(x=420 * self.size, y=325 * self.size)
        self.quitButton = Button(self, text='退出棋局', command=self.quit)
        self.quitButton.place(x=420 * self.size, y=350 * self.size)
        self.territoryButton = Button(self, text='算地', command=self.territory)
        self.territoryButton.place(x=500 * self.size, y=200 * self.size)
        self.recordButton1 = Button(self, text='棋譜匯出', command=self.save_record)
        self.recordButton1.place(x=500 * self.size, y=225 * self.size)
        self.recordButton2 = Button(self, text='棋譜匯入', command=self.load_record)
        self.recordButton2.place(x=500 * self.size, y=250 * self.size)
        # Most buttons stay disabled until the peer connection is up
        # (see connect(), which re-enables them for the dominant side).
        self.startButton['state'] = DISABLED
        self.newGameButton1['state'] = DISABLED
        self.newGameButton2['state'] = DISABLED
        self.passmeButton['state'] = DISABLED
        self.regretButton['state'] = DISABLED
        self.replayButton['state'] = DISABLED
        self.territoryButton['state'] = DISABLED
        self.recordButton2['state'] = DISABLED
        # Mouse motion shows a shadow stone; left click attempts a move.
        self.canvas_bottom.bind('<Motion>', self.shadow)
        self.canvas_bottom.bind('<Button-1>', self.getDown)
        # <Ctrl>+<D> quits the game quickly.
        self.bind('<Control-KeyPress-d>', self.keyboardQuit)
        # Background thread that waits for the remote peer.
        self.t = threading.Thread(target=self.connect)
        self.t.start()

    def start(self):
        """Start the game (bound to the start button).

        Disables re-configuration buttons, replaces the idle indicators with
        the to-move indicator, and — when black moves first on this side —
        unblocks play and notifies the peer.
        """
        self.newGameButton1['state'] = DISABLED
        self.newGameButton2['state'] = DISABLED
        self.startButton['state'] = DISABLED
        self.territoryButton['state'] = NORMAL
        # Remove the idle black/white images on the right.
        self.canvas_bottom.delete(self.pW)
        self.canvas_bottom.delete(self.pB)
        # Show whose turn it is; black starting locally also clears the
        # stop flag and tells the peer the game has started.
        if self.present == 0:
            self.create_pB()
            self.del_pW()
            self.stop = False
            self.passmeButton['state'] = NORMAL
            self.node.myNode.send_to_nodes({"Start": True})
        else:
            self.create_pW()
            self.del_pB()

    def passme(self):
        """Pass (skip this side's move).

        Advances the snapshot history without placing a stone, clears the
        last-move marker, switches the player to move, and toggles the
        local stop flag (notifying the peer when it was our turn).
        """
        # Advance the three-deep snapshot history by one (empty) move.
        self.last_3_positions = copy.deepcopy(self.last_2_positions)
        self.last_2_positions = copy.deepcopy(self.last_1_positions)
        self.last_1_positions = copy.deepcopy(self.positions)
        self.canvas_bottom.delete('image_added_sign')
        # Switch side to move.
        if self.present == 0:
            self.create_pW()
            self.del_pB()
            self.present = 1
        else:
            self.create_pB()
            self.del_pW()
            self.present = 0
        # Toggle whether this side may play; only notify the peer when the
        # pass originated locally (stop was False).
        if not self.stop:
            self.node.myNode.send_to_nodes({"pass": True})
            self.stop = True
        else:
            self.stop = False

    def regret(self):
        """Undo one full round (both players' last moves).

        Restores the board from the snapshot taken three half-moves ago,
        trims the move/capture history, rebuilds the older snapshots from
        the trimmed record, and notifies the peer when triggered locally.
        NOTE(review): the snapshot-rebuild logic below is intricate and the
        interleaving of record/record_take fixups looks fragile — worth a
        dedicated test before touching.
        """
        list_of_b = []
        list_of_w = []
        # Wipe all stones/markers from the canvas and redraw the indicator.
        self.canvas_bottom.delete('image')
        if self.present == 0:
            self.create_pB()
        else:
            self.create_pW()
        # Clear the playable area of the board array.
        for m in range(1, self.mode_num + 1):
            for n in range(1, self.mode_num + 1):
                self.positions[m][n] = 0
        # Collect stones from the snapshot three half-moves back and redraw them.
        for m in range(len(self.last_3_positions)):
            for n in range(len(self.last_3_positions[m])):
                if self.last_3_positions[m][n] == 1:
                    list_of_b += [[n, m]]
                elif self.last_3_positions[m][n] == 2:
                    list_of_w += [[n, m]]
        self.recover(list_of_b, 0)
        self.recover(list_of_w, 1)
        self.last_1_positions = copy.deepcopy(self.last_3_positions)
        # Drop the last two half-moves from the history.
        self.record = copy.deepcopy(self.record[:-2])
        self.record_take = copy.deepcopy(self.record_take[:-2])
        # Disallow undo when fewer than two moves remain.
        if len(self.record) < 2:
            self.regretButton['state'] = DISABLED
        # Rebuild last_2_positions / last_3_positions from the trimmed record.
        for m in range(1, self.mode_num + 1):
            for n in range(1, self.mode_num + 1):
                self.last_2_positions[m][n] = 0
                self.last_3_positions[m][n] = 0
        # Replay the remaining record into the older snapshots.
        for r in self.record[:-2]:
            if r[2] == 1:
                self.last_3_positions[r[1]][r[0]] = 1
            elif r[2] == 2:
                self.last_3_positions[r[1]][r[0]] = 2
        for r in self.record[:-1]:
            if r[2] == 1:
                self.last_2_positions[r[1]][r[0]] = 1
            elif r[2] == 2:
                self.last_2_positions[r[1]][r[0]] = 2
        # Remove stones that had been captured by the time of each snapshot.
        if len(self.record_take) > 2:
            for t in self.record_take[-3]:
                self.last_3_positions[t[1]][t[0]] = 0
        if len(self.record_take) > 1:
            for t in self.record_take[-2]:
                self.last_2_positions[t[1]][t[0]] = 0
        # Re-add stones that were still on the board before being captured.
        if len(self.record_take) > 1:
            for t in self.record_take[-2]:
                if self.present == 1:
                    self.last_3_positions[t[1]][t[0]] = 1
                else:
                    self.last_3_positions[t[1]][t[0]] = 2
        if len(self.record_take) > 0:
            for t in self.record_take[-1]:
                if self.present == 1:
                    self.last_2_positions[t[1]][t[0]] = 2
                else:
                    self.last_2_positions[t[1]][t[0]] = 1
        # Only the side that initiated the undo informs the peer.
        if not self.stop:
            self.node.myNode.send_to_nodes({"regret": True})

    def reload(self):
        """Reset everything for a fresh game on the same board size.

        Clears canvas images, the board array, all snapshots and histories;
        the dominant side also tells the peer to reset.
        """
        self.stop = True
        self.regretButton['state'] = DISABLED
        self.passmeButton['state'] = DISABLED
        self.territoryButton['state'] = DISABLED
        self.canvas_bottom.delete('image')
        self.present = 0
        self.create_pB()
        self.create_pW()
        self.record = []
        self.record_take = []
        for m in range(1, self.mode_num + 1):
            for n in range(1, self.mode_num + 1):
                self.positions[m][n] = 0
                self.last_3_positions[m][n] = 0
                self.last_2_positions[m][n] = 0
                self.last_1_positions[m][n] = 0
        if self.dominance:
            self.node.myNode.send_to_nodes({"reload": True})
        self.startButton['state'] = NORMAL
        self.newGameButton1['state'] = NORMAL
        self.newGameButton2['state'] = NORMAL

    def shadow(self, event):
        """Mouse-motion handler: show a translucent stone at the nearest
        empty intersection and remove the previous shadow."""
        if not self.stop:
            # Only while the pointer is inside the grid.
            if (20 * self.size < event.x < 380 * self.size) and (20 * self.size < event.y < 380 * self.size):
                # Snap to the nearest intersection.
                dx = (event.x - 20 * self.size) % self.dd
                dy = (event.y - 20 * self.size) % self.dd
                x = int((event.x - 20 * self.size - dx) / self.dd + round(dx / self.dd) + 1)
                y = int((event.y - 20 * self.size - dy) / self.dd + round(dy / self.dd) + 1)
                # Only show the shadow over an empty intersection.
                if self.positions[y][x] == 0:
                    self.cross = self.canvas_bottom.create_image(
                        event.x - dx + round(dx / self.dd) * self.dd + 22 * self.p,
                        event.y - dy + round(dy / self.dd) * self.dd - 27 * self.p,
                        image=self.photoWBU_list[self.present])
                    self.canvas_bottom.addtag_withtag('image', self.cross)
                    if self.cross_last is not None:
                        self.canvas_bottom.delete(self.cross_last)
                    self.cross_last = self.cross
            else:
                if self.cross_last is not None:
                    self.canvas_bottom.delete(self.cross_last)

    def territory(self):
        """Print a rough per-intersection territory estimate to stdout.

        For every intersection, sums weighted contributions of the 3x3
        neighbourhood plus two cells further out (black +, white -).
        NOTE(review): the hard-coded 21 bound assumes a 19-line board, and
        the final `* 3` term will raise TypeError if the centre cell is the
        off-board sentinel (getTerritory returns None) — verify on 9/13 boards.
        """
        if not self.stop:
            for y in range(self.mode_num):
                print()
                for x in range(self.mode_num):
                    count = 0
                    mag = 1
                    # Cells two steps away on each axis, when on the board.
                    for i in [-1, 3]:
                        if not (y + i == -1 or y + i == 21):
                            if self.getTerritory(self.positions[y + i][x + 1]) is not None:
                                count += self.getTerritory(self.positions[y + i][x + 1])
                        if not (x + i == -1 or x + i == 21):
                            if self.getTerritory(self.positions[y + 1][x + i]) is not None:
                                count += self.getTerritory(self.positions[y + 1][x + i])
                    # 3x3 neighbourhood: corners weight 1, edges weight 2.
                    for m in [0, 2]:
                        for n in [0, 2]:
                            if self.getTerritory(self.positions[y + m][x + n]) is not None:
                                count += self.getTerritory(self.positions[y + m][x + n])
                        if self.getTerritory(self.positions[y + m][x + 1]) is not None:
                            count += self.getTerritory(self.positions[y + m][x + 1]) * 2
                        else:
                            mag += 0.25
                        if self.getTerritory(self.positions[y + 1][x + m]) is not None:
                            count += self.getTerritory(self.positions[y + 1][x + m]) * 2
                        else:
                            mag += 0.25
                    # The intersection itself gets weight 3.
                    count += self.getTerritory(self.positions[y + 1][x + 1]) * 3
                    print(count, end=' ')

    def save_record(self):
        """Export the move history as a minimal SGF-like file ('test.sgf')
        in the working directory, and dump the record to stdout."""
        s = ''
        print('數字位置:', self.record)
        print('陣列位置:')
        for p in self.positions[1:-1]:
            print(p[1:-1])
        # SGF coordinates: 'a' + (index - 1), i.e. chr(value + 96).
        for r in self.record:
            if r[2] == 1:
                s += ';' + 'B[' + chr(r[0] + 96) + chr(r[1] + 96) + ']'
            else:
                s += ';' + 'W[' + chr(r[0] + 96) + chr(r[1] + 96) + ']'
        # Write with a line break every 10 moves for readability.
        # NOTE(review): plain open/close without `with` or an explicit
        # encoding — fine for ASCII SGF but worth tightening.
        f = open('test.sgf', 'w')
        f.write('(')
        count = 0
        for i in s:
            f.write(i)
            if i == ']':
                count += 1
                if count == 10:
                    f.write('\n')
                    count = 0
        f.write(')')
        f.close()

    def load_record(self):
        """Open 'test.sgf', echo its contents, and show the replay window
        (Application2) until it is closed."""
        f = open('test.sgf', 'r')
        print(f.read())
        record = Application2(self)
        record.mainloop()
        record.destroy()

    def getTerritory(self, clr):
        """Map a board cell value to a territory weight:
        border -> None, empty -> 0, black -> +1, white -> -1."""
        if clr == -1:
            return None
        elif clr == 0:
            return 0
        elif clr == 1:
            return 1
        else:
            return -1

    def getDown(self, event):
        """Left-click handler: attempt to place the current player's stone.

        Enforces occupancy, the ko (repeated-position) rule and the
        no-suicide rule; on success records the move, updates snapshots,
        sends the move to the peer and hands the turn over.
        """
        if not self.stop:
            # Accept clicks slightly outside the grid edge (0.4 cell margin).
            if (20 * self.size - self.dd * 0.4 < event.x < self.dd * 0.4 + 380 * self.size) and (
                    20 * self.size - self.dd * 0.4 < event.y < self.dd * 0.4 + 380 * self.size):
                # Snap to the nearest intersection.
                dx = (event.x - 20 * self.size) % self.dd
                dy = (event.y - 20 * self.size) % self.dd
                x = int((event.x - 20 * self.size - dx) / self.dd + round(dx / self.dd) + 1)
                y = int((event.y - 20 * self.size - dy) / self.dd + round(dy / self.dd) + 1)
                # Reject occupied intersections.
                if self.positions[y][x] == 0:
                    # Tentatively place the stone and compute captures.
                    self.positions[y][x] = self.present + 1
                    self.image_added = self.canvas_bottom.create_image(
                        event.x - dx + round(dx / self.dd) * self.dd + 4 * self.p,
                        event.y - dy + round(dy / self.dd) * self.dd - 5 * self.p,
                        image=self.photoWBD_list[self.present])
                    self.canvas_bottom.addtag_withtag('image', self.image_added)
                    # Position tag lets kill() delete this stone later.
                    self.canvas_bottom.addtag_withtag('position' + str(x) + str(y), self.image_added)
                    deadlist = self.get_deadlist(x, y)
                    self.kill(deadlist)
                    # Ko check: the move must not recreate the position of
                    # two half-moves ago.
                    if not self.last_2_positions == self.positions:
                        # Legal if it captured something or the new stone's
                        # group has a liberty (if_dead returns False).
                        if len(deadlist) > 0 or self.if_dead([[x, y]], self.present + 1, [x, y]) == False:
                            # Valid move: record it and its captures.
                            self.record.append([x, y, self.positions[y][x]])
                            if len(deadlist) > 0:
                                temp = []
                                for d in deadlist:
                                    temp.append([d[0], d[1]])
                                self.record_take.append(temp)
                            else:
                                self.record_take.append([])
                            # Shift the snapshot history.
                            self.last_3_positions = copy.deepcopy(self.last_2_positions)
                            self.last_2_positions = copy.deepcopy(self.last_1_positions)
                            self.last_1_positions = copy.deepcopy(self.positions)
                            # Replace the last-move marker.
                            self.canvas_bottom.delete('image_added_sign')
                            self.image_added_sign = self.canvas_bottom.create_oval(
                                event.x - dx + round(dx / self.dd) * self.dd + 0.5 * self.dd,
                                event.y - dy + round(dy / self.dd) * self.dd + 0.5 * self.dd,
                                event.x - dx + round(dx / self.dd) * self.dd - 0.5 * self.dd,
                                event.y - dy + round(dy / self.dd) * self.dd - 0.5 * self.dd, width=3, outline='#3ae')
                            self.canvas_bottom.addtag_withtag('image', self.image_added_sign)
                            self.canvas_bottom.addtag_withtag('image_added_sign', self.image_added_sign)
                            if len(self.record) > 1:
                                self.regretButton['state'] = NORMAL
                            # Toggle whether this side may play next.
                            if self.stop:
                                self.stop = False
                            else:
                                self.stop = True
                            # Send the move to the remote peer.
                            if self.present == 0:
                                self.node.myNode.send_to_nodes({"player": "black", "positionX": x, "positionY": y})
                            else:
                                self.node.myNode.send_to_nodes({"player": "white", "positionX": x, "positionY": y})
                            # NOTE(review): regretButton is set NORMAL above
                            # and DISABLED here; the buttons are re-enabled
                            # when the peer's reply arrives in handle().
                            self.regretButton['state'] = DISABLED
                            self.passmeButton['state'] = DISABLED
                            # Hand the turn over and update the indicator.
                            if self.present == 0:
                                self.create_pW()
                                self.del_pB()
                                self.present = 1
                            else:
                                self.create_pB()
                                self.del_pW()
                                self.present = 0
                        else:
                            # Suicide: revert the tentative stone and warn.
                            self.positions[y][x] = 0
                            self.canvas_bottom.delete('position' + str(x) + str(y))
                            self.bell()
                            self.showwarningbox('沒氣了', "禁著點!")
                    else:
                        # Ko violation: revert the stone, restore captures, warn.
                        self.positions[y][x] = 0
                        self.canvas_bottom.delete('position' + str(x) + str(y))
                        self.recover(deadlist, (1 if self.present == 0 else 0))
                        self.bell()
                        self.showwarningbox("打劫", "不可提熱子!")
                else:
                    # Intersection already occupied: audible warning only.
                    self.bell()
            else:
                # Click outside the board: audible warning only.
                self.bell()

    def showwarningbox(self, title, message):
        """Remove the shadow stone and pop up a warning dialog."""
        self.canvas_bottom.delete(self.cross)
        tkinter.messagebox.showwarning(title, message)

    def connect(self):
        """Background thread body: poll until the remote peer is known and
        connected, then enable the host-side buttons and start handle().

        NOTE(review): threading.currentThread() is deprecated since
        Python 3.10 in favour of threading.current_thread().
        """
        t = threading.currentThread()
        while getattr(t, "do_run", True):
            if self.node.remote_id is None:
                print('Waiting for other player...')
                time.sleep(5)
            else:
                if self.node.myNode.connect_with_node(self.node.remote_id['ip'], self.node.remote_id['port']) is None:
                    print('Connecting...')
                    time.sleep(5)
                else:
                    print('Connect Successfully')
                    t.do_run = False
                    # Only the dominant (host) side may configure the game.
                    if self.dominance:
                        self.startButton['state'] = NORMAL
                        self.newGameButton1['state'] = NORMAL
                        self.newGameButton2['state'] = NORMAL
                        self.replayButton['state'] = NORMAL
                        self.recordButton2['state'] = NORMAL
                    # Switch to processing incoming peer messages.
                    self.t = threading.Thread(target=self.handle)
                    self.t.start()

    def handle(self):
        """Background thread body: poll the peer node for incoming messages
        and dispatch them to the matching local action.

        Message shapes observed in this file: {"Start": True},
        {"player": ..., "positionX": x, "positionY": y}, {"regret": True},
        {"pass": True}, {"reload": True}, {"mode": 1|2}. Anything else
        quits. NOTE(review): mutating tkinter widgets from this worker
        thread is not guaranteed thread-safe — confirm against the tkinter
        threading model.
        """
        t = threading.currentThread()
        while getattr(t, "do_run", True):
            if self.node.myNode.data is None:
                print('test')
                time.sleep(1)
            else:
                print(self.node.myNode.data)
                key = list(self.node.myNode.data.keys())
                data = list(self.node.myNode.data.values())
                print(key)
                print(data)
                # Consume the message before acting on it.
                self.node.myNode.data = None
                if key[0] == 'Start':
                    self.territoryButton['state'] = NORMAL
                    self.canvas_bottom.delete(self.pW)
                    self.canvas_bottom.delete(self.pB)
                    self.create_pB()
                    self.del_pW()
                elif key[0] == 'player':
                    # Apply the peer's move via the rule-free model.getDown.
                    # (data[0] is the colour string; coordinates follow.)
                    x = data[1]
                    y = data[2]
                    super(Application, self).getDown(x, y)
                    self.record.append([x, y, self.positions[y][x]])
                    # It is our turn again.
                    self.stop = False
                    self.regretButton['state'] = NORMAL
                    self.passmeButton['state'] = NORMAL
                elif key[0] == 'regret':
                    self.regret()
                elif key[0] == 'pass':
                    self.passme()
                elif key[0] == 'reload':
                    self.reload()
                elif key[0] == 'mode':
                    if data[0] == 1:
                        self.newGame1()
                    if data[0] == 2:
                        self.newGame2()
                else:
                    # Unknown message: shut the window down.
                    self.quit()

    def keyboardQuit(self, event):
        """<Ctrl>+<D> handler: stop the P2P node and quit.

        Assumes the global Node exposes a ``node_`` with stop() — TODO confirm.
        """
        global Node
        Node.node_.stop()
        print('end test')
        self.quit()

    def newGame1(self):
        """Toggle between the 9- and 13-line boards.

        Sets the module-level globals so the main loop rebuilds the window,
        tells the peer (when dominant), then quits this instance.
        """
        global mode_num, newApp
        mode_num = (13 if self.mode_num == 9 else 9)
        newApp = True
        if self.dominance:
            self.node.myNode.send_to_nodes({"mode": 1})
        self.quit()

    def newGame2(self):
        """Toggle between the 13- and 19-line boards (same mechanism as newGame1)."""
        global mode_num, newApp
        mode_num = (13 if self.mode_num == 19 else 19)
        newApp = True
        if self.dominance:
            self.node.myNode.send_to_nodes({"mode": 2})
        self.quit()

    def quit(self):
        """Stop the worker thread and exit the Tk main loop."""
        self.t.do_run = False
        self.tk.quit()
class Application2(model2):
    """Replay window: loads a recorded game from 'test.sgf' and lets the
    user step backward/forward through the moves."""
    # Initialise the board; defaults to a 19-line board (module mode_num).
    def __init__(self,main):
        model2.__init__(self,main,mode_num)
        self.record = self.load_record()   # list of [x, y, color] moves
        self.record_take = []              # captured stones per move (managed by base class — TODO confirm)
        self.record_next = []              # moves undone, available for redo
        self.previousButton = Button(self, text='上一手', command=self.previousMove)
        self.previousButton.place(x=420 * self.size, y=200 * self.size)
        self.nextButton = Button(self, text='下一手', command=self.nextMove)
        self.nextButton.place(x=420 * self.size, y=225 * self.size)
        self.previousButton['state'] = DISABLED
        self.nextButton['state'] = DISABLED
        self.backButton = Button(self, text='返回', command=self.back)
        self.backButton.place(x=420 * self.size, y=250 * self.size)
        # Replay the whole record so the board shows the final position.
        for r in self.record:
            self.getDown(r[0], r[1])
        if not self.record == []:
            self.previousButton['state'] = NORMAL
    def previousMove(self):
        """Undo the last move: restore the previous board snapshot and
        rebuild the ko-check history (last_2/last_3_positions)."""
        # for i in range(time):
        list_of_b = []
        list_of_w = []
        self.canvas_bottom.delete('image')
        # Swap whose turn-marker is shown.
        if self.present == 1:
            self.create_pB()
            self.del_pW()
            self.present = 0
        else:
            self.create_pW()
            self.del_pB()
            self.present = 1
        # Collect stone coordinates from the snapshot two moves back.
        for m in range(len(self.last_2_positions)):
            for n in range(len(self.last_2_positions[m])):
                if self.last_2_positions[m][n] == 1:
                    list_of_b += [[n, m]]
                elif self.last_2_positions[m][n] == 2:
                    list_of_w += [[n, m]]
        self.recover(list_of_b, 0)
        self.recover(list_of_w, 1)
        # Shift the position history back one step.
        self.last_1_positions = copy.deepcopy(self.last_2_positions)
        self.last_2_positions = copy.deepcopy(self.last_3_positions)
        self.positions = copy.deepcopy(self.last_1_positions)
        self.record_next.append(self.record[-1])
        self.record = copy.deepcopy(self.record[:-1])
        self.record_take = copy.deepcopy(self.record_take[:-1])
        print(self.record)
        # Is there still an earlier move to step back to?
        if len(self.record) < 1:
            self.previousButton['state'] = DISABLED
        if len(self.record_next) > 0:
            self.nextButton['state'] = NORMAL
        # Rebuild last_3_positions from scratch...
        for m in range(1, self.mode_num + 1):
            for n in range(1, self.mode_num + 1):
                self.last_3_positions[m][n] = 0
        # ...then restore the board state from the remaining record.
        for r in self.record[:-2]:
            if r[2] == 1:
                self.last_3_positions[r[1]][r[0]] = 1
            elif r[2] == 2:
                self.last_3_positions[r[1]][r[0]] = 2
        # Remove stones that had been captured (dead stones).
        if len(self.record_take) > 3:
            for t in self.record_take[-4]:
                self.last_3_positions[t[1]][t[0]] = 0
            for t in self.record_take[-3]:
                self.last_3_positions[t[1]][t[0]] = 0
        # Re-add stones that were captured on the move being undone.
        if len(self.record_take) > 1:
            for t in self.record_take[-2]:
                if self.present == 1:
                    self.last_3_positions[t[1]][t[0]] = 1
                else:
                    self.last_3_positions[t[1]][t[0]] = 2
    def nextMove(self):
        """Redo the most recently undone move, if any."""
        if len(self.record_next) > 0:
            self.record.append(self.record_next[-1])
            self.getDown(self.record_next[-1][0], self.record_next[-1][1])
            print(self.record)
            self.record_next = copy.deepcopy(self.record_next[:-1])
        # Refresh button states after the redo.
        if len(self.record_next) <= 0:
            self.nextButton['state'] = DISABLED
        if len(self.record) > 0:
            self.previousButton['state'] = NORMAL
    def load_record(self):
        """Parse 'test.sgf' into a list of [x, y, color] moves.

        Strips SGF punctuation, then reads 3-character groups:
        color letter ('B' black / anything else white) followed by two
        coordinate letters ('a' == 1).
        """
        f = open('test.sgf', 'r')
        a = re.sub(r'[\':\s ,(;\[\])]*', '', f.read())
        r = []
        for i in range(int(len(a) / 3)):
            if a[i * 3] == 'B':
                r.append([ord(a[i * 3 + 1]) - 96, ord(a[i * 3 + 2]) - 96, 1])
            else:
                r.append([ord(a[i * 3 + 1]) - 96, ord(a[i * 3 + 2]) - 96, 2])
        f.close()
        return r
    def back(self):
        # Close the replay window and return to the caller.
        self.quit()
class Node_connection():
    # Shared peer-to-peer connection state for the GUI.
    # NOTE(review): these are all *class* attributes, so the node socket is
    # created and started exactly once — when this class body executes at
    # import time.  Re-instantiating Node_connection() in the __main__ loop
    # therefore reuses the same running node instead of binding
    # 127.0.0.1:8002 again; moving this into __init__ would change that.
    connected = False                              # presumably flipped by the connect logic — not visible here
    remote_id = {"ip": "127.0.0.1", "port": 8001}  # default peer to dial
    myNode = MyOwnPeer2PeerNode("127.0.0.1", 8002) # local P2P endpoint
    myNode.start()                                 # start listening immediately
# Module-level flags used when (re)creating the Application between games.
# NOTE(review): a ``global`` statement at module scope is a no-op, and
# ``newApp2`` is declared but never assigned anywhere in this chunk.
global mode_num, newApp, newApp2, Dominance, Node
mode_num = 19
newApp = False
Dominance = False
if __name__ == '__main__':
    # Loop so a "new game" request can rebuild the Application with
    # different parameters; exit only on a real quit.
    while True:
        newApp = False
        Dominance = False
        Node = Node_connection()
        app = Application()
        app.title('圍棋')
        app.mainloop()
        # mainloop() returned: either a mode switch (newApp) or a real quit.
        if newApp:
            app.destroy()
        else:
            Node.myNode.stop()
            break
|
nrf_driver.py | #
# Copyright (c) 2016 Nordic Semiconductor ASA
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of Nordic Semiconductor ASA nor the names of other
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# 4. This software must only be used in or with a processor manufactured by Nordic
# Semiconductor ASA, or in or with a processor manufactured by a third party that
# is used in combination with a processor manufactured by Nordic Semiconductor.
#
# 5. Any software provided in binary or object form under this license must not be
# reverse engineered, decompiled, modified and/or disassembled.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import atexit
import functools
import wrapt
import queue
import traceback
from threading import Thread, Lock, Event
from blatann.nrf.nrf_events import *
from blatann.nrf.nrf_types import *
from blatann.nrf.nrf_dll_load import driver
from pc_ble_driver_py.exceptions import NordicSemiException
import blatann.nrf.nrf_driver_types as util
from blatann.nrf.nrf_types.config import BleEnableConfig, BleConnConfig
logger = logging.getLogger(__name__)
NoneType = type(None)
# TODO: Do we really want to raise exceptions all the time?
def NordicSemiErrorCheck(wrapped=None, expected=driver.NRF_SUCCESS):
    """Decorator that converts Nordic driver status codes into exceptions.

    The wrapped function must return either a bare status code or a
    tuple/list whose first element is the status code.  On success the
    payload (if any) is returned; otherwise NordicSemiException is raised.

    Usable both bare (@NordicSemiErrorCheck) and parameterized
    (@NordicSemiErrorCheck(expected=...)).
    """
    if wrapped is None:
        # Parameterized form: return a decorator bound to ``expected``.
        return functools.partial(NordicSemiErrorCheck, expected=expected)
    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        logger.debug("[%s] %s%s", instance.serial_port, wrapped.__name__, args)
        result = wrapped(*args, **kwargs)
        if isinstance(result, (list, tuple)):
            # First element is the status; unwrap a single-value payload.
            err_code = result[0]
            result = result[1:]
            if len(result) == 1:
                result = result[0]
        else:
            err_code = result
            result = None
        if err_code != expected:
            try:
                err_string = 'Error code: {}'.format(NrfError(err_code))
            except ValueError:
                # Code not in the NrfError enum: show it numerically.
                err_string = 'Error code: 0x{:04x}, {}'.format(err_code, err_code)
            raise NordicSemiException('Failed to {}. {}'.format(wrapped.__name__, err_string), err_code)
        return result
    return wrapper(wrapped)
class NrfDriverObserver(object):
    """Base class for observers registered via NrfDriver.observer_register();
    receives every decoded BLE event."""
    def on_driver_event(self, nrf_driver, event):
        # Default: ignore the event; subclasses override.
        pass
class NrfDriver(object):
    """Python wrapper around the Nordic serialization (pc-ble-driver) RPC API.

    Queues raw driver events and decodes/dispatches them on a dedicated
    background thread to registered observers and per-event-type handlers.
    """
    default_baud_rate = 1000000  # UART baud rate used when none is given
    ATT_MTU_DEFAULT = driver.BLE_GATT_ATT_MTU_DEFAULT
    def __init__(self, serial_port, baud_rate=None, log_driver_comms=False):
        """
        :param serial_port: serial device the nRF connectivity chip is on
        :param baud_rate: UART baud rate (defaults to default_baud_rate)
        :param log_driver_comms: when True, log driver status/log callbacks
        """
        if baud_rate is None:
            baud_rate = self.default_baud_rate
        self._events = queue.Queue()         # raw events from the driver callback
        self._event_thread = None            # dispatcher thread; also the "is open" flag
        self._event_loop = False
        self._event_stopped = Event()
        self.observers = []                  # driver-wide observers
        self.ble_enable_params = None
        self._event_observers = {}           # event type -> list of handlers
        self._event_observer_lock = Lock()
        self._log_driver_comms = log_driver_comms
        self._serial_port = serial_port
        # Build the serialization transport stack: UART phy -> 3-wire link -> RPC.
        phy_layer = driver.sd_rpc_physical_layer_create_uart(serial_port,
                                                             baud_rate,
                                                             driver.SD_RPC_FLOW_CONTROL_NONE,
                                                             driver.SD_RPC_PARITY_NONE)
        link_layer = driver.sd_rpc_data_link_layer_create_bt_three_wire(phy_layer, 100)
        transport_layer = driver.sd_rpc_transport_layer_create(link_layer, 100)
        self.rpc_adapter = driver.sd_rpc_adapter_create(transport_layer)
    @property
    def serial_port(self):
        # Read-only accessor for the configured serial device.
        return self._serial_port
    @NordicSemiErrorCheck
    @wrapt.synchronized
    def open(self):
        """Open the RPC transport and start the event-dispatch thread.

        Idempotent: reopening an already-open driver only logs a warning.
        Returns a driver status code (converted by NordicSemiErrorCheck).
        """
        if self.is_open:
            logger.warning("Trying to open already opened driver")
            return driver.NRF_SUCCESS
        err_code = driver.sd_rpc_open(self.rpc_adapter,
                                      self._status_handler,
                                      self.ble_evt_handler,
                                      self._log_message_handler)
        if err_code == driver.NRF_SUCCESS:
            self._event_thread = Thread(target=self._event_handler, name="{}_Event".format(self._serial_port))
            # Note: We create a daemon thread and then register an exit handler
            # to make sure this thread stops. This ensures that scripts that
            # stop because of ctrl-c interrupt, compile errors or other problems
            # do not keep hanging in the console, waiting for an infinite thread
            # loop.
            atexit.register(self._event_thread_join)
            self._event_thread.daemon = True
            self._event_thread.start()
        return err_code
    def _event_thread_join(self):
        """Signal the event loop to stop and wait (max 1 s) for it to finish."""
        if self._event_thread is None:
            return
        self._event_loop = False
        # NOTE(review): waits at most one second; the thread is a daemon, so
        # a slow shutdown will not block interpreter exit.
        self._event_stopped.wait(1)
        self._event_thread = None
    @property
    def is_open(self):
        # "Open" is tracked by the existence of the event thread (see open()).
        return self._event_thread is not None
    @NordicSemiErrorCheck
    @wrapt.synchronized
    def close(self):
        """Reset the link, close the RPC transport and stop the event thread.

        Safe to call when already closed (returns NRF_SUCCESS).
        """
        if not self.is_open:
            return driver.NRF_SUCCESS
        driver.sd_rpc_conn_reset(self.rpc_adapter, 0)
        retval = driver.sd_rpc_close(self.rpc_adapter)
        driver.sd_rpc_adapter_delete(self.rpc_adapter)
        self._event_thread_join()
        return retval
def event_subscribe(self, handler, *event_types):
for event_type in event_types:
if not issubclass(event_type, BLEEvent):
raise ValueError("Event type must be a valid BLEEvent class type. Got {}".format(event_type))
with self._event_observer_lock:
for event_type in event_types:
# If event type not already in dict, create an empty list
if event_type not in self._event_observers.keys():
self._event_observers[event_type] = []
handlers = self._event_observers[event_type]
if handler not in handlers:
handlers.append(handler)
    def event_unsubscribe(self, handler, *event_types):
        """Remove *handler* from the given event types.

        With no event types given, the handler is removed from every type.
        """
        if not event_types:
            self.event_unsubscribe_all(handler)
            return
        with self._event_observer_lock:
            for event_type in event_types:
                handlers = self._event_observers.get(event_type, [])
                if handler in handlers:
                    handlers.remove(handler)
    def event_unsubscribe_all(self, handler):
        """Remove *handler* from every event type it is subscribed to."""
        with self._event_observer_lock:
            for event_type, handlers in self._event_observers.items():
                if handler in handlers:
                    handlers.remove(handler)
    def observer_register(self, observer):
        """Add a driver-wide observer (receives every decoded event)."""
        with self._event_observer_lock:
            if observer not in self.observers:
                self.observers.append(observer)
    def observer_unregister(self, observer):
        """Remove a previously registered driver-wide observer."""
        with self._event_observer_lock:
            if observer in self.observers:
                self.observers.remove(observer)
    def ble_enable_params_setup(self):
        # Default BLE enable configuration; callers may build their own.
        return BleEnableConfig()
    def adv_params_setup(self):
        # Default advertising parameters: 40 ms interval, 180 s timeout.
        return BLEGapAdvParams(interval_ms=40, timeout_s=180)
    def scan_params_setup(self):
        # Default scan parameters: 200 ms interval, 150 ms window, 10 s timeout.
        return BLEGapScanParams(interval_ms=200, window_ms=150, timeout_s=10)
    def conn_params_setup(self):
        # Default connection parameters: 15-30 ms interval, 4 s supervision timeout.
        return BLEGapConnParams(min_conn_interval_ms=15, max_conn_interval_ms=30,
                                conn_sup_timeout_ms=4000, slave_latency=0)
    def security_params_setup(self):
        # Default pairing parameters: bonding + MITM, no LESC, no OOB.
        return BLEGapSecParams(bond=True, mitm=True, le_sec_pairing=False, keypress_noti=False,
                               io_caps=BLEGapIoCaps.NONE, oob=False, min_key_size=8,max_key_size=16,
                               kdist_own=BLEGapSecKeyDist(), kdist_peer=BLEGapSecKeyDist(enc_key=True))
"""
BLE Generic methods
"""
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_conn_configure(self, conn_params):
assert isinstance(conn_params, BleConnConfig)
for tag, cfg in conn_params.get_configs():
err = driver.sd_ble_cfg_set(self.rpc_adapter, tag, cfg, 0)
if err != driver.NRF_SUCCESS:
return err
return driver.NRF_SUCCESS
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_enable(self, ble_enable_params=None):
if not ble_enable_params:
ble_enable_params = self.ble_enable_params_setup()
assert isinstance(ble_enable_params, BleEnableConfig), 'Invalid argument type'
self.ble_enable_params = ble_enable_params
for tag, cfg in self.ble_enable_params.get_configs():
err = driver.sd_ble_cfg_set(self.rpc_adapter, tag, cfg, 0)
if err != driver.NRF_SUCCESS:
return err
return driver.sd_ble_enable(self.rpc_adapter, None)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_opt_set(self, ble_opt):
assert isinstance(ble_opt, BleOption)
opt = driver.ble_opt_t()
sub_attr = opt
path = ble_opt.path.split(".")
for name in path[:-1]:
sub_attr = getattr(sub_attr, name)
setattr(sub_attr, path[-1], ble_opt.to_c())
return driver.sd_ble_opt_set(self.rpc_adapter, ble_opt.option_flag, opt)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_user_mem_reply(self, conn_handle):
return driver.sd_ble_user_mem_reply(self.rpc_adapter, conn_handle, None)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_vs_uuid_add(self, uuid_base):
assert isinstance(uuid_base, BLEUUIDBase), 'Invalid argument type'
uuid_type = driver.new_uint8()
err_code = driver.sd_ble_uuid_vs_add(self.rpc_adapter,
uuid_base.to_c(),
uuid_type)
if err_code == driver.NRF_SUCCESS:
uuid_base.type = driver.uint8_value(uuid_type)
return err_code
"""
GAP Methods
"""
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_addr_get(self):
addr = driver.ble_gap_addr_t()
err_code = driver.sd_ble_gap_addr_get(self.rpc_adapter, addr)
if err_code == driver.NRF_SUCCESS:
addr = BLEGapAddr.from_c(addr)
else:
addr = None
return err_code, addr
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_addr_set(self, address):
assert isinstance(address, BLEGapAddr), "Invalid argument type"
addr_c = address.to_c()
return driver.sd_ble_gap_addr_set(self.rpc_adapter, addr_c)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_device_name_set(self, name):
if isinstance(name, str):
name = name.encode("utf8")
write_perm = BLEGapSecMode(0, 0).to_c()
length = len(name)
data = util.list_to_uint8_array(name).cast()
return driver.sd_ble_gap_device_name_set(self.rpc_adapter, write_perm, data, length)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_appearance_set(self, value):
return driver.sd_ble_gap_appearance_set(self.rpc_adapter, value)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_ppcp_set(self, conn_params):
assert isinstance(conn_params, BLEGapConnParams), "Invalid argument type"
params = conn_params.to_c()
return driver.sd_ble_gap_ppcp_set(self.rpc_adapter, params)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_tx_power_set(self, tx_power):
return driver.sd_ble_gap_tx_power_set(self.rpc_adapter, tx_power)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_adv_start(self, adv_params=None, conn_cfg_tag=0):
if not adv_params:
adv_params = self.adv_params_setup()
assert isinstance(adv_params, BLEGapAdvParams), 'Invalid argument type'
return driver.sd_ble_gap_adv_start(self.rpc_adapter, adv_params.to_c(), conn_cfg_tag)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_conn_param_update(self, conn_handle, conn_params):
assert isinstance(conn_params, (BLEGapConnParams, NoneType)), 'Invalid argument type'
if conn_params:
conn_params = conn_params.to_c()
return driver.sd_ble_gap_conn_param_update(self.rpc_adapter, conn_handle, conn_params)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_adv_stop(self):
return driver.sd_ble_gap_adv_stop(self.rpc_adapter)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_scan_start(self, scan_params=None):
if not scan_params:
scan_params = self.scan_params_setup()
assert isinstance(scan_params, BLEGapScanParams), 'Invalid argument type'
return driver.sd_ble_gap_scan_start(self.rpc_adapter, scan_params.to_c())
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_scan_stop(self):
return driver.sd_ble_gap_scan_stop(self.rpc_adapter)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_connect(self, address, scan_params=None, conn_params=None, conn_cfg_tag=0):
assert isinstance(address, BLEGapAddr), 'Invalid argument type'
if not scan_params:
scan_params = self.scan_params_setup()
assert isinstance(scan_params, BLEGapScanParams), 'Invalid argument type'
if not conn_params:
conn_params = self.conn_params_setup()
assert isinstance(conn_params, BLEGapConnParams), 'Invalid argument type'
return driver.sd_ble_gap_connect(self.rpc_adapter,
address.to_c(),
scan_params.to_c(),
conn_params.to_c(),
conn_cfg_tag)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_disconnect(self, conn_handle, hci_status_code=BLEHci.remote_user_terminated_connection):
assert isinstance(hci_status_code, BLEHci), 'Invalid argument type'
return driver.sd_ble_gap_disconnect(self.rpc_adapter,
conn_handle,
hci_status_code.value)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_adv_data_set(self, adv_data=BLEAdvData(), scan_data=BLEAdvData()):
assert isinstance(adv_data, BLEAdvData), 'Invalid argument type'
assert isinstance(scan_data, BLEAdvData), 'Invalid argument type'
(adv_data_len, p_adv_data) = adv_data.to_c()
(scan_data_len, p_scan_data) = scan_data.to_c()
return driver.sd_ble_gap_adv_data_set(self.rpc_adapter,
p_adv_data,
adv_data_len,
p_scan_data,
scan_data_len)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_data_length_update(self, conn_handle, params=None):
assert isinstance(params, (BLEGapDataLengthParams, NoneType))
if isinstance(params, BLEGapDataLengthParams):
params = params.to_c()
return driver.sd_ble_gap_data_length_update(self.rpc_adapter, conn_handle, params, None)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_phy_update(self, conn_handle, tx_phy=BLEGapPhy.auto, rx_phy=BLEGapPhy.auto):
params = BLEGapPhys(tx_phy, rx_phy)
return driver.sd_ble_gap_phy_update(self.rpc_adapter, conn_handle, params.to_c())
"""
SMP Methods
"""
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_authenticate(self, conn_handle, sec_params):
assert isinstance(sec_params, (BLEGapSecParams, NoneType)), 'Invalid argument type'
return driver.sd_ble_gap_authenticate(self.rpc_adapter,
conn_handle,
sec_params.to_c() if sec_params else None)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_sec_params_reply(self, conn_handle, sec_status, sec_params, sec_keyset):
assert isinstance(sec_status, BLEGapSecStatus), 'Invalid argument type'
assert isinstance(sec_params, (BLEGapSecParams, NoneType)), 'Invalid argument type'
assert isinstance(sec_keyset, BLEGapSecKeyset), 'Invalid argument type'
return driver.sd_ble_gap_sec_params_reply(self.rpc_adapter,
conn_handle,
sec_status.value,
sec_params.to_c() if sec_params else None,
sec_keyset.to_c())
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_auth_key_reply(self, conn_handle, key_type, key):
if key is not None:
key_buf = util.list_to_uint8_array(key).cast()
else:
key_buf = None
return driver.sd_ble_gap_auth_key_reply(self.rpc_adapter, conn_handle, key_type, key_buf)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_sec_info_reply(self, conn_handle, enc_info=None, irk=None, sign_info=None):
assert isinstance(enc_info, (BLEGapEncryptInfo, NoneType)), "Invalid argument type"
assert isinstance(irk, (BLEGapIdKey, NoneType)), "Invalid argument type"
assert isinstance(sign_info, (BLEGapSignKey, NoneType)), "Invalid argument type"
if enc_info is not None:
enc_info = enc_info.to_c()
if irk is not None:
irk = irk.to_c().id_info
if sign_info is not None:
sign_info = sign_info.to_c()
return driver.sd_ble_gap_sec_info_reply(self.rpc_adapter, conn_handle, enc_info, irk, sign_info)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_encrypt(self, conn_handle, master_id, enc_info):
assert isinstance(master_id, BLEGapMasterId)
assert isinstance(enc_info, BLEGapEncryptInfo)
return driver.sd_ble_gap_encrypt(self.rpc_adapter, conn_handle, master_id.to_c(), enc_info.to_c())
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gap_lesc_dhkey_reply(self, conn_handle, dh_key):
assert isinstance(dh_key, BLEGapDhKey)
key = dh_key.to_c()
return driver.sd_ble_gap_lesc_dhkey_reply(self.rpc_adapter, conn_handle, key)
"""
GATTS Methods
"""
# TODO: sd_ble_gatts_include_add, sd_ble_gatts_descriptor_add, sd_ble_gatts_sys_attr_set/get,
# sd_ble_gatts_initial_user_handle_get, sd_ble_gatts_attr_get
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_service_add(self, service_type, uuid, service_handle):
handle = driver.new_uint16()
uuid_c = uuid.to_c()
err_code = driver.sd_ble_gatts_service_add(self.rpc_adapter,
service_type,
uuid_c,
handle)
if err_code == driver.NRF_SUCCESS:
service_handle.handle = driver.uint16_value(handle)
return err_code
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_characteristic_add(self, service_handle, char_md, attr_char_value, char_handle):
# TODO type assertions
handle_params = driver.ble_gatts_char_handles_t()
err_code = driver.sd_ble_gatts_characteristic_add(self.rpc_adapter,
service_handle,
char_md.to_c(),
attr_char_value.to_c(),
handle_params)
if err_code == driver.NRF_SUCCESS:
char_handle.value_handle = handle_params.value_handle
char_handle.user_desc_handle = handle_params.user_desc_handle
char_handle.cccd_handle = handle_params.cccd_handle
char_handle.sccd_handle = handle_params.sccd_handle
return err_code
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_descriptor_add(self, char_handle, attr):
assert isinstance(attr, BLEGattsAttribute)
handle = driver.new_uint16()
err_code = driver.sd_ble_gatts_descriptor_add(self.rpc_adapter, char_handle, attr.to_c(), handle)
if err_code == driver.NRF_SUCCESS:
attr.handle = driver.uint16_value(handle)
return err_code
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_rw_authorize_reply(self, conn_handle, authorize_reply_params):
assert isinstance(authorize_reply_params, BLEGattsRwAuthorizeReplyParams)
return driver.sd_ble_gatts_rw_authorize_reply(self.rpc_adapter, conn_handle, authorize_reply_params.to_c())
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_value_get(self, conn_handle, attribute_handle, gatts_value, max_bytes_read=512):
assert isinstance(gatts_value, BLEGattsValue)
value_params = gatts_value.to_c()
value_params.len = max_bytes_read
err_code = driver.sd_ble_gatts_value_get(self.rpc_adapter, conn_handle, attribute_handle, value_params)
if err_code == driver.NRF_SUCCESS:
value_out = BLEGattsValue.from_c(value_params)
gatts_value.offset = value_out.offset
gatts_value.value = value_out.value
return err_code
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_value_set(self, conn_handle, attribute_handle, gatts_value):
assert isinstance(gatts_value, BLEGattsValue)
value_params = gatts_value.to_c()
return driver.sd_ble_gatts_value_set(self.rpc_adapter, conn_handle, attribute_handle, value_params)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_hvx(self, conn_handle, hvx_params):
assert isinstance(hvx_params, BLEGattsHvx)
return driver.sd_ble_gatts_hvx(self.rpc_adapter, conn_handle, hvx_params.to_c())
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_service_changed(self, conn_handle, start_handle, end_handle):
return driver.sd_ble_gatts_service_changed(self.rpc_adapter, conn_handle, start_handle, end_handle)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_exchange_mtu_reply(self, conn_handle, server_mtu):
return driver.sd_ble_gatts_exchange_mtu_reply(self.rpc_adapter, conn_handle, server_mtu)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gatts_sys_attr_set(self, conn_handle, sys_attr_data, flags=0):
if sys_attr_data is not None:
data = util.list_to_uint8_array(sys_attr_data).cast()
length = len(sys_attr_data)
else:
data = None
length = 0
return driver.sd_ble_gatts_sys_attr_set(self.rpc_adapter, conn_handle, data, length, flags)
"""
GATTC Methods
"""
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_write(self, conn_handle, write_params):
assert isinstance(write_params, BLEGattcWriteParams), 'Invalid argument type'
return driver.sd_ble_gattc_write(self.rpc_adapter,
conn_handle,
write_params.to_c())
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_prim_srvc_disc(self, conn_handle, srvc_uuid, start_handle):
assert isinstance(srvc_uuid, (BLEUUID, NoneType)), 'Invalid argument type'
return driver.sd_ble_gattc_primary_services_discover(self.rpc_adapter,
conn_handle,
start_handle,
srvc_uuid.to_c() if srvc_uuid else None)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_char_disc(self, conn_handle, start_handle, end_handle):
handle_range = driver.ble_gattc_handle_range_t()
handle_range.start_handle = start_handle
handle_range.end_handle = end_handle
return driver.sd_ble_gattc_characteristics_discover(self.rpc_adapter,
conn_handle,
handle_range)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_desc_disc(self, conn_handle, start_handle, end_handle):
handle_range = driver.ble_gattc_handle_range_t()
handle_range.start_handle = start_handle
handle_range.end_handle = end_handle
return driver.sd_ble_gattc_descriptors_discover(self.rpc_adapter,
conn_handle,
handle_range)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_attr_info_disc(self, conn_handle, start_handle, end_handle):
handle_range = driver.ble_gattc_handle_range_t()
handle_range.start_handle = start_handle
handle_range.end_handle = end_handle
return driver.sd_ble_gattc_attr_info_discover(self.rpc_adapter, conn_handle, handle_range)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_read(self, conn_handle, read_handle, offset=0):
return driver.sd_ble_gattc_read(self.rpc_adapter, conn_handle, read_handle, offset)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_exchange_mtu_req(self, conn_handle, att_mtu_size):
return driver.sd_ble_gattc_exchange_mtu_request(self.rpc_adapter,
conn_handle,
att_mtu_size)
@NordicSemiErrorCheck
@wrapt.synchronized
def ble_gattc_hv_confirm(self, conn_handle, attr_handle):
return driver.sd_ble_gattc_hv_confirm(self.rpc_adapter, conn_handle, attr_handle)
"""
Driver handlers
"""
    def _status_handler(self, adapter, status_code, status_message):
        # Driver callback for transport status changes; log-only.
        if self._log_driver_comms:
            logger.info("STATUS [{}]: {}".format(status_code, status_message))
    def _log_message_handler(self, adapter, severity, log_message):
        # Driver callback carrying the driver's internal log output; log-only.
        if self._log_driver_comms:
            logger.info("LOG [{}]: {}".format(severity, log_message))
    """
    Event handling
    """
    def ble_evt_handler(self, adapter, ble_event):
        # Driver callback context: just enqueue; decoding and dispatch run
        # on the dedicated event thread (_event_handler).
        self._events.put(ble_event)
def _event_handler(self):
self._event_loop = True
self._event_stopped.clear()
while self._event_loop:
try:
ble_event = self._events.get(timeout=0.1)
except queue.Empty:
continue
if len(self.observers) == 0:
continue
event = event_decode(ble_event)
if event is None:
logger.warning('unknown ble_event %r (discarded)', ble_event.header.evt_id)
continue
# Get a copy of the observers and event observers in case its modified during this execution
with self._event_observer_lock:
observers = self.observers[:]
event_handlers = {k: v[:] for k, v in self._event_observers.items()}
# Call all the observers
for obs in observers:
try:
obs.on_driver_event(self, event)
except:
traceback.print_exc()
# Call all the handlers for the event type provided
for event_type, handlers in event_handlers.items():
if issubclass(type(event), event_type):
for handler in handlers:
try:
handler(self, event)
except:
traceback.print_exc()
self._event_stopped.set()
|
fakeserver.py | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import json
import multiprocessing
import six
from threading import Thread
from argparse import ArgumentParser
from six.moves.socketserver import TCPServer
from six.moves.SimpleHTTPServer import SimpleHTTPRequestHandler
from six.moves import urllib
class Handler(SimpleHTTPRequestHandler):
    """Request handler that proxies every HTTP request over a pipe.

    The parsed request is sent through ``self.server.pipe`` as a dict; the
    controlling test answers with a ``(status, headers, body)`` tuple which
    is written back to the HTTP client.
    """
    def _do_any(self):
        """Handle any HTTP verb by round-tripping it through the pipe."""
        path, _, querystr = self.path.partition('?')
        query = urllib.parse.parse_qs(querystr)
        content_len = int(self.headers.get('content-length', 0))
        body = self.rfile.read(content_len)
        if six.PY2:
            headers = self.headers.getplist()
        else:
            headers = self.headers.get_params()
        # (removed a stray ``print(self)`` debug leftover)
        self.server.pipe.send({
            'path': path, 'query': query, 'body': body,
            'method': self.command, 'headers': headers,
        })
        if not self.server.pipe.poll(10):
            self.send_error(500, 'Pipe hung')
            # BUG FIX: bail out instead of falling through to recv(), which
            # would block forever on the pipe that just timed out.
            return
        status, headers, body = self.server.pipe.recv()
        # Non-bytes bodies are JSON-encoded for convenience.
        if not isinstance(body, bytes):
            body = json.dumps(body).encode('utf8') + b'\n'
        self.send_response(status)
        for hn, hv in headers or ():
            self.send_header(hn, hv)
        self.end_headers()
        self.wfile.write(body)
    # Every common verb uses the same round-trip handler.
    do_GET = _do_any
    do_PUT = _do_any
    do_POST = _do_any
    do_DELETE = _do_any
    do_PATCH = _do_any
def threadit(target, *args, **kw):
    """Run *target*(*args, **kw) in a daemon thread named after the callable.

    Returns the already-started Thread object.
    """
    worker = Thread(target=target, name=target.__name__, args=args, kwargs=kw)
    worker.daemon = True
    worker.start()
    return worker
def run(bind_at):
    """Start the fake HTTP server on *bind_at* (host, port) in a daemon thread.

    Returns the parent end of a multiprocessing Pipe: each incoming request
    arrives as a dict, and the caller must send back (status, headers, body)
    for the handler to deliver to the client.
    """
    p1, p2 = multiprocessing.Pipe()
    class MyTCPServer(TCPServer):
        allow_reuse_address = True  # avoid TIME_WAIT bind failures between runs
        pipe = p2                   # handlers reach this via self.server.pipe
    httpd = MyTCPServer(bind_at, Handler)
    threadit(httpd.serve_forever)   # daemon thread: dies with the process
    return p1
|
GIL_effect.py | import threading
import time
def worker(r):
    """Sum the integers in *r* and add the total into the global ``result``.

    CPU-bound on purpose: used to demonstrate that threads give no speedup
    under the GIL.
    """
    # threading.currentThread() is deprecated (and removed in 3.13);
    # use the PEP 8 spelling.
    tid = threading.current_thread().name
    global result
    # sum() runs the accumulation at C speed instead of a Python loop.
    res = sum(r)
    result += res  # NOTE: read-modify-write, not atomic across threads
    print("Worker {} is working with {}".format(tid, r))
#################################################
# Sequential Processing:
#################################################
# Baseline: both halves of the 0..100M sum computed on the main thread.
t = time.time()
result = 0
worker(range(50_000_000))
worker(range(50_000_000,100_000_000))
print("Sequential Processing result: ", result)
print("Sequential Processing took:",time.time() - t,"\n")
#################################################
# Multithreaded Processing:
#################################################
# Same work split across two threads.  Because the work is CPU-bound
# Python bytecode, the GIL serializes it — expect no speedup (often a
# slowdown from contention), which is the point of this demo.
t = time.time()
result = 0
tr1 = threading.Thread(target=worker, args=(range(50_000_000),))
tr2 = threading.Thread(target=worker, args=(range(50_000_000,100_000_000),))
tr1.start();tr2.start()
tr1.join(); tr2.join()
print("Multithreaded Processing result: ", result)
print("Multithreaded Processing took:",time.time() - t,"\n")
pydoc.py | #! /usr/bin/python2.7
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages. Port number 0 can be
used to get an arbitrary unused port.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
https://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
    from collections import deque
except ImportError:
    # Python 2.3 compatibility
    # Minimal stand-in — presumably only popleft() is used by this module;
    # confirm against the call sites before extending.
    class deque(list):
        def popleft(self):
            return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    dirs = []
    seen = []
    for entry in sys.path:
        # Empty entries mean the current directory.
        path = os.path.abspath(entry or '.')
        key = os.path.normcase(path)
        if key not in seen and os.path.isdir(path):
            dirs.append(path)
            seen.append(key)
    return dirs
def getdoc(object):
    """Get the doc string or comments for an object."""
    text = inspect.getdoc(object) or inspect.getcomments(object)
    text = _encode(text)
    if not text:
        return ''
    # Drop a leading blank (whitespace-only) line and trailing whitespace.
    return re.sub('^ *\n', '', text.rstrip()) or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest."""
    lines = doc.strip().split('\n')
    if len(lines) == 1:
        # Single line: the whole thing is the synopsis.
        return lines[0], ''
    if len(lines) >= 2 and not lines[1].rstrip():
        # First line followed by a blank line: synopsis + body.
        return lines[0], '\n'.join(lines[2:])
    # No blank separator: treat everything as the body.
    return '', '\n'.join(lines)
def classname(object, modname):
    """Get a class name and qualify it with a module name if necessary."""
    qualified = object.__name__
    # Prefix with the defining module unless it is the one being documented.
    if object.__module__ != modname:
        qualified = '%s.%s' % (object.__module__, qualified)
    return qualified
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    # "Data" is anything that is not a module, class, routine, frame,
    # traceback, or code object.
    if inspect.ismodule(object) or inspect.isclass(object):
        return False
    if inspect.isroutine(object) or inspect.isframe(object):
        return False
    return not (inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
    """Do a series of global replacements on a string."""
    # pairs holds (old, new) values flattened: old1, new1, old2, new2, ...
    for i in range(0, len(pairs), 2):
        text = text.replace(pairs[i], pairs[i + 1])
    return text
def cram(text, maxlen):
    """Omit part of a string if needed to make it fit in a maximum length."""
    if len(text) <= maxlen:
        return text
    # Keep roughly equal head and tail fragments around a '...' marker.
    head = max(0, (maxlen - 3) // 2)
    tail = max(0, maxlen - 3 - head)
    return text[:head] + '...' + text[len(text) - tail:]
# Matches trailing " at 0x<hex id>" in default object reprs.
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)

def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # IGNORECASE because the case of %p output is implementation-dependent.
    return _re_stripid.sub(r'\1', text)
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
    """Decide whether to show documentation on a variable."""
    # Certain special names carry no useful documentation.
    if name in ('__builtins__', '__doc__', '__file__', '__path__',
                '__module__', '__name__', '__slots__', '__package__'):
        return 0
    # Dunder names are shown even though they start with an underscore.
    if name.startswith('__') and name.endswith('__'):
        return 1
    # Namedtuples expose public fields/methods with one leading underscore.
    if name.startswith('_') and hasattr(obj, '_fields'):
        return 1
    if all is not None:
        # Only document what the programmer exported in __all__.
        return name in all
    return not name.startswith('_')
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    def fixup(record):
        attr_name, kind, cls, value = record
        if inspect.isdatadescriptor(value):
            # Report every data descriptor uniformly, whatever label
            # inspect gave it (e.g. 'property').
            kind = 'data descriptor'
        return attr_name, kind, cls, value
    return [fixup(record) for record in inspect.classify_class_attrs(object)]
# ----------------------------------------------------- Unicode support helpers
try:
    _unicode = unicode
except NameError:
    # If Python is built without Unicode support, the unicode type
    # will not exist.  Fake one that nothing will match, and make
    # the _encode function a no-op.
    class _unicode(object):
        pass
    _encoding = 'ascii'
    def _encode(text, encoding='ascii'):
        # No unicode type exists in this build, so there is nothing to encode.
        return text
else:
    import locale
    _encoding = locale.getpreferredencoding()
    def _encode(text, encoding=None):
        # Encode unicode text for output, turning unencodable characters
        # into XML character references; byte strings pass through unchanged.
        if isinstance(text, unicode):
            return text.encode(encoding or _encoding, 'xmlcharrefreplace')
        else:
            return text
def _binstr(obj):
    """Return an encoded (binary) string representation of obj, even if it
    is a unicode string."""
    if isinstance(obj, _unicode):
        # Encode with the preferred locale encoding; unencodable characters
        # become XML character references.
        return obj.encode(_encoding, 'xmlcharrefreplace')
    return str(obj)
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    if not os.path.isdir(path):
        return False
    # A directory is a package if it holds an __init__ module in any form.
    return any(os.path.isfile(os.path.join(path, '__init__' + ext))
               for ext in ('.py', '.pyc', '.pyo'))
def source_synopsis(file):
    """Return the first line of a module file's docstring, or None.

    Reads from an open file object; only plain (possibly raw)
    double-triple-quoted docstrings are recognized.
    """
    line = file.readline()
    # Skip leading comment lines and blank lines.
    while line[:1] == '#' or not line.strip():
        line = file.readline()
        if not line:
            break
    line = line.strip()
    if line[:4] == 'r"""':
        line = line[1:]  # treat a raw docstring like a plain one
    if line[:3] != '"""':
        return None
    line = line[3:]
    if line[-1:] == '\\':
        line = line[:-1]  # drop a line-continuation backslash
    # Advance to the first non-blank line of the docstring.
    while not line.strip():
        line = file.readline()
        if not line:
            break
    return line.split('"""')[0].strip()
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file.

    Results are memoized in 'cache' (the mutable default is intentional),
    keyed by filename and invalidated when the file's mtime changes.
    """
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (None, None))
    if lastupdate is None or lastupdate < mtime:
        info = inspect.getmoduleinfo(filename)
        try:
            file = open(filename)
        except IOError:
            # module can't be opened, so skip it
            return None
        if info and 'b' in info[2]: # binary modules have to be imported
            # Load under a throwaway name, then drop it from sys.modules.
            # NOTE: any exception during load is deliberately swallowed and
            # reported as "no synopsis".
            try: module = imp.load_module('__temp__', file, filename, info[1:])
            except: return None
            result = module.__doc__.splitlines()[0] if module.__doc__ else None
            del sys.modules['__temp__']
        else: # text modules can be directly examined
            result = source_synopsis(file)
            file.close()
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""

    def __init__(self, filename, exc_info):
        # exc_info is a (type, value, traceback) triple as from sys.exc_info().
        self.filename = filename
        self.exc, self.value, self.tb = exc_info

    def __str__(self):
        exc = self.exc
        # Old-style exception classes are shown by name, not by repr.
        if type(exc) is types.ClassType:
            exc = exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path.

    Raises ErrorDuringImport (wrapping the original exception info) if the
    module is found but fails to load.
    """
    magic = imp.get_magic()
    # Peek at the first bytes: a compiled file starts with the magic number.
    file = open(path, 'r')
    try:
        if file.read(len(magic)) == magic:
            kind = imp.PY_COMPILED
        else:
            kind = imp.PY_SOURCE
    finally:
        file.close()
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    try:
        module = imp.load_module(name, file, path, (ext, 'r', kind))
    except:
        raise ErrorDuringImport(path, sys.exc_info())
    finally:
        # BUG FIX: the handle used to leak when load_module raised, because
        # the close() call was only reached on the success path.
        file.close()
    return module
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.

    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised.  Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning.  If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    # NOTE: the mutable default 'cache' is intentional -- it keeps removed
    # modules referenced across calls so they are not garbage collected.
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module.  Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Avoid simply calling reload() because it leaves names in
                # the currently loaded module lying around if they're not
                # defined in the new source file.  Instead, remove the
                # module from sys.modules and re-import.  Also remove any
                # submodules because they won't appear in the newly loaded
                # module's namespace if they're already in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
            # The import error occurred directly in this function,
            # which means there is no such module in the path.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # __import__ returns the top-level package; walk down to the submodule.
    for part in split(path, '.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Base class for documentation formatters; HTMLDoc and TextDoc
    subclass this and provide the doc* rendering methods."""

    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        # Descriptor checks come first so getset/member descriptors are not
        # misclassified by the routine/class checks below.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)

    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        raise TypeError, message

    # Subclasses override these; anything left unimplemented raises via fail.
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail

    def getdocloc(self, object,
                  basedir=os.path.join(sys.exec_prefix, "lib",
                                       "python"+sys.version[0:3])):
        """Return the location of module docs or None"""
        # NOTE: 'basedir' is evaluated once, at class-definition time.
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        docloc = os.environ.get("PYTHONDOCS",
                                "https://docs.python.org/library")
        basedir = os.path.normcase(basedir)
        # Link to online docs only for modules shipped with Python: either a
        # known core module name, or a file under the stdlib directory that
        # is not in site-packages/dist-packages.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 'thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'dist-packages')) and
              not file.startswith(os.path.join(basedir, 'site-packages')))) and
            object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
            if docloc.startswith(("http://", "https://")):
                docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__.lower())
            else:
                docloc = os.path.join(docloc, object.__name__.lower() + ".html")
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""

    def __init__(self):
        Repr.__init__(self)
        # Allow larger containers/strings than Repr's defaults.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def escape(self, text):
        """HTML-escape '&', '<' and '>' in text."""
        # BUG FIX: the replacement strings had been mangled into the literal
        # characters themselves ('&' -> '&'), so no escaping happened at all.
        # '&' must be replaced first so the entities produced for '<' and '>'
        # are not themselves re-escaped.
        return (text.replace('&', '&amp;')
                    .replace('<', '&lt;')
                    .replace('>', '&gt;'))

    def repr(self, object):
        return Repr.repr(self, object)

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_* method when one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + '_'.join(type(x).__name__.split())
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in testrepr.replace('\\\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        # Colorize escape sequences in the (escaped) repr.
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            # repr() itself may raise for broken objects.
            return self.escape('<%s instance>' % x.__class__.__name__)

    repr_unicode = repr_string
class HTMLDoc(Doc):
    """Formatter class for HTML documentation.

    NOTE(review): several '&nbsp;' and '&quot;' entities in the templates
    below had been mangled into literal spaces/quotes; they are restored
    to the intended HTML entities throughout this class.
    """

    # ------------------------------------------- HTML formatting utilities

    _repr_instance = HTMLRepr()
    repr = _repr_instance.repr
    escape = _repr_instance.escape

    def page(self, title, contents):
        """Format an HTML page."""
        return _encode('''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
<meta charset="utf-8">
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents), 'ascii')

    def heading(self, title, fgcol, bgcol, extras=''):
        """Format a page heading."""
        # BUG FIX: '&nbsp;' entities restored (they had become plain spaces).
        return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')

    def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
        """Format a section with a heading."""
        # BUG FIX: the default gap and the marginalia filler are
        # non-breaking spaces, not plain spaces.
        if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
        result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
        if prelude:
            result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
        else:
            result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
        return result + '\n<td width="100%%">%s</td></tr></table>' % contents

    def bigsection(self, title, *args):
        """Format a section with a big heading."""
        title = '<big><strong>%s</strong></big>' % title
        return self.section(title, *args)

    def preformat(self, text):
        """Format literal preformatted text."""
        text = self.escape(expandtabs(text))
        # BUG FIX: spaces must become '&nbsp;' (the entity had been mangled
        # into a literal space, making that replacement a no-op).
        return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                             ' ', '&nbsp;', '\n', '<br>\n')

    def multicolumn(self, list, format, cols=4):
        """Format a list of items into a multi-column list."""
        result = ''
        rows = (len(list)+cols-1)//cols
        for col in range(cols):
            result = result + '<td width="%d%%" valign=top>' % (100//cols)
            for i in range(rows*col, rows*col+rows):
                if i < len(list):
                    result = result + format(list[i]) + '<br>\n'
            result = result + '</td>'
        return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result

    def grey(self, text): return '<font color="#909090">%s</font>' % text

    def namelink(self, name, *dicts):
        """Make a link for an identifier, given name-to-URL mappings."""
        for dict in dicts:
            if name in dict:
                return '<a href="%s">%s</a>' % (dict[name], name)
        return name

    def classlink(self, object, modname):
        """Make a link for a class."""
        name, module = object.__name__, sys.modules.get(object.__module__)
        # Only link when the class is reachable under its own module's name.
        if hasattr(module, name) and getattr(module, name) is object:
            return '<a href="%s.html#%s">%s</a>' % (
                module.__name__, name, classname(object, modname))
        return classname(object, modname)

    def modulelink(self, object):
        """Make a link for a module."""
        return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)

    def modpkglink(self, data):
        """Make a link for a module or package to display in an index."""
        name, path, ispackage, shadowed = data
        if shadowed:
            return self.grey(name)
        if path:
            url = '%s.%s.html' % (path, name)
        else:
            url = '%s.html' % name
        if ispackage:
            # BUG FIX: '&nbsp;' restored before "(package)".
            text = '<strong>%s</strong>&nbsp;(package)' % name
        else:
            text = name
        return '<a href="%s">%s</a>' % (url, text)

    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?(\w+))')
        while True:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # BUG FIX: '&quot;' had been mangled to a literal '"',
                # turning this into a no-op replacement.
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif selfdot:
                # Create a link for methods like 'self.method(...)'
                # and use <strong> for attributes like 'self.attr'
                if text[end:end+1] == '(':
                    results.append('self.' + self.namelink(name, methods))
                else:
                    results.append('self.<strong>%s</strong>' % name)
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return join(results, '')

    # ---------------------------------------------- type-specific routines

    def formattree(self, tree, modname, parent=None):
        """Produce HTML for a class tree as given by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                c, bases = entry
                result = result + '<dt><font face="helvetica, arial">'
                result = result + self.classlink(c, modname)
                if bases and bases != (parent,):
                    parents = []
                    for base in bases:
                        parents.append(self.classlink(base, modname))
                    result = result + '(' + join(parents, ', ') + ')'
                result = result + '\n</font></dt>'
            elif type(entry) is type([]):
                # A list entry holds the subtree of the preceding class.
                result = result + '<dd>\n%s</dd>\n' % self.formattree(
                    entry, modname, c)
        return '<dl>\n%s</dl>\n' % result

    def docmodule(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a module object."""
        name = object.__name__ # ignore the passed-in name
        try:
            all = object.__all__
        except AttributeError:
            all = None
        parts = split(name, '.')
        links = []
        for i in range(len(parts)-1):
            links.append(
                '<a href="%s.html"><font color="#ffffff">%s</font></a>' %
                (join(parts[:i+1], '.'), parts[i]))
        linkedname = join(links + parts[-1:], '.')
        head = '<big><big><strong>%s</strong></big></big>' % linkedname
        try:
            path = inspect.getabsfile(object)
            url = path
            if sys.platform == 'win32':
                import nturl2path
                url = nturl2path.pathname2url(path)
            filelink = '<a href="file:%s">%s</a>' % (url, path)
        except TypeError:
            filelink = '(built-in)'
        info = []
        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # Strip CVS/SVN "$Revision: ... $" wrappers.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            info.append('version %s' % self.escape(version))
        if hasattr(object, '__date__'):
            info.append(self.escape(_binstr(object.__date__)))
        if info:
            head = head + ' (%s)' % join(info, ', ')
        docloc = self.getdocloc(object)
        if docloc is not None:
            docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
        else:
            docloc = ''
        result = self.heading(
            head, '#ffffff', '#7799ee',
            '<a href=".">index</a><br>' + filelink + docloc)

        modules = inspect.getmembers(object, inspect.ismodule)

        classes, cdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
                    cdict[key] = cdict[value] = '#' + key
        for key, value in classes:
            for base in value.__bases__:
                key, modname = base.__name__, base.__module__
                module = sys.modules.get(modname)
                if modname != name and module and hasattr(module, key):
                    if getattr(module, key) is base:
                        if not key in cdict:
                            cdict[key] = cdict[base] = modname + '.html#' + key
        funcs, fdict = [], {}
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
                    fdict[key] = '#-' + key
                    if inspect.isfunction(value): fdict[value] = fdict[key]
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))

        doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        if hasattr(object, '__path__'):
            modpkgs = []
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs.append((modname, name, ispkg, 0))
            modpkgs.sort()
            contents = self.multicolumn(modpkgs, self.modpkglink)
            result = result + self.bigsection(
                'Package Contents', '#ffffff', '#aa55cc', contents)
        elif modules:
            contents = self.multicolumn(
                modules, lambda key_value, s=self: s.modulelink(key_value[1]))
            result = result + self.bigsection(
                'Modules', '#ffffff', '#aa55cc', contents)

        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [
                self.formattree(inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Classes', '#ffffff', '#ee77aa', join(contents))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name, fdict, cdict))
            result = result + self.bigsection(
                'Functions', '#ffffff', '#eeaa77', join(contents))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.document(value, key))
            result = result + self.bigsection(
                'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
        if hasattr(object, '__author__'):
            contents = self.markup(_binstr(object.__author__), self.preformat)
            result = result + self.bigsection(
                'Author', '#ffffff', '#7799ee', contents)
        if hasattr(object, '__credits__'):
            contents = self.markup(_binstr(object.__credits__), self.preformat)
            result = result + self.bigsection(
                'Credits', '#ffffff', '#7799ee', contents)

        return result

    def docclass(self, object, name=None, mod=None, funcs={}, classes={},
                 *ignored):
        """Produce HTML documentation for a class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__

        contents = []
        push = contents.append

        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('<hr>\n')
                self.needone = 1
        hr = HorizontalRule()

        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            hr.maybe()
            push('<dl><dt>Method resolution order:</dt>\n')
            for base in mro:
                push('<dd>%s</dd>\n' % self.classlink(base,
                                                      object.__module__))
            push('</dl>\n')

        def spill(msg, attrs, predicate):
            # Document routine-like attributes matching 'predicate'.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value, name, mod,
                                           funcs, classes, mdict, object))
                    push('\n')
            return attrs

        def spilldescriptors(msg, attrs, predicate):
            # Document data descriptors matching 'predicate'.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs

        def spilldata(msg, attrs, predicate):
            # Document plain data attributes matching 'predicate'.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    base = self.docother(getattr(object, name), name, mod)
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getattr(value, "__doc__", None)
                    else:
                        doc = None
                    if doc is None:
                        push('<dl><dt>%s</dl>\n' % base)
                    else:
                        doc = self.markup(getdoc(value), self.preformat,
                                          funcs, classes, mdict)
                        doc = '<dd><tt>%s</tt>' % doc
                        push('<dl><dt>%s%s</dl>\n' % (base, doc))
                    push('\n')
            return attrs

        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        mdict = {}
        for key, kind, homecls, value in attrs:
            mdict[key] = anchor = '#' + name + '-' + key
            try:
                # NOTE(review): 'name' (not 'key') matches upstream pydoc
                # here, odd though it looks -- left unchanged.
                value = getattr(object, name)
            except Exception:
                # Some descriptors may meet a failure in their __get__.
                # (bug #1785)
                pass
            try:
                # The value may not be hashable (e.g., a data attr with
                # a dict or list value).
                mdict[value] = anchor
            except TypeError:
                pass

        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

            if thisclass is __builtin__.object:
                attrs = inherited
                continue
            elif thisclass is object:
                tag = 'defined here'
            else:
                tag = 'inherited from %s' % self.classlink(thisclass,
                                                           object.__module__)
            tag += ':<br>\n'

            # Sort attrs by name.
            try:
                attrs.sort(key=lambda t: t[0])
            except TypeError:
                attrs.sort(lambda t1, t2: cmp(t1[0], t2[0]))    # 2.3 compat

            # Pump out the attrs, segregated by kind.
            attrs = spill('Methods %s' % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill('Class methods %s' % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill('Static methods %s' % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata('Data and other attributes %s' % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited

        contents = ''.join(contents)

        if name == realname:
            title = '<a name="%s">class <strong>%s</strong></a>' % (
                name, realname)
        else:
            title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
                name, name, realname)
        if bases:
            parents = []
            for base in bases:
                parents.append(self.classlink(base, object.__module__))
            title = title + '(%s)' % join(parents, ', ')
        doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        # BUG FIX: '&nbsp;' restored after the <br>.
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc

        return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)

    def formatvalue(self, object):
        """Format an argument default value as text."""
        return self.grey('=' + self.repr(object))

    def docroutine(self, object, name=None, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + self.classlink(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % self.classlink(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % self.classlink(imclass,mod)
            object = object.im_func

        if name == realname:
            title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
        else:
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                reallink = '<a href="#%s">%s</a>' % (
                    cl.__name__ + '-' + realname, realname)
                skipdocs = 1
            else:
                reallink = realname
            title = '<a name="%s"><strong>%s</strong></a> = %s' % (
                anchor, name, reallink)
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = '<strong>%s</strong> <em>lambda</em> ' % name
                argspec = argspec[1:-1] # remove parentheses
        else:
            argspec = '(...)'

        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))

        if skipdocs:
            return '<dl><dt>%s</dt></dl>\n' % decl
        else:
            doc = self.markup(
                getdoc(object), self.preformat, funcs, classes, methods)
            doc = doc and '<dd><tt>%s</tt></dd>' % doc
            return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)

    def _docdescriptor(self, name, value, mod):
        # Render a descriptor as a <dl> entry with its doc string, if any.
        results = []
        push = results.append

        if name:
            push('<dl><dt><strong>%s</strong></dt>\n' % name)
        if value.__doc__ is not None:
            doc = self.markup(getdoc(value), self.preformat)
            push('<dd><tt>%s</tt></dd>\n' % doc)
        push('</dl>\n')

        return ''.join(results)

    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a property."""
        return self._docdescriptor(name, object, mod)

    def docother(self, object, name=None, mod=None, *ignored):
        """Produce HTML documentation for a data object."""
        lhs = name and '<strong>%s</strong> = ' % name or ''
        return lhs + self.repr(object)

    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce html documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)

    def index(self, dir, shadowed=None):
        """Generate an HTML index for a directory of modules."""
        modpkgs = []
        if shadowed is None: shadowed = {}
        for importer, name, ispkg in pkgutil.iter_modules([dir]):
            modpkgs.append((name, '', ispkg, name in shadowed))
            shadowed[name] = 1

        modpkgs.sort()
        contents = self.multicolumn(modpkgs, self.modpkglink)
        return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""

    def __init__(self):
        Repr.__init__(self)
        # Allow larger containers/strings than Repr's defaults.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_* method when one exists.
        if hasattr(type(x), '__name__'):
            handler_name = 'repr_' + '_'.join(type(x).__name__.split())
            if hasattr(self, handler_name):
                return getattr(self, handler_name)(x, level)
        return cram(stripid(repr(x)), self.maxother)

    def repr_string(self, x, level):
        shortened = cram(x, self.maxstring)
        shortened_repr = repr(shortened)
        if '\\' in shortened and '\\' not in shortened_repr.replace('\\\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            quote = shortened_repr[0]
            return 'r' + quote + shortened + quote
        return shortened_repr

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except:
            # repr() itself may raise for broken objects.
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
    """Formatter class for text documentation."""

    # ------------------------------------------- text formatting utilities

    # Shared repr helper that truncates long values for display.
    _repr_instance = TextRepr()
    repr = _repr_instance.repr

    def bold(self, text):
        """Format a string in bold by overstriking."""
        # "c\bc" (char, backspace, char) is the classic overstrike sequence
        # understood by pagers such as less/more.
        return join(map(lambda ch: ch + '\b' + ch, text), '')

    def indent(self, text, prefix='    '):
        """Indent text by prepending a given prefix to each line."""
        if not text: return ''
        lines = split(text, '\n')
        lines = map(lambda line, prefix=prefix: prefix + line, lines)
        # Avoid trailing whitespace on the last line.
        if lines: lines[-1] = rstrip(lines[-1])
        return join(lines, '\n')

    def section(self, title, contents):
        """Format a section with a given heading."""
        return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'

    # ---------------------------------------------- type-specific routines

    def formattree(self, tree, modname, parent=None, prefix=''):
        """Render in text a class tree as returned by inspect.getclasstree()."""
        result = ''
        for entry in tree:
            if type(entry) is type(()):
                # (class, bases) tuple: print the class with its base list.
                c, bases = entry
                result = result + prefix + classname(c, modname)
                if bases and bases != (parent,):
                    parents = map(lambda c, m=modname: classname(c, m), bases)
                    result = result + '(%s)' % join(parents, ', ')
                result = result + '\n'
            elif type(entry) is type([]):
                # Nested list: subclasses of the class just printed (``c``
                # deliberately carries over from the previous iteration).
                result = result + self.formattree(
                    entry, modname, c, prefix + '    ')
        return result

    def docmodule(self, object, name=None, mod=None):
        """Produce text documentation for a given module object."""
        name = object.__name__ # ignore the passed-in name
        synop, desc = splitdoc(getdoc(object))
        result = self.section('NAME', name + (synop and ' - ' + synop))
        try:
            all = object.__all__
        except AttributeError:
            all = None
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            # Built-in modules have no source file.
            file = '(built-in)'
        result = result + self.section('FILE', file)
        docloc = self.getdocloc(object)
        if docloc is not None:
            result = result + self.section('MODULE DOCS', docloc)
        if desc:
            result = result + self.section('DESCRIPTION', desc)
        classes = []
        for key, value in inspect.getmembers(object, inspect.isclass):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None
                or (inspect.getmodule(value) or object) is object):
                if visiblename(key, all, object):
                    classes.append((key, value))
        funcs = []
        for key, value in inspect.getmembers(object, inspect.isroutine):
            # if __all__ exists, believe it.  Otherwise use old heuristic.
            if (all is not None or
                inspect.isbuiltin(value) or inspect.getmodule(value) is object):
                if visiblename(key, all, object):
                    funcs.append((key, value))
        data = []
        for key, value in inspect.getmembers(object, isdata):
            if visiblename(key, all, object):
                data.append((key, value))
        modpkgs = []
        modpkgs_names = set()
        if hasattr(object, '__path__'):
            for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
                modpkgs_names.add(modname)
                if ispkg:
                    modpkgs.append(modname + ' (package)')
                else:
                    modpkgs.append(modname)
            modpkgs.sort()
            result = result + self.section(
                'PACKAGE CONTENTS', join(modpkgs, '\n'))
        # Detect submodules as sometimes created by C extensions
        submodules = []
        for key, value in inspect.getmembers(object, inspect.ismodule):
            if value.__name__.startswith(name + '.') and key not in modpkgs_names:
                submodules.append(key)
        if submodules:
            submodules.sort()
            result = result + self.section(
                'SUBMODULES', join(submodules, '\n'))
        if classes:
            classlist = map(lambda key_value: key_value[1], classes)
            contents = [self.formattree(
                inspect.getclasstree(classlist, 1), name)]
            for key, value in classes:
                contents.append(self.document(value, key, name))
            result = result + self.section('CLASSES', join(contents, '\n'))
        if funcs:
            contents = []
            for key, value in funcs:
                contents.append(self.document(value, key, name))
            result = result + self.section('FUNCTIONS', join(contents, '\n'))
        if data:
            contents = []
            for key, value in data:
                contents.append(self.docother(value, key, name, maxlen=70))
            result = result + self.section('DATA', join(contents, '\n'))
        if hasattr(object, '__version__'):
            version = _binstr(object.__version__)
            # '$' + 'Revision: ' is split so this source line itself is not
            # rewritten by RCS/CVS keyword expansion.
            if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
                version = strip(version[11:-1])
            result = result + self.section('VERSION', version)
        if hasattr(object, '__date__'):
            result = result + self.section('DATE', _binstr(object.__date__))
        if hasattr(object, '__author__'):
            result = result + self.section('AUTHOR', _binstr(object.__author__))
        if hasattr(object, '__credits__'):
            result = result + self.section('CREDITS', _binstr(object.__credits__))
        return result

    def docclass(self, object, name=None, mod=None, *ignored):
        """Produce text documentation for a given class object."""
        realname = object.__name__
        name = name or realname
        bases = object.__bases__

        def makename(c, m=object.__module__):
            return classname(c, m)

        if name == realname:
            title = 'class ' + self.bold(realname)
        else:
            title = self.bold(name) + ' = class ' + realname
        if bases:
            parents = map(makename, bases)
            title = title + '(%s)' % join(parents, ', ')

        doc = getdoc(object)
        contents = doc and [doc + '\n'] or []
        # All the helpers below append to ``contents`` through this closure.
        push = contents.append

        # List the mro, if non-trivial.
        mro = deque(inspect.getmro(object))
        if len(mro) > 2:
            push("Method resolution order:")
            for base in mro:
                push('    ' + makename(base))
            push('')

        # Cute little class to pump out a horizontal rule between sections.
        class HorizontalRule:
            def __init__(self):
                self.needone = 0
            def maybe(self):
                if self.needone:
                    push('-' * 70)
                self.needone = 1
        hr = HorizontalRule()

        def spill(msg, attrs, predicate):
            # Document the attrs matching predicate; return the rest.
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    try:
                        value = getattr(object, name)
                    except Exception:
                        # Some descriptors may meet a failure in their __get__.
                        # (bug #1785)
                        push(self._docdescriptor(name, value, mod))
                    else:
                        push(self.document(value,
                                           name, mod, object))
            return attrs

        def spilldescriptors(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    push(self._docdescriptor(name, value, mod))
            return attrs

        def spilldata(msg, attrs, predicate):
            ok, attrs = _split_list(attrs, predicate)
            if ok:
                hr.maybe()
                push(msg)
                for name, kind, homecls, value in ok:
                    # Only callables and data descriptors carry useful docs.
                    if (hasattr(value, '__call__') or
                            inspect.isdatadescriptor(value)):
                        doc = getdoc(value)
                    else:
                        doc = None
                    push(self.docother(getattr(object, name),
                                       name, mod, maxlen=70, doc=doc) + '\n')
            return attrs

        attrs = filter(lambda data: visiblename(data[0], obj=object),
                       classify_class_attrs(object))
        # Walk the MRO, documenting the attributes each class contributes.
        while attrs:
            if mro:
                thisclass = mro.popleft()
            else:
                thisclass = attrs[0][2]
            attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)

            if thisclass is __builtin__.object:
                # Skip the attributes everything inherits from object.
                attrs = inherited
                continue
            elif thisclass is object:
                tag = "defined here"
            else:
                tag = "inherited from %s" % classname(thisclass,
                                                      object.__module__)

            # Sort attrs by name.
            attrs.sort()

            # Pump out the attrs, segregated by kind.
            attrs = spill("Methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'method')
            attrs = spill("Class methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'class method')
            attrs = spill("Static methods %s:\n" % tag, attrs,
                          lambda t: t[1] == 'static method')
            attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
                                     lambda t: t[1] == 'data descriptor')
            attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
                              lambda t: t[1] == 'data')
            assert attrs == []
            attrs = inherited

        contents = '\n'.join(contents)
        if not contents:
            return title + '\n'
        return title + '\n' + self.indent(rstrip(contents), ' |  ') + '\n'

    def formatvalue(self, object):
        """Format an argument default value as text."""
        return '=' + self.repr(object)

    def docroutine(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a function or method object."""
        realname = object.__name__
        name = name or realname
        note = ''
        skipdocs = 0
        if inspect.ismethod(object):
            imclass = object.im_class
            if cl:
                if imclass is not cl:
                    note = ' from ' + classname(imclass, mod)
            else:
                if object.im_self is not None:
                    note = ' method of %s instance' % classname(
                        object.im_self.__class__, mod)
                else:
                    note = ' unbound %s method' % classname(imclass, mod)
            # Document the underlying function from here on.
            object = object.im_func

        if name == realname:
            title = self.bold(realname)
        else:
            # An alias: skip the docs if it points at a sibling class attr.
            if (cl and realname in cl.__dict__ and
                cl.__dict__[realname] is object):
                skipdocs = 1
            title = self.bold(name) + ' = ' + realname
        if inspect.isfunction(object):
            args, varargs, varkw, defaults = inspect.getargspec(object)
            argspec = inspect.formatargspec(
                args, varargs, varkw, defaults, formatvalue=self.formatvalue)
            if realname == '<lambda>':
                title = self.bold(name) + ' lambda '
                argspec = argspec[1:-1] # remove parentheses
        else:
            # Built-ins and other routines have no introspectable signature.
            argspec = '(...)'
        decl = title + argspec + note

        if skipdocs:
            return decl + '\n'
        else:
            doc = getdoc(object) or ''
            return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')

    def _docdescriptor(self, name, value, mod):
        # Render a descriptor as its bold name plus its (indented) docstring.
        results = []
        push = results.append

        if name:
            push(self.bold(name))
            push('\n')
        doc = getdoc(value) or ''
        if doc:
            push(self.indent(doc))
            push('\n')
        return ''.join(results)

    def docproperty(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a property."""
        return self._docdescriptor(name, object, mod)

    def docdata(self, object, name=None, mod=None, cl=None):
        """Produce text documentation for a data descriptor."""
        return self._docdescriptor(name, object, mod)

    def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
        """Produce text documentation for a data object."""
        repr = self.repr(object)
        if maxlen:
            # Truncate against the plain line first, then rebuild with bold --
            # overstrike characters would distort the length computation.
            line = (name and name + ' = ' or '') + repr
            chop = maxlen - len(line)
            if chop < 0: repr = repr[:chop] + '...'
        line = (name and self.bold(name) + ' = ' or '') + repr
        if doc is not None:
            line += '\n' + self.indent(str(doc))
        return line
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    global pager
    # Rebind the module-level name ``pager`` to the concrete pager chosen by
    # getpager(), so later calls skip the environment probing entirely.
    pager = getpager()
    pager(text)
def getpager():
    """Decide what method to use for paging through text."""
    # Non-file stdout (e.g. redirected into an object) -> no paging possible.
    if type(sys.stdout) is not types.FileType:
        return plainpager
    if not hasattr(sys.stdin, "isatty"):
        return plainpager
    # Paging only makes sense on an interactive terminal.
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            # Dumb terminals can't render overstrike bold; strip it.
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32' or sys.platform.startswith('os2'):
        return lambda text: tempfilepager(plain(text), 'more <')
    # Probe for common pagers; os.system returns 0 when the command exists.
    if hasattr(os, 'system') and os.system('(pager) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'pager')
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')

    # Last resort: check whether ``more`` works by running it on an empty
    # temporary file, then fall back to the built-in tty pager.
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting from text."""
    # Overstruck bold is emitted as "c\bc"; deleting every
    # character-plus-backspace pair leaves the plain character behind.
    overstrike = re.compile('.\b')
    return overstrike.sub('', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    proc = os.popen(cmd, 'w')
    try:
        proc.write(_encode(text))
        proc.close()
    except IOError:
        # The user may quit the pager before it has read everything; the
        # resulting broken pipe is expected and harmless.
        pass
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file."""
    import tempfile
    # mkstemp instead of the race-prone, deprecated mktemp: the file is
    # created atomically and we write through its already-open descriptor,
    # so no other process can swap the path underneath us.
    (fd, filename) = tempfile.mkstemp()
    file = os.fdopen(fd, 'w')
    file.write(_encode(text))
    file.close()
    try:
        os.system(cmd + ' "' + filename + '"')
    finally:
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal."""
    lines = plain(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding))).split('\n')
    try:
        # Prefer unbuffered single-key input when a tty is available.
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        # No tty module (e.g. Windows): fall back to line-based input and
        # use only the first character of each entered line.
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]

    try:
        # Page height from $LINES, defaulting to a 25-line screen.
        try:
            h = int(os.environ.get('LINES', 0))
        except ValueError:
            h = 0
        if h <= 1:
            h = 25
        r = inc = h - 1
        sys.stdout.write(join(lines[:inc], '\n') + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()

            if c in ('q', 'Q'):
                # Quit: blank out the "-- more --" prompt and stop.
                sys.stdout.write('\r          \r')
                break
            elif c in ('\r', '\n'):
                # Enter: advance a single line.
                sys.stdout.write('\r          \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                # b / Escape: go back one page (shown again by the write below).
                r = r - inc - inc
                if r < 0: r = 0
            # Any other key: show the next page.
            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
            r = r + inc
    finally:
        # Restore the terminal settings changed by setcbreak above.
        if tty:
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text.  This is the ultimate fallback."""
    # Strip overstrike bold and encode for the current stdout before writing.
    sys.stdout.write(_encode(plain(text), getattr(sys.stdout, 'encoding', _encoding)))
def describe(thing):
    """Produce a short description of the given thing."""
    if inspect.ismodule(thing):
        name = thing.__name__
        if name in sys.builtin_module_names:
            return 'built-in module ' + name
        if hasattr(thing, '__path__'):
            return 'package ' + name
        return 'module ' + name
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    if inspect.isgetsetdescriptor(thing) or inspect.ismemberdescriptor(thing):
        kind = ('getset descriptor' if inspect.isgetsetdescriptor(thing)
                else 'member descriptor')
        return '%s %s.%s.%s' % (kind,
                                thing.__objclass__.__module__,
                                thing.__objclass__.__name__,
                                thing.__name__)
    if inspect.isclass(thing):
        return 'class ' + thing.__name__
    if inspect.isfunction(thing):
        return 'function ' + thing.__name__
    if inspect.ismethod(thing):
        return 'method ' + thing.__name__
    # Old-style (classic) class instances get a special description.
    if type(thing) is types.InstanceType:
        return 'instance of ' + thing.__class__.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary."""
    parts = [part for part in split(path, '.') if part]
    # Import the longest importable prefix of the dotted path.
    module, n = None, 0
    while n < len(parts):
        candidate = safeimport(join(parts[:n+1], '.'), forceload)
        if not candidate:
            break
        module, n = candidate, n + 1
    # Resolve whatever remains as attribute lookups, starting either from
    # the deepest module found or from the builtins.
    object = module if module else __builtin__
    for part in parts[n:]:
        try:
            object = getattr(object, part)
        except AttributeError:
            return None
    return object
# --------------------------------------- interactive interpreter interface
# Shared formatter singletons used by the top-level helper functions below.
text = TextDoc()
html = HTMLDoc()

# Sentinel type for detecting old-style (classic) class instances.
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if object is None:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
name = getattr(thing, '__name__', None)
return thing, name if isinstance(name, str) else None
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Render text documentation, given an object or a path to an object."""
    object, name = resolve(thing, forceload)
    desc = describe(object)
    module = inspect.getmodule(object)
    # Qualify the description with where the object lives.
    if name and '.' in name:
        desc += ' in ' + name[:name.rfind('.')]
    elif module and module is not object:
        desc += ' in module ' + module.__name__

    if type(object) is _OLD_INSTANCE_TYPE:
        # If the passed object is an instance of an old-style class,
        # document its available methods instead of its value.
        object = object.__class__
    else:
        directly_documentable = (inspect.ismodule(object) or
                                 inspect.isclass(object) or
                                 inspect.isroutine(object) or
                                 inspect.isgetsetdescriptor(object) or
                                 inspect.ismemberdescriptor(object) or
                                 isinstance(object, property))
        if not directly_documentable:
            # A piece of data or a new-style instance: document its type's
            # methods instead of its value.
            object = type(object)
            desc += ' object'
    return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Display text documentation, given an object or a path to an object."""
    try:
        pager(render_doc(thing, title, forceload))
    except (ImportError, ErrorDuringImport), value:
        # Resolution failures are reported to the user, not raised.
        print value
def writedoc(thing, forceload=0):
    """Write HTML documentation to a file in the current directory."""
    try:
        object, name = resolve(thing, forceload)
        page = html.page(describe(object), html.document(object, name))
        # Output file is named after the resolved object, e.g. "os.html".
        file = open(name + '.html', 'w')
        file.write(page)
        file.close()
        print 'wrote', name + '.html'
    except (ImportError, ErrorDuringImport), value:
        # Resolution failures are reported to the user, not raised.
        print value
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    # ``done`` is kept for interface compatibility; this implementation
    # does not consult it.
    if done is None:
        done = {}
    for _importer, modname, _ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
class Helper:
    """Interactive help session: prompts for topics, keywords, symbols and
    module names and displays the corresponding documentation."""

    # These dictionaries map a topic name to either an alias, or a tuple
    # (label, seealso-items).  The "label" is the label of the corresponding
    # section in the .rst file under Doc/ and an index into the dictionary
    # in pydoc_data/topics.py.
    #
    # CAUTION: if you change one of these dictionaries, be sure to adapt the
    #          list of needed labels in Doc/tools/pyspecific.py and
    #          regenerate the pydoc_data/topics.py file by running
    #              make pydoc-topics
    #          in Doc/ and copying the output file into the Lib/ directory.

    keywords = {
        'and': 'BOOLEAN',
        'as': 'with',
        'assert': ('assert', ''),
        'break': ('break', 'while for'),
        'class': ('class', 'CLASSES SPECIALMETHODS'),
        'continue': ('continue', 'while for'),
        'def': ('function', ''),
        'del': ('del', 'BASICMETHODS'),
        'elif': 'if',
        'else': ('else', 'while for'),
        'except': 'try',
        'exec': ('exec', ''),
        'finally': 'try',
        'for': ('for', 'break continue while'),
        'from': 'import',
        'global': ('global', 'NAMESPACES'),
        'if': ('if', 'TRUTHVALUE'),
        'import': ('import', 'MODULES'),
        'in': ('in', 'SEQUENCEMETHODS2'),
        'is': 'COMPARISON',
        'lambda': ('lambda', 'FUNCTIONS'),
        'not': 'BOOLEAN',
        'or': 'BOOLEAN',
        'pass': ('pass', ''),
        'print': ('print', ''),
        'raise': ('raise', 'EXCEPTIONS'),
        'return': ('return', 'FUNCTIONS'),
        'try': ('try', 'EXCEPTIONS'),
        'while': ('while', 'break continue if TRUTHVALUE'),
        'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
        'yield': ('yield', ''),
    }
    # Either add symbols to this dictionary or to the symbols dictionary
    # directly: Whichever is easier. They are merged later.
    _symbols_inverse = {
        'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
        'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
                       '|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
        'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
        'UNARY' : ('-', '~'),
        'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
                                '^=', '<<=', '>>=', '**=', '//='),
        'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
        'COMPLEX' : ('j', 'J')
    }
    symbols = {
        '%': 'OPERATORS FORMATTING',
        '**': 'POWER',
        ',': 'TUPLES LISTS FUNCTIONS',
        '.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
        '...': 'ELLIPSIS',
        ':': 'SLICINGS DICTIONARYLITERALS',
        '@': 'def class',
        '\\': 'STRINGS',
        '_': 'PRIVATENAMES',
        '__': 'PRIVATENAMES SPECIALMETHODS',
        '`': 'BACKQUOTES',
        '(': 'TUPLES FUNCTIONS CALLS',
        ')': 'TUPLES FUNCTIONS CALLS',
        '[': 'LISTS SUBSCRIPTS SLICINGS',
        ']': 'LISTS SUBSCRIPTS SLICINGS'
    }
    # Merge _symbols_inverse into symbols.  This is class-body code and runs
    # once, when the class object is created.
    for topic, symbols_ in _symbols_inverse.iteritems():
        for symbol in symbols_:
            topics = symbols.get(symbol, topic)
            if topic not in topics:
                topics = topics + ' ' + topic
            symbols[symbol] = topics

    topics = {
        'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
                  'FUNCTIONS CLASSES MODULES FILES inspect'),
        'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
                    'TYPES'),
        'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
        'FORMATTING': ('formatstrings', 'OPERATORS'),
        'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
                    'FORMATTING TYPES'),
        'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
        'INTEGER': ('integers', 'int range'),
        'FLOAT': ('floating', 'float math'),
        'COMPLEX': ('imaginary', 'complex cmath'),
        'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
        'MAPPINGS': 'DICTIONARIES',
        'FUNCTIONS': ('typesfunctions', 'def TYPES'),
        'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
        'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
        'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
        'FRAMEOBJECTS': 'TYPES',
        'TRACEBACKS': 'TYPES',
        'NONE': ('bltin-null-object', ''),
        'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
        'FILES': ('bltin-file-objects', ''),
        'SPECIALATTRIBUTES': ('specialattrs', ''),
        'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
        'MODULES': ('typesmodules', 'import'),
        'PACKAGES': 'import',
        'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
                        'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
                        'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
                        'LISTS DICTIONARIES BACKQUOTES'),
        'OPERATORS': 'EXPRESSIONS',
        'PRECEDENCE': 'EXPRESSIONS',
        'OBJECTS': ('objects', 'TYPES'),
        'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
                           'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
                           'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
        'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
        'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
        'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
        'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
                             'SPECIALMETHODS'),
        'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
                             'SPECIALMETHODS'),
        'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
        'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
                          'SPECIALMETHODS'),
        'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
        'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
        'DYNAMICFEATURES': ('dynamic-features', ''),
        'SCOPING': 'NAMESPACES',
        'FRAMES': 'NAMESPACES',
        'EXCEPTIONS': ('exceptions', 'try except finally raise'),
        'COERCIONS': ('coercion-rules','CONVERSIONS'),
        'CONVERSIONS': ('conversions', 'COERCIONS'),
        'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
        'SPECIALIDENTIFIERS': ('id-classes', ''),
        'PRIVATENAMES': ('atom-identifiers', ''),
        'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
                     'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
        'TUPLES': 'SEQUENCES',
        'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
        'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
        'LISTLITERALS': ('lists', 'LISTS LITERALS'),
        'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
        'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
        'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
        'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
                       'ATTRIBUTEMETHODS'),
        'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
        'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
        'CALLS': ('calls', 'EXPRESSIONS'),
        'POWER': ('power', 'EXPRESSIONS'),
        'UNARY': ('unary', 'EXPRESSIONS'),
        'BINARY': ('binary', 'EXPRESSIONS'),
        'SHIFTING': ('shifting', 'EXPRESSIONS'),
        'BITWISE': ('bitwise', 'EXPRESSIONS'),
        'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
        'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
        'ASSERTION': 'assert',
        'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
        'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
        'DELETION': 'del',
        'PRINTING': 'print',
        'RETURNING': 'return',
        'IMPORTING': 'import',
        'CONDITIONAL': 'if',
        'LOOPING': ('compound', 'for while break continue'),
        'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
        'DEBUGGING': ('debugger', 'pdb'),
        'CONTEXTMANAGERS': ('context-managers', 'with'),
    }

    def __init__(self, input=None, output=None):
        # None means "use the current sys.stdin/sys.stdout at call time"
        # (see the input/output properties below).
        self._input = input
        self._output = output

    input  = property(lambda self: self._input or sys.stdin)
    output = property(lambda self: self._output or sys.stdout)

    def __repr__(self):
        # Typing ``help`` (no parens) at the interactive prompt triggers
        # __repr__; detect that case and start an interactive session.
        if inspect.stack()[1][3] == '?':
            self()
            return ''
        return '<pydoc.Helper instance>'

    # Sentinel default distinguishing help() from help(None).
    _GoInteractive = object()

    def __call__(self, request=_GoInteractive):
        if request is not self._GoInteractive:
            self.help(request)
        else:
            self.intro()
            self.interact()
            self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)".  Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')

    def interact(self):
        """Run the help> read-eval loop until EOF, interrupt or 'quit'."""
        self.output.write('\n')
        while True:
            try:
                request = self.getline('help> ')
                if not request: break
            except (KeyboardInterrupt, EOFError):
                break
            # Strip surrounding whitespace and any quote characters.
            request = strip(replace(request, '"', '', "'", ''))
            if lower(request) in ('q', 'quit'): break
            self.help(request)

    def getline(self, prompt):
        """Read one line, using raw_input when available."""
        if self.input is sys.stdin:
            return raw_input(prompt)
        else:
            self.output.write(prompt)
            self.output.flush()
            return self.input.readline()

    def help(self, request):
        """Dispatch a single help request (string command or object)."""
        if type(request) is type(''):
            request = request.strip()
            if request == 'help': self.intro()
            elif request == 'keywords': self.listkeywords()
            elif request == 'symbols': self.listsymbols()
            elif request == 'topics': self.listtopics()
            elif request == 'modules': self.listmodules()
            elif request[:8] == 'modules ':
                self.listmodules(split(request)[1])
            elif request in self.symbols: self.showsymbol(request)
            elif request in self.keywords: self.showtopic(request)
            elif request in self.topics: self.showtopic(request)
            elif request: doc(request, 'Help on %s:')
        elif isinstance(request, Helper): self()
        else: doc(request, 'Help on %s:')
        self.output.write('\n')

    def intro(self):
        """Print the welcome banner for the interactive help utility."""
        self.output.write('''
Welcome to Python %s!  This is the online help utility.

If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/%s/tutorial/.

Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules.  To quit this help utility and
return to the interpreter, just type "quit".

To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics".  Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % tuple([sys.version[:3]]*2))

    def list(self, items, columns=4, width=80):
        """Write items in a sorted, column-major multi-column layout."""
        items = items[:]
        items.sort()
        colw = width / columns
        rows = (len(items) + columns - 1) / columns
        for row in range(rows):
            for col in range(columns):
                # Column-major ordering: consecutive items go down a column.
                i = col * rows + row
                if i < len(items):
                    self.output.write(items[i])
                    if col < columns - 1:
                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
            self.output.write('\n')

    def listkeywords(self):
        self.output.write('''
Here is a list of the Python keywords.  Enter any keyword to get more help.

''')
        self.list(self.keywords.keys())

    def listsymbols(self):
        self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.

''')
        self.list(self.symbols.keys())

    def listtopics(self):
        self.output.write('''
Here is a list of available topics.  Enter any topic name to get more help.

''')
        self.list(self.topics.keys())

    def showtopic(self, topic, more_xrefs=''):
        """Display the documentation for a topic or keyword, plus cross-refs."""
        try:
            import pydoc_data.topics
        except ImportError:
            self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
            return
        target = self.topics.get(topic, self.keywords.get(topic))
        if not target:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        # A plain string target is an alias to another topic.
        if type(target) is type(''):
            return self.showtopic(target, more_xrefs)

        label, xrefs = target
        try:
            doc = pydoc_data.topics.topics[label]
        except KeyError:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        pager(strip(doc) + '\n')
        if more_xrefs:
            xrefs = (xrefs or '') + ' ' + more_xrefs
        if xrefs:
            # Word-wrap the "Related help topics" line via DumbWriter.
            import StringIO, formatter
            buffer = StringIO.StringIO()
            formatter.DumbWriter(buffer).send_flowing_data(
                'Related help topics: ' + join(split(xrefs), ', ') + '\n')
            self.output.write('\n%s\n' % buffer.getvalue())

    def showsymbol(self, symbol):
        """Show the topic associated with a punctuation symbol."""
        target = self.symbols[symbol]
        # First word is the topic; the rest are cross-references.
        topic, _, xrefs = target.partition(' ')
        self.showtopic(topic, xrefs)

    def listmodules(self, key=''):
        """List all top-level modules, or those whose summaries match key."""
        if key:
            self.output.write('''
Here is a list of matching modules.  Enter any module name to get more help.

''')
            apropos(key)
        else:
            self.output.write('''
Please wait a moment while I gather a list of all available modules...

''')
            modules = {}
            def callback(path, modname, desc, modules=modules):
                if modname and modname[-9:] == '.__init__':
                    modname = modname[:-9] + ' (package)'
                # Keep only top-level names (no dots).
                if find(modname, '.') < 0:
                    modules[modname] = 1
            def onerror(modname):
                callback(None, modname, None)
            ModuleScanner().run(callback, onerror=onerror)
            self.list(modules.keys())
            self.output.write('''
Enter any module name to get more help.  Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
# Shared Helper instance; with no arguments it defaults to the current
# sys.stdin/sys.stdout (see Helper.input/output).
help = Helper()
class Scanner:
    """A generic tree iterator.

    ``children(node)`` returns a node's children; ``descendp(node)`` decides
    whether a yielded node should itself be descended into.  Note that the
    roots themselves are never yielded, only their descendants.
    """
    def __init__(self, roots, children, descendp):
        self.roots = roots[:]
        self.state = []          # stack of (node, remaining-children) pairs
        self.children = children
        self.descendp = descendp

    def next(self):
        """Return the next node in the traversal, or None when exhausted."""
        while True:
            if not self.state:
                if not self.roots:
                    return None
                root = self.roots.pop(0)
                self.state = [(root, self.children(root))]
            node, children = self.state[-1]
            if not children:
                # This subtree is finished; back up and keep looking.
                self.state.pop()
                continue
            child = children.pop(0)
            if self.descendp(child):
                self.state.append((child, self.children(child)))
            return child
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""

    def run(self, callback, key=None, completer=None, onerror=None):
        """Invoke callback(path, modname, desc) for every module found.

        When ``key`` is given, only modules whose name or one-line synopsis
        contains it (case-insensitively) are reported.  Setting self.quit
        to a true value stops the scan between modules.
        """
        if key: key = lower(key)
        self.quit = False
        seen = {}

        # Built-in modules first; they have no importer/loader.
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    desc = split(__import__(modname).__doc__ or '', '\n')[0]
                    if find(lower(modname + ' - ' + desc), key) >= 0:
                        callback(None, modname, desc)

        # Then everything reachable on sys.path.
        for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                loader = importer.find_module(modname)
                if hasattr(loader,'get_source'):
                    # Read the synopsis from source without importing.
                    import StringIO
                    desc = source_synopsis(
                        StringIO.StringIO(loader.get_source(modname))
                    ) or ''
                    if hasattr(loader,'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    # No source available: import and read __doc__.
                    module = loader.load_module(modname)
                    desc = module.__doc__.splitlines()[0] if module.__doc__ else ''
                    path = getattr(module,'__file__',None)
                if find(lower(modname + ' - ' + desc), key) >= 0:
                    callback(path, modname, desc)

        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print modname, desc and '- ' + desc
    def onerror(modname):
        # Best-effort scan: skip modules that fail to import.
        pass
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore') # ignore problems during import
        ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
    """Serve pydoc-generated HTML over HTTP on localhost:port.

    ``callback`` is invoked with the server once it is up; ``completer``
    runs when the server shuts down.
    """
    import BaseHTTPServer, mimetools, select

    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
    class Message(mimetools.Message):
        def __init__(self, fp, seekable=1):
            Message = self.__class__
            # Call the grandparent initializer directly, bypassing the
            # possibly-stale rfc822 base captured at import time.
            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
            self.encodingheader = self.getheader('content-transfer-encoding')
            self.typeheader = self.getheader('content-type')
            self.parsetype()
            self.parseplist()

    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def send_document(self, title, contents):
            # Write a complete HTML page; ignore a client that went away.
            try:
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                self.wfile.write(html.page(title, contents))
            except IOError: pass

        def do_GET(self):
            # URL scheme: /Name.html documents "Name"; / is the module index.
            path = self.path
            if path[-5:] == '.html': path = path[:-5]
            if path[:1] == '/': path = path[1:]
            if path and path != '.':
                try:
                    obj = locate(path, forceload=1)
                except ErrorDuringImport, value:
                    self.send_document(path, html.escape(str(value)))
                    return
                if obj:
                    self.send_document(describe(obj), html.document(obj, path))
                else:
                    self.send_document(path,
'no Python documentation found for %s' % repr(path))
            else:
                heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
                def bltinlink(name):
                    return '<a href="%s.html">%s</a>' % (name, name)
                names = filter(lambda x: x != '__main__',
                               sys.builtin_module_names)
                contents = html.multicolumn(names, bltinlink)
                indices = ['<p>' + html.bigsection(
                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]

                seen = {}
                for dir in sys.path:
                    indices.append(html.index(dir, seen))
                contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
                self.send_document('Index of Modules', contents)

        # Silence the per-request logging of BaseHTTPRequestHandler.
        def log_message(self, *args): pass

    class DocServer(BaseHTTPServer.HTTPServer):
        def __init__(self, port, callback):
            host = 'localhost'
            self.address = (host, port)
            self.callback = callback
            self.base.__init__(self, self.address, self.handler)

        def serve_until_quit(self):
            import select
            self.quit = False
            while not self.quit:
                # Poll with a 1s timeout so ``quit`` is checked regularly.
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd: self.handle_request()

        def server_activate(self):
            self.base.server_activate(self)
            self.url = 'http://%s:%d/' % (self.address[0], self.server_port)
            if self.callback: self.callback(self)

    DocServer.base = BaseHTTPServer.HTTPServer
    DocServer.handler = DocHandler
    DocHandler.MessageClass = Message
    try:
        try:
            DocServer(port, callback).serve_until_quit()
        except (KeyboardInterrupt, select.error):
            pass
    finally:
        if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
    """Graphical interface (starts web server and pops up a control window)."""
    class GUI:
        # Tkinter control window: server status row, search row, and a
        # collapsible result list fed by a background ModuleScanner.
        def __init__(self, window, port=7464):
            self.window = window
            self.server = None
            self.scanner = None

            import Tkinter
            self.server_frm = Tkinter.Frame(window)
            self.title_lbl = Tkinter.Label(self.server_frm,
                text='Starting server...\n ')
            self.open_btn = Tkinter.Button(self.server_frm,
                text='open browser', command=self.open, state='disabled')
            self.quit_btn = Tkinter.Button(self.server_frm,
                text='quit serving', command=self.quit, state='disabled')

            self.search_frm = Tkinter.Frame(window)
            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
            self.search_ent = Tkinter.Entry(self.search_frm)
            self.search_ent.bind('<Return>', self.search)
            self.stop_btn = Tkinter.Button(self.search_frm,
                text='stop', pady=0, command=self.stop, state='disabled')
            if sys.platform == 'win32':
                # Trying to hide and show this button crashes under Windows.
                self.stop_btn.pack(side='right')

            self.window.title('pydoc')
            self.window.protocol('WM_DELETE_WINDOW', self.quit)
            self.title_lbl.pack(side='top', fill='x')
            self.open_btn.pack(side='left', fill='x', expand=1)
            self.quit_btn.pack(side='right', fill='x', expand=1)
            self.server_frm.pack(side='top', fill='x')

            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            self.search_frm.pack(side='top', fill='x')
            self.search_ent.focus_set()

            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
            self.result_lst.bind('<Button-1>', self.select)
            self.result_lst.bind('<Double-Button-1>', self.goto)
            self.result_scr = Tkinter.Scrollbar(window,
                orient='vertical', command=self.result_lst.yview)
            self.result_lst.config(yscrollcommand=self.result_scr.set)

            self.result_frm = Tkinter.Frame(window)
            self.goto_btn = Tkinter.Button(self.result_frm,
                text='go to selected', command=self.goto)
            self.hide_btn = Tkinter.Button(self.result_frm,
                text='hide results', command=self.hide)
            self.goto_btn.pack(side='left', fill='x', expand=1)
            self.hide_btn.pack(side='right', fill='x', expand=1)

            # Remember collapsed/expanded window geometries.
            self.window.update()
            self.minwidth = self.window.winfo_width()
            self.minheight = self.window.winfo_height()
            self.bigminheight = (self.server_frm.winfo_reqheight() +
                                 self.search_frm.winfo_reqheight() +
                                 self.result_lst.winfo_reqheight() +
                                 self.result_frm.winfo_reqheight())
            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
            self.expanded = 0
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.window.tk.willdispatch()

            # Run the documentation HTTP server on a background thread.
            import threading
            threading.Thread(
                target=serve, args=(port, self.ready, self.quit)).start()

        def ready(self, server):
            # Called by the server once it is listening.
            self.server = server
            self.title_lbl.config(
                text='Python documentation server at\n' + server.url)
            self.open_btn.config(state='normal')
            self.quit_btn.config(state='normal')

        def open(self, event=None, url=None):
            url = url or self.server.url
            try:
                import webbrowser
                webbrowser.open(url)
            except ImportError: # pre-webbrowser.py compatibility
                if sys.platform == 'win32':
                    os.system('start "%s"' % url)
                else:
                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
                    if rc: os.system('netscape "%s" &' % url)

        def quit(self, event=None):
            if self.server:
                self.server.quit = 1
            self.window.quit()

        def search(self, event=None):
            key = self.search_ent.get()
            self.stop_btn.pack(side='right')
            self.stop_btn.config(state='normal')
            self.search_lbl.config(text='Searching for "%s"...' % key)
            self.search_ent.forget()
            self.search_lbl.pack(side='left')
            self.result_lst.delete(0, 'end')
            self.goto_btn.config(state='disabled')
            self.expand()

            # Cancel any running scan, then scan modules on a worker thread.
            import threading
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = ModuleScanner()
            def onerror(modname):
                pass
            threading.Thread(target=self.scanner.run,
                             args=(self.update, key, self.done),
                             kwargs=dict(onerror=onerror)).start()

        def update(self, path, modname, desc):
            # Scanner callback: append one result line to the listbox.
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            self.result_lst.insert('end',
                modname + ' - ' + (desc or '(no description)'))

        def stop(self, event=None):
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = None

        def done(self):
            # Scanner callback: restore the search row to its idle state.
            self.scanner = None
            self.search_lbl.config(text='Search for')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            if sys.platform != 'win32': self.stop_btn.forget()
            self.stop_btn.config(state='disabled')

        def select(self, event=None):
            self.goto_btn.config(state='normal')

        def goto(self, event=None):
            # Open the documentation page for the selected module.
            selection = self.result_lst.curselection()
            if selection:
                modname = split(self.result_lst.get(selection[0]))[0]
                self.open(url=self.server.url + modname + '.html')

        def collapse(self):
            if not self.expanded: return
            self.result_frm.forget()
            self.result_scr.forget()
            self.result_lst.forget()
            self.bigwidth = self.window.winfo_width()
            self.bigheight = self.window.winfo_height()
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.expanded = 0

        def expand(self):
            if self.expanded: return
            self.result_frm.pack(side='bottom', fill='x')
            self.result_scr.pack(side='right', fill='y')
            self.result_lst.pack(side='top', fill='both', expand=1)
            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
            self.window.wm_minsize(self.minwidth, self.bigminheight)
            self.expanded = 1

        def hide(self, event=None):
            self.stop()
            self.collapse()

    import Tkinter
    try:
        root = Tkinter.Tk()
        # Tk will crash if pythonw.exe has an XP .manifest
        # file and the root has is not destroyed explicitly.
        # If the problem is ever fixed in Tk, the explicit
        # destroy can go.
        try:
            gui = GUI(root)
            root.mainloop()
        finally:
            root.destroy()
    except KeyboardInterrupt:
        pass
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return True if x is a string containing a path separator.

    Non-string arguments yield False rather than raising.
    """
    # `os.sep in x` is equivalent to the old `find(x, os.sep) >= 0` but
    # does not rely on the deprecated string-module function `find`.
    return isinstance(x, str) and os.sep in x
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    class BadUsage: pass  # internal signal: fall through to the usage message

    # Scripts don't get the current directory in their path by default
    # unless they are run with the '-m' switch
    if '' not in sys.path:
        scriptdir = os.path.dirname(sys.argv[0])
        if scriptdir in sys.path:
            sys.path.remove(scriptdir)
        sys.path.insert(0, '.')

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
        writing = 0

        for opt, val in opts:
            if opt == '-g':
                # Launch the Tkinter GUI and return.
                gui()
                return
            if opt == '-k':
                # Keyword search over module synopsis lines.
                apropos(val)
                return
            if opt == '-p':
                # Serve documentation over HTTP on the given port.
                try:
                    port = int(val)
                except ValueError:
                    raise BadUsage
                def ready(server):
                    print 'pydoc server ready at %s' % server.url
                def stopped():
                    print 'pydoc server stopped'
                serve(port, ready, stopped)
                return
            if opt == '-w':
                writing = 1

        if not args: raise BadUsage
        for arg in args:
            if ispath(arg) and not os.path.exists(arg):
                print 'file %r does not exist' % arg
                break
            try:
                # A path argument is imported as a file; a dotted name is
                # resolved by help/writedoc themselves.
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport, value:
                print value

    except (getopt.error, BadUsage):
        cmd = os.path.basename(sys.argv[0])
        print """pydoc - the Python documentation tool
%s <name> ...
    Show text documentation on something. <name> may be the name of a
    Python keyword, topic, function, module, or package, or a dotted
    reference to a class or function within a module or module in a
    package. If <name> contains a '%s', it is used as the path to a
    Python source file to document. If name is 'keywords', 'topics',
    or 'modules', a listing of these things is displayed.
%s -k <keyword>
    Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
    Start an HTTP server on the given port on the local machine. Port
    number 0 can be used to get an arbitrary unused port.
%s -g
    Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
    Write out the HTML documentation for a module to a file in the current
    directory. If <name> contains a '%s', it is treated as a filename; if
    it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
withlocks.py | import threading, time, random
counter_lock = threading.Lock()
printer_lock = threading.Lock()
counter = 0
def worker():
    'My job is to increment the counter and print the current count'
    global counter
    # counter_lock serializes the read-modify-write of the shared counter;
    # printer_lock keeps this worker's two print lines contiguous. The print
    # happens while still holding counter_lock so the printed value matches
    # the increment that this thread performed.
    with counter_lock:
        counter += 1
        with printer_lock:
            print('The count is %d' % counter)
            print('---------------')
# Announce start, launch ten workers, wait for all of them, then announce end.
with printer_lock:
    print('Starting up')
worker_threads = []
for i in range(10):
    t = threading.Thread(target=worker)
    worker_threads.append(t)
    t.start()
for t in worker_threads:
    t.join()
with printer_lock:
    print('Finishing up')
keepkey.py | from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_nmc.util import bfh, bh2u, UserCancelled
from electrum_nmc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT,
is_segwit_address)
from electrum_nmc import constants
from electrum_nmc.i18n import _
from electrum_nmc.plugins import BasePlugin
from electrum_nmc.transaction import deserialize, Transaction
from electrum_nmc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_nmc.wallet import Standard_Wallet
from electrum_nmc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
    """Electrum keystore backed by a KeepKey hardware device."""
    hw_type = 'keepkey'
    device = 'KeepKey'

    def get_derivation(self):
        return self.derivation

    def is_segwit(self):
        # BIP49 (p2wpkh-p2sh) keystores derive under m/49'/.
        return self.derivation.startswith("m/49'/")

    def get_client(self, force_pair=True):
        return self.plugin.get_client(self, force_pair)

    def decrypt_message(self, sequence, message, password):
        # KeepKey offers no message encryption/decryption in this plugin.
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))

    def sign_message(self, sequence, message, password):
        # `sequence` is a (change, index) pair appended to the derivation path.
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature

    def sign_transaction(self, tx, password):
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            # Legacy inputs need the full previous transaction for signing.
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                # Only record the derivation for our own master key.
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
    """Hardware-wallet plugin implementing device discovery, initialization
    and transaction signing for KeepKey devices via python-keepkey."""
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types, HidTransport

    firmware_URL = 'https://www.keepkey.com'
    libraries_URL = 'https://github.com/keepkey/python-keepkey'
    minimum_firmware = (1, 0, 0)
    keystore_class = KeepKey_KeyStore
    SUPPORTED_XTYPES = ('standard', )

    MAX_LABEL_LEN = 32

    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        try:
            from . import client
            import keepkeylib
            import keepkeylib.ckd_public
            import keepkeylib.transport_hid
            self.client_class = client.KeepKeyClient
            self.ckd_public = keepkeylib.ckd_public
            self.types = keepkeylib.client.types
            self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
            self.device_manager().register_devices(self.DEVICE_IDS)
            self.libraries_available = True
        except ImportError:
            # python-keepkey not installed: plugin stays disabled.
            self.libraries_available = False

    def hid_transport(self, pair):
        from keepkeylib.transport_hid import HidTransport
        return HidTransport(pair)

    def _try_hid(self, device):
        # Build the (path, path) pair HidTransport expects depending on
        # which USB interface the device enumerated on.
        self.print_error("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.print_error("cannot connect at", device.path, str(e))
            return None

    def create_client(self, device, handler):
        # Connect, sanity-check with a ping, and enforce minimum firmware.
        transport = self._try_hid(device)
        if not transport:
            self.print_error("cannot connect to device")
            return
        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None
        return client

    def get_client(self, keystore, force_pair=True):
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client

    def get_coin_name(self):
        # Coin name as understood by KeepKey firmware.
        return "Testnet" if constants.net.TESTNET else "Bitcoin"

    def initialize_device(self, device_id, wizard, handler):
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            # Run the (blocking) device initialization off the GUI thread.
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            wizard.loop.exec_()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)

    def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
        # Wrapper that reports errors to the user and always releases the
        # wizard event loop started by initialize_device.
        try:
            self._initialize_device(settings, method, device_id, wizard, handler)
        except UserCancelled:
            pass
        except BaseException as e:
            traceback.print_exc(file=sys.stderr)
            handler.show_error(str(e))
        finally:
            wizard.loop.exit(0)

    def _initialize_device(self, settings, method, device_id, wizard, handler):
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)

        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)

    def setup_device(self, device_info, wizard, purpose):
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        if client is None:
            raise Exception(_('Failed to create a client for this device.') + '\n' +
                            _('Make sure it is in the correct state.'))
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        # Trigger a pairing/PIN prompt by requesting the root xpub.
        client.get_xpub('m', 'standard')
        client.used()

    def get_xpub(self, device_id, derivation, xtype, wizard):
        if xtype not in self.SUPPORTED_XTYPES:
            raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub

    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        # prev_tx / xpub_path are consulted by get_tx() via the tx_api
        # callback while the device streams previous transactions.
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True, keystore.is_segwit())
        outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
        signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
        raw = bh2u(signed_tx)
        tx.update_signatures(raw)

    def show_address(self, wallet, address, keystore=None):
        if keystore is None:
            keystore = wallet.get_keystore()
        if not self.show_address_helper(wallet, address, keystore):
            return
        if type(wallet) is not Standard_Wallet:
            keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
            return
        client = self.get_client(wallet.keystore)
        if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = wallet.keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        segwit = wallet.keystore.is_segwit()
        script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
        # show_display=True makes the device render the address on-screen.
        client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)

    def tx_inputs(self, tx, for_sig=False, segwit=False):
        """Convert electrum tx inputs to KeepKey protobuf TxInputType objects.

        for_sig=True fills in derivation paths/multisig data for signing;
        otherwise only prevout information is needed (for streaming prev txs).
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                # NOTE(review): "\0"*32 is a str, not bytes, in Python 3 —
                # presumably tolerated by the protobuf layer; confirm.
                prev_hash = "\0"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # Single-sig input: point the device at our derivation.
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                        txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
                    else:
                        # Multisig input: build the redeem script description.
                        def f(x_pubkey):
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = map(f, x_pubkeys)
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break

                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']

            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index

            if txin.get('scriptSig') is not None:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig

            # Default nSequence opts in to replace-by-fee-compatible behavior.
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)

            inputs.append(txinputtype)

        return inputs

    def tx_outputs(self, derivation, tx, segwit=False):
        """Convert electrum tx outputs to KeepKey protobuf TxOutputType objects.

        At most one of our own outputs (preferably on the change branch) is
        described by derivation path so the device hides it from the user.
        """

        def create_output_by_derivation(info):
            # Output owned by this wallet: describe it by derivation path.
            index, xpubs, m = info
            if len(xpubs) == 1:
                script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
                address_n = self.client_class.expand_path("/%d/%d" % index)
                nodes = map(self.ckd_public.deserialize, xpubs)
                pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype

        def create_output_by_address():
            # External output: describe it by address (or OP_RETURN payload).
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.PAYTOOPRETURN
                txoutputtype.op_return_data = address[2:]
            elif _type == TYPE_ADDRESS:
                if is_segwit_address(address):
                    txoutputtype.script_type = self.types.PAYTOWITNESS
                else:
                    addrtype, hash_160 = b58_address_to_hash160(address)
                    if addrtype == constants.net.ADDRTYPE_P2PKH:
                        txoutputtype.script_type = self.types.PAYTOADDRESS
                    elif addrtype == constants.net.ADDRTYPE_P2SH:
                        txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
                    else:
                        raise Exception('addrtype: ' + str(addrtype))
                txoutputtype.address = address
            return txoutputtype

        def is_any_output_on_change_branch():
            for _type, address, amount in tx.outputs():
                info = tx.output_info.get(address)
                if info is not None:
                    index, xpubs, m = info
                    if index[0] == 1:
                        return True
            return False

        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_output_on_change_branch()

        for _type, address, amount in tx.outputs():
            use_create_by_derivation = False

            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True

            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation(info)
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)

        return outputs

    def electrum_tx_to_txtype(self, tx):
        # Re-serialize an electrum Transaction as a protobuf TransactionType,
        # used when streaming previous transactions to the device.
        t = self.types.TransactionType()
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t

    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
redshift.py | # pylint: disable=C0111,R0903
"""Displays the current color temperature of redshift
Requires the following executable:
* redshift
Parameters:
* redshift.location : location provider, either of 'auto' (default), 'geoclue2',
'ipinfo' or 'manual'
'auto' uses whatever redshift is configured to do
* redshift.lat : latitude if location is set to 'manual'
* redshift.lon : longitude if location is set to 'manual'
* redshift.show_transition: information about the transitions (x% day) defaults to True
"""
import re
import threading
import core.module
import core.widget
import core.input
import core.decorators
import util.cli
import util.format
import util.location
def get_redshift_value(module):
    """Run `redshift -p` and write temperature/period/transition values into
    the module's widget, then trigger a redraw.

    Runs on a worker thread (see Module.update); `module` is the owning
    bumblebee-status module object.
    """
    widget = module.widget()
    location = module.parameter("location", "auto")
    lat = module.parameter("lat", None)
    lon = module.parameter("lon", None)

    # Even if location method is set to manual, if we have no lat or lon,
    # fall back to the geoclue2 method.
    if location == "manual" and (lat is None or lon is None):
        location = "geoclue2"

    command = ["redshift", "-p"]
    if location == "manual":
        command.extend(["-l", "{}:{}".format(lat, lon)])
    if location == "geoclue2":
        command.extend(["-l", "geoclue2"])

    try:
        res = util.cli.execute(" ".join(command))
    except Exception:
        # redshift missing or failed: display placeholders.
        res = ""
        widget.set("temp", "n/a")
        widget.set("transition", "")
        widget.set("state", "day")

    for line in res.split("\n"):
        line = line.lower()
        if "temperature" in line:
            widget.set("temp", line.split(" ")[2].upper())
        if "period" in line:
            state = line.split(" ")[1]
            if "day" in state:
                widget.set("state", "day")
            elif "night" in state:
                widget.set("state", "night")
            else:
                widget.set("state", "transition")
                match = re.search(r"(\d+)\.\d+% ([a-z]+)", line)
                # FIX: guard against unexpected output; the previous code
                # called match.group() unconditionally and raised
                # AttributeError when the pattern did not match.
                if match:
                    widget.set(
                        "transition", "({}% {})".format(match.group(1), match.group(2))
                    )

    core.event.trigger("update", [widget.module.id], redraw_only=True)
class Module(core.module.Module):
    """bumblebee-status module showing redshift's current color temperature."""

    @core.decorators.every(seconds=10)
    def __init__(self, config, theme):
        super().__init__(config, theme, core.widget.Widget(self.text))
        # Worker thread handle; redshift is queried off the main loop.
        self.__thread = None
        self.show_transition = util.format.asbool(
            self.parameter("show_transition", True)
        )
        if self.parameter("location", "") == "ipinfo":
            # override lon/lat with ipinfo
            try:
                location = util.location.coordinates()
                self.set("lat", location[0])
                self.set("lon", location[1])
                self.set("location", "manual")
            except Exception:
                # Fall back to geoclue2.
                self.set("location", "geoclue2")

        self._text = ""

    def text(self, widget):
        # Temperature plus optional "(x% day)" transition information.
        val = widget.get("temp", "n/a")
        transition = widget.get("transition", "")
        if transition and self.show_transition:
            val = "{} {}".format(val, transition)
        return val

    def update(self):
        # Skip if the previous query is still running.
        if self.__thread is not None and self.__thread.is_alive():
            return
        self.__thread = threading.Thread(target=get_redshift_value, args=(self,))
        self.__thread.start()

    def state(self, widget):
        return widget.get("state", None)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
tasks.py |
import schedule
import time, threading
import requests
import json
import datetime, pytz, django.utils
import logging
from monitor.configure import configure
from monitor.models import BalanceHistory
from monitor.models import ExchangeHistory
from monitor.models import BotHistory
logger = logging.getLogger("tasks")
def loadDifficulty():
    """Fetch pool/network stats and return the estimated XMR earned per
    1 H/s of hash rate over 24 hours."""
    stateResp = requests.request("GET", "http://stats.minexmr.com/stats")
    stateRespContent = stateResp.text
    logger.info("recv state response >> " + stateRespContent)
    stateObj = json.loads(stateRespContent)
    #
    hashRate = 1  # reference rate of 1 H/s to estimate earnings per unit time
    # FIX: renamed from `time`, which shadowed the imported time module.
    seconds_per_day = 86400  # 24 hours
    return (stateObj['network']['reward'] / configure.coinUnits / (stateObj['network']['difficulty'] / hashRate / seconds_per_day))
def loadExchange(type):
    """Return the last traded price for the given pair name (e.g. 'xmr_cny')
    from the bter ticker API."""
    # see: https://data.bter.com/api2
    exchangeResp = requests.request("GET", "http://data.bter.com/api2/1/ticker/" + type)
    exchangeRespContent = exchangeResp.text
    logger.info("recv exchange["+type+"] response >> " + exchangeRespContent)
    exchangeObj = json.loads(exchangeRespContent)
    exchange = exchangeObj['last']
    return exchange
def exchangeScanJob():
    """Record the current xmr/cny, xmr/btc and btc/cny exchange rates."""
    xmr_cny = loadExchange('xmr_cny')
    xmr_btc = loadExchange('xmr_btc')
    btc_cny = loadExchange('btc_cny')
    #
    # NOTE(review): Model.objects.create() already saves; the trailing .save()
    # looks redundant — confirm before removing.
    ExchangeHistory.objects.create(xmr_cny=xmr_cny, xmr_btc=xmr_btc, btc_cny=btc_cny).save()
def balanceScanJob():
    """Poll the pool for the account balance and upsert one BalanceHistory
    row per (day, balance) combination."""
    balanceResp = requests.request("GET", "http://api.minexmr.com:8080/stats_address?address="+configure.address+"&longpoll=true")
    balanceRespContent = balanceResp.text
    logger.info("recv balance response >> " + balanceRespContent)
    balanceObj = json.loads(balanceRespContent)
    #
    # Pool reports atomic units; divide by coinUnits to get whole XMR.
    balance = float(balanceObj["stats"]["balance"]) / configure.coinUnits
    thold = float(balanceObj["stats"]["thold"]) / configure.coinUnits
    hashes = int(balanceObj["stats"]["hashes"])
    ttime = datetime.datetime.now()
    difficulty = loadDifficulty()
    logger.info("scan data >> balance["+str(balance)+"], thold["+str(thold)+"], hashes["+str(hashes)+"], difficulty["
                +str(difficulty)+"]")
    #
    # Create a row the first time this (day, balance) pair is seen, otherwise
    # refresh the existing row's stats.
    rsObjs = BalanceHistory.objects.filter(log_day=ttime.strftime("%Y-%m-%d"), balance=balance)
    if len(rsObjs) <= 0:
        logger.debug("create ...")
        obj = BalanceHistory.objects.create(
            log_day=ttime.strftime("%Y-%m-%d"), balance=balance, thold=thold,
            hashes=hashes, difficulty=difficulty,
            update_time=datetime.datetime.now())
        obj.save()
    else:
        logger.debug("update ...")
        BalanceHistory.objects.filter(log_day=ttime.strftime("%Y-%m-%d"), balance=balance).update(
            balance=balance, thold=thold, hashes=hashes, difficulty=difficulty,
            update_time=datetime.datetime.now())
def botScanJob():
    """Fetch per-worker stats from the pool and store a BotHistory row for
    every worker whose address ends with a configured bot suffix."""
    botResp = requests.request("GET", "http://api.minexmr.com:8080/get_wid_stats?address="+configure.address)
    botRespContent = botResp.text
    logger.info("recv bot response >> " + botRespContent)
    botObj = json.loads(botRespContent)
    now = datetime.datetime.now()
    #
    # FIX: str.endswith accepts a tuple, so each worker is matched once
    # against all suffixes; the previous per-suffix loop logged the
    # "pass address" line (and could create a row) once per suffix.
    suffixes = tuple(configure.botsSuffix)
    for item in botObj:
        if item["address"].endswith(suffixes):
            BotHistory.objects.create(log_time=now, address=item["address"], expired=item["expired"], hashes=item["hashes"],
                                      hashrate=item["hashrate"], last_share=datetime.datetime.fromtimestamp(float(item['lastShare']))).save()
        else:
            print("[bot status] pass address > " + item["address"])
# Register the periodic jobs.
schedule.every(1).minutes.do(exchangeScanJob)
schedule.every(1).minutes.do(botScanJob)
schedule.every(10).minutes.do(balanceScanJob)
# Code executed by the background scheduler thread:
def scheduleThread():
    """Background loop: run every job once at startup, then service the
    schedule forever, logging (but surviving) any job failure."""
    logger.info("schedule thread start ...")
    try:
        logger.info("run job first start ...")
        # Run each job once immediately instead of waiting for its interval.
        exchangeScanJob()
        botScanJob()
        balanceScanJob()
    except Exception as err:
        # FIX: the original passed `err` as a stray positional argument with
        # no %s placeholder, which makes the logging module raise an internal
        # "not all arguments converted" formatting error.
        logger.warning("schedule job run exception >> %s", err)
    # Scheduling loop.
    while True:
        try:
            schedule.run_pending()
        except Exception as err:
            logger.warning("schedule job run exception >> %s", err)
        time.sleep(1)
# Start the scheduler loop in a background thread at import time.
t = threading.Thread(target=scheduleThread, name='ScheduleThread')
t.start()
|
xmlrpc_server_example.py | from __future__ import absolute_import, division, print_function
# This is an example of how a 3rd-party program with Python embedded, such
# as Coot or PyMOL, can be interfaced with CCTBX-based software. Something
# much like this is used for the Phenix GUI extensions to those programs.
# I haven't tried this with any other software, but anything with a reasonably
# recent version of Python and support for either persistent Python threads
# or some sort of timer callback should be able to use it.
DEFAULT_PORT = 40000
import os, sys, string, signal
import xmlrpclib
try :
from SimpleXMLRPCServer import SimpleXMLRPCServer
class external_xmlrpc_server (SimpleXMLRPCServer) :
    """XML-RPC server that dispatches incoming calls to methods of a
    cctbx interface object."""

    def __init__ (self, addr, cctbx_interface) :
        self.cctbx_interface = cctbx_interface
        SimpleXMLRPCServer.__init__(self, addr, logRequests=0)

    def _dispatch (self, method, params) :
        # Look the method up on the interface object; -1 signals "no result"
        # because None is not a valid default XML-RPC return value.
        if not self.cctbx_interface.enable_xmlrpc :
            return -1
        result = -1
        func = getattr(self.cctbx_interface, method, None)
        if not callable(func) :
            print("%s is not a callable object!" % method)
        else :
            result = func(*params)
            if result is None :
                result = -1
        return result
class external_xmlrpc_interface (object) :
    """Bridges an embedding program (Coot, PyMOL, ...) and CCTBX software:
    serves XML-RPC calls locally and optionally connects back to a CCTBX
    XML-RPC server whose port is taken from the environment."""

    def __init__ (self, program_id, auto_start=True, verbose=False) :
        self.enable_xmlrpc = True
        self.xmlrpc_server = None
        self.cctbx_server = None
        self.verbose = verbose
        # GUI timer interval in milliseconds (string.atoi is the py2 parser).
        self.timeout = string.atoi(os.environ.get("CCTBX_XMLRPC_TIMEOUT", "250"))
        self.program_id = program_id
        self.supported_modules = []
        self.setup_modules()
        self.setup_server()
        if auto_start :
            self.start_server()

    def setup_modules (self) :
        # Hook for subclasses: register modules before the server starts.
        pass

    def add_module (self, module_object=None, module_path=None) :
        # Register a module whose functions become remotely callable.
        if module_object is not None :
            self.supported_modules.append(module_object)
        elif module_path is not None :
            module_object = __import__(module_path)
            self.supported_modules.append(module_object)

    def setup_server (self) :
        # Listen on CCTBX_<ID>_PORT (default DEFAULT_PORT) and, if
        # CCTBX_XMLRPC_PORT is set, open a client back to the CCTBX GUI.
        port = os.environ.get("CCTBX_%s_PORT" % self.program_id, DEFAULT_PORT)
        if port is not None :
            self.port = int(port)
            self.xmlrpc_server = external_xmlrpc_server(("127.0.0.1", self.port),
                self)
            if self.verbose :
                print("Listening on port %s" % port)
        cctbx_port = os.environ.get("CCTBX_XMLRPC_PORT", None)
        if cctbx_port is not None :
            uri = "http://localhost:%s/RPC2" % cctbx_port
            self.cctbx_server = xmlrpclib.ServerProxy(uri=uri)
            if self.verbose :
                print("Connecting to XML-RPC server on port %s" % cctbx_port)

    def start_server (self) :
        # Blocking serve loop.
        if self.xmlrpc_server is not None :
            print("XML-RPC server started on port %d" % self.port)
            self.xmlrpc_server.serve_forever()

    def start_server_in_separate_thread (self) :
        import threading
        t = threading.Thread(target=self.start_server)
        t.setDaemon(1)
        t.start()

    def set_socket_timeout (self, timeout) :
        if self.xmlrpc_server is not None :
            self.xmlrpc_server.socket.settimeout(timeout)

    def timeout_func (self, *args) :
        # Intended for a GUI timer callback: handle one pending request per
        # tick; returning True keeps the timer alive (e.g. gobject).
        if self.xmlrpc_server is not None :
            self.xmlrpc_server.handle_request()
        return True

    def is_alive (self) :
        return True

    # XXX: this should be replaced by the proper quit function for the program
    # being extended - e.g. cmd.quit() in PyMOL.
    def quit (self) :
        print("quitting")
        sys.stdout.flush()
        os.kill(os.getpid(), signal.SIGKILL)

    def __getattr__ (self, name) :
        # Delegate unknown attribute lookups to the registered modules so
        # their functions are exposed over XML-RPC.
        for module_object in self.supported_modules :
            if hasattr(module_object, name) :
                return getattr(module_object, name)
        return None
except KeyboardInterrupt :
raise
except ImportError :
def external_xmlrpc_server (*args, **kwds) :
raise Exception("SimpleXMLRPCServer not available on this platform.")
def external_cctbx_interface (*args, **kwds) :
raise Exception("SimpleXMLRPCServer not available on this platform.")
def test_server () :
  """Manual smoke test: start a TEST interface exposing a single echo call."""
  class test_module (object) :
    def echo_test (self) :
      print("hello, world!")
      sys.stdout.flush()
      return True
  # os.environ["CCTBX_TEST_PORT"] = "48000"
  srv = external_xmlrpc_interface("TEST", auto_start=False, verbose=False)
  srv.add_module(test_module())
  srv.start_server()
def coot_server () :
  """Create a COOT interface whose socket is polled from the gobject loop."""
  xmlrpc_iface = external_xmlrpc_interface("COOT",
    auto_start=False,
    verbose=True)
  xmlrpc_iface.set_socket_timeout(0.01)
  import coot
  import gobject
  xmlrpc_iface.add_module(coot)
  gobject.timeout_add(200, xmlrpc_iface.timeout_func)
# Script entry point: launch the Coot XML-RPC bridge when run directly.
if __name__ == "__main__" :
  #test_server()
  coot_server()
#---end
|
cisd.py | #!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Solve CISD equation H C = C e where e = E_HF + E_CORR
'''
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import ccsd_rdm
from pyscf.fci import cistring
from pyscf import __config__
BLKMIN = getattr(__config__, 'ci_cisd_blkmin', 4)
def kernel(myci, eris, ci0=None, max_cycle=50, tol=1e-8, verbose=logger.INFO):
    '''Davidson diagonalization driver for CISD.

    Args:
        myci : CISD (inheriting) object
        eris : ccsd._ChemistsERIs (inheriting) object
            Holds the transformed (pq|rs) integrals.

    Kwargs:
        ci0 : (list of) numpy array(s) or None
            Initial guess vector(s); generated via get_init_guess when None.
        max_cycle : int
            Maximum number of Davidson iterations before giving up.
        tol : float
            Convergence tolerance.
        verbose : int
            Print level.

    Returns:
        conv : bool (list of bool when nroots > 1)
            Convergence flag(s).
        ecisd : float (list of floats when nroots > 1)
            Lowest :attr:`myci.nroots` eigenvalues.
        ci : 1D array (list of 1D arrays when nroots > 1)
            Lowest :attr:`myci.nroots` eigenvectors.
    '''
    log = logger.new_logger(myci, verbose)
    hdiag = myci.make_diagonal(eris)
    # The leading diagonal element is NOT the true HF energy (see
    # make_diagonal); it only serves as a common shift here.
    e_ref = hdiag[0]
    hdiag -= e_ref
    if ci0 is None:
        ci0 = myci.get_init_guess(eris=eris, nroots=myci.nroots, diag=hdiag)[1]

    def matvec(xs):
        # Batched H|x> products for the Davidson solver.
        return [myci.contract(x, eris) for x in xs]

    def precond(x, e, *args):
        denom = hdiag - (e - myci.level_shift)
        denom[abs(denom) < 1e-8] = 1e-8  # guard against division blow-up
        return x / denom

    if myci._dot is None:
        cisd_dot = numpy.dot
    else:
        nmo, nocc = myci.nmo, myci.nocc

        def cisd_dot(x1, x2):
            return myci._dot(x1, x2, nmo, nocc)

    conv, ecisd, ci = lib.davidson1(matvec, ci0, precond, tol=tol,
                                    max_cycle=max_cycle,
                                    max_space=myci.max_space,
                                    lindep=myci.lindep, dot=cisd_dot,
                                    nroots=myci.nroots, verbose=log)
    if myci.nroots == 1:
        conv, ecisd, ci = conv[0], ecisd[0], ci[0]
    return conv, ecisd, ci
def make_diagonal(myci, eris):
    '''
    Return diagonal of CISD hamiltonian in Slater determinant basis.

    Note that a constant has been substracted of all elements.
    The first element is the HF energy (minus the
    constant), the next elements are the diagonal elements with singly
    excited determinants (<D_i^a|H|D_i^a> within the constant), then
    doubly excited determinants (<D_ij^ab|H|D_ij^ab> within the
    constant).

    Args:
        myci : CISD (inheriting) object
        eris : ccsd._ChemistsERIs (inheriting) object (poss diff for df)
            Contains the various (pq|rs) integrals needed.

    Returns:
        numpy array (size: (1, 1 + #single excitations from HF det
        + #double excitations from HF det))
            Diagonal elements of hamiltonian matrix within a constant,
            see above.
    '''
    # DO NOT use eris.mo_energy, it may differ to eris.fock.diagonal()
    mo_energy = eris.fock.diagonal()
    nmo = mo_energy.size
    jdiag = numpy.zeros((nmo,nmo))  # diagonal Coulomb integrals (pp|qq)
    kdiag = numpy.zeros((nmo,nmo))  # diagonal exchange integrals (pq|qp)
    nocc = eris.nocc
    nvir = nmo - nocc
    jdiag[:nocc,:nocc] = numpy.einsum('iijj->ij', eris.oooo)
    kdiag[:nocc,:nocc] = numpy.einsum('jiij->ij', eris.oooo)
    jdiag[:nocc,nocc:] = numpy.einsum('iijj->ij', eris.oovv)
    kdiag[:nocc,nocc:] = numpy.einsum('ijji->ij', eris.ovvo)
    if eris.vvvv is not None and len(eris.vvvv.shape) == 2:
        # vvvv is stored tril-packed (2D); extract only the (aa|bb) diagonal
        # without unpacking the whole tensor.  diag_idx picks the diagonal
        # positions a(a+1)/2 + a of the packed lower triangle.
        #:eris_vvvv = ao2mo.restore(1, eris.vvvv, nvir)
        #:jdiag1 = numpy.einsum('iijj->ij', eris_vvvv)
        diag_idx = numpy.arange(nvir)
        diag_idx = diag_idx * (diag_idx + 1) // 2 + diag_idx
        for i, ii in enumerate(diag_idx):
            jdiag[nocc+i,nocc:] = eris.vvvv[ii][diag_idx]
    jksum = (jdiag[:nocc,:nocc] * 2 - kdiag[:nocc,:nocc]).sum()
    # Note that ehf is not the HF energy.
    ehf = mo_energy[:nocc].sum() * 2 - jksum
    e_ia = lib.direct_sum('a-i->ia', mo_energy[nocc:], mo_energy[:nocc])
    e_ia -= jdiag[:nocc,nocc:] - kdiag[:nocc,nocc:]
    e1diag = ehf + e_ia
    # Double-excitation diagonal: orbital-energy differences plus the J-type
    # corrections for the i,j -> a,b excitation.
    e2diag = lib.direct_sum('ia+jb->ijab', e_ia, e_ia)
    e2diag += ehf
    e2diag += jdiag[:nocc,:nocc].reshape(nocc,nocc,1,1)
    e2diag -= jdiag[:nocc,nocc:].reshape(nocc,1,1,nvir)
    e2diag -= jdiag[:nocc,nocc:].reshape(1,nocc,nvir,1)
    e2diag += jdiag[nocc:,nocc:].reshape(1,1,nvir,nvir)
    return numpy.hstack((ehf, e1diag.reshape(-1), e2diag.reshape(-1)))
def contract(myci, civec, eris):
    '''
    Application of CISD hamiltonian onto civec.

    Args:
        myci : CISD (inheriting) object
        civec : numpy array, same length as a CI vector.
        eris : ccsd._ChemistsERIs (inheriting) object (poss diff for df)
            Contains the various (pq|rs) integrals needed.

    Returns:
        numpy array, same length as a CI vector.
    '''
    time0 = logger.process_clock(), logger.perf_counter()
    log = logger.Logger(myci.stdout, myci.verbose)
    nocc = myci.nocc
    nmo = myci.nmo
    nvir = nmo - nocc
    c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
    # vvvv contraction is delegated to _add_vvvv (may run outcore/direct).
    t2 = myci._add_vvvv(c2, eris, t2sym='jiba')
    t2 *= .5  # due to t2+t2.transpose(1,0,3,2) in the end
    log.timer_debug1('vvvv', *time0)
    foo = eris.fock[:nocc,:nocc].copy()
    fov = eris.fock[:nocc,nocc:].copy()
    fvv = eris.fock[nocc:,nocc:].copy()
    # Singles residual: reference coupling plus Fock contractions.
    t1 = fov * c0
    t1 += numpy.einsum('ib,ab->ia', c1, fvv)
    t1 -= numpy.einsum('ja,ji->ia', c1, foo)
    t2 += lib.einsum('kilj,klab->ijab', _cp(eris.oooo)*.5, c2)
    t2 += lib.einsum('ijac,bc->ijab', c2, fvv)
    t2 -= lib.einsum('kj,kiba->jiba', foo, c2)
    t2 += numpy.einsum('ia,jb->ijab', c1, fov)
    # Remaining terms are evaluated in virtual-index blocks to bound memory.
    unit = nocc*nvir**2 + nocc**2*nvir*3 + 1
    max_memory = max(0, myci.max_memory - lib.current_memory()[0])
    blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
    log.debug1('max_memory %d MB, nocc,nvir = %d,%d blksize = %d',
               max_memory, nocc, nvir, blksize)
    for p0, p1 in lib.prange(0, nvir, blksize):
        eris_oVoV = _cp(_cp(eris.oovv[:,:,p0:p1]).transpose(0,2,1,3))
        tmp = lib.einsum('kbjc,ikca->jiba', eris_oVoV, c2)
        t2[:,:,p0:p1] -= tmp*.5
        t2[:,:,p0:p1] -= tmp.transpose(1,0,2,3)
        tmp = None
        eris_ovvo = _cp(eris.ovvo[:,p0:p1])
        t2[:,:,p0:p1] += eris_ovvo.transpose(0,3,1,2) * (c0*.5)
        t1 += numpy.einsum('ia,iabj->jb', c1[:,p0:p1], eris_ovvo) * 2
        t1[:,p0:p1] -= numpy.einsum('ib,iajb->ja', c1, eris_oVoV)
        ovov = -.5 * eris_oVoV
        ovov += eris_ovvo.transpose(3,1,0,2)
        eris_oVoV = None
        # theta: spin-adapted 2*c2 - c2(ij swapped) for this virtual block
        theta = c2[:,:,p0:p1].transpose(2,0,1,3) * 2
        theta-= c2[:,:,p0:p1].transpose(2,1,0,3)
        for j in range(nocc):
            t2[:,j] += lib.einsum('ckb,ckia->iab', ovov[j], theta)
        tmp = ovov = None
        t1 += numpy.einsum('aijb,ia->jb', theta, fov[:,p0:p1])
        eris_ovoo = _cp(eris.ovoo[:,p0:p1])
        t1 -= lib.einsum('bjka,jbki->ia', theta, eris_ovoo)
        t2[:,:,p0:p1] -= lib.einsum('jbik,ka->jiba', eris_ovoo.conj(), c1)
        eris_ovoo = None
        eris_ovvv = eris.get_ovvv(slice(None), slice(p0,p1)).conj()
        t1 += lib.einsum('cjib,jcba->ia', theta, eris_ovvv)
        t2[:,:,p0:p1] += lib.einsum('iacb,jc->ijab', eris_ovvv, c1)
        tmp = eris_ovvv = None
    # In-place symmetrization, equivalent to:
    #:t2 + t2.transpose(1,0,3,2)
    for i in range(nocc):
        if i > 0:
            t2[i,:i]+= t2[:i,i].transpose(0,2,1)
            t2[:i,i] = t2[i,:i].transpose(0,2,1)
        t2[i,i] = t2[i,i] + t2[i,i].T
    # Reference component of the residual.
    t0 = numpy.einsum('ia,ia->', fov, c1) * 2
    t0 += numpy.einsum('iabj,ijab->', eris.ovvo, c2) * 2
    t0 -= numpy.einsum('iabj,jiab->', eris.ovvo, c2)
    cinew = numpy.hstack((t0, t1.ravel(), t2.ravel()))
    return cinew
def amplitudes_to_cisdvec(c0, c1, c2):
    """Pack the CISD amplitudes (c0, c1, c2) into one flat 1D vector."""
    return numpy.concatenate(([c0], c1.ravel(), c2.ravel()))
def cisdvec_to_amplitudes(civec, nmo, nocc):
    """Split a flat CISD vector into (c0, c1, c2) amplitude blocks.

    Layout: element 0 is c0, the next nocc*nvir elements are c1, and the
    remainder is c2 with shape (nocc, nocc, nvir, nvir).
    """
    nvir = nmo - nocc
    nov = nocc * nvir
    c0 = civec[0]
    c1 = civec[1:1+nov].reshape(nocc, nvir)
    c2 = civec[1+nov:].reshape(nocc, nocc, nvir, nvir)
    return c0, c1, c2
def dot(v1, v2, nmo, nocc):
    """Spin-adapted inner product of two CISD vectors.

    Doubles twice the plain dot product (minus one copy of the reference
    term) and removes the exchange-like contraction of the c2 blocks.
    """
    nvir = nmo - nocc
    offset = 1 + nocc * nvir
    c2_bra = v1[offset:].reshape(nocc, nocc, nvir, nvir)
    c2_ket = v2[offset:].reshape(nocc, nocc, nvir, nvir)
    result = 2 * numpy.dot(v1, v2) - v1[0] * v2[0]
    result -= numpy.einsum('jiab,ijab->', c2_bra, c2_ket)
    return result
def t1strs(norb, nelec):
    '''FCI string addresses and phase factors for single excitations.

    Thin convenience wrapper around :func:`tn_addrs_signs` with n_excite=1,
    returning the addresses and the signs picked up when moving the
    reference from the physics vacuum to the HF vacuum.
    '''
    return tn_addrs_signs(norb, nelec, 1)
def tn_addrs_signs(norb, nelec, n_excite):
    '''Compute the FCI strings (address) for CIS n-excitation amplitudes and
    the signs of the coefficients when transferring the reference from physics
    vacuum to HF vacuum.

    If the excitation level is not compatible with the number of
    electrons and holes, empty lists are returned for the addresses and signs.

    Args:
        norb : int
            Total number of orbitals.
        nelec : int
            Number of (same-spin) electrons, i.e. occupied orbitals.
        n_excite : int
            Excitation level n.

    Returns:
        addrs, signs : FCI string addresses and the corresponding +1/-1
        phase factors, one entry per hole/particle combination.
    '''
    # Not enough electrons or holes for excitation; return empty lists.
    if n_excite > min(nelec, norb-nelec):
        return [], []
    nocc = nelec
    hole_strs = cistring.gen_strings4orblist(range(nocc), nocc - n_excite)
    # For HF vacuum, hole operators are ordered from low-lying to high-lying
    # orbitals. It leads to the opposite string ordering.
    hole_strs = hole_strs[::-1]
    # hole_sum[k] accumulates the orbital indices of the holes in string k
    # (a bit that is 0 inside the occupied range marks a hole).
    hole_sum = numpy.zeros(len(hole_strs), dtype=int)
    for i in range(nocc):
        hole_at_i = (hole_strs & (1 << i)) == 0
        hole_sum[hole_at_i] += i
    # The hole operators are listed from low-lying to high-lying orbitals
    # (from left to right). For i-th (0-based) hole operator, the number of
    # orbitals which are higher than i determines the sign. This number
    # equals to nocc-(i+1). After removing the highest hole operator, nocc
    # becomes nocc-1, the sign for next hole operator j will be associated to
    # nocc-1-(j+1). By iteratively calling this procedure, the overall sign
    # for annihilating three holes is (-1)**(3*nocc - 6 - sum i)
    sign = (-1) ** (n_excite * nocc - n_excite*(n_excite+1)//2 - hole_sum)
    particle_strs = cistring.gen_strings4orblist(range(nocc, norb), n_excite)
    # XOR of a hole string with a particle string yields the excited string.
    strs = hole_strs[:,None] ^ particle_strs
    addrs = cistring.strs2addr(norb, nocc, strs.ravel())
    signs = numpy.vstack([sign] * len(particle_strs)).T.ravel()
    return addrs, signs
def to_fcivec(cisdvec, norb, nelec, frozen=None):
    '''Convert CISD coefficients to FCI coefficients.

    Args:
        cisdvec : 1D array
            CISD vector as produced by :func:`amplitudes_to_cisdvec`.
        norb : int
            Total number of orbitals (including frozen ones).
        nelec : int or (int, int)
            Electron count; alpha and beta counts must be equal.
        frozen : int, list or None
            Frozen-orbital specification matching the CISD calculation.

    Returns:
        2D array (na, na) of FCI coefficients.
    '''
    if isinstance(nelec, (int, numpy.number)):
        nelecb = nelec//2
        neleca = nelec - nelecb
    else:
        neleca, nelecb = nelec
        assert(neleca == nelecb)

    # Translate the frozen specification into a boolean orbital mask.
    frozen_mask = numpy.zeros(norb, dtype=bool)
    if frozen is None:
        nfroz = 0
    elif isinstance(frozen, (int, numpy.integer)):
        nfroz = frozen
        frozen_mask[:frozen] = True
    else:
        nfroz = len(frozen)
        frozen_mask[frozen] = True

    nocc = numpy.count_nonzero(~frozen_mask[:neleca])
    nmo = norb - nfroz
    nvir = nmo - nocc
    c0, c1, c2 = cisdvec_to_amplitudes(cisdvec, nmo, nocc)
    t1addr, t1sign = tn_addrs_signs(nmo, nocc, 1)

    # Scatter c0/c1/c2 into the (alpha-string, beta-string) FCI table of the
    # active (non-frozen) space.
    na = cistring.num_strings(nmo, nocc)
    fcivec = numpy.zeros((na,na))
    fcivec[0,0] = c0
    fcivec[0,t1addr] = fcivec[t1addr,0] = c1.ravel() * t1sign
    c2ab = c2.transpose(0,2,1,3).reshape(nocc*nvir,-1)
    c2ab = numpy.einsum('i,j,ij->ij', t1sign, t1sign, c2ab)
    fcivec[t1addr[:,None],t1addr] = c2ab

    if nocc > 1 and nvir > 1:
        # Same-spin double excitations from the antisymmetrized c2.
        c2aa = c2 - c2.transpose(1,0,2,3)
        ooidx = numpy.tril_indices(nocc, -1)
        vvidx = numpy.tril_indices(nvir, -1)
        c2aa = c2aa[ooidx][:,vvidx[0],vvidx[1]]
        t2addr, t2sign = tn_addrs_signs(nmo, nocc, 2)
        fcivec[0,t2addr] = fcivec[t2addr,0] = c2aa.ravel() * t2sign

    if nfroz == 0:
        return fcivec

    # Embed the active-space FCI vector into the full-orbital string space.
    assert(norb < 63)
    strs = cistring.gen_strings4orblist(range(norb), neleca)
    na = len(strs)
    count = numpy.zeros(na, dtype=int)
    parity = numpy.zeros(na, dtype=bool)
    core_mask = numpy.ones(na, dtype=bool)
    # During the loop, count saves the number of occupied orbitals that
    # lower (with small orbital ID) than the present orbital i.
    # Moving all the frozen orbitals to the beginning of the orbital list
    # (before the occupied orbitals) leads to parity odd (= True, with
    # negative sign) or even (= False, with positive sign).
    for i in range(norb):
        if frozen_mask[i]:
            if i < neleca:
                # frozen occupied orbital should be occupied
                core_mask &= (strs & (1 << i)) != 0
                parity ^= (count & 1) == 1
            else:
                # frozen virtual orbital should not be occupied.
                # parity is not needed since it's unoccupied
                core_mask &= (strs & (1 << i)) == 0
        else:
            count += (strs & (1 << i)) != 0
    sub_strs = strs[core_mask & (count == nocc)]
    addrs = cistring.strs2addr(norb, neleca, sub_strs)
    fcivec1 = numpy.zeros((na,na))
    fcivec1[addrs[:,None],addrs] = fcivec
    fcivec1[parity,:] *= -1
    fcivec1[:,parity] *= -1
    return fcivec1
def from_fcivec(ci0, norb, nelec, frozen=None):
    '''Extract CISD coefficients from FCI coefficients'''
    # Frozen orbitals are not supported for the reverse transformation.
    if frozen is not None and frozen != 0:
        raise NotImplementedError

    if isinstance(nelec, (int, numpy.number)):
        nelecb = nelec // 2
        neleca = nelec - nelecb
    else:
        neleca, nelecb = nelec

    nocc = neleca
    nvir = norb - nocc
    t1addr, t1sign = t1strs(norb, nocc)

    # Read the reference, singles and (mixed-spin) doubles blocks out of the
    # FCI table, undoing the vacuum-transfer phases.
    c0 = ci0[0,0]
    c1 = (ci0[0,t1addr] * t1sign).reshape(nocc, nvir)
    c2 = numpy.einsum('i,j,ij->ij', t1sign, t1sign, ci0[t1addr[:,None],t1addr])
    c2 = c2.reshape(nocc,nvir,nocc,nvir).transpose(0,2,1,3)
    return amplitudes_to_cisdvec(c0, c1, c2)
def overlap(cibra, ciket, nmo, nocc, s=None):
    '''Overlap between two CISD wavefunctions.

    Args:
        cibra, ciket : 1D arrays
            CISD vectors (see :func:`amplitudes_to_cisdvec`).
        nmo, nocc : int
            Number of orbitals / occupied orbitals.

    Kwargs:
        s : 2D array
            The overlap matrix of non-orthogonal one-particle basis.
            When None, the orthogonal-basis dot product is returned.
    '''
    if s is None:
        return dot(cibra, ciket, nmo, nocc)

    DEBUG = True

    nvir = nmo - nocc
    nov = nocc * nvir
    bra0, bra1, bra2 = cisdvec_to_amplitudes(cibra, nmo, nocc)
    ket0, ket1, ket2 = cisdvec_to_amplitudes(ciket, nmo, nocc)

    # Sort the ket orbitals to make the orbitals in bra one-one mapt to orbitals
    # in ket.  (Dead when DEBUG is True.)
    if ((not DEBUG) and
        abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
        abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
        ket_orb_idx = numpy.where(abs(s) > 0.9)[1]
        s = s[:,ket_orb_idx]
        oidx = ket_orb_idx[:nocc]
        vidx = ket_orb_idx[nocc:] - nocc
        ket1 = ket1[oidx[:,None],vidx]
        ket2 = ket2[oidx[:,None,None,None],oidx[:,None,None],vidx[:,None],vidx]

    # Antisymmetrized (same-spin) double-excitation coefficients, packed over
    # the strict lower triangles of the occ and vir index pairs.
    ooidx = numpy.tril_indices(nocc, -1)
    vvidx = numpy.tril_indices(nvir, -1)
    bra2aa = bra2 - bra2.transpose(1,0,2,3)
    bra2aa = lib.take_2d(bra2aa.reshape(nocc**2,nvir**2),
                         ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])
    ket2aa = ket2 - ket2.transpose(1,0,2,3)
    ket2aa = lib.take_2d(ket2aa.reshape(nocc**2,nvir**2),
                         ooidx[0]*nocc+ooidx[1], vvidx[0]*nvir+vvidx[1])

    # Occupation lists: reference, then all singles, then all (aa) doubles.
    occlist0 = numpy.arange(nocc).reshape(1,nocc)
    occlists = numpy.repeat(occlist0, 1+nov+bra2aa.size, axis=0)
    occlist0 = occlists[:1]
    occlist1 = occlists[1:1+nov]
    occlist2 = occlists[1+nov:]

    ia = 0
    for i in range(nocc):
        for a in range(nocc, nmo):
            occlist1[ia,i] = a
            ia += 1

    ia = 0
    for i in range(nocc):
        for j in range(i):
            for a in range(nocc, nmo):
                for b in range(nocc, a):
                    occlist2[ia,i] = a
                    occlist2[ia,j] = b
                    ia += 1

    na = len(occlists)
    if DEBUG:
        trans = numpy.empty((na,na))
        for i, idx in enumerate(occlists):
            s_sub = s[idx].T.copy()
            minors = s_sub[occlists]
            trans[i,:] = numpy.linalg.det(minors)

        # Mimic the transformation einsum('ab,ap->pb', FCI, trans).
        # The wavefunction FCI has the [excitation_alpha,excitation_beta]
        # representation. The zero blocks like FCI[S_alpha,D_beta],
        # FCI[D_alpha,D_beta], are explicitly excluded.
        bra_mat = numpy.zeros((na,na))
        bra_mat[0,0] = bra0
        bra_mat[0,1:1+nov] = bra_mat[1:1+nov,0] = bra1.ravel()
        bra_mat[0,1+nov:] = bra_mat[1+nov:,0] = bra2aa.ravel()
        bra_mat[1:1+nov,1:1+nov] = bra2.transpose(0,2,1,3).reshape(nov,nov)
        ket_mat = numpy.zeros((na,na))
        ket_mat[0,0] = ket0
        ket_mat[0,1:1+nov] = ket_mat[1:1+nov,0] = ket1.ravel()
        ket_mat[0,1+nov:] = ket_mat[1+nov:,0] = ket2aa.ravel()
        ket_mat[1:1+nov,1:1+nov] = ket2.transpose(0,2,1,3).reshape(nov,nov)
        ovlp = lib.einsum('ab,ap,bq,pq->', bra_mat, trans, trans, ket_mat)

    else:
        nov1 = 1 + nov
        noovv = bra2aa.size
        bra_SS = numpy.zeros((nov1,nov1))
        bra_SS[0,0] = bra0
        bra_SS[0,1:] = bra_SS[1:,0] = bra1.ravel()
        bra_SS[1:,1:] = bra2.transpose(0,2,1,3).reshape(nov,nov)
        ket_SS = numpy.zeros((nov1,nov1))
        ket_SS[0,0] = ket0
        ket_SS[0,1:] = ket_SS[1:,0] = ket1.ravel()
        ket_SS[1:,1:] = ket2.transpose(0,2,1,3).reshape(nov,nov)

        trans_SS = numpy.empty((nov1,nov1))
        trans_SD = numpy.empty((nov1,noovv))
        trans_DS = numpy.empty((noovv,nov1))
        occlist01 = occlists[:nov1]
        for i, idx in enumerate(occlist01):
            s_sub = s[idx].T.copy()
            minors = s_sub[occlist01]
            trans_SS[i,:] = numpy.linalg.det(minors)
            minors = s_sub[occlist2]
            trans_SD[i,:] = numpy.linalg.det(minors)
            s_sub = s[:,idx].copy()
            minors = s_sub[occlist2]
            trans_DS[:,i] = numpy.linalg.det(minors)

        ovlp = lib.einsum('ab,ap,bq,pq->', bra_SS, trans_SS, trans_SS, ket_SS)
        ovlp+= lib.einsum('ab,a ,bq, q->', bra_SS, trans_SS[:,0], trans_SD, ket2aa.ravel())
        ovlp+= lib.einsum('ab,ap,b ,p ->', bra_SS, trans_SD, trans_SS[:,0], ket2aa.ravel())
        ovlp+= lib.einsum(' b, p,bq,pq->', bra2aa.ravel(), trans_SS[0,:], trans_DS, ket_SS)
        ovlp+= lib.einsum(' b, p,b ,p ->', bra2aa.ravel(), trans_SD[0,:], trans_DS[:,0],
                          ket2aa.ravel())
        ovlp+= lib.einsum('a ,ap, q,pq->', bra2aa.ravel(), trans_DS, trans_SS[0,:], ket_SS)
        ovlp+= lib.einsum('a ,a , q, q->', bra2aa.ravel(), trans_DS[:,0], trans_SD[0,:],
                          ket2aa.ravel())

        # FIXME: whether to approximate the overlap between double excitation coefficients
        # BUG FIX: the second condition below was an independent `if`, which made
        # the "skip" branch dead code -- the D|D' term was evaluated even when
        # the double-excitation coefficients were negligible.  `elif` restores
        # the documented short-circuit.
        if numpy.linalg.norm(bra2aa)*numpy.linalg.norm(ket2aa) < 1e-4:
            # Skip the overlap if coefficients of double excitation are small enough
            pass
        elif (abs(numpy.linalg.det(s[:nocc,:nocc]) - 1) < 1e-2 and
              abs(numpy.linalg.det(s[nocc:,nocc:]) - 1) < 1e-2):
            # If the overlap matrix close to identity enough, use the <D|D'> overlap
            # for orthogonal single-particle basis to approximate the overlap
            # for non-orthogonal basis.
            ovlp+= numpy.dot(bra2aa.ravel(), ket2aa.ravel()) * trans_SS[0,0] * 2
        else:
            from multiprocessing import sharedctypes, Process
            buf_ctypes = sharedctypes.RawArray('d', noovv)
            trans_ket = numpy.ndarray(noovv, buffer=buf_ctypes)
            def trans_dot_ket(i0, i1):
                # Worker: contract one slice of D-D determinant minors with ket.
                for i in range(i0, i1):
                    s_sub = s[occlist2[i]].T.copy()
                    minors = s_sub[occlist2]
                    trans_ket[i] = numpy.linalg.det(minors).dot(ket2aa.ravel())
            nproc = lib.num_threads()
            if nproc > 1:
                seg = (noovv+nproc-1) // nproc
                ps = []
                for i0,i1 in lib.prange(0, noovv, seg):
                    p = Process(target=trans_dot_ket, args=(i0,i1))
                    ps.append(p)
                    p.start()
                [p.join() for p in ps]
            else:
                trans_dot_ket(0, noovv)
            ovlp+= numpy.dot(bra2aa.ravel(), trans_ket) * trans_SS[0,0] * 2
    return ovlp
def make_rdm1(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
    r'''
    Spin-traced one-particle density matrix in MO basis (the occupied-virtual
    blocks from the orbital response contribution are not included).

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    # Fall back to the attributes stored on the CISD object.
    if civec is None:
        civec = myci.ci
    if nmo is None:
        nmo = myci.nmo
    if nocc is None:
        nocc = myci.nocc
    blocks = _gamma1_intermediates(myci, civec, nmo, nocc)
    return ccsd_rdm._make_rdm1(myci, blocks, with_frozen=True,
                               ao_repr=ao_repr)
def make_rdm2(myci, civec=None, nmo=None, nocc=None, ao_repr=False):
    r'''
    Spin-traced two-particle density matrix in MO basis

    dm2[p,q,r,s] = \sum_{sigma,tau} <p_sigma^\dagger r_tau^\dagger s_tau q_sigma>

    Note the contraction between ERIs (in Chemist's notation) and rdm2 is
    E = einsum('pqrs,pqrs', eri, rdm2)
    '''
    # Fall back to the attributes stored on the CISD object.
    if civec is None:
        civec = myci.ci
    if nmo is None:
        nmo = myci.nmo
    if nocc is None:
        nocc = myci.nocc
    gamma1 = _gamma1_intermediates(myci, civec, nmo, nocc)
    # The large 2-RDM intermediates are staged in a temporary HDF5 file,
    # which must stay alive until _make_rdm2 has consumed its datasets.
    h5tmp = lib.H5TmpFile()
    gamma2 = _gamma2_outcore(myci, civec, nmo, nocc, h5tmp, False)
    return ccsd_rdm._make_rdm2(myci, gamma1, gamma2, with_dm1=True,
                               with_frozen=True, ao_repr=ao_repr)
def _gamma1_intermediates(myci, civec, nmo, nocc):
    """Blockwise (oo, ov, vo, vv) pieces of the spin-traced 1-RDM."""
    c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
    # Spin-adapted combination 2*c2(ijab) - c2(ijba).
    theta = c2 * 2 - c2.transpose(0,1,3,2)

    dvo = c0.conj() * c1.T
    dvo += numpy.einsum('jb,ijab->ai', c1.conj(), c2) * 2
    dvo -= numpy.einsum('jb,ijba->ai', c1.conj(), c2)
    # The ov block is the Hermitian conjugate of vo.
    dov = dvo.T.conj()

    doo = -numpy.einsum('ia,ka->ik', c1.conj(), c1)
    doo -= lib.einsum('ijab,ikab->jk', c2.conj(), theta)

    dvv = numpy.einsum('ia,ic->ac', c1, c1.conj())
    dvv += lib.einsum('ijab,ijac->bc', theta, c2.conj())
    return doo, dov, dvo, dvv
def _gamma2_intermediates(myci, civec, nmo, nocc, compress_vvvv=False):
    """In-core variant of :func:`_gamma2_outcore`: materialize all 2-RDM
    intermediate blocks as numpy arrays (dvvov slot is always None)."""
    h5tmp = lib.H5TmpFile()
    _gamma2_outcore(myci, civec, nmo, nocc, h5tmp, compress_vvvv)
    dovov = h5tmp['dovov'][:]
    dvvvv = h5tmp['dvvvv'][:]
    doooo = h5tmp['doooo'][:]
    doovv = h5tmp['doovv'][:]
    dovvo = h5tmp['dovvo'][:]
    dovvv = h5tmp['dovvv'][:]
    dooov = h5tmp['dooov'][:]
    return (dovov, dvvvv, doooo, doovv, dovvo, None, dovvv, dooov)
def _gamma2_outcore(myci, civec, nmo, nocc, h5fobj, compress_vvvv=False):
    '''Build the 2-RDM intermediates, storing the large blocks in h5fobj.

    Returns the datasets (dovov, dvvvv, doooo, doovv, dovvo, None (dvvov),
    dovvv, dooov).  With compress_vvvv=True the vvvv block is stored in
    tril-packed (nvir_pair, nvir_pair) form.
    '''
    log = logger.Logger(myci.stdout, myci.verbose)
    # NOTE: the nocc/nmo arguments are overridden by the object's own values.
    nocc = myci.nocc
    nmo = myci.nmo
    nvir = nmo - nocc
    nvir_pair = nvir * (nvir+1) // 2
    c0, c1, c2 = myci.cisdvec_to_amplitudes(civec, nmo, nocc)
    h5fobj['dovov'] = (2*c0*c2.conj().transpose(0,2,1,3) -
                       c0*c2.conj().transpose(1,2,0,3))

    doooo = lib.einsum('ijab,klab->ijkl', c2.conj(), c2)
    h5fobj['doooo'] = doooo.transpose(0,2,1,3) - doooo.transpose(1,2,0,3)*.5
    doooo = None

    dooov = -lib.einsum('ia,klac->klic', c1*2, c2.conj())
    h5fobj['dooov'] = dooov.transpose(0,2,1,3)*2 - dooov.transpose(1,2,0,3)
    dooov = None

    # The vvvv-sized intermediates are built blockwise over the first virtual
    # index to respect the memory budget; the dense references are:
    #:dvovv = numpy.einsum('ia,ikcd->akcd', c1, c2) * 2
    #:dvvvv = lib.einsum('ijab,ijcd->abcd', c2, c2)
    max_memory = max(0, myci.max_memory - lib.current_memory()[0])
    unit = max(nocc**2*nvir*2+nocc*nvir**2*3 + 1, nvir**3*2+nocc*nvir**2 + 1)
    blksize = min(nvir, max(BLKMIN, int(max_memory*.9e6/8/unit)))
    log.debug1('rdm intermediates: block size = %d, nvir = %d in %d blocks',
               blksize, nocc, int((nvir+blksize-1)/blksize))
    dtype = numpy.result_type(civec).char
    dovvv = h5fobj.create_dataset('dovvv', (nocc,nvir,nvir,nvir), dtype,
                                  chunks=(nocc,min(nocc,nvir),1,nvir))
    if compress_vvvv:
        dvvvv = h5fobj.create_dataset('dvvvv', (nvir_pair,nvir_pair), dtype)
    else:
        dvvvv = h5fobj.create_dataset('dvvvv', (nvir,nvir,nvir,nvir), dtype)

    for (p0, p1) in lib.prange(0, nvir, blksize):
        theta = c2[:,:,p0:p1] - c2[:,:,p0:p1].transpose(1,0,2,3) * .5
        gvvvv = lib.einsum('ijab,ijcd->abcd', theta.conj(), c2)
        if compress_vvvv:
            # symmetrize dvvvv because it does not affect the results of cisd_grad
            # dvvvv = (dvvvv+dvvvv.transpose(0,1,3,2)) * .5
            # dvvvv = (dvvvv+dvvvv.transpose(1,0,2,3)) * .5
            # now dvvvv == dvvvv.transpose(0,1,3,2) == dvvvv.transpose(1,0,3,2)
            tmp = numpy.empty((nvir,nvir,nvir))
            tmpvvvv = numpy.empty((p1-p0,nvir,nvir_pair))
            for i in range(p1-p0):
                tmp[:] = gvvvv[i].conj().transpose(1,0,2)
                lib.pack_tril(tmp+tmp.transpose(0,2,1), out=tmpvvvv[i])
            # tril of (dvvvv[p0:p1,p0:p1]+dvvvv[p0:p1,p0:p1].T)
            for i in range(p0, p1):
                for j in range(p0, i):
                    tmpvvvv[i-p0,j] += tmpvvvv[j-p0,i]
                tmpvvvv[i-p0,i] *= 2
            for i in range(p1, nvir):
                off = i * (i+1) // 2
                dvvvv[off+p0:off+p1] = tmpvvvv[:,i]
            for i in range(p0, p1):
                off = i * (i+1) // 2
                if p0 > 0:
                    tmpvvvv[i-p0,:p0] += dvvvv[off:off+p0]
                dvvvv[off:off+i+1] = tmpvvvv[i-p0,:i+1] * .25
            tmp = tmpvvvv = None
        else:
            for i in range(p0, p1):
                dvvvv[i] = gvvvv[i-p0].conj().transpose(1,0,2)

        gvovv = numpy.einsum('ia,ikcd->akcd', c1[:,p0:p1].conj()*2, c2)
        gvovv = gvovv.conj()
        dovvv[:,:,p0:p1] = gvovv.transpose(1,3,0,2)*2 - gvovv.transpose(1,2,0,3)

    theta = c2*2 - c2.transpose(1,0,2,3)
    doovv = numpy.einsum('ia,kc->ikca', c1.conj(), -c1)
    doovv -= lib.einsum('kjcb,kica->jiab', c2.conj(), theta)
    doovv -= lib.einsum('ikcb,jkca->ijab', c2.conj(), theta)
    h5fobj['doovv'] = doovv
    doovv = None

    dovvo = lib.einsum('ikac,jkbc->iabj', theta.conj(), theta)
    dovvo += numpy.einsum('ia,kc->iack', c1.conj(), c1) * 2
    h5fobj['dovvo'] = dovvo
    theta = dovvo = None

    dvvov = None
    return (h5fobj['dovov'], h5fobj['dvvvv'], h5fobj['doooo'], h5fobj['doovv'],
            h5fobj['dovvo'], dvvov, h5fobj['dovvv'], h5fobj['dooov'])
def trans_rdm1(myci, cibra, ciket, nmo=None, nocc=None):
    r'''
    Spin-traced one-particle transition density matrix in MO basis.

    dm1[p,q] = <q_alpha^\dagger p_alpha> + <q_beta^\dagger p_beta>

    The convention of 1-pdm is based on McWeeney's book, Eq (5.4.20).
    The contraction between 1-particle Hamiltonian and rdm1 is
    E = einsum('pq,qp', h1, rdm1)
    '''
    if nmo is None: nmo = myci.nmo
    if nocc is None: nocc = myci.nocc
    c0bra, c1bra, c2bra = myci.cisdvec_to_amplitudes(cibra, nmo, nocc)
    c0ket, c1ket, c2ket = myci.cisdvec_to_amplitudes(ciket, nmo, nocc)

    dvo = c0bra.conj() * c1ket.T
    dvo += numpy.einsum('jb,ijab->ai', c1bra.conj(), c2ket) * 2
    dvo -= numpy.einsum('jb,ijba->ai', c1bra.conj(), c2ket)

    dov = c0ket * c1bra.conj()
    dov += numpy.einsum('jb,ijab->ia', c1ket, c2bra.conj()) * 2
    dov -= numpy.einsum('jb,ijba->ia', c1ket, c2bra.conj())

    # Spin-adapted ket doubles 2*c2 - c2(ab swapped).
    theta = c2ket*2 - c2ket.transpose(0,1,3,2)
    doo = -numpy.einsum('ia,ka->ik', c1bra.conj(), c1ket)
    doo -= lib.einsum('ijab,ikab->jk', c2bra.conj(), theta)
    dvv = numpy.einsum('ia,ic->ac', c1ket, c1bra.conj())
    dvv += lib.einsum('ijab,ijac->bc', theta, c2bra.conj())

    dm1 = numpy.empty((nmo,nmo), dtype=doo.dtype)
    dm1[:nocc,:nocc] = doo * 2
    dm1[:nocc,nocc:] = dov * 2
    dm1[nocc:,:nocc] = dvo * 2
    dm1[nocc:,nocc:] = dvv * 2
    # The <bra|ket> norm enters on the occupied diagonal.
    norm = dot(cibra, ciket, nmo, nocc)
    dm1[numpy.diag_indices(nocc)] += 2 * norm

    if myci.frozen is not None:
        # Embed the active-space matrix into the full MO space; frozen
        # occupied orbitals carry occupation 2*<bra|ket>.
        nmo = myci.mo_occ.size
        nocc = numpy.count_nonzero(myci.mo_occ > 0)
        rdm1 = numpy.zeros((nmo,nmo), dtype=dm1.dtype)
        rdm1[numpy.diag_indices(nocc)] = 2 * norm
        moidx = numpy.where(myci.get_frozen_mask())[0]
        rdm1[moidx[:,None],moidx] = dm1
        dm1 = rdm1
    return dm1
def as_scanner(ci):
    '''Generating a scanner/solver for CISD PES.

    The returned solver is a function. This function requires one argument
    "mol" as input and returns total CISD energy.

    The solver will automatically use the results of last calculation as the
    initial guess of the new calculation.  All parameters assigned in the
    CISD and the underlying SCF objects (conv_tol, max_memory etc) are
    automatically applied in the solver.

    Note scanner has side effects.  It may change many underlying objects
    (_scf, with_df, with_x2c, ...) during calculation.

    Examples::

        >>> from pyscf import gto, scf, ci
        >>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
        >>> ci_scanner = ci.CISD(scf.RHF(mol)).as_scanner()
        >>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
        >>> e_tot = ci_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
    '''
    from pyscf import gto
    if isinstance(ci, lib.SinglePointScanner):
        return ci

    logger.info(ci, 'Set %s as a scanner', ci.__class__)

    class CISD_Scanner(ci.__class__, lib.SinglePointScanner):
        def __init__(self, ci):
            self.__dict__.update(ci.__dict__)
            self._scf = ci._scf.as_scanner()
        def __call__(self, mol_or_geom, ci0=None, **kwargs):
            # Accept either a Mole object or a raw geometry specification.
            if isinstance(mol_or_geom, gto.Mole):
                mol = mol_or_geom
            else:
                mol = self.mol.set_geom_(mol_or_geom, inplace=False)
            self.reset(mol)
            mf_scanner = self._scf
            mf_scanner(mol)
            self.mo_coeff = mf_scanner.mo_coeff
            self.mo_occ = mf_scanner.mo_occ
            # Drop a stale CI vector if the orbital-space size changed.
            if getattr(self.ci, 'size', 0) != self.vector_size():
                self.ci = None
            if ci0 is None:
                # FIXME: Whether to use the initial guess from last step?
                # If root flips, large errors may be found in the solutions
                ci0 = self.ci
            self.kernel(ci0, **kwargs)[0]
            return self.e_tot
    return CISD_Scanner(ci)
class CISD(lib.StreamObject):
'''restricted CISD
Attributes:
verbose : int
Print level. Default value equals to :class:`Mole.verbose`
max_memory : float or int
Allowed memory in MB. Default value equals to
:class:`Mole.max_memory`
conv_tol : float
converge threshold. Default is 1e-9.
max_cycle : int
max number of iterations. Default is 50.
max_space : int
Davidson diagonalization space size. Default is 12.
direct : bool
AO-direct CISD. Default is False.
async_io : bool
Allow for asynchronous function execution. Default is True.
frozen : int or list
If integer is given, the inner-most orbitals are frozen from CI
amplitudes. Given the orbital indices (0-based) in a list, both
occupied and virtual orbitals can be frozen in CI calculation.
>>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz')
>>> mf = scf.RHF(mol).run()
>>> # freeze 2 core orbitals
>>> myci = ci.CISD(mf).set(frozen = 2).run()
>>> # freeze 2 core orbitals and 3 high lying unoccupied orbitals
>>> myci.set(frozen = [0,1,16,17,18]).run()
Saved results
converged : bool
CISD converged or not
e_corr : float
CISD correlation correction
e_tot : float
            Total CISD energy (HF + correlation)
ci :
CI wavefunction coefficients
'''
conv_tol = getattr(__config__, 'ci_cisd_CISD_conv_tol', 1e-9)
max_cycle = getattr(__config__, 'ci_cisd_CISD_max_cycle', 50)
max_space = getattr(__config__, 'ci_cisd_CISD_max_space', 12)
lindep = getattr(__config__, 'ci_cisd_CISD_lindep', 1e-14)
level_shift = getattr(__config__, 'ci_cisd_CISD_level_shift', 0) # in preconditioner
direct = getattr(__config__, 'ci_cisd_CISD_direct', False)
async_io = getattr(__config__, 'ci_cisd_CISD_async_io', True)
    def __init__(self, mf, frozen=None, mo_coeff=None, mo_occ=None):
        # Guard against accidentally passing a DFT object: CISD must be
        # initialized from an HF reference.
        if 'dft' in str(mf.__module__):
            raise RuntimeError('CISD Warning: The first argument mf is a DFT object. '
                               'CISD calculation should be initialized with HF object.\n'
                               'DFT object can be converted to HF object with '
                               'the code below:\n'
                               ' mf_hf = scf.RHF(mol)\n'
                               ' mf_hf.__dict__.update(mf_dft.__dict__)\n')

        if mo_coeff is None: mo_coeff = mf.mo_coeff
        if mo_occ is None: mo_occ = mf.mo_occ

        # Input options, inherited from the mean-field object where sensible.
        self.mol = mf.mol
        self._scf = mf
        self.verbose = self.mol.verbose
        self.stdout = self.mol.stdout
        self.max_memory = mf.max_memory
        self.nroots = 1
        self.frozen = frozen
        self.chkfile = mf.chkfile

##################################################
# don't modify the following attributes, they are not input options
        self.converged = False
        self.mo_coeff = mo_coeff
        self.mo_occ = mo_occ
        self.e_corr = None
        self.emp2 = None
        self.ci = None
        self._nocc = None
        self._nmo = None
        keys = set(('conv_tol', 'max_cycle', 'max_space', 'lindep',
                    'level_shift', 'direct'))
        self._keys = set(self.__dict__.keys()).union(keys)
def dump_flags(self, verbose=None):
log = logger.new_logger(self, verbose)
log.info('')
log.info('******** %s ********', self.__class__)
log.info('CISD nocc = %s, nmo = %s', self.nocc, self.nmo)
if self.frozen is not None:
log.info('frozen orbitals %s', str(self.frozen))
log.info('max_cycle = %d', self.max_cycle)
log.info('direct = %d', self.direct)
log.info('conv_tol = %g', self.conv_tol)
log.info('max_cycle = %d', self.max_cycle)
log.info('max_space = %d', self.max_space)
log.info('lindep = %d', self.lindep)
log.info('nroots = %d', self.nroots)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
    @property
    def e_tot(self):
        # Total energy = HF reference energy + CISD correlation energy.
        return numpy.asarray(self.e_corr) + self._scf.e_tot

    @property
    def nstates(self):
        # Alias of nroots (number of CI states requested).
        return self.nroots
    @nstates.setter
    def nstates(self, x):
        self.nroots = x

    @property
    def nocc(self):
        # Number of correlated occupied orbitals (computed via get_nocc,
        # which accounts for frozen orbitals).
        return self.get_nocc()
    @nocc.setter
    def nocc(self, n):
        self._nocc = n

    @property
    def nmo(self):
        # Number of correlated molecular orbitals (computed via get_nmo).
        return self.get_nmo()
    @nmo.setter
    def nmo(self, n):
        self._nmo = n
def vector_size(self):
'''The size of the vector which was returned from
:func:`amplitudes_to_cisdvec`
'''
nocc = self.nocc
nvir = self.nmo - nocc
return 1 + nocc*nvir + (nocc*nvir)**2
def reset(self, mol=None):
if mol is not None:
self.mol = mol
self._scf.reset(mol)
return self
    # Orbital-counting helpers shared with the CCSD implementation.
    get_nocc = ccsd.get_nocc
    get_nmo = ccsd.get_nmo
    get_frozen_mask = ccsd.get_frozen_mask
    def kernel(self, ci0=None, eris=None):
        '''Alias of :meth:`cisd`: run the CISD calculation.'''
        return self.cisd(ci0, eris)
    def cisd(self, ci0=None, eris=None):
        '''Run the CISD iteration.

        Kwargs:
            ci0 : initial guess CI vector(s); generated internally if omitted.
            eris : MO integrals; built via :meth:`ao2mo` when omitted.
        Returns:
            (e_corr, ci) — correlation energy and converged CI vector(s).
        '''
        if eris is None:
            eris = self.ao2mo(self.mo_coeff)
        if self.verbose >= logger.WARN:
            self.check_sanity()
        self.dump_flags()
        # Module-level kernel performs the Davidson-style solve; results are
        # stored on self before _finalize() reports them.
        self.converged, self.e_corr, self.ci = \
                kernel(self, eris, ci0, max_cycle=self.max_cycle,
                       tol=self.conv_tol, verbose=self.verbose)
        self._finalize()
        return self.e_corr, self.ci
    def _finalize(self):
        '''Log the outcome (convergence and energies) after the solve.'''
        citype = self.__class__.__name__
        if numpy.all(self.converged):
            logger.info(self, '%s converged', citype)
        else:
            logger.info(self, '%s not converged', citype)
        if self.nroots > 1:
            # One total energy per requested root.
            for i,e in enumerate(self.e_tot):
                logger.note(self, '%s root %d  E = %.16g', citype, i, e)
        else:
            logger.note(self, 'E(%s) = %.16g  E_corr = %.16g',
                        citype, self.e_tot, self.e_corr)
        return self
    def get_init_guess(self, eris=None, nroots=1, diag=None):
        '''
        MP2 energy and MP2 initial guess(es) for CISD coefficients.

        Kwargs:
            eris : ccsd._ChemistsERIs (inheriting) object (poss diff for df)
                Contains the various (pq|rs) integrals needed.
            nroots : integer
                Number of CISD solutions to be found.
            diag : numpy array (1D)
                e.g. CISD Hamiltonian diagonal in Slater determinant
                space with HF energy subtracted.

        Returns:
            Tuple of float and numpy array or
            tuple of float and list of numpy arrays (if nroots > 1)
            MP2 energy and initial guess(es) for CISD coefficients.
        '''
        if eris is None: eris = self.ao2mo(self.mo_coeff)
        nocc = self.nocc
        mo_e = eris.mo_energy
        # Orbital-energy denominators e_i - e_a over the occ x vir block.
        e_ia = lib.direct_sum('i-a->ia', mo_e[:nocc], mo_e[nocc:])
        ci0 = 1
        ci1 = eris.fock[:nocc,nocc:] / e_ia
        eris_ovvo = _cp(eris.ovvo)
        # MP2-like doubles amplitudes built from the (ov|vo) integrals and
        # divided by the pair-energy denominators.
        ci2 = 2 * eris_ovvo.transpose(0,3,1,2)
        ci2 -= eris_ovvo.transpose(0,3,2,1)
        ci2 /= lib.direct_sum('ia,jb->ijab', e_ia, e_ia)
        self.emp2 = numpy.einsum('ijab,iabj', ci2, eris_ovvo)
        logger.info(self, 'Init t2, MP2 energy = %.15g', self.emp2)
        if abs(self.emp2) < 1e-3 and abs(ci1).sum() < 1e-3:
            # To avoid ci1 being stuck at local minimum
            ci1 = 1e-1 / e_ia
        ci_guess = amplitudes_to_cisdvec(ci0, ci1, ci2)
        if nroots > 1:
            civec_size = ci_guess.size
            dtype = ci_guess.dtype
            nroots = min(ci1.size+1, nroots)  # Consider Koopmans' theorem only
            if diag is None:
                idx = range(1, nroots)
            else:
                idx = diag[:ci1.size+1].argsort()[1:nroots]  # exclude HF determinant
            # Extra roots start from single-determinant unit vectors.
            ci_guess = [ci_guess]
            for i in idx:
                g = numpy.zeros(civec_size, dtype)
                g[i] = 1.0
                ci_guess.append(g)
        return self.emp2, ci_guess
    # Hamiltonian-vector contraction and Hamiltonian diagonal are module-level
    # functions attached as methods.
    contract = contract
    make_diagonal = make_diagonal
    def _dot(self, x1, x2, nmo=None, nocc=None):
        '''Inner product of two CI vectors (delegates to module-level dot).'''
        if nmo is None: nmo = self.nmo
        if nocc is None: nocc = self.nocc
        return dot(x1, x2, nmo, nocc)
    def ao2mo(self, mo_coeff=None):
        '''Build the MO-basis two-electron integrals.

        Dispatches to the incore, DF-based outcore, or plain outcore CCSD
        integral builders depending on available memory and whether the SCF
        object uses density fitting.
        '''
        nmo = self.nmo
        nao = self.mo_coeff.shape[0]
        nmo_pair = nmo * (nmo+1) // 2
        nao_pair = nao * (nao+1) // 2
        # Rough incore memory estimate in MB (8-byte floats).
        mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6
        mem_now = lib.current_memory()[0]
        # NOTE(review): `and` binds tighter than `or`, so incore_anyway forces
        # the incore path even when _scf._eri is None — TODO confirm intent.
        if (self._scf._eri is not None and
            (mem_incore+mem_now < self.max_memory) or self.mol.incore_anyway):
            return ccsd._make_eris_incore(self, mo_coeff)
        if getattr(self._scf, 'with_df', None):
            logger.warn(self, 'CISD detected DF being used in the HF object. '
                        'MO integrals are computed based on the DF 3-index tensors.\n'
                        'It\'s recommended to use dfccsd.CCSD for the '
                        'DF-CISD calculations')
            return ccsd._make_df_eris_outcore(self, mo_coeff)
        return ccsd._make_eris_outcore(self, mo_coeff)
    def _add_vvvv(self, c2, eris, out=None, t2sym=None):
        '''Contract the (vv|vv) integral block with the doubles amplitudes
        (delegates to the CCSD helper).'''
        return ccsd._add_vvvv(self, None, c2, eris, out, False, t2sym)
    def to_fcivec(self, cisdvec, norb=None, nelec=None, frozen=None):
        '''Convert a CISD vector into an FCI vector.'''
        if norb is None: norb = self.nmo
        if nelec is None: nelec = self.nocc*2
        return to_fcivec(cisdvec, norb, nelec, frozen)
    def from_fcivec(self, fcivec, norb=None, nelec=None):
        '''Extract the CISD part of an FCI vector.'''
        if norb is None: norb = self.nmo
        if nelec is None: nelec = self.nocc*2
        return from_fcivec(fcivec, norb, nelec)
    # Density-matrix builders and the scanner factory are module-level
    # functions attached as methods.
    make_rdm1 = make_rdm1
    make_rdm2 = make_rdm2
    trans_rdm1 = trans_rdm1
    as_scanner = as_scanner
def dump_chk(self, ci=None, frozen=None, mo_coeff=None, mo_occ=None):
if not self.chkfile:
return self
if ci is None: ci = self.ci
if frozen is None: frozen = self.frozen
# "None" cannot be serialized by the chkfile module
if frozen is None:
frozen = 0
ci_chk = {'e_corr': self.e_corr,
'ci': ci,
'frozen': frozen}
if mo_coeff is not None: ci_chk['mo_coeff'] = mo_coeff
if mo_occ is not None: ci_chk['mo_occ'] = mo_occ
if self._nmo is not None: ci_chk['_nmo'] = self._nmo
if self._nocc is not None: ci_chk['_nocc'] = self._nocc
lib.chkfile.save(self.chkfile, 'cisd', ci_chk)
    def amplitudes_to_cisdvec(self, c0, c1, c2):
        '''Pack (c0, c1, c2) amplitudes into one flat CI vector.'''
        return amplitudes_to_cisdvec(c0, c1, c2)
    def cisdvec_to_amplitudes(self, civec, nmo=None, nocc=None):
        '''Unpack a flat CI vector into (c0, c1, c2) amplitudes.'''
        if nmo is None: nmo = self.nmo
        if nocc is None: nocc = self.nocc
        return cisdvec_to_amplitudes(civec, nmo, nocc)
    def density_fit(self):
        # Density-fitted CISD is not implemented for this class.
        raise NotImplementedError
    def nuc_grad_method(self):
        '''Return the analytical nuclear-gradient solver for CISD.'''
        from pyscf.grad import cisd
        return cisd.Gradients(self)
class RCISD(CISD):
    """Spin-restricted CISD (identical to the base implementation)."""
    pass

from pyscf import scf
# Attach CISD as a method of RHF mean-field objects.  ROHF is set to None —
# presumably to block mf.CISD() for ROHF references; verify against the
# unrestricted/ROHF code paths.
scf.hf.RHF.CISD = lib.class_as_method(RCISD)
scf.rohf.ROHF.CISD = None
def _cp(a):
return numpy.array(a, copy=False, order='C')
if __name__ == '__main__':
    from pyscf import gto
    from pyscf import ao2mo
    # Sanity test: water / STO-3G, comparing the CISD energy against a
    # reference value and checking the density matrices against the energy.
    mol = gto.Mole()
    mol.verbose = 0
    mol.atom = [
        ['O', ( 0., 0.    , 0.   )],
        ['H', ( 0., -0.757, 0.587)],
        ['H', ( 0., 0.757 , 0.587)],]
    mol.basis = 'sto3g'
    mol.build()
    mf = scf.RHF(mol).run()
    myci = CISD(mf)
    eris = ccsd._make_eris_outcore(myci, mf.mo_coeff)
    ecisd, civec = myci.kernel(eris=eris)
    # Reference correlation energy; the printed difference should be ~0.
    print(ecisd - -0.048878084082066106)

    nmo = myci.nmo
    nocc = myci.nocc
    rdm1 = myci.make_rdm1(civec)
    rdm2 = myci.make_rdm2(civec)
    # Rebuild the total electronic energy from the 1- and 2-RDMs; the
    # difference from the CISD total energy should vanish.
    h1e = reduce(numpy.dot, (mf.mo_coeff.T, mf.get_hcore(), mf.mo_coeff))
    h2e = ao2mo.kernel(mf._eri, mf.mo_coeff)
    h2e = ao2mo.restore(1, h2e, nmo)
    e2 = (numpy.einsum('ij,ji', h1e, rdm1) +
          numpy.einsum('ijkl,ijkl', h2e, rdm2) * .5)
    print(ecisd + mf.e_tot - mol.energy_nuc() - e2)   # = 0
    # Partial-trace consistency between rdm2 and rdm1.
    print(abs(rdm1 - numpy.einsum('ijkk->ji', rdm2)/(mol.nelectron-1)).sum())
|
kafka.py | import json
import logging
from typing import List
from threading import Thread
from multiprocessing import Queue, Value
from confluent_kafka.admin import AdminClient, NewTopic
from confluent_kafka import Consumer, TopicPartition
from .model import EventSourceHook
class KafkaEventSource(EventSourceHook):
    """Event source that consumes JSON-encoded events from a Kafka topic.

    Decoded events are pushed onto ``event_queue``; offsets are committed
    explicitly via :meth:`commit` (auto-commit is disabled).  If the topic
    does not exist it is created on start and deleted again by stop().
    """

    def __init__(self,
                 event_queue: Queue,
                 broker_list: List[str],
                 topic: str,
                 auth_mode: str,
                 username: str = None,
                 password: str = None,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.event_queue = event_queue
        self.broker_list = broker_list
        self.username = username
        self.password = password
        self.auth_mode = auth_mode
        self.__config = {'bootstrap.servers': ','.join(self.broker_list),
                         'group.id': self.name,
                         'default.topic.config': {'auto.offset.reset': 'earliest'},
                         'enable.auto.commit': False
                         }
        if self.auth_mode == 'SASL_PLAINTEXT':
            # append Event streams specific config
            self.__config.update({'ssl.ca.location': '/etc/ssl/certs/',
                                  'sasl.mechanisms': 'PLAIN',
                                  'sasl.username': self.username,
                                  'sasl.password': self.password,
                                  'security.protocol': 'sasl_ssl'
                                  })
        self.consumer = None
        self.topic = topic or self.name
        self.records = []
        self.commit_queue = Queue()
        # Shared int flag: 1 when we created the topic ourselves, so that
        # stop() knows it must delete it again.
        self.created_topic = Value('i', 0)

    def __start_commiter(self):
        """Start the background thread that performs synchronous offset commits."""
        def commiter(commit_queue):
            while True:
                ids = commit_queue.get()
                if ids is None:  # sentinel pushed by stop()
                    break
                offsets = [TopicPartition(*id) for id in ids]
                self.consumer.commit(offsets=offsets, asynchronous=False)
        self.__commiter = Thread(target=commiter, args=(self.commit_queue, ))
        self.__commiter.start()

    def run(self):
        """Consume messages forever, forwarding decoded events to event_queue."""
        self.consumer = Consumer(self.__config)
        self.__start_commiter()

        # Create topic if it does not exist
        topics = self.consumer.list_topics().topics
        if self.topic not in topics:
            self.__create_topic(self.topic)
            self.created_topic.value = 1

        self.consumer.subscribe([self.topic])
        logging.info("[{}] Started consuming from topic {}".format(self.name, self.topic))
        payload = None
        while True:
            try:
                message = self.consumer.poll()
                if message is None or message.error():
                    # Skip empty polls and broker error events; previously
                    # message.value() crashed the loop on such messages.
                    continue
                logging.info("[{}] Received event".format(self.name))
                payload = message.value().decode('utf-8')
                event = json.loads(payload)
                try:
                    event['data'] = json.loads(event['data'])
                except Exception:
                    pass
                # offset + 1: a committed offset marks the *next* message to read.
                event['id'] = (message.topic(), message.partition(), message.offset() + 1)
                event['event_source'] = self.name
                self.event_queue.put(event)
                self.records.append(message)
            except TypeError:
                logging.error("[{}] Received event did not contain "
                              "JSON payload, got {} instead".format(self.name, type(payload)))

    def commit(self, ids):
        """Queue (topic, partition, offset) tuples for the commiter thread."""
        self.commit_queue.put(ids)

    def __create_topic(self, topic):
        """Create *topic* (1 partition, replication 1).  Returns True on success."""
        admin_client = AdminClient(self.__config)
        new_topic = [NewTopic(topic, num_partitions=1, replication_factor=1)]
        # Call create_topics to asynchronously create topics, a dict of <topic,future> is returned.
        fs = admin_client.create_topics(new_topic)
        # Wait for operation to finish.
        # Timeouts are preferably controlled by passing request_timeout=15.0
        # to the create_topics() call.
        # All futures will finish at the same time.
        for topic, f in fs.items():
            try:
                f.result()  # The result itself is None
                logging.info("[{}] Topic {} created".format(self.name, topic))
                return True
            except Exception as e:
                logging.info("[{}] Failed to create topic {}: {}".format(self.name, topic, e))
                return False

    def __delete_topic(self, topic):
        """Delete *topic*.  Returns True on success."""
        admin_client = AdminClient(self.__config)
        # BUG FIX: delete the topic passed as argument; this method previously
        # ignored its parameter and always deleted self.topic.
        fs = admin_client.delete_topics([topic])
        # Wait for operation to finish.
        # Timeouts are preferably controlled by passing request_timeout=15.0
        # to the delete_topics() call.
        # All futures will finish at the same time.
        for topic, f in fs.items():
            try:
                f.result()  # The result itself is None
                logging.info("[{}] Topic {} deleted".format(self.name, topic))
                return True
            except Exception as e:
                logging.info("[{}] Failed to delete topic {}: {}".format(self.name, topic, e))
                return False

    @staticmethod
    def __get_offset_list(events):
        """Map consumed messages to the TopicPartition offsets to commit."""
        offsets = []
        for message in events:
            # Add one to the offset, otherwise we'll consume this message again.
            # That's just how Kafka works, you place the bookmark at the *next* message.
            offsets.append(TopicPartition(message.topic(), message.partition(), message.offset() + 1))
        return offsets

    @property
    def config(self):
        """Serializable view of this source's configuration.

        NOTE(review): includes the SASL password when set — confirm consumers
        of this property do not log or persist it in plain text.
        """
        d = self.__config.copy()
        d['type'] = 'kafka'
        d['topic'] = self.topic
        return d

    def stop(self):
        """Stop consuming: unblock the commiter thread, clean up, terminate."""
        logging.info("[{}] Stopping event source".format(self.name))
        self.commit_queue.put(None)
        if self.created_topic.value:
            self.__delete_topic(self.topic)
        self.terminate()
|
example_binance_futures.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: example_binance_futures.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import time
import threading
import os
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Worker loop: pop received records off the stream_buffer and print them.

    Exits the process when the manager is shutting down; sleeps briefly when
    the buffer is empty (pop returns False) to avoid busy-waiting.
    """
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            exit(0)
        oldest_stream_data_from_stream_buffer = binance_websocket_api_manager.pop_stream_data_from_stream_buffer()
        if oldest_stream_data_from_stream_buffer is False:
            time.sleep(0.01)
        else:
            # BUG FIX: popped records were silently discarded before, although
            # the function (and the caller's comment) promise to print them.
            print(oldest_stream_data_from_stream_buffer)
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(filename=os.path.basename(__file__) + '.log',
                    format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
                    style="{")
logging.getLogger('unicorn-log').addHandler(logging.StreamHandler())
logging.getLogger('unicorn-log').setLevel(logging.INFO)

# create instance of BinanceWebSocketApiManager for Binance.com Futures
binance_websocket_api_manager = BinanceWebSocketApiManager(exchange="binance.com-futures")

print("starting monitoring api!")
binance_websocket_api_manager.start_monitoring_api()

# set api key and secret for userData stream
binance_je_api_key = ""
binance_je_api_secret = ""
binance_websocket_api_manager.set_private_api_config(binance_je_api_key, binance_je_api_secret)
userdata_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!userData"])

bookticker_all_stream_id = binance_websocket_api_manager.create_stream(["arr"], ["!bookTicker"])

markets = {'btcusdt', 'bchusdt', 'ethusdt'}
binance_websocket_api_manager.create_stream(["aggTrade"], markets)
binance_websocket_api_manager.create_stream(["markPrice"], markets)
binance_websocket_api_manager.create_stream(["kline_1m"], markets)
binance_websocket_api_manager.create_stream(["kline_5m"], markets)
binance_websocket_api_manager.create_stream(["kline_15m"], markets)
binance_websocket_api_manager.create_stream(["kline_1h"], markets)
binance_websocket_api_manager.create_stream(["kline_12h"], markets)
binance_websocket_api_manager.create_stream(["kline_1w"], markets)
binance_websocket_api_manager.create_stream(["ticker"], markets)
binance_websocket_api_manager.create_stream(["miniTicker"], markets)
binance_websocket_api_manager.create_stream(["bookTicker"], markets)
binance_websocket_api_manager.create_stream(["depth"], markets)
binance_websocket_api_manager.create_stream(["depth@2500ms"], markets)
binance_websocket_api_manager.create_stream(["depth5"], markets)
binance_websocket_api_manager.create_stream(["depth5@100ms"], markets)
binance_websocket_api_manager.create_stream(["depth10"], markets)
binance_websocket_api_manager.create_stream(["depth20"], markets)

# BUG FIX: a missing comma between 'markPrice' and 'kline_1m' concatenated
# them into the bogus channel 'markPricekline_1m'.
channels = {'aggTrade', 'markPrice', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_12h',
            'miniTicker', 'depth20@100ms', 'bookTicker', 'kline_1w@250ms'}
binance_websocket_api_manager.create_stream(channels, markets)

# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()

# show an overview
while True:
    binance_websocket_api_manager.print_summary()
    time.sleep(1)
|
__init__.py | """Support for functionality to download files."""
from http import HTTPStatus
import logging
import os
import re
import threading
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.util import raise_if_invalid_filename, raise_if_invalid_path
_LOGGER = logging.getLogger(__name__)

# Service-call attribute names.
ATTR_FILENAME = "filename"
ATTR_SUBDIR = "subdir"
ATTR_URL = "url"
ATTR_OVERWRITE = "overwrite"

CONF_DOWNLOAD_DIR = "download_dir"
DOMAIN = "downloader"
# Event types fired on the bus (prefixed with DOMAIN when fired).
DOWNLOAD_FAILED_EVENT = "download_failed"
DOWNLOAD_COMPLETED_EVENT = "download_completed"

SERVICE_DOWNLOAD_FILE = "download_file"

# Schema for the download_file service call.
SERVICE_DOWNLOAD_FILE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_URL): cv.url,
        vol.Optional(ATTR_SUBDIR): cv.string,
        vol.Optional(ATTR_FILENAME): cv.string,
        vol.Optional(ATTR_OVERWRITE, default=False): cv.boolean,
    }
)

# Component configuration: a download directory is required.
CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.Schema({vol.Required(CONF_DOWNLOAD_DIR): cv.string})},
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Listen for download events to download files."""
    download_path = config[DOMAIN][CONF_DOWNLOAD_DIR]

    # If path is relative, we assume relative to Home Assistant config dir
    if not os.path.isabs(download_path):
        download_path = hass.config.path(download_path)

    if not os.path.isdir(download_path):
        _LOGGER.error(
            "Download path %s does not exist. File Downloader not active", download_path
        )
        return False

    def download_file(service):
        """Start thread to download file specified in the URL."""

        def do_download():
            """Download the file (runs in a worker thread)."""
            try:
                url = service.data[ATTR_URL]
                subdir = service.data.get(ATTR_SUBDIR)
                filename = service.data.get(ATTR_FILENAME)
                overwrite = service.data.get(ATTR_OVERWRITE)

                if subdir:
                    # Check the path
                    raise_if_invalid_path(subdir)

                final_path = None

                req = requests.get(url, stream=True, timeout=10)

                if req.status_code != HTTPStatus.OK:
                    _LOGGER.warning(
                        "Downloading '%s' failed, status_code=%d", url, req.status_code
                    )
                    hass.bus.fire(
                        f"{DOMAIN}_{DOWNLOAD_FAILED_EVENT}",
                        {"url": url, "filename": filename},
                    )
                else:
                    if filename is None and "content-disposition" in req.headers:
                        match = re.findall(
                            r"filename=(\S+)", req.headers["content-disposition"]
                        )

                        if match:
                            filename = match[0].strip("'\" ")

                    if not filename:
                        filename = os.path.basename(url).strip()

                    if not filename:
                        filename = "ha_download"

                    # Check the filename
                    raise_if_invalid_filename(filename)

                    # Do we want to download to subdir, create if needed
                    if subdir:
                        subdir_path = os.path.join(download_path, subdir)

                        # Ensure subdir exist
                        os.makedirs(subdir_path, exist_ok=True)

                        final_path = os.path.join(subdir_path, filename)
                    else:
                        final_path = os.path.join(download_path, filename)

                    path, ext = os.path.splitext(final_path)

                    # If file exist append a number.
                    # We test filename, filename_2..
                    if not overwrite:
                        tries = 1
                        final_path = path + ext
                        while os.path.isfile(final_path):
                            tries += 1
                            # BUG FIX: `ext` already contains the leading dot
                            # (from os.path.splitext); the previous
                            # f"{path}_{tries}.{ext}" produced "file_2..txt".
                            final_path = f"{path}_{tries}{ext}"

                    _LOGGER.debug("%s -> %s", url, final_path)

                    with open(final_path, "wb") as fil:
                        for chunk in req.iter_content(1024):
                            fil.write(chunk)

                    _LOGGER.debug("Downloading of %s done", url)
                    hass.bus.fire(
                        f"{DOMAIN}_{DOWNLOAD_COMPLETED_EVENT}",
                        {"url": url, "filename": filename},
                    )

            except requests.exceptions.ConnectionError:
                _LOGGER.exception("ConnectionError occurred for %s", url)
                hass.bus.fire(
                    f"{DOMAIN}_{DOWNLOAD_FAILED_EVENT}",
                    {"url": url, "filename": filename},
                )

                # Remove file if we started downloading but failed
                if final_path and os.path.isfile(final_path):
                    os.remove(final_path)

            except ValueError:
                _LOGGER.exception("Invalid value")
                hass.bus.fire(
                    f"{DOMAIN}_{DOWNLOAD_FAILED_EVENT}",
                    {"url": url, "filename": filename},
                )

                # Remove file if we started downloading but failed
                if final_path and os.path.isfile(final_path):
                    os.remove(final_path)

        threading.Thread(target=do_download).start()

    hass.services.register(
        DOMAIN,
        SERVICE_DOWNLOAD_FILE,
        download_file,
        schema=SERVICE_DOWNLOAD_FILE_SCHEMA,
    )

    return True
|
app.py | from flask import Flask, render_template, jsonify
import serial
import threading
app = Flask(__name__)

# Latest readings plus bounded history (at most 100 samples, see the reader
# loop) shared between the serial-reader thread and the request handlers.
curTemp = 0.0
curHumi = 0.0
tempDataList = []
humiDataList = []

# Serial link to the DHT11 bridge: Windows COM port, 115200 baud, 8N1,
# 1-second read timeout.
ser = serial.Serial(
    port='COM6',
    baudrate=115200,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1)
def read_dht11_data():
    """Background loop: parse DHT11 frames off the serial port.

    Frame layout: 0x02 Humi(int) Humi(frac) Temp(int) Temp(frac) 0x03.
    Updates the shared readings and keeps the history lists capped at 100.
    """
    global curTemp, curHumi
    while True:
        # Resynchronize on the 0x02 start-of-frame byte.
        # 0x02 Humi(Integer) Humi(fractional) Temp(Integer) Temp(fractional) 0x03
        while(ser.read(1) != b'\x02'):
            pass
        data = list(ser.read(5))
        # A read timeout can yield fewer than 5 bytes, and a torn frame lacks
        # the 0x03 terminator; drop such frames instead of crashing/recording
        # garbage.
        if len(data) < 5 or data[4] != 0x03:
            continue
        curHumi = float(str(data[0]) + '.' + str(data[1]))
        curTemp = float(str(data[2]) + '.' + str(data[3]))

        if len(humiDataList) > 99:
            humiDataList.pop(0)
        if len(tempDataList) > 99:
            tempDataList.pop(0)

        tempDataList.append(curTemp)
        humiDataList.append(curHumi)
@app.route('/')
def index():
    """Render the dashboard with the most recent readings."""
    return render_template('index.html', temp=curTemp, humi=curHumi)


@app.route('/graph')
def graph():
    """Render the history-graph page."""
    return render_template('graph.html')


@app.route('/graph-data')
def graph_data():
    """Expose the buffered temperature/humidity series as JSON."""
    return jsonify({'temps': tempDataList, 'humis': humiDataList})
if __name__ == '__main__':
    # Daemon thread: the serial reader dies together with the main process.
    t = threading.Thread(target=read_dht11_data)
    t.daemon = True
    t.start()
    # use_reloader=False — presumably to avoid the reloader spawning a second
    # reader thread / reopening the serial port; confirm before changing.
    app.run(debug=True, host='127.0.0.1', port=5000, use_reloader=False)
|
test_config.py | import asyncio
import copy
import pytest
import random
import yaml
from tad.util.config import create_default_chia_config, initial_config_file, load_config, save_config
from tad.util.path import mkdir
from multiprocessing import Pool
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aide in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
    """
    Wait for a random amount of time and write out the config data. With a large
    config, we expect save_config() to require multiple writes.
    """
    # Random 0-1 s delay to interleave with the concurrent reader.
    sleep(random.random())
    # log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] write_config")
    # save_config(root_path=root_path, filename="config.yaml", config_data=modified_config)
    save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
    """
    Wait for a random amount of time, read the config and compare with the
    default config data. If the config file is partially-written or corrupt,
    load_config should fail or return bad data
    """
    # Wait a moment. The read and write threads are delayed by a random amount
    # in an attempt to interleave their execution.
    sleep(random.random())
    # log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] read_and_compare_config")
    config: Dict = load_config(root_path=root_path, filename="config.yaml")
    # A partially-written file would load as empty or differ from the default.
    assert len(config) > 0
    # if config != default_config:
    #     log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] bad config: {config}")
    #     log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] default config: {default_config}")
    assert config == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Spin-off reader and writer threads and wait for completion
    """
    workers = [
        Thread(target=write_config, kwargs={"root_path": root_path, "config": default_config}),
        Thread(target=read_and_compare_config, kwargs={"root_path": root_path, "default_config": default_config}),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Subprocess entry point. This function spins-off threads to perform read/write tasks
    concurrently, possibly leading to synchronization issues accessing config data.
    """
    # asyncio.run() replaces the deprecated get_event_loop()/run_until_complete
    # pattern and guarantees each worker process gets (and closes) its own
    # fresh event loop.
    asyncio.run(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
    """Tests for create/load/save of config.yaml, including concurrent access."""

    @pytest.fixture(scope="function")
    def root_path_populated_with_config(self, tmpdir) -> Path:
        """
        Create a temp directory and populate it with a default config.yaml.
        Returns the root path containing the config.
        """
        root_path: Path = Path(tmpdir)
        create_default_chia_config(root_path)
        return Path(root_path)

    @pytest.fixture(scope="function")
    def default_config_dict(self) -> Dict:
        """
        Returns a dictionary containing the default config.yaml contents
        """
        content: str = initial_config_file("config.yaml")
        config: Dict = yaml.safe_load(content)
        return config

    def test_create_config_new(self, tmpdir):
        """
        Test create_default_chia_config() as in a first run scenario
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        # Expect: config.yaml doesn't exist
        assert config_file_path.exists() is False
        # When: creating a new config
        create_default_chia_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True

        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0

        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
            # Expect: config.yaml contents are seeded with initial contents
            assert actual_content == expected_content

    def test_create_config_overwrite(self, tmpdir):
        """
        Test create_default_chia_config() when overwriting an existing config.yaml
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        mkdir(config_file_path.parent)
        # When: config.yaml already exists with content
        with open(config_file_path, "w") as f:
            f.write("Some config content")
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        # When: creating a new config
        create_default_chia_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True

        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0

        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
            # Expect: config.yaml contents are overwritten with initial contents
            assert actual_content == expected_content

    def test_load_config(self, root_path_populated_with_config, default_config_dict):
        """
        Call load_config() with a default config and verify a few values are set to the expected values
        """
        root_path: Path = root_path_populated_with_config
        # When: loading a newly created config
        config: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert config is not None
        # Expect: config values should match the defaults (from a small sampling)
        assert config["daemon_port"] == default_config_dict["daemon_port"] == 55400
        assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
        assert (
            config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
        )

    def test_load_config_exit_on_error(self, tmpdir):
        """
        Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
        """
        root_path: Path = tmpdir
        config_file_path: Path = root_path / "config" / "config.yaml"
        # When: config file path points to a directory
        mkdir(config_file_path)
        # When: exit_on_error is True
        # Expect: load_config will exit
        with pytest.raises(SystemExit):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
        # When: exit_on_error is False
        # Expect: load_config will raise an exception
        with pytest.raises(ValueError):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)

    def test_save_config(self, root_path_populated_with_config, default_config_dict):
        """
        Test modifying the config and saving it to disk. The modified value(s) should be present after
        calling load_config().
        """
        root_path: Path = root_path_populated_with_config
        config: Dict = copy.deepcopy(default_config_dict)
        # When: modifying the config
        config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
        # Sanity check that we didn't modify the default config
        assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
        # When: saving the modified config
        save_config(root_path=root_path, filename="config.yaml", config_data=config)

        # Expect: modifications should be preserved in the config read from disk
        loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"

    def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
        """
        Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
        to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
        Access to config.yaml isn't currently synchronized, so the best we can currently hope for is that
        the file contents are written-to as a whole.
        """
        # Artifically inflate the size of the default config. This is done to (hopefully) force
        # save_config() to require multiple writes. When save_config() was using shutil.move()
        # multiple writes were observed, leading to read failures when data was partially written.
        default_config_dict["xyz"] = "x" * 32768
        root_path: Path = root_path_populated_with_config
        save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
        num_workers: int = 30
        args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
        # Spin-off several processes (not threads) to read and write config data. If any
        # read failures are detected, the failing process will assert.
        with Pool(processes=num_workers) as pool:
            res = pool.starmap_async(run_reader_and_writer_tasks, args)
            res.get(timeout=10)
|
main.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/14 4:35 PM
# @Author : w8ay
# @File : main.py
import os
import random
import sys
import threading
import time
from config import THREAD_NUM, DEBUG, NODE_NAME
from lib.data import PATHS, logger
from lib.engine import Schedular
from lib.redis import redis_con
from thirdpart.requests import patch_all
def module_path():
    """Return the directory containing this script (symlinks resolved)."""
    resolved = os.path.realpath(__file__)
    return os.path.dirname(resolved)
def main():
    """Set up scan paths, spawn helper threads and run the scheduler."""
    PATHS.ROOT_PATH = module_path()
    PATHS.PLUGIN_PATH = os.path.join(PATHS.ROOT_PATH, "pocs")
    PATHS.OUTPUT_PATH = os.path.join(PATHS.ROOT_PATH, "output")
    PATHS.DATA_PATH = os.path.join(PATHS.ROOT_PATH, "data")
    patch_all()
    logger.info("Hello W12SCAN !")

    # Normalize domain targets (uniform format: no trailing '/' whether it is
    # a domain or a sub-directory), recognize IP CIDR notation, normalize IPs.

    # Fetch targets from redis (blocking pop) and feed the scheduler.
    def redis_get():
        list_name = "w12scan_scanned"
        while 1:
            target = redis_con.blpop(list_name)[1]
            scheduler.put_target(target)

    # redis_get()

    # Debug mode: feed one hard-coded target instead of reading redis.
    def debug_get():
        target = "http://stun.tuniu.com"
        scheduler.put_target(target)

    # Register this node in redis and keep refreshing its heartbeat
    # (last_time) every 250 seconds.
    def node_register():
        first_blood = True
        while 1:
            if first_blood:
                dd = {
                    "last_time": time.time(),
                    "tasks": 0,
                    "running": 0,
                    "finished": 0
                }
                redis_con.hmset(NODE_NAME, dd)
                first_blood = False
            else:
                redis_con.hset(NODE_NAME, "last_time", time.time())
            time.sleep(50 * 5)

    scheduler = Schedular(threadnum=THREAD_NUM)
    scheduler.start()

    # Start the task-dispatch scheduler
    if DEBUG:
        func_target = debug_get
    else:
        func_target = redis_get

    # Heartbeat thread communicating with the WEB side
    node = threading.Thread(target=node_register)
    node.start()

    # Queue-dispatch thread feeding targets into the scheduler
    t = threading.Thread(target=func_target, name='LoopThread')
    t.start()
    try:
        scheduler.run()
    except KeyboardInterrupt:
        logger.info("User exit")
# Script entry point.
if __name__ == '__main__':
    main()
|
main.py | from flask import (Flask,
make_response,
abort,
redirect,
render_template,
url_for,
flash,
session, )
from flask_script import Manager, Shell
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from datetime import datetime
import os
from threading import Thread
basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
# Secrets and mail credentials come from the environment; the defaults here
# are development fallbacks only.
app.config["SECRET_KEY"] = os.getenv("SECRET_KEY", default="some secret key here")
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///" + os.path.join(basedir, "data.sqlite")
app.config["SQLALCHEMY_COMMIT_ON_TEARDOWN"] = True
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
# Gmail SMTP over STARTTLS.
app.config["MAIL_SERVER"] = "smtp.googlemail.com"
app.config["MAIL_PORT"] = 587
app.config["MAIL_USE_TLS"] = True
app.config["MAIL_USERNAME"] = os.getenv("MAIL_USERNAME")
app.config["MAIL_PASSWORD"] = os.getenv("MAIL_PASSWORD")
app.config["MAIL_SUBJECT"] = "eeee BOOY"
app.config["MAIL_SENDER"] = "prosto@chel.com"
app.config["ADMIN_MAIL"] = "aliaskar.isakov@yandex.ru"

###### Init side apps #######
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
mail = Mail(app)
def make_shell_context():
    """Expose the app, db and models inside ``python main.py shell``."""
    return {"app": app, "db": db, "User": User, "Role": Role}
# Register the Flask-Script sub-commands: an IPython-style shell with the
# context above pre-imported, and the Flask-Migrate "db" command group.
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand)
########## Models ###########
class Role(db.Model):
    """A user role; one role owns many users."""
    __tablename__ = "roles"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(32), unique=True)
    # One-to-many: ``role.users`` is a lazily evaluated query, and each User
    # gains a ``role`` back-reference.
    users = db.relationship("User", backref="role", lazy="dynamic")

    def __repr__(self):
        return f"<Role {self.name}>"
class User(db.Model):
    """A registered visitor, identified by a unique username."""
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    # Optional link to Role; nullable so users may exist without a role.
    role_id = db.Column(db.Integer, db.ForeignKey("roles.id"), nullable=True)

    def __repr__(self):
        return f"<User {self.username}>"
########## Forms ############
class NameForm(FlaskForm):
    """Single-field form asking the visitor for their name (required)."""
    name = StringField("What is your name", validators=[DataRequired()])
    submit = SubmitField("Submit")
########### Utils ###########
def send_async_email(app, msg):
    """Deliver *msg* via Flask-Mail inside *app*'s context (thread target)."""
    ctx = app.app_context()
    with ctx:
        mail.send(msg)
def send_mail(to, template, **kwargs):
    """Render *template* (.txt and .html variants) and send it asynchronously.

    :param to: recipient address.
    :param template: template path without extension (e.g. "mail/new_user").
    :return: the worker Thread, so callers may ``join()`` it.
    """
    msg = Message(app.config["MAIL_SUBJECT"],
                  sender=app.config["MAIL_SENDER"],
                  recipients=[to])
    # Fill both the plain-text and HTML bodies from the same template pair.
    for attr, suffix in (("body", ".txt"), ("html", ".html")):
        setattr(msg, attr, render_template(template + suffix, **kwargs))
    thr = Thread(target=send_async_email, args=[app, msg])
    thr.start()
    return thr
########## Routes ###########
@app.route("/", methods=["GET", "POST"])
def index():
    """Home page: greet the visitor and remember whether we have met before."""
    form = NameForm()
    if form.validate_on_submit():
        submitted_name = form.name.data
        user = User.query.filter_by(username=submitted_name).first()
        if user is not None:
            session["known"] = True
        else:
            # First visit: persist the user and notify the site admin.
            user = User(username=submitted_name)
            db.session.add(user)
            db.session.commit()
            session["known"] = False
            if app.config["ADMIN_MAIL"]:
                send_mail(app.config["ADMIN_MAIL"], "mail/new_user", user=user)
        # Remember the name across the redirect and clear the input field.
        session["name"], form.name.data = submitted_name, ""
        return redirect(url_for("index"))
    return render_template("index.html", form=form,
                           name=session.get("name"),
                           known=session.get("known", False),
                           current_time=datetime.utcnow())
@app.route("/<word>")
def reverse_word(word):
    """Render user.html greeting with *word* reversed.

    Bug fix: the original built the reversed string in a manual while loop
    but then rendered the untouched ``word``, so the reversal was dead code.
    The reversal is now computed with slicing and actually used.

    :param word: path segment; "smth" aborts with 404, "something" redirects.
    """
    if word == "smth":
        abort(404)
    elif word == "something":
        return redirect("braza")
    reverse = word[::-1]
    return render_template("user.html", name=reverse)
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 page with the matching status code."""
    body = render_template("404.html")
    return body, 404
@app.errorhandler(500)
def internal_server_error(e):
    """Serve the custom 500 page with the matching status code."""
    body = render_template("500.html")
    return body, 500
# Entry point: hand control to Flask-Script (``python main.py runserver``).
if __name__ == "__main__":
    manager.run() |
KmapMerger.py | #!/usr/bin/python
# coding: utf8
# /*##########################################################################
#
# Copyright (c) 2015-2016 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import
__authors__ = ["D. Naudet"]
__date__ = "20/04/2016"
__license__ = "MIT"
import re
import copy
import os.path
import ctypes
from threading import Thread
import multiprocessing.sharedctypes as mp_sharedctypes
from multiprocessing import Pool, cpu_count, Manager
import h5py
import numpy as np
from silx.third_party import EdfFile
from ...io import XsocsH5
class KmapMerger(object):
    """Merges kmap SPEC data (already converted to a spech5 HDF5 file) with
    the matching EDF image files, producing one HDF5 entry file per scan and
    a master file linking all entries. The per-scan work is farmed out to a
    multiprocessing Pool (see :func:`_add_edf_data`)."""

    # Status codes; __STATUSES is the set used for validation in __set_status.
    (READY, RUNNING, DONE,
     ERROR, CANCELED, UNKNOWN) = __STATUSES = range(6)
    """ Available status codes """

    status = property(lambda self: self.__status)
    """ Current status code of this instance """

    statusMsg = property(lambda self: self.__status_msg)
    """ Status message if any, or None """

    results = property(lambda self: self.__results)
    """ Parse results. KmapParseResults instance. """

    no_match_ids = property(lambda self: self.__no_match_ids)
    """ Scans that werent matched during the parsing """

    no_img_ids = property(lambda self: self.__no_img_ids)
    """ Scans that didnt have the image info line in the spec file """

    on_error_ids = property(lambda self: self.__on_error_ids)
    """ Scans for which an error occured during the parsing """

    matched_ids = property(lambda self: self.__matched_ids)
    """ Scans that have a matching image file """

    selected_ids = property(lambda self: sorted(self.__selected_ids))
    """ Selected scans for merging """

    def __init__(self,
                 spec_h5,
                 match_info,
                 output_dir=None,
                 callback=None):
        """
        Merger for the Kmap SPEC and EDF files. This loads a spech5 file,
        converts it to HDF5 and then tries to match scans and edf image
        files.
        :param spec_h5: Name of the spech5.
        .. seealso : silx.io.convert_spec_h5.convert
        :param match_info: instance of KmapMatchResults.
        :param output_dir: output directory for the merged files.
        :param callback: callback to call when the parsing is done.
        """
        super(KmapMerger, self).__init__()
        self.__status = None
        self.__set_status(self.UNKNOWN, 'Init')
        self.__output_dir = output_dir
        # Merge parameters; all must be set before merge() (check_parameters).
        self.__params = {'prefix': None,
                         'beam_energy': None,
                         'center_channel': None,
                         'chan_per_deg': None}
        self.__spec_h5 = spec_h5
        self.__callback = callback
        self.__n_proc = None
        self.__compression = 'lzf'
        self.__prefix = 'prefix'
        self.__overwrite = False
        # Deep copies so later mutations of match_info don't affect us.
        self.__matched_scans = copy.deepcopy(match_info.matched)
        self.__no_match_scans = copy.deepcopy(match_info.not_matched)
        self.__no_img_scans = copy.deepcopy(match_info.no_img_info)
        self.__on_error_scans = copy.deepcopy(match_info.error)
        # All matched scans are selected by default.
        self.__selected_ids = set(self.__matched_scans.keys())
        self.__matched_ids = sorted(self.__matched_scans.keys())
        self.__no_match_ids = sorted(self.__no_match_scans.keys())
        self.__no_img_ids = sorted(self.__no_img_scans)
        self.__on_error_ids = sorted(self.__on_error_scans)
        self.__results = None
        self.__master = None
        self.__shared_progress = None
        self.__proc_indices = None
        self.__term_evt = None
        self.__thread = None
        # Goes through the prefix setter: derives the common prefix.
        self.prefix = None
        self.__set_status(self.READY)

    def __set_status(self, status, msg=None):
        """
        Sets the status of this instance.
        :param status: one of the codes in __STATUSES.
        :param msg: optional human readable message.
        :return:
        """
        assert status in self.__STATUSES
        self.__status = status
        self.__status_msg = msg

    def merge(self,
              overwrite=False,
              blocking=True,
              callback=None):
        """
        Starts the merge.
        :param overwrite: if False raises an exception if some files already
            exist.
        :param blocking: if False, the merge will be done in a separate
            thread and this method will return immediately.
        :param callback: callback that will be called when the merging is done.
            It overwrites the one passed the constructor.
        :return:
        """
        # NOTE(review): status is set to RUNNING before the is_running()
        # check, so a rejected call still leaves the status as RUNNING.
        self.__set_status(self.RUNNING)
        if self.is_running():
            raise RuntimeError('This KmapSpecParser instance is already '
                               'parsing.')
        # NOTE(review): check_parameters() returns a list of errors that is
        # ignored here — presumably callers are expected to call it first.
        self.check_parameters()
        output_dir = self.__output_dir
        if output_dir is None:
            self.__set_status(self.ERROR)
            raise ValueError('output_dir has not been set.')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        if not overwrite:
            if len(self.check_overwrite()):
                self.__set_status(self.ERROR)
                raise RuntimeError('Some files already exist. Use the '
                                   'overwrite keyword to ignore this '
                                   'warning.')
        self.__results = None
        self.__overwrite = overwrite
        if callback is not None:
            self.__callback = callback
        if blocking:
            self.__run_merge()
        else:
            thread = self.__thread = Thread(target=self.__run_merge)
            thread.start()

    def __run_merge(self):
        """
        Runs the merge: spawns a Pool of workers, one _add_edf_data task per
        selected scan, then writes the master file linking the entry files.
        :return: the master file name on success, else None.
        """
        self.__set_status(self.RUNNING)
        self.__master = None
        selected_scans = self.selected_ids
        matched_scans = self.__matched_scans
        output_files = self.summary()
        master_f = output_files['master']
        del output_files['master']
        scans = {scan_id: {'image': matched_scans[scan_id]['image'],
                           'output': output_files[scan_id]}
                 for scan_id in selected_scans}
        print('Merging scan IDs : {}.'
              ''.format(', '.join(self.selected_ids)))
        try:
            # Manager event shared with the workers for cancellation.
            manager = Manager()
            self.__term_evt = term_evt = manager.Event()
            # One int32 progress slot per scan, shared with the workers.
            self.__shared_progress = mp_sharedctypes.RawArray(ctypes.c_int32,
                                                              len(scans))
            master_f = os.path.join(self.__output_dir, master_f)
            if not self.__overwrite:
                mode = 'w-'
            else:
                mode = 'w'
            # trying to access the file (erasing it if necessary)
            with XsocsH5.XsocsH5MasterWriter(master_f, mode=mode):
                pass
            if self.__n_proc is None:
                n_proc = cpu_count()
            else:
                n_proc = self.__n_proc

            # Pool initializer: installs the shared event/progress array as
            # globals in each worker process (read by _add_edf_data).
            def init(term_evt_, shared_progress_):
                global g_term_evt
                global g_shared_progress
                g_term_evt = term_evt_
                g_shared_progress = shared_progress_

            # setting progress to 0
            np.frombuffer(self.__shared_progress, dtype='int32')[:] = 0
            pool = Pool(n_proc,
                        initializer=init,
                        initargs=(term_evt,
                                  self.__shared_progress),
                        maxtasksperchild=2)

            # Per-task callback: any non-DONE result cancels the others.
            def callback(result_):
                scan, finished, info = result_
                print('{0} finished.'.format(scan))
                if not finished:
                    term_evt.set()

            results = {}
            self.__proc_indices = proc_indices = {}
            for proc_idx, (scan_id, infos) in enumerate(scans.items()):
                args = (scan_id,
                        proc_idx,
                        self.__spec_h5,
                        self.__output_dir,
                        infos['output'],
                        infos['image'],
                        self.beam_energy,
                        self.chan_per_deg,
                        self.center_chan,
                        self.compression)
                results[scan_id] = pool.apply_async(_add_edf_data,
                                                    args,
                                                    callback=callback)
                proc_indices[scan_id] = proc_idx
            pool.close()
            self.__proc_indices = proc_indices
            pool.join()
            # Collect the (scan_id, rc, msg) tuples and reduce to one status.
            proc_results = [result.get() for result in results.values()]
            proc_codes = np.array([proc_result[1]
                                   for proc_result in proc_results])
            rc = self.DONE
            if not np.all(proc_codes == self.DONE):
                if self.ERROR in proc_codes:
                    rc = self.ERROR
                elif self.CANCELED in proc_codes:
                    rc = self.CANCELED
                else:
                    raise ValueError('Unknown return code.')
            if rc == self.DONE:
                # All workers succeeded: link every entry into the master.
                with XsocsH5.XsocsH5MasterWriter(master_f, mode='a') as m_h5f:
                    items = scans.items()
                    for proc_idx, (scan_id, infos) in enumerate(items):
                        entry_fn = infos['output']
                        entry = entry_fn.rpartition('.')[0]
                        m_h5f.add_entry_file(entry, entry_fn)
            self.__set_status(rc)
        except Exception as ex:
            self.__set_status(self.ERROR, str(ex))
        else:
            self.__results = master_f
            self.prefix = None
            self.__master = master_f
        # TODO : catch exception?
        if self.__callback:
            self.__callback()
        return self.__results

    def wait(self):
        """
        Waits until parsing is done, or returns if it is not running.
        :return:
        """
        if self.__thread:
            self.__thread.join()

    def __running_exception(self):
        # Raise if a background merge/parse thread is still alive.
        if self.is_running():
            raise RuntimeError('Operation not permitted while '
                               'a parse or merge in running.')

    def is_running(self):
        """True while a non-blocking merge thread is alive."""
        return self.__thread and self.__thread.is_alive()

    output_dir = property(lambda self: self.__output_dir)

    @output_dir.setter
    def output_dir(self, output_dir):
        if not isinstance(output_dir, str):
            raise TypeError('output_dir must be a string. Received {0}'
                            ''.format(type(output_dir)))
        self.__output_dir = output_dir

    def select(self, scan_ids, clear=False):
        """Add scan IDs to the selection (or replace it when clear=True).

        Passing None selects all matched scans. Unknown IDs raise ValueError.
        """
        if scan_ids is None:
            scan_ids = list(self.__matched_scans.keys())
        if not isinstance(scan_ids, (list, tuple)):
            scan_ids = [scan_ids]
        scan_ids = set(scan_ids)
        unknown_scans = scan_ids - set(self.__matched_scans.keys())
        if len(unknown_scans) != 0:
            err_ids = '; '.join(str(scan) for scan in unknown_scans)
            raise ValueError('Unknown scan IDs : {0}.'.format(err_ids))
        if clear:
            self.__selected_ids = scan_ids
        else:
            self.__selected_ids |= scan_ids

    def unselect(self, scan_ids):
        """Remove the given scan IDs from the selection (None is a no-op)."""
        if scan_ids is None:
            return
        if not isinstance(scan_ids, (list, tuple)):
            scan_ids = [scan_ids]
        self.__selected_ids -= set(scan_ids)

    def get_scan_command(self, scan_id):
        """Read and parse the scan command stored in the spech5 file.

        :raises ValueError: if the scan is missing or the command line cannot
            be parsed.
        """
        with h5py.File(self.__spec_h5, 'r') as spec_h5:
            try:
                scan = spec_h5[scan_id]
            except KeyError:
                raise ValueError('Scan ID {0} not found.')
            command = scan['title'][()].decode()
            try:
                return parse_scan_command(command)
            except:
                raise ValueError(
                    'Failed to parse command line for scan ID {0} : '
                    '{1}.'.format(scan_id, command))

    def check_overwrite(self):
        """
        Returns a list of the files that already exist and that will be
        overwritten.
        """
        files = self.summary(fullpath=True)
        exist = [f for f in files.values() if os.path.exists(f)]
        return exist

    def check_consistency(self):
        """
        Checks if all selected scans have the same command.
        Returns a list of error strings (empty if consistent), or None when
        no scans are selected.
        """
        errors = []
        scan_ids = self.selected_ids
        n_scans = len(scan_ids)
        if n_scans == 0:
            return None
        commands = [self.get_scan_command(scan_id)
                    for scan_id in scan_ids]

        # Appends an error if the selected scans disagree on this field.
        def check_key(key):
            values = [command[key] for command in commands]
            values_set = set(values)
            if len(values_set) != 1:
                errors.append('Selected scans command inconsistency : '
                              '"{0}" : {1}.'
                              ''.format(key, '; '.join(str(m)
                                                       for m in
                                                       values_set)))

        check_key('motor_0')
        check_key('motor_1')
        check_key('motor_0_start')
        check_key('motor_1_start')
        check_key('motor_0_end')
        check_key('motor_1_end')
        check_key('motor_0_steps')
        check_key('motor_1_steps')
        check_key('delay')
        return errors

    def check_parameters(self):
        """Return a list naming each required parameter still unset."""
        errors = []
        if self.beam_energy is None:
            errors.append('invalid "Beam energy"')
        if self.output_dir is None:
            errors.append('invalid "Output directory"')
        if self.center_chan is None:
            errors.append('invalid "Center channel"')
        if self.chan_per_deg is None:
            errors.append('invalid "Channel per degree"')
        return errors

    beam_energy = property(lambda self: self.__params['beam_energy'])

    @beam_energy.setter
    def beam_energy(self, beam_energy):
        self.__params['beam_energy'] = float(beam_energy)

    chan_per_deg = property(lambda self: self.__params['chan_per_deg'])

    @chan_per_deg.setter
    def chan_per_deg(self, chan_per_deg):
        if len(chan_per_deg) != 2:
            raise ValueError('chan_per_deg must be a two elements array.')
        self.__params['chan_per_deg'] = [float(chan_per_deg[0]),
                                         float(chan_per_deg[1])]

    center_chan = property(lambda self: self.__params['center_chan'])

    @center_chan.setter
    def center_chan(self, center_chan):
        if len(center_chan) != 2:
            raise ValueError('center_chan must be a two elements array.')
        self.__params['center_chan'] = [float(center_chan[0]),
                                        float(center_chan[1])]

    compression = property(lambda self: self.__compression)

    @compression.setter
    def compression(self, compression):
        if not isinstance(compression, str):
            raise ValueError('compression must be a string.')
        self.__compression = compression

    n_proc = property(lambda self: self.__n_proc)

    @n_proc.setter
    def n_proc(self, n_proc):
        # None or a non-positive value means "use cpu_count()" (see merge).
        if n_proc is None:
            self.__n_proc = None
            return
        n_proc = int(n_proc)
        if n_proc <= 0:
            self.__n_proc = None
        else:
            self.__n_proc = n_proc

    def get_imagefile_info(self, scan_id, key=None):
        """Return (a deep copy of) the parsed imageFile info for a scan.

        Looks in matched scans first, then unmatched ones; raises ValueError
        if the scan had no imageFile line at all.
        """
        try:
            scan_info = self.__matched_scans[scan_id]
        except KeyError:
            try:
                scan_info = self.__no_match_scans[scan_id]
            except KeyError:
                raise ValueError('No imageFile line found in the SPEC file'
                                 ' for scan ID {0}.'
                                 ''.format(scan_id))
        if key is not None:
            return copy.deepcopy(scan_info['spec'][key])
        else:
            return copy.deepcopy(scan_info['spec'])

    def get_scan_image(self, scan_id):
        """Return the image file path matched to *scan_id* (deep copy)."""
        try:
            scan_info = self.__matched_scans[scan_id]
        except KeyError:
            raise ValueError('Scan ID {0} is not one of the valid scans.'
                             ''.format(scan_id))
        return copy.deepcopy(scan_info['image'])

    def common_prefix(self):
        """Longest common prefix of the selected scans' spec prefixes,
        trimmed back to the last underscore when partial."""
        scan_ids = self.__selected_ids
        if len(scan_ids) == 0:
            return ''
        prefixes = [self.__matched_scans[scan_id]['spec']['prefix']
                    for scan_id in scan_ids]
        common = os.path.commonprefix(prefixes)
        # this has to be tests and made more robust
        if len(prefixes[0]) > len(common) or common.endswith('_'):
            common = common.rpartition('_')[0]
        return common

    master_file = property(lambda self: self.__master)

    prefix = property(lambda self: self.__prefix)

    @prefix.setter
    def prefix(self, prefix):
        # None/empty falls back to the common prefix, then to 'prefix'.
        if prefix is None or len(prefix) == 0:
            prefix = self.common_prefix()
            if not prefix:
                self.__prefix = 'prefix'
            else:
                self.__prefix = prefix
        elif isinstance(prefix, str):
            self.__prefix = prefix
        else:
            raise TypeError('prefix must be a string, or None. This '
                            'is q {0}.'.format(type(prefix)))

    def __gen_scan_filename(self, scan_id, fullpath=False):
        # Entry file name: "<image basename>_<scan id>.h5".
        pattern = '{img_file}_{scan_id}.h5'
        img_file = self.__matched_scans[scan_id]['image']
        img_file = os.path.basename(img_file).split('.')[0]
        merged_file = pattern.format(img_file=img_file, scan_id=scan_id)
        if fullpath:
            merged_file = os.path.join(self.output_dir, merged_file)
        return merged_file

    def __gen_master_filename(self, fullpath=False):
        # Master file name: "<prefix>.h5".
        prefix = self.prefix
        # if not master.endswith('.h5'):
        master = prefix + '.h5'
        if fullpath:
            master = os.path.join(self.output_dir, master)
        return master

    def summary(self, fullpath=False):
        """Map 'master' and each selected scan ID to its output file name."""
        if self.__output_dir is None:
            raise ValueError('output_summary() cannot be called '
                             'before an output directory has been set. '
                             'Please call set_output_dir() first.')
        master = self.__gen_master_filename(fullpath=fullpath)
        files = {'master': master}
        sel_ids = list(self.__selected_ids)
        for scan_id in sel_ids:
            files.update({scan_id: self.__gen_scan_filename(scan_id,
                                                            fullpath=fullpath)})
        return files

    def abort(self, wait=True):
        """Signal the workers to stop; optionally block until done."""
        if self.is_running():
            self.__term_evt.set()
            if wait:
                self.wait()

    def progress(self):
        """Return {scan_id: progress int} read from the shared array,
        or None if no merge has been started yet."""
        # TODO : rework
        if self.__shared_progress is not None:
            progress = np.frombuffer(self.__shared_progress, dtype='int32')
            proc_indices = self.__proc_indices
            if proc_indices:
                merge_progress = dict([(scan_id, progress[proc_idx])
                                       for scan_id, proc_idx in
                                       proc_indices.items()])
            else:
                merge_progress = dict([(scan_id, 0)
                                       for scan_id in self.selected_ids])
            return merge_progress
        return None
# #######################################################################
# #######################################################################
# Pre-compiled pattern for a kmap scan command line. Raw string literals fix
# the original non-raw '\s' escape (a DeprecationWarning / future SyntaxError)
# and compiling once at import time avoids recompiling on every call.
_COMMAND_LINE_PATTERN = re.compile(
    r'^(?P<id>[0-9]*) '
    r'(?P<command>[^ ]*) '
    r'(?P<motor_0>[^ ]*) '
    r'(?P<motor_0_start>[^ ]*) '
    r'(?P<motor_0_end>[^ ]*) '
    r'(?P<motor_0_steps>[^ ]*) '
    r'(?P<motor_1>[^ ]*) '
    r'(?P<motor_1_start>[^ ]*) '
    r'(?P<motor_1_end>[^ ]*) '
    r'(?P<motor_1_steps>[^ ]*) '
    r'(?P<delay>[^ ]*)\s*'
    r'.*'
    r'$')


def parse_scan_command(command):
    """Parse a kmap SPEC scan command line into its named fields.

    :param command: the full command string (scan title).
    :return: dict with the keys id, command, motor_0, motor_0_start,
        motor_0_end, motor_0_steps, motor_1, motor_1_start, motor_1_end,
        motor_1_steps, delay, plus ``full`` holding the complete line.
    :raises ValueError: if the line does not match the expected layout.
    """
    cmd_match = _COMMAND_LINE_PATTERN.match(command)
    if cmd_match is None:
        raise ValueError('Failed to parse command line : "{0}".'
                         ''.format(command))
    cmd_dict = cmd_match.groupdict()
    cmd_dict.update(full=command)
    return cmd_dict
# #######################################################################
# #######################################################################
def _add_edf_data(scan_id,
                  proc_idx,
                  spec_h5_fn,
                  output_dir,
                  output_f,
                  img_f,
                  beam_energy,
                  chan_per_deg,
                  center_chan,
                  compression):
    """
    Creates an entry_*.h5 file with scan data from the provided
    "*spec*" HDF5 files, and adds the image data from the associated
    image file. This function is meant to be called in from _merge_data.

    Runs in a Pool worker: g_term_evt and g_shared_progress are globals
    installed by the Pool initializer (see KmapMerger.__run_merge).
    Progress for this task is written to g_shared_progress[proc_idx]
    (0..100 while running, 100 on success, -1 on error/cancel).

    :return: tuple (scan_id, status_code, error_message_or_None) where
        status_code is one of KmapMerger.DONE/ERROR/CANCELED.
    """
    global g_term_evt
    global g_master_lock
    global g_shared_progress
    # Entry name is the output file name without its extension.
    entry = output_f.rpartition('.')[0]
    entry_fn = os.path.join(output_dir, output_f)
    progress = np.frombuffer(g_shared_progress, dtype='int32')  # noqa
    progress[proc_idx] = 0
    rc = None
    try:
        print('Merging scan ID {0}'.format(scan_id))
        # Bail out early if another task already failed / user aborted.
        if g_term_evt.is_set():  # noqa
            rc = KmapMerger.CANCELED
            raise Exception('Merge of scan {0} aborted.'.format(scan_id))
        with XsocsH5.XsocsH5Writer(entry_fn, mode='w') as entry_h5f:
            entry_h5f.create_entry(entry)
            progress[proc_idx] = 1
            # Copy the scan group from the spech5 file into the entry.
            entry_h5f.copy_group(spec_h5_fn, scan_id, entry)
            with h5py.File(spec_h5_fn) as spec_h5:
                command = spec_h5[scan_id]['title'][()].decode()
                command_params = parse_scan_command(command)
                entry_h5f.set_scan_params(entry, **command_params)
            # Optional calibration parameters.
            if beam_energy is not None:
                entry_h5f.set_beam_energy(float(beam_energy), entry)
            if chan_per_deg is not None:
                entry_h5f.set_chan_per_deg([float(chan_per_deg[0]),
                                            float(chan_per_deg[1])],
                                           entry=entry)
            if center_chan is not None:
                entry_h5f.set_direct_beam([float(center_chan[0]),
                                           float(center_chan[1])],
                                          entry=entry)
            progress[proc_idx] = 2
            edf_file = EdfFile.EdfFile(img_f, access='r', fastedf=True)
            progress[proc_idx] = 5
            # Dataset shaped (n_images, H, W), chunked per image quarter.
            n_images = edf_file.GetNumImages()
            image = edf_file.GetData(0)
            dtype = image.dtype
            img_shape = image.shape
            dset_shape = (n_images, img_shape[0], img_shape[1])
            chunks = (1, dset_shape[1]//4, dset_shape[2]//4)
            with entry_h5f.image_dset_ctx(entry=entry,
                                          create=True,
                                          shape=dset_shape,
                                          dtype=dtype,
                                          chunks=chunks,
                                          compression=compression,
                                          shuffle=True) as image_dset:
                for i in range(n_images):
                    # Update progress / poll for cancellation every 500 imgs.
                    if i % 500 == 0:
                        progress[proc_idx] = round(5. + (95.0 * i) / n_images)
                        if g_term_evt.is_set():  # noqa
                            raise Exception('Merge of scan {0} aborted.'
                                            ''.format(scan_id))
                    data = edf_file.GetData(i)
                    image_dset[i, :, :] = data
    except Exception as ex:
        print(ex)
        if rc is None:
            rc = KmapMerger.ERROR
        result = (scan_id, rc, str(ex))
    else:
        print('Entry {0} merged.'.format(entry))
        result = (scan_id, KmapMerger.DONE, None)
    if result[1] == KmapMerger.DONE:
        progress[proc_idx] = 100
    else:
        progress[proc_idx] = -1
    return result
# Module is import-only; nothing to run directly.
if __name__ == '__main__':
    pass
|
burst.py | # -*- coding: utf-8 -*-
"""
Burst processing thread
"""
import re
import json
import time
import xbmc
import xbmcaddon
import xbmcgui
from Queue import Queue
from threading import Thread
from urlparse import urlparse
from urllib import unquote
from elementum.provider import append_headers, get_setting, log
from parser.ehp import Html
from provider import process
from providers.definitions import definitions, longest
from filtering import apply_filters, Filtering
from client import USER_AGENT, Client
from utils import ADDON_ICON, notify, translation, sizeof, get_icon_path, get_enabled_providers, get_alias
# Module-level state shared between search() and the per-provider threads;
# reset at the start of every search() call.
provider_names = []
provider_results = []
available_providers = 0
request_time = time.time()
auto_timeout = get_setting("auto_timeout", bool)
timeout = get_setting("timeout", int)
# Characters that flag a "special" title (see search(); quotes get stripped).
special_chars = "()\"':.[]<>/\\?"
if auto_timeout:
    # Derive our timeout from Elementum's own provider timeout (minus a
    # 2-second margin), falling back to 28s when no custom value is set.
    elementum_addon = xbmcaddon.Addon(id='plugin.video.elementum')
    if elementum_addon:
        if elementum_addon.getSetting('custom_provider_timeout_enabled') == "true":
            timeout = int(elementum_addon.getSetting('custom_provider_timeout')) - 2
        else:
            timeout = 28
        log.debug("Using timeout from Elementum: %d seconds" % (timeout))
def search(payload, method="general"):
    """ Main search entrypoint
    Args:
        payload (dict): Search payload from Elementum.
        method (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``
    Returns:
        list: All filtered results in the format Elementum expects

    Spawns one thread per enabled provider (target ``run_provider``, defined
    elsewhere in this module) and polls the module-level counters that
    ``got_results`` updates until every provider reported or the timeout
    elapses.
    """
    log.debug("Searching with payload (%s): %s" % (method, repr(payload)))
    if method == 'general':
        # A general search may be a plain string or a dict with 'query'.
        if 'query' in payload:
            payload['title'] = payload['query']
            payload['titles'] = {
                'source': payload['query']
            }
        else:
            payload = {
                'title': payload,
                'titles': {
                    'source': payload
                },
            }
    # Normalize title language codes to lowercase keys.
    payload['titles'] = dict((k.lower(), v) for k, v in payload['titles'].iteritems())
    # If titles[] exists in payload and there are special chars in titles[source]
    # then we set a flag to possibly modify the search query
    payload['has_special'] = 'titles' in payload and \
                             bool(payload['titles']) and \
                             'source' in payload['titles'] and \
                             any(c in payload['titles']['source'] for c in special_chars)
    if payload['has_special']:
        log.debug("Query title contains special chars, so removing any quotes in the search query")
    # Default the optional payload fields expected downstream.
    if 'proxy_url' not in payload:
        payload['proxy_url'] = ''
    if 'internal_proxy_url' not in payload:
        payload['internal_proxy_url'] = ''
    if 'elementum_url' not in payload:
        payload['elementum_url'] = ''
    if 'silent' not in payload:
        payload['silent'] = False
    if 'skip_auth' not in payload:
        payload['skip_auth'] = False
    # Reset the shared state written by got_results() from worker threads.
    global request_time
    global provider_names
    global provider_results
    global available_providers
    provider_names = []
    provider_results = []
    available_providers = 0
    request_time = time.time()
    providers = get_enabled_providers(method)
    if len(providers) == 0:
        if not payload['silent']:
            notify(translation(32060), image=get_icon_path())
        log.error("No providers enabled")
        return []
    log.info("Burstin' with %s" % ", ".join([definitions[provider]['name'] for provider in providers]))
    if get_setting('kodi_language', bool):
        kodi_language = xbmc.getLanguage(xbmc.ISO_639_1)
        if not kodi_language:
            log.warning("Kodi returned empty language code...")
        elif 'titles' not in payload or not payload['titles']:
            log.info("No translations available...")
        elif payload['titles'] and kodi_language not in payload['titles']:
            log.info("No '%s' translation available..." % kodi_language)
    p_dialog = xbmcgui.DialogProgressBG()
    if not payload['silent']:
        p_dialog.create('Elementum [COLOR FFFF6B00]Burst[/COLOR]', translation(32061))
    # One worker thread per provider; each thread ends by calling
    # got_results(), which decrements available_providers.
    for provider in providers:
        available_providers += 1
        provider_names.append(definitions[provider]['name'])
        task = Thread(target=run_provider, args=(provider, payload, method))
        task.start()
    providers_time = time.time()
    total = float(available_providers)
    # Exit if all providers have returned results or timeout reached, check every 100ms
    while time.time() - providers_time < timeout and available_providers > 0:
        timer = time.time() - providers_time
        log.debug("Timer: %ds / %ds" % (timer, timeout))
        if timer > timeout:
            break
        message = translation(32062) % available_providers if available_providers > 1 else translation(32063)
        if not payload['silent']:
            p_dialog.update(int((total - available_providers) / total * 100), message=message)
        time.sleep(0.25)
    if not payload['silent']:
        p_dialog.close()
        del p_dialog
    # Any providers still pending at this point timed out; name them.
    if available_providers > 0:
        message = u', '.join(provider_names)
        message = message + translation(32064)
        log.warning(message.encode('utf-8'))
        if not payload['silent']:
            notify(message, ADDON_ICON)
    log.debug("all provider_results: %s" % repr(provider_results))
    filtered_results = apply_filters(provider_results)
    log.debug("all filtered_results: %s" % repr(filtered_results))
    log.info("Providers returned %d results in %s seconds" % (len(filtered_results), round(time.time() - request_time, 2)))
    return filtered_results
def got_results(provider, results):
    """ Results callback once a provider found all its results, or not
    Args:
        provider (str): The provider ID
        results (list): The list of results

    Side effects: extends the module-level ``provider_results``, decrements
    ``available_providers`` and removes the provider from ``provider_names``
    so ``search()`` can detect completion.
    """
    global provider_names
    global provider_results
    global available_providers
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))
    max_results = get_setting('max_results', int)
    sort_by = get_setting('sort_by', int)
    # Sort setting: 0 "Resolution", 1 "Seeds", 2 "Size", 3 "Balanced".
    # Bug fix: the original tested ``not sort_by or sort_by == 3`` first,
    # which swallowed sort_by == 0 (falsy) and made the "Resolution" branch
    # unreachable. Explicit values are now checked first; "Balanced" is the
    # fallback, covering 3 and any unexpected/unset value as before.
    if sort_by == 0:
        sorted_results = sorted(results, key=lambda r: (r['sort_resolution']), reverse=True)
    elif sort_by == 1:
        sorted_results = sorted(results, key=lambda r: (r['seeds']), reverse=True)
    elif sort_by == 2:
        sorted_results = sorted(results, key=lambda r: (r['size']), reverse=True)
    else:
        # TODO: think of something interesting to balance sort results
        sorted_results = sorted(results, key=lambda r: (r['sort_balance']), reverse=True)
    if len(sorted_results) > max_results:
        sorted_results = sorted_results[:max_results]
    log.info(">> %s returned %2d results in %.1f seconds%s" % (
        definition['name'].rjust(longest), len(results), round(time.time() - request_time, 2),
        (", sending %d best ones" % max_results) if len(results) > max_results else ""))
    provider_results.extend(sorted_results)
    available_providers -= 1
    if definition['name'] in provider_names:
        provider_names.remove(definition['name'])
def extract_torrents(provider, client):
""" Main torrent extraction generator for non-API based providers
Args:
provider (str): Provider ID
client (Client): Client class instance
Yields:
tuple: A torrent result
"""
definition = definitions[provider]
definition = get_alias(definition, get_setting("%s_alias" % provider))
log.debug("Extracting torrents from %s using definitions: %s" % (provider, repr(definition)))
if not client.content:
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Page content is empty" % provider)
raise StopIteration
dom = Html().feed(client.content)
key_search = get_search_query(definition, "key")
row_search = get_search_query(definition, "row")
name_search = get_search_query(definition, "name")
torrent_search = get_search_query(definition, "torrent")
info_hash_search = get_search_query(definition, "infohash")
size_search = get_search_query(definition, "size")
seeds_search = get_search_query(definition, "seeds")
peers_search = get_search_query(definition, "peers")
referer_search = get_search_query(definition, "referer")
log.debug("[%s] Parser: %s" % (provider, repr(definition['parser'])))
q = Queue()
threads = []
needs_subpage = 'subpage' in definition and definition['subpage']
if needs_subpage:
def extract_subpage(q, name, torrent, size, seeds, peers, info_hash, referer):
try:
log.debug("[%s] Getting subpage at %s" % (provider, repr(torrent)))
except Exception as e:
import traceback
log.error("[%s] Subpage logging failed with: %s" % (provider, repr(e)))
map(log.debug, traceback.format_exc().split("\n"))
# New client instance, otherwise it's race conditions all over the place
subclient = Client()
subclient.passkey = client.passkey
headers = {}
if "subpage_mode" in definition:
if definition["subpage_mode"] == "xhr":
headers['X-Requested-With'] = 'XMLHttpRequest'
headers['Content-Language'] = ''
if referer:
headers['Referer'] = referer
uri = torrent.split('|') # Split cookies for private trackers
subclient.open(uri[0].encode('utf-8'), headers=headers)
if 'bittorrent' in subclient.headers.get('content-type', ''):
log.debug('[%s] bittorrent content-type for %s' % (provider, repr(torrent)))
if len(uri) > 1: # Stick back cookies if needed
torrent = '%s|%s' % (torrent, uri[1])
else:
try:
torrent = extract_from_page(provider, subclient.content)
if torrent and not torrent.startswith('magnet') and len(uri) > 1: # Stick back cookies if needed
torrent = '%s|%s' % (torrent, uri[1])
except Exception as e:
import traceback
log.error("[%s] Subpage extraction for %s failed with: %s" % (provider, repr(uri[0]), repr(e)))
map(log.debug, traceback.format_exc().split("\n"))
ret = (name, info_hash, torrent, size, seeds, peers)
q.put_nowait(ret)
if not dom:
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Could not parse DOM from page content" % provider)
raise StopIteration
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Page content: %s" % (provider, client.content.replace('\r', '').replace('\n', '')))
key = eval(key_search) if key_search else ""
if key_search and get_setting("use_debug_parser", bool):
key_str = key.__str__()
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'key', key_search, key_str.replace('\r', '').replace('\n', '')))
items = eval(row_search)
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Matched %d items for '%s' query '%s'" % (provider, len(items), 'row', row_search))
for item in items:
if get_setting("use_debug_parser", bool):
item_str = item.__str__()
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'row', row_search, item_str.replace('\r', '').replace('\n', '')))
if not item:
continue
name = eval(name_search) if name_search else ""
torrent = eval(torrent_search) if torrent_search else ""
size = eval(size_search) if size_search else ""
seeds = eval(seeds_search) if seeds_search else ""
peers = eval(peers_search) if peers_search else ""
info_hash = eval(info_hash_search) if info_hash_search else ""
referer = eval(referer_search) if referer_search else ""
if 'magnet:?' in torrent:
torrent = torrent[torrent.find('magnet:?'):]
if get_setting("use_debug_parser", bool):
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'name', name_search, name))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'torrent', torrent_search, torrent))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'size', size_search, size))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'seeds', seeds_search, seeds))
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'peers', peers_search, peers))
if info_hash_search:
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', info_hash_search, info_hash))
if referer_search:
log.debug("[%s] Parser debug | Matched '%s' iteration for query '%s': %s" % (provider, 'info_hash', referer_search, referer))
# Pass client cookies with torrent if private
if not torrent.startswith('magnet'):
user_agent = USER_AGENT
if client.passkey:
torrent = torrent.replace('PASSKEY', client.passkey)
elif client.token:
headers = {'Authorization': client.token, 'User-Agent': user_agent}
log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
torrent = append_headers(torrent, headers)
log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
else:
parsed_url = urlparse(torrent.split('|')[0])
cookie_domain = '{uri.netloc}'.format(uri=parsed_url)
cookie_domain = re.sub('www\d*\.', '', cookie_domain)
cookies = []
for cookie in client._cookies:
if cookie_domain in cookie.domain:
cookies.append(cookie)
headers = {}
if cookies:
headers = {'User-Agent': user_agent}
log.debug("[%s] Cookies res: %s / %s" % (provider, repr(headers), repr(client.request_headers)))
if client.request_headers:
headers.update(client.request_headers)
if client.url:
headers['Referer'] = client.url
headers['Origin'] = client.url
# Need to set Cookie afterwards to avoid rewriting it with session Cookies
headers['Cookie'] = ";".join(["%s=%s" % (c.name, c.value) for c in cookies])
else:
headers = {'User-Agent': user_agent}
torrent = append_headers(torrent, headers)
if name and torrent and needs_subpage and not torrent.startswith('magnet'):
if not torrent.startswith('http'):
torrent = definition['root_url'] + torrent.encode('utf-8')
t = Thread(target=extract_subpage, args=(q, name, torrent, size, seeds, peers, info_hash, referer))
threads.append(t)
else:
yield (name, info_hash, torrent, size, seeds, peers)
if needs_subpage:
log.debug("[%s] Starting subpage threads..." % provider)
for t in threads:
t.start()
for t in threads:
t.join()
log.debug("[%s] Threads returned: %s" % (provider, repr(threads)))
for i in range(q.qsize()):
ret = q.get_nowait()
log.debug("[%s] Queue %d got: %s" % (provider, i, repr(ret)))
yield ret
def extract_from_api(provider, client):
    """ Main API parsing generator for API-based providers

    An almost clever API parser, mostly just for YTS, RARBG and T411

    Args:
        provider (str): Provider ID
        client (Client): Client class instance

    Yields:
        tuple: A torrent result as (name, info_hash, torrent, size, seeds, peers)
    """
    # NOTE(review): this module is Python 2 only (`long`/`unicode` below).
    try:
        data = json.loads(client.content)
    except:
        # Unparseable body -> behave as an empty result set.
        data = []
    log.debug("[%s] JSON response from API: %s" % (unquote(provider), repr(data)))

    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))
    api_format = definition['api_format']

    results = []
    # 'results' is a dotted path (e.g. "data.movies") into the JSON payload.
    result_keys = api_format['results'].split('.')
    log.debug("%s result_keys: %s" % (provider, repr(result_keys)))
    for key in result_keys:
        if key in data:
            data = data[key]
        else:
            # Any missing segment empties the result set.
            data = []
        # log.debug("%s nested results: %s" % (provider, repr(data)))
    results = data
    log.debug("%s results: %s" % (provider, repr(results)))

    if 'subresults' in api_format:
        from copy import deepcopy
        for result in results:  # A little too specific to YTS but who cares...
            result['name'] = result[api_format['name']]
        subresults = []
        subresults_keys = api_format['subresults'].split('.')
        for key in subresults_keys:
            for result in results:
                if key in result:
                    # Flatten: one entry per (result, subresult) pair, keeping
                    # the parent's fields and overriding with the child's.
                    for subresult in result[key]:
                        sub = deepcopy(result)
                        sub.update(subresult)
                        subresults.append(sub)
        results = subresults
        log.debug("%s with subresults: %s" % (provider, repr(results)))

    for result in results:
        if not result or not isinstance(result, dict):
            continue
        name = ''
        info_hash = ''
        torrent = ''
        size = ''
        seeds = ''
        peers = ''
        if 'name' in api_format:
            name = result[api_format['name']]
        if 'torrent' in api_format:
            torrent = result[api_format['torrent']]
            if 'download_path' in definition:
                # Relative download path -> absolute URL on the provider host.
                torrent = definition['base_url'] + definition['download_path'] + torrent
            if client.token:
                # Token-authenticated providers: glue auth headers onto the
                # torrent URI so the later download request can use them.
                user_agent = USER_AGENT
                headers = {'Authorization': client.token, 'User-Agent': user_agent}
                log.debug("[%s] Appending headers: %s" % (provider, repr(headers)))
                torrent = append_headers(torrent, headers)
                log.debug("[%s] Torrent with headers: %s" % (provider, repr(torrent)))
        if 'info_hash' in api_format:
            info_hash = result[api_format['info_hash']]
        if 'quality' in api_format:  # Again quite specific to YTS...
            name = "%s - %s" % (name, result[api_format['quality']])
        if 'size' in api_format:
            size = result[api_format['size']]
            # Normalize numeric sizes to a human-readable string.
            if type(size) in (long, int):
                size = sizeof(size)
            elif type(size) in (str, unicode) and size.isdigit():
                size = sizeof(int(size))
        if 'seeds' in api_format:
            seeds = result[api_format['seeds']]
            if type(seeds) in (str, unicode) and seeds.isdigit():
                seeds = int(seeds)
        if 'peers' in api_format:
            peers = result[api_format['peers']]
            if type(peers) in (str, unicode) and peers.isdigit():
                peers = int(peers)
        yield (name, info_hash, torrent, size, seeds, peers)
def extract_from_page(provider, content):
    """ Sub-page extraction method

    Tries a series of patterns, in decreasing order of confidence, to pull a
    torrent or magnet link out of a provider's sub-page HTML.

    Args:
        provider (str): Provider ID
        content (str): Page content from Client instance

    Returns:
        str: Torrent or magnet link extracted from sub-page, or None when
             nothing matched (or extraction failed).
    """
    definition = definitions[provider]
    definition = get_alias(definition, get_setting("%s_alias" % provider))

    try:
        # Direct magnet link anywhere in the page.
        # Bug fix: all patterns below are now raw strings; the originals
        # relied on invalid/deprecated escape sequences ('\?', '\:').
        matches = re.findall(r'magnet:\?[^\'"\s<>\[\]]+', content)
        if matches:
            result = matches[0]
            log.debug('[%s] Matched magnet link: %s' % (provider, repr(result)))
            return result

        # Plain ``http...torrent`` URL; torcache.net is dead, rewrite to the
        # itorrents.org mirror.
        matches = re.findall(r'http(.*?).torrent["\']', content)
        if matches:
            result = 'http' + matches[0] + '.torrent'
            result = result.replace('torcache.net', 'itorrents.org')
            log.debug('[%s] Matched torrent link: %s' % (provider, repr(result)))
            return result

        # Tokenized download endpoint, relative to the provider root.
        matches = re.findall(r'/download\?token=[A-Za-z0-9%]+', content)
        if matches:
            result = definition['root_url'] + matches[0]
            log.debug('[%s] Matched download link with token: %s' % (provider, repr(result)))
            return result

        # Quoted relative download path.
        matches = re.findall(r'"(/download/[A-Za-z0-9]+)"', content)
        if matches:
            result = definition['root_url'] + matches[0]
            log.debug('[%s] Matched download link: %s' % (provider, repr(result)))
            return result

        matches = re.findall(r'/torrents/download/\?id=[a-z0-9-_.]+', content)  # t411
        if matches:
            result = definition['root_url'] + matches[0]
            log.debug('[%s] Matched download link with an ID: %s' % (provider, repr(result)))
            return result

        # Bare 40-hex info-hash after a colon; synthesize a magnet URI.
        matches = re.findall(r': ([A-Fa-f0-9]{40})', content)
        if matches:
            result = "magnet:?xt=urn:btih:" + matches[0]
            log.debug('[%s] Matched magnet info_hash search: %s' % (provider, repr(result)))
            return result
    except Exception as e:
        # Bug fix: the old bare ``except: pass`` also swallowed SystemExit /
        # KeyboardInterrupt and hid real errors; narrow the catch and log.
        log.debug('[%s] Sub-page extraction error: %s' % (provider, repr(e)))
    return None
def run_provider(provider, payload, method):
    """ Provider thread entrypoint

    Args:
        provider (str): Provider ID
        payload (dict): Search payload from Elementum
        method (str): Type of search, can be ``general``, ``movie``, ``show``, ``season`` or ``anime``
    """
    log.debug("Processing %s with %s method" % (provider, method))

    filterInstance = Filtering()
    # Pick the filter setup matching the requested search type;
    # anything unrecognized falls back to a general search.
    setup_filter = {
        'movie': filterInstance.use_movie,
        'season': filterInstance.use_season,
        'episode': filterInstance.use_episode,
        'anime': filterInstance.use_anime,
    }.get(method, filterInstance.use_general)
    setup_filter(provider, payload)

    # API providers get the JSON generator, the rest get the HTML parser.
    generator = extract_from_api if 'is_api' in definitions[provider] else extract_torrents
    results = process(provider=provider,
                      generator=generator,
                      filtering=filterInstance,
                      has_special=payload['has_special'],
                      skip_auth=payload['skip_auth'])

    got_results(provider, results)
def get_search_query(definition, key):
    """Return the eval-able lookup expression for ``key`` from a provider definition.

    Yields "" when the definition has no parser section or the parser lacks
    the requested key. Structural keys ('key', 'table', 'row') are resolved
    against the parsed page, so they get a "dom." prefix.
    """
    parser = definition.get('parser', {})
    if key not in parser:
        return ""
    query = parser[key]
    if key in ('key', 'table', 'row'):
        return "dom." + query
    return query
|
worker.py | # -*- coding: utf-8 -*-
"""
Created on November 11, 2017
@author: neerbek
"""
import subprocess
from threading import Lock
from typing import List
from typing import Tuple
from typing import IO
from typing import cast
# from threading import Thread
import os
import controller
FileDescriptorType = IO[str]
class ExceptionWithMessage(Exception):
    """Base class for application exceptions carrying a plain-text message."""

    def __init__(self, msg):
        super().__init__()
        # Stored separately instead of in args, matching existing callers.
        self.message = msg
class JobStillRunningException(ExceptionWithMessage):
    """Raised when a worker is reset/reused while its job is still running."""

    def __init__(self, msg):
        super().__init__(msg)
class NoWorkerException(ExceptionWithMessage):
    """Raised when no worker matches a requested job name."""

    def __init__(self, msg):
        super().__init__(msg)
class Worker:
    """Tracks one external job: its subprocess, its log file, and a busy flag.

    All ``protected*`` attributes are guarded by ``self.mutex``; code outside
    this class (see WorkerManager.getJobList) must hold the lock before
    reading them directly.
    """

    @staticmethod
    def getLogfile(jobname: str) -> str:
        # Convention: each job logs to "<jobname>.log".
        return jobname + ".log"

    def __init__(self) -> None:
        # NOTE(review): several annotations below say e.g. ``str`` but the
        # initial value is None; Optional[...] would be the accurate type.
        self.protectedIsRunning: bool = False
        self.protectedName: str = None
        self.protectedPopen: subprocess.Popen = None
        self.protectedPopenStdOut: FileDescriptorType = None
        self.protectedCommand: List[str] = None
        self.protectedFile: FileDescriptorType = None
        # Byte offset into the log file already returned by getCurrentLog().
        self.protectedBytesRead: int = 0
        self.protectedLastLog: List[str] = []
        self.mutex: Lock = Lock()

    def getName(self) -> str:
        with self.mutex:
            return self.protectedName

    def addNew(self, name: str, command: List[str]) -> None:
        """Assign a new job; reset() raises if the previous job is still alive."""
        self.reset()
        with self.mutex:
            self.protectedName = name
            self.protectedCommand = command

    def reset(self) -> None:  # note does not reset isRunning
        """Flush/close process output and log handles, clear job state.

        Raises:
            JobStillRunningException: if the subprocess has not exited yet.
        """
        with self.mutex:
            if self.protectedPopen is not None:
                if self.protectedPopen.poll() is None:
                    raise JobStillRunningException("job " + self.protectedName + " not completed. poll={}".format(self.protectedPopen.poll()))
                print(type(self.protectedPopenStdOut))
                fd = self.protectedPopenStdOut
                # Flush Python buffers, then force the OS to write to disk so
                # the log file is complete before we forget the process.
                fd.flush()
                os.fsync(cast(int, fd))  # mypy has troubles with file descriptors
                fd.close()
                self.protectedPopen = None
            self.protectedName = None
            self.protectedCommand = None
            if self.protectedFile is not None:
                self.protectedFile.close()
                self.protectedFile = None
            self.protectedBytesRead = 0
            self.protectedLastLog = []

    def setOutput(self, output: Tuple[subprocess.Popen, FileDescriptorType]) -> None:
        # output = (Popen handle, the file object its stdout is redirected to)
        with self.mutex:
            self.protectedPopen = output[0]
            self.protectedPopenStdOut = output[1]

    def getIsProcessRunning(self) -> bool:
        """True while the underlying subprocess exists and has not exited."""
        with self.mutex:
            if self.protectedPopen is not None:
                if self.protectedPopen.poll() is None:
                    # try:
                    #     self.protectedPopen.wait(timeout=1)  # quicker close
                    # except subprocess.TimeoutExpired:
                    #     pass
                    if self.protectedPopen.poll() is None:
                        return True
            return False

    def getIsRunning(self) -> bool:
        # Logical busy flag; independent of whether the OS process is alive.
        with self.mutex:
            return self.protectedIsRunning

    def testAndSetRunning(self, value=True) -> bool:
        """tests isRunning in a thread safe fashion. If value==True (default) then
        if isRunning is False, then it is set to True and True is returned
        """
        with self.mutex:
            if self.protectedIsRunning != value:
                self.protectedIsRunning = value
                return True
            return False

    def getCurrentLog(self) -> List[str]:
        """Return log lines appended since the previous call (tail -f style)."""
        with self.mutex:
            logfile = Worker.getLogfile(self.protectedName)
            if self.protectedFile is None:
                self.protectedFile = open(logfile, "r")
                self.protectedBytesRead = 0
            res = []
            size = os.path.getsize(logfile)
            if size == self.protectedBytesRead:
                return []
            while size > self.protectedBytesRead:
                line = self.protectedFile.readline()
                self.protectedBytesRead += len(line)
                line = line.rstrip()
                res.append(line)
                if len(res) > 200:
                    # Bound memory: drop the oldest 100 lines when over 200.
                    res = res[100:]
            self.protectedLastLog = res
            return res

    def getLastLog(self) -> List[str]:
        # Defensive copy so callers cannot mutate internal state.
        with self.mutex:
            res = [line for line in self.protectedLastLog]
            return res
class WorkerManager:
    """Owns the pool of compute Workers plus one dedicated storage Worker."""

    def __init__(self) -> None:
        self.workers: List[Worker] = []
        # Separate worker reserved for storage jobs, addressed by the
        # literal name "storage" in getWorker().
        self.storageWorker: Worker = None

    def initialize(self, numWorkers: int) -> None:
        """(Re)create the worker pool.

        Raises (via Worker.reset) if any previous job is still running.
        """
        if len(self.workers) > 0:
            w: Worker
            for w in self.workers:
                w.reset()  # will raise if w is not stopped
                w.testAndSetRunning(False)
            self.storageWorker.reset()
            self.storageWorker.testAndSetRunning(False)
        self.workers = [Worker() for i in range(numWorkers)]
        self.storageWorker = Worker()

    def getWorker(self, workerName: str) -> Worker:
        """Look up a worker by its current job name; "storage" is special-cased.

        Raises:
            NoWorkerException: when no worker carries that name.
        """
        for worker in self.workers:
            if worker.getName() == workerName:
                return worker
        if workerName == "storage":
            return self.storageWorker
        raise NoWorkerException("worker: " + workerName + " not found")

    def hasReadyWorker(self) -> bool:
        # Read-only probe; may race with findReadyWorker, which is why that
        # method re-checks with testAndSetRunning.
        for i in range(len(self.workers)):
            worker = self.workers[i]
            if not worker.getIsRunning():
                return True
        return False

    def findReadyWorker(self) -> Worker:
        """Atomically claim a free worker; returns None when all are busy."""
        for i in range(len(self.workers)):
            worker = self.workers[i]
            if worker.testAndSetRunning():
                return worker
        return None

    def getJobList(self) -> List[controller.Job]:
        """Snapshot running jobs plus a pseudo-entry holding the free-worker count."""
        freeWorkers = 0
        res: List[controller.Job] = []
        for i in range(len(self.workers)):
            worker = self.workers[i]
            # Query completion before taking worker.mutex ourselves, since
            # getIsProcessRunning() acquires the same (non-reentrant) lock.
            isCompleted = not worker.getIsProcessRunning()
            with worker.mutex:
                if not worker.protectedIsRunning:
                    freeWorkers += 1
                    continue
                # isProcessRunning = False
                # if worker.protectedPopen != None:
                #     isProcessRunning = (worker.protectedPopen.poll() == None)
                res.append(controller.RunningJob(worker.protectedName, worker.protectedCommand, isCompleted))
        worker = self.storageWorker
        isCompleted = not worker.getIsProcessRunning()
        with worker.mutex:
            if worker.protectedIsRunning:
                res.append(controller.RunningJob(worker.protectedName, worker.protectedCommand, isCompleted))
        res.append(controller.Job("Free Workers", param=[freeWorkers]))
        return res
# class ThreadContainer:
# def __init__(self, name, workerManager):
# self.name = name
# self.workerManager = workerManager
# self.t = None
# self.mutex = Lock()
# self.stopSignal = False
# def do_stop(self):
# with self.mutex:
# self.stopSignal = True
# def doStop(self):
# with self.mutex:
# return self.stopSignal
# def run_internal(self):
# work = None
# while not self.doStop():
# try:
# for worker in workerManager.workers:
# with worker.mutex:
# if worker.protectedIsRunning and worker.protectedOutput != None:
# count = 0
# for l in worker.protectOutput:
# if l
# worker.protectedCurrentLog.append(l)
# work = self.myparent.get_work()
# except Exception e:
# print(self.name + " worker " + worker.getName() + " threw exception. {}".format(e))
# time.sleep(10) # TODO: what should the constant be here
# def start(self):
# self.t = Thread(target=self.run_internal, args=())
# self.t.start()
|
config.py | import logging
import sched
import threading
import time
import pyodbc
import requests
import yaml
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
LOGGER = logging.getLogger(__name__)
class AppConfig:
    """Remote YAML application configuration with periodic background refresh.

    The configuration is fetched once synchronously on construction (failures
    propagate to the caller), then re-fetched every 30 seconds on a daemon
    scheduler thread.
    """

    CONFIG_URL = 'https://codeday.blob.core.windows.net/codeday-ml-ci-cd/resources/config.yaml'

    def __init__(self):
        self.config = None
        self.scheduler = sched.scheduler(time.time, time.sleep)
        # First load is synchronous so construction fails fast on bad config.
        self._load()
        self.scheduler.enter(30, 1, self._refresh)
        # Bug fix: the refresh thread must be a daemon — the scheduler queue
        # is never empty, so a non-daemon thread kept the interpreter alive
        # forever on shutdown.
        schedule_thread = threading.Thread(target=self.scheduler.run, daemon=True)
        schedule_thread.start()

    def _load(self):
        """Fetch and parse the remote YAML config; network errors propagate."""
        with requests.get(self.CONFIG_URL) as res:
            self.config = yaml.safe_load(res.text)
        LOGGER.info('loaded latest application configuration')

    def _refresh(self):
        """Scheduler callback: reload, tolerate transient failures, reschedule.

        Bug fix: previously one failed fetch raised out of the scheduler
        thread and silently stopped all future refreshes.
        """
        try:
            self._load()
        except Exception:
            LOGGER.exception('failed to refresh application configuration')
        self.scheduler.enter(30, 1, self._refresh)
class DBConfig:
    """Builds pyodbc connections to the 'codeday' database using AppConfig credentials."""

    # Doubled braces keep the literal {ODBC Driver 17 for SQL Server}
    # intact through str.format below.
    _CONN_TEMPLATE = ('Driver={{ODBC Driver 17 for SQL Server}};Server=tcp:{url};Database=codeday;Uid={user};'
                      'Pwd={password};Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;')

    def __init__(self):
        self.app_config = AppConfig()

    def get_connection(self):
        """Open and return a fresh pyodbc connection; the caller owns/closes it."""
        db_settings = self.app_config.config['db']
        dsn = self._CONN_TEMPLATE.format(
            url=db_settings['url'],
            user=db_settings['user'],
            password=db_settings['password'],
        )
        return pyodbc.connect(dsn)
|
JumpscriptFactory.py | from JumpScale import j
import time
import imp
import linecache
import inspect
import JumpScale.baselib.redis
import multiprocessing
import tarfile
import StringIO
import collections
import os
import base64
import traceback
import signal
import sys
class Jumpscript(object):
    """One (Python) jumpscript: metadata plus a dynamically loaded module.

    NOTE(review): this file is Python 2 only (``print`` statements, the
    ``async`` attribute name — a keyword in Python 3 — and StringIO usage
    elsewhere in the module).
    """

    def __init__(self, ddict=None, path=None):
        # Defaults first; overridden by ddict and/or by attributes
        # introspected from the module at `path`.
        self._loaded = False
        self.name=""
        self.organization=""
        self.period = 0
        self.category = ""
        self.lastrun = 0
        self.source=""
        self.debug = False
        self.path=path
        self.id = None
        self.startatboot = False
        # NOTE(review): `path` and `debug` are assigned twice in this method.
        self.path = path
        self.debug=False
        self.timeout = None
        if ddict:
            # 'path' from the dict must not clobber the explicit argument.
            ddict.pop('path', None)
            self.__dict__.update(ddict)
        if path:
            self.load()
            self.loadAttributes()

    def write(self):
        """Persist the script source to disk (tmp dir) if no path is set yet."""
        if not self.path:
            jscriptdir = j.system.fs.joinPaths(j.dirs.tmpDir,"jumpscripts")
            j.system.fs.createDir(jscriptdir)
            self.path=j.system.fs.joinPaths(jscriptdir, "%s_%s.py" % (self.organization, self.name))
        # Every written script gets the JumpScale import prepended.
        content="""
from JumpScale import j
"""
        content += self.source
        j.system.fs.writeFile(filename=self.path, contents=content)

    def load(self):
        """Import the script file as a uniquely named module (md5 of its path)."""
        self._loaded = True
        md5sum = j.tools.hash.md5_string(self.path)
        modulename = 'JumpScale.jumpscript_%s' % md5sum
        # Invalidate linecache so tracebacks show the current source.
        linecache.checkcache(self.path)
        self.module = imp.load_source(modulename, self.path)
        # Magic marker in the source forces debug (in-process) execution.
        if self.source.find("DEBUG NOW")!=-1:
            self.debug=True

    def getDict(self):
        """Return a serializable metadata dict (fixed subset of attributes)."""
        result = dict()
        for attrib in ('name', 'author', 'organization', 'category', 'license', 'version', 'roles', 'source', 'path', 'descr', 'queue', 'async', 'period', 'order', 'log', 'enable', 'startatboot', 'gid', 'id','timeout'):
            result[attrib] = getattr(self, attrib)
        return result

    def loadAttributes(self):
        """Pull metadata (author, period, roles, ...) from the loaded module."""
        name = getattr(self.module, 'name', "")
        if name=="":
            # Fall back to the file name, lowercased, without extension.
            name=j.system.fs.getBaseName(self.path)
            name=name.replace(".py","").lower()
        source = inspect.getsource(self.module)
        self.name=name
        self.author=getattr(self.module, 'author', "unknown")
        self.organization=getattr(self.module, 'organization', "unknown")
        self.category=getattr(self.module, 'category', "unknown")
        self.license=getattr(self.module, 'license', "unknown")
        self.version=getattr(self.module, 'version', "1.0")
        self.debug=getattr(self.module, 'debug', False)
        self.roles=getattr(self.module, 'roles', [])
        self.source=source
        # 'descr' is mandatory on the module — AttributeError if missing.
        self.descr=self.module.descr
        self.queue=getattr(self.module, 'queue', "")
        self.timeout=getattr(self.module, 'timeout', None)
        self.async = getattr(self.module, 'async',False)
        self.period=getattr(self.module, 'period',0)
        self.order=getattr(self.module, 'order', 1)
        self.log=getattr(self.module, 'log', True)
        self.enable=getattr(self.module, 'enable', True)
        self.startatboot=getattr(self.module, 'startatboot', False)
        self.gid=getattr(self.module, 'gid', j.application.whoAmI.gid)

    def getKey(self):
        # Organization + name uniquely identifies a jumpscript.
        return "%s_%s" % (self.organization, self.name)

    def __eq__(self, other):
        if not isinstance(other, Jumpscript):
            return False
        return self.name == other.name and self.organization == other.organization

    def executeInWorker(self, *args, **kwargs):
        """Run action() in a child process with timeout enforcement.

        Returns (True, result), (False, eco) or ("TIMEOUT", eco-dump).
        In debug mode the script runs in-process instead.
        """
        if not self.path:
            self.write()
        if not self._loaded:
            self.load()
        if self.debug:
            result = self.executeInProcess(*args, **kwargs)
            return result

        def helper(pipe):
            # Runs in the child process; reports back over `pipe`.
            def get_timeout_backtrace():
                # Sample the (still running) child with py-spy for diagnostics.
                proc = multiprocessing.current_process()
                _, stdout, _ = j.do.execute(
                    "py-spy --pid {} --dump".format(proc.pid),
                    outputStdout=False,
                    dieOnNonZeroExitCode=False
                )
                return stdout

            def errorhandler(sig, frame):
                # SIGTERM from the parent means we overran the timeout:
                # build an error condition, send it, then exit.
                try:
                    msg = 'Failed to execute job on time'
                    eco = j.errorconditionhandler.getErrorConditionObject(msg=msg)
                    eco.backtrace = get_timeout_backtrace()
                    eco.tb = None
                    eco.tags = "jscategory:%s"%self.category
                    eco.jid = j.application.jid
                    eco.tags += " jsorganization:%s"%self.organization
                    eco.tags +=" jsname:%s"%self.name
                    j.errorconditionhandler.raiseOperationalCritical(eco=eco,die=False)
                except Exception as e:
                    eco = str(e)
                pipe.send(("TIMEOUT", eco))
                # when handling sigterm we need to exit
                sys.exit(2)

            signal.signal(signal.SIGTERM, errorhandler)
            try:
                result = self.executeInProcess(*args, **kwargs)
                pipe.send(result)
            except Exception as e:
                try:
                    result = self._getECO(e)
                except Exception as e:
                    msg = 'Failed parsing original exception: %s' % e
                    result = j.errorconditionhandler.getErrorConditionObject(msg=msg)
                pipe.send((False, result))

        ppipe, cpipe = multiprocessing.Pipe()
        proc = multiprocessing.Process(target=helper, args=(cpipe,))
        proc.start()
        # Parent keeps only its end of the pipe open.
        cpipe.close()
        proc.join(self.timeout)
        if proc.is_alive():
            # Timed out: escalate SIGTERM -> SIGKILL.
            proc.terminate()
            proc.join(5)
            if proc.is_alive():
                try:
                    os.kill(proc.pid, signal.SIGKILL)
                except (ProcessLookupError, OSError):
                    pass
                # reap process
                proc.join(5)
            msg = 'Failed to execute job on time and failed to kill cleanly'
            eco = j.errorconditionhandler.getErrorConditionObject(msg=msg)
            eco.errormessagePub = 'JumpScript died unexpectedly %s'
            eco.tb = False
            return "TIMEOUT", eco.dump()
        try:
            return ppipe.recv()
        except Exception as e:
            # Child died without sending a result.
            eco = j.errorconditionhandler.parsePythonErrorObject(e)
            eco['errormessagePub'] = 'JumpScript died unexpectedly %s'
            # NOTE(review): `result` is undefined on this path — looks like
            # this should return (False, eco); confirm before relying on it.
            return False, result

    def _getECO(self, e):
        """Convert an exception into a tagged error-condition object."""
        eco = j.errorconditionhandler.parsePythonErrorObject(e)
        eco.tb = None
        eco.errormessage='Exec error procmgr jumpscr:%s_%s on node:%s_%s %s'%(self.organization,self.name, \
            j.application.whoAmI.gid, j.application.whoAmI.nid,eco.errormessage)
        eco.tags="jscategory:%s"%self.category
        eco.jid = j.application.jid
        eco.tags+=" jsorganization:%s"%self.organization
        eco.tags+=" jsname:%s"%self.name
        return eco

    def executeInProcess(self, *args, **kwargs):
        """Run the module's action() inline; returns (ok, result_or_eco)."""
        try:
            return True, self.module.action(*args, **kwargs)
        except Exception as e:
            print "error in jumpscript factory: execute in process."
            eco = self._getECO(e)
            j.errorconditionhandler.raiseOperationalCritical(eco=eco,die=False)
            print(eco)
            return False, eco

    def executeLocal(self, *args, **kwargs):
        """Run action() inline without error wrapping; exceptions propagate."""
        if not self.path:
            self.write()
        if not self._loaded:
            self.load()
        return self.module.action(*args, **kwargs)

    def execute(self, *args, **kwargs):
        """
        Run the jumpscript: inline when not async, otherwise queued onto a
        redis worker. Returns None when the script is disabled.
        """
        result = None, None
        redisw = kwargs.pop('_redisw', j.clients.redisworker)
        if not self.enable:
            return
        if not self.async:
            result = list(self.executeInProcess(*args, **kwargs))
            if not result[0]:
                # Normalize the eco to a plain dict for the caller.
                eco = result[1]
                eco.type = str(eco.type)
                result[1] = eco.__dict__
        else:
            #make sure this gets executed by worker
            queue = getattr(self, 'queue', 'default') #fall back to default queue if none specified
            result=redisw.execJumpscript(jumpscript=self,_timeout=self.timeout,_queue=queue,_log=self.log,_sync=False)
        self.lastrun = time.time()
        if result!=None:
            print("ok:%s"%self.name)
        return result
"""
Metadata about a Lua Jumpscript.
"""
LuaJumpscript = collections.namedtuple('LuaJumpscript', field_names=(
'name', 'path', 'organization', 'queue', 'log', 'id', 'enable',
))
class JumpscriptFactory:
    """Packs, fetches and unpacks jumpscripts (Python and Lua) for the agent controller."""

    def __init__(self):
        # Local extraction root for processmanager scripts.
        self.basedir = j.system.fs.joinPaths(j.dirs.baseDir, 'apps', 'processmanager')

    def getJSClass(self):
        # Exposed so callers can construct Jumpscript objects via the factory.
        return Jumpscript

    def getArchivedJumpscripts(self, bz2_compressed=True, types=('processmanager', 'jumpscripts')):
        """
        Returns the available jumpscripts in TAR format that is optionally compressed using bzip2.

        Args:
            bz2_compressed (boolean): If True then the returned TAR is bzip2-compressed
            types (sequence of str): A sequence of the types of jumpscripts to be packed in the returned archive.
                possible values in the sequence are 'processmanager', 'jumpscripts', and 'luajumpscripts'.
        """
        fp = StringIO.StringIO()
        with tarfile.open(fileobj=fp, mode='w:bz2' if bz2_compressed else 'w') as tar:
            for jumpscript_type in types:
                parent_path = '%s/apps/agentcontroller/%s' % (j.dirs.baseDir, jumpscript_type)
                for allowed_filename_extension in ('py', 'lua'):
                    for file_path in j.system.fs.walkExtended(parent_path, recurse=1, dirs=False,
                            filePattern='*.' + allowed_filename_extension):
                        # Archive paths are rooted at the jumpscript type.
                        path_in_archive = jumpscript_type + '/' + file_path.split(parent_path)[1]
                        tar.add(file_path, path_in_archive)
        return fp.getvalue()

    def loadFromAC(self, acl=None):
        """Fetch the jumpscript archive from the agent controller and unpack it."""
        if acl is None:
            acl = j.clients.agentcontroller.getByInstance()
        # Transport encodes the bz2 tar as base64.
        tar = base64.decodestring(acl.getJumpscripts())
        self.loadFromTar(tar, 'bz2')

    def loadFromTar(self, tarcontent, type):
        """Replace the local script tree with the contents of a tar archive.

        Args:
            tarcontent (str): raw tar bytes
            type (str): tarfile compression suffix, e.g. 'bz2'
        """
        # Destructive: wipes the previous extraction first.
        j.system.fs.removeDirTree(self.basedir)
        import tarfile
        fp = StringIO.StringIO()
        fp.write(tarcontent)
        fp.seek(0)
        mode = "r:%s" % type
        tar = tarfile.open(fileobj=fp, mode=mode)
        for tarinfo in tar:
            if tarinfo.isfile():
                print(tarinfo.name)
                # processmanager/* lands beside basedir; jumpscripts/* inside it.
                if tarinfo.name.find("processmanager/")==0:
                    tar.extract(tarinfo.name, j.system.fs.getParent(self.basedir))
                if tarinfo.name.find("jumpscripts/")==0:
                    tar.extract(tarinfo.name, self.basedir)

    @staticmethod
    def introspectLuaJumpscript(path):
        """
        Introspects for a Lua Jumpscript at the given path and returns a LuaJumpscript object with the results.

        Args:
            path (str): the absolute path to the jumpscript file.

        Raises:
            IOError if the file at the path could not be opened.
        """
        assert os.path.isabs(path), 'An absolute file path is needed'
        if not os.path.exists(path):
            raise IOError(path + ' does not exist')
        relative_path = path.split('agentcontroller/')[1]  # Remove the string "^.*agentcontroller/"
        # The Lua Jumpscript metadata is inferred conventionally using the jumpscript file's relative path as follows:
        #   luajumpscripts/ORGANIZATION[/IRRELEVANT_SUBPATH]/JUMPSCRIPT_NAME.lua
        #
        # Note: The IRRELEVANT_SUBPATH is optional and is not used.
        path_components = relative_path.split('/')
        jumpscript_name = os.path.splitext(path_components[-1])[0]
        jumpscript_organization = path_components[1]
        return LuaJumpscript(
            name=jumpscript_name,
            path=path,
            organization=jumpscript_organization,
            queue=None,
            log=True,
            id=None,
            enable=True
        )
|
_channel.py | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Invocation-side implementation of gRPC Python."""
import sys
import threading
import time
import logging
import grpc
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
# User agent advertised to servers, derived from the grpcio build version.
_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)

_EMPTY_FLAGS = 0
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_EMPTY_METADATA = cygrpc.Metadata(())

# For each RPC arity, the set of completion-queue operation types that are
# outstanding ("due") right after the call starts; _RPCState.due starts from
# one of these and drains as events arrive.
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')
def _deadline(timeout):
    """Translate an optional relative timeout into (deadline, cygrpc.Timespec).

    A None timeout yields (None, infinite-future); otherwise the deadline is
    an absolute wall-clock time.
    """
    if timeout is None:
        return None, _INFINITE_FUTURE
    deadline = time.time() + timeout
    return deadline, cygrpc.Timespec(deadline)
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
if until is None:
condition.wait()
else:
remaining = until - time.time()
if remaining < 0:
raise grpc.FutureTimeoutError()
else:
condition.wait(timeout=remaining)
# Message for cygrpc.CallError values that indicate a library-internal
# problem; the %d placeholder receives the raw call-error code.
_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = (
    'Internal gRPC call error %d. ' +
    'Please report to https://github.com/grpc/grpc/issues')
def _check_call_error(call_error, metadata):
    """Raise ValueError if cygrpc reported a call error; no-op on ok."""
    if call_error == cygrpc.CallError.ok:
        return
    if call_error == cygrpc.CallError.invalid_metadata:
        raise ValueError('metadata was invalid: %s' % metadata)
    raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
def _call_error_set_RPCstate(state, call_error, metadata):
    """Record a cygrpc call error on the RPC state as an INTERNAL abort."""
    if call_error == cygrpc.CallError.invalid_metadata:
        details = 'metadata was invalid: %s' % metadata
    else:
        details = _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error
    _abort(state, grpc.StatusCode.INTERNAL, details)
class _RPCState(object):
def __init__(self, due, initial_metadata, trailing_metadata, code, details):
self.condition = threading.Condition()
# The cygrpc.OperationType objects representing events due from the RPC's
# completion queue.
self.due = set(due)
self.initial_metadata = initial_metadata
self.response = None
self.trailing_metadata = trailing_metadata
self.code = code
self.details = details
# The semantics of grpc.Future.cancel and grpc.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC.
self.cancelled = False
self.callbacks = []
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = _EMPTY_METADATA
state.trailing_metadata = _EMPTY_METADATA
def _handle_event(event, state, response_deserializer):
    """Fold one completion-queue *event* into *state*.

    Removes each completed operation from ``state.due``, stores received
    initial metadata, deserializes a received message into
    ``state.response`` (aborting with INTERNAL on deserialization failure),
    and on status receipt records trailing metadata, code, and details.
    Returns the list of done-callbacks to invoke once status has arrived.

    NOTE(review): most callers hold ``state.condition`` around this call
    (see _event_handler); _UnaryUnaryMultiCallable._blocking calls it
    without the lock, presumably safe because that RPC runs on a private
    completion queue — confirm.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.received_metadata
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.received_message.bytes()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    # Deserializer raised/failed; surface as INTERNAL.
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.received_metadata
            # Only record the server's status if nothing (e.g. cancellation)
            # terminated the RPC locally first.
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.received_status_code)
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    state.details = _unknown_code_details(
                        batch_operation.received_status_code,
                        batch_operation.received_status_details)
                else:
                    state.code = code
                    state.details = batch_operation.received_status_details
            # Status is terminal: hand back the done-callbacks exactly once.
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
def _event_handler(state, call, response_deserializer):
    """Build a completion-queue event callback bound to *state* and *call*.

    The returned handler folds the event into *state* under its condition,
    wakes any waiters, then invokes completion callbacks outside the lock.
    It returns *call* only when no further operations are due, which tells
    the channel spin thread the call has finished.
    """

    def handle_event(event):
        with state.condition:
            pending_callbacks = _handle_event(event, state,
                                              response_deserializer)
            state.condition.notify_all()
            finished = not state.due
        for pending_callback in pending_callbacks:
            pending_callback()
        return call if finished else None

    return handle_event
def _consume_request_iterator(request_iterator, state, call,
                              request_serializer):
    """Send every request from *request_iterator* over *call* on a helper thread.

    Runs on a _common.CleanupThread so shutdown can cancel the call. Each
    request is serialized and sent as its own batch; the thread waits for
    that send to complete before pulling the next request, and half-closes
    the stream when the iterator is exhausted. Iterator or serialization
    failures cancel the call and abort *state*.
    """
    # No response deserializer: this handler only services send-side events.
    event_handler = _event_handler(state, call, None)

    def consume_request_iterator():
        while True:
            try:
                request = next(request_iterator)
            except StopIteration:
                # Iterator exhausted: fall through to the half-close below.
                break
            except Exception as e:
                # Application iterator misbehaved: cancel and report UNKNOWN.
                logging.exception("Exception iterating requests!")
                call.cancel()
                _abort(state, grpc.StatusCode.UNKNOWN,
                       "Exception iterating requests!")
                return
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        call.cancel()
                        details = 'Exception serializing request!'
                        _abort(state, grpc.StatusCode.INTERNAL, details)
                        return
                    else:
                        operations = (cygrpc.operation_send_message(
                            serialized_request, _EMPTY_FLAGS),)
                        call.start_client_batch(
                            cygrpc.Operations(operations), event_handler)
                        state.due.add(cygrpc.OperationType.send_message)
                        # Block (releasing the condition) until this send
                        # completes; give up if the RPC terminates first.
                        while True:
                            state.condition.wait()
                            if state.code is None:
                                if cygrpc.OperationType.send_message not in state.due:
                                    break
                            else:
                                return
                else:
                    # RPC already terminated or cancelled; stop sending.
                    return
        with state.condition:
            if state.code is None:
                # Half-close: signal the server that no more requests follow.
                operations = (
                    cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),)
                call.start_client_batch(
                    cygrpc.Operations(operations), event_handler)
                state.due.add(cygrpc.OperationType.send_close_from_client)

    def stop_consumption_thread(timeout):
        # Cleanup hook invoked at interpreter exit: cancel the RPC so the
        # consumption thread (possibly blocked in condition.wait) can exit.
        with state.condition:
            if state.code is None:
                call.cancel()
                state.cancelled = True
                _abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!')
                state.condition.notify_all()

    consumption_thread = _common.CleanupThread(
        stop_consumption_thread, target=consume_request_iterator)
    consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
    """Client-side handle for a single RPC.

    Plays three roles at once: the exception raised for a failed RPC
    (grpc.RpcError), a future for its result (grpc.Future), and the
    invocation-side call object (grpc.Call). All observable data lives in
    the shared _RPCState and is read/written under its condition variable.
    """

    def __init__(self, state, call, response_deserializer, deadline):
        # call may be None for an already-terminated rendezvous (e.g. when
        # request serialization failed before a call was created).
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline

    def cancel(self):
        """Request cancellation of the RPC; always returns False."""
        with self._state.condition:
            if self._state.code is None:
                self._call.cancel()
                self._state.cancelled = True
                _abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!')
                self._state.condition.notify_all()
            return False

    def cancelled(self):
        """True if cancellation was requested before the RPC terminated."""
        with self._state.condition:
            return self._state.cancelled

    def running(self):
        """True while the RPC has not yet terminated."""
        with self._state.condition:
            return self._state.code is None

    def done(self):
        """True once the RPC has a terminal status code."""
        with self._state.condition:
            return self._state.code is not None

    def result(self, timeout=None):
        """Block for the unary response; raise on cancellation or failure."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # The rendezvous itself is the RpcError describing failure.
                    raise self

    def exception(self, timeout=None):
        """Block for termination; return the failure (self) or None on OK."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self

    def traceback(self, timeout=None):
        """Block for termination; return a traceback for failure, else None."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Materialize a traceback by raising and catching self.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]

    def add_done_callback(self, fn):
        """Run fn(self) on termination, or immediately if already terminated."""
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(lambda: fn(self))
                return
        # Already terminated: invoke outside the lock.
        fn(self)

    def _next(self):
        """Fetch the next streamed response, starting a receive if needed."""
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state, self._call,
                                               self._response_deserializer)
                self._call.start_client_batch(
                    cygrpc.Operations(
                        (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
                    event_handler)
                self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
            while True:
                self._state.condition.wait()
                if self._state.response is not None:
                    # Hand off the buffered response exactly once.
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    # Receive finished with no message: end of stream or error.
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self

    def __iter__(self):
        return self

    def __next__(self):
        return self._next()

    # Python 2 iterator protocol.
    def next(self):
        return self._next()

    def is_active(self):
        """grpc.RpcContext: True while the RPC is in flight."""
        with self._state.condition:
            return self._state.code is None

    def time_remaining(self):
        """Seconds until the deadline (never negative), or None if none set."""
        if self._deadline is None:
            return None
        else:
            return max(self._deadline - time.time(), 0)

    def add_callback(self, callback):
        """grpc.RpcContext: register a termination callback.

        Returns False when the RPC already terminated (callbacks is None).
        """
        with self._state.condition:
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def initial_metadata(self):
        """Block until initial metadata is available and return it."""
        with self._state.condition:
            while self._state.initial_metadata is None:
                self._state.condition.wait()
            return _common.application_metadata(self._state.initial_metadata)

    def trailing_metadata(self):
        """Block until trailing metadata is available and return it."""
        with self._state.condition:
            while self._state.trailing_metadata is None:
                self._state.condition.wait()
            return _common.application_metadata(self._state.trailing_metadata)

    def code(self):
        """Block until termination and return the grpc.StatusCode."""
        with self._state.condition:
            while self._state.code is None:
                self._state.condition.wait()
            return self._state.code

    def details(self):
        """Block until details are available and return them decoded."""
        with self._state.condition:
            while self._state.details is None:
                self._state.condition.wait()
            return _common.decode(self._state.details)

    def _repr(self):
        with self._state.condition:
            if self._state.code is None:
                return '<_Rendezvous object of in-flight RPC>'
            else:
                return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
                    self._state.code, _common.decode(self._state.details))

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def __del__(self):
        # Dropping the last reference to an in-flight RPC cancels it locally
        # (no _abort: metadata fill-in is unnecessary, nothing can observe it).
        with self._state.condition:
            if self._state.code is None:
                self._call.cancel()
                self._state.cancelled = True
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
    """Serialize a unary request and compute its deadline.

    Returns ``(deadline, deadline_timespec, serialized_request, rendezvous)``
    where exactly one of ``serialized_request``/``rendezvous`` is None. On
    serialization failure the rendezvous is already terminated with INTERNAL.
    """
    deadline, deadline_timespec = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is not None:
        return deadline, deadline_timespec, serialized_request, None
    state = _RPCState((), _EMPTY_METADATA, _EMPTY_METADATA,
                      grpc.StatusCode.INTERNAL,
                      'Exception serializing request!')
    return deadline, deadline_timespec, None, _Rendezvous(
        state, None, None, deadline)
def _end_unary_response_blocking(state, with_call, deadline):
    """Finish a blocking unary invocation.

    Returns the response (paired with a terminated rendezvous when
    *with_call* is true) or raises the rendezvous on any non-OK status.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _Rendezvous(state, None, None, deadline)
    if with_call:
        return state.response, _Rendezvous(state, None, None, deadline)
    return state.response
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Invoker for a unary-request/unary-response method on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def _prepare(self, request, timeout, metadata):
        # Serialize the request and assemble the single batch of operations
        # making up a unary-unary RPC. Returns a 5-tuple where either the
        # final rendezvous (on serialization failure) or the other four
        # elements are None.
        deadline, deadline_timespec, serialized_request, rendezvous = (
            _start_unary_request(request, timeout, self._request_serializer))
        if serialized_request is None:
            return None, None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
                cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
                cygrpc.operation_receive_message(_EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            return state, operations, deadline, deadline_timespec, None

    def _blocking(self, request, timeout, metadata, credentials):
        # Run the RPC on a private completion queue, polling it on the
        # calling thread until the one batch completes.
        state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
            request, timeout, metadata)
        if rendezvous:
            raise rendezvous
        else:
            completion_queue = cygrpc.CompletionQueue()
            call = self._channel.create_call(None, 0, completion_queue,
                                             self._method, None,
                                             deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), None)
            _check_call_error(call_error, metadata)
            _handle_event(completion_queue.poll(), state,
                          self._response_deserializer)
            return state, deadline

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Blocking invocation returning only the response."""
        state, deadline, = self._blocking(request, timeout, metadata,
                                          credentials)
        return _end_unary_response_blocking(state, False, deadline)

    def with_call(self, request, timeout=None, metadata=None, credentials=None):
        """Blocking invocation returning (response, call)."""
        state, deadline, = self._blocking(request, timeout, metadata,
                                          credentials)
        return _end_unary_response_blocking(state, True, deadline)

    def future(self, request, timeout=None, metadata=None, credentials=None):
        """Asynchronous invocation returning a _Rendezvous future."""
        state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
            request, timeout, metadata)
        if rendezvous:
            return rendezvous
        else:
            # Managed call: events are serviced by the channel spin thread.
            call, drive_call = self._managed_call(None, 0, self._method, None,
                                                  deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            event_handler = _event_handler(state, call,
                                           self._response_deserializer)
            with state.condition:
                call_error = call.start_client_batch(
                    cygrpc.Operations(operations), event_handler)
                if call_error != cygrpc.CallError.ok:
                    _call_error_set_RPCstate(state, call_error, metadata)
                    return _Rendezvous(state, None, None, deadline)
                # Operations were accepted: the managed-call contract
                # requires driving the call now.
                drive_call()
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Invoker for a unary-request/stream-response method on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Start the RPC; returns an iterable _Rendezvous of responses.

        Raises the (already terminated) rendezvous when the request cannot
        be serialized.
        """
        deadline, deadline_timespec, serialized_request, rendezvous = (
            _start_unary_request(request, timeout, self._request_serializer))
        if serialized_request is None:
            raise rendezvous
        else:
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            call, drive_call = self._managed_call(None, 0, self._method, None,
                                                  deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            event_handler = _event_handler(state, call,
                                           self._response_deserializer)
            with state.condition:
                # Initial metadata is requested in its own batch; response
                # messages are requested one at a time by _Rendezvous._next.
                call.start_client_batch(
                    cygrpc.Operations((
                        cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
                    )), event_handler)
                operations = (
                    cygrpc.operation_send_initial_metadata(
                        _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                    cygrpc.operation_send_message(serialized_request,
                                                  _EMPTY_FLAGS),
                    cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                    cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
                call_error = call.start_client_batch(
                    cygrpc.Operations(operations), event_handler)
                if call_error != cygrpc.CallError.ok:
                    _call_error_set_RPCstate(state, call_error, metadata)
                    return _Rendezvous(state, None, None, deadline)
                drive_call()
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Invoker for a stream-request/unary-response method on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def _blocking(self, request_iterator, timeout, metadata, credentials):
        # Requests are consumed on a background thread while this thread
        # polls a private completion queue until all due operations finish.
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        completion_queue = cygrpc.CompletionQueue()
        call = self._channel.create_call(None, 0, completion_queue,
                                         self._method, None, deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        with state.condition:
            call.start_client_batch(
                cygrpc.Operations(
                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
                None)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_receive_message(_EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), None)
            _check_call_error(call_error, metadata)
            _consume_request_iterator(request_iterator, state, call,
                                      self._request_serializer)
        while True:
            event = completion_queue.poll()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    break
        return state, deadline

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None):
        """Blocking invocation returning only the response."""
        state, deadline, = self._blocking(request_iterator, timeout, metadata,
                                          credentials)
        return _end_unary_response_blocking(state, False, deadline)

    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None):
        """Blocking invocation returning (response, call)."""
        state, deadline, = self._blocking(request_iterator, timeout, metadata,
                                          credentials)
        return _end_unary_response_blocking(state, True, deadline)

    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None):
        """Asynchronous invocation returning a _Rendezvous future."""
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        # Managed call: events are serviced by the channel spin thread.
        call, drive_call = self._managed_call(None, 0, self._method, None,
                                              deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        event_handler = _event_handler(state, call, self._response_deserializer)
        with state.condition:
            call.start_client_batch(
                cygrpc.Operations(
                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
                event_handler)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_receive_message(_EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), event_handler)
            if call_error != cygrpc.CallError.ok:
                _call_error_set_RPCstate(state, call_error, metadata)
                return _Rendezvous(state, None, None, deadline)
            drive_call()
            _consume_request_iterator(request_iterator, state, call,
                                      self._request_serializer)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Invoker for a stream-request/stream-response method on a channel."""

    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None):
        """Start the RPC; returns an iterable _Rendezvous of responses.

        Requests are consumed from *request_iterator* on a background
        thread; response messages are requested one at a time by
        _Rendezvous._next.
        """
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        call, drive_call = self._managed_call(None, 0, self._method, None,
                                              deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        event_handler = _event_handler(state, call, self._response_deserializer)
        with state.condition:
            call.start_client_batch(
                cygrpc.Operations(
                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
                event_handler)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), event_handler)
            if call_error != cygrpc.CallError.ok:
                _call_error_set_RPCstate(state, call_error, metadata)
                return _Rendezvous(state, None, None, deadline)
            drive_call()
            _consume_request_iterator(request_iterator, state, call,
                                      self._request_serializer)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _ChannelCallState(object):
    """Channel-wide bookkeeping for managed calls.

    ``managed_calls`` is None while no spin thread is running; otherwise it
    is the set of in-flight cygrpc calls drained by the channel spin thread.
    All access is guarded by ``lock``.
    """

    def __init__(self, channel):
        self.channel = channel
        self.lock = threading.Lock()
        self.completion_queue = cygrpc.CompletionQueue()
        self.managed_calls = None
def _run_channel_spin_thread(state):
    """Start the thread that services *state*'s shared completion queue.

    The thread exits once the set of managed calls drains to empty, setting
    ``state.managed_calls`` back to None so the next managed call spawns a
    fresh spin thread.
    """

    def channel_spin():
        while True:
            event = state.completion_queue.poll()
            # The event tag is the handler from _event_handler; it returns
            # the call object only when that call has fully completed.
            completed_call = event.tag(event)
            if completed_call is not None:
                with state.lock:
                    state.managed_calls.remove(completed_call)
                    if not state.managed_calls:
                        state.managed_calls = None
                        return

    def stop_channel_spin(timeout):
        # Cleanup hook: cancel everything outstanding so the queue drains
        # and channel_spin can terminate.
        with state.lock:
            if state.managed_calls is not None:
                for call in state.managed_calls:
                    call.cancel()

    channel_spin_thread = _common.CleanupThread(
        stop_channel_spin, target=channel_spin)
    channel_spin_thread.start()
def _channel_managed_call_management(state):
    """Return a factory of managed calls serviced by the channel spin thread."""

    def create(parent, flags, method, host, deadline):
        """Creates a managed cygrpc.Call and a function to call to drive it.

    If operations are successfully added to the returned cygrpc.Call, the
    returned function must be called. If operations are not successfully added
    to the returned cygrpc.Call, the returned function must not be called.

    Args:
      parent: A cygrpc.Call to be used as the parent of the created call.
      flags: An integer bitfield of call flags.
      method: The RPC method.
      host: A host string for the created call.
      deadline: A cygrpc.Timespec to be the deadline of the created call.

    Returns:
      A cygrpc.Call with which to conduct an RPC and a function to call if
        operations are successfully started on the call.
    """
        call = state.channel.create_call(parent, flags, state.completion_queue,
                                         method, host, deadline)

        def drive():
            # Register the call for spin-thread servicing; the first managed
            # call also starts the spin thread itself.
            with state.lock:
                if state.managed_calls is None:
                    state.managed_calls = set((call,))
                    _run_channel_spin_thread(state)
                else:
                    state.managed_calls.add(call)

        return call, drive

    return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Delivery-thread body: invoke callbacks until no updates are pending.

    Each callback is invoked (with exceptions logged, not propagated)
    outside the lock; the loop exits after clearing ``state.delivering``
    once a locked check finds nothing left to deliver.
    """
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            callable_util.call_logging_exceptions(
                callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
                connectivity)
        with state.lock:
            callbacks = _deliveries(state)
            if not callbacks:
                state.delivering = False
                return
            connectivity = state.connectivity
def _spawn_delivery(state, callbacks):
    """Start a thread delivering *callbacks* and mark *state* as delivering.

    Caller must hold ``state.lock`` (delivering must flip atomically with
    the decision to spawn).
    """
    worker = threading.Thread(
        target=_deliver, args=(state, state.connectivity, callbacks))
    worker.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Polling-thread body: watch channel connectivity and fan out updates.

    Publishes the initial connectivity to all current subscribers, then
    repeatedly watches for transitions (0.2s watch windows). Exits — and
    clears ``state.polling``/``state.connectivity`` — once no subscribers
    remain and no connect attempt is requested.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                connectivity])
        callbacks = tuple(callback
                          for callback, unused_but_known_to_be_none_connectivity
                          in state.callbacks_and_connectivities)
        # Mark every subscriber as having been delivered the initial state.
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    completion_queue = cygrpc.CompletionQueue()
    while True:
        channel.watch_connectivity_state(connectivity,
                                         cygrpc.Timespec(time.time() + 0.2),
                                         completion_queue, None)
        event = completion_queue.poll()
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # Nobody is listening and no connect was requested: stop.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        # event.success means the watched state changed within the window.
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _moot(state):
with state.lock:
del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
    """Register *callback* for connectivity updates on *state*.

    Three cases: (1) first subscriber and no poller running — start the
    polling thread (with a cleanup hook that moots all subscriptions);
    (2) a connectivity value is already known and no delivery is in flight —
    deliver it to the new subscriber immediately on its own thread;
    (3) otherwise just record the subscription for the next delivery pass.
    """
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:

            def cancel_all_subscriptions(timeout):
                _moot(state)

            polling_thread = _common.CleanupThread(
                cancel_all_subscriptions,
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.start()
            state.polling = True
            # Poller delivers the first value; nothing delivered yet (None).
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity
) in enumerate(state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _options(options):
    """Return user-supplied channel options plus the gRPC-Python user agent."""
    user_agent_option = (cygrpc.ChannelArgKey.primary_user_agent_string,
                         _USER_AGENT)
    return list(options) + [user_agent_option]
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""

    def __init__(self, target, options, credentials):
        """Constructor.

        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
        """
        channel_args = _common.channel_args(_options(options))
        self._channel = cygrpc.Channel(
            _common.encode(target), channel_args, credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)

    def subscribe(self, callback, try_to_connect=None):
        """Register *callback* for connectivity updates."""
        _subscribe(self._connectivity_state, callback, try_to_connect)

    def unsubscribe(self, callback):
        """Remove a previously registered connectivity callback."""
        _unsubscribe(self._connectivity_state, callback)

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """Create a unary-unary invoker for *method*."""
        managed_call = _channel_managed_call_management(self._call_state)
        return _UnaryUnaryMultiCallable(self._channel, managed_call,
                                        _common.encode(method),
                                        request_serializer,
                                        response_deserializer)

    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """Create a unary-stream invoker for *method*."""
        managed_call = _channel_managed_call_management(self._call_state)
        return _UnaryStreamMultiCallable(self._channel, managed_call,
                                         _common.encode(method),
                                         request_serializer,
                                         response_deserializer)

    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """Create a stream-unary invoker for *method*."""
        managed_call = _channel_managed_call_management(self._call_state)
        return _StreamUnaryMultiCallable(self._channel, managed_call,
                                         _common.encode(method),
                                         request_serializer,
                                         response_deserializer)

    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        """Create a stream-stream invoker for *method*."""
        managed_call = _channel_managed_call_management(self._call_state)
        return _StreamStreamMultiCallable(self._channel, managed_call,
                                          _common.encode(method),
                                          request_serializer,
                                          response_deserializer)

    def __del__(self):
        # Drop all connectivity subscriptions so the polling thread can exit.
        _moot(self._connectivity_state)
# NOTE(review): removed trailing non-Python residue ("Subsets and Splits"
# dataset-viewer placeholder text) that was appended after the module body;
# it was not part of the source and made the file a syntax error.