source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
util.py | """Test utilities."""
from importlib import reload as reload_module
import io
import logging
from multiprocessing import Event
from multiprocessing import Process
import shutil
import sys
import tempfile
import unittest
import warnings
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
import josepy as jose
import OpenSSL
import pkg_resources
from certbot import interfaces
from certbot import util
from certbot._internal import configuration
from certbot._internal import constants
from certbot._internal import lock
from certbot._internal import storage
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import util as display_util
try:
# When we remove this deprecated import, we should also remove the
# "external-mock" test environment and the mock dependency listed in
# tools/pinning/pyproject.toml.
import mock
warnings.warn(
"The external mock module is being used for backwards compatibility "
"since it is available, however, future versions of Certbot's tests will "
"use unittest.mock. Be sure to update your code accordingly.",
PendingDeprecationWarning
)
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
def vector_path(*names):
    """Return the filesystem path of a test vector under ``testdata``."""
    relative = os.path.join('testdata', *names)
    return pkg_resources.resource_filename(__name__, relative)
def load_vector(*names):
    """Load contents of a test vector."""
    # luckily, resource_string opens file in binary mode
    raw = pkg_resources.resource_string(
        __name__, os.path.join('testdata', *names))
    try:
        # Normalize CRLF to LF when the payload decodes as text.
        return raw.decode().replace('\r\n', '\n').encode()
    except ValueError:
        # Not decodable with the default encoding: most likely a binary
        # vector, so hand back the raw bytes untouched.
        return raw
def _guess_loader(filename, loader_pem, loader_der):
_, ext = os.path.splitext(filename)
if ext.lower() == '.pem':
return loader_pem
elif ext.lower() == '.der':
return loader_der
raise ValueError("Loader could not be recognized based on extension") # pragma: no cover
def load_cert(*names):
    """Load certificate."""
    filetype = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_certificate(filetype, load_vector(*names))
def load_csr(*names):
    """Load certificate request."""
    filetype = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_certificate_request(filetype, load_vector(*names))
def load_comparable_csr(*names):
    """Load ComparableX509 certificate request."""
    csr = load_csr(*names)
    return jose.ComparableX509(csr)
def load_rsa_private_key(*names):
    """Load RSA private key."""
    loader = _guess_loader(names[-1], serialization.load_pem_private_key,
                           serialization.load_der_private_key)
    raw_key = loader(load_vector(*names), password=None,
                     backend=default_backend())
    return jose.ComparableRSAKey(raw_key)
def load_pyopenssl_private_key(*names):
    """Load pyOpenSSL private key."""
    filetype = _guess_loader(
        names[-1], OpenSSL.crypto.FILETYPE_PEM, OpenSSL.crypto.FILETYPE_ASN1)
    return OpenSSL.crypto.load_privatekey(filetype, load_vector(*names))
def make_lineage(config_dir, testfile, ec=False):
    """Creates a lineage defined by testfile.

    This creates the archive, live, and renewal directories if
    necessary and creates a simple lineage.

    :param str config_dir: path to the configuration directory
    :param str testfile: configuration file to base the lineage on
    :param bool ec: use the ECDSA sample archive instead of the RSA one

    :returns: path to the renewal conf file for the created lineage
    :rtype: str

    """
    lineage_name = testfile[:-len('.conf')]

    conf_dir = os.path.join(
        config_dir, constants.RENEWAL_CONFIGS_DIR)
    archive_dir = os.path.join(
        config_dir, constants.ARCHIVE_DIR, lineage_name)
    live_dir = os.path.join(
        config_dir, constants.LIVE_DIR, lineage_name)

    for directory in (archive_dir, conf_dir, live_dir,):
        if not os.path.exists(directory):
            filesystem.makedirs(directory)

    sample_archive = vector_path('sample-archive{}'.format('-ec' if ec else ''))
    for kind in os.listdir(sample_archive):
        shutil.copyfile(os.path.join(sample_archive, kind),
                        os.path.join(archive_dir, kind))

    # live/ symlinks point at version 1 of each archive file, mirroring
    # what certbot's storage module creates for a real lineage.
    for kind in storage.ALL_FOUR:
        os.symlink(os.path.join(archive_dir, '{0}1.pem'.format(kind)),
                   os.path.join(live_dir, '{0}.pem'.format(kind)))

    # conf_dir already contains config_dir; the original joined config_dir a
    # second time, which was redundant for absolute config_dir paths and
    # produced a wrong path for relative ones.
    conf_path = os.path.join(conf_dir, testfile)
    with open(vector_path(testfile)) as src:
        with open(conf_path, 'w') as dst:
            dst.writelines(
                line.replace('MAGICDIR', config_dir) for line in src)

    return conf_path
def patch_get_utility(target='zope.component.getUtility'):
    """Patch zope.component.getUtility to use a special mock IDisplay.

    The mock IDisplay works like a regular mock object, except it also
    asserts that its methods are called with valid arguments.

    :param str target: path to patch

    :returns: mock zope.component.getUtility
    :rtype: mock.MagicMock

    """
    patcher = mock.patch(target, new_callable=_create_get_utility_mock)
    return patcher
def patch_get_utility_with_stdout(target='zope.component.getUtility',
                                  stdout=None):
    """Patch zope.component.getUtility to use a special mock IDisplay.

    The mock IDisplay works like a regular mock object, except it also
    asserts that its methods are called with valid arguments. The
    `message` argument passed to the IDisplay methods is forwarded to
    stdout's write method.

    :param str target: path to patch
    :param object stdout: object to write standard output to; it is
        expected to have a `write` method

    :returns: mock zope.component.getUtility
    :rtype: mock.MagicMock

    """
    output = stdout or io.StringIO()
    freezable_mock = _create_get_utility_mock_with_stdout(output)
    return mock.patch(target, new=freezable_mock)
class FreezableMock:
    """Mock object with the ability to freeze attributes.

    This class works like a regular mock.MagicMock object, except
    attributes and behavior set before the object is frozen cannot
    be changed during tests.

    If a func argument is provided to the constructor, this function
    is called first when an instance of FreezableMock is called,
    followed by the usual behavior defined by MagicMock. The return
    value of func is ignored.

    """
    def __init__(self, frozen=False, func=None, return_value=mock.sentinel.DEFAULT):
        self._frozen_set = set() if frozen else {'freeze', }
        self._func = func
        self._mock = mock.MagicMock()
        # Compare against the sentinel by identity, not equality: a value
        # overriding __eq__ (e.g. another mock) could otherwise compare
        # equal to mock.sentinel.DEFAULT and be silently dropped.
        if return_value is not mock.sentinel.DEFAULT:
            self.return_value = return_value
        self._frozen = frozen

    def freeze(self):
        """Freeze object preventing further changes."""
        self._frozen = True

    def __call__(self, *args, **kwargs):
        # Run the optional hook first; its return value is intentionally
        # ignored in favor of the MagicMock's configured behavior.
        if self._func is not None:
            self._func(*args, **kwargs)
        return self._mock(*args, **kwargs)

    def __getattribute__(self, name):
        if name == '_frozen':
            try:
                return object.__getattribute__(self, name)
            except AttributeError:
                # Attribute access during __init__, before _frozen is set.
                return False
        elif name in ('return_value', 'side_effect',):
            # These are always proxied to the underlying MagicMock.
            return getattr(object.__getattribute__(self, '_mock'), name)
        elif name == '_frozen_set' or name in self._frozen_set:
            # Attributes set before freezing live on this instance.
            return object.__getattribute__(self, name)
        else:
            return getattr(object.__getattribute__(self, '_mock'), name)

    def __setattr__(self, name, value):
        """ Before it is frozen, attributes are set on the FreezableMock
        instance and added to the _frozen_set. Attributes in the _frozen_set
        cannot be changed after the FreezableMock is frozen. In this case,
        they are set on the underlying _mock.

        In cases of return_value and side_effect, these attributes are always
        passed through to the instance's _mock and added to the _frozen_set
        before the object is frozen.

        """
        if self._frozen:
            if name in self._frozen_set:
                raise AttributeError('Cannot change frozen attribute ' + name)
            return setattr(self._mock, name, value)

        if name != '_frozen_set':
            self._frozen_set.add(name)

        if name in ('return_value', 'side_effect'):
            return setattr(self._mock, name, value)

        return object.__setattr__(self, name, value)
def _create_get_utility_mock():
    """Create a mock zope.component.getUtility returning a mock IDisplay."""
    display = FreezableMock()
    # E1120 is no-value-for-parameter; the original pragma lacked the
    # "disable=" keyword, so pylint silently ignored it.
    for name in interfaces.IDisplay.names():  # pylint: disable=no-value-for-parameter
        if name != 'notification':
            frozen_mock = FreezableMock(frozen=True, func=_assert_valid_call)
            setattr(display, name, frozen_mock)
    display.freeze()
    return FreezableMock(frozen=True, return_value=display)
def _create_get_utility_mock_with_stdout(stdout):
    """Create a mock getUtility whose IDisplay writes messages to *stdout*."""
    def _write_msg(message, *unused_args, **unused_kwargs):
        """Write to message to stdout.
        """
        if message:
            stdout.write(message)

    def mock_method(*args, **kwargs):
        """
        Mock function for IDisplay methods.
        """
        _assert_valid_call(args, kwargs)
        _write_msg(*args, **kwargs)

    display = FreezableMock()
    # E1120 is no-value-for-parameter; the original pragma lacked the
    # "disable=" keyword, so pylint silently ignored it.
    for name in interfaces.IDisplay.names():  # pylint: disable=no-value-for-parameter
        if name == 'notification':
            # notification only echoes; it is not argument-checked.
            frozen_mock = FreezableMock(frozen=True,
                                        func=_write_msg)
            setattr(display, name, frozen_mock)
        else:
            frozen_mock = FreezableMock(frozen=True,
                                        func=mock_method)
            setattr(display, name, frozen_mock)
    display.freeze()
    return FreezableMock(frozen=True, return_value=display)
def _assert_valid_call(*args, **kwargs):
    """Forward an IDisplay call to display_util's argument validator."""
    message = args[0] if args else kwargs['message']
    display_util.assert_valid_call(
        message,
        default=kwargs.get('default', None),
        cli_flag=kwargs.get('cli_flag', None),
        force_interactive=kwargs.get('force_interactive', False))
class TempDirTestCase(unittest.TestCase):
    """Base test class which sets up and tears down a temporary directory"""

    def setUp(self):
        """Execute before test"""
        # Fresh scratch directory per test; removed again in tearDown.
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        """Execute after test"""
        # Cleanup opened resources after a test. This is usually done through atexit handlers in
        # Certbot, but during tests, atexit will not run registered functions before tearDown is
        # called and instead will run them right before the entire test process exits.
        # It is a problem on Windows, that does not accept to clean resources before closing them.
        logging.shutdown()
        # Remove logging handlers that have been closed so they won't be
        # accidentally used in future tests.
        logging.getLogger().handlers = []
        util._release_locks()  # pylint: disable=protected-access

        shutil.rmtree(self.tempdir)
class ConfigTestCase(TempDirTestCase):
    """Test class which sets up a NamespaceConfig object."""

    def setUp(self):
        """Create a NamespaceConfig rooted inside the temporary directory."""
        super().setUp()
        # A MagicMock seeded with CLI_DEFAULTS lets individual tests
        # override any configuration attribute freely.
        self.config = configuration.NamespaceConfig(
            mock.MagicMock(**constants.CLI_DEFAULTS)
        )
        self.config.verb = "certonly"
        # Point all state directories inside the per-test tempdir.
        self.config.config_dir = os.path.join(self.tempdir, 'config')
        self.config.work_dir = os.path.join(self.tempdir, 'work')
        self.config.logs_dir = os.path.join(self.tempdir, 'logs')
        self.config.cert_path = constants.CLI_DEFAULTS['auth_cert_path']
        self.config.fullchain_path = constants.CLI_DEFAULTS['auth_chain_path']
        self.config.chain_path = constants.CLI_DEFAULTS['auth_chain_path']
        self.config.server = "https://example.com"
def _handle_lock(event_in, event_out, path):
    """
    Acquire a file lock on given path, then wait to release it. This worker is coordinated
    using events to signal when the lock should be acquired and released.

    :param multiprocessing.Event event_in: event object to signal when to release the lock
    :param multiprocessing.Event event_out: event object to signal when the lock is acquired
    :param path: the path to lock
    """
    held_lock = lock.lock_dir(path) if os.path.isdir(path) else lock.LockFile(path)
    try:
        event_out.set()
        assert event_in.wait(timeout=20), 'Timeout while waiting to release the lock.'
    finally:
        held_lock.release()
def lock_and_call(callback, path_to_lock):
    """
    Grab a lock on path_to_lock from a foreign process then execute the callback.

    :param callable callback: object to call after acquiring the lock
    :param str path_to_lock: path to file or directory to lock
    """
    # Reload certbot.util module to reset internal _LOCKS dictionary.
    reload_module(util)

    release_signal = Event()
    acquired_signal = Event()
    worker = Process(target=_handle_lock,
                     args=(release_signal, acquired_signal, path_to_lock))
    worker.start()

    # Wait confirmation that lock is acquired
    assert acquired_signal.wait(timeout=10), 'Timeout while waiting to acquire the lock.'
    # Execute the callback
    callback()
    # Trigger unlock from foreign process
    release_signal.set()

    # Wait for process termination
    worker.join(timeout=10)
    assert worker.exitcode == 0
def skip_on_windows(reason):
    """Decorator to skip permanently a test on Windows. A reason is required."""
    def wrapper(function):
        """Wrapped version"""
        skip = unittest.skipIf(sys.platform == 'win32', reason)
        return skip(function)
    return wrapper
def temp_join(path):
    """
    Return the given path joined to the tempdir path for the current platform
    Eg.: 'cert' => /tmp/cert (Linux) or 'C:\\Users\\currentuser\\AppData\\Temp\\cert' (Windows)
    """
    temp_root = tempfile.gettempdir()
    return os.path.join(temp_root, path)
|
PyShell.py | #! /usr/bin/env python
from __future__ import print_function
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version, system
try:
from Tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
from idlelib import IOBinding
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
import warnings
def idle_formatwarning(message, category, filename, lineno, line=None):
    """Format warnings the IDLE way."""
    parts = ["\nWarning (from warnings module):\n",
             ' File \"%s\", line %s\n' % (filename, lineno)]
    # Fall back to the line cache when the caller did not supply the line.
    if line is None:
        line = linecache.getline(filename, lineno)
    line = line.strip()
    if line:
        parts.append(" %s\n" % line)
    parts.append("%s: %s\n" % (category.__name__, message))
    return "".join(parts)
def idle_showwarning(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning (after replacing warnings.showwarning).

    The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequence AttributeError,
    and the output of a hard-coded prompt.
    """
    target = warning_stream if file is None else file
    try:
        target.write(idle_formatwarning(
            message, category, filename, lineno, line=line))
        target.write(">>> ")
    except (AttributeError, IOError):
        # file (probably __stderr__) may be invalid; skip the warning.
        pass
_warnings_showwarning = None
def capture_warnings(capture):
    "Replace warning.showwarning with idle_showwarning, or reverse."
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # Save the stock hook so it can be restored later.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = idle_showwarning
    elif not capture and _warnings_showwarning is not None:
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Extend linecache.checkcache to preserve the <pyshell#...> entries

    Rather than repeating the linecache code, patch it to save the
    <pyshell#...> entries, call the original linecache.checkcache()
    (skipping them), and then restore the saved entries.

    orig_checkcache is bound at definition time to the original
    method, allowing it to be patched.
    """
    cache = linecache.cache
    shell_keys = [key for key in list(cache) if key[:1] + key[-1:] == '<>']
    saved_entries = {key: cache.pop(key) for key in shell_keys}
    orig_checkcache(filename)
    cache.update(saved_entries)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
    "Regular text edit window in IDLE, supports breakpoints"

    def __init__(self, *args):
        # Line numbers of breakpoints set in this window's file.
        self.breakpoints = []
        EditorWindow.__init__(self, *args)
        self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
        self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
        self.text.bind("<<open-python-shell>>", self.flist.open_shell)

        # Persistent breakpoint store shared by all editor windows.
        self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
                                           'breakpoints.lst')
        # whenever a file is changed, restore breakpoints
        def filename_changed_hook(old_hook=self.io.filename_change_hook,
                                  self=self):
            self.restore_file_breaks()
            old_hook()
        self.io.set_filename_change_hook(filename_changed_hook)
        if self.io.filename:
            self.restore_file_breaks()
        self.color_breakpoint_text()

    # Right-click menu entries: (label, virtual event, state-check method).
    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        ("Set Breakpoint", "<<set-breakpoint-here>>", None),
        ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
    ]

    def color_breakpoint_text(self, color=True):
        "Turn colorizing of breakpoint text on or off"
        if self.io is None:
            # possible due to update in restore_file_breaks
            return
        if color:
            theme = idleConf.GetOption('main','Theme','name')
            cfg = idleConf.GetHighlight(theme, "break")
        else:
            cfg = {'foreground': '', 'background': ''}
        self.text.tag_config('BREAK', cfg)

    def set_breakpoint(self, lineno):
        """Tag *lineno* as a breakpoint and notify the debugger, if active."""
        text = self.text
        filename = self.io.filename
        text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
        try:
            self.breakpoints.index(lineno)
        except ValueError:  # only add if missing, i.e. do once
            self.breakpoints.append(lineno)
        try:    # update the subprocess debugger
            debug = self.flist.pyshell.interp.debugger
            debug.set_breakpoint_here(filename, lineno)
        except: # but debugger may not be active right now....
            pass

    def set_breakpoint_here(self, event=None):
        """Event handler: set a breakpoint on the insert-cursor line."""
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        self.set_breakpoint(lineno)

    def clear_breakpoint_here(self, event=None):
        """Event handler: clear the breakpoint on the insert-cursor line."""
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        try:
            self.breakpoints.remove(lineno)
        except:
            pass
        text.tag_remove("BREAK", "insert linestart",\
                        "insert lineend +1char")
        try:
            debug = self.flist.pyshell.interp.debugger
            debug.clear_breakpoint_here(filename, lineno)
        except:
            pass

    def clear_file_breaks(self):
        """Remove all breakpoints in this window: tags, list, and debugger."""
        if self.breakpoints:
            text = self.text
            filename = self.io.filename
            if not filename:
                text.bell()
                return
            self.breakpoints = []
            text.tag_remove("BREAK", "1.0", END)
            try:
                debug = self.flist.pyshell.interp.debugger
                debug.clear_file_breaks(filename)
            except:
                pass

    def store_file_breaks(self):
        "Save breakpoints when file is saved"
        # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
        #     be run.  The breaks are saved at that time.  If we introduce
        #     a temporary file save feature the save breaks functionality
        #     needs to be re-verified, since the breaks at the time the
        #     temp file is created may differ from the breaks at the last
        #     permanent save of the file.  Currently, a break introduced
        #     after a save will be effective, but not persistent.
        #     This is necessary to keep the saved breaks synched with the
        #     saved file.
        #
        #     Breakpoints are set as tagged ranges in the text.
        #     Since a modified file has to be saved before it is
        #     run, and since self.breakpoints (from which the subprocess
        #     debugger is loaded) is updated during the save, the visible
        #     breaks stay synched with the subprocess even if one of these
        #     unexpected breakpoint deletions occurs.
        breaks = self.breakpoints
        filename = self.io.filename
        try:
            with open(self.breakpointPath,"r") as old_file:
                lines = old_file.readlines()
        except IOError:
            lines = []
        try:
            # Rewrite the store, dropping this file's old entry and
            # appending the current breakpoint list if non-empty.
            with open(self.breakpointPath,"w") as new_file:
                for line in lines:
                    if not line.startswith(filename + '='):
                        new_file.write(line)
                self.update_breakpoints()
                breaks = self.breakpoints
                if breaks:
                    new_file.write(filename + '=' + str(breaks) + '\n')
        except IOError as err:
            # Report the failure once per app run, not on every save.
            if not getattr(self.root, "breakpoint_error_displayed", False):
                self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                    message='Unable to update breakpoint list:\n%s'
                        % str(err),
                    parent=self.text)

    def restore_file_breaks(self):
        self.text.update()   # this enables setting "BREAK" tags to be visible
        if self.io is None:
            # can happen if IDLE closes due to the .update() call
            return
        filename = self.io.filename
        if filename is None:
            return
        if os.path.isfile(self.breakpointPath):
            # NOTE(review): the file handle is never closed, and eval()
            # trusts the breakpoint file's contents — a tampered file could
            # execute arbitrary code. Legacy behavior, kept as-is.
            lines = open(self.breakpointPath,"r").readlines()
            for line in lines:
                if line.startswith(filename + '='):
                    breakpoint_linenumbers = eval(line[len(filename)+1:])
                    for breakpoint_linenumber in breakpoint_linenumbers:
                        self.set_breakpoint(breakpoint_linenumber)

    def update_breakpoints(self):
        "Retrieves all the breakpoints in the current window"
        text = self.text
        ranges = text.tag_ranges("BREAK")
        linenumber_list = self.ranges_to_linenumbers(ranges)
        self.breakpoints = linenumber_list

    def ranges_to_linenumbers(self, ranges):
        """Convert Tk tag ranges (start/end index pairs) into a flat list of
        the line numbers those ranges cover."""
        lines = []
        for index in range(0, len(ranges), 2):
            lineno = int(float(ranges[index].string))
            end = int(float(ranges[index+1].string))
            while lineno < end:
                lines.append(lineno)
                lineno += 1
        return lines

    # XXX 13 Dec 2002 KBK Not used currently
    #     def saved_change_hook(self):
    #         "Extend base method - clear breaks if module is modified"
    #         if not self.get_saved():
    #             self.clear_file_breaks()
    #         EditorWindow.saved_change_hook(self)

    def _close(self):
        "Extend base method - clear breaks when module is closed"
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # override FileList's class variable, instances return PyShellEditorWindow
    # instead of EditorWindow when new edit windows are created.
    EditorWindow = PyShellEditorWindow

    # The single shared shell window, created lazily by open_shell().
    pyshell = None

    def open_shell(self, event=None):
        """Raise the existing shell window, or create and start a new one.

        Returns the PyShell instance, or None if it fails to begin.
        """
        if self.pyshell:
            self.pyshell.top.wakeup()
        else:
            self.pyshell = PyShell(self)
            if self.pyshell:
                if not self.pyshell.begin():
                    return None
        return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Text before "iomark" is finished shell history: mark it synced
        # rather than re-colorizing it.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        """Extend base tag definitions with the shell's stream tags."""
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.GetOption('main','Theme','name')
        self.tagdefs.update({
            "stdin": {'background':None,'foreground':None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        })

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag in self.tagdefs:
            self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def insert(self, index, chars, tags=None):
        # Refuse edits in the read-only region before "iomark"; beep instead.
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return
        except TclError:
            # Index comparison can fail for odd indices; allow the insert.
            pass
        UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        # Same guard as insert(): protect text before "iomark".
        try:
            if self.delegate.compare(index1, "<", "iomark"):
                self.delegate.bell()
                return
        except TclError:
            pass
        UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):

    def handle_EOF(self):
        "Override the base class - just re-raise EOFError"
        # Lets ModifiedInterpreter.poll_subprocess() see the lost
        # connection and trigger a subprocess restart.
        raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
    def __init__(self, tkconsole):
        """Bind the interpreter to *tkconsole*, executing in __main__'s dict."""
        self.tkconsole = tkconsole
        # User code runs in the real __main__ namespace, like the regular REPL.
        locals = sys.modules['__main__'].__dict__
        InteractiveInterpreter.__init__(self, locals=locals)
        self.save_warnings_filters = None
        self.restarting = False
        self.subprocess_arglist = None
        self.port = PORT
        # Saved so restart_subprocess() can reset any compiler-flag changes.
        self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcpid = None
    def spawn_subprocess(self):
        """Launch the Python execution server as a child process."""
        if self.subprocess_arglist is None:
            self.subprocess_arglist = self.build_subprocess_arglist()
        args = self.subprocess_arglist
        # P_NOWAIT keeps the GUI responsive; pid kept for unix_terminate().
        self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
    def build_subprocess_arglist(self):
        """Build the argv for the subprocess: interpreter, -W/-Q options, and
        a -c command that starts the run module on self.port."""
        assert (self.port!=0), (
            "Socket should have been assigned a port number.")
        w = ['-W' + s for s in sys.warnoptions]
        # 1/2 > 0 only when this (Python 2) process runs with new division
        # enabled (-Qnew); propagate that mode to the subprocess.
        if 1/2 > 0: # account for new division
            w.append('-Qnew')
        # Maybe IDLE is installed and is being accessed via sys.path,
        # or maybe it's not installed and the idle.py script is being
        # run from the IDLE source directory.
        del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
                                       default=False, type='bool')
        if __name__ == 'idlelib.PyShell':
            command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
        else:
            command = "__import__('run').main(%r)" % (del_exitf,)
        if sys.platform[:3] == 'win' and ' ' in sys.executable:
            # handle embedded space in path by quoting the argument
            decorated_exec = '"%s"' % sys.executable
        else:
            decorated_exec = sys.executable
        return [decorated_exec] + w + ["-c", command, str(self.port)]
    def start_subprocess(self):
        """Create the RPC listener, spawn the subprocess, and accept its
        connection; returns the RPC client, or None on failure."""
        addr = (HOST, self.port)
        # GUI makes several attempts to acquire socket, listens for connection
        for i in range(3):
            time.sleep(i)
            try:
                self.rpcclt = MyRPCClient(addr)
                break
            except socket.error:
                pass
        else:
            self.display_port_binding_error()
            return None
        # if PORT was 0, system will assign an 'ephemeral' port. Find it out:
        self.port = self.rpcclt.listening_sock.getsockname()[1]
        # if PORT was not 0, probably working with a remote execution server
        if PORT != 0:
            # To allow reconnection within the 2MSL wait (cf. Stevens TCP
            # V1, 18.6),  set SO_REUSEADDR.  Note that this can be problematic
            # on Windows since the implementation allows two active sockets on
            # the same address!
            self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
                                                  socket.SO_REUSEADDR, 1)
        self.spawn_subprocess()
        #time.sleep(20) # test to simulate GUI not accepting connection
        # Accept the connection from the Python execution server
        self.rpcclt.listening_sock.settimeout(10)
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        # Expose GUI-side objects to the subprocess by name over RPC.
        self.rpcclt.register("console", self.tkconsole)
        self.rpcclt.register("stdin", self.tkconsole.stdin)
        self.rpcclt.register("stdout", self.tkconsole.stdout)
        self.rpcclt.register("stderr", self.tkconsole.stderr)
        self.rpcclt.register("flist", self.tkconsole.flist)
        self.rpcclt.register("linecache", linecache)
        self.rpcclt.register("interp", self)
        self.transfer_path(with_cwd=True)
        self.poll_subprocess()
        return self.rpcclt
    def restart_subprocess(self, with_cwd=False):
        """Kill and respawn the subprocess, annotate the shell window, and
        re-attach the debugger; returns the RPC client, or None on failure."""
        if self.restarting:
            # Re-entrant call (e.g. from poll_subprocess); nothing to do.
            return self.rpcclt
        self.restarting = True
        # close only the subprocess debugger
        debug = self.getdebugger()
        if debug:
            try:
                # Only close subprocess debugger, don't unregister gui_adap!
                RemoteDebugger.close_subprocess_debugger(self.rpcclt)
            except:
                pass
        # Kill subprocess, spawn a new one, accept connection.
        self.rpcclt.close()
        self.unix_terminate()
        console = self.tkconsole
        was_executing = console.executing
        console.executing = False
        self.spawn_subprocess()
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        self.transfer_path(with_cwd=with_cwd)
        console.stop_readline()
        # annotate restart in shell window and mark it
        console.text.delete("iomark", "end-1c")
        if was_executing:
            console.write('\n')
            console.showprompt()
        halfbar = ((int(console.width) - 16) // 2) * '='
        console.write(halfbar + ' RESTART ' + halfbar)
        console.text.mark_set("restart", "end-1c")
        console.text.mark_gravity("restart", "left")
        console.showprompt()
        # restart subprocess debugger
        if debug:
            # Restarted debugger connects to current instance of debug GUI
            RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
            # reload remote debugger breakpoints for all PyShellEditWindows
            debug.load_breakpoints()
        # Undo any compiler-flag changes made by user code before the restart.
        self.compile.compiler.flags = self.original_compiler_flags
        self.restarting = False
        return self.rpcclt
    def __request_interrupt(self):
        # Blocking RPC call; run from a worker thread (interrupt_subprocess).
        self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
    def interrupt_subprocess(self):
        """Ask the subprocess to interrupt itself, without blocking the GUI."""
        threading.Thread(target=self.__request_interrupt).start()
    def kill_subprocess(self):
        """Stop polling, close the RPC link, and terminate the subprocess."""
        if self._afterid is not None:
            self.tkconsole.text.after_cancel(self._afterid)
        try:
            self.rpcclt.close()
        except AttributeError:  # no socket
            pass
        self.unix_terminate()
        self.tkconsole.executing = False
        self.rpcclt = None
    def unix_terminate(self):
        "UNIX: make sure subprocess is terminated and collect status"
        # os.kill is unavailable on platforms without POSIX signals.
        if hasattr(os, 'kill'):
            try:
                os.kill(self.rpcpid, SIGTERM)
            except OSError:
                # process already terminated:
                return
            else:
                # Reap the child so it does not linger as a zombie.
                try:
                    os.waitpid(self.rpcpid, 0)
                except OSError:
                    return
    def transfer_path(self, with_cwd=False):
        """Mirror this process's sys.path into the subprocess.

        with_cwd=True prepends '' so the subprocess resolves imports from
        the current working directory, like a fresh interpreter (Issue 13506).
        """
        if with_cwd:        # Issue 13506
            path = ['']     # include Current Working Directory
            path.extend(sys.path)
        else:
            path = sys.path

        # NOTE(review): the embedded command appears to have lost its inner
        # indentation in this copy (upstream IDLE indents the lines under
        # "if 1:"); confirm against upstream before relying on it.
        self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
    def poll_subprocess(self):
        """Poll the RPC link for a response and display it; reschedules
        itself via Tk after() while the console stays open."""
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, IOError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print(repr(what), file=console)
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
                print(errmsg, what, file=sys.__stderr__)
                print(errmsg, what, file=console)
            # we received a response to the currently active seq number:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
        # Reschedule myself
        if not self.tkconsole.closing:
            self._afterid = self.tkconsole.text.after(
                self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
    def setdebugger(self, debugger):
        """Install *debugger* as the active debugger reference."""
        self.debugger = debugger
    def getdebugger(self):
        """Return the active debugger reference (None when not debugging)."""
        return self.debugger
    def open_remote_stack_viewer(self):
        """Initiate the remote stack viewer from a separate thread.

        This method is called from the subprocess, and by returning from this
        method we allow the subprocess to unblock.  After a bit the shell
        requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception.  It is queried through
        the RPC mechanism.

        """
        self.tkconsole.text.after(300, self.remote_stack_viewer)
        return
    def remote_stack_viewer(self):
        """Fetch a stack-viewer object from the subprocess and show it in a
        Toplevel tree browser."""
        from idlelib import RemoteObjectBrowser
        oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
        if oid is None:
            # Subprocess reported nothing to show; just beep.
            self.tkconsole.root.bell()
            return
        item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
        from idlelib.TreeWidget import ScrolledCanvas, TreeNode
        top = Toplevel(self.tkconsole.root)
        theme = idleConf.GetOption('main','Theme','name')
        background = idleConf.GetHighlight(theme, 'normal')['background']
        sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
        sc.frame.pack(expand=1, fill="both")
        node = TreeNode(sc.canvas, None, item)
        node.expand()
        # XXX Should GC the remote tree when closing the window
gid = 0
    def execsource(self, source):
        "Like runsource() but assumes complete exec source"
        filename = self.stuffsource(source)
        self.execfile(filename, source)
    def execfile(self, filename, source=None):
        "Execute an existing file"
        if source is None:
            # NOTE(review): the file handle is never explicitly closed here;
            # it is released only when garbage-collected.
            source = open(filename, "r").read()
        try:
            code = compile(source, filename, "exec", dont_inherit=True)
        except (OverflowError, SyntaxError):
            # Report compile failures to the shell instead of raising.
            self.tkconsole.resetoutput()
            print('*** Error in script or command!\n'
                  'Traceback (most recent call last):',
                  file=self.tkconsole.stderr)
            InteractiveInterpreter.showsyntaxerror(self, filename)
            self.tkconsole.showprompt()
        else:
            self.runcode(code)
def runsource(self, source):
    "Extend base class method: Stuff the source in the line cache first"
    filename = self.stuffsource(source)
    self.more = 0
    # Temporarily promote SyntaxWarning to an error so dubious input is
    # reported; the saved filters are restored by runcode() or the
    # finally clause below, whichever runs first.
    self.save_warnings_filters = warnings.filters[:]
    warnings.filterwarnings(action="error", category=SyntaxWarning)
    # NOTE: the `unicode` type makes this branch Python 2 only; non-UTF-8
    # input is re-encoded with an explicit coding cookie prepended.
    if isinstance(source, unicode) and IOBinding.encoding != 'utf-8':
        try:
            source = '# -*- coding: %s -*-\n%s' % (
                IOBinding.encoding,
                source.encode(IOBinding.encoding))
        except UnicodeError:
            self.tkconsole.resetoutput()
            self.write("Unsupported characters in input\n")
            return
    try:
        # InteractiveInterpreter.runsource() calls its runcode() method,
        # which is overridden (see below)
        return InteractiveInterpreter.runsource(self, source, filename)
    finally:
        if self.save_warnings_filters is not None:
            warnings.filters[:] = self.save_warnings_filters
            self.save_warnings_filters = None
def stuffsource(self, source):
    """Register *source* in the line cache under a synthetic
    "<pyshell#N>" filename and return that filename."""
    pseudo_name = "<pyshell#%d>" % self.gid
    self.gid += 1
    source_lines = source.split("\n")
    linecache.cache[pseudo_name] = len(source) + 1, 0, source_lines, pseudo_name
    return pseudo_name
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
    """Extend base class method: Add Colorizing

    Color the offending position instead of printing it and pointing at it
    with a caret.
    """
    text = self.tkconsole.text
    stuff = self.unpackerror()
    if stuff:
        msg, lineno, offset, line = stuff
        # Translate (lineno, offset) into a Tk text index relative to
        # "iomark", the start of the current input region.
        if lineno == 1:
            pos = "iomark + %d chars" % (offset-1)
        else:
            pos = "iomark linestart + %d lines + %d chars" % \
                  (lineno-1, offset-1)
        text.tag_add("ERROR", pos)
        text.see(pos)
        char = text.get(pos)
        if char and char in IDENTCHARS:
            # Extend the highlight back to the start of the identifier.
            text.tag_add("ERROR", pos + " wordstart", pos)
        self.tkconsole.resetoutput()
        self.write("SyntaxError: %s\n" % str(msg))
    else:
        # Not an unpackable SyntaxError: fall back to the default report.
        self.tkconsole.resetoutput()
        InteractiveInterpreter.showsyntaxerror(self, filename)
    self.tkconsole.showprompt()
def unpackerror(self):
    """Return (msg, lineno, offset, line) for the pending SyntaxError,
    or None when the current exception is not an unpackable SyntaxError."""
    type, value, tb = sys.exc_info()
    ok = type is SyntaxError
    if ok:
        try:
            # Python 2 allows unpacking a SyntaxError instance directly
            # into its message and (filename, lineno, offset, line) parts.
            msg, (dummy_filename, lineno, offset, line) = value
            if not offset:
                offset = 0
        except:
            # Non-standard SyntaxError payload -- treat as not unpackable.
            ok = 0
    if ok:
        return msg, lineno, offset, line
    else:
        return None
def showtraceback(self):
    "Extend base class method to reset output properly"
    self.tkconsole.resetoutput()
    self.checklinecache()
    InteractiveInterpreter.showtraceback(self)
    # Optionally pop up the stack viewer immediately ("JIT" mode).
    if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
        self.tkconsole.open_stack_viewer()
def checklinecache(self):
    """Drop every linecache entry except the "<pyshell#N>" pseudo-files.

    Iterates over a snapshot of the keys: deleting from a dict while
    iterating its live key view raises RuntimeError on Python 3 (it only
    worked before because Python 2's .keys() returned a list).
    """
    c = linecache.cache
    for key in list(c.keys()):
        if key[:1] + key[-1:] != "<>":
            del c[key]
def runcommand(self, code):
    "Run the code without invoking the debugger"
    # The code better not raise an exception!
    if self.tkconsole.executing:
        self.display_executing_dialog()
        return 0
    if self.rpcclt:
        # Normal case: run in the subprocess via RPC.
        self.rpcclt.remotequeue("exec", "runcode", (code,), {})
    else:
        # No subprocess: Python 2 exec statement, run in-process.
        exec code in self.locals
    return 1
def runcode(self, code):
    """Override base class method: execute *code* via RPC, the debugger,
    or an in-process exec, with shell bookkeeping around it."""
    if self.tkconsole.executing:
        # A previous command is still running: restart the subprocess so
        # the new one gets a clean slate.
        self.interp.restart_subprocess()
    self.checklinecache()
    # Restore the warning filters saved by runsource().
    if self.save_warnings_filters is not None:
        warnings.filters[:] = self.save_warnings_filters
        self.save_warnings_filters = None
    debugger = self.debugger
    try:
        self.tkconsole.beginexecuting()
        if not debugger and self.rpcclt is not None:
            # Normal case: execute asynchronously in the subprocess.
            self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
                                                     (code,), {})
        elif debugger:
            debugger.run(code, self.locals)
        else:
            # Python 2 exec statement: no subprocess, run in-process.
            exec code in self.locals
    except SystemExit:
        if not self.tkconsole.closing:
            # Ask before letting user code tear down IDLE itself.
            if tkMessageBox.askyesno(
                    "Exit?",
                    "Do you want to exit altogether?",
                    default="yes",
                    master=self.tkconsole.text):
                raise
            else:
                self.showtraceback()
        else:
            raise
    except:
        if use_subprocess:
            # Exceptions here are IDLE bugs, not user-code errors.
            print("IDLE internal error in runcode()",
                  file=self.tkconsole.stderr)
            self.showtraceback()
            self.tkconsole.endexecuting()
        else:
            if self.tkconsole.canceled:
                self.tkconsole.canceled = False
                print("KeyboardInterrupt", file=self.tkconsole.stderr)
            else:
                self.showtraceback()
    finally:
        if not use_subprocess:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
def write(self, s):
    """Override base class method: route interpreter output to the
    shell's stderr stream."""
    err_stream = self.tkconsole.stderr
    err_stream.write(s)
def display_port_binding_error(self):
    """Modal error: the RPC socket could not be bound (no networking?)."""
    tkMessageBox.showerror(
        "Port Binding Error",
        "IDLE can't bind to a TCP/IP port, which is necessary to "
        "communicate with its Python execution server. This might be "
        "because no networking is installed on this computer. "
        "Run IDLE with the -n command line switch to start without a "
        "subprocess and refer to Help/IDLE Help 'Running without a "
        "subprocess' for further details.",
        master=self.tkconsole.text)
def display_no_subprocess_error(self):
    """Modal error: the execution subprocess never connected back."""
    tkMessageBox.showerror(
        "Subprocess Startup Error",
        "IDLE's subprocess didn't make connection. Either IDLE can't "
        "start a subprocess or personal firewall software is blocking "
        "the connection.",
        master=self.tkconsole.text)
def display_executing_dialog(self):
    """Modal error: a command is already running in the shell."""
    tkMessageBox.showerror(
        "Already executing",
        "The Python Shell window is already executing a command; "
        "please wait until it is finished.",
        master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
    """Override EditorWindow method: never remove the colorizer."""
    # The shell always colorizes, regardless of the buffer's filename.
    return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
    """Submit the current input region to the interpreter.

    Trailing spaces/tabs and at most one final newline are stripped so
    hitting Return twice can end a multi-line statement.
    """
    line = self.text.get("iomark", "end-1c")
    line = line.rstrip(" \t")
    if line.endswith("\n"):
        line = line[:-1].rstrip(" \t")
    self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
    """Store the current input in history and move "iomark" to the end."""
    source = self.text.get("iomark", "end-1c")
    if self.history:
        self.history.store(source)
    if self.text.get("end-2c") != "\n":
        # Make sure subsequent output starts on a fresh line.
        self.text.insert("end-1c", "\n")
    self.text.mark_set("iomark", "end-1c")
    self.set_line_and_column()
    # Python 2 relic: softspace is consulted by the print statement.
    sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super(PyShell, self).rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert', '<', 'iomark'):
return 'disabled'
return super(PyShell, self).rmenu_check_paste()
class PseudoFile(io.TextIOBase):
    """Minimal file-like base that routes shell I/O.

    *tags* names the stream ("stdin", "stdout", ...) and doubles as the
    Tk text tag applied to its output.
    """

    def __init__(self, shell, tags, encoding=None):
        self.shell = shell
        self.tags = tags
        self.softspace = 0  # Python 2 print-statement compatibility
        self._encoding = encoding

    @property
    def encoding(self):
        """Declared encoding of the pseudo-stream (may be None)."""
        return self._encoding

    @property
    def name(self):
        """File-style name, e.g. "<stdout>"."""
        return '<%s>' % self.tags

    def isatty(self):
        # The shell behaves like an interactive terminal.
        return True
class PseudoOutputFile(PseudoFile):
    """Writable pseudo-file that forwards text to the shell window."""

    def writable(self):
        return True

    def write(self, s):
        """Write *s* to the shell, tagged with this stream's tag.

        Subclasses of str/unicode/bytearray are downcast to the plain
        type first; anything else raises TypeError.
        NOTE: the `unicode` type makes this method Python 2 only.
        """
        if self.closed:
            raise ValueError("write to closed file")
        if type(s) not in (unicode, str, bytearray):
            # See issue #19481
            if isinstance(s, unicode):
                s = unicode.__getitem__(s, slice(None))
            elif isinstance(s, str):
                s = str.__str__(s)
            elif isinstance(s, bytearray):
                s = bytearray.__str__(s)
            else:
                raise TypeError('must be string, not ' + type(s).__name__)
        return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
    """Readable pseudo-file fed by the shell's readline()."""

    def __init__(self, shell, tags, encoding=None):
        PseudoFile.__init__(self, shell, tags, encoding)
        self._line_buffer = ''  # carry-over from a partially consumed line

    def readable(self):
        return True

    def read(self, size=-1):
        """Read up to *size* characters (all remaining input if size < 0)."""
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, int):
            raise TypeError('must be int, not ' + type(size).__name__)
        data = self._line_buffer
        self._line_buffer = ''
        if size < 0:
            # Drain the shell until it reports end of input.
            while True:
                chunk = self.shell.readline()
                if not chunk:
                    break
                data += chunk
            return data
        while len(data) < size:
            chunk = self.shell.readline()
            if not chunk:
                break
            data += chunk
        # Keep the overshoot for the next read.
        self._line_buffer = data[size:]
        return data[:size]

    def readline(self, size=-1):
        """Read one line, or at most *size* characters of it."""
        if self.closed:
            raise ValueError("read from closed file")
        if size is None:
            size = -1
        elif not isinstance(size, int):
            raise TypeError('must be int, not ' + type(size).__name__)
        line = self._line_buffer or self.shell.readline()
        if size < 0:
            size = len(line)
        eol = line.find('\n', 0, size)
        if eol >= 0:
            size = eol + 1  # include the newline
        self._line_buffer = line[size:]
        return line[:size]

    def close(self):
        # Closing stdin closes the whole shell window.
        self.shell.close()
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script, file=sys.stderr)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif TkVersion >= 8.5:
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(file=iconfile) for iconfile in iconfiles]
root.tk.call('wm', 'iconphoto', str(root), "-default", *icons)
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
# filename is a directory actually, disconsider it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic OS X Tk versions and print a warning
# message in the IDLE shell window; this is less intrusive
# than always opening a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand("print('%s')" % tkversionwarning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
_utils.py | import platform
import sys
import os
import queue
import socket
import threading
import subprocess
from typing import IO, Optional, Union
from adbutils.constant import DEFAULT_ADB_PATH
def check_file(fileName: str) -> bool:
    """Return True if *fileName* refers to an existing regular file.

    The previous implementation wrapped the argument in a no-op f-string;
    the path is now passed straight to os.path.isfile.
    """
    return os.path.isfile(fileName)
def get_std_encoding(stream):
    """Get encoding of the stream.

    Args:
        stream: any object; a truthy ``encoding`` attribute is used when present

    Returns:
        The stream's declared encoding, or ``sys.getfilesystemencoding()``.
    """
    declared = getattr(stream, "encoding", None)
    return declared if declared else sys.getfilesystemencoding()
def split_cmd(cmds) -> list:
    """Normalize a command into an argument list for subprocess.

    A string is split on whitespace (shlex is deliberately avoided so
    backslashes survive on Windows); any other iterable is copied into a
    fresh list.

    Args:
        cmds (str): command

    Returns:
        command list
    """
    if isinstance(cmds, str):
        return cmds.split()
    return list(cmds)
def _popen_kwargs() -> dict:
creationflags = 0
startupinfo = None
if sys.platform.startswith('win'):
try:
creationflags = subprocess.CREATE_NO_WINDOW # python 3.7+
except AttributeError:
creationflags = 0x8000000
return {
'creationflags': creationflags,
'startupinfo': startupinfo,
}
def get_adb_exe() -> str:
    """Return the adb executable to use.

    Prefers an ``adb`` found on $PATH (verified by running
    ``adb --version``); otherwise falls back to the bundled
    platform-specific binary from DEFAULT_ADB_PATH.

    Raises:
        RuntimeError: if no bundled adb supports this platform.
    """
    # find in $PATH
    cmds = ['adb', "--version"]
    try:
        with open(os.devnull, "w") as null:
            subprocess.check_call(
                cmds, stdout=null, stderr=subprocess.STDOUT, **_popen_kwargs()
            )
        adb_path = 'adb'
    except (FileNotFoundError, OSError, ValueError):
        # NOTE(review): subprocess.CalledProcessError (adb present but
        # exiting non-zero) is NOT caught here and would propagate --
        # confirm that is intended.
        system = platform.system()
        machine = platform.machine()
        # Try the machine-specific entry first, then the generic one.
        adb_path = DEFAULT_ADB_PATH.get(f'{system}-{machine}')
        if not adb_path:
            adb_path = DEFAULT_ADB_PATH.get(f'{system}')
        if not adb_path:
            raise RuntimeError(f"No adb executable supports this platform({system}-{machine}).")
    return adb_path
class NonBlockingStreamReader(object):
    """Read lines from a stream on a daemon thread so callers never block.

    Lines are pushed onto an internal queue; readline()/read() pop from
    it.  kill() asks the reader thread to stop.
    """
    # TODO: add a method to dump the stream output to a file while non-blocking

    def __init__(self, stream: IO, raise_EOF: Optional[bool] = False, print_output: bool = True,
                 print_new_line: bool = True):
        """
        Args:
            stream: stream to read from (its readline() is called repeatedly)
            raise_EOF: raise UnexpectedEndOfStream when the stream ends
            print_output: track the last line read (used for de-duplication)
            print_new_line: skip tracking a line identical to the previous one
        """
        self._s = stream
        self._q = queue.Queue()
        self._lastline = None
        self.name = id(self)

        def _populateQueue(_stream: IO, _queue: queue.Queue, kill_event: threading.Event):
            """Collect lines from '_stream' into '_queue' until EOF or kill."""
            while not kill_event.is_set():
                line = _stream.readline()
                # BUG FIX: the original tested `line is not None`, which is
                # always true -- readline() returns b'' at EOF, so the
                # thread spun forever queueing empty strings and read()
                # never terminated.  A falsy line now means end-of-stream.
                if line:
                    _queue.put(line)
                    if print_output:
                        if print_new_line and line == self._lastline:
                            continue
                        self._lastline = line
                elif kill_event.is_set():
                    break
                elif raise_EOF:
                    raise UnexpectedEndOfStream
                else:
                    break

        self._kill_event = threading.Event()
        self._t = threading.Thread(target=_populateQueue, args=(self._s, self._q, self._kill_event))
        self._t.daemon = True
        self._t.start()  # start collecting lines from the stream

    def readline(self, timeout: Union[int, float, None] = None):
        """Pop one line, waiting up to *timeout* seconds; None if none ready."""
        try:
            return self._q.get(block=timeout is not None, timeout=timeout)
        except queue.Empty:
            return None

    def read(self) -> bytes:
        """Drain everything currently queued into one bytes blob."""
        lines = []
        while True:
            line = self.readline()
            if line is None:
                break
            lines.append(line)
        return b"".join(lines)

    def kill(self) -> None:
        """Ask the reader thread to stop."""
        self._kill_event.set()
class UnexpectedEndOfStream(Exception):
    """Raised by NonBlockingStreamReader when the stream hits EOF and
    raise_EOF was requested."""
    pass
# FIFO of (func, args, kwargs) cleanup callbacks registered for later execution.
CLEANUP_CALLS = queue.Queue()


def reg_cleanup(func, *args, **kwargs):
    """Register *func* to be invoked later during cleanup.

    Args:
        func: callable to invoke at cleanup time
        *args: positional arguments for the call
        **kwargs: keyword arguments for the call

    Returns:
        None
    """
    CLEANUP_CALLS.put((func, args, kwargs))
class SafeSocket(object):
    """safe and exact recv & send

    send() loops until every byte is written; recv() buffers incoming
    data until exactly the requested number of bytes is available.
    """

    def __init__(self, sock=None):
        # Wrap an existing socket, or create a fresh TCP socket.
        if sock is None:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        self.buf = b""  # bytes received but not yet handed to the caller

    # PEP 3113 -- Removal of Tuple Parameter Unpacking
    # https://www.python.org/dev/peps/pep-3113/
    def connect(self, tuple_hp):
        """Connect to a (host, port) tuple."""
        host, port = tuple_hp
        self.sock.connect((host, port))

    def send(self, msg):
        """Send *msg* completely; raise socket.error if the peer is gone."""
        totalsent = 0
        while totalsent < len(msg):
            sent = self.sock.send(msg[totalsent:])
            if sent == 0:
                raise socket.error("socket connection broken")
            totalsent += sent

    def recv(self, size):
        """Receive exactly *size* bytes, blocking until available."""
        while len(self.buf) < size:
            trunk = self.sock.recv(min(size-len(self.buf), 4096))
            if trunk == b"":
                # Empty recv() means the peer closed the connection.
                raise socket.error("socket connection broken")
            self.buf += trunk
        ret, self.buf = self.buf[:size], self.buf[size:]
        return ret

    def recv_with_timeout(self, size, timeout=2):
        """recv(), but give up and return None after *timeout* seconds."""
        self.sock.settimeout(timeout)
        try:
            ret = self.recv(size)
        except socket.timeout:
            ret = None
        finally:
            # Restore blocking mode regardless of outcome.
            self.sock.settimeout(None)
        return ret

    def recv_nonblocking(self, size):
        """Non-blocking recv(); return None when no data is ready.

        NOTE(review): the errno values are Windows Winsock codes
        (10035 = WSAEWOULDBLOCK, 10053/10054 = aborted/reset); on POSIX
        these branches re-raise -- confirm the intended platforms.
        """
        self.sock.settimeout(0)
        try:
            ret = self.recv(size)
        except socket.error as e:
            # 10035 no data when nonblocking
            if e.args[0] == 10035:  # errno.EWOULDBLOCK
                ret = None
            # 10053 connection abort by client
            # 10054 connection reset by peer
            elif e.args[0] in [10053, 10054]:  # errno.ECONNABORTED:
                raise
            else:
                raise
        return ret

    def close(self):
        """Close the underlying socket."""
        self.sock.close()
|
console.py | from __future__ import absolute_import, division, print_function
import asyncio
import copy
import json
import logging
import os
import random
import string
import threading
from code import InteractiveConsole
from collections import OrderedDict
from decimal import Decimal
from os.path import exists, join
import eth_utils
from eth_account import Account
from eth_keys import keys
from hexbytes import HexBytes
from mnemonic import Mnemonic
from trezorlib.customer_ui import CustomerUI
from electrum import MutiBase, bitcoin, commands, constants, daemon, ecc, keystore, paymentrequest, simple_config, util
from electrum.address_synchronizer import TX_HEIGHT_FUTURE, TX_HEIGHT_LOCAL
from electrum.bip32 import BIP32Node
from electrum.bip32 import convert_bip32_path_to_list_of_uint32 as parse_path
from electrum.bip32 import get_uncompressed_key
from electrum.bitcoin import COIN, is_address
from electrum.constants import read_json
from electrum.eth_wallet import Eth_Wallet, Imported_Eth_Wallet, Standard_Eth_Wallet
from electrum.i18n import _, set_language
from electrum.interface import ServerAddr
from electrum.keystore import (
Hardware_KeyStore,
Imported_KeyStore,
bip44_derivation,
bip44_eth_derivation,
purpose48_derivation,
)
from electrum.mnemonic import Wordlist
from electrum.network import BestEffortRequestFailed, TxBroadcastError
from electrum.plugin import Plugins
from electrum.pywalib import InvalidValueException, PyWalib
from electrum.storage import WalletStorage
# from electrum.plugins.trezor.clientbase import TrezorClientBase
from electrum.transaction import PartialTransaction, PartialTxOutput, SerializationError, Transaction, tx_from_any
from electrum.util import (
DecimalEncoder,
DerivedWalletLimit,
FailedGetTx,
FailedToSwitchWallet,
Fiat,
FileAlreadyExist,
InvalidBip39Seed,
InvalidPassword,
NotEnoughFunds,
NotEnoughFundsStr,
NotSupportExportSeed,
Ticker,
UnavaiableHdWallet,
UnavailableBtcAddr,
UnavailableEthAddr,
UnavailablePrivateKey,
UnavailablePublicKey,
UnsupportedCurrencyCoin,
UserCancel,
bfh,
create_and_start_event_loop,
)
from electrum.util import user_dir as get_dir
from electrum.wallet import Imported_Wallet, Standard_Wallet, Wallet
from electrum.wallet_db import WalletDB
from electrum_gui.android import hardware, helpers, wallet_context
from electrum_gui.common import the_begging
from ..common.basic.functional.text import force_text
from ..common.basic.orm.database import db
from ..common.coin import manager as coin_manager
from ..common.price import manager as price_manager
from .create_wallet_info import CreateWalletInfo
from .derived_info import DerivedInfo
from .tx_db import TxDb
# Module-level logger for this console module.
log_info = logging.getLogger(__name__)
# True when running under Android; flipped to False below when the iOS marker is set.
IS_ANDROID = True
if "iOS_DATA" in os.environ:
    from .ioscallback import CallHandler  # iOS-only callback bridge
    IS_ANDROID = False
# Index positions of purpose/account/address-index inside a parsed derivation-path list.
PURPOSE_POS = 1
ACCOUNT_POS = 3
INDEX_POS = 5
# Multiplier used to turn a block count into an ETA (presumably minutes — TODO confirm).
BTC_BLOCK_INTERVAL_TIME = 10
# Default BIP purpose (49 = p2wpkh-p2sh, see PURPOSE_TO_ADDRESS_TYPE below).
DEFAULT_ADDR_TYPE = 49
# Global periodic status ticker; created in AndroidCommands.__init__, cancelled in stop().
ticker = None
# BIP purpose number -> electrum script type.
PURPOSE_TO_ADDRESS_TYPE = {
    44: 'p2pkh',
    49: 'p2wpkh-p2sh',
    84: 'p2wpkh',
}
class AndroidConsole(InteractiveConsole):
    """Interactive Python console exposed to the app.

    `interact` must be run on a background thread, because it blocks
    waiting for input.
    """

    def __init__(self, app, cmds):
        # Expose the commands object, the app context, one wrapper per
        # command, and a `help` object inside the REPL namespace.
        ns = {"c": cmds, "context": app}
        for command_name in all_commands:
            ns[command_name] = CommandWrapper(cmds, command_name)
        ns["help"] = Help()
        InteractiveConsole.__init__(self, locals=ns)

    def interact(self):
        banner_text = (
            _("WARNING!")
            + "\n"
            + _(
                "Do not enter code here that you don't understand. Executing the wrong "
                "code could lead to your coins being irreversibly lost."
            )
            + "\n"
            + "Type 'help' for available commands and variables."
        )
        try:
            InteractiveConsole.interact(self, banner=banner_text)
        except SystemExit:
            pass
class CommandWrapper:
    """Callable proxy forwarding to a named method of a commands object."""

    def __init__(self, cmds, name):
        # Target object and the attribute name to invoke on each call.
        self.cmds = cmds
        self.name = name

    def __call__(self, *args, **kwargs):
        target = getattr(self.cmds, self.name)
        return target(*args, **kwargs)
class Help:
    """REPL `help` object: both repr() and calling it render command help."""

    def __repr__(self):
        return self.help()

    def __call__(self, *args):
        print(self.help(*args))

    def help(self, name_or_wrapper=None):
        """Return the command overview, or details for one command/wrapper."""
        if name_or_wrapper is None:
            rows = [f"  {cmd}" for _name, cmd in sorted(all_commands.items())]
            return (
                "Commands:\n"
                + "\n".join(rows)
                + "\nType help(<command>) for more details.\n"
                "The following variables are also available: "
                "c.config, c.daemon, c.network, c.wallet, context"
            )
        if isinstance(name_or_wrapper, CommandWrapper):
            cmd = all_commands[name_or_wrapper.name]
        else:
            cmd = all_commands[name_or_wrapper]
        return f"{cmd}\n{cmd.description}"
def verify_address(address) -> bool:
    """Return True if *address* is a valid BTC address on the active network."""
    return is_address(address, net=constants.net)
def verify_xpub(xpub: str) -> bool:
    """Return True if *xpub* parses as a BIP32 extended public key."""
    return keystore.is_bip32_key(xpub)
# Adds additional commands which aren't available over JSON RPC.
class AndroidCommands(commands.Commands):
_recovery_flag = True
    def __init__(self, android_id=None, config=None, user_dir=None, callback=None, chain_type="mainnet"):
        """Boot the wallet backend: config, daemon, chains, callbacks, ticker.

        :param android_id: device identifier; a persisted random suffix is appended
        :param config: optional SimpleConfig; defaults to an auto-connect config
        :param user_dir: data directory; electrum's default user dir when None
        :param callback: Android callback handler (iOS allocates its own below)
        :param chain_type: requested chain; overwritten below from constants.net
        :raises BaseException: when another daemon already holds the lockfile
        """
        self.asyncio_loop, self._stop_loop, self._loop_thread = create_and_start_event_loop()
        self.config = config or simple_config.SimpleConfig({"auto_connect": True})
        if user_dir is None:
            self.user_dir = get_dir()
        else:
            self.user_dir = user_dir
        fd = daemon.get_file_descriptor(self.config)
        if not fd:
            # Lockfile exists: another daemon instance owns this config dir.
            raise BaseException(("Daemon already running, Don't start the wallet repeatedly"))
        set_language(self.config.get("language", "zh_CN"))
        # Initialize here rather than in start() so the DaemonModel has a chance to register
        # its callback before the daemon threads start.
        self.daemon = daemon.Daemon(self.config, fd)
        self.coins = read_json("eth_servers.json", {})
        # chain_type is derived from the compiled-in network; the argument is ignored.
        if constants.net.NET == "Bitcoin":
            chain_type = "mainnet"
        else:
            chain_type = "testnet"
        self.pywalib = PyWalib(self.config, chain_type=chain_type, path=self._tx_list_path(name="tx_info.db"))
        self.txdb = TxDb(path=self._tx_list_path(name="tx_info.db"))
        self.pywalib.set_server(self.coins["eth"])
        self.network = self.daemon.network
        self.daemon_running = False
        self.wizard = None
        self.plugin = Plugins(self.config, "cmdline")
        self.label_plugin = self.plugin.load_plugin("labels")
        self.label_flag = self.config.get("use_labels", False)
        self.callbackIntent = None
        self.hd_wallet = None
        self.check_pw_wallet = None
        self.wallet = None
        self.client = None
        self.recovery_wallets = {}
        self.path = ""
        self.replace_wallet_info = {}
        # Per-install random suffix mixed into android_id; generated once, then persisted.
        ran_str = self.config.get("ra_str", None)
        if ran_str is None:
            ran_str = "".join(random.sample(string.ascii_letters + string.digits, 8))
            self.config.set_key("ra_str", ran_str)
        self.android_id = android_id + ran_str
        self.wallet_context = wallet_context.WalletContext(self.config, self.user_dir)
        if self.network:
            # Subscribe to the daemon/network events that should refresh the UI.
            interests = [
                "wallet_updated",
                "network_updated",
                "blockchain_updated",
                "status",
                "new_transaction",
                "verified",
                "set_server_status",
            ]
            util.register_callback(self.on_network_event, interests)
            util.register_callback(self.on_fee, ["fee"])
            # self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
            util.register_callback(self.on_quotes, ["on_quotes"])
            util.register_callback(self.on_history, ["on_history"])
        self.fiat_unit = self.daemon.fx.ccy if self.daemon.fx.is_enabled() else ""
        self.decimal_point = self.config.get("decimal_point", util.DECIMAL_POINT_DEFAULT)
        self.hw_info = {}
        # Resolve display base unit (BTC/mBTC/...) from the configured decimal point.
        for k, v in util.base_units_inverse.items():
            if k == self.decimal_point:
                self.base_unit = v
        self.old_history_len = 0
        self.old_history_info = []
        self.num_zeros = int(self.config.get("num_zeros", 0))
        self.config.set_key("log_to_file", True, save=True)
        self.rbf = self.config.get("use_rbf", True)
        self.ccy = self.daemon.fx.get_currency()
        self.pre_balance_info = ""
        self.addr_index = 0
        self.rbf_tx = ""
        self.m = 0
        self.n = 0
        self.token_list_by_chain = OrderedDict()
        self.config.set_key("auto_connect", True, True)
        # Periodic (5s) status refresh; module-global so stop() can cancel it.
        global ticker
        ticker = Ticker(5.0, self.update_status)
        ticker.start()
        if IS_ANDROID:
            if callback is not None:
                self.set_callback_fun(callback)
        else:
            # iOS: allocate the native callback handler ourselves.
            self.my_handler = CallHandler.alloc().init()
            self.set_callback_fun(self.my_handler)
        self.start_daemon()
        self.get_block_info()
        the_begging.initialize()
        self.trezor_manager = hardware.TrezorManager(self.plugin)
    def __getattr__(self, name):
        # Delegate hardware-wallet commands to the TrezorManager so its exposed
        # commands look like methods on AndroidCommands.
        # NOTE(review): if `trezor_manager` itself is missing (e.g. a partially
        # initialised instance), this re-enters __getattr__ and recurses.
        if name in self.trezor_manager.exposed_commands:
            return getattr(self.trezor_manager, name)
        raise AttributeError
def set_language(self, language):
"""
Set the language of error messages displayed to users
:param language: zh_CN/en_UK as string
"""
set_language(language)
self.config.set_key("language", language)
# BEGIN commands from the argparse interface.
    def stop_loop(self):
        """Stop the background asyncio loop and join its thread (1s timeout)."""
        self.asyncio_loop.call_soon_threadsafe(self._stop_loop.set_result, 1)
        self._loop_thread.join(timeout=1)
def on_fee(self, event, *arg):
try:
self.fee_status = self.config.get_fee_status()
except BaseException as e:
raise e
# def on_fee_histogram(self, *args):
# self.update_history()
    def on_quotes(self, d):
        # Fiat quotes changed: refresh the displayed balances.
        self.update_status()
    def on_history(self, d):
        # Exchange-rate history changed: drop the wallet's cached coin prices.
        if self.wallet:
            self.wallet.clear_coin_price_cache()
        # self.update_history()
    def update_status(self):
        """Push a balance/status summary for the current wallet to the UI callback.

        ETH-family wallets report main-coin balance plus a fiat-sorted token
        list; BTC wallets report confirmed+unconfirmed balance (only when the
        network is connected and the wallet is synced).
        """
        if self.wallet is None:
            return
        coin = self.wallet.coin
        address = self.wallet.get_addresses()[0]
        out = dict()
        if coin in self.coins:  # eth base
            address = self.pywalib.web3.toChecksumAddress(address)
            balance_info = self.wallet.get_all_balance(address, self.coins[coin]["symbol"])
            # Attach fiat values, then split the chain's main coin from its tokens.
            balance_info = self._fill_balance_info_with_fiat(self.wallet.coin, balance_info)
            sum_fiat = sum(i.get("fiat", 0) for i in balance_info.values())
            main_coin_balance_info = balance_info.pop(coin, dict())
            out["coin"] = coin
            out["address"] = address
            out["balance"] = main_coin_balance_info.get("balance", "0")
            out["fiat"] = f"{self.daemon.fx.ccy_amount_str(main_coin_balance_info.get('fiat') or 0, True)} {self.ccy}"
            # Tokens richest-first: by fiat value, then by raw balance.
            sorted_tokens = sorted(
                balance_info.values(),
                key=lambda i: (Decimal(i.get("fiat", 0)), Decimal(i.get("balance", 0))),
                reverse=True,
            )
            out["tokens"] = [
                {
                    "coin": i.get("symbol"),
                    "address": i.get("address"),
                    "balance": i.get("balance", "0"),
                    "fiat": f"{self.daemon.fx.ccy_amount_str(i.get('fiat') or 0, True)} {self.ccy}",
                }
                for i in sorted_tokens
            ]
            out["sum_fiat"] = f"{self.daemon.fx.ccy_amount_str(sum_fiat, True)} {self.ccy}"
        elif (
            self.network
            and self.network.is_connected()
            and self.network.get_server_height() != 0
            and self.wallet.up_to_date
        ):  # btc
            # c/u/x = confirmed, unconfirmed, unmatured satoshi amounts.
            c, u, x = self.wallet.get_balance()
            show_balance = c + u
            out["coin"] = "btc"
            out["address"] = address
            out["balance"] = self.format_amount(show_balance)
            out["fiat"] = self.daemon.fx.format_amount_and_units(show_balance) if self.daemon.fx else None
            if u:
                out["unconfirmed"] = self.format_amount(u, is_diff=True).strip()
            if x:
                out["unmatured"] = self.format_amount(x, is_diff=True).strip()
        if out and self.callbackIntent is not None:
            self.callbackIntent.onCallback("update_status=%s" % json.dumps(out, cls=DecimalEncoder))
def get_remove_flag(self, tx_hash):
height = self.wallet.get_tx_height(tx_hash).height
if height in [TX_HEIGHT_FUTURE, TX_HEIGHT_LOCAL]:
return True
else:
return False
    def remove_local_tx(self, delete_tx):
        """Remove a local transaction and everything that spends from it.

        :param delete_tx: tx_hash that you need to delete
        :return: None
        :raises BaseException: re-raised from the wallet layer
        """
        try:
            # Collect the tx plus all dependent (child) transactions.
            to_delete = {delete_tx}
            to_delete |= self.wallet.get_depending_transactions(delete_tx)
            for tx in to_delete:
                self.wallet.remove_transaction(tx)
                # Also push a "deltx" label-sync event per removed tx.
                self.delete_tx(tx)
            self.wallet.save_db()
        except BaseException as e:
            raise e
        # need to update at least: history_list, utxo_list, address_list
        # self.parent.need_update.set()
def delete_tx(self, hash):
try:
if self.label_flag and self.wallet.wallet_type != "standard":
self.label_plugin.push_tx(self.wallet, "deltx", hash)
except BaseException as e:
if e != "Could not decode:":
log_info.info("push_tx delete_tx error {}.".format(e))
pass
def get_wallet_info(self):
wallet_info = {}
wallet_info["balance"] = self.balance
wallet_info["fiat_balance"] = self.fiat_balance
wallet_info["name"] = self.wallet.get_name()
return json.dumps(wallet_info)
def on_network_event(self, event, *args):
if event == "set_server_status" and self.callbackIntent is not None:
self.callbackIntent.onCallback("set_server_status=%s" % args[0])
elif event in (
"network_updated",
"wallet_updated",
"blockchain_updated",
"status",
"new_transaction",
"verified",
):
self.update_status()
    def daemon_action(self):
        # Runs on the background thread created by start_daemon(); blocks in run_daemon.
        self.daemon_running = True
        self.daemon.run_daemon()
def start_daemon(self):
t1 = threading.Thread(target=self.daemon_action)
t1.setDaemon(True)
t1.start()
def status(self):
"""Get daemon status"""
try:
self._assert_daemon_running()
except Exception as e:
raise BaseException(e)
return self.daemon.run_daemon({"subcommand": "status"})
    def stop(self):
        """Stop the daemon and all background services.

        Shutdown order: daemon, asyncio loop, plugins, the module-global
        status ticker, then auxiliary services.
        """
        try:
            self._assert_daemon_running()
        except Exception as e:
            raise BaseException(e)
        self.daemon.stop()
        self.daemon_running = False
        self.stop_loop()
        self.plugin.stop()
        global ticker
        ticker.cancel()
        the_begging.terminate()
def set_hd_wallet(self, wallet_obj):
if self.hd_wallet is None:
self.hd_wallet = wallet_obj
    def load_wallet(self, name, password=None):
        """
        Load a wallet into the daemon (no-op if already loaded).
        :param name: wallet name as a string
        :param password: the wallet password as a string
        :return: None (silently returns when the wallet file needs migration)
        :raises BaseException: daemon not running / file missing
        :raises InvalidPassword: storage is encrypted and no password given
        """
        try:
            self._assert_daemon_running()
        except Exception as e:
            raise BaseException(e)
        path = self._wallet_path(name)
        wallet = self.daemon.get_wallet(path)
        if not wallet:
            # Not loaded yet: open the storage file from disk.
            storage = WalletStorage(path)
            if not storage.file_exists():
                # (_("Your {} were successfully imported").format(title))
                raise BaseException(_("Failed to load file {}".format(path)))
            if storage.is_encrypted():
                if not password:
                    raise InvalidPassword()
                storage.decrypt(password)
            db = WalletDB(storage.read(), manual_upgrades=False)
            # Bail out silently on wallet files that need migration/user action.
            if db.requires_split():
                return
            if db.requires_upgrade():
                return
            if db.get_action():
                return
            wallet_type = db.data["wallet_type"]
            coin = wallet_context.split_coin_from_wallet_type(wallet_type)
            if coin in self.coins:
                # ETH-family wallet: instantiate against that chain's server.
                with self.pywalib.override_server(self.coins[coin]):
                    if "importe" in wallet_type:  # substring matches "imported" types
                        wallet = Eth_Wallet(db, storage, config=self.config)
                    else:
                        index = 0
                        if "address_index" in db.data:
                            index = db.data["address_index"]
                        wallet = Standard_Eth_Wallet(db, storage, config=self.config, index=index)
            else:
                wallet = Wallet(db, storage, config=self.config)
                wallet.start_network(self.network)
            if self.wallet_context.is_hd(name):
                self.set_hd_wallet(wallet)
            self.daemon.add_wallet(wallet)
def close_wallet(self, name=None):
"""Close a wallet"""
try:
self._assert_daemon_running()
except Exception as e:
raise BaseException(e)
self.daemon.stop_wallet(self._wallet_path(name))
def set_syn_server(self, flag):
"""
Enable/disable sync server
:param flag: flag as bool
:return: raise except if error
"""
try:
self.label_flag = flag
self.config.set_key("use_labels", bool(flag))
if (
self.label_flag
and self.wallet
and self.wallet.wallet_type != "btc-standard"
and self.wallet.wallet_type != "eth-standard"
):
self.label_plugin.load_wallet(self.wallet)
except Exception as e:
raise BaseException(e)
    def set_callback_fun(self, callbackIntent):
        """Register the platform callback object that receives UI event strings."""
        self.callbackIntent = callbackIntent
    def set_multi_wallet_info(self, name, m, n):
        """Start a fresh m-of-n multisig creation wizard for wallet *name*."""
        try:
            self._assert_daemon_running()
        except Exception as e:
            raise BaseException(e)
        # Drop any wizard left over from a previous attempt.
        if self.wizard is not None:
            self.wizard = None
        self.wizard = MutiBase.MutiBase(self.config)
        path = self._wallet_path(name)
        self.wizard.set_multi_wallet_info(path, m, n)
        # Remember the threshold for later steps (add_xpub / create_multi_wallet).
        self.m = m
        self.n = n
    def add_xpub(self, xpub, device_id=None, account_id=0, type=84, coin="btc"):
        """Feed one cosigner xpub into the multisig wizard.

        :param xpub: extended public key; converted to p2wsh form when n >= 2
        :param device_id: hardware device id the xpub came from, if any
        :param account_id: BIP44 account index
        :param type: purpose/script type (shadows builtin ``type``; kept for API compat)
        :param coin: "btc" or an eth-family coin key from self.coins
        """
        try:
            self._assert_daemon_running()
            self._assert_wizard_isvalid()
            # Multisig (n >= 2) requires the p2wsh form of the key.
            if BIP32Node.from_xkey(xpub).xtype != "p2wsh" and self.n >= 2:
                xpub = BIP32Node.get_p2wsh_from_other(xpub)
            coinid = None
            if coin in self.coins:
                coinid = self.coins[coin]["coinId"]
            self.wizard.restore_from_xpub(xpub, device_id, account_id, type=type, coin=coin, coinid=coinid)
        except Exception as e:
            raise BaseException(e)
def delete_xpub(self, xpub):
"""
Delete xpub when create multi-signature wallet
:param xpub: WIF pubkey
:return:
"""
try:
self._assert_daemon_running()
self._assert_wizard_isvalid()
self.wizard.delete_xpub(xpub)
except Exception as e:
raise BaseException(e)
def get_keystores_info(self):
try:
self._assert_daemon_running()
self._assert_wizard_isvalid()
ret = self.wizard.get_keystores_info()
except Exception as e:
raise BaseException(e)
return ret
def set_sync_server_host(self, ip, port):
"""
Set sync server host/port
:param ip: the server host (exp..."127.0.0.1")
:param port: the server port (exp..."port")
:return: raise except if error
"""
try:
if self.label_flag:
self.label_plugin.set_host(ip, port)
self.config.set_key("sync_server_host", "%s:%s" % (ip, port))
except BaseException as e:
raise e
def get_sync_server_host(self):
"""
Get sync server host,you can pull label/xpubs/tx to server
:return: ip+port like "39.105.86.163:8080"
"""
try:
return self.config.get("sync_server_host", "39.105.86.163:8080")
except BaseException as e:
raise e
def get_cosigner_num(self):
try:
self._assert_daemon_running()
self._assert_wizard_isvalid()
except Exception as e:
raise BaseException(e)
return self.wizard.get_cosigner_num()
    def create_multi_wallet(self, name, hd=False, hide_type=False, coin="btc", index=0) -> str:
        """Finalize the wizard into an on-disk multisig wallet and select it.

        :param name: display name for the new wallet
        :param hd: HD-derived flag (currently only affects the commented-out type tag)
        :param hide_type: when True, do not record the wallet type (hidden wallet)
        :param coin: "btc" or an eth-family coin key
        :param index: eth address index for Standard_Eth_Wallet
        :return: JSON wallet-creation info
        :raises BaseException: wizard/daemon invalid, or wallet file already exists
        """
        try:
            self._assert_daemon_running()
            self._assert_wizard_isvalid()
            # Create storage under a temp name first; moved to wallet.identity below.
            temp_path = helpers.get_temp_file()
            path = self._wallet_path(temp_path)
            wallet_type = "%s-hw-%s-%s" % (coin, self.m, self.n)
            storage, db = self.wizard.create_storage(path=path, password=None, coin=coin)
        except Exception as e:
            raise BaseException(e)
        if storage:
            if "btc" == coin:
                wallet = Wallet(db, storage, config=self.config)
                wallet.set_derived_master_xpub(self.hw_info["xpub"])
            else:
                wallet = Standard_Eth_Wallet(db, storage, config=self.config, index=index)
                wallet.coin = coin
            wallet_storage_path = self._wallet_path(wallet.identity)
            if self.daemon.get_wallet(wallet_storage_path) is not None:
                raise BaseException(FileAlreadyExist())
            wallet.status_flag = "btc-hw-%s-%s" % (self.m, self.n)
            wallet.hide_type = hide_type
            wallet.set_name(name)
            wallet.storage.set_path(wallet_storage_path)
            wallet.save_db()
            self.daemon.add_wallet(wallet)
            # Encrypt storage with the per-install android_id; no user password yet.
            wallet.update_password(old_pw=None, new_pw=None, str_pw=self.android_id, encrypt_storage=True)
            if "btc" == coin:
                wallet.start_network(self.daemon.network)
            # if hd:
            #     wallet_type = "btc-hd-hw-%s-%s" % (self.m, self.n)
            # else:
            wallet_type = "%s-hw-derived-%s-%s" % (coin, self.m, self.n)
            if not hide_type:
                self.wallet_context.set_wallet_type(wallet.identity, wallet_type)
            self.wallet = wallet
            self.wallet_name = wallet.basename()
            # self.select_wallet(self.wallet_name)
            # if self.label_flag and not hide_type:
            #     wallet_name = ""
            #     if wallet_type[0:1] == "1":
            #         wallet_name = name
            #     else:
            #         wallet_name = "共管钱包"
            #     self.label_plugin.create_wallet(self.wallet, wallet_type, wallet_name)
        self.wizard = None
        wallet_info = CreateWalletInfo.create_wallet_info(coin_type="btc", name=self.wallet_name)
        out = self.get_create_info_by_json(wallet_info=wallet_info)
        return json.dumps(out)
    def pull_tx_infos(self):
        """
        Get real-time multi-signature transaction info from sync_server
        and save each pulled tx locally; per-tx failures are collected,
        not raised.
        """
        try:
            self._assert_wallet_isvalid()
            if self.label_flag and self.wallet.wallet_type != "standard":
                data = self.label_plugin.pull_tx(self.wallet)
                data_list = json.loads(data)
                except_list = []
                # Oldest first so dependent txs are stored in order.
                data_list.reverse()
                for txinfo in data_list:
                    try:
                        tx = tx_from_any(txinfo["tx"])
                        tx.deserialize()
                        self.do_save(tx)
                    except BaseException as e:
                        # Record the failure and keep processing the rest.
                        temp_data = {}
                        temp_data["tx_hash"] = txinfo["tx_hash"]
                        temp_data["error"] = str(e)
                        except_list.append(temp_data)
                        pass
                # return json.dumps(except_list)
                # self.sync_timer = threading.Timer(5.0, self.pull_tx_infos)
                # self.sync_timer.start()
        except BaseException as e:
            raise BaseException(e)
def bulk_create_wallet(self, wallets_info):
"""
Create wallets in bulk
:param wallets_info:[{m,n,name,[xpub1,xpub2,xpub3]}, ....]
:return:
"""
wallets_list = json.loads(wallets_info)
create_failed_into = {}
for m, n, name, xpubs in wallets_list:
try:
self.import_create_hw_wallet(name, m, n, xpubs)
except BaseException as e:
create_failed_into[name] = str(e)
return json.dumps(create_failed_into)
    def import_create_hw_wallet(self, name, m, n, xpubs, hide_type=False, hd=False, path="bluetooth", coin="btc"):
        """
        Create a wallet
        :param name: wallet name as string
        :param m: number of consigner as string
        :param n: number of signers as string
        :param xpubs: all xpubs as [[xpub1, device_id], [xpub2, device_id],....]
        :param hide_type: whether to create a hidden wallet as bool
        :param hd: whether to create hd wallet as bool
        :param path: hardware transport path, e.g. "bluetooth"
        :param coin: btc/eth/bsc as string
        :return: json like {'seed':''
                            'wallet_info':''
                            'derived_info':''}
        """
        try:
            if hd:
                # HD flow: recover derived wallets from the hardware root xpub instead.
                return self.recovery_hd_derived_wallet(xpub=self.hw_info["xpub"], hw=True, path=path)
            self.set_multi_wallet_info(name, m, n)
            xpubs_list = json.loads(xpubs)
            for xpub_info in xpubs_list:
                if len(xpub_info) == 2:
                    # [xpub, device_id] pair
                    self.add_xpub(
                        xpub_info[0],
                        xpub_info[1],
                        account_id=self.hw_info["account_id"],
                        type=self.hw_info["type"],
                        coin=coin,
                    )
                else:
                    # bare xpub string
                    self.add_xpub(
                        xpub_info, account_id=self.hw_info["account_id"], type=self.hw_info["type"], coin=coin
                    )
            wallet_name = self.create_multi_wallet(
                name, hd=hd, hide_type=hide_type, coin=coin, index=self.hw_info["account_id"]
            )
            if len(self.hw_info) != 0:
                # Record the derivation path so this wallet can be re-derived later.
                bip39_path = self.get_coin_derived_path(self.hw_info["account_id"], coin=coin)
                self.update_devired_wallet_info(bip39_path, self.hw_info["xpub"] + coin.lower(), name, coin)
            return wallet_name
        except BaseException as e:
            raise e
    def get_wallet_info_from_server(self, xpub):
        """
        Get all wallet info that created by the xpub
        :param xpub: xpub from read by hardware as str
        :return: JSON list of wallet records (None when label sync is disabled)
        """
        try:
            if self.label_flag:
                Vpub_data = []
                # Multisig keys start with Vpub (testnet) / Zpub (mainnet):
                # pull their records first, then also query the p2wpkh form.
                title = "Vpub" if constants.net.TESTNET else "Zpub"
                if xpub[0:4] == title:
                    Vpub_data = json.loads(self.label_plugin.pull_xpub(xpub))
                    xpub = BIP32Node.get_p2wpkh_from_p2wsh(xpub)
                vpub_data = json.loads(self.label_plugin.pull_xpub(xpub))
                # NOTE(review): Vpub_data is initialised to [], so the
                # "is not None" guard below can never be False.
                return json.dumps(Vpub_data + vpub_data if Vpub_data is not None else vpub_data)
        except BaseException as e:
            raise e
def get_default_fee_status(self):
"""
Get default fee,now is ETA, for btc only
:return: The fee info as 180sat/byte when you set regtest net and the mainnet is obtained in real time
"""
try:
x = 1
self.config.set_key("mempool_fees", x == 2)
self.config.set_key("dynamic_fees", x > 0)
return self.config.get_fee_status()
except BaseException as e:
raise e
def get_amount(self, amount):
try:
x = Decimal(str(amount))
except BaseException:
return None
# scale it to max allowed precision, make it an int
power = pow(10, self.decimal_point)
max_prec_amount = int(power * x)
return max_prec_amount
def set_dust(self, dust_flag):
"""
Enable/disable use dust
:param dust_flag: as bool
:return:
"""
if dust_flag != self.config.get("dust_flag", True):
self.dust_flag = dust_flag
self.config.set_key("dust_flag", self.dust_flag)
    def parse_output(self, outputs):
        """Convert a JSON list of {address: amount} dicts into PartialTxOutput objects.

        Amounts other than "!" (electrum's spend-max marker) are converted to
        satoshis and dust-checked (<= 546 sat is rejected).
        """
        all_output_add = json.loads(outputs)
        outputs_addrs = []
        for key in all_output_add:
            for address, amount in key.items():
                if amount != "!":
                    amount = self.get_amount(amount)
                    # NOTE(review): get_amount returns None for an unparsable
                    # amount, which would make this comparison raise TypeError.
                    if amount <= 546:
                        raise BaseException(_("Dust transaction"))
                outputs_addrs.append(PartialTxOutput.from_address_and_value(address, amount))
        return outputs_addrs
    def get_coins(self, coins_info):
        """Select the UTXOs chosen by the user in coin-control mode.

        :param coins_info: parsed JSON describing the chosen utxos
        :return: list of matching coin objects
        """
        coins = []
        # NOTE(review): this iterates self.coins, which __init__ fills with the
        # eth server-config dict — its elements are strings with no .to_json().
        # It most likely should iterate the wallet's spendable coins instead;
        # confirm against callers before relying on custom coin selection.
        for utxo in self.coins:
            info = utxo.to_json()
            temp_utxo = {}
            temp_utxo[info["prevout_hash"]] = info["address"]
            if coins_info.__contains__(temp_utxo):
                coins.append(utxo)
        return coins
def format_return_data(self, feerate, size, block):
fee = float(feerate / 1000) * size
ret_data = {
"fee": self.format_amount(fee),
"feerate": feerate / 1000,
"time": block * BTC_BLOCK_INTERVAL_TIME,
"fiat": self.daemon.fx.format_amount_and_units(fee) if self.daemon.fx else None,
"size": size,
}
return ret_data
    def get_default_fee_info(self, feerate=None, coin="btc", eth_tx_info=None):
        """
        Get default fee info for btc
        :param feerate: Custom rates need to be sapcified as true
        :param coin: btc or eth, btc default
        :param eth_tx_info: optional, dict contains one of: to_address, contract_address, value, data
        :return:
            if coin is "btc":
              if feerate is true:
                return data like {"customer":{"fee":"","feerate":, "time":"", "fiat":"", "size":""}}
              if feerate is None:
                return data like {"slow":{"fee":"","feerate":, "time":"", "fiat":"", "size":""},
                                "normal":{"fee":"","feerate":, "time":"", "fiat":"", "size":""},
                                "fast":{"fee":"","feerate":, "time":"", "fiat":"", "size":""},
                                "slowest":{"fee":"","feerate":, "time":"", "fiat":"", "size":""}}
            else:
                return data like
                {"rapid": {"gas_price": 87, "time": 0.25, "gas_limit": 40000, "fee": "0.00348", "fiat": "4.77 USD"},
                "fast": {"gas_price": 86, "time": 1, "gas_limit": 40000, "fee": "0.00344", "fiat": "4.71 USD"},
                "normal": {"gas_price": 79, "time": 3, "gas_limit": 40000, "fee": "0.00316", "fiat": "4.33 USD"},
                "slow": {"gas_price": 72, "time": 10, "gas_limit": 40000, "fee": "0.00288", "fiat": "3.95 USD"}}
        """
        self._assert_wallet_isvalid()
        if coin in self.coins:
            # eth-family: defaults must come from the network, so drop any
            # caller-supplied gas_price/gas_limit before estimating.
            if eth_tx_info:
                eth_tx_info = json.loads(eth_tx_info)
            else:
                eth_tx_info = {}
            eth_tx_info.pop("gas_price", None)
            eth_tx_info.pop("gas_limit", None)
            address = self.wallet.get_addresses()[0]
            fee = self.eth_estimate_fee(coin, address, **eth_tx_info)
            return json.dumps(fee, cls=DecimalEncoder)
        # btc: map block-target feerates onto fee tiers for a p2pkh-sized tx.
        fee_info_list = self.get_block_info()
        out_size_p2pkh = 141
        out_info = {}
        if feerate is None:
            for block, feerate in fee_info_list.items():
                if block == 2 or block == 5 or block == 10:
                    key = "slow" if block == 10 else "normal" if block == 5 else "fast" if block == 2 else "slowest"
                    out_info[key] = self.format_return_data(feerate, out_size_p2pkh, block)
        else:
            # Custom feerate: find the closest block target it would confirm in.
            block = helpers.get_best_block_by_feerate(float(feerate) * 1000, fee_info_list)
            out_info["customer"] = self.format_return_data(float(feerate) * 1000, out_size_p2pkh, block)
        return json.dumps(out_info)
    def eth_estimate_fee(
        self,
        coin,
        from_address,
        to_address="",
        contract_address=None,
        value="0",
        data="",
        gas_price=None,
        gas_limit=None,
    ):
        """Estimate eth-family fees and fill in fee + fiat for each tier.

        With an explicit *gas_price* only a single "customer" tier is
        returned; otherwise the network's tier table from get_gas_price is
        used. *gas_limit* is estimated when not supplied.
        """
        estimate_gas_prices = self.pywalib.get_gas_price(coin)
        if gas_price:
            gas_price = Decimal(gas_price)
            best_time = self.pywalib.get_best_time_by_gas_price(gas_price, estimate_gas_prices)
            estimate_gas_prices = {"customer": {"gas_price": gas_price, "time": best_time}}
        if not gas_limit:
            gas_limit = self.pywalib.estimate_gas_limit(
                from_address,
                to_address,
                self.wallet.get_contract_token(contract_address),
                value,
                data,
            )
        gas_limit = Decimal(gas_limit)
        last_price = price_manager.get_last_price(self._coin_to_chain_code(self.wallet.coin), self.ccy)
        for val in estimate_gas_prices.values():
            val["gas_limit"] = gas_limit
            # fee (ether) = gas_limit * gas_price (gwei -> wei), converted back to ether.
            val["fee"] = self.pywalib.web3.fromWei(
                gas_limit * self.pywalib.web3.toWei(val["gas_price"], "gwei"), "ether"
            )
            val["fiat"] = f"{self.daemon.fx.ccy_amount_str(Decimal(val['fee']) * last_price, True)} {self.ccy}"
        return estimate_gas_prices
def get_block_info(self):
fee_info_list = self.config.get_block_fee_info()
if fee_info_list is not None:
self.config.set_key("fee_info_list", fee_info_list)
else:
fee_info_list = self.config.get("fee_info_list", fee_info_list)
if fee_info_list is None:
fee_info = read_json("server_config.json", {})
fee_info_list = fee_info["feerate_info"]
return fee_info_list
    def get_fee_by_feerate(self, coin="btc", outputs=None, message=None, feerate=None, customer=None, eth_tx_info=None):
        """
        Get fee info when Send
        :param coin: btc or eth, btc default
        :param outputs: Outputs info as json [{addr1, value}, ...]
        :param message: What you want say as sting
        :param feerate: Feerate retruned by get_default_fee_status api
        :param customer: User choose coin as bool
        :param eth_tx_info: optional, dict contains one of: to_address, contract_address, value, data, gas_price, gas_limit
        :return:
            if coin is "btc"
              json like {"amount": 0.5 BTC,
                        "size": 141,
                        "fee": 0.0003 BTC,
                        "time": 30,
                        "tx": ""}
            else if coin is "eth":
                json like {"gas_price": 110, "time": 0.25, "gas_limit": 36015, "fee": "0.00396165", "fiat": "5.43 USD"}
        """
        if coin in self.coins:
            # eth-family: an explicit gas_price is mandatory here.
            if eth_tx_info:
                eth_tx_info = json.loads(eth_tx_info)
            else:
                eth_tx_info = {}
            if not eth_tx_info.get("gas_price"):
                raise InvalidValueException()
            fee = self.eth_estimate_fee(coin, self.wallet.get_addresses()[0], **eth_tx_info)
            fee = fee.get("customer")
            return json.dumps(fee, cls=DecimalEncoder)
        try:
            self._assert_wallet_isvalid()
            outputs_addrs = self.parse_output(outputs)
            # customer=None means "let the wallet pick coins"; otherwise use
            # the user's explicit coin selection.
            if customer is None:
                coins = self.wallet.get_spendable_coins(domain=None)
            else:
                coins = self.get_coins(json.loads(customer))
            c, u, x = self.wallet.get_balance()
            if not coins and self.config.get("confirmed_only", False):
                raise BaseException(_("Please use unconfirmed utxo."))
            # feerate arrives as sat/byte; electrum wants sat/kB.
            fee_per_kb = 1000 * Decimal(feerate)
            from functools import partial
            fee_estimator = partial(self.config.estimate_fee_for_feerate, fee_per_kb)
            # tx = self.wallet.make_unsigned_transaction(coins=coins, outputs = outputs_addrs, fee=self.get_amount(fee_estimator))
            tx = self.wallet.make_unsigned_transaction(coins=coins, outputs=outputs_addrs, fee=fee_estimator)
            tx.set_rbf(self.rbf)
            self.wallet.set_label(tx.txid(), message)
            size = tx.estimated_size()
            fee = tx.get_fee()
            # Cache the draft so mktx()/signing flows can reuse it.
            self.tx = tx
            tx_details = self.wallet.get_tx_info(tx)
            fee_info_list = self.get_block_info()
            block = helpers.get_best_block_by_feerate(float(feerate) * 1000, fee_info_list)
            ret_data = {
                "amount": self.format_amount(tx_details.amount),
                "size": size,
                "fee": self.format_amount(tx_details.fee),
                "time": block * BTC_BLOCK_INTERVAL_TIME,
                "tx": str(self.tx),
            }
            return json.dumps(ret_data)
        except NotEnoughFunds:
            raise BaseException(NotEnoughFundsStr())
        except BaseException as e:
            raise BaseException(e)
    def mktx(self, tx=None):
        """
        Confirm to create transaction, for btc only
        :param tx: tx that created by get_fee_by_feerate
        :return: json like {"tx":""}
        """
        try:
            self._assert_wallet_isvalid()
            # Parse to validate the caller-provided raw tx...
            tx = tx_from_any(tx)
            tx.deserialize()
        except Exception as e:
            raise BaseException(e)
        # ...but the returned tx is the draft cached by get_fee_by_feerate.
        # NOTE(review): confirm self.tx and the parsed argument are meant to
        # be the same transaction here.
        ret_data = {"tx": str(self.tx)}
        try:
            if self.label_flag and self.wallet.wallet_type != "standard":
                self.label_plugin.push_tx(self.wallet, "createtx", tx.txid(), str(self.tx))
        except Exception as e:
            # Label sync is best-effort; log and continue.
            log_info.info("push_tx createtx error {}.".format(e))
            pass
        json_str = json.dumps(ret_data)
        return json_str
def deserialize(self, raw_tx):
try:
tx = Transaction(raw_tx)
tx.deserialize()
except Exception as e:
raise BaseException(e)
# ### coinjoin
# def join_tx_with_another(self, tx: 'PartialTransaction', other_tx: 'PartialTransaction') -> None:
# if tx is None or other_tx is None:
# raise BaseException("tx or other_tx is empty")
# try:
# print(f"join_tx_with_another.....in.....")
# tx = tx_from_any(tx)
# other_tx = tx_from_any(other_tx)
# if not isinstance(tx, PartialTransaction):
# raise BaseException('TX must partial transactions.')
# except BaseException as e:
# raise BaseException(("Bixin was unable to parse your transaction") + ":\n" + repr(e))
# try:
# print(f"join_tx_with_another.......{tx, other_tx}")
# tx.join_with_other_psbt(other_tx)
# except BaseException as e:
# raise BaseException(("Error joining partial transactions") + ":\n" + repr(e))
# return tx.serialize_as_bytes().hex()
# def export_for_coinjoin(self, export_tx) -> PartialTransaction:
# if export_tx is None:
# raise BaseException("export_tx is empty")
# export_tx = tx_from_any(export_tx)
# if not isinstance(export_tx, PartialTransaction):
# raise BaseException("Can only export partial transactions for coinjoins.")
# tx = copy.deepcopy(export_tx)
# tx.prepare_for_export_for_coinjoin()
# return tx.serialize_as_bytes().hex()
# ####
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format satoshi amount *x* using the configured precision settings."""
        return util.format_satoshis(
            x, is_diff=is_diff, num_zeros=self.num_zeros, decimal_point=self.decimal_point, whitespaces=whitespaces
        )
    def base_unit(self):
        # NOTE(review): __init__ and set_base_uint assign the *string* attribute
        # self.base_unit, which shadows this method on instances — this looks
        # like dead code; confirm before removing.
        return util.decimal_point_to_base_unit_name(self.decimal_point)
# set use unconfirmed coin
def set_unconf(self, x):
"""
Enable/disable spend confirmed_only input, for btc only
:param x: as bool
:return:None
"""
self.config.set_key("confirmed_only", bool(x))
# fiat balance
    def get_currencies(self):
        """
        Get fiat list
        :return:json exp:{'CNY', 'USD'...}
        """
        self._assert_daemon_running()
        # Only currencies supported by the active exchange (with/without history).
        currencies = sorted(self.daemon.fx.get_currencies(self.daemon.fx.get_history_config()))
        return json.dumps(currencies)
def get_exchanges(self):
"""
Get exchange server list
:return: json exp:{'exchanges', ...}
"""
if not self.daemon.fx:
return
b = self.daemon.fx.is_enabled()
if b:
h = self.daemon.fx.get_history_config()
c = self.daemon.fx.get_currency()
exchanges = self.daemon.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.daemon.fx.get_exchanges_by_ccy("USD", False)
return json.dumps(sorted(exchanges))
def set_exchange(self, exchange):
"""
Set exchange server
:param exchange: exchange server name as string like "exchanges"
:return:None
"""
if self.daemon.fx and self.daemon.fx.is_enabled() and exchange and exchange != self.daemon.fx.exchange.name():
self.daemon.fx.set_exchange(exchange)
def set_currency(self, ccy):
"""
Set fiat
:param ccy: fiat as string like "CNY"
:return:None
"""
self.daemon.fx.set_enabled(True)
if ccy != self.ccy:
self.daemon.fx.set_currency(ccy)
self.ccy = ccy
self.update_status()
    def get_exchange_currency(self, type, amount):
        """
        You can get coin to fiat or get fiat to coin
        :param type: base/fiat as str
        :param amount: value
        :return:
            exp:
                if you want get fiat from coin,like this:
                    get_exchange_currency("base", 1)
                    return: 1,000.34 CNY
                if you want get coin from fiat, like this:
                    get_exchange_currency("fiat", 1000)
                    return: 1 mBTC
        """
        text = ""
        rate = self.daemon.fx.exchange_rate() if self.daemon.fx else Decimal("NaN")
        # No usable rate (or no amount): return "" rather than raising.
        if rate.is_nan() or amount is None:
            return text
        else:
            if type == "base":
                # coin -> fiat
                amount = self.get_amount(amount)
                text = self.daemon.fx.ccy_amount_str(amount * Decimal(rate) / COIN, False)
            elif type == "fiat":
                # fiat -> coin
                text = self.format_amount((int(Decimal(amount) / Decimal(rate) * COIN)))
        return text
    def set_base_uint(self, base_unit):
        """
        Set base unit for(BTC/mBTC/bits/sat), for btc only
        :param base_unit: (BTC or mBTC or bits or sat) as string
        :return:None
        """
        # (Method name keeps its historical "uint" typo for API compatibility.)
        self.base_unit = base_unit
        self.decimal_point = util.base_unit_name_to_decimal_point(self.base_unit)
        self.config.set_key("decimal_point", self.decimal_point, True)
        self.update_status()
def format_amount_and_units(self, amount):
try:
self._assert_daemon_running()
except Exception as e:
raise BaseException(e)
text = self.format_amount(amount) + " " + self.base_unit
x = self.daemon.fx.format_amount_and_units(amount) if self.daemon.fx else None
if text and x:
text += " (%s)" % x
return text
# #proxy
def set_proxy(self, proxy_mode, proxy_host, proxy_port, proxy_user, proxy_password):
    """
    Set proxy server
    :param proxy_mode: SOCK4/SOCK5 as string
    :param proxy_host: server ip
    :param proxy_port: server port
    :param proxy_user: login user that you registered
    :param proxy_password: login password that you registered
    :return: raises on error
    """
    try:
        params = self.network.get_parameters()
        # Empty mode/host/port means "disable proxy".
        proxy = None
        if proxy_mode != "" and proxy_host != "" and proxy_port != "":
            proxy = {
                "mode": str(proxy_mode).lower(),
                "host": str(proxy_host),
                "port": str(proxy_port),
                "user": str(proxy_user),
                "password": str(proxy_password),
            }
        params = params._replace(proxy=proxy)
        self.network.run_from_another_thread(self.network.set_parameters(params))
    except BaseException as err:
        raise err
def recover_tx_info(self, tx):
    """Parse *tx* (any supported serialization), deserialize a deep copy, and
    enrich it with info from the current wallet."""
    try:
        parsed = tx_from_any(str(tx))
        enriched = copy.deepcopy(parsed)
        enriched.deserialize()
        enriched.add_info_from_wallet(self.wallet)
    except BaseException as err:
        raise err
    return enriched
def get_tx_info_from_raw(self, raw_tx, tx_list=None):
    """
    You can get detail info from a raw_tx, for btc only
    :param raw_tx: Raw tx as string
    :param tx_list: forwarded to get_details_info (not for app use)
    :return: json like {'txid': ,
                        'can_broadcast': true,
                        'amount': "",
                        'fee': "",
                        'description': "",
                        'tx_status': "",
                        'sign_status': "",
                        'output_addr': "",
                        'input_addr': ["addr", ],
                        'height': 2000,
                        'cosigner': "",
                        'tx': "",
                        'show_status': [1, _("Unconfirmed")]}
    """
    try:
        tx = self.recover_tx_info(raw_tx)
    except Exception as e:
        # Dead code removed: the original assigned `tx = None` here and
        # also pre-initialized `data = {}`, both immediately overwritten.
        raise BaseException(e)
    return self.get_details_info(tx, tx_list=tx_list)
def _get_input_info(self, tx, all_input_info=False):
    """Collect {'address', 'amount'} entries for the inputs of *tx*.

    Results are cached per txid in txdb; a cached entry is reused only when
    its granularity matches the request (one entry for the single-input
    summary, more than one for the full list).
    :param tx: a deserialized transaction
    :param all_input_info: when False, only the first input is resolved
    :return: list of {'address': str, 'amount': str}
    """
    input_list = []
    # Check the local cache first.
    local_addr = self.txdb.get_received_tx_input_info(tx.txid())
    if local_addr:
        addr_info = json.loads(local_addr[0][1])
        if (all_input_info and len(addr_info) > 1) or (not all_input_info and len(addr_info) == 1):
            return addr_info
    for txin in tx.inputs():
        input_info = {}
        addr, value = self.wallet.get_txin_address_and_value(txin)
        if not addr:
            # Input unknown to this wallet: resolve the spent output over
            # the network (best-effort; falls back to empty address / 0).
            import asyncio
            try:
                addr, value = asyncio.run_coroutine_threadsafe(
                    self.gettransaction(txin.prevout.txid.hex(), txin.prevout.out_idx), self.network.asyncio_loop
                ).result()
            except BaseException:
                addr, value = "", 0
        input_info['address'] = addr
        input_info['amount'] = self.format_amount(value)
        input_list.append(input_info)
        if not all_input_info:
            break
    # Cache whatever granularity was produced for next time.
    self.txdb.add_received_tx_input_info(tx.txid(), json.dumps(input_list))
    return input_list
def get_fee_from_server(self, txid):
    """Best-effort fee lookup for *txid* from the configured block explorers.

    Tries each server listed under "btc_server" in server_config.json until
    one returns a "fee"/"fees" field.
    :param txid: transaction id as string
    :return: formatted fee string, or "" when every server fails
    """
    # Hoisted out of the double loop; kept function-local as in the original.
    import requests

    url_info = read_json("server_config.json", {})
    url_list = url_info["btc_server"]
    for urlinfo in url_list:
        for key, url in urlinfo.items():
            url += txid
            try:
                response = requests.get(url, timeout=2)
                # response = await self.network.send_http_on_proxy("get", url, timeout=2)
                response_json = response.json()
            except BaseException as e:
                # Best-effort: log and try the next server. (Dead `pass`
                # before `continue` removed.)
                log_info.info("get fee from server error {}.".format(e))
                continue
            # `in` instead of __contains__ — idiomatic, same behavior.
            if "fee" in response_json:
                return self.format_amount_and_units(response_json["fee"])
            if "fees" in response_json:
                return self.format_amount_and_units(response_json["fees"])
    return ""
# def get_receive_fee_by_hash(self, tx_hash):
# import asyncio
# try:
# fee = asyncio.run_coroutine_threadsafe(
# self.get_fee_from_server(tx_hash),
# self.network.asyncio_loop).result()
# except:
# fee = None
# return fee
def get_details_info(self, tx, tx_list=None):
    """Build the detail-info JSON for a btc transaction.

    :param tx: deserialized transaction
    :param tx_list: when not None, skips the cached/remote fee lookup
    :return: JSON string with txid/amount/fee/status/sign_status/... fields
    """
    try:
        self._assert_wallet_isvalid()
    except Exception as e:
        raise BaseException(e)
    tx_details = self.wallet.get_tx_info(tx)
    # Derive the sign status as [s, r]: s signatures present of r required.
    if "Partially signed" in tx_details.status:
        temp_s, temp_r = tx.signature_count()
        s = int(temp_s / len(tx.inputs()))
        r = len(self.wallet.get_keystores())
    elif "Unsigned" in tx_details.status:
        s = 0
        r = len(self.wallet.get_keystores())
    else:
        if self.wallet.wallet_type == "standard" or self.wallet.wallet_type == "imported":
            s = r = len(self.wallet.get_keystores())
        else:
            # Multisig wallet types are named like "2of3".
            s, r = self.wallet.wallet_type.split("of", 1)
    in_list = self._get_input_info(tx)
    out_list = []
    for index, o in enumerate(tx.outputs()):
        address, value = o.address, o.value
        out_info = {}
        out_info["addr"] = address
        out_info["amount"] = self.format_amount_and_units(value)
        # NOTE(review): treats the last output of a multi-output tx as the
        # change output — confirm this matches the coin-selection code.
        out_info["is_change"] = True if (index == len(tx.outputs()) - 1) and (len(tx.outputs()) != 1) else False
        out_list.append(out_info)
    amount_str = ""
    if tx_details.amount is None:
        amount_str = _("Transaction not related to the current wallet.")
    else:
        amount_str = self.format_amount_and_units(tx_details.amount)
    block_height = tx_details.tx_mined_status.height
    show_fee = ""
    if tx_details.fee is not None:
        show_fee = self.format_amount_and_units(tx_details.fee)
    else:
        # Fee unknown locally: consult the txdb cache, then a server lookup,
        # caching a successful server answer.
        if tx_list is None:
            show_fee_list = self.txdb.get_received_tx_fee_info(tx_details.txid)
            if len(show_fee_list) != 0:
                show_fee = show_fee_list[0][1]
            if show_fee == "":
                show_fee = self.get_fee_from_server(tx_details.txid)
                if show_fee != "":
                    self.txdb.add_received_tx_fee_info(tx_details.txid, show_fee)
    if block_height == -2:
        status = _("Unconfirmed")
        can_broadcast = False
    else:
        status = tx_details.status
        can_broadcast = tx_details.can_broadcast
    ret_data = {
        "txid": tx_details.txid,
        "can_broadcast": can_broadcast,
        "amount": amount_str,
        "fee": show_fee,
        # 'description': self.wallet.get_label(tx_details.txid) if 44 != int(self.wallet.keystore.get_derivation_prefix().split('/')[COIN_POS].split('\'')[0]) else "",
        "description": "",
        "tx_status": status,
        "sign_status": [s, r],
        "output_addr": out_list,
        "input_addr": in_list,
        "height": block_height,
        "cosigner": [x.xpub if not isinstance(x, Imported_KeyStore) else "" for x in self.wallet.get_keystores()],
        "tx": str(tx),
        # show_status codes: 1 = pending, 3 = confirmed, 2 = sending failure.
        "show_status": [1, _("Unconfirmed")]
        if (block_height == 0 or (block_height < 0 and not can_broadcast))
        else [3, _("Confirmed")]
        if block_height > 0
        else [2, _("Sending failure")],
    }
    json_data = json.dumps(ret_data)
    return json_data
# invoices
def delete_invoice(self, key):
    """Delete one invoice by its key; wraps any failure in BaseException."""
    try:
        self._assert_wallet_isvalid()
        self.wallet.delete_invoice(key)
    except Exception as err:
        raise BaseException(err)
def get_invoices(self):
    """Return all invoices stored in the current wallet."""
    try:
        self._assert_wallet_isvalid()
        return self.wallet.get_invoices()
    except Exception as err:
        raise BaseException(err)
def do_save(self, tx):
    """Add *tx* to the wallet history and persist the db; raises when the
    transaction conflicts with existing history."""
    try:
        accepted = self.wallet.add_transaction(tx)
        if not accepted:
            raise BaseException(
                _(("Transaction cannot be saved. It conflicts with current history. tx={}").format(tx.txid()))
            )
    except BaseException as err:
        raise BaseException(err)
    else:
        self.wallet.save_db()
def update_invoices(self, old_tx, new_tx):
    """Replace the invoice recorded for *old_tx* with *new_tx*."""
    try:
        self._assert_wallet_isvalid()
        self.wallet.update_invoice(old_tx, new_tx)
    except Exception as err:
        raise BaseException(err)
def clear_invoices(self):
    """Remove every invoice from the current wallet."""
    try:
        self._assert_wallet_isvalid()
        self.wallet.clear_invoices()
    except Exception as err:
        raise BaseException(err)
def get_history_tx(self):
    """Return the wallet history, newest first, as a list of card dicts."""
    try:
        self._assert_wallet_isvalid()
    except Exception as err:
        raise BaseException(err)
    return [self.get_card(*row) for row in reversed(self.wallet.get_history())]
# get input address for receive tx
async def gettransaction(self, txid, n):
    """Fetch a raw transaction from the network and return (address, value)
    of its *n*-th output."""
    raw = await self.network.get_transaction(txid, timeout=3)
    if not raw:
        raise Exception("Unknown transaction")
    tx = Transaction(raw)
    if tx.txid() != txid:
        raise Exception("Mismatching txid")
    output = tx._outputs[n]
    return output.address, output.value
def get_btc_tx_list(self, start=None, end=None, search_type=None):  # noqa
    """Return the btc history list (JSON) sliced to [start:end].

    Merges on-chain history with locally stored failed txs, and caches the
    merged list (self.old_history_info) keyed by its total length.
    :param start: slice start; defaults to 0
    :param end: slice end; defaults to the full length
    :param search_type: None (all) / "send" / "receive"
    """
    history_data = []
    try:
        history_info = self.get_history_tx()
        local_tx = self.txdb.get_tx_info(self.wallet.get_addresses()[0])
    except BaseException as e:
        raise e
    # Filter by direction; is_mine marks outgoing entries.
    if search_type is None:
        history_data = history_info
    elif search_type == "send":
        for info in history_info:
            if info["is_mine"]:
                history_data.append(info)
    elif search_type == "receive":
        for info in history_info:
            if not info["is_mine"]:
                history_data.append(info)
    history_len = len(history_data)
    local_len = len(local_tx)
    all_tx_len = history_len + local_len
    if start is None or end is None:
        start = 0
        if "receive" == search_type:
            end = history_len
        else:
            end = all_tx_len
    # Fast path: serve the cached merged list when nothing changed in size.
    if (search_type is None or "send" in search_type) and all_tx_len == self.old_history_len:
        return json.dumps(self.old_history_info[start:end])
    all_data = []
    if search_type == "receive":
        for pos, info in enumerate(history_data):
            if pos >= start and pos <= end:
                self.get_history_show_info(info, all_data)
        return json.dumps(all_data)
    else:
        self.old_history_len = all_tx_len
        for info in history_data:
            self.get_history_show_info(info, all_data)
        # local_tx = self.txdb.get_tx_info(self.wallet.get_addresses()[0])
        # Append locally stored (failed / unbroadcast) txs.
        for info in local_tx:
            i = {}
            i["type"] = "history"
            data = self.get_tx_info_from_raw(info[3], tx_list=True)
            i["tx_status"] = _("Sending failure")
            i["date"] = util.format_time(int(info[4]))
            i["tx_hash"] = info[0]
            i["is_mine"] = True
            i["confirmations"] = 0
            data = json.loads(data)
            i["address"] = helpers.get_show_addr(data["output_addr"][0]["addr"])
            # Displayed amount is |amount| + fee, trailing zeros stripped.
            amount = data["amount"].split(" ")[0]
            if amount[0] == "-":
                amount = amount[1:]
            fee = data["fee"].split(" ")[0]
            fait = self.daemon.fx.format_amount_and_units(float(amount) + float(fee)) if self.daemon.fx else None
            show_amount = "%.8f" % (float(amount) + float(fee))
            show_amount = str(show_amount).rstrip("0")
            if show_amount[-1] == ".":
                show_amount = show_amount[0:-1]
            i["amount"] = "%s %s (%s)" % (show_amount, self.base_unit, fait)
            all_data.append(i)
        all_data.sort(reverse=True, key=lambda info: info["date"])
        self.old_history_info = all_data
        return json.dumps(all_data[start:end])
def get_eth_tx_list(self, wallet_obj, contract_address=None, search_type=None):
    """Return eth (or token) history for *wallet_obj* as JSON, with fiat
    equivalents appended to each amount/fee string.

    :param wallet_obj: wallet whose first address is queried
    :param contract_address: token contract address; None for the main coin
    :param search_type: forwarded to PyWalib.get_transaction_history
    """
    contract = self.wallet.get_contract_token(contract_address) if contract_address else None
    txs = PyWalib.get_transaction_history(
        wallet_obj.get_addresses()[0],
        contract=contract,
        search_type=search_type,
    )
    chain_code = self._coin_to_chain_code(wallet_obj.coin)
    main_coin_price = price_manager.get_last_price(chain_code, self.ccy)
    if contract:
        # Token amounts are priced with the token's own rate; fees always
        # use the main coin's rate.
        coins = coin_manager.query_coins_by_token_addresses(chain_code, [contract.address.lower()])
        amount_coin_price = price_manager.get_last_price(coins[0].code, self.ccy) if coins else Decimal(0)
    else:
        amount_coin_price = main_coin_price
    for tx in txs:
        fiat = Decimal(tx["amount"]) * amount_coin_price
        fee_fiat = Decimal(tx["fee"]) * main_coin_price
        tx["amount"] = f"{tx['amount']} {tx['coin']} ({self.daemon.fx.ccy_amount_str(fiat, True)} {self.ccy})"
        tx["fee"] = (
            f"{tx['fee']} {self.pywalib.coin_symbol} "
            f"({self.daemon.fx.ccy_amount_str(fee_fiat, True)} {self.ccy})"
        )
    return json.dumps(txs, cls=DecimalEncoder)
def get_detail_tx_info_by_hash(self, tx_hash):
    """
    Show detailed inputs and outputs of one transaction.
    :param tx_hash: transaction id
    :return: JSON like {
        "input_list": [{"address":"", "amount":""}, ...],
        "output_list": [{"address":"", "amount":""}, ...]
    }
    """
    self._assert_wallet_isvalid()
    tx = self.get_btc_raw_tx(tx_hash)
    try:
        input_list = self._get_input_info(tx, all_input_info=True)
    except Exception:
        input_list = []
    output_list = []
    for output in tx.outputs():
        output_list.append({"address": output.address, "amount": self.format_amount(output.value)})
    return json.dumps({"input_list": input_list, "output_list": output_list})
def get_all_tx_list(self, search_type=None, coin="btc", contract_address=None, start=None, end=None):
    """
    Get the history list for the selected wallet.
    :param search_type: None/send/receive as str
    :param coin: btc/eth as string
    :param contract_address: contract address on eth base chains
    :param start: start position as int
    :param end: end position as int
    :return: JSON list of entries like
        [{"type":"", "tx_status":"", "date":"", "tx_hash":"",
          "is_mine":"", "confirmations":"", "address":"", "amount":""}, ...]
    """
    try:
        if coin == "btc":
            return self.get_btc_tx_list(start=start, end=end, search_type=search_type)
        return self.get_eth_tx_list(self.wallet, contract_address=contract_address, search_type=search_type)
    except BaseException as err:
        raise err
def get_history_show_info(self, info, list_info):
    """Decorate one history entry in place and append it to *list_info*.

    :param info: mutable history entry with at least tx_hash / is_mine
    :param list_info: output list the decorated entry is appended to
    """
    info["type"] = "history"
    # Parse the detail JSON once instead of three times (original re-ran
    # json.loads on the same string for each field).
    detail = json.loads(self.get_tx_info(info["tx_hash"], tx_list=True))
    info["tx_status"] = detail["tx_status"]
    if info["is_mine"]:
        info["address"] = helpers.get_show_addr(detail["output_addr"][0]["addr"])
    else:
        info["address"] = helpers.get_show_addr(detail["input_addr"][0]["address"])
    time = self.txdb.get_tx_time_info(info["tx_hash"])
    if len(time) != 0:
        info["date"] = util.format_time(int(time[0][1]))
    list_info.append(info)
def get_btc_raw_tx(self, tx_hash):
    """Return the deserialized wallet transaction for *tx_hash*.

    Falls back to locally stored (failed/unbroadcast) txs when the wallet db
    does not know the hash.
    NOTE(review): the fallback returns the JSON string produced by
    get_tx_info_from_raw rather than a Transaction object — callers must
    handle both shapes; confirm this asymmetry is intended.
    """
    self._assert_wallet_isvalid()
    tx = self.wallet.db.get_transaction(tx_hash)
    if not tx:
        # Search the locally stored txs for the first wallet address.
        local_tx = self.txdb.get_tx_info(self.wallet.get_addresses()[0])
        for temp_tx in local_tx:
            if temp_tx[0] == tx_hash:
                return self.get_tx_info_from_raw(temp_tx[3])
        raise Exception(_("Failed to get transaction details."))
    # tx = PartialTransaction.from_tx(tx)
    tx = copy.deepcopy(tx)
    try:
        tx.deserialize()
    except Exception as e:
        raise e
    tx.add_info_from_wallet(self.wallet)
    return tx
def get_tx_info(self, tx_hash, coin="btc", tx_list=None):
    """
    Get detail info by tx_hash
    :param tx_hash: tx_hash as string
    :param coin: btc/eth
    :param tx_list: Not for app
    :return: JSON like {'txid': ,
                        'can_broadcast': true,
                        'amount': "",
                        'fee': "",
                        'description': "",
                        'tx_status': "",
                        'sign_status': "",
                        'output_addr': "",
                        'input_addr': ["addr", ],
                        'height': 2000,
                        'cosigner': "",
                        'tx': "",
                        'show_status': [1, _("Unconfirmed")]}
    """
    try:
        self._assert_wallet_isvalid()
    except Exception as err:
        raise BaseException(err)
    if coin in self.coins:
        return self.get_eth_tx_info(tx_hash)
    return self.get_details_info(self.get_btc_raw_tx(tx_hash), tx_list=tx_list)
def get_eth_tx_info(self, tx_hash) -> str:
    """Return detail info for one eth transaction as JSON, with fiat
    equivalents appended to the amount and fee strings."""
    tx = self.pywalib.get_transaction_info(tx_hash)
    price = price_manager.get_last_price(self._coin_to_chain_code(self.wallet.coin), self.ccy)
    # Compute both fiat values before overwriting the source fields.
    fiat = Decimal(tx["amount"]) * price
    fee_fiat = Decimal(tx["fee"]) * price
    symbol = self.pywalib.coin_symbol
    fx = self.daemon.fx
    tx["amount"] = f"{tx['amount']} {symbol} ({fx.ccy_amount_str(fiat, True)} {self.ccy})"
    tx["fee"] = f"{tx['fee']} {symbol} ({fx.ccy_amount_str(fee_fiat, True)} {self.ccy})"
    return json.dumps(tx, cls=DecimalEncoder)
def get_card(self, tx_hash, tx_mined_status, delta, fee, balance):
    """Build one history-list entry ("card") from a wallet history row.

    :param tx_hash: transaction id (may be falsy for placeholder rows)
    :param tx_mined_status: mined-status object providing .conf
    :param delta: signed value change for this wallet (None when unrelated)
    :param fee: part of the history row shape; unused here
    :param balance: part of the history row shape; unused here
    :return: dict with tx_hash/date/message/confirmations and, when delta
        is known, is_mine/amount (and quote_text when fiat is enabled)
    """
    try:
        self._assert_wallet_isvalid()
        self._assert_daemon_running()
    except Exception as e:
        raise BaseException(e)
    status, status_str = self.wallet.get_tx_status(tx_hash, tx_mined_status)
    label = self.wallet.get_label(tx_hash) if tx_hash else ""
    ri = {}
    ri["tx_hash"] = tx_hash
    ri["date"] = status_str
    ri["message"] = label
    ri["confirmations"] = tx_mined_status.conf
    if delta is not None:
        # Outgoing txs have a negative delta; report the absolute amount.
        ri["is_mine"] = delta < 0
        if delta < 0:
            delta = -delta
        ri["amount"] = self.format_amount_and_units(delta)
        if self.fiat_unit:
            fx = self.daemon.fx
            # NOTE(review): price_at_timestamp is given the tx hash, not a
            # timestamp — confirm against that method's signature.
            fiat_value = delta / Decimal(bitcoin.COIN) * self.wallet.price_at_timestamp(tx_hash, fx.timestamp_rate)
            fiat_value = Fiat(fiat_value, fx.ccy)
            ri["quote_text"] = fiat_value.to_ui_string()
    return ri
def get_wallet_address_show_UI(self, next=None):
    """
    Get receiving address, for btc only
    :param next: set truthy to rotate to a freshly created address
    :return: json like {'qr_data':"", "addr":""}
    """
    try:
        self._assert_wallet_isvalid()
        show_addr_info = self.config.get("show_addr_info", {})
        # Idiom fixes: `in` instead of __contains__, str() instead of
        # __str__() — identical behavior.
        wallet_key = str(self.wallet)
        if wallet_key in show_addr_info:
            self.show_addr = show_addr_info[wallet_key]
        else:
            self.show_addr = self.wallet.get_addresses()[0]
        if next:
            addr = self.wallet.create_new_address(False)
            self.show_addr = addr
            show_addr_info[wallet_key] = self.show_addr
            self.config.set_key("show_addr_info", show_addr_info)
        # Encode the QR payload according to the address type.
        if bitcoin.is_address(self.show_addr):
            data = util.create_bip21_uri(self.show_addr, "", "")
        elif self.pywalib.web3.isAddress(self.show_addr):
            prefix = "ethereum" if self.wallet.coin == "eth" else self.wallet.coin
            data = f"{prefix}:{self.show_addr}"
        else:
            data = self.show_addr
    except Exception as e:
        raise BaseException(e)
    return json.dumps({"qr_data": data, "addr": self.show_addr})
def get_all_funded_address(self):
    """
    Get the list of addresses holding a balance, for btc only
    :return: json like [{"address":"", "balance":""}, ...]
    """
    try:
        self._assert_wallet_isvalid()
        funded = []
        for address in self.wallet.get_addresses():
            confirmed, unconfirmed, unmatured = self.wallet.get_addr_balance(address)
            total = confirmed + unconfirmed + unmatured
            if total != 0:
                funded.append({"address": address, "balance": self.format_amount_and_units(total)})
        return json.dumps(funded)
    except Exception as err:
        raise BaseException(err)
def get_unspend_utxos(self):
    """
    Get unspent utxos of the current wallet.
    :return: list of utxo dicts, with "value" formatted in the base unit
    """
    try:
        coins = []
        for txin in self.wallet.get_utxos():
            d = txin.to_json()
            dust_sat = int(d["value_sats"])
            v = d.pop("value_sats")
            d["value"] = self.format_amount(v) + " " + self.base_unit
            # Optionally hide dust utxos (<= 546 sat) per config flag.
            if dust_sat <= 546:
                if self.config.get("dust_flag", True):
                    continue
            coins.append(d)
            # NOTE(review): self.coins is used elsewhere as a coin-config
            # mapping (e.g. `self.coins[coin]["addressType"]`); appending
            # txin objects to it here looks inconsistent — confirm intent.
            self.coins.append(txin)
        return coins
    except BaseException as e:
        raise e
def save_tx_to_file(self, path, tx):
    """
    Save the psbt/tx to path
    :param path: destination path as string (".txn"/".psbt" is appended)
    :param tx: raw tx as string
    :return: raises BaseException on error
    """
    try:
        if tx is None:
            raise BaseException("The tx cannot be empty")
        parsed = tx_from_any(tx)
        if isinstance(parsed, PartialTransaction):
            parsed.finalize_psbt()
        if parsed.is_complete():
            # Complete tx: write the network hex with a trailing newline.
            with open(path + ".txn", "w+") as f:
                f.write(parsed.serialize_to_network() + "\n")
        else:
            # Still partial: write raw PSBT bytes.
            assert isinstance(parsed, PartialTransaction)
            with open(path + ".psbt", "wb+") as f:
                f.write(parsed.serialize_as_bytes())
    except Exception as err:
        raise BaseException(err)
def read_tx_from_file(self, path: str, is_tx=True) -> str:
    """
    Import tx info from path
    :param path: path as string
    :param is_tx: when True parse a psbt/tx, otherwise return raw content
    :return: parsed tx when is_tx, else the file content
    """
    mode = "rb" if is_tx else "r"
    try:
        with open(path, mode) as f:
            content = f.read()
        if is_tx:
            parsed = tx_from_any(content)
    except (ValueError, IOError, os.error) as reason:
        raise BaseException(_("Failed to open file.{}").format(reason))
    else:
        return parsed if is_tx else content
def parse_address(self, data):
    """Parse a plain address or BIP21/payment-request URI.

    :param data: address or URI string
    :return: dict with at least "address"; "memo"/"amount" are added when a
        payment request was resolved
    :raise: Exception when *data* cannot be parsed
    """
    data = data.strip()
    try:
        out = util.parse_URI(data)
        r = out.get("r")
        sig = out.get("sig")
        name = out.get("name")
        if r or (name and sig):
            # Payment request: embedded (name + sig) or fetched from `r`.
            if name and sig:
                s = paymentrequest.serialize_request(out).SerializeToString()
                result = paymentrequest.PaymentRequest(s)
            else:
                result = asyncio.run_coroutine_threadsafe(
                    paymentrequest.get_payment_request(r), self.network.asyncio_loop
                ).result()
            out = {"address": result.get_address(), "memo": result.get_memo()}
            if result.get_amount() != 0:
                out["amount"] = result.get_amount()
        return out
    except Exception as e:
        raise Exception(e)
def parse_tx(self, data):
    """Decode *data* as a transaction and return its detail-info JSON."""
    # try to decode transaction
    try:
        # text = bh2u(base_decode(data, base=43))
        tx = self.recover_tx_info(data)
    except Exception as err:
        raise BaseException(err)
    return self.get_details_info(tx)
def parse_pr(self, data):
    """
    Parse qr code which generated by address or tx, for btc only
    :param data: qr data as str
    :return:
        if data is address qr data, return data like:
            {"type": 1, "data":"bcrt1qzm6y9j0zg9nnludkgtc0pvhet0sf76szjw7fjw"}
        if data is tx qr data, return data like:
            {"type": 2, "data":"02000000000103f9f51..."}
        if data is not address and not tx, return data like:
            {"type": 3, "data":"parse pr error"}
    """
    # Both parses are always attempted (parse_tx has caching side effects).
    add_data = {}
    try:
        add_data = self.parse_address(data)
        add_ok = True
    except BaseException:
        add_ok = False
    tx_data = None
    try:
        tx_data = self.parse_tx(data)
        tx_ok = True
    except BaseException:
        tx_ok = False
    if add_ok:
        out_data = {"type": 1, "data": add_data}
    elif tx_ok:
        out_data = {"type": 2, "data": json.loads(tx_data)}
    else:
        out_data = {"type": 3, "data": "parse pr error"}
    return json.dumps(out_data, cls=DecimalEncoder)
def update_local_info(self, txid, address, tx, msg):
    """Replace any locally stored record of *txid* with a failure entry
    carrying *msg*."""
    self.remove_local_tx(txid)
    self.txdb.add_tx_info(tx_hash=txid, address=address, raw_tx=tx, psbt_tx="", failed_info=msg)
def broadcast_tx(self, tx: str) -> str:
    """
    Broadcast the tx, for btc only
    :param tx: tx as string
    :return: 'success'
    :raise: BaseException
    """
    trans = None
    try:
        if isinstance(tx, str):
            trans = tx_from_any(tx)
            trans.deserialize()
        if self.network and self.network.is_connected():
            self.network.run_from_another_thread(self.network.broadcast_transaction(trans))
        else:
            # No network: record the attempt time, then fail.
            self.txdb.add_tx_time_info(trans.txid())
            raise BaseException(_("Cannot broadcast transaction due to network connected exceptions"))
    except SerializationError:
        raise BaseException(_("Transaction formatter error"))
    except TxBroadcastError as e:
        # Server rejected the tx: store it locally with the failure reason.
        msg = e.get_message_for_gui()
        self.update_local_info(trans.txid(), self.wallet.get_addresses()[0], tx, msg)
        raise BaseException(msg)
    except BestEffortRequestFailed as e:
        msg = str(e)
        raise BaseException(msg)
    else:
        return "success"
    finally:
        # NOTE(review): runs on every exit path, so the no-network branch
        # records the broadcast time twice — confirm this is intended.
        if trans:
            self.txdb.add_tx_time_info(trans.txid())
def set_use_change(self, status_change):
    """
    Enable/disable change address, for btc only
    :param status_change: as bool
    :return: raise except if error
    """
    try:
        self._assert_wallet_isvalid()
    except Exception as err:
        raise BaseException(err)
    if self.wallet.use_change != status_change:
        self.config.set_key("use_change", status_change, False)
        self.wallet.use_change = status_change
def sign_message(self, address, message, path="android_usb", password=None):
    """
    Sign message, for btc only
    :param address: must be an address belonging to the current wallet
    :param message: message to be signed as string
    :param path: NFC/android_usb/bluetooth as str, used by hardware
    :param password: as string
    :return: signature string
    """
    if path:
        self.trezor_manager.ensure_client(path)
    self._assert_wallet_isvalid()
    address = address.strip()
    message = message.strip()
    coin = self.wallet.coin
    # Validate the address for the active coin family.
    if coin in self.coins:
        if not self.pywalib.web3.isAddress(address):
            raise UnavailableEthAddr()
    elif coin == 'btc':
        if not bitcoin.is_address(address):
            raise UnavailableBtcAddr()
        txin_type = self.wallet.get_txin_type(address)
        if txin_type not in ("p2pkh", "p2wpkh", "p2wpkh-p2sh"):
            raise BaseException(_("Current wallet does not support signature message:{}".format(txin_type)))
    else:
        raise UnsupportedCurrencyCoin()
    if self.wallet.is_watching_only():
        raise BaseException(_("This is a watching-only wallet."))
    if not self.wallet.is_mine(address):
        raise BaseException(_("The address is not in the current wallet."))
    signature = self.wallet.sign_message(address, message, password)
    return force_text(signature)
def verify_message(self, address, message, signature, coin='btc', path="android_usb"):
    """
    Verify a message signature produced by sign_message.
    :param address: the signing address as str
    :param message: the signed message as str
    :param signature: signature returned by the sign_message api
    :param coin: 'btc' or an eth-family coin code
    :param path: hardware transport (NFC/android_usb/bluetooth)
    :return: True when the signature verifies, False otherwise
    """
    address = address.strip()
    message = message.strip().encode("utf-8")
    if coin == 'btc':
        if not bitcoin.is_address(address):
            raise UnavailableBtcAddr()
    elif coin in self.coins:
        if not self.pywalib.web3.isAddress(address):
            raise UnavailableEthAddr()
    else:
        raise UnsupportedCurrencyCoin()
    try:
        self.trezor_manager.ensure_client(path)
        return self.wallet.verify_message(address, message, signature)
    except Exception:
        return False
def get_cur_wallet_token_address(self):
    """Return a JSON list of every token contract address known to the
    current wallet."""
    try:
        return json.dumps(self.wallet.get_all_token_address())
    except BaseException as err:
        raise err
def get_customer_token_info(self, contract_address):
    """
    Look up metadata for one token contract.
    :param contract_address: token contract address
    :return: JSON like {
        "chain_id": "", "decimals": "", "address": "", "symbol": "",
        "name": "", "logoURI": "", "rank": 0 }
    """
    return json.dumps(PyWalib.get_token_info("", contract_address))
def get_all_token_info(self):
    """Return a JSON list of every known token for the active chain,
    lazily loading and caching the bundled token list per chain."""
    chain_code = PyWalib.get_chain_code()
    cached = self.token_list_by_chain.get(chain_code)
    if cached is None:
        tokens = read_json(f"{chain_code}_token_list.json", {}).get("tokens", ())
        cached = {token["address"].lower(): token for token in tokens}
        self.token_list_by_chain[chain_code] = cached
    return json.dumps(list(self.token_list_by_chain.get(chain_code).values()))
def get_all_customer_token_info(self):
    """Return the user-added token info for the active chain as JSON."""
    return json.dumps(self.wallet_context.get_customer_token_info(PyWalib.get_chain_code()))
def add_token(self, symbol, contract_addr):
    """
    Add token to eth, for eth/bsc only
    :param symbol: coin symbol
    :param contract_addr: coin address
    :return: raise except if error
    """
    if not contract_addr:
        raise BaseException("Contract address cannot be empty")
    chain_code = PyWalib.get_chain_code()
    contract_addr = contract_addr.lower()
    # Prefer the bundled token list; fall back to an on-chain lookup.
    token_info = self.token_list_by_chain.get(chain_code, {}).get(contract_addr) or PyWalib.get_token_info(
        symbol, contract_addr
    )
    with db.atomic():
        coin_manager.add_coin(
            chain_code,
            contract_addr,
            token_info["symbol"],
            token_info["decimals"],
            token_info.get("name"),
            token_info.get("logoURI"),
        )
        # Tokens outside the first 50 of the bundled list are recorded as
        # user-added ("customer") tokens.
        if contract_addr not in list(self.token_list_by_chain.get(chain_code, {}).keys())[:50]:
            self.wallet_context.add_customer_token_info(token_info, chain_code)
    contract_addr = self.pywalib.web3.toChecksumAddress(contract_addr)
    self.wallet.add_contract_token(symbol, contract_addr)
def delete_token(self, contract_addr):
    """
    Delete token from current wallet, for eth/bsc only
    :param contract_addr: coin address
    :return: raise except if error
    """
    try:
        self.wallet.delete_contract_token(contract_addr)
    except BaseException as err:
        raise err
def sign_eth_tx(
    self,
    to_addr,
    value,
    path="android_usb",
    password=None,
    contract_addr=None,
    gas_price=None,
    gas_limit=None,
    data=None,
    nonce=None,
    auto_send_tx=True,
):
    """
    Sign (and by default broadcast) an eth transaction, for eth/bsc only
    :param to_addr: as string
    :param value: amount to send
    :param path: NFC/android_usb/bluetooth as str, used by hardware
    :param password: as string
    :param contract_addr: needed when sending to a contract (token transfer)
    :param gas_price: as string, unit is Gwei
    :param gas_limit: as string
    :param data: eth tx custom data, as hex string
    :param nonce: from address nonce
    :param auto_send_tx: when True, broadcast and return the tx hash
    :return: tx hash when auto_send_tx, otherwise the signed tx hex
    """
    from_address = self.wallet.get_addresses()[0]
    # Build the unsigned tx dict, with or without a token contract.
    if contract_addr is None:
        tx_dict = self.pywalib.get_transaction(
            from_address, to_addr, value, gas_price=gas_price, gas_limit=gas_limit, data=data, nonce=nonce
        )
    else:
        contract_addr = self.pywalib.web3.toChecksumAddress(contract_addr)
        contract = self.wallet.get_contract_token(contract_addr)
        assert contract is not None
        tx_dict = self.pywalib.get_transaction(
            from_address,
            to_addr,
            value,
            contract=contract,
            gas_price=gas_price,
            gas_limit=gas_limit,
            data=data,
            nonce=nonce,
        )
    signed_tx_hex = None
    if isinstance(self.wallet.get_keystore(), Hardware_KeyStore):
        # Hardware wallet: sign on the device and serialize with (v, r, s).
        if path:
            address_path = helpers.get_derivation_path(self.wallet, from_address)
            address_n = parse_path(address_path)
            self.trezor_manager.ensure_client(path)
            (v, r, s) = self.wallet.sign_transaction(
                address_n,
                tx_dict["nonce"],
                tx_dict["gasPrice"],
                tx_dict["gas"],
                str(tx_dict["to"]),
                tx_dict["value"],
                data=bytes.fromhex(eth_utils.remove_0x_prefix(tx_dict["data"])) if tx_dict.get("data") else None,
                chain_id=tx_dict["chainId"],
            )
            from eth_utils.encoding import big_endian_to_int
            # The device returns r/s as big-endian bytes; convert to ints.
            r = big_endian_to_int(r)
            s = big_endian_to_int(s)
            signed_tx_hex = self.pywalib.serialize_tx(tx_dict, vrs=(v, r, s))
    else:
        # Software wallet: sign with the decrypted account key.
        signed_tx_hex = self.pywalib.sign_tx(self.wallet.get_account(from_address, password), tx_dict)
    if signed_tx_hex and auto_send_tx:
        return self.pywalib.send_tx(signed_tx_hex)
    return signed_tx_hex
def dapp_eth_sign_tx(
    self,
    transaction: str,
    path="android_usb",
    password=None,
):
    """Sign a dapp-provided eth transaction (JSON string) without sending it.

    :param transaction: JSON object with at least "to"; optional
        from/value/gasPrice/gas/data/nonce fields
    :param path: hardware transport, forwarded to sign_eth_tx
    :param password: wallet password, forwarded to sign_eth_tx
    :return: JSON of the decoded signed transaction
    """
    tx = json.loads(transaction)
    current_address = self.wallet.get_addresses()[0]
    sender = tx.get("from")
    if sender and sender.lower() != current_address.lower():
        raise Exception(f"current wallet address is {current_address}, not {tx['from']}")
    if not tx.get("to"):
        raise Exception("'to' address not found")
    value = self.pywalib.web3.fromWei(tx["value"], "ether") if tx.get("value") else 0
    gas_price = self.pywalib.web3.fromWei(tx["gasPrice"], "gwei") if tx.get("gasPrice") else None
    signed_tx_hex = self.sign_eth_tx(
        to_addr=tx["to"],
        value=value,
        gas_price=gas_price,
        gas_limit=tx.get("gas"),
        data=tx.get("data"),
        nonce=tx.get("nonce"),
        path=path,
        password=password,
        auto_send_tx=False,
    )
    return json.dumps(self.pywalib.decode_signed_tx(signed_tx_hex))
def dapp_eth_send_tx(self, tx_hex: str):
    """Broadcast a pre-signed raw eth transaction via pywalib."""
    return self.pywalib.send_tx(tx_hex)
def dapp_eth_rpc_info(self):
    """Return the current eth RPC endpoint info as JSON."""
    return json.dumps(PyWalib.get_rpc_info())
def dapp_eth_keccak(self, message: str) -> str:
    """Return the keccak-256 hash (hex) of *message*: hex payload when
    0x-prefixed, utf-8 text otherwise."""
    if message.startswith("0x"):
        payload = bytes.fromhex(eth_utils.remove_0x_prefix(message))
    else:
        payload = message.encode()
    return PyWalib.web3.keccak(payload).hex()
def sign_tx(self, tx, path=None, password=None):
    """
    Sign one transaction, for btc only
    :param tx: tx info as str
    :param path: NFC/android_usb/bluetooth as str, used by hardware
    :param password: password as str
    :return: signed tx detail JSON on success; raises on failure
    """
    try:
        if path is not None:
            self.trezor_manager.ensure_client(path)
        self._assert_wallet_isvalid()
        parsed = tx_from_any(tx)
        parsed.deserialize()
        signed = self.wallet.sign_transaction(parsed, password)
        # Best-effort: publish the signed tx to cosigners via the label plugin.
        try:
            if self.label_flag and self.wallet.wallet_type != "standard":
                self.label_plugin.push_tx(self.wallet, "signtx", parsed.txid(), str(signed))
        except BaseException as err:
            log_info.info("push_tx signtx error {}.".format(err))
        return self.get_tx_info_from_raw(signed)
    except BaseException as err:
        # msg = e.__str__()
        # self.update_local_info(tx.txid(), self.wallet.get_addresses()[0], tx, msg)
        raise BaseException(err)
def get_derived_list(self, xpub):
    """Return the list of available derived-account indices for *xpub*,
    or None when no derived wallets are recorded for it."""
    try:
        info = DerivedInfo(self.config)
        info.init_recovery_num()
        for derived in self.wallet_context.iter_derived_wallets(xpub):
            info.update_recovery_info(derived['account_id'])
        if not info.recovery_num:
            return None
        info.reset_list()
        return info.get_list()
    except BaseException as err:
        raise err
def show_address(self, address, path="android_usb", coin="btc") -> str:
    """
    Verify address on hardware, used by hardware
    :param address: address as str
    :param path: NFC/android_usb/bluetooth as str
    :return: "1" on success; raises BaseException on failure
    """
    try:
        self.trezor_manager.plugin.show_address(
            path=path, ui=CustomerUI(), wallet=self.wallet, address=address, coin=coin
        )
    except Exception as err:
        raise BaseException(err)
    return "1"
def is_encrypted_with_hw_device(self):
    """Return whether the wallet storage is encrypted with a hardware device.

    The original discarded the underlying boolean and always returned None;
    returning it is backward-compatible (callers ignoring the result are
    unaffected) and makes the query usable.
    """
    return self.wallet.storage.is_encrypted_with_hw_device()
def get_xpub_from_hw(self, path="android_usb", _type="p2wpkh", is_creating=True, account_id=None, coin="btc"):
    """
    Get extended public key from hardware, used by hardware
    :param path: NFC/android_usb/bluetooth as str
    :param _type: p2wsh/p2pkh/p2wpkh/p2wpkh-p2sh as string (btc only)
    :param is_creating: forwarded to the hardware xpub request
    :param account_id: BIP44 account index; defaults to 0
    :param coin: btc/eth as string
    :return: xpub string
    """
    self.hw_info["device_id"] = self.trezor_manager.get_device_id(path)
    if account_id is None:
        account_id = 0
    if coin == "btc":
        # Record the BIP43 purpose number for later wallet creation.
        self.hw_info["type"] = _type
        if _type == "p2wsh":
            derivation = purpose48_derivation(account_id, xtype="p2wsh")
            self.hw_info["type"] = 48
            # derivation = bip44_derivation(account_id, bip43_purpose=48)
        elif _type == "p2wpkh":
            derivation = bip44_derivation(account_id, bip43_purpose=84)
            self.hw_info["type"] = 84
        elif _type == "p2pkh":
            derivation = bip44_derivation(account_id, bip43_purpose=44)
            self.hw_info["type"] = 44
        elif _type == "p2wpkh-p2sh":
            derivation = bip44_derivation(account_id, bip43_purpose=49)
            self.hw_info["type"] = 49
        # NOTE(review): an unrecognized _type leaves `derivation` unbound and
        # raises NameError below — confirm inputs are validated upstream.
        xpub = self.trezor_manager.get_xpub(path, derivation, _type, is_creating)
    else:
        # eth-family coins: purpose/cointype come from the coin config table.
        self.hw_info["type"] = self.coins[coin]["addressType"]
        derivation = bip44_eth_derivation(
            account_id, bip43_purpose=self.coins[coin]["addressType"], cointype=self.coins[coin]["coinId"]
        )
        derivation = util.get_keystore_path(derivation)
        xpub = self.trezor_manager.get_eth_xpub(path, derivation)
    return xpub
def create_hw_derived_wallet(self, path="android_usb", _type="p2wpkh", is_creating=True, coin="btc"):
"""
Create derived wallet by hardware, used by hardware
:param path: NFC/android_usb/bluetooth as string
:param _type: p2wsh/p2wsh/p2pkh/p2pkh-p2sh as string
:coin: btc/eth as string
:return: xpub as string
"""
xpub = self.get_xpub_from_hw(path=path, _type="p2wpkh", coin=coin)
list_info = self.get_derived_list(xpub + coin.lower())
self.hw_info["xpub"] = xpub
if list_info is None:
self.hw_info["account_id"] = 0
dervied_xpub = self.get_xpub_from_hw(path=path, _type=_type, account_id=0, coin=coin)
return dervied_xpub
if len(list_info) == 0:
raise BaseException(DerivedWalletLimit())
dervied_xpub = self.get_xpub_from_hw(path=path, _type=_type, account_id=list_info[0], coin=coin)
self.hw_info["account_id"] = list_info[0]
return dervied_xpub
def export_keystore(self, password):
"""
Export keystory from eth wallet, for eth only
:param password: password as string
:return:Keystore info for success/exception info for fuilure
"""
try:
address = self.wallet.get_addresses()[0]
keystore = self.wallet.export_keystore(address, password=password)
return json.dumps(keystore)
except BaseException as e:
raise e
def export_privkey(self, password):
"""
Export privkey for the first receiving address
:param password: password as string
:return: private as string
.. code-block:: python
testcommond.load_all_wallet()
testcommond.select_wallet("BTC-1")
priv = testcommond.export_privkey(password)
if the wallet you select is "btc" wallet, you will get:
'cVJo3o48E6j8xxbTQprEzaCWQJ7aL3Y59R2W1owzJnX8NBh5AXnt'
if the wallet you select is "eth" wallet, you will get:
'0x31271366888ccad468157770734b5ac0d98a9363d4b229767a28c44fde445f51'
"""
try:
address = self.wallet.get_addresses()[0]
priv = self.wallet.export_private_key(address, password=password)
if -1 != priv.find(":"):
priv = priv.split(":")[1]
return priv
except BaseException as e:
raise e
def export_seed(self, password, name):
"""
Export seed by on-chain wallet
:param password: password by string
:return: Mnemonic as string
"""
try:
wallet = self.daemon.get_wallet(self._wallet_path(name))
if not wallet.has_seed():
raise BaseException(NotSupportExportSeed())
keystore = wallet.get_keystore()
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
return seed + passphrase
except BaseException as e:
raise e
def has_seed(self):
"""
Check if the wallet have seed
:return: True/False as bool
"""
if not self.wallet.has_seed():
raise BaseException(NotSupportExportSeed())
return self.wallet.has_seed()
def is_seed(self, x):
try:
seed_flag = False
if keystore.is_seed(x):
seed_flag = True
else:
is_checksum, is_wordlist = keystore.bip39_is_checksum_valid(x)
if is_checksum:
seed_flag = True
return seed_flag
except BaseException as e:
raise e
    def get_addrs_from_seed(self, seed, passphrase=""):
        """
        Derive the first receiving address for each supported address type
        (p2wpkh / p2wpkh-p2sh / p2pkh / electrum) from one seed.

        :param seed: mnemonic as str (BIP39, or electrum for the last entry)
        :param passphrase: optional extension passphrase
        :return: JSON string mapping type -> {"addr": ..., "derivation": ...}
        """
        list_type_info = ["p2wpkh", "p2wpkh-p2sh", "p2pkh", "electrum"]
        out = {}
        # NOTE(review): the loop variable shadows the builtin `type`.
        for type in list_type_info:
            bip39_derivation = ""
            if type == "p2pkh":
                bip39_derivation = bip44_derivation(0, bip43_purpose=44)
            elif type == "p2wpkh":
                bip39_derivation = bip44_derivation(0, bip43_purpose=84)
            elif type == "p2wpkh-p2sh":
                bip39_derivation = bip44_derivation(0, bip43_purpose=49)
            if type == "electrum":
                from electrum.mnemonic import Mnemonic
                bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
                rootnode = BIP32Node.from_rootseed(bip32_seed, xtype="standard")
                # Electrum-style wallets use the fixed m/0' account path;
                # bip39_derivation stays "" for this entry.
                node = rootnode.subkey_at_private_derivation("m/0'/")
            else:
                bip32_seed = keystore.bip39_to_seed(seed, passphrase)
                rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=("standard" if type == "p2pkh" else type))
                node = rootnode.subkey_at_private_derivation(bip39_derivation)
            from electrum import bip32
            # Convert to a public-only node, then derive the first external
            # (change=0, index=0) receiving key.
            xpub_master = bip32.xpub_from_xprv(node.to_xprv())
            node_master = BIP32Node.from_xkey(xpub_master)
            xpub = node_master.subkey_at_public_derivation((0,)).to_xpub()
            node = BIP32Node.from_xkey(xpub).subkey_at_public_derivation((0,))
            pubkey = node.eckey.get_public_key_bytes(compressed=True).hex()
            # "electrum" seeds are rendered as p2wpkh addresses.
            addr = bitcoin.pubkey_to_address("p2wpkh" if type == "electrum" else type, pubkey)
            temp = {}
            temp["addr"] = addr
            temp["derivation"] = bip39_derivation
            out[type] = temp
        return json.dumps(out)
def get_wallet_by_name(self, name):
return self.daemon.get_wallet(self._wallet_path(name))
def get_xpub_by_name(self, name, wallet_obj):
if self.wallet_context.is_hd(name):
return self.get_hd_wallet_encode_seed()
else:
return wallet_obj.keystore.xpub
def get_backup_info(self, name):
"""
Get backup status
:param name: Wallet key
:return: True/False as bool
"""
try:
wallet = self.get_wallet_by_name(name)
if isinstance(wallet, Imported_Wallet) or isinstance(wallet, Imported_Eth_Wallet):
return True
if wallet.has_seed():
xpub = self.get_xpub_by_name(name, wallet)
return self.wallet_context.get_backup_flag(xpub)
except BaseException as e:
raise e
return True
def delete_backup_info(self, name):
"""
Delete one backup status in the config
:param name: Wallet key
:return: None
"""
try:
wallet = self.get_wallet_by_name(name)
xpub = self.get_xpub_by_name(name, wallet)
self.wallet_context.remove_backup_info(xpub)
except BaseException as e:
raise e
    def create_hd_wallet(
        self, password, seed=None, passphrase="", purpose=84, strength=128, create_coin=json.dumps(["btc"])
    ):
        """
        Create (or, when *seed* is given, restore) the HD wallet group.

        :param password: password as str
        :param seed: when not None, recover wallets from this mnemonic instead
            of generating a fresh one
        :param passphrase: optional seed extension passphrase
        :param purpose: 84/44/49, btc only
        :param strength: mnemonic length in bits (128/256)
        :param create_coin: JSON list of coins to create, like '["btc","eth"]'
            (the default is an immutable str computed once at definition time)
        :return: json like {'seed':''
                            'wallet_info':''
                            'derived_info':''}
        """
        self._assert_daemon_running()
        new_seed = None
        wallet_data = []
        if seed is not None:
            # Restore path: validate the mnemonic, then hand everything to
            # the recovery flow and return its result directly.
            is_checksum_valid, _ = keystore.bip39_is_checksum_valid(seed)
            if not is_checksum_valid:
                raise BaseException(InvalidBip39Seed())
            return self.recovery_hd_derived_wallet(password, seed, passphrase)
        if seed is None:
            seed = Mnemonic("english").generate(strength=strength)
            new_seed = seed
        create_coin_list = json.loads(create_coin)
        # btc is created first and outside the generic coin loop because it
        # uses the caller-supplied purpose.
        if "btc" in create_coin_list:
            wallet_info = self.create(
                "BTC-1",
                password,
                seed=seed,
                passphrase=passphrase,
                bip39_derivation=bip44_derivation(0, purpose),
                hd=True,
            )
            wallet_data.append(json.loads(wallet_info))
        for coin, info in self.coins.items():
            if coin in create_coin_list:
                name = "%s-1" % coin.upper()
                bip39_derivation = bip44_eth_derivation(0, info["addressType"], cointype=info["coinId"])
                wallet_info = self.create(
                    name,
                    password,
                    seed=seed,
                    passphrase=passphrase,
                    bip39_derivation=bip39_derivation,
                    hd=True,
                    coin=coin,
                )
                wallet_data.append(json.loads(wallet_info))
        # Mark the new HD seed as not-yet-backed-up.
        key = self.get_hd_wallet_encode_seed(seed=seed)
        self.wallet_context.set_backup_info(key)
        out_info = []
        for info in wallet_data:
            out_info.append(info["wallet_info"][0])
        out = self.get_create_info_by_json(new_seed, out_info)
        return json.dumps(out)
def get_wallet_num(self):
list_info = json.loads(self.list_wallets())
num = 0
for info in list_info:
for key, value in info.items():
if -1 == value["type"].find("-hw-"):
num += 1
return num
    def verify_legality(self, data, flag="", coin="btc", password=None):  # noqa
        """
        Verify legality of a seed/private key/public key/address/keystore.

        :param data: data as string
        :param flag: seed/private/public/address/keystore as string
        :param coin: btc/eth as string
        :param password: keystore password, only used when flag == "keystore"
        :return: None; raises an exception when the data is invalid
        """
        if flag == "seed":
            is_checksum, is_wordlist = keystore.bip39_is_checksum_valid(data)
            # Accept either an electrum seed or a checksum-valid BIP39 mnemonic.
            if not keystore.is_seed(data) and not is_checksum:
                raise BaseException(_("Incorrect mnemonic format."))
        if coin == "btc":
            if flag == "private":
                try:
                    ecc.ECPrivkey(bfh(data))
                except BaseException:
                    # Not raw hex -- fall back to parsing WIF-style keys.
                    private_key = keystore.get_private_keys(data, allow_spaces_inside_key=False)
                    if private_key is None:
                        raise BaseException(UnavailablePrivateKey())
            elif flag == "public":
                try:
                    ecc.ECPubkey(bfh(data))
                except BaseException:
                    raise BaseException(UnavailablePublicKey())
            elif flag == "address":
                if not bitcoin.is_address(data):
                    raise UnavailableBtcAddr()
        elif coin in self.coins:
            if flag == "private":
                try:
                    keys.PrivateKey(HexBytes(data))
                except BaseException:
                    raise BaseException(UnavailablePrivateKey())
            elif flag == "keystore":
                try:
                    Account.decrypt(json.loads(data), password).hex()
                # Malformed keystore structure vs. wrong password are
                # reported as different errors.
                except (TypeError, KeyError, NotImplementedError):
                    raise BaseException(_("Incorrect eth keystore."))
                except BaseException:
                    raise InvalidPassword()
            elif flag == "public":
                try:
                    uncom_key = get_uncompressed_key(data)
                    keys.PublicKey(HexBytes(uncom_key[2:]))
                except BaseException:
                    raise BaseException(UnavailablePublicKey())
            elif flag == "address":
                if not self.pywalib.web3.isAddress(data):
                    raise UnavailableEthAddr()
def replace_watch_only_wallet(self, replace=True):
"""
When a watch-only wallet exists locally and a non-watch-only wallet is created,
the interface can be called to delete the watch-only wallet and keey the non-watch-only wallet
:param replace:True/False as bool
:return: wallet key as string
"""
wallet = self.replace_wallet_info["wallet"]
if replace:
self.delete_wallet(password=self.replace_wallet_info["password"], name=wallet.identity)
self.create_new_wallet_update(
wallet=wallet,
seed=self.replace_wallet_info["seed"],
password=self.replace_wallet_info["password"],
wallet_type=self.replace_wallet_info["wallet_type"],
bip39_derivation=self.replace_wallet_info["bip39_derivation"],
)
self.replace_wallet_info = {}
return str(wallet)
def update_replace_info(
self,
wallet_obj,
seed=None,
password=None,
wallet_type=None,
bip39_derivation=None,
):
self.replace_wallet_info["wallet"] = wallet_obj
self.replace_wallet_info["seed"] = seed
self.replace_wallet_info["password"] = password
self.replace_wallet_info["wallet_type"] = wallet_type
self.replace_wallet_info["bip39_derivation"] = bip39_derivation
    def create_new_wallet_update(
        self,
        wallet=None,
        seed=None,
        password=None,
        wallet_type=None,
        bip39_derivation=None,
    ):
        """
        Persist and register a freshly built wallet object: set up storage,
        encrypt it with *password*, add it to the daemon, record its type,
        and -- for derived wallets -- update the HD bookkeeping.

        :param wallet: wallet object to register
        :param seed: seed used to derive the HD bookkeeping key (derived only)
        :param password: wallet password; storage is always encrypted
        :param wallet_type: type string recorded in the wallet context
        :param bip39_derivation: when not None the wallet is treated as an
            HD-derived wallet
        :return: None
        """
        wallet.ensure_storage(self._wallet_path(wallet.identity))
        wallet.update_password(old_pw=None, new_pw=password, str_pw=self.android_id, encrypt_storage=True)
        self.daemon.add_wallet(wallet)
        # Only btc wallets attach to the electrum network.
        if wallet.coin == "btc":
            wallet.start_network(self.daemon.network)
        self.wallet_context.set_wallet_type(wallet.identity, wallet_type)
        if bip39_derivation is not None:
            self.set_hd_wallet(wallet)
            self.update_devired_wallet_info(
                bip39_derivation, self.get_hd_wallet_encode_seed(seed=seed, coin=wallet.coin), wallet.name, wallet.coin
            )
    def create(  # noqa
        self,
        name,
        password=None,
        seed_type="segwit",
        seed=None,
        passphrase="",
        bip39_derivation=None,
        master=None,
        addresses=None,
        privkeys=None,
        hd=False,
        purpose=49,
        coin="btc",
        keystores=None,
        keystore_password=None,
        strength=128,
    ):
        """
        Create or restore a new wallet.

        :param name: wallet name as string
        :param password: password as string
        :param seed_type: not used for now
        :param seed: mnemonic words as string
        :param passphrase: customised passphrase as string
        :param bip39_derivation: derivation path; together with *seed* this
            selects the HD-derived wallet branch
        :param master: master key (btc only)
        :param addresses: supply to create a watch-only wallet
        :param privkeys: supply to create a wallet from a private key
        :param hd: not for app
        :param purpose: BTC address type as (44/49/84), for BTC only
        :param coin: "btc"/"eth" as string, selects the chain
        :param keystores: eth keystore JSON as string (eth only; takes
            priority over *privkeys*)
        :param keystore_password: password protecting *keystores*
        :param strength: length of the mnemonic in bits (128/256)
        :return: json like {'seed':''
                            'wallet_info':''
                            'derived_info':''}

        .. code-block:: python

            create a btc wallet by address:
            create("test5", addresses="bcrt1qzm6y9j0zg9nnludkgtc0pvhet0sf76szjw7fjw")
            create a eth wallet by address:
            create("test4", addresses="0x....", coin="eth")
            create a btc wallet by privkey:
            create("test3", password=password, purpose=84, privkeys="cRR5YkkGHTph8RsM1bQv7YSzY27hxBBhoJnVdHjGnuKntY7RgoGw")
            create a eth wallet by privkey:
            create("test3", password=password, privkeys="0xe6841ceb170becade0a4aa3e157f08871192f9de1c35835de5e1b47fc167d27e", coin="eth")
            create a btc wallet by seed:
            create(name, password, seed='pottery curtain belt canal cart include raise receive sponsor vote embody offer')
            create a eth wallet by seed:
            create(name, password, seed='pottery curtain belt canal cart include raise receive sponsor vote embody offer', coin="eth")
        """
        try:
            # With no wallets yet there is nothing to check passwords
            # against; watch-only creation needs no password at all.
            if self.get_wallet_num() == 0:
                self.check_pw_wallet = None
            if addresses is None:
                self.check_password(password)
        except BaseException as e:
            raise e
        wallet = None
        watch_only = False
        new_seed = False
        # The branches below are mutually exclusive and checked in priority
        # order: addresses > privkeys/keystores > seed+derivation > master >
        # plain seed (generated when absent).
        if addresses is not None:
            watch_only = True
            wallet_type = f"{coin}-watch-standard"
            if coin == "btc":
                wallet = Imported_Wallet.from_pubkey_or_addresses(coin, self.config, addresses)
            elif coin in self.coins:
                wallet = Imported_Eth_Wallet.from_pubkey_or_addresses(coin, self.config, addresses)
            else:
                raise BaseException("Only support BTC/ETH")
        elif privkeys is not None and coin == "btc":
            wallet_type = f"{coin}-private-standard"
            wallet = Imported_Wallet.from_privkeys(coin, self.config, privkeys, purpose)
        elif coin in self.coins and (privkeys is not None or keystores is not None):
            wallet_type = f"{coin}-private-standard"
            if keystores is not None:
                # keystores always has higher priority
                wallet = Imported_Eth_Wallet.from_keystores(coin, self.config, keystores, keystore_password)
            else:
                wallet = Imported_Eth_Wallet.from_privkeys(coin, self.config, privkeys)
        elif bip39_derivation is not None and seed is not None:
            wallet_type = f"{coin}-derived-standard"
            if coin == "btc":
                wallet = Standard_Wallet.from_seed_or_bip39(coin, self.config, seed, passphrase, bip39_derivation)
            elif coin in self.coins:
                derivation = util.get_keystore_path(bip39_derivation)
                index = int(helpers.get_path_info(bip39_derivation, INDEX_POS))
                wallet = Standard_Eth_Wallet.from_seed_or_bip39(coin, index, self.config, seed, passphrase, derivation)
        elif master is not None:
            # TODO: master is only for btc?
            wallet_type = "btc-standard"
            wallet = Standard_Wallet.from_master_key("btc", self.config, master)
        else:
            wallet_type = f"{coin}-standard"
            if seed is None:
                seed = Mnemonic("english").generate(strength=strength)
                new_seed = True
            if coin == "btc":
                wallet = Standard_Wallet.from_seed_or_bip39(
                    coin, self.config, seed, passphrase, bip44_derivation(0, purpose)
                )
            elif coin in self.coins:
                derivation = util.get_keystore_path(
                    bip44_eth_derivation(0, self.coins[coin]["addressType"], cointype=self.coins[coin]["coinId"])
                )
                index = 0
                wallet = Standard_Eth_Wallet.from_seed_or_bip39(coin, index, self.config, seed, passphrase, derivation)
        wallet.set_name(name)
        exist_wallet = self.daemon.get_wallet(self._wallet_path(wallet.identity))
        if exist_wallet is not None:
            # A non-watch-only wallet may replace an existing watch-only one;
            # stash the parameters and signal the caller via the exception.
            if not watch_only and exist_wallet.is_watching_only():
                self.update_replace_info(
                    wallet,
                    seed=seed,
                    password=password,
                    wallet_type=wallet_type,
                    bip39_derivation=bip39_derivation,
                )
                raise BaseException("Replace Watch-olny wallet:%s" % wallet.identity)
            else:
                raise BaseException(FileAlreadyExist())
        self.create_new_wallet_update(
            wallet=wallet, seed=seed, password=password, wallet_type=wallet_type, bip39_derivation=bip39_derivation
        )
        # Freshly generated seeds start out marked as not backed up.
        if new_seed:
            self.wallet_context.set_backup_info(wallet.keystore.xpub)
        ret = {
            "seed": seed if new_seed else "",
            "wallet_info": [{"coin_type": coin, "name": wallet.identity, "exist": 0}],
            "derived_info": [],
        }
        return json.dumps(ret)
def get_create_info_by_json(self, seed="", wallet_info=None, derived_info=None):
from electrum_gui.android.create_wallet_info import CreateWalletInfo
create_wallet_info = CreateWalletInfo()
create_wallet_info.add_seed(seed)
create_wallet_info.add_wallet_info(wallet_info)
create_wallet_info.add_derived_info(derived_info)
return create_wallet_info.to_json()
def is_watch_only(self):
"""
Check if it is watch only wallet
:return: True/False as bool
"""
self._assert_wallet_isvalid()
return self.wallet.is_watching_only()
def load_all_wallet(self):
"""
Load all wallet info
:return:None
"""
name_wallets = sorted([name for name in os.listdir(self._wallet_path())])
for name in name_wallets:
self.load_wallet(name, password=self.android_id)
def update_wallet_password(self, old_password, new_password):
"""
Update password
:param old_password: old_password as string
:param new_password: new_password as string
:return:None
"""
self._assert_daemon_running()
for _name, wallet in self.daemon._wallets.items():
wallet.update_password(old_pw=old_password, new_pw=new_password, str_pw=self.android_id)
    def check_password(self, password):
        """
        Check the wallet password against a reference wallet.

        :param password: as string
        :return: None; raises when the password is wrong
        """
        try:
            if self.check_pw_wallet is None:
                # Lazily pick any non-hw, non-watch-only wallet to validate
                # against (see get_check_wallet).
                self.check_pw_wallet = self.get_check_wallet()
            self.check_pw_wallet.check_password(password, str_pw=self.android_id)
        except BaseException as e:
            # "out of range" errors are deliberately swallowed (logged only).
            # NOTE(review): assumes e.args[0] is a string; a non-str first
            # arg would raise AttributeError on .find here -- confirm callers.
            if len(e.args) != 0:
                if -1 != e.args[0].find("out of range"):
                    log_info.info("out of range when check_password error {}.".format(e))
                    pass
                else:
                    raise e
    def recovery_confirmed(self, name_list, hw=False):
        """
        After an HD-seed recovery produced candidate wallets, confirm which
        of them the user actually wants to keep.

        :param name_list: wallets to import, as JSON list like [name, name2, ...]
        :param hw: True when recovering from a hardware device
        :return: None
        """
        name_list = json.loads(name_list)
        if len(name_list) != 0:
            for name in name_list:
                if self.recovery_wallets.__contains__(name):
                    recovery_info = self.recovery_wallets.get(name)
                    wallet = recovery_info["wallet"]
                    self.daemon.add_wallet(wallet)
                    # Wallets were created hidden during recovery; reveal
                    # and persist the confirmed ones.
                    wallet.hide_type = False
                    wallet.save_db()
                    if not hw:
                        self.set_hd_wallet(wallet)
                    coin = wallet.coin
                    if coin in self.coins:
                        wallet_type = "%s-hw-derived-%s-%s" % (coin, 1, 1) if hw else ("%s-derived-standard" % coin)
                        self.update_devired_wallet_info(
                            bip44_eth_derivation(
                                recovery_info["account_id"],
                                bip43_purpose=self.coins[coin]["addressType"],
                                cointype=self.coins[coin]["coinId"],
                            ),
                            recovery_info["key"],
                            wallet.get_name(),
                            coin,
                        )
                    else:
                        wallet_type = "btc-hw-derived-%s-%s" % (1, 1) if hw else ("btc-derived-standard")
                        self.update_devired_wallet_info(
                            bip44_derivation(recovery_info["account_id"], bip43_purpose=84),
                            recovery_info["key"],
                            wallet.get_name(),
                            coin,
                        )
                    self.wallet_context.set_wallet_type(wallet.identity, wallet_type)
        # Stop and discard every remaining (unconfirmed) candidate.
        for name, info in self.recovery_wallets.items():
            info["wallet"].stop()
        self.recovery_wallets.clear()
def delete_derived_wallet(self):
wallets = json.loads(self.list_wallets())
for wallet_info in wallets:
for wallet_id, info in wallet_info.items():
try:
if -1 != info["type"].find("-derived-") and -1 == info["type"].find("-hw-"):
key_in_daemon = self._wallet_path(wallet_id)
wallet_obj = self.daemon.get_wallet(key_in_daemon)
self.delete_wallet_devired_info(wallet_obj)
self.delete_wallet_from_deamon(key_in_daemon)
self.wallet_context.remove_type_info(wallet_id)
except Exception as e:
raise BaseException(e)
self.hd_wallet = None
def get_check_wallet(self):
wallets = self.daemon.get_wallets()
for key, wallet in wallets.items():
if not isinstance(wallet.keystore, Hardware_KeyStore) and not wallet.is_watching_only():
return wallet
# key = sorted(wallets.keys())[0]
# # value = wallets.values()[0]
# return wallets[key]
def update_recover_list(self, recovery_list, balance, wallet_obj, label, coin):
show_info = {}
show_info["coin"] = coin
show_info["blance"] = str(balance)
show_info["name"] = str(wallet_obj)
show_info["label"] = label
show_info["exist"] = "1" if self.daemon.get_wallet(self._wallet_path(wallet_obj.identity)) is not None else "0"
recovery_list.append(show_info)
    def filter_wallet(self):
        """
        Scan the recovery candidates and keep only wallets that show on-chain
        activity (eth: any txid; btc: any history entry), returning display
        summaries for them.

        :return: list of summary dicts (see update_recover_list)
        """
        recovery_list = []
        for name, wallet_info in self.recovery_wallets.items():
            try:
                wallet = wallet_info["wallet"]
                coin = wallet.coin
                if coin in self.coins:
                    with self.pywalib.override_server(self.coins[coin]):
                        address = wallet.get_addresses()[0]
                        # Best-effort: a failed txid lookup is treated as
                        # "no activity", not as an error.
                        try:
                            txids = self.pywalib.get_all_txid(address)
                        except Exception:
                            txids = None
                        if txids:
                            balance_info = wallet.get_all_balance(address, self.coins[coin]["symbol"])
                            if not balance_info:
                                continue
                            balance_info = self._fill_balance_info_with_fiat(wallet.coin, balance_info)
                            balance_info = balance_info.get(coin)
                            fiat_str = self.daemon.fx.ccy_amount_str(balance_info.get("fiat") or 0, True)
                            balance = f"{balance_info.get('balance', '0')} ({fiat_str} {self.ccy})"
                            self.update_recover_list(recovery_list, balance, wallet, wallet.get_name(), coin)
                            continue
                else:
                    # btc: one history entry is enough; record the confirmed
                    # + unconfirmed balance and stop iterating.
                    history = reversed(wallet.get_history())
                    for item in history:
                        c, u, _ = wallet.get_balance()
                        self.update_recover_list(
                            recovery_list, self.format_amount_and_units(c + u), wallet, wallet.get_name(), "btc"
                        )
                        break
            except BaseException as e:
                raise e
        return recovery_list
def update_recovery_wallet(self, key, wallet_obj, bip39_derivation, name, coin):
wallet_info = {}
wallet_info["key"] = key
wallet_info["wallet"] = wallet_obj
wallet_info["account_id"] = int(self.get_account_id(bip39_derivation, coin))
wallet_info["name"] = name
return wallet_info
def get_coin_derived_path(self, account_id, coin="btc", purpose=84):
if coin == "btc":
return bip44_derivation(account_id, bip43_purpose=purpose)
else:
return bip44_eth_derivation(
account_id, self.coins[coin]["addressType"], cointype=self.coins[coin]["coinId"]
)
    def recovery_import_create_hw_wallet(self, i, name, m, n, xpubs, coin="btc"):
        """
        Build a hidden hardware-backed candidate wallet during recovery and
        register it in self.recovery_wallets (confirmed later by
        recovery_confirmed).

        :param i: account index
        :param name: wallet display name
        :param m: multisig threshold (1 for single-sig)
        :param n: number of cosigners (1 for single-sig)
        :param xpubs: xpub obtained from the device
        :param coin: btc/eth as string
        :return: None
        """
        try:
            self.set_multi_wallet_info(name, m, n)
            self.add_xpub(xpubs, self.hw_info["device_id"], i, self.hw_info["type"], coin=coin)
            # Wallet storage starts under a temporary path; it is re-pointed
            # to the identity-based path below.
            temp_path = helpers.get_temp_file()
            path = self._wallet_path(temp_path)
            storage, db = self.wizard.create_storage(path=path, password="", coin=coin)
            if storage:
                if coin in self.coins:
                    # wallet = Eth_Wallet(db, storage, config=self.config, index=self.hw_info['account_id'])
                    wallet = Standard_Eth_Wallet(db, storage, config=self.config, index=i)
                else:
                    wallet = Wallet(db, storage, config=self.config)
                wallet.set_name(name)
                wallet.coin = coin
                # Hidden until the user confirms it in recovery_confirmed.
                wallet.hide_type = True
                wallet.storage.set_path(self._wallet_path(wallet.identity))
                wallet.update_password(old_pw=None, new_pw=None, str_pw=self.android_id, encrypt_storage=True)
                if coin == "btc":
                    wallet.start_network(self.daemon.network)
                self.recovery_wallets[wallet.identity] = self.update_recovery_wallet(
                    xpubs + coin.lower(), wallet, self.get_coin_derived_path(i, coin), name, coin
                )
                self.wizard = None
        except Exception as e:
            raise BaseException(e)
    def recovery_create_subfun(self, coin, account_id, hw, name, seed, password, passphrase, path):
        """
        Recover one account: create a candidate wallet for every relevant
        purpose (btc tries 49/44/84; other coins use their configured type).

        :param coin: btc or a key of self.coins
        :param account_id: account index being recovered
        :param hw: True when recovering via a hardware device
        :param name: candidate wallet name
        :param seed: mnemonic (software recovery only)
        :param password: wallet password (software recovery only)
        :param passphrase: seed extension passphrase
        :param path: device transport (hardware recovery only)
        :return: True on completion; raises UserCancel when the class-level
            recovery flag was cleared by another call

        NOTE(review): a coin that is neither "btc" nor in self.coins leaves
        `type_list` unassigned and raises UnboundLocalError.
        """
        try:
            if coin == "btc":
                type_list = [49, 44, 84]
            elif coin in self.coins:
                type_list = [self.coins[coin]["addressType"]]
            for xtype in type_list:
                bip39_derivation = self.get_coin_derived_path(account_id, coin, purpose=xtype)
                # Cooperative cancellation: another thread clears the flag
                # to abort; we clean up and re-arm it before bailing out.
                if not AndroidCommands._recovery_flag:
                    self.recovery_wallets.clear()
                    AndroidCommands._set_recovery_flag(True)
                    raise UserCancel()
                if not hw:
                    self.recovery_create(
                        name,
                        seed,
                        password=password,
                        bip39_derivation=bip39_derivation,
                        passphrase=passphrase,
                        coin=coin,
                    )
                else:
                    _type = PURPOSE_TO_ADDRESS_TYPE.get(xtype, "")
                    xpub = self.get_xpub_from_hw(path=path, _type=_type, account_id=account_id, coin=coin)
                    self.recovery_import_create_hw_wallet(account_id, name, 1, 1, xpub, coin=coin)
            return True
        except BaseException as e:
            raise e
    def recovery_wallet(
        self, seed=None, password=None, passphrase="", coin="btc", xpub=None, hw=False, path="bluetooth"
    ):
        """
        Recover all derived wallets of one coin: first the accounts recorded
        in the context for *xpub*, then any gap accounts reported by
        DerivedInfo.

        :param seed: mnemonic (software recovery)
        :param password: wallet password (software recovery)
        :param passphrase: seed extension passphrase
        :param coin: btc/eth as string
        :param xpub: bookkeeping key (master xpub + coin suffix)
        :param hw: True when recovering via hardware
        :param path: device transport (hardware only)
        :return: False when a sub-step reports failure, otherwise None
            (NOTE(review): recovery_create_subfun only ever returns True or
            raises, so the False branches look unreachable from here)
        """
        derived = DerivedInfo(self.config)
        derived.init_recovery_num()
        AndroidCommands._set_recovery_flag(True)
        for derived_wallet in self.wallet_context.iter_derived_wallets(xpub):
            flag = self.recovery_create_subfun(
                coin, derived_wallet['account_id'], hw, derived_wallet['name'], seed, password, passphrase, path
            )
            if not flag:
                return False
            derived.update_recovery_info(derived_wallet['account_id'])
        derived.reset_list()
        account_list = derived.get_list()
        if account_list is not None:
            for i in account_list:
                flag = self.recovery_create_subfun(
                    coin, i, hw, "%s_derived_%s" % (coin, i), seed, password, passphrase, path
                )
                if not flag:
                    return False
def get_hd_wallet(self):
if self.hd_wallet is None:
wallets = self.daemon.get_wallets()
for key, wallet in wallets.items():
if self.wallet_context.is_hd(wallet.identity):
self.hd_wallet = wallet
break
else:
raise BaseException(UnavaiableHdWallet())
return self.hd_wallet
def get_hd_wallet_encode_seed(self, seed=None, coin="", passphrase=""):
if seed is not None:
path = bip44_derivation(0, 84)
ks = keystore.from_bip39_seed(seed, passphrase, path)
self.config.set_key("current_hd_xpub", ks.xpub)
return ks.xpub + coin.lower()
else:
return self.config.get("current_hd_xpub", "") + coin.lower()
    @classmethod
    def _set_recovery_flag(cls, flag=False):
        """
        Set the class-level flag that lets an in-progress recovery be
        withdrawn: recovery loops check it and abort when it is False.

        :param flag: True/False
        :return: None
        """
        cls._recovery_flag = flag
def filter_wallet_with_account_is_zero(self):
wallet_list = []
for wallet_id, wallet_info in self.recovery_wallets.items():
try:
wallet = wallet_info["wallet"]
coin = wallet.coin
derivation = helpers.get_derivation_path(wallet, wallet.get_addresses()[0])
account_id = int(self.get_account_id(derivation, coin if coin in self.coins else "btc"))
purpose = int(derivation.split("/")[PURPOSE_POS].split("'")[0])
if account_id != 0:
continue
if coin in self.coins or purpose == 49:
exist = 1 if self.daemon.get_wallet(self._wallet_path(wallet_id)) is not None else 0
wallet_info = {
'coin_type': coin,
'name': wallet.identity,
'exist': exist,
'label': wallet.get_name(),
}
wallet_list.append(wallet_info)
except BaseException as e:
raise e
return wallet_list
    def recovery_hd_derived_wallet(
        self, password=None, seed=None, passphrase="", xpub=None, hw=False, path="bluetooth"
    ):
        """
        Recover the whole HD wallet group (btc first, then every coin in
        self.coins) and return the filtered results.

        :param password: wallet password (software recovery)
        :param seed: mnemonic (software recovery)
        :param passphrase: seed extension passphrase
        :param xpub: bookkeeping key (hardware recovery)
        :param hw: True when recovering via hardware
        :param path: device transport (hardware only)
        :return: json like {'seed':'', 'wallet_info':'', 'derived_info':''}
        """
        if hw:
            self.recovery_wallet(xpub=xpub, hw=hw, path=path, coin="btc")
            for coin, info in self.coins.items():
                # Point the eth client at each coin's server while scanning.
                with PyWalib.override_server(info):
                    self.recovery_wallet(xpub=xpub, hw=hw, path=path, coin=coin)
        else:
            # Software recovery derives a fresh bookkeeping key per coin.
            xpub = self.get_hd_wallet_encode_seed(seed=seed, coin="btc")
            self.recovery_wallet(seed, password, passphrase, xpub=xpub, hw=hw)
            for coin, info in self.coins.items():
                xpub = self.get_hd_wallet_encode_seed(seed=seed, coin=coin)
                with PyWalib.override_server(info):
                    self.recovery_wallet(seed, password, passphrase, coin=coin, xpub=xpub, hw=hw)
        recovery_list = self.filter_wallet()
        wallet_data = self.filter_wallet_with_account_is_zero()
        out = self.get_create_info_by_json(wallet_info=wallet_data, derived_info=recovery_list)
        return json.dumps(out)
def get_derivat_path(self, purpose=84, coin=None):
"""
Get derivation path
:param coin_type: 44/49/84 as int
:return: 'm/44'/0/0' as string
"""
if coin != "btc":
return bip44_eth_derivation(0, self.coins[coin]["addressType"], cointype=self.coins[coin]["coinId"])
else:
return bip44_derivation(0, purpose)
    def create_derived_wallet(self, name, password, coin="btc", purpose=84, strength=128):
        """
        Create a BTC/ETH wallet derived from the HD seed.

        :param name: name as str
        :param password: password as str
        :param coin: btc/eth as str
        :param purpose: (44/84/49), for btc only
        :param strength: length of the mnemonic in bits (128/256)
        :return: json like {'seed':''
                            'wallet_info':''
                            'derived_info':''}
        """
        # NOTE: change the below check if adding support for other coins.
        # This should be a call like this:
        # ```
        # if coin_manager.is_derived_wallet_supported(coin):
        #     raise BaseException(f"Derived wallet of {coin} isn't supported.")
        # ```
        try:
            self.check_password(password)
        except BaseException as e:
            raise e
        seed = self.get_hd_wallet().get_seed(password)
        coin_type = constants.net.BIP44_COIN_TYPE if coin == "btc" else self.coins[coin]["coinId"]
        # Non-btc coins always use their configured address-type purpose.
        purpose = purpose if coin == "btc" else self.coins[coin]["addressType"]
        encode_seed = self.get_hd_wallet_encode_seed(seed=seed, coin=coin)
        # The derived list yields the next free account index, an empty list
        # when the per-seed limit is reached, or None when nothing exists yet.
        list_info = self.get_derived_list(encode_seed)
        if list_info is not None:
            if len(list_info) == 0:
                raise BaseException(DerivedWalletLimit())
            account_id = list_info[0]
        else:
            account_id = 0
        if coin in self.coins:
            derivat_path = bip44_eth_derivation(account_id, purpose, cointype=coin_type)
        else:
            derivat_path = bip44_derivation(account_id, purpose)
        return self.create(name, password, seed=seed, bip39_derivation=derivat_path, strength=strength, coin=coin)
    def recovery_create(self, name, seed, password, bip39_derivation, passphrase="", coin="btc"):
        """
        Build one hidden software candidate wallet from *seed* during
        recovery and register it in self.recovery_wallets (confirmed later
        by recovery_confirmed).

        :param name: candidate name; account 0 gets a canonical "<COIN>-1"
            or "btc-derived-<purpose>" name instead
        :param seed: mnemonic as string
        :param password: wallet password
        :param bip39_derivation: full derivation path for this account
        :param passphrase: seed extension passphrase
        :param coin: btc/eth as string
        :return: None
        :raises BaseException: FileAlreadyExist when the temp path collides
        """
        try:
            self.check_password(password)
        except BaseException as e:
            raise e
        account_id = int(self.get_account_id(bip39_derivation, coin))
        if account_id == 0:
            # Account 0 wallets carry the canonical first-wallet names.
            purpose = int(helpers.get_path_info(bip39_derivation, PURPOSE_POS))
            if purpose == 49:
                name = "BTC-1"
            elif coin in self.coins:
                name = "%s-1" % coin.upper()
            else:
                name = "btc-derived-%s" % purpose
        temp_path = helpers.get_temp_file()
        path = self._wallet_path(temp_path)
        if exists(path):
            raise BaseException(FileAlreadyExist())
        storage = WalletStorage(path)
        db = WalletDB("", manual_upgrades=False)
        ks = keystore.from_bip39_seed(
            seed, passphrase, util.get_keystore_path(bip39_derivation) if coin in self.coins else bip39_derivation
        )
        db.put("keystore", ks.dump())
        if coin in self.coins:
            wallet = Standard_Eth_Wallet(db, storage, config=self.config, index=account_id)
            wallet.wallet_type = "%s_standard" % coin
        else:
            wallet = Standard_Wallet(db, storage, config=self.config)
        # Hidden until the user confirms it in recovery_confirmed.
        wallet.hide_type = True
        wallet.set_name(name)
        wallet.coin = coin
        wallet.storage.set_path(self._wallet_path(wallet.identity))
        self.recovery_wallets[wallet.identity] = self.update_recovery_wallet(
            self.get_hd_wallet_encode_seed(seed=seed, coin=coin), wallet, bip39_derivation, name, coin
        )
        wallet.update_password(old_pw=None, new_pw=password, str_pw=self.android_id, encrypt_storage=True)
        if coin == "btc":
            wallet.start_network(self.daemon.network)
        wallet.save_db()
def get_devired_num(self, coin="btc"):
"""
Get devired HD num by app
:param coin:btc/eth as string
:return: num as int
"""
xpub = self.get_hd_wallet_encode_seed(coin=coin.lower())
return self.wallet_context.get_derived_num(xpub)
def delete_devired_wallet_info(self, wallet_obj, hw=False):
derivation = helpers.get_derivation_path(wallet_obj, wallet_obj.get_addresses()[0])
coin = wallet_obj.coin
account_id = self.get_account_id(derivation, coin)
if hw:
if coin == "btc":
xpub = wallet_obj.get_derived_master_xpub() + 'btc'
else:
xpub = wallet_obj.keystore.xpub + coin.lower()
else:
xpub = self.get_hd_wallet_encode_seed(coin=coin)
self.wallet_context.remove_derived_wallet(xpub, account_id)
def get_account_id(self, path, coin):
if coin in self.coins:
return helpers.get_path_info(path, INDEX_POS)
else:
return helpers.get_path_info(path, ACCOUNT_POS)
def update_devired_wallet_info(self, bip39_derivation, xpub, name, coin):
account_id = self.get_account_id(bip39_derivation, coin)
self.wallet_context.add_derived_wallet(xpub, name, account_id)
def get_all_mnemonic(self):
"""
Get all mnemonic, num is 2048
:return: json like "[job, ...]"
"""
return json.dumps(Wordlist.from_file("english.txt"))
    def get_all_wallet_balance(self):
        """
        Get all wallet balances, sorted with non-zero balances first (by fiat,
        descending) and zero balances after (by creation order).

        :return:
        {
          "all_balance": "21,233.46 CNY",
          "wallet_info": [
            {
              "name": "",
              "label": "",
              "btc": "0.005 BTC",
              "fiat": "1,333.55",
              "wallets": [
                 {"coin": "btc", "balance": "", "fiat": ""}
              ]
            },
            {
              "name": "",
              "label": "",
              "wallets": [
                 { "coin": "btc", "balance": "", "fiat": ""},
                 { "coin": "usdt", "balance": "", "fiat": ""}
              ]
            }
          ]
        }
        """
        out = {}
        try:
            all_balance = Decimal("0")
            all_wallet_info = []
            for wallet in self.daemon.get_wallets().values():
                wallet_info = {"name": wallet.get_name(), "label": str(wallet)}
                sum_fiat = Decimal(0)
                coin = wallet.coin
                if coin in self.coins:
                    with self.pywalib.override_server(self.coins[coin]):
                        checksum_from_address = self.pywalib.web3.toChecksumAddress(wallet.get_addresses()[0])
                        balances_info = wallet.get_all_balance(checksum_from_address, self.coins[coin]["symbol"])
                        balances_info = self._fill_balance_info_with_fiat(wallet.coin, balances_info)
                        # Show the chain's native coin first, then tokens by
                        # descending fiat value.
                        sorted_balances = [balances_info.pop(coin, {})]
                        sorted_balances.extend(
                            sorted(
                                balances_info.values(),
                                key=lambda i: (Decimal(i.get("fiat", 0)), Decimal(i.get("balance", 0))),
                                reverse=True,
                            )
                        )
                        wallet_balances = []
                        for info in sorted_balances:
                            fiat = info.get("fiat") or 0
                            copied_info = {
                                "coin": info.get("symbol") or coin,
                                "address": info.get("address"),
                                "balance": info.get("balance", "0"),
                                "fiat": f"{self.daemon.fx.ccy_amount_str(fiat, True)} {self.ccy}",
                            }
                            wallet_balances.append(copied_info)
                            sum_fiat += fiat
                            all_balance += fiat
                        wallet_info["wallets"] = wallet_balances
                        wallet_info["sum_fiat"] = sum_fiat
                        all_wallet_info.append(wallet_info)
                else:
                    # btc: confirmed + unconfirmed, rendered via the fx rate.
                    c, u, x = wallet.get_balance()
                    balance = self.daemon.fx.format_amount_and_units(c + u) or f"0 {self.ccy}"
                    fiat = Decimal(balance.split()[0].replace(",", ""))
                    all_balance += fiat
                    wallet_info["btc"] = self.format_amount(c + u)  # fixme deprecated field
                    wallet_info["fiat"] = balance  # fixme deprecated field
                    wallet_info["wallets"] = [
                        {
                            "coin": "btc",
                            "balance": wallet_info["btc"],
                            "fiat": wallet_info["fiat"],
                        }
                    ]
                    wallet_info["sum_fiat"] = Decimal(wallet_info["fiat"].split()[0].replace(",", ""))
                    all_wallet_info.append(wallet_info)
            no_zero_balance_wallets = (i for i in all_wallet_info if i["sum_fiat"] > 0)
            no_zero_balance_wallets = sorted(
                no_zero_balance_wallets, key=lambda i: i["sum_fiat"], reverse=True
            )  # sort no-zero balance wallet by fiat currency in reverse order
            zero_balance_wallets = (i for i in all_wallet_info if i["sum_fiat"] <= 0)
            zero_balance_wallets_dict = {i["label"]: i for i in zero_balance_wallets}
            sorted_wallet_labels = (i[0] for i in self.wallet_context.get_stored_wallets_types())
            zero_balance_wallets = [
                zero_balance_wallets_dict[i] for i in sorted_wallet_labels if i in zero_balance_wallets_dict
            ]  # sort zero balance wallet by created time in reverse order
            all_wallet_info = [*no_zero_balance_wallets, *zero_balance_wallets]
            out["all_balance"] = "%s %s" % (all_balance, self.ccy)
            out["wallet_info"] = all_wallet_info
            return json.dumps(out, cls=DecimalEncoder)
        except BaseException as e:
            raise e
def set_rbf(self, status_rbf):
    """Enable or disable replace-by-fee (RBF) for new transactions.

    :param status_rbf: desired RBF state, True/False as bool
    :return: None; no-op when the stored setting already matches
    """
    current = self.config.get("use_rbf", True)
    if current != status_rbf:
        self.config.set_key("use_rbf", status_rbf)
        self.rbf = status_rbf
def get_rbf_status(self, tx_hash):
    """Return True when the given transaction can still be fee-bumped via RBF.

    A transaction is bumpable when it exists in the wallet db, is ours,
    RBF is enabled, its original fee is known, and it is still unconfirmed.

    :param tx_hash: transaction id as hex string
    :return: bool
    """
    tx = self.wallet.db.get_transaction(tx_hash)
    if not tx:
        return False
    height = self.wallet.get_tx_height(tx_hash).height
    _, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
    is_unconfirmed = height <= 0
    # note: the current implementation of RBF *needs* the old tx fee,
    # hence the "fee is not None" requirement.
    # (Removed: an always-true "if tx:" guard, an if/else returning literal
    # True/False, and a catch-and-reraise of BaseException that did nothing.)
    return bool(is_mine and self.rbf and fee is not None and is_unconfirmed)
def format_fee_rate(self, fee_rate):
    """Format *fee_rate* (given in sat/kB) as a human-readable sat/byte string."""
    per_byte = fee_rate / 1000
    return "{} sat/byte".format(util.format_fee_satoshis(per_byte, num_zeros=self.num_zeros))
def get_rbf_fee_info(self, tx_hash):
    """Compute a suggested replacement fee for an RBF bump of *tx_hash*.

    The new rate is max(old_rate * 1.5, old_rate + 1) sat/vbyte, quantized
    to one decimal place; a draft replacement tx is built via create_bump_fee.

    :param tx_hash: transaction id as hex string
    :return: json string with current_feerate / new_feerate / fee / tx
    :raise BaseException: when the tx or its original fee cannot be found
    """
    tx = self.wallet.db.get_transaction(tx_hash)
    if not tx:
        raise BaseException(FailedGetTx())
    txid = tx.txid()
    assert txid
    fee = self.wallet.get_tx_fee(txid)
    if fee is None:
        # RBF needs the original fee to compute a strictly larger one.
        # (Fixed typo in the user-facing message: "intormation".)
        raise BaseException(
            _("RBF(Replace-By-Fee) fails because it does not get the fee information of the original transaction.")
        )
    tx_size = tx.estimated_size()
    old_fee_rate = fee / tx_size  # sat/vbyte
    new_rate = Decimal(max(old_fee_rate * 1.5, old_fee_rate + 1)).quantize(Decimal("0.0"))
    new_tx = json.loads(self.create_bump_fee(tx_hash, str(new_rate)))
    ret_data = {
        "current_feerate": self.format_fee_rate(1000 * old_fee_rate),
        "new_feerate": str(new_rate),
        "fee": new_tx["fee"],
        "tx": new_tx["new_tx"],
    }
    return json.dumps(ret_data)
# TODO: new_tx in history or invoices, need test
def create_bump_fee(self, tx_hash, new_fee_rate):
    """Build (but do not broadcast) an RBF replacement for *tx_hash*.

    :param tx_hash: id of the tx to replace
    :param new_fee_rate: target fee rate, passed through to wallet.bump_fee
    :return: json string {"new_tx": ..., "fee": ...}, or False when the
        original tx is unknown to the wallet db
    """
    try:
        tx = self.wallet.db.get_transaction(tx_hash)
        if not tx:
            # NOTE(review): returns False here but a json string on success;
            # callers must handle both shapes.
            return False
        coins = self.wallet.get_spendable_coins(None, nonlocal_only=False)
        new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=coins)
        fee = new_tx.get_fee()
    except BaseException as e:
        raise BaseException(e)
    new_tx.set_rbf(self.rbf)
    out = {"new_tx": str(new_tx), "fee": fee}
    # Remember the unsigned replacement so confirm_rbf_tx() can save/push it.
    self.rbf_tx = new_tx
    return json.dumps(out)
def confirm_rbf_tx(self, tx_hash):
    """Persist the replacement tx previously prepared by create_bump_fee().

    :param tx_hash: id of the *old* transaction being replaced
    :return: the replacement tx object (self.rbf_tx)
    """
    try:
        self.do_save(self.rbf_tx)
    except BaseException:
        # Best effort: failing to save locally is logged, not fatal.
        log_info.info("do save failed {}".format(self.rbf_tx))
        pass
    try:
        if self.label_flag and self.wallet.wallet_type != "standard":
            # Sync the replacement through the label plugin for non-standard
            # (e.g. multisig) wallets.
            self.label_plugin.push_tx(
                self.wallet, "rbftx", self.rbf_tx.txid(), str(self.rbf_tx), tx_hash_old=tx_hash
            )
    except Exception as e:
        log_info.info("push_tx rbftx error {}".format(e))
        pass
    return self.rbf_tx
def get_rbf_or_cpfp_status(self, tx_hash):
    """Report which fee-bump strategies apply to an unconfirmed tx.

    :param tx_hash: transaction id as hex string
    :return: json string, e.g. {"rbf": true}, {"cpfp": true} or {}
    :raise BaseException: when the tx is unknown to the wallet db
    """
    try:
        status = {}
        tx = self.wallet.db.get_transaction(tx_hash)
        if not tx:
            raise BaseException("tx is None")
        tx_details = self.wallet.get_tx_info(tx)
        is_unconfirmed = tx_details.tx_mined_status.height <= 0
        if is_unconfirmed and tx:
            # note: the current implementation of rbf *needs* the old tx fee
            if tx_details.can_bump and tx_details.fee is not None:
                status["rbf"] = True
            else:
                # RBF not possible; check whether a child-pays-for-parent
                # transaction can be constructed instead.
                child_tx = self.wallet.cpfp(tx, 0)
                if child_tx:
                    status["cpfp"] = True
        return json.dumps(status)
    except BaseException as e:
        raise e
def get_cpfp_info(self, tx_hash, suggested_feerate=None):
    """Compute fee information for a CPFP (child-pays-for-parent) bump.

    :param tx_hash: id of the unconfirmed parent transaction
    :param suggested_feerate: optional target rate in sat/byte; when None,
        the config's dynamic fee estimate (sat/kB) is used
    :return: json string describing sizes, rates and the suggested child fee
    :raise BaseException: when the parent tx, its fee, or a fee estimate is missing
    """
    try:
        self._assert_wallet_isvalid()
        parent_tx = self.wallet.db.get_transaction(tx_hash)
        if not parent_tx:
            raise BaseException(FailedGetTx())
        info = {}
        child_tx = self.wallet.cpfp(parent_tx, 0)
        if child_tx:
            total_size = parent_tx.estimated_size() + child_tx.estimated_size()
            parent_txid = parent_tx.txid()
            assert parent_txid
            parent_fee = self.wallet.get_tx_fee(parent_txid)
            if parent_fee is None:
                # (Fixed typo in the user-facing message: "intormation".)
                raise BaseException(
                    _(
                        "CPFP(Child Pays For Parent) fails because it does not get the fee information of the original transaction."
                    )
                )
            info["total_size"] = "(%s) bytes" % total_size
            # The child can spend at most the full value of its input.
            max_fee = child_tx.output_value()
            info["input_amount"] = self.format_amount(max_fee) + " " + self.base_unit

            def get_child_fee_from_total_feerate(fee_per_kb):
                # Fee the child must add so parent+child together hit fee_per_kb.
                fee = fee_per_kb * total_size / 1000 - parent_fee
                fee = min(max_fee, fee)
                fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
                return fee

            if suggested_feerate is None:
                suggested_feerate = self.config.fee_per_kb()
            else:
                # Caller supplies sat/byte; internal math uses sat/kB.
                suggested_feerate = suggested_feerate * 1000
            if suggested_feerate is None:
                raise BaseException(
                    f"""{_("Failed CPFP(Child Pays For Parent)'")}: {_('dynamic fee estimates not available')}"""
                )
            parent_feerate = parent_fee / parent_tx.estimated_size() * 1000
            info["parent_feerate"] = self.format_fee_rate(parent_feerate) if parent_feerate else ""
            info["fee_rate_for_child"] = self.format_fee_rate(suggested_feerate) if suggested_feerate else ""
            fee_for_child = get_child_fee_from_total_feerate(suggested_feerate)
            info["fee_for_child"] = util.format_satoshis_plain(fee_for_child, decimal_point=self.decimal_point)
            if fee_for_child is None:
                raise BaseException("fee_for_child is none")
            out_amt = max_fee - fee_for_child
            out_amt_str = (self.format_amount(out_amt) + " " + self.base_unit) if out_amt else ""
            info["output_amount"] = out_amt_str
            comb_fee = parent_fee + fee_for_child
            comb_fee_str = (self.format_amount(comb_fee) + " " + self.base_unit) if comb_fee else ""
            info["total_fee"] = comb_fee_str
            comb_feerate = comb_fee / total_size * 1000
            comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ""
            info["total_feerate"] = comb_feerate_str
            # NOTE(review): this second None-check is unreachable (fee_for_child
            # is numeric by now); kept to preserve behavior verbatim.
            if fee_for_child is None:
                raise BaseException(_("Sub-transaction fee error."))  # fee left empty, treat is as "cancel"
            if fee_for_child > max_fee:
                raise BaseException(_("Exceeding the Maximum fee limit."))
        return json.dumps(info)
    except BaseException as e:
        raise e
def create_cpfp_tx(self, tx_hash, fee_for_child):
    """Create, save and return a CPFP child transaction as json.

    :param tx_hash: id of the unconfirmed parent transaction
    :param fee_for_child: child fee in display units (converted via get_amount)
    :return: json string {"new_tx": ...}
    :raise BaseException: when the parent tx cannot be found
    """
    try:
        self._assert_wallet_isvalid()
        parent_tx = self.wallet.db.get_transaction(tx_hash)
        if not parent_tx:
            raise BaseException(FailedGetTx())
        new_tx = self.wallet.cpfp(parent_tx, self.get_amount(fee_for_child))
        new_tx.set_rbf(self.rbf)
        out = {"new_tx": str(new_tx)}
        try:
            self.do_save(new_tx)
            if self.label_flag and self.wallet.wallet_type != "standard":
                # Sync the new tx through the label plugin for non-standard wallets.
                self.label_plugin.push_tx(self.wallet, "createtx", new_tx.txid(), str(new_tx))
        except BaseException as e:
            # Best effort: failure to save/sync is logged, not fatal.
            log_info.info("push_tx createtx error {}".format(e))
            pass
        return json.dumps(out)
    except BaseException as e:
        raise e
def get_default_server(self):
    """
    Get default electrum server
    :return: json like {'host':'127.0.0.1', 'port':'3456'}
    """
    try:
        self._assert_daemon_running()
        server = self.network.get_parameters().server
    except BaseException as e:
        raise e
    return json.dumps({"host": server.host, "port": server.port})
def set_server(self, host, port):
    """
    Custom server
    :param host: host as str
    :param port: port as str
    :return: raise except if error
    """
    try:
        self._assert_daemon_running()
        params = self.network.get_parameters()
        server = ServerAddr.from_str_with_inference("%s:%s" % (host, port))
        if not server:
            raise Exception("failed to parse")
        params = params._replace(server=server, auto_connect=True)
        self.network.run_from_another_thread(self.network.set_parameters(params))
    except BaseException as e:
        raise e
def get_server_list(self):
    """
    Get Servers
    :return: servers info as json
    """
    try:
        self._assert_daemon_running()
        available = self.daemon.network.get_servers()
    except BaseException as e:
        raise e
    return json.dumps(available)
def rename_wallet(self, old_name, new_name):
    """
    Rename the wallet
    :param old_name: old name as string
    :param new_name: new name as string
    :return: raise except if error
    """
    try:
        self._assert_daemon_running()
        if old_name is None or new_name is None:
            raise BaseException(("Please enter the correct file name."))
        # Only the stored display name changes; the file on disk keeps its path.
        wallet = self.daemon.get_wallet(self._wallet_path(old_name))
        wallet.set_name(new_name)
        wallet.db.set_modified(True)
        wallet.save_db()
    except BaseException as e:
        raise e
def update_wallet_name(self, old_name, new_name):
    """Rename a wallet file on disk and reload it under the new name.

    Unlike rename_wallet(), this moves the storage file itself, then
    reloads and selects the wallet.

    :param old_name: current file name
    :param new_name: new file name
    :return: new_name on success
    """
    try:
        self._assert_daemon_running()
        if old_name is None or new_name is None:
            raise BaseException("Please enter the correct file name")
        else:
            # Order matters: move the file, drop the stale daemon entry,
            # then load and select the wallet from its new path.
            os.rename(self._wallet_path(old_name), self._wallet_path(new_name))
            self.daemon.pop_wallet(self._wallet_path(old_name))
            self.load_wallet(new_name, password=self.android_id)
            self.select_wallet(new_name)
            return new_name
    except BaseException as e:
        raise e
def switch_wallet(self, name):
    """
    Switching to a specific wallet
    :param name: name as string
    :return: json like
        {
          "name": "",
          "label": "",
          "wallets": [
              {"coin": "usdt", "address": ""},
              ...
          ]
        }
    """
    self._assert_daemon_running()
    if name is None:
        raise FailedToSwitchWallet()
    self.wallet = self.daemon.get_wallet(self._wallet_path(name))
    self.wallet.use_change = self.config.get("use_change", False)
    coin = self.wallet.coin
    if coin in self.coins:
        # Non-BTC chain: point PyWalib at the matching server and expose
        # the wallet's token contracts.
        PyWalib.set_server(self.coins[coin])
        contract_info = self.wallet.get_contract_symbols_with_address()
        info = {"name": name, "label": self.wallet.get_name(), "wallets": contract_info}
    else:
        # BTC wallet: refresh the key pool and notify listeners.
        self.wallet.set_key_pool_size()
        c, u, x = self.wallet.get_balance()
        util.trigger_callback("wallet_updated", self.wallet)
        info = {
            "name": name,
            "label": self.wallet.get_name(),
            "wallets": [],
        }
    if self.label_flag and self.wallet.wallet_type != "standard":
        self.label_plugin.load_wallet(self.wallet)
    return json.dumps(info, cls=DecimalEncoder)
def get_wallet_balance(self):
    """
    Get the current balance of your wallet
    :return: json like
        {
          "all_balance": ""
          "wallets": [
              {"coin": "eth", "address": "", "balance": "", "fiat": ""},
              {"coin": "usdt", "address": "", "balance": "", "fiat": ""}
          ]
        }
    """
    self._assert_wallet_isvalid()
    coin = self.wallet.coin
    if coin in self.coins:
        # Non-BTC chain: collect main-coin + token balances and price them.
        addrs = self.wallet.get_addresses()
        checksum_from_address = self.pywalib.web3.toChecksumAddress(addrs[0])
        balance_info = self.wallet.get_all_balance(checksum_from_address, self.coins[coin]["symbol"])
        balance_info = self._fill_balance_info_with_fiat(self.wallet.coin, balance_info)
        sum_fiat = sum(i.get("fiat", 0) for i in balance_info.values())
        sum_fiat = f"{self.daemon.fx.ccy_amount_str(sum_fiat, True)} {self.ccy}"
        # Main-coin entry first, then tokens ordered by (fiat, balance) desc.
        sorted_balances = [balance_info.pop(coin, {})]
        sorted_balances.extend(
            sorted(
                balance_info.values(),
                key=lambda i: (Decimal(i.get("fiat", 0)), Decimal(i.get("balance", 0))),
                reverse=True,
            )
        )
        wallet_balances = [
            {
                "coin": i.get("symbol") or coin,
                "address": i.get("address"),
                "balance": i.get("balance", "0"),
                "fiat": f"{self.daemon.fx.ccy_amount_str(i.get('fiat') or 0, True)} {self.ccy}",
            }
            for i in sorted_balances
        ]
        info = {"all_balance": sum_fiat, "wallets": wallet_balances}
    else:
        # BTC wallet: confirmed + unconfirmed amounts, formatted via fx.
        c, u, x = self.wallet.get_balance()
        balance = c + u
        fait = self.daemon.fx.format_amount_and_units(balance) if self.daemon.fx else None
        fait = fait or f"0 {self.ccy}"
        info = {
            "all_balance": "%s" % fait,  # fixme deprecated field
            "wallets": [{"coin": "btc", "balance": self.format_amount(balance), "fiat": fait}],
        }
    if self.label_flag and self.wallet.wallet_type != "standard":
        self.label_plugin.load_wallet(self.wallet)
    return json.dumps(info, cls=DecimalEncoder)
def select_wallet(self, name):  # TODO: Will be deleted later
    """
    Select wallet by name
    :param name: name as string
    :return: json like
        {
          "name": "",
          "label": "",
          "wallets": [
              {"coin": "eth", "balance": "", "fiat": ""},
              {"coin": "usdt", "balance": "", "fiat": ""}
          ]
        }
    """
    try:
        self._assert_daemon_running()
        if name is None:
            self.wallet = None
        else:
            self.wallet = self.daemon.get_wallet(self._wallet_path(name))
        # NOTE(review): when name is None, self.wallet is None and the next
        # line raises AttributeError, which the outer except re-wraps.
        self.wallet.use_change = self.config.get("use_change", False)
        coin = self.wallet.coin
        if coin in self.coins:
            # Non-BTC chain: price all balances and return them sorted.
            PyWalib.set_server(self.coins[coin])
            addrs = self.wallet.get_addresses()
            checksum_from_address = self.pywalib.web3.toChecksumAddress(addrs[0])
            balance_info = self.wallet.get_all_balance(checksum_from_address, self.coins[coin]["symbol"])
            balance_info = self._fill_balance_info_with_fiat(self.wallet.coin, balance_info)
            sorted_balances = [balance_info.pop(coin, {})]
            sorted_balances.extend(
                sorted(
                    balance_info.values(),
                    key=lambda i: (Decimal(i.get("fiat", 0)), Decimal(i.get("balance", 0))),
                    reverse=True,
                )
            )
            wallet_balances = [
                {
                    "coin": i.get("symbol") or coin,
                    "address": i.get("address"),
                    "balance": i.get("balance") or "0",
                    "fiat": f"{self.daemon.fx.ccy_amount_str(i.get('fiat') or 0, True)} {self.ccy}",
                }
                for i in sorted_balances
            ]
            info = {"name": name, "label": self.wallet.get_name(), "wallets": wallet_balances}
            return json.dumps(info, cls=DecimalEncoder)
        else:
            c, u, x = self.wallet.get_balance()
            util.trigger_callback("wallet_updated", self.wallet)
            balance = c + u
            fait = self.daemon.fx.format_amount_and_units(balance) if self.daemon.fx else None
            fait = fait or f"0 {self.ccy}"
            info = {
                "balance": self.format_amount(balance) + " (%s)" % fait,  # fixme deprecated field
                "name": name,
                "label": self.wallet.get_name(),
                "wallets": [{"coin": "btc", "balance": self.format_amount(balance), "fiat": fait}],
            }
            # NOTE(review): the label-plugin sync runs only on the BTC branch here.
            if self.label_flag and self.wallet.wallet_type != "standard":
                self.label_plugin.load_wallet(self.wallet)
            return json.dumps(info, cls=DecimalEncoder)
    except BaseException as e:
        raise BaseException(e)
def _fill_balance_info_with_fiat(self, coin: str, balance_info: dict) -> dict:
    """Return a copy of *balance_info* where every entry gains a "fiat" Decimal,
    priced via the main-coin price or the per-token-address price."""
    main_coin_price = price_manager.get_last_price(coin, self.ccy)
    token_addresses = [entry["address"].lower() for entry in balance_info.values() if entry.get("address")]
    chain_code = self._coin_to_chain_code(coin)
    tokens = coin_manager.query_coins_by_token_addresses(chain_code, token_addresses)
    token_prices = {t.token_address.lower(): price_manager.get_last_price(t.code, self.ccy) for t in tokens}
    result = {}
    for key, entry in balance_info.items():
        addr = entry.get("address")
        price = token_prices.get(addr.lower()) if addr else main_coin_price
        result[key] = {**entry, "fiat": Decimal(entry.get("balance") or 0) * (price or Decimal(0))}
    return result
def _coin_to_chain_code(self, coin: str) -> str:
    """Map a coin symbol to its chain code, adding a "t" prefix on testnet."""
    if PyWalib.chain_type == "testnet":
        return f"t{coin}"
    return coin
def list_wallets(self, type_=None):
    """
    List available wallets
    :param type_: None/hw/hd/btc/eth/bsc/heco
    :return: json like "[{"wallet_key":{'type':"", "addr":"", "name":"", "label":"", "device_id": ""}}, ...]"
    exp:
        all_list = testcommond.list_wallets()
        hd_list = testcommond.list_wallets(type='hd')
        hw_list = testcommond.list_wallets(type='hw')
        btc_list = testcommond.list_wallets(type='btc')
        eth_list = testcommond.list_wallets(type='eth')
    """
    coin = None
    generic_wallet_type = None
    if type_ in ("hw", "hd"):
        generic_wallet_type = type_
    elif type_ in ("btc", "eth", "bsc", "heco"):
        coin = type_
    elif type_ is not None:
        raise BaseException(_("Unsupported coin types"))
    wallet_infos = []
    for wallet_id, wallet_type in self.wallet_context.get_stored_wallets_types(generic_wallet_type, coin):
        wallet = self.daemon.get_wallet(self._wallet_path(wallet_id))
        # Hardware wallets carry a device identifier; software wallets don't.
        device_id = wallet.get_device_info() if isinstance(wallet.keystore, Hardware_KeyStore) else ""
        wallet_infos.append(
            {
                wallet_id: {
                    "type": wallet_type,
                    "addr": wallet.get_addresses()[0],
                    "name": wallet.identity,
                    "label": wallet.get_name(),
                    "device_id": device_id,
                }
            }
        )
    return json.dumps(wallet_infos)
def delete_wallet_from_deamon(self, name):
    """Remove a wallet from the running daemon.

    (Method name, including the historical misspelling of "daemon", is kept
    for caller compatibility.)
    """
    try:
        self._assert_daemon_running()
        self.daemon.delete_wallet(name)
    except BaseException as e:
        raise BaseException(e)
def has_history_wallet(self, wallet_obj):
    """Return True when the wallet has at least one historical transaction.

    :param wallet_obj: wallet instance (chain read from wallet_obj.coin)
    :return: bool; False for coins with no recognised history source
    """
    coin = wallet_obj.coin
    if coin in self.coins:
        txids = self.pywalib.get_all_txid(wallet_obj.get_addresses()[0])
        return bool(txids)
    if coin == "btc":
        history = wallet_obj.get_history()
        return bool(history)
    # Previously fell through returning None implicitly; make the falsy
    # result explicit (callers only test truthiness, so this is compatible).
    return False
def reset_config_info(self):
    """Restore configuration values to factory defaults (used on app reset)."""
    self.wallet_context.clear_type_info()
    self.wallet_context.clear_derived_info()
    self.wallet_context.clear_backup_info()
    self.decimal_point = 5
    self.config.set_key("decimal_point", self.decimal_point)
    self.config.set_key("language", "zh_CN")
    # NOTE(review): hard-coded default sync-server address.
    self.config.set_key("sync_server_host", "39.105.86.163:8080")
    self.config.set_key("show_addr_info", {})
def reset_wallet_info(self):
    """
    Reset all wallet info when Reset App
    :return: raise except if error
    """
    try:
        # Delete wallet files and cached tx history from disk.
        util.delete_file(self._wallet_path())
        util.delete_file(self._tx_list_path())
        self.reset_config_info()
        # Drop in-memory references so nothing points at deleted storage.
        self.hd_wallet = None
        self.check_pw_wallet = None
        self.daemon._wallets.clear()
    except BaseException as e:
        raise e
def delete_wallet_devired_info(self, wallet_obj, hw=False):
    """Drop derived-wallet bookkeeping for *wallet_obj*, but only when the
    wallet has no transaction history."""
    if not self.has_history_wallet(wallet_obj):
        # delete wallet info from config
        self.delete_devired_wallet_info(wallet_obj, hw=hw)
def delete_wallet(self, password="", name="", hd=None):
    """
    Delete (a/all hd) wallet
    :param password: Password as string
    :param name: Wallet key
    :param hd: True if you want to delete all hd wallet
    :return: None
    """
    try:
        wallet = self.daemon.get_wallet(self._wallet_path(name))
        # Watch-only and hardware wallets have no local password to verify.
        if not wallet.is_watching_only() and not self.wallet_context.is_hw(name):
            self.check_password(password=password)
        if hd is not None:
            # Any non-None value triggers deletion of all derived wallets.
            self.delete_derived_wallet()
        else:
            if self.wallet_context.is_derived(name):
                hw = self.wallet_context.is_hw(name)
                self.delete_wallet_devired_info(wallet, hw=hw)
            self.delete_wallet_from_deamon(self._wallet_path(name))
            self.wallet_context.remove_type_info(name)
            # os.remove(self._wallet_path(name))
    except Exception as e:
        raise BaseException(e)
def _assert_daemon_running(self):
    """Raise when the background daemon process is not running."""
    if self.daemon_running:
        return
    raise BaseException(
        _("The background process does not start and it is recommended to restart the application.")
    )
# Same wording as in electrum script.
def _assert_wizard_isvalid(self):
    """Raise when no install wizard is currently running."""
    if self.wizard is not None:
        return
    raise BaseException("Wizard not running")
# Guard helper: raises when no wallet is selected. (Previous comment about
# stderr logging was copied from unrelated code.)
def _assert_wallet_isvalid(self):
    """Raise when no wallet is currently selected."""
    if self.wallet is not None:
        return
    raise BaseException(_("You haven't chosen a wallet yet."))
# Guard helper: raises when the HD wallet has not been created. (Previous
# comment about stderr logging was copied from unrelated code.)
def _assert_hd_wallet_isvalid(self):
    """Raise when the HD wallet has not been created yet."""
    if self.hd_wallet is not None:
        return
    raise BaseException(UnavaiableHdWallet())
def _wallet_path(self, name=""):
    """Return the storage path for wallet *name*.

    With name=None, return the path of the currently selected wallet
    (note: the default "" maps into the wallets directory, not this case).
    """
    if name is not None:
        wallets_dir = join(self.user_dir, "wallets")
        util.make_dir(wallets_dir)
        return util.standardize_path(join(wallets_dir, name))
    if not self.wallet:
        raise ValueError("No wallet selected")
    return self.wallet.storage.path
def _tx_list_path(self, name=""):
    """Return (and ensure) the path for cached tx history of *name*."""
    history_dir = join(self.user_dir, "tx_history")
    util.make_dir(history_dir)
    return util.standardize_path(join(history_dir, name))
# Expose every public AndroidCommands method as an electrum console command.
all_commands = commands.known_commands.copy()
for name, func in vars(AndroidCommands).items():
    if not name.startswith("_"):
        all_commands[name] = commands.Command(func, "")

# Map python value types to a platform setter-method name — presumably the
# Android SharedPreferences put* methods; confirm against the consumer.
SP_SET_METHODS = {
    bool: "putBoolean",
    float: "putFloat",
    int: "putLong",
    str: "putString",
}
|
task4_client3.py | import socket
from threading import Thread
def send_message():
    """Read lines from stdin forever and send each to the server socket."""
    while True:
        line = input()
        s.send(line.encode('utf-8'))
def receive_message():
    """Print everything received from the server socket, forever."""
    while True:
        chunk = s.recv(1024)
        print(chunk.decode('utf-8'))
# Connect to the local chat server (runs at import time).
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('127.0.0.1', 8888))
# One thread per direction. NOTE(review): the threads are non-daemon, so
# the process only exits once both loops raise (e.g. on disconnect).
send_th = Thread(target=send_message)
get_th = Thread(target=receive_message)
send_th.start()
get_th.start()
mtTkinter.py | '''Thread-safe version of Tkinter.
Copyright (c) 2009, Allen B. Taylor
This module is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser Public License for more details.
You should have received a copy of the GNU Lesser Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Usage:
import mtTkinter as Tkinter
# Use "Tkinter." as usual.
or
from mtTkinter import *
# Use Tkinter module definitions as usual.
This module modifies the original Tkinter module in memory, making all
functionality thread-safe. It does this by wrapping the Tk class' tk
instance with an object that diverts calls through an event queue when
the call is issued from a thread other than the thread in which the Tk
instance was created. The events are processed in the creation thread
via an 'after' event.
The modified Tk class accepts two additional keyword parameters on its
__init__ method:
mtDebug:
0 = No debug output (default)
1 = Minimal debug output
...
9 = Full debug output
mtCheckPeriod:
Amount of time in milliseconds (default 100) between checks for
out-of-thread events when things are otherwise idle. Decreasing
this value can improve GUI responsiveness, but at the expense of
consuming more CPU cycles.
Note that, because it modifies the original Tkinter module (in memory),
other modules that use Tkinter (e.g., Pmw) reap the benefits automagically
as long as mtTkinter is imported at some point before extra threads are
created.
Author: Allen B. Taylor, a.b.taylor@gmail.com
'''
import sys

# Import the version-appropriate Tkinter/queue module names.
# sys.version_info is robust against multi-digit major versions, unlike the
# old int(sys.version[0]) string parse.
if sys.version_info[0] < 3:
    from Tkinter import *
    import Queue
else:
    from tkinter import *
    import queue as Queue
import threading
class _Tk(object):
"""
Wrapper for underlying attribute tk of class Tk.
"""
def __init__(self, tk, mtDebug = 0, mtCheckPeriod = 10):
self._tk = tk
# Create the incoming event queue.
self._eventQueue = Queue.Queue(1)
# Identify the thread from which this object is being created so we can
# tell later whether an event is coming from another thread.
self._creationThread = threading.currentThread()
# Store remaining values.
self._debug = mtDebug
self._checkPeriod = mtCheckPeriod
def __getattr__(self, name):
# Divert attribute accesses to a wrapper around the underlying tk
# object.
return _TkAttr(self, getattr(self._tk, name))
class _TkAttr(object):
"""
Thread-safe callable attribute wrapper.
"""
def __init__(self, tk, attr):
self._tk = tk
self._attr = attr
def __call__(self, *args, **kwargs):
"""
Thread-safe method invocation.
Diverts out-of-thread calls through the event queue.
Forwards all other method calls to the underlying tk object directly.
"""
# Check if we're in the creation thread.
if threading.currentThread() == self._tk._creationThread:
# We're in the creation thread; just call the event directly.
if self._tk._debug >= 8 or \
self._tk._debug >= 3 and self._attr.__name__ == 'call' and \
len(args) >= 1 and args[0] == 'after':
print('Calling event directly:', self._attr.__name__, args, kwargs)
return self._attr(*args, **kwargs)
else:
# We're in a different thread than the creation thread; enqueue
# the event, and then wait for the response.
responseQueue = Queue.Queue(1)
if self._tk._debug >= 1:
print('Marshalling event:', self._attr.__name__, args, kwargs)
self._tk._eventQueue.put((self._attr, args, kwargs, responseQueue))
isException, response = responseQueue.get()
# Handle the response, whether it's a normal return value or
# an exception.
if isException:
exType, exValue, exTb = response
raise Exception(exType, exValue, exTb)
else:
return response
# Define a hook for class Tk's __init__ method.
def _Tk__init__(self, *args, **kwargs):
    """Hooked Tk.__init__: strip mtTkinter kwargs, wrap self.tk, start polling."""
    # We support some new keyword arguments that the original __init__ method
    # doesn't expect, so separate those out before doing anything else.
    # Pop the known names instead of deleting from kwargs while iterating
    # kwargs.items() — that raised "dictionary changed size during iteration"
    # on Python 3 whenever an mt* kwarg was actually passed.
    new_kwnames = ('mtCheckPeriod', 'mtDebug')
    new_kwargs = {}
    for name in new_kwnames:
        if name in kwargs:
            new_kwargs[name] = kwargs.pop(name)
    # Call the original __init__ method, creating the internal tk member.
    self.__original__init__mtTkinter(*args, **kwargs)
    # Replace the internal tk member with a wrapper that handles calls from
    # other threads.
    self.tk = _Tk(self.tk, **new_kwargs)
    # Set up the first event to check for out-of-thread events.
    self.after_idle(_CheckEvents, self)
# Replace Tk's original __init__ with the hook, keeping a reference to the
# original (under a name-mangling-proof attribute) so the hook can delegate.
Tk.__original__init__mtTkinter = Tk.__init__
Tk.__init__ = _Tk__init__
def _CheckEvents(tk):
"Event checker event."
used = False
try:
# Process all enqueued events, then exit.
while True:
try:
# Get an event request from the queue.
method, args, kwargs, responseQueue = \
tk.tk._eventQueue.get_nowait()
except:
# No more events to process.
break
else:
# Call the event with the given arguments, and then return
# the result back to the caller via the response queue.
used = True
if tk.tk._debug >= 2:
print('Calling event from main thread:', method.__name__, args, kwargs)
try:
responseQueue.put((False, method(*args, **kwargs)))
except (SystemExit, ex):
raise Exception(SystemExit, ex)
except (Exception, ex):
# Calling the event caused an exception; return the
# exception back to the caller so that it can be raised
# in the caller's thread.
from sys import exc_info
exType, exValue, exTb = exc_info()
responseQueue.put((True, (exType, exValue, exTb)))
finally:
# Schedule to check again. If we just processed an event, check
# immediately; if we didn't, check later.
if used:
tk.after_idle(_CheckEvents, tk)
else:
tk.after(tk.tk._checkPeriod, _CheckEvents, tk)
# Test thread entry point.
def _testThread(root):
text = "This is Tcl/Tk version %s" % TclVersion
if TclVersion >= 8.1:
try:
text = text + unicode("\nThis should be a cedilla: \347",
"iso-8859-1")
except NameError:
pass # no unicode support
try:
if root.globalgetvar('tcl_platform(threaded)'):
text = text + "\nTcl is built with thread support"
else:
raise RuntimeError
except:
text = text + "\nTcl is NOT built with thread support"
text = text + "\nmtTkinter works with or without Tcl thread support"
label = Label(root, text=text)
label.pack()
button = Button(root, text="Click me!",
command=lambda root=root: root.button.configure(
text="[%s]" % root.button['text']))
button.pack()
root.button = button
quit = Button(root, text="QUIT", command=root.destroy)
quit.pack()
# The following three commands are needed so the window pops
# up on top on Windows...
root.iconify()
root.update()
root.deiconify()
# Simulate button presses...
button.invoke()
root.after(1000, _pressOk, root, button)
# Test button continuous press event.
def _pressOk(root, button):
    "Invoke *button*, then reschedule this function to run again in one second."
    button.invoke()
    try:
        root.after(1000, _pressOk, root, button)
    except:
        # Rescheduling failed; the app is most likely shutting down.
        pass
# Test. Mostly borrowed from the Tkinter module, but the important bits moved
# into a separate thread.
if __name__ == '__main__':
    import threading
    # mtDebug=1 logs every cross-thread marshalled call.
    root = Tk(mtDebug = 1)
    thread = threading.Thread(target = _testThread, args=(root,))
    thread.start()
    # The Tk main loop stays on the creating (main) thread, as Tk requires.
    root.mainloop()
    thread.join()
|
util.py | import http.client as httplib
import json
from urllib.parse import urlparse
import threading
import logging
import sys
import math
import time
import datetime
import base64
import codecs
import struct
import time
formatter = logging.Formatter('%(levelname)s : %(asctime)s : %(message)s')
handler = logging.StreamHandler(sys.stdout)
# Level 0 (NOTSET) lets every record through; filtering is left to the logger.
handler.setLevel(0)
handler.setFormatter(formatter)
# NOTE(review): this configures the *root* logger at import time, which
# affects logging for every module in the process.
logger = logging.getLogger()
logger.setLevel(0)
logger.addHandler(handler)
logger.info('Logging enabled.')
def apiCall(resource):
    '''
    Core API call to the api.guildwars2.com api. Simply makes an http
    request and returns the un-jsonified response.

    Retries transient (5xx) failures up to retry_threshold times, follows
    302 redirects, and re-requests until a response body parses as json.

    :param resource: the path (e.g. /v2/items) to query for.
    :return: the decoded json payload.
    :raise Exception: on a 4xx (or non-retryable 5xx) response.
    '''
    parsed_response = False  # To deal with invalid responses.
    retries = 0  # How many retries have we gone through thus far.
    retry_threshold = 5  # For transient errors
    retry_delay = 1  # Seconds
    url = "api.guildwars2.com"
    protocol = "https"
    data = None
    # Loop until a body actually parses. The old condition
    # ("not found_resource and not parsed_response") also stopped as soon as
    # the resource was merely *found*, so a 2xx whose body failed to parse
    # exited the loop and returned an unbound "data" (NameError).
    while not parsed_response:
        logger.debug("Getting: " + str(protocol) + str(url) + str(resource))
        if protocol == "https":
            connection = httplib.HTTPSConnection(url)
        else:
            connection = httplib.HTTPConnection(url)
        connection.request("GET", resource)
        response = connection.getresponse()
        if response.status >= 500 and retries < retry_threshold:
            logger.error("Retrying due to potentially transient API failure: " + response.reason + " ; " + str(response.status))
            retries += 1
            time.sleep(retry_delay)
            continue
        elif response.status >= 400:
            connection.close()
            logger.error("API call failed: " + response.reason + " ; " + str(response.status))
            raise Exception("API call failed: " + response.reason + " ; " + str(response.status))
        elif response.status == 302:
            # Follow the redirect with a fresh request.
            parsed_url = urlparse(response.getheader('location'))
            url = parsed_url.netloc
            # resource = parsed_url.path FIXME: Determine if I should have this to some extent.
            protocol = parsed_url.scheme
            logger.debug("Redirecting to: " + str(protocol) + str(url) + str(resource))
            continue
        elif 200 <= response.status < 300:
            logger.debug("Got response.")
        else:
            logger.error("Unspecified response: " + str(response.status))
        response_body = response.read()
        try:
            data = json.loads(response_body.decode())
            parsed_response = True
        except Exception as e:  # Known to be ValueError, but being a bit generous here.
            logger.error("Json parsing failure for " + str(resource) + " : " + str(e))
            parsed_response = False
        connection.close()
    return data
def _idListApiCall(resource, id_list):
'''
NOTE: INTERNAL FUNCTION
Makes a call to the API appending a list of stringified values comma seperated.
:param resource: the path (e.g. /v2/items?ids=) to query for.
:param id_list: The list of ids to query for.
'''
# Convert the listing IDs to a query string and request it
api_response = []
id_list_string = ""
if len(id_list) > 0:
for each_id in id_list:
id_list_string += str(each_id) + ','
id_list_string = id_list_string[:-1]
api_string = resource + id_list_string
api_response = apiCall(api_string)
return api_response
def idListApiCall_out(resource, id_list, out_list=None):
    '''
    Given an int/stringified int (or list of the above), returns a dir
    of the listings corresponding to those IDs.

    :param resource: the path (e.g. /v2/items?ids=) to query for.
    :param id_list: An int/stringified int (or list) of the item listings desired.
    :param out_list: Optional list to append results into; it is also returned.
        Defaults to a fresh list per call — the old "out_list=[]" default was
        a shared mutable that leaked results across calls.
    '''
    if out_list is None:
        out_list = []
    BATCH_SIZE = 200  # Can get pushed higher, but it gets iffy.
    # Does nastiness to allow many sorts of valid id_list types,
    # e.g. int, stringified int, list of ints and list of stringified ints.
    each_id = None
    parsed_id_list = []
    try:
        # If it's an int (or int-like scalar), just wrap it in a list.
        each_id = int(id_list)
        parsed_id_list = [each_id]
    except:
        # Otherwise treat it as an iterable of int-like values.
        for each_id in id_list:
            int(each_id)  # validate; raises on non-numeric entries
            parsed_id_list.append(each_id)
    while len(parsed_id_list) != 0:
        id_list_batch = parsed_id_list[:BATCH_SIZE]
        out_list += _idListApiCall(resource, id_list_batch)
        parsed_id_list = parsed_id_list[BATCH_SIZE:]
    return out_list
# Fetches an arbitrary number of listings, batching across threads.
# TODO: Maybe try multiprocessing? The extra-interpreter startup overhead
# probably isn't worth it for what is I/O-bound work.
def idListApiCall(resource, id_list, threaded=True, cache_timeout=0):
    '''
    Given an int/stringified int (or list of the above), returns a dir
    of the listings corresponding to those IDs.

    :param resource: the path (e.g. /v2/items?ids=) to query for.
    :param id_list: An int/stringified int (or list) of the item listings desired.
    :param threaded: split the work across a small thread pool (I/O bound).
    :param cache_timeout: unused; kept for interface compatibility.
    '''
    out_list = []
    if threaded:
        # Yes I know it can actually start THREADPOOL_SIZE+1 threads, hush.
        THREADPOOL_SIZE = 10
        threadpool = []
        # Clamp the step to >= 1: an empty id_list used to produce a zero
        # step and crash range() with a ValueError.
        ids_per_thread = max(1, math.ceil(len(id_list) / THREADPOOL_SIZE))
        for i in range(0, len(id_list), ids_per_thread):
            thread = threading.Thread(
                target=idListApiCall_out,
                kwargs={
                    "resource": resource,
                    # Plain fixed-size batches; equivalent to (and clearer
                    # than) the old ceil() slice arithmetic.
                    "id_list": id_list[i:i + ids_per_thread],
                    # All threads append into the shared result list.
                    "out_list": out_list,
                },
            )
            thread.start()
            threadpool.append(thread)
        for thread in threadpool:
            thread.join()
    else:
        out_list = idListApiCall_out(resource, id_list)
    return out_list
def getAllIds(resource):
    '''
    Returns the list of every listing ID currently exposed by *resource*.
    '''
    return apiCall(resource)
def getSecretListings(item_api, listing_api):
    '''
    Get all items that have listings but no index in the public items API.
    '''
    known_items = set(item_api.getAllIds())
    listed = set(listing_api.getAllIds())
    return list(listed - known_items)
def makeItemCode(item_id, num_items=1):
    '''
    Generate the in-game chat code for an item.
    :param item_id: the item_id to codify (in int or string form)
    :param num_items: the number of items to simulate in the code.
    '''
    # '<bbi' packs: type byte (2 = item), count byte, little-endian item id —
    # byte-for-byte the same 6-byte payload as concatenating the packs.
    payload = struct.pack('<bbi', 2, num_items, int(item_id))
    return "[&{}]".format(base64.b64encode(payload).decode('utf-8'))
def makeItemCodes(item_ids):
    '''
    Generate in-game item codes for every ID in *item_ids*.

    :param item_ids: the list of ids to generate codes for.
    '''
    return list(map(makeItemCode, item_ids))
def _setAttrsFromDir(item, attr_dir):
'''
NOTE: INTERNAL FUNCTION
Helper function for parsing an API response into a class automagically.
'''
for var, val in attr_dir.items():
setattr(item, var, val)
def _determineMaxRequestBatchSize():
    '''
    NOTE: INTERNAL FUNCTION
    Determine how many IDs the API accepts in a single request, via a
    binary-search-like probe. At last run, the number was 200.

    :returns: the largest batch size that succeeded (int).
    '''
    ids = getAllIds('/v2/items')
    #Arbitrary start point; much higher than current limit.
    #(But given a guess that the real number is 200+some; this should binary search the target nicely)
    previous_batch_size = 1000
    batch_size = 800
    halt = False
    while not halt:
        tmp_batch_size = batch_size
        try:
            logger.debug("Trying batch size: " + str(batch_size))
            _idListApiCall('/v2/items?ids=', ids[:batch_size])
            if previous_batch_size < batch_size:
                batch_size += (batch_size - previous_batch_size)
            elif previous_batch_size > batch_size:
                # Integer division: the original '/ 2' produced a float,
                # and ids[:batch_size] with a float raises TypeError.
                batch_size += (previous_batch_size - batch_size) // 2
            else:
                halt = True
        except Exception as e:
            logger.exception(e)
            if previous_batch_size > batch_size:
                batch_size -= (previous_batch_size - batch_size)
            elif previous_batch_size < batch_size:
                # Same integer-division fix as the success branch.
                batch_size -= (batch_size - previous_batch_size) // 2
            else:
                batch_size -= 1
            if batch_size <= 0:
                halt = True
        previous_batch_size = tmp_batch_size
    return batch_size
#TODO: variance as well?
def _determineApiDataRefreshRate(sample_id=19697):
    '''
    NOTE: INTERNAL FUNCTION
    Another strange helper, tries to determine some data about the rate at which API data is refreshing,
    so that you can most find an optimal rate to query it without going overboard.
    NOTE2:
    This may also tell us some interesting things about the rate at which data is moving, since pragmatically the refresh
    rate has not been consistent; meaning it's likely some sort of push based system, perhaps on transaction
    volume in the interim? It would be interesting to experiment with, but I'm rambling.

    :param sample_id: What item should be polled. Copper ore used as an index by default because yes.
    :returns: {"mean", "min", "max"} of observed refresh intervals as
              timedeltas, or None if no complete interval was observed.
    '''
    num_trials = 1000
    trial_delay = 1
    prev_sell_quantity = 0
    prev_buy_quantity = 0
    interval_start = None
    discarded_first_interval = False  #We may be coming in "halfway through"
    intervals = []
    max_interval = datetime.timedelta()
    min_interval = datetime.timedelta(days=9999)
    for i in range(0, num_trials):
        time.sleep(trial_delay)
        interval_end = datetime.datetime.now()
        ret = apiCall("/v2/commerce/prices?ids=" + str(sample_id))  # copper ore, it's always moving.
        sell_quantity = ret[0]["sells"]["quantity"]
        buy_quantity = ret[0]["buys"]["quantity"]
        if sell_quantity != prev_sell_quantity or buy_quantity != prev_buy_quantity:
            #We need to get the first data first; going from start->first value doesn't count.
            if interval_start:
                #And even then; we still need to throw out our first "real" interval.
                if discarded_first_interval:
                    interval = interval_end - interval_start
                    intervals.append(interval)
                    logger.debug("Found interval: " + str(interval) + " In trial: " + str(i))
                    if interval > max_interval:
                        logger.debug("Found new max interval")
                        max_interval = interval
                    if interval < min_interval:
                        logger.debug("Found new min interval")
                        min_interval = interval
                else:
                    discarded_first_interval = True
            prev_sell_quantity = sell_quantity
            prev_buy_quantity = buy_quantity
            interval_start = interval_end
    # Was a bare print(intervals); route diagnostics through the logger.
    logger.debug("Observed intervals: " + str(intervals))
    if not intervals:
        # Not enough samples to compute statistics; make the implicit
        # None return of the original explicit.
        return None
    total_delta = datetime.timedelta(0, 0, 0)
    for interval in intervals:
        total_delta += interval
    average_interval = total_delta / len(intervals)
    return {"mean": average_interval, "min": min_interval, "max": max_interval}
def zipItemsAndListings(items, listings):
    '''
    Associate the items and listings sharing IDs in a
    {uid: {item: item, listing: listing}} datastructure.
    Only pairs present in both input lists are included.

    :param items: the list of items to zip.
    :param listings: the list of listings to zip.
    '''
    by_id = {listing.id: listing for listing in listings}
    zipped = {}
    for item in items:
        match = by_id.get(item.id)
        if match is not None:
            # Key is "<name>.<id>", unique per item.
            zipped["{}.{}".format(item.name, item.id)] = {"item": item, "listing": match}
    return zipped
def zipObjectsByField(iterable_a, iterable_b, field):
    '''
    More generic object zipper. Takes two iterables and pairs their contents
    along a single shared field.
    The iterables need not be of the same length; only objects which have a
    zipped partner are returned.

    NOTE: the original implementation crashed on the first match
    (``zipped_map[iterable_a] = iterable_b`` indexed a list with a list);
    it now returns {field_value: {"a": obj_from_a, "b": obj_from_b}}.

    :param iterable_a: The first iterable of objects
    :param iterable_b: The second iterable of objects
    :param field: The field (attribute name) to zip on
    '''
    field_index = {getattr(item, field): item for item in iterable_a}
    zipped_map = {}
    for item in iterable_b:
        key = getattr(item, field)
        if key in field_index:
            zipped_map[key] = {"a": field_index[key], "b": item}
    return zipped_map
if __name__ == "__main__":
print(getItemCode(29952))
|
worker.py | import argparse
import multiprocessing
import os
import random
import socket
import struct
import time
from threading import Thread
import torch
import common
stop = False
def tight_loop():
    """Burn CPU forever with integer multiplications (load generator).

    Never returns; intended to run inside a disposable process that the
    parent terminates.
    """
    while True:
        for i in range(1000000):
            x = i * i
def tight_loop_torch():
    """Burn CPU (or GPU, when CUDA is available) forever with repeated
    elementwise products of random matrices (load generator).

    Never returns; intended to run inside a disposable process.
    """
    num_mats = 10
    mat_dim = 512
    x = torch.rand(num_mats, mat_dim, mat_dim)
    if torch.cuda.is_available():
        x = x.cuda()
    i = 0
    while True:
        next_idx = (i + 1) % num_mats
        next_plus_one_idx = (i + 2) % num_mats
        # Elementwise product of the two following slices, written over slot i.
        x[i] = x[next_idx] * x[next_plus_one_idx]
        # NOTE(review): i advances by 1 (not to next_plus_one_idx); pattern
        # revisits slots - presumably fine for pure load generation.
        i = next_idx
def recv_bytes(sock):
    """Receive one framed transfer from *sock*.

    Reads the fixed-size header (packed with common.snd_to_recv_format_struct,
    containing the payload size in MB), then drains that many bytes.

    :returns: the number of MB received (as announced by the header).
    :raises ConnectionError: if the peer closes the socket mid-transfer.
    """
    # recv() may return fewer bytes than asked; loop until the whole
    # header has arrived (the original unpacked a possibly-short read).
    hdr_size = common.snd_to_recv_format_struct.size
    header = b""
    while len(header) < hdr_size:
        chunk = sock.recv(hdr_size - len(header))
        if not chunk:
            raise ConnectionError("socket closed while reading header")
        header += chunk
    num_mb_recv = common.snd_to_recv_format_struct.unpack(header)[0]
    total_bytes_recvd = 0
    total_bytes_to_recv = num_mb_recv * 1024 * 1024
    print("Start recv", num_mb_recv, "MB")
    while total_bytes_recvd < total_bytes_to_recv:
        buf = sock.recv(total_bytes_to_recv - total_bytes_recvd)
        if not buf:
            # Peer hung up; the original spun forever on empty reads.
            raise ConnectionError("socket closed while reading payload")
        total_bytes_recvd += len(buf)
    return num_mb_recv
def recv(sock):
    """Receiver side of one shuffle: read the payload, echo the same amount
    of data back to the sender, then close the connection.
    """
    # Receive bytes from sender
    num_mb = recv_bytes(sock)
    # Shuffle back to sender
    send_bytes(sock, num_mb)
    sock.close()
    print("Done recv", flush=True)
def listen_recv(args):
    """Accept shuffle connections on WORKER_RECV_PORT until `stop` is set.

    Each accepted connection is handled by a dedicated `recv` thread.

    :param args: parsed CLI args; uses args.num_outstanding as the backlog.
    """
    global stop
    ip = "0.0.0.0"
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((ip, common.WORKER_RECV_PORT))
    sock.listen(args.num_outstanding)
    # Time out accept() periodically so the stop flag is re-checked;
    # hoisted out of the loop (the timeout never changes).
    sock.settimeout(5)
    while not stop:
        try:
            (clientsocket, _) = sock.accept()
            t = Thread(target=recv, args=(clientsocket,))
            t.start()
        except socket.timeout:
            print("Receiver timeout, stop={}".format(stop))
    print("Receiver got stop signal", flush=True)
    # Sleep for some time to make sure any current recvs finish
    time.sleep(10)
    sock.close()
def listen_send(args):
    """Accept master connections on WORKER_MGMT_PORT until `stop` is set.

    Each accepted connection is handled by a dedicated `send` thread.

    :param args: parsed CLI args; uses args.num_outstanding and args.worker_ips.
    """
    global stop
    print("starting listen_thread", flush=True)
    ip = "0.0.0.0"
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((ip, common.WORKER_MGMT_PORT))
    sock.listen(args.num_outstanding)
    print("started listening on port {}".format(common.WORKER_MGMT_PORT))
    # Time out accept() periodically so the stop flag is re-checked;
    # hoisted out of the loop (the timeout never changes).
    sock.settimeout(5)
    while not stop:
        try:
            (clientsocket, _) = sock.accept()
            t = Thread(target=send, args=(clientsocket, args.worker_ips))
            t.start()
        except socket.timeout:
            print("Sender timeout, stop={}".format(stop), flush=True)
    print("Sender got stop signal", flush=True)
    # Sleep for some time to make sure any current sends finish
    time.sleep(10)
    sock.close()
def send_bytes(sock, num_mb_send):
    """Send a framed transfer on *sock*: the size header followed by
    num_mb_send MB of zero bytes in 64 KiB chunks.

    :param sock: connected TCP socket.
    :param num_mb_send: payload size in megabytes.
    """
    buf = bytearray(65536)
    # sendall() retries partial writes; the original used send() for the
    # header, which may transmit only part of it on a congested socket.
    sock.sendall(common.snd_to_recv_format_struct.pack(num_mb_send))
    total_bytes_sent = 0
    total_bytes_to_send = num_mb_send * 1024 * 1024
    print("Sending", num_mb_send, "MB")
    while total_bytes_sent < total_bytes_to_send:
        bytes_to_send = min(total_bytes_to_send - total_bytes_sent, len(buf))
        # send() returns the number of bytes actually written.
        bytes_sent = sock.send(buf[:bytes_to_send])
        total_bytes_sent += bytes_sent
def send(clientsocket, worker_ips):
    """Sender side of one shuffle, driven by a master connection.

    Reads (worker_index, num_mb) from the master, streams num_mb MB to the
    chosen worker's recv port, waits for the echo, then acks the master.
    """
    data = clientsocket.recv(common.master_to_snd_format_struct.size)
    unpacked = common.master_to_snd_format_struct.unpack(data)
    worker_ip = worker_ips[unpacked[0]]  # field 0: destination worker index
    num_mb_send = unpacked[1]            # field 1: payload size in MB
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    print("Connecting to", worker_ip, common.WORKER_RECV_PORT)
    sock.connect((worker_ip, common.WORKER_RECV_PORT))
    # Send bytes to receiver
    send_bytes(sock, num_mb_send)
    # Receive the echoed bytes back from the receiver (round trip complete)
    recv_bytes(sock)
    sock.close()
    # 4-byte ack so the master knows this shuffle finished.
    clientsocket.send(struct.pack('I', 0))
    clientsocket.close()
    print("Done sending")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("num_outstanding", type=int, help="Number of possible outstanding shuffles")
parser.add_argument("--worker_ips", nargs='+', help="ip addresses of workers")
args = parser.parse_args()
#random.seed(42)
with open("/tmp/shuffle_pid.txt", 'w') as outfile:
outfile.write(str(os.getpid()))
listen_recv_thread = Thread(target=listen_recv, args=(args,))
listen_recv_thread.start()
listen_send_thread = Thread(target=listen_send, args=(args,))
listen_send_thread.start()
print("Listening on udf socket", flush=True)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock_name = '/home/ubuntu/bg_sock'
sock.bind(sock_name)
sock.listen(5)
(clientsocket, address) = sock.accept()
print("Stopping all worker threads", flush=True)
stop = True
listen_recv_thread.join()
listen_send_thread.join()
print("All worker threads joined", flush=True)
print("Sending response", flush=True)
clientsocket.sendall('1'.encode())
clientsocket.close()
sock.close()
|
tasks.py | import socket
import subprocess
import threading
from asgiref.sync import async_to_sync
from celery import shared_task
from channels.layers import get_channel_layer
from django.db import connection
from django_celery_beat.models import CrontabSchedule, PeriodicTask
from wol.commands import shutdown, wake
from wol.models import Device, Port, Websocket
channel_layer = get_channel_layer()
class WolDevice:
    """Collects live status for a Device (reachability, open ports, wake and
    shutdown schedules) and broadcasts it to the 'wol' channel group.
    """

    def ping_device(self, ip):
        """Return True if *ip* answers a single ping within 0.5 s."""
        try:
            subprocess.check_output(["ping", "-c", "1", "-W", "0.5", ip])
            return True
        except subprocess.CalledProcessError:
            return False

    def check_port(self, ip, port):
        """Return True if a TCP connect to ip:port succeeds within 0.5 s."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.settimeout(0.5)
            return sock.connect_ex((ip, port)) == 0
        finally:
            # The original never closed the socket, leaking one fd per probe.
            sock.close()

    def start(self, dev):
        """Build the full status payload for *dev* and broadcast it."""
        data = {
            "id": dev.id,
            "name": dev.name,
            "ip": dev.ip,
            "mac": dev.mac,
            "netmask": dev.netmask,
            "up": False,
            "link": dev.link,
            "ports": [],
            "wake": {
                "enabled": False,
                "cron": ""
            },
            "shutdown": {
                "enabled": False,
                "cron": "",
                "command": dev.shutdown_cmd
            }
        }
        # add ports
        for p in Port.objects.all().order_by("number"):
            data["ports"].append({
                "number": p.number,
                "name": p.name,
                "checked": p in dev.port.all(),
                "open": False
            })
        # set status for device and ports (only probed when the host is up)
        if self.ping_device(dev.ip):
            data["up"] = True
            for port in dev.port.all():
                index = next(i for i, d in enumerate(
                    data["ports"]) if d["number"] == port.number)
                data["ports"][index]["checked"] = True
                data["ports"][index]["open"] = self.check_port(dev.ip, port.number)
        # set cron for wake and shutdown
        for action in ["wake", "shutdown"]:
            try:
                task = PeriodicTask.objects.filter(
                    name=f"{data['name']}-{action}",
                    task=f"wol.tasks.scheduled_{action}", crontab_id__isnull=False).get()
                # Renamed from 'wake': that name shadowed the imported
                # wake() command. (.get() raises rather than return None,
                # so the old 'if task:' guard was dead code.)
                cron = CrontabSchedule.objects.get(id=task.crontab_id)
                data[action]["enabled"] = task.enabled
                data[action]["cron"] = " ".join(
                    [cron.minute, cron.hour, cron.day_of_week, cron.day_of_month, cron.month_of_year])
            except PeriodicTask.DoesNotExist:
                pass
        async_to_sync(channel_layer.group_send)(
            "wol", {"type": "send_group", "message": {
                "type": "status",
                "message": data
            }})
        # Each thread gets its own DB connection; close it explicitly.
        connection.close()
@shared_task
def ping_all_devices():
    """Probe every device in parallel and push status updates, but only
    while at least one websocket client is watching.
    """
    ws = Websocket.objects.first()
    # Guard: first() returns None when the table is empty; the original
    # dereferenced .visitors unconditionally and raised AttributeError.
    if ws is None or ws.visitors == 0:
        return
    for dev in Device.objects.all():
        d = WolDevice()
        t = threading.Thread(target=d.start, args=(dev,))
        t.start()
@shared_task
def scheduled_wake(id):
    """Celery task: wake the device *id* if it is currently down."""
    try:
        target = Device.objects.get(id=id)
    except Device.DoesNotExist:
        # The device was deleted: drop any orphaned schedule entries.
        for orphan in PeriodicTask.objects.filter(args=id):
            orphan.delete()
        return
    if WolDevice().ping_device(target.ip):
        return  # already up, nothing to do
    wake(target.mac, target.ip, target.netmask)
    async_to_sync(channel_layer.group_send)(
        "wol", {"type": "send_group", "message": {
            "type": "pending",
            "message": id
        }})
@shared_task
def scheduled_shutdown(id):
    """Celery task: shut the device *id* down if it is currently up."""
    try:
        target = Device.objects.get(id=id)
    except Device.DoesNotExist:
        # The device was deleted: drop any orphaned schedule entries.
        for orphan in PeriodicTask.objects.filter(args=id):
            orphan.delete()
        return
    if not WolDevice().ping_device(target.ip):
        return  # already down, nothing to do
    shutdown(target.shutdown_cmd)
    async_to_sync(channel_layer.group_send)(
        "wol", {"type": "send_group", "message": {
            "type": "pending",
            "message": id
        }})
|
PortScanner.py | #!usr/bin/python
import threading
from queue import Queue
import time
import sys
import socket
from datetime import datetime
from colorama import init, Fore
# some colors
init()
GREEN = Fore.GREEN
RESET = Fore.RESET
GRAY = Fore.LIGHTBLACK_EX
RED = Fore.RED
WHITE = Fore.WHITE
YELLOW = Fore.YELLOW
print(f"{GRAY} - -- - - - - - - - - - - - Developed By rizad- - - - - - - - - - - - - --")
print(f"{GREEN} ###### #####")
print(f"{GREEN} # # #### ##### ##### # # #### ## # # # # ###### #####")
print(f"{GREEN} # # # # # # # # # # # # ## # ## # # # #")
print(f"{GREEN} ###### # # # # # ##### # # # # # # # # # ##### # #")
print(f"{GREEN} # # # ##### # # # ###### # # # # # # # #####")
print(f"{GREEN} # # # # # # # # # # # # # ## # ## # # #")
print(f"{GREEN} # #### # # # ##### #### # # # # # # ###### # #")
print(f"{RED}- - - - - - - - - - - - - - - - - PortScanner V1.0- - - - - - - - - - - - - - - ")
# a print_lock is what is used to prevent "double" modification of shared variables.
# this is used so while one thread is using a variable, others cannot access
# it. Once done, the thread releases the print_lock.
# to use it, you want to specify a print_lock per thing you wish to print_lock.
print_lock = threading.Lock()
target = input('Enter a remote host to scan :')
t1 = datetime.now()
ip = socket.gethostbyname(target)
print(ip)
print(f"{YELLOW}-" * 60)
print("Please Wait Scanning Remote Host", ip)
print(f"{YELLOW}-" * 60)
def portscan(port):
    """Attempt a TCP connect to the global `target` on *port* and report
    the port when the connection succeeds.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect() returns None on success (the original assigned it and
        # then called .close() on None, masked by a bare except).
        s.connect((target, port))
        with print_lock:
            print(f'{GREEN}[+]Port', port, "is open")
    except OSError:
        # Closed/filtered port (refused, timed out, unreachable): stay quiet.
        pass
    finally:
        # Always release the socket; the original leaked one per probe.
        s.close()
# The threader thread pulls an worker from the queue and processes it
def threader():
    """Daemon worker loop: take port numbers off the global queue `q` and
    scan them until the process exits (the loop itself never returns).
    """
    while True:
        # gets an worker from the queue (blocks until one is available)
        worker = q.get()
        # Run the example job with the avail worker in queue (thread)
        portscan(worker)
        # completed with the job - lets q.join() track progress
        q.task_done()
# Create the queue and threader
q = Queue()

# how many threads are we going to allow for
for x in range(30):
    t = threading.Thread(target=threader)
    # classifying as a daemon, so they will die when the main dies
    t.daemon = True
    # begins, must come after daemon definition
    t.start()

# NOTE(review): `start` is never read afterwards; t1/t2 do the timing.
start = time.time()

# 1080 jobs assigned: scan ports 0-1079.
for worker in range(1080):
    q.put(worker)

# wait until the thread terminates.
q.join()

t2 = datetime.now()
total = t2 - t1
print('Scanning Completed in:',total)
|
uiautomation.py | # coding=utf-8
__author__ = 'lxn3032'
import os
import requests
import time
import warnings
import threading
import atexit
from airtest.core.api import connect_device, device as current_device
from airtest.core.android.ime import YosemiteIme
from hrpc.client import RpcClient
from hrpc.transport.http import HttpTransport
from poco.pocofw import Poco
from poco.agent import PocoAgent
from poco.sdk.Attributor import Attributor
from poco.sdk.interfaces.screen import ScreenInterface
from poco.utils.hrpc.hierarchy import RemotePocoHierarchy
from poco.utils.airtest.input import AirtestInput
from poco.utils import six
from poco.drivers.android.utils.installation import install, uninstall
__all__ = ['AndroidUiautomationPoco', 'AndroidUiautomationHelper']
this_dir = os.path.dirname(os.path.realpath(__file__))
PocoServicePackage = 'com.netease.open.pocoservice'
PocoServicePackageTest = 'com.netease.open.pocoservice.test'
class AndroidRpcClient(RpcClient):
    """RPC client that talks to the on-device PocoService over HTTP."""

    def __init__(self, endpoint):
        # endpoint: service URL, e.g. "http://<device_ip>:<port>"
        self.endpoint = endpoint
        super(AndroidRpcClient, self).__init__(HttpTransport)

    def initialize_transport(self):
        """Create the HTTP transport bound to this client's endpoint."""
        return HttpTransport(self.endpoint, self)
# deprecated
class AttributorWrapper(Attributor):
    """
    Some phones still do not support Accessibility.ACTION_SET_TEXT; going
    through YosemiteIme remains the most compatible approach.
    This class hooks set_text and performs the input via the IME's text
    method instead.
    """

    def __init__(self, remote, ime):
        self.remote = remote  # remote attributor proxy on the device
        self.ime = ime        # YosemiteIme instance used for text input

    def getAttr(self, node, attrName):
        """Pass attribute reads straight through to the remote attributor."""
        return self.remote.getAttr(node, attrName)

    def setAttr(self, node, attrName, attrVal):
        """Set an attribute; non-empty 'text' goes through the IME."""
        if attrName == 'text' and attrVal != '':
            # Clear the field first, then type via the IME. Not as clean as
            # setting the text directly, but it gets the job done.
            current_val = self.remote.getAttr(node, 'text')
            if current_val:
                self.remote.setAttr(node, 'text', '')
            self.ime.text(attrVal)
        else:
            self.remote.setAttr(node, attrName, attrVal)
class ScreenWrapper(ScreenInterface):
    """Adapts the remote PocoService screen object to poco's ScreenInterface."""

    def __init__(self, screen):
        super(ScreenWrapper, self).__init__()
        self.screen = screen  # remote screen proxy

    def getScreen(self, width):
        """Return (base64_image, format) for a screenshot of given width."""
        # On Android the PocoService implementation returns only the
        # b64-encoded image; the format is fixed to jpg.
        b64img = self.screen.getScreen(width)
        return b64img, 'jpg'

    def getPortSize(self):
        """Return the viewport size reported by the remote screen."""
        return self.screen.getPortSize()
class AndroidPocoAgent(PocoAgent):
    """PocoAgent backed by the on-device PocoService RPC endpoint."""

    def __init__(self, endpoint, ime, use_airtest_input=False):
        # NOTE(review): `ime` is accepted but not stored here - presumably
        # kept for interface compatibility; confirm against callers.
        self.client = AndroidRpcClient(endpoint)
        remote_poco = self.client.remote('poco-uiautomation-framework')
        dumper = remote_poco.dumper
        selector = remote_poco.selector
        attributor = remote_poco.attributor
        hierarchy = RemotePocoHierarchy(dumper, selector, attributor)
        # Either inject input locally through airtest, or let the remote
        # service perform input on the device.
        if use_airtest_input:
            inputer = AirtestInput()
        else:
            inputer = remote_poco.inputer
        super(AndroidPocoAgent, self).__init__(hierarchy, inputer, ScreenWrapper(remote_poco.screen), None)

    def on_bind_driver(self, driver):
        """Hook airtest input pre-action callbacks once a driver is bound."""
        super(AndroidPocoAgent, self).on_bind_driver(driver)
        if isinstance(self.input, AirtestInput):
            self.input.add_preaction_cb(driver)
class AndroidUiautomationPoco(Poco):
    """
    Poco Android implementation for testing **Android native apps**.

    Args:
        device (:py:obj:`Device`): :py:obj:`airtest.core.device.Device` instance provided by ``airtest``. leave the
         parameter default and the default device will be chosen. more details refer to ``airtest doc``
        using_proxy (:py:obj:`bool`): whether use adb forward to connect the Android device or not
        force_restart (:py:obj:`bool`): whether always restart the poco-service-demo running on Android device or not
        options: see :py:class:`poco.pocofw.Poco`

    Examples:
        The simplest way to initialize AndroidUiautomationPoco instance and no matter your device network status::

            from poco.drivers.android.uiautomation import AndroidUiautomationPoco

            poco = AndroidUiautomationPoco()
            poco('android:id/title').click()
            ...
    """

    def __init__(self, device=None, using_proxy=True, force_restart=False, use_airtest_input=False, **options):
        # This flag keeps the newest pocounit workflow from taking a
        # screenshot at every single step.
        self.screenshot_each_action = True
        if options.get('screenshot_each_action') is False:
            self.screenshot_each_action = False

        self.device = device or current_device()
        if not self.device:
            self.device = connect_device("Android:///")

        self.adb_client = self.device.adb
        if using_proxy:
            self.device_ip = self.adb_client.host or "127.0.0.1"
        else:
            self.device_ip = self.device.get_ip_address()

        # save current top activity (@nullable)
        current_top_activity_package = self.device.get_top_activity_name()
        if current_top_activity_package is not None:
            current_top_activity_package = current_top_activity_package.split('/')[0]

        # install ime
        self.ime = YosemiteIme(self.adb_client)
        self.ime.start()

        # install the PocoService apks
        self._instrument_proc = None
        self._install_service()

        # forward: 10080/10081 are the PocoService ports on the device
        if using_proxy:
            p0, _ = self.adb_client.setup_forward("tcp:10080")
            p1, _ = self.adb_client.setup_forward("tcp:10081")
        else:
            p0 = 10080
            p1 = 10081

        # start - uiautomator cannot run concurrently with PocoService
        if self._is_running('com.github.uiautomator'):
            warnings.warn('{} should not run together with "uiautomator". "uiautomator" will be killed.'
                          .format(self.__class__.__name__))
            self.adb_client.shell(['am', 'force-stop', 'com.github.uiautomator'])

        ready = self._start_instrument(p0, force_restart=force_restart)
        if not ready:
            # If startup failed, uninstall then retry - an oddity of
            # Android instrumentation.
            uninstall(self.adb_client, PocoServicePackage)
            self._install_service()
            ready = self._start_instrument(p0)

        # Restore whatever app was in the foreground before we started.
        if current_top_activity_package is not None:
            current_top_activity2 = self.device.get_top_activity_name()
            if current_top_activity2 is None or current_top_activity_package not in current_top_activity2:
                self.device.start_app(current_top_activity_package, activity=True)

        if not ready:
            raise RuntimeError("unable to launch AndroidUiautomationPoco")
        if ready:
            # After the first successful start, watch the instrumentation
            # process from a background thread and keep it from exiting.
            self._keep_running_instrumentation(p0)

        endpoint = "http://{}:{}".format(self.device_ip, p1)
        agent = AndroidPocoAgent(endpoint, self.ime, use_airtest_input)
        super(AndroidUiautomationPoco, self).__init__(agent, **options)

    def _install_service(self):
        """Install (or update) the PocoService apk and its androidTest apk."""
        updated = install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug.apk'))
        install(self.adb_client, os.path.join(this_dir, 'lib', 'pocoservice-debug-androidTest.apk'), updated)
        return updated

    def _is_running(self, package_name):
        """Return True if a process name ending with *package_name* is alive."""
        processes = self.adb_client.shell(['ps']).splitlines()
        for ps in processes:
            ps = ps.strip()
            if ps.endswith(package_name):
                return True
        return False

    def _keep_running_instrumentation(self, port_to_ping):
        """Daemon thread: restart the instrumentation whenever it dies."""
        print('[pocoservice.apk] background daemon started.')

        def loop():
            while True:
                # communicate() blocks until the instrumentation exits.
                stdout, stderr = self._instrument_proc.communicate()
                print('[pocoservice.apk] stdout: {}'.format(stdout))
                print('[pocoservice.apk] stderr: {}'.format(stderr))
                print('[pocoservice.apk] retrying instrumentation PocoService')
                self._start_instrument(port_to_ping)  # try to restart
                time.sleep(1)
        t = threading.Thread(target=loop)
        t.daemon = True
        t.start()

    def _start_instrument(self, port_to_ping, force_restart=False):
        """(Re)start the PocoService instrumentation; True when reachable."""
        if not force_restart:
            try:
                state = requests.get('http://{}:{}/uiautomation/connectionState'.format(self.device_ip, port_to_ping),
                                     timeout=10)
                state = state.json()
                if state.get('connected'):
                    # skip starting instrumentation if UiAutomation Service already connected.
                    return True
            # NOTE(review): bare except also swallows KeyboardInterrupt;
            # requests.RequestException would be the targeted choice.
            except:
                pass

        if self._instrument_proc is not None:
            if self._instrument_proc.poll() is None:
                self._instrument_proc.kill()
            self._instrument_proc = None

        ready = False
        self.adb_client.shell(['am', 'force-stop', PocoServicePackage])

        # Start the main activity before instrumenting; otherwise the
        # instrumentation may fail to come up.
        self.adb_client.shell('am start -n {}/.TestActivity'.format(PocoServicePackage))

        instrumentation_cmd = [
            'am', 'instrument', '-w', '-e', 'debug', 'false', '-e', 'class',
            '{}.InstrumentedTestAsLauncher'.format(PocoServicePackage),
            '{}.test/android.support.test.runner.AndroidJUnitRunner'.format(PocoServicePackage)]
        self._instrument_proc = self.adb_client.start_shell(instrumentation_cmd)
        atexit.register(self._instrument_proc.kill)
        time.sleep(2)
        # Poll the service endpoint for up to ~10 attempts until it responds.
        for i in range(10):
            try:
                requests.get('http://{}:{}'.format(self.device_ip, port_to_ping), timeout=10)
                ready = True
                break
            except requests.exceptions.Timeout:
                break
            except requests.exceptions.ConnectionError:
                if self._instrument_proc.poll() is not None:
                    warnings.warn("[pocoservice.apk] instrumentation test server process is no longer alive")
                    stdout = self._instrument_proc.stdout.read()
                    stderr = self._instrument_proc.stderr.read()
                    print('[pocoservice.apk] stdout: {}'.format(stdout))
                    print('[pocoservice.apk] stderr: {}'.format(stderr))
                time.sleep(1)
                print("still waiting for uiautomation ready.")
                continue
        return ready

    def on_pre_action(self, action, ui, args):
        """Take a snapshot before each action (for the airtest log), if enabled."""
        if self.screenshot_each_action:
            from airtest.core.api import snapshot
            msg = repr(ui)
            if not isinstance(msg, six.text_type):
                msg = msg.decode('utf-8')
            snapshot(msg=msg)
class AndroidUiautomationHelper(object):
    """Per-device cache of initialized AndroidUiautomationPoco instances."""

    _nuis = {}

    @classmethod
    def get_instance(cls, device):
        """
        This is only a slot to store and get already initialized poco instance rather than initializing again. You can
        simply pass the ``current device instance`` provided by ``airtest`` to get the AndroidUiautomationPoco instance.

        If no such AndroidUiautomationPoco instance, a new instance will be created and stored.

        Args:
            device (:py:obj:`airtest.core.device.Device`): more details refer to ``airtest doc``

        Returns:
            poco instance
        """
        if device not in cls._nuis:
            cls._nuis[device] = AndroidUiautomationPoco(device)
        return cls._nuis[device]
|
queue_pipe.py | from multiprocessing import Pipe,Process
import os
import time
import random
def send_proc(pipe, content):
    """Send *content* through one end of a multiprocessing Pipe.

    TODO: not implemented yet - this module is a work in progress.
    """
    pass
def recv_proc(pipe):
    """Receive and handle data from one end of a multiprocessing Pipe.

    TODO: not implemented yet - this module is a work in progress.
    """
    pass
if __name__ == '__main__':
    print("Parent process is %s started." % (os.getpid(),))
    # Pipe() returns a (conn1, conn2) pair of Connection objects.
    pipe = Pipe()
    # NOTE(review): send_proc expects (pipe, content) but args=() is empty,
    # and the process is never start()ed - this script looks unfinished.
    send_process = Process(target = send_proc, args=())
installwizard.py | # Copyright (C) 2018 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import os
import sys
import threading
import traceback
from typing import Tuple, List, Callable, NamedTuple, Optional
from PyQt5.QtCore import QRect, QEventLoop, Qt, pyqtSignal
from PyQt5.QtGui import QPalette, QPen, QPainter, QPixmap
from PyQt5.QtWidgets import (QWidget, QDialog, QLabel, QHBoxLayout, QMessageBox,
QVBoxLayout, QLineEdit, QFileDialog, QPushButton,
QGridLayout, QSlider, QScrollArea)
from electrum.wallet import Wallet, Abstract_Wallet
from electrum.storage import WalletStorage
from electrum.util import UserCancelled, InvalidPassword, WalletFileException
from electrum.base_wizard import BaseWizard, HWD_SETUP_DECRYPT_WALLET, GoBack
from electrum.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import (MessageBoxMixin, Buttons, icon_path, ChoicesLayout, WWLabel,
InfoButton)
from .password_dialog import PasswordLayout, PasswordLayoutForHW, PW_NEW
from electrum.plugin import run_hook
# Prompt shown when choosing a password for a new software wallet.
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
# Prompt shown when configuring file encryption for a hardware wallet.
MSG_HW_STORAGE_ENCRYPTION = _("Set wallet file encryption.") + '\n'\
                            + _("Your wallet file does not contain secrets, mostly just metadata. ") \
                            + _("It also contains your master public key that allows watching your addresses.") + '\n\n'\
                            + _("Note: If you enable this setting, you will need your hardware device to open your wallet.")
# Help text explaining script-type-prefixed WIF private keys.
WIF_HELP_TEXT = (_('WIF keys are typed in Electrum, based on script type.') + '\n\n' +
                 _('A few examples') + ':\n' +
                 'p2pkh:KxZcY47uGp9a... \t-> 1DckmggQM...\n' +
                 'p2wpkh-p2sh:KxZcY47uGp9a... \t-> 3NhNeZQXF...\n' +
                 'p2wpkh:KxZcY47uGp9a... \t-> bc1q3fjfk...')
# note: full key is KxZcY47uGp9aVQAb6VVvuBs8SwHKgkSR2DbZUzjDzXf2N2GPhG9n
# Warning about whitespace in seed passphrases (see electrum issue #4566).
MSG_PASSPHRASE_WARN_ISSUE4566 = _("Warning") + ": "\
                                + _("You have multiple consecutive whitespaces or leading/trailing "
                                    "whitespaces in your passphrase.") + " " \
                                + _("This is discouraged.") + " " \
                                + _("Due to a bug, old versions of Electrum will NOT be creating the "
                                    "same wallet as newer versions or other software.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig configuration."""

    size = 120  # fixed square edge length in pixels

    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m  # required signatures (drawn green)
        self.n = n  # total cosigners (total slices)

    def set_n(self, n):
        """Update the total number of cosigners and repaint."""
        self.n = n
        self.update()

    def set_m(self, m):
        """Update the number of required signatures and repaint."""
        self.m = m
        self.update()

    def paintEvent(self, event):
        """Draw n equal pie slices; the first m are green, the rest gray."""
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        for i in range(self.n):
            # QPainter.drawPie takes angles in 1/16ths of a degree.
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for InstallWizard page methods.

    Refreshes the Back/Cancel button label, runs the wrapped page, and feeds
    its result into the ``run_next`` callback. GoBack navigates backwards
    (or closes the wizard at the first page); UserCancelled aborts silently.
    """
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            wizard.go_back() if wizard.can_go_back() else wizard.close()
            return
        except UserCancelled:
            return
        # Deliberately type(...) rather than isinstance: NamedTuple results
        # are tuple subclasses and must be passed through as a single value,
        # not unpacked into run_next's arguments.
        if type(out) is not tuple:
            out = (out,)
        run_next(*out)
    return func_wrapper
class WalletAlreadyOpenInMemory(Exception):
    """Raised when the selected wallet file is already loaded by the daemon.

    Carries the live wallet instance so the caller can focus the existing
    window instead of opening the file a second time.
    """

    def __init__(self, wallet: Abstract_Wallet):
        super().__init__()
        self.wallet = wallet
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
    def __init__(self, config, app, plugins):
        """Build the wizard window: title/logo, scrollable page area,
        Back/Next buttons, and the event loop used to step between pages.
        """
        BaseWizard.__init__(self, config, plugins)
        QDialog.__init__(self, None)
        self.setWindowTitle('Electrum  -  ' + _('Install Wizard'))
        self.app = app
        self.config = config
        # Set for base base class
        self.language_for_seed = config.get('language')
        self.setMinimumSize(600, 400)
        self.accept_signal.connect(self.accept)
        self.title = QLabel()
        self.main_widget = QWidget()
        self.back_button = QPushButton(_("Back"), self)
        self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
        self.next_button = QPushButton(_("Next"), self)
        self.next_button.setDefault(True)
        self.logo = QLabel()
        self.please_wait = QLabel(_("Please wait..."))
        self.please_wait.setAlignment(Qt.AlignCenter)
        self.icon_filename = None
        # Pages block on this loop; exit codes: 0 = rejected, 1 = back, 2 = next.
        self.loop = QEventLoop()
        self.rejected.connect(lambda: self.loop.exit(0))
        self.back_button.clicked.connect(lambda: self.loop.exit(1))
        self.next_button.clicked.connect(lambda: self.loop.exit(2))
        # Layout: [logo column | scrollable page content], button row below.
        outer_vbox = QVBoxLayout(self)
        inner_vbox = QVBoxLayout()
        inner_vbox.addWidget(self.title)
        inner_vbox.addWidget(self.main_widget)
        inner_vbox.addStretch(1)
        inner_vbox.addWidget(self.please_wait)
        inner_vbox.addStretch(1)
        scroll_widget = QWidget()
        scroll_widget.setLayout(inner_vbox)
        scroll = QScrollArea()
        scroll.setWidget(scroll_widget)
        scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll.setWidgetResizable(True)
        icon_vbox = QVBoxLayout()
        icon_vbox.addWidget(self.logo)
        icon_vbox.addStretch(1)
        hbox = QHBoxLayout()
        hbox.addLayout(icon_vbox)
        hbox.addSpacing(5)
        hbox.addWidget(scroll)
        hbox.setStretchFactor(scroll, 1)
        outer_vbox.addLayout(hbox)
        outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
        self.set_icon('electrumfair.png')
        self.show()
        self.raise_()
        self.refresh_gui()  # Need for QT on MacOSX.  Lame.
def select_storage(self, path, get_wallet_from_daemon) -> Tuple[str, Optional[WalletStorage]]:
    """Let the user pick a wallet file; return (path, storage-or-None).

    Blocks in the wizard event loop until the user has chosen a file that
    is either new, readable-and-decrypted, or already open in the daemon.
    Raises UserCancelled on cancel/close, and WalletAlreadyOpenInMemory
    when the chosen wallet is already loaded by the daemon.
    """
    vbox = QVBoxLayout()
    hbox = QHBoxLayout()
    hbox.addWidget(QLabel(_('Wallet') + ':'))
    self.name_e = QLineEdit()
    hbox.addWidget(self.name_e)
    button = QPushButton(_('Choose...'))
    hbox.addWidget(button)
    vbox.addLayout(hbox)
    self.msg_label = QLabel('')
    vbox.addWidget(self.msg_label)
    hbox2 = QHBoxLayout()
    self.pw_e = QLineEdit('', self)
    self.pw_e.setFixedWidth(150)
    self.pw_e.setEchoMode(2)  # 2 == QLineEdit.Password: mask typed characters
    self.pw_label = QLabel(_('Password') + ':')
    hbox2.addWidget(self.pw_label)
    hbox2.addWidget(self.pw_e)
    hbox2.addStretch()
    vbox.addLayout(hbox2)
    self.set_layout(vbox, title=_('Electrum wallet'))
    self.temp_storage = WalletStorage(path, manual_upgrades=True)
    wallet_folder = os.path.dirname(self.temp_storage.path)

    def on_choose():
        # File-picker button: push the chosen path into the name field,
        # which in turn triggers on_filename() via textChanged.
        path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if path:
            self.name_e.setText(path)

    def on_filename(filename):
        # Re-evaluate the candidate file on every keystroke: update
        # self.temp_storage, the status message, and password visibility.
        path = os.path.join(wallet_folder, filename)
        wallet_from_memory = get_wallet_from_daemon(path)
        try:
            if wallet_from_memory:
                self.temp_storage = wallet_from_memory.storage
            else:
                self.temp_storage = WalletStorage(path, manual_upgrades=True)
            self.next_button.setEnabled(True)
        except BaseException:
            traceback.print_exc(file=sys.stderr)
            self.temp_storage = None
            self.next_button.setEnabled(False)
        user_needs_to_enter_password = False
        if self.temp_storage:
            if not self.temp_storage.file_exists():
                msg =_("This file does not exist.") + '\n' \
                      + _("Press 'Next' to create this wallet, or choose another file.")
            elif not wallet_from_memory:
                if self.temp_storage.is_encrypted_with_user_pw():
                    msg = _("This file is encrypted with a password.") + '\n' \
                          + _('Enter your password or choose another file.')
                    user_needs_to_enter_password = True
                elif self.temp_storage.is_encrypted_with_hw_device():
                    msg = _("This file is encrypted using a hardware device.") + '\n' \
                          + _("Press 'Next' to choose device to decrypt.")
                else:
                    msg = _("Press 'Next' to open this wallet.")
            else:
                msg = _("This file is already open in memory.") + "\n" \
                      + _("Press 'Next' to create/focus window.")
        else:
            msg = _('Cannot read file')
        self.msg_label.setText(msg)
        if user_needs_to_enter_password:
            self.pw_label.show()
            self.pw_e.show()
            self.pw_e.setFocus()
        else:
            self.pw_label.hide()
            self.pw_e.hide()

    button.clicked.connect(on_choose)
    self.name_e.textChanged.connect(on_filename)
    n = os.path.basename(self.temp_storage.path)
    self.name_e.setText(n)
    # Loop until we can hand back a usable (possibly decrypted) storage.
    while True:
        if self.loop.exec_() != 2:  # 2 = next
            raise UserCancelled
        if self.temp_storage.file_exists() and not self.temp_storage.is_encrypted():
            break
        if not self.temp_storage.file_exists():
            # New wallet: nothing to decrypt, caller will create it.
            break
        wallet_from_memory = get_wallet_from_daemon(self.temp_storage.path)
        if wallet_from_memory:
            raise WalletAlreadyOpenInMemory(wallet_from_memory)
        if self.temp_storage.file_exists() and self.temp_storage.is_encrypted():
            if self.temp_storage.is_encrypted_with_user_pw():
                password = self.pw_e.text()
                try:
                    self.temp_storage.decrypt(password)
                    break
                except InvalidPassword as e:
                    # Wrong password: show the error and let the user retry.
                    QMessageBox.information(None, _('Error'), str(e))
                    continue
                except BaseException as e:
                    traceback.print_exc(file=sys.stdout)
                    QMessageBox.information(None, _('Error'), str(e))
                    raise UserCancelled()
            elif self.temp_storage.is_encrypted_with_hw_device():
                try:
                    self.run('choose_hw_device', HWD_SETUP_DECRYPT_WALLET, storage=self.temp_storage)
                except InvalidPassword as e:
                    QMessageBox.information(
                        None, _('Error'),
                        _('Failed to decrypt using this hardware device.') + '\n' +
                        _('If you use a passphrase, make sure it is correct.'))
                    # Restart the whole selection flow from scratch.
                    self.reset_stack()
                    return self.select_storage(path, get_wallet_from_daemon)
                except BaseException as e:
                    traceback.print_exc(file=sys.stdout)
                    QMessageBox.information(None, _('Error'), str(e))
                    raise UserCancelled()
                if self.temp_storage.is_past_initial_decryption():
                    break
                else:
                    raise UserCancelled()
            else:
                raise Exception('Unexpected encryption version')
    return self.temp_storage.path, (self.temp_storage if self.temp_storage.file_exists() else None)
def run_upgrades(self, storage):
    """Walk the user through wallet-file splitting / completion / upgrade.

    May raise UserCancelled (always, after a successful account split, so
    the stale pre-split storage is never opened) or WalletFileException
    for incomplete files that would also need an upgrade.
    """
    path = storage.path
    if storage.requires_split():
        self.hide()
        msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                "Do you want to split your wallet into multiple files?").format(path)
        if not self.question(msg):
            return
        file_list = '\n'.join(storage.split_accounts())
        msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
        if self.question(msg):
            os.remove(path)
            self.show_warning(_('The file was removed'))
        # raise now, to avoid having the old storage opened
        raise UserCancelled()
    action = storage.get_action()
    if action and storage.requires_upgrade():
        raise WalletFileException('Incomplete wallet files cannot be upgraded.')
    if action:
        # Wallet creation was interrupted; offer to finish or delete it.
        self.hide()
        msg = _("The file '{}' contains an incompletely created wallet.\n"
                "Do you want to complete its creation now?").format(path)
        if not self.question(msg):
            if self.question(_("Do you want to delete '{}'?").format(path)):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return
        self.show()
        self.data = storage.db.data  # FIXME
        self.run(action)
        # Persist whatever the wizard action produced back into storage.
        for k, v in self.data.items():
            storage.put(k, v)
        storage.write()
        return
    if storage.requires_upgrade():
        self.upgrade_storage(storage)
def finished(self):
    """Hook invoked by the hardware client wrapper to close popups.

    This wizard keeps no popups of its own, so there is nothing to do.
    """
    return None
def on_error(self, exc_info):
    """Report an exception via an error dialog, ignoring user cancellation.

    *exc_info* is a (type, value, traceback) triple as from sys.exc_info().
    """
    if not isinstance(exc_info[1], UserCancelled):
        traceback.print_exception(*exc_info)
        self.show_error(str(exc_info[1]))
def set_icon(self, filename):
    """Show icon *filename* in the logo area; return the previous filename."""
    prior_filename, self.icon_filename = self.icon_filename, filename
    self.logo.setPixmap(QPixmap(icon_path(filename))
                        .scaledToWidth(60, mode=Qt.SmoothTransformation))
    return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
    """Install *layout* as the current wizard page and reset button state."""
    self.title.setText("<b>%s</b>"%title if title else "")
    self.title.setVisible(bool(title))
    # Get rid of any prior layout by assigning it to a temporary widget
    prior_layout = self.main_widget.layout()
    if prior_layout:
        QWidget().setLayout(prior_layout)
    self.main_widget.setLayout(layout)
    self.back_button.setEnabled(True)
    self.next_button.setEnabled(next_enabled)
    if next_enabled:
        self.next_button.setFocus()
    self.main_widget.setVisible(True)
    self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
                next_enabled=True):
    """Show *layout* and block until the user acts.

    Returns the event-loop exit code (0 = cancel, 1 = back, 2 = next);
    raises UserCancelled on cancel (unless raise_on_cancel is False)
    and GoBack when the Back button was pressed.
    """
    self.set_layout(layout, title, next_enabled)
    result = self.loop.exec_()
    if not result and raise_on_cancel:
        raise UserCancelled
    if result == 1:
        raise GoBack from None
    # Swap in the "please wait" placeholder until the next page is set.
    self.title.setVisible(False)
    self.back_button.setEnabled(False)
    self.next_button.setEnabled(False)
    self.main_widget.setVisible(False)
    self.please_wait.setVisible(True)
    self.refresh_gui()
    return result
def refresh_gui(self):
    """Force Qt to process pending events so the window repaints.

    Empirically the event queue must be drained twice for the update to
    take effect, hence the two processEvents() calls.
    """
    for _ in range(2):
        self.app.processEvents()
def remove_from_recently_open(self, filename):
    """Drop *filename* from the config's recently-opened wallet list."""
    self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
    """Run a free-form key/text entry page; return the entered text."""
    slayout = KeysLayout(parent=self, header_layout=message, is_valid=is_valid,
                         allow_multi=allow_multi)
    self.exec_layout(slayout, title, next_enabled=False)
    return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
    """Run a seed-phrase entry page; return (seed, is_bip39, is_ext)."""
    slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self)
    self.exec_layout(slayout, title, next_enabled=False)
    return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False, show_wif_help=False):
    """Ask the user for a master key (xpub/xprv); return the validated text."""
    header_layout = QHBoxLayout()
    label = WWLabel(message)
    label.setMinimumWidth(400)
    header_layout.addWidget(label)
    if show_wif_help:
        header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    return self.text_input(title, header_layout, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
    """Ask for cosigner #*index*'s master key; return the entered text."""
    title = _("Add Cosigner") + " %d"%index
    message = ' '.join([
        _('Please enter the master public key (xpub) of your cosigner.'),
        _('Enter their master private key (xprv) if you want to be able to sign for them.')
    ])
    return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
    """Ask for a seed phrase to restore a wallet; return (seed, is_bip39, is_ext)."""
    # Seed options offered depend on wizard flags set by the chosen flow.
    options = []
    if self.opt_ext:
        options.append('ext')
    if self.opt_bip39:
        options.append('bip39')
    title = _('Enter Seed')
    message = _('Please enter your seed phrase in order to restore your wallet.')
    return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
    """Make the user retype the seed they just saw; return the retyped seed."""
    # Clear the clipboard so the seed cannot simply be pasted back in.
    self.app.clipboard().clear()
    title = _('Confirm Seed')
    message = ' '.join([
        _('Your seed is important!'),
        _('If you lose your seed, your money will be permanently lost.'),
        _('To make sure that you have properly saved your seed, please retype it here.')
    ])
    seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
    return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text):
    """Display the newly generated seed; return the 'extend seed' checkbox state."""
    title = _("Your wallet generation seed is:")
    slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'])
    self.exec_layout(slayout)
    return slayout.is_ext
def pw_layout(self, msg, kind, force_disable_encrypt_cb):
    """Run a password entry page; return (new_password, encrypt_wallet)."""
    playout = PasswordLayout(msg=msg, kind=kind, OK_button=self.next_button,
                             force_disable_encrypt_cb=force_disable_encrypt_cb)
    # Encryption defaults to on; the layout may still disable the checkbox.
    playout.encrypt_cb.setChecked(True)
    self.exec_layout(playout.layout())
    return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next, force_disable_encrypt_cb=False):
    """Request the user enter a new password and confirm it.

    Returns (password-or-None, encrypt_wallet) via pw_layout.
    """
    return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW, force_disable_encrypt_cb)
@wizard_dialog
def request_storage_encryption(self, run_next):
    """Ask whether a hardware-keystore wallet file should be encrypted."""
    playout = PasswordLayoutForHW(MSG_HW_STORAGE_ENCRYPTION)
    playout.encrypt_cb.setChecked(True)
    self.exec_layout(playout.layout())
    return playout.encrypt_cb.isChecked()
@wizard_dialog
def confirm_dialog(self, title, message, run_next):
    """Show *message* as a wizard step and wait for the user to press Next."""
    self.confirm(message, title)
def confirm(self, message, title):
    """Display *message* on a simple page; exec_layout raises on cancel."""
    label = WWLabel(message)
    vbox = QVBoxLayout()
    vbox.addWidget(label)
    self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
    """Execute a named wizard action as its own step."""
    self.run(action)
def terminate(self, **kwargs):
    """Ask the wizard to accept and close itself (via its accept signal)."""
    self.accept_signal.emit()
def waiting_dialog(self, task, msg, on_finished=None):
    """Run *task* in a worker thread while showing *msg*; then *on_finished*."""
    label = WWLabel(msg)
    vbox = QVBoxLayout()
    vbox.addSpacing(100)
    label.setMinimumWidth(300)
    label.setAlignment(Qt.AlignCenter)
    vbox.addWidget(label)
    self.set_layout(vbox, next_enabled=False)
    self.back_button.setEnabled(False)
    t = threading.Thread(target=task)
    t.start()
    # Poll the worker at ~60 Hz, keeping the GUI responsive meanwhile.
    while True:
        t.join(1.0/60)
        if t.is_alive():
            self.refresh_gui()
        else:
            break
    if on_finished:
        on_finished()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next):
    """Present (value, label) *choices* as radio buttons; return the value."""
    c_values = [x[0] for x in choices]
    c_titles = [x[1] for x in choices]
    clayout = ChoicesLayout(message, c_titles)
    vbox = QVBoxLayout()
    vbox.addLayout(clayout.layout())
    self.exec_layout(vbox, title)
    action = c_values[clayout.selected_index()]
    return action
def query_choice(self, msg, choices):
    """called by hardware wallets"""
    # Unlike choice_dialog, returns the *index* of the selected choice.
    clayout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout()
    vbox.addLayout(clayout.layout())
    self.exec_layout(vbox, '')
    return clayout.selected_index()
@wizard_dialog
def choice_and_line_dialog(self, title: str, message1: str, choices: List[Tuple[str, str, str]],
                           message2: str, test_text: Callable[[str], int],
                           run_next, default_choice_idx: int=0) -> Tuple[str, str]:
    """Combined radio-choice + line-edit page; return (text, choice_value).

    Each choice is a (value, label, default_text) triple; selecting a
    choice pre-fills the line edit with its default text.
    """
    vbox = QVBoxLayout()
    c_values = [x[0] for x in choices]
    c_titles = [x[1] for x in choices]
    c_default_text = [x[2] for x in choices]
    def on_choice_click(clayout):
        idx = clayout.selected_index()
        line.setText(c_default_text[idx])
    clayout = ChoicesLayout(message1, c_titles, on_choice_click,
                            checked_index=default_choice_idx)
    vbox.addLayout(clayout.layout())
    vbox.addSpacing(50)
    vbox.addWidget(WWLabel(message2))
    line = QLineEdit()
    def on_text_change(text):
        # Next is enabled only while the entered text validates.
        self.next_button.setEnabled(test_text(text))
    line.textEdited.connect(on_text_change)
    on_choice_click(clayout)  # set default text for "line"
    vbox.addWidget(line)
    self.exec_layout(vbox, title)
    choice = c_values[clayout.selected_index()]
    return str(line.text()), choice
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning='',
                presets=(), warn_issue4566=False):
    """Single-line validated text entry with optional warning and preset buttons."""
    vbox = QVBoxLayout()
    vbox.addWidget(WWLabel(message))
    line = QLineEdit()
    line.setText(default)
    def f(text):
        self.next_button.setEnabled(test(text))
        if warn_issue4566:
            # Warn on leading/trailing/duplicated whitespace (issue #4566).
            text_whitespace_normalised = ' '.join(text.split())
            warn_issue4566_label.setVisible(text != text_whitespace_normalised)
    line.textEdited.connect(f)
    vbox.addWidget(line)
    vbox.addWidget(WWLabel(warning))
    warn_issue4566_label = WWLabel(MSG_PASSPHRASE_WARN_ISSUE4566)
    warn_issue4566_label.setVisible(False)
    vbox.addWidget(warn_issue4566_label)
    # Each preset is (button_label, text_to_insert).
    for preset in presets:
        button = QPushButton(preset[0])
        button.clicked.connect(lambda __, text=preset[1]: line.setText(text))
        button.setMinimumWidth(150)
        hbox = QHBoxLayout()
        hbox.addWidget(button, alignment=Qt.AlignCenter)
        vbox.addLayout(hbox)
    self.exec_layout(vbox, title, next_enabled=test(default))
    return line.text()
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
    """Display the wallet's master public key for sharing with cosigners."""
    msg = ' '.join([
        _("Here is your master public key."),
        _("Please share it with your cosigners.")
    ])
    vbox = QVBoxLayout()
    layout = SeedLayout(xpub, title=msg, icon=False, for_seed_words=False)
    vbox.addLayout(layout.layout())
    self.exec_layout(vbox, _('Master Public Key'))
    return None
def init_network(self, network):
    """Ask how to connect to a server and apply the chosen network config."""
    message = _("Electrum communicates with remote servers to get "
                "information about your transactions and addresses. The "
                "servers all fulfill the same purpose only differing in "
                "hardware. In most cases you simply want to let Electrum "
                "pick one at random. However if you prefer feel free to "
                "select a server manually.")
    choices = [_("Auto connect"), _("Select server manually")]
    title = _("How do you want to connect to a server? ")
    clayout = ChoicesLayout(message, choices)
    self.back_button.setText(_('Cancel'))
    self.exec_layout(clayout.layout(), title)
    r = clayout.selected_index()
    if r == 1:
        # Manual selection: show the full network chooser page.
        nlayout = NetworkChoiceLayout(network, self.config, wizard=True)
        if self.exec_layout(nlayout.layout()):
            nlayout.accept()
    else:
        network.auto_connect = True
        self.config.set_key('auto_connect', True, True)
@wizard_dialog
def multisig_dialog(self, run_next):
    """Let the user pick m-of-n multisig parameters; return (m, n)."""
    cw = CosignWidget(2, 2)
    m_edit = QSlider(Qt.Horizontal, self)
    n_edit = QSlider(Qt.Horizontal, self)
    # n (cosigners) in [2, 15]; m (required signatures) starts capped at n.
    n_edit.setMinimum(2)
    n_edit.setMaximum(15)
    m_edit.setMinimum(1)
    m_edit.setMaximum(2)
    n_edit.setValue(2)
    m_edit.setValue(2)
    n_label = QLabel()
    m_label = QLabel()
    grid = QGridLayout()
    grid.addWidget(n_label, 0, 0)
    grid.addWidget(n_edit, 0, 1)
    grid.addWidget(m_label, 1, 0)
    grid.addWidget(m_edit, 1, 1)
    def on_m(m):
        m_label.setText(_('Require {0} signatures').format(m))
        cw.set_m(m)
    def on_n(n):
        n_label.setText(_('From {0} cosigners').format(n))
        cw.set_n(n)
        # m can never exceed n.
        m_edit.setMaximum(n)
    n_edit.valueChanged.connect(on_n)
    m_edit.valueChanged.connect(on_m)
    on_n(2)
    on_m(2)
    vbox = QVBoxLayout()
    vbox.addWidget(cw)
    vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
    vbox.addLayout(grid)
    self.exec_layout(vbox, _("Multi-Signature Wallet"))
    m = int(m_edit.value())
    n = int(n_edit.value())
    return (m, n)
|
alternate_server.py | import argparse
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
def accept_incoming_connections():
    """Sets up handling for incoming clients."""
    # Runs forever: each accepted client gets its own handler thread.
    while True:
        client, client_address = SERVER.accept()
        print("%s:%s has connected." % client_address)
        addresses[client] = client_address
        Thread(target=handle_client, args=(client,)).start()
def handle_client(client):  # Takes client socket as argument.
    """Handle a single client connection until it quits or disconnects.

    Protocol: utf-8 messages of the form "{COMMAND}payload". Supported:
    {REGISTER}name, {ALL}text (broadcast, registered clients only),
    {QUIT}, and "{name}text" for a direct message to a named client.
    """
    name = ""
    prefix = ""
    while True:
        raw = client.recv(BUFSIZ)
        # BUG FIX: recv() returns bytes, never None, so the original
        # "if not msg is None" guard was always true. An empty b"" means
        # the peer disconnected; treat that exactly like an explicit QUIT.
        msg = raw.decode("utf-8") if raw else "{QUIT}"
        # Broadcasts are only honoured after the client has registered.
        if msg.startswith("{ALL}") and name:
            new_msg = msg.replace("{ALL}", "{MSG}"+prefix)
            send_message(new_msg, broadcast=True)
            continue
        if msg.startswith("{REGISTER}"):
            name = msg.split("}")[1]
            welcome = '{MSG}Welcome %s!' % name
            send_message(welcome, destination=client)
            msg = "{MSG}%s has joined the chat!" % name
            send_message(msg, broadcast=True)
            clients[client] = name
            prefix = name + ": "
            send_clients()
            continue
        if "{QUIT}" in msg:
            client.close()
            try:
                del clients[client]
            except KeyError:
                pass
            if name:
                send_message("{MSG}%s has left the chat." % name, broadcast=True)
                send_clients()
            break
        # Ignore any other traffic from clients that have not registered.
        if not name:
            continue
        # Anything else is either a direct message "{dest}text" or junk.
        try:
            msg_params = msg.split("}")
            dest_name = msg_params[0][1:]  # Remove the {
            dest_sock = find_client_socket(dest_name)
            if dest_sock:
                send_message(msg_params[1], prefix=prefix, destination=dest_sock)
            else:
                print("Invalid Destination. %s" % dest_name)
        except Exception:
            # BUG FIX: was a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt.
            print("Error parsing the message: %s" % msg)
def send_clients():
    """Broadcast the current registered-user list to every client."""
    names = get_clients_names()
    send_message("{CLIENTS}" + names, broadcast=True)
def get_clients_names(separator="|"):
    """Return the names of all registered clients joined by *separator*.

    Replaces the manual append loop with str.join over dict values;
    insertion order (registration order) is preserved.
    """
    return separator.join(clients.values())
def find_client_socket(name):
    """Return the socket registered under *name*, or None if unknown."""
    return next(
        (sock for sock, client_name in clients.items() if client_name == name),
        None,
    )
def send_message(msg, prefix="", destination=None, broadcast=False):
send_msg = bytes(prefix + msg, "utf-8")
if broadcast:
"""Broadcasts a message to all the clients."""
for sock in clients:
sock.send(send_msg)
else:
if destination is not None:
destination.send(send_msg)
# --- module state and configuration ---------------------------------------
clients = {}    # socket -> registered user name
addresses = {}  # socket -> (host, port) of the peer
parser = argparse.ArgumentParser(description="Chat Server")
parser.add_argument(
    '--host',
    help='Host IP',
    default="127.0.0.1"
)
parser.add_argument(
    '--port',
    help='Port Number',
    default=33002
)
server_args = parser.parse_args()
HOST = server_args.host
PORT = int(server_args.port)  # --port arrives as str; the default is already int
BUFSIZ = 2048
ADDR = (HOST, PORT)
stop_server = False  # NOTE(review): never read or set elsewhere in this file — appears unused
SERVER = socket(AF_INET, SOCK_STREAM)
SERVER.bind(ADDR)  # bound at import time; listen() only happens under __main__
if __name__ == "__main__":
    try:
        SERVER.listen(5)
        print("Server Started at {}:{}".format(HOST, PORT))
        print("Waiting for connection...")
        ACCEPT_THREAD = Thread(target=accept_incoming_connections)
        # Daemon thread: it blocks forever in accept(), so it must not keep
        # the process alive once the main thread is told to exit.
        ACCEPT_THREAD.daemon = True
        ACCEPT_THREAD.start()
        ACCEPT_THREAD.join()
        SERVER.close()
    except KeyboardInterrupt:
        print("Closing...")
        # BUG FIX: threading.Thread has no interrupt() method — the original
        # raised AttributeError here on Ctrl-C. Closing the listening socket
        # unblocks accept(), and the daemon thread dies with the process.
        SERVER.close()
|
_kit2fiff_gui.py | """Mayavi/traits GUI for converting data from KIT systems"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from ..externals.six.moves import queue
from threading import Thread
import numpy as np
from scipy.linalg import inv
# allow import without traits
try:
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
from pyface.api import confirm, error, FileDialog, OK, YES, information
from traits.api import (HasTraits, HasPrivateTraits, cached_property,
Instance, Property, Bool, Button, Enum, File, Int,
List, Str, DelegatesTo)
from traitsui.api import (View, Item, HGroup, VGroup, spring,
CheckListEditor, EnumEditor, Handler)
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
except:
from ..utils import trait_wraith
HasTraits = object
HasPrivateTraits = object
Handler = object
cached_property = trait_wraith
MayaviScene = trait_wraith
MlabSceneModel = trait_wraith
Bool = trait_wraith
Button = trait_wraith
DelegatesTo = trait_wraith
Enum = trait_wraith
File = trait_wraith
Instance = trait_wraith
Int = trait_wraith
List = trait_wraith
Property = trait_wraith
Str = trait_wraith
spring = trait_wraith
View = trait_wraith
Item = trait_wraith
HGroup = trait_wraith
VGroup = trait_wraith
EnumEditor = trait_wraith
NoButtons = trait_wraith
CheckListEditor = trait_wraith
SceneEditor = trait_wraith
from ..fiff.kit.coreg import read_hsp, read_elp
from ..fiff.kit.kit import RawKIT, KIT
from ..transforms import apply_trans, als_ras_trans, als_ras_trans_mm
from ..coreg import (_decimate_points, fit_matched_points,
get_ras_to_neuromag_trans)
from ._marker_gui import CombineMarkersPanel, CombineMarkersModel
from ._viewer import HeadViewController, headview_item, PointObject
# Checklist editor for choosing which of the 5 marker points to use.
use_editor = CheckListEditor(cols=5, values=[(i, str(i)) for i in range(5)])
backend_is_wx = False  # is there a way to determine this?
if backend_is_wx:
    # wx backend allows labels for wildcards
    hsp_points_wildcard = ['Head Shape Points (*.txt)|*.txt']
    hsp_fid_wildcard = ['Head Shape Fiducials (*.txt)|*.txt']
    kit_con_wildcard = ['Continuous KIT Files (*.sqd;*.con)|*.sqd;*.con']
else:
    hsp_points_wildcard = ['*.txt']
    hsp_fid_wildcard = ['*.txt']
    kit_con_wildcard = ['*.sqd;*.con']
class Kit2FiffModel(HasPrivateTraits):
    """Data Model for Kit2Fiff conversion

    - Markers are transformed into RAS coordinate system (as are the sensor
      coordinates).
    - Head shape digitizer data is transformed into neuromag-like space.
    """
    # Input Traits
    markers = Instance(CombineMarkersModel, ())
    sqd_file = File(exists=True, filter=kit_con_wildcard)
    hsp_file = File(exists=True, filter=hsp_points_wildcard, desc="Digitizer "
                    "head shape")
    fid_file = File(exists=True, filter=hsp_fid_wildcard, desc="Digitizer "
                    "fiducials")
    # Event-channel bit order and slope; semantics documented in the panel view.
    stim_chs = Enum(">", "<")
    stim_slope = Enum("-", "+")
    # Marker Points
    use_mrk = List(list(range(5)), desc="Which marker points to use for the device "
                   "head coregistration.")
    # Derived Traits
    mrk = Property(depends_on=('markers.mrk3.points'))
    # Polhemus Fiducials
    elp_raw = Property(depends_on=['fid_file'])
    hsp_raw = Property(depends_on=['hsp_file'])
    polhemus_neuromag_trans = Property(depends_on=['elp_raw'])
    # Polhemus data (in neuromag space)
    elp = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
    fid = Property(depends_on=['elp_raw', 'polhemus_neuromag_trans'])
    hsp = Property(depends_on=['hsp_raw', 'polhemus_neuromag_trans'])
    # trans
    dev_head_trans = Property(depends_on=['elp', 'mrk', 'use_mrk'])
    head_dev_trans = Property(depends_on=['dev_head_trans'])
    # info
    sqd_fname = Property(Str, depends_on='sqd_file')
    hsp_fname = Property(Str, depends_on='hsp_file')
    fid_fname = Property(Str, depends_on='fid_file')
    can_save = Property(Bool, depends_on=['sqd_file', 'fid', 'elp', 'hsp',
                                          'dev_head_trans'])

    @cached_property
    def _get_can_save(self):
        "Only allow saving when either all or no head shape elements are set."
        has_sqd = bool(self.sqd_file)
        if not has_sqd:
            return False
        has_all_hsp = (np.any(self.dev_head_trans) and np.any(self.hsp)
                       and np.any(self.elp) and np.any(self.fid))
        if has_all_hsp:
            return True
        has_any_hsp = self.hsp_file or self.fid_file or np.any(self.mrk)
        return not has_any_hsp

    @cached_property
    def _get_dev_head_trans(self):
        """Device-to-head transform fitted from markers vs. digitizer points."""
        if (self.mrk is None) or not np.any(self.fid):
            return np.eye(4)
        src_pts = self.mrk
        dst_pts = self.elp
        n_use = len(self.use_mrk)
        if n_use < 3:
            error(None, "Estimating the device head transform requires at "
                  "least 3 marker points. Please adjust the markers used.",
                  "Not Enough Marker Points")
            return
        elif n_use < 5:
            # Restrict the fit to the user-selected subset of markers.
            src_pts = src_pts[self.use_mrk]
            dst_pts = dst_pts[self.use_mrk]
        trans = fit_matched_points(src_pts, dst_pts, out='trans')
        return trans

    @cached_property
    def _get_elp(self):
        """HPI coil positions in neuromag space."""
        if self.elp_raw is None:
            return np.empty((0, 3))
        # Rows 3:8 of the raw file; the first 3 rows are the fiducials
        # (see _get_fid, which takes [:3]).
        pts = self.elp_raw[3:8]
        pts = apply_trans(self.polhemus_neuromag_trans, pts)
        return pts

    @cached_property
    def _get_elp_raw(self):
        # Read the digitizer fiducial file; on failure, clear the trait so the
        # GUI does not keep pointing at an unreadable file.
        if not self.fid_file:
            return
        try:
            pts = read_elp(self.fid_file)
        except Exception as err:
            error(None, str(err), "Error Reading Fiducials")
            self.reset_traits(['fid_file'])
            raise
        else:
            return pts

    @cached_property
    def _get_fid(self):
        """Fiducial points (first 3 rows of the raw file) in neuromag space."""
        if self.elp_raw is None:
            return np.empty((0, 3))
        pts = self.elp_raw[:3]
        pts = apply_trans(self.polhemus_neuromag_trans, pts)
        return pts

    @cached_property
    def _get_fid_fname(self):
        # Display name for the fiducial file ('-' when unset).
        if self.fid_file:
            return os.path.basename(self.fid_file)
        else:
            return '-'

    @cached_property
    def _get_head_dev_trans(self):
        # Inverse of the device-to-head transform.
        return inv(self.dev_head_trans)

    @cached_property
    def _get_hsp(self):
        """Head-shape points in neuromag space (empty until both inputs exist)."""
        if (self.hsp_raw is None) or not np.any(self.polhemus_neuromag_trans):
            return np.empty((0, 3))
        else:
            pts = apply_trans(self.polhemus_neuromag_trans, self.hsp_raw)
            return pts

    @cached_property
    def _get_hsp_fname(self):
        # Display name for the head-shape file ('-' when unset).
        if self.hsp_file:
            return os.path.basename(self.hsp_file)
        else:
            return '-'

    @cached_property
    def _get_hsp_raw(self):
        # Read digitizer head-shape points, decimating oversized clouds.
        fname = self.hsp_file
        if not fname:
            return
        try:
            pts = read_hsp(fname)
            n_pts = len(pts)
            if n_pts > KIT.DIG_POINTS:
                msg = ("The selected head shape contains {n_in} points, "
                       "which is more than the recommended maximum ({n_rec}). "
                       "The file will be automatically downsampled, which "
                       "might take a while. A better way to downsample is "
                       "using FastScan.")
                msg = msg.format(n_in=n_pts, n_rec=KIT.DIG_POINTS)
                information(None, msg, "Too Many Head Shape Points")
                pts = _decimate_points(pts, 5)
        except Exception as err:
            error(None, str(err), "Error Reading Head Shape")
            self.reset_traits(['hsp_file'])
            raise
        else:
            return pts

    @cached_property
    def _get_mrk(self):
        # Marker points converted from ALS to RAS coordinates.
        return apply_trans(als_ras_trans, self.markers.mrk3.points)

    @cached_property
    def _get_polhemus_neuromag_trans(self):
        """Transform from Polhemus space to neuromag head space (via RAS mm)."""
        if self.elp_raw is None:
            return
        pts = apply_trans(als_ras_trans_mm, self.elp_raw[:3])
        nasion, lpa, rpa = pts
        trans = get_ras_to_neuromag_trans(nasion, lpa, rpa)
        trans = np.dot(trans, als_ras_trans_mm)
        return trans

    @cached_property
    def _get_sqd_fname(self):
        # Display name for the KIT data file ('-' when unset).
        if self.sqd_file:
            return os.path.basename(self.sqd_file)
        else:
            return '-'

    def clear_all(self):
        """Clear all specified input parameters"""
        self.markers.mrk1.clear = True
        self.markers.mrk2.clear = True
        self.reset_traits(['sqd_file', 'hsp_file', 'fid_file'])

    def get_event_info(self):
        """
        Return a string with the number of events found for each trigger value
        """
        # NOTE(review): self.events is not defined anywhere in this class —
        # presumably assigned externally; confirm before relying on this method.
        if len(self.events) == 0:
            return "No events found."
        count = ["Events found:"]
        events = np.array(self.events)
        for i in np.unique(events):
            n = np.sum(events == i)
            count.append('%3i: %i' % (i, n))
        return os.linesep.join(count)

    def get_raw(self, preload=False):
        """Create a raw object based on the current model settings

        Raises ValueError when no KIT data file has been selected.
        """
        if not self.sqd_file:
            raise ValueError("sqd file not set")
        raw = RawKIT(self.sqd_file, preload=preload)
        raw._set_stimchannels(self.stim_chs, self.stim_slope)
        # Only attach digitizer info when fiducials are available.
        if np.any(self.fid):
            raw._set_dig_neuromag(self.fid, self.elp, self.hsp,
                                  self.dev_head_trans)
        return raw
class Kit2FiffFrameHandler(Handler):
    """Handler that checks for unfinished processes before closing its window
    """
    def close(self, info, is_ok):
        """Veto window close while save jobs are still queued; else allow it."""
        pending = info.object.kit2fiff_panel.queue.unfinished_tasks
        if not pending:
            return True
        msg = ("Can not close the window while saving is still in "
               "progress. Please wait until all files are processed.")
        title = "Saving Still in Progress"
        information(None, msg, title)
        return False
class Kit2FiffPanel(HasPrivateTraits):
    """Control panel for kit2fiff conversion"""
    model = Instance(Kit2FiffModel)
    # model copies for view
    use_mrk = DelegatesTo('model')
    sqd_file = DelegatesTo('model')
    hsp_file = DelegatesTo('model')
    fid_file = DelegatesTo('model')
    stim_chs = DelegatesTo('model')
    stim_slope = DelegatesTo('model')
    # info
    can_save = DelegatesTo('model')
    sqd_fname = DelegatesTo('model')
    hsp_fname = DelegatesTo('model')
    fid_fname = DelegatesTo('model')
    # Source Files
    reset_dig = Button
    # Visualization
    scene = Instance(MlabSceneModel)
    fid_obj = Instance(PointObject)
    elp_obj = Instance(PointObject)
    hsp_obj = Instance(PointObject)
    # Output
    save_as = Button(label='Save FIFF...')
    clear_all = Button(label='Clear All')
    queue = Instance(queue.Queue, ())  # (raw, fname) jobs for the save worker
    queue_feedback = Str('')           # last finished-job message
    queue_current = Str('')            # job currently being processed
    queue_len = Int(0)
    queue_len_str = Property(Str, depends_on=['queue_len'])
    error = Str('')

    view = View(VGroup(VGroup(Item('sqd_file', label="Data"),
                              Item('sqd_fname', show_label=False,
                                   style='readonly'),
                              Item('hsp_file', label='Dig Head Shape'),
                              Item('hsp_fname', show_label=False,
                                   style='readonly'),
                              Item('fid_file', label='Dig Points'),
                              Item('fid_fname', show_label=False,
                                   style='readonly'),
                              Item('reset_dig', label='Clear Digitizer Files',
                                   show_label=False),
                              Item('use_mrk', editor=use_editor,
                                   style='custom'),
                              label="Sources", show_border=True),
                       VGroup(Item('stim_chs', label="Binary Coding",
                                   style='custom',
                                   editor=EnumEditor(values={'>': '1:1 ... 128',
                                                             '<': '2:128 ... 1',
                                                             },
                                                     cols=2),
                                   help="Specifies the bit order in event "
                                   "channels. Assign the first bit (1) to the "
                                   "first or the last trigger channel."),
                              Item('stim_slope', label="Event Onset",
                                   style='custom',
                                   editor=EnumEditor(
                                       values={'+': '2:Peak (0 to 5 V)',
                                               '-': '1:Trough (5 to 0 V)'},
                                       cols=2),
                                   help="Whether events are marked by a decrease "
                                   "(trough) or an increase (peak) in trigger "
                                   "channel values"),
                              label='Events', show_border=True),
                       HGroup(Item('save_as', enabled_when='can_save'), spring,
                              'clear_all', show_labels=False),
                       Item('queue_feedback', show_label=False,
                            style='readonly'),
                       Item('queue_current', show_label=False,
                            style='readonly'),
                       Item('queue_len_str', show_label=False,
                            style='readonly'),
                       ))

    def __init__(self, *args, **kwargs):
        """Start the background save worker and wire up the 3D point objects."""
        super(Kit2FiffPanel, self).__init__(*args, **kwargs)
        # setup save worker
        def worker():
            # Daemon thread: pulls (raw, fname) jobs off self.queue forever
            # and reports progress through the queue_* status traits.
            while True:
                raw, fname = self.queue.get()
                basename = os.path.basename(fname)
                self.queue_len -= 1
                self.queue_current = 'Processing: %s' % basename
                # task
                try:
                    raw.save(fname, overwrite=True)
                except Exception as err:
                    self.error = str(err)
                    res = "Error saving: %s"
                else:
                    res = "Saved: %s"
                # finalize
                self.queue_current = ''
                self.queue_feedback = res % basename
                self.queue.task_done()
        t = Thread(target=worker)
        t.daemon = True
        t.start()
        # setup mayavi visualization: fiducials, HPI coils and head shape are
        # kept in sync with the model's derived traits (one-way).
        m = self.model
        self.fid_obj = PointObject(scene=self.scene, color=(25, 225, 25),
                                   point_scale=5e-3)
        m.sync_trait('fid', self.fid_obj, 'points', mutual=False)
        m.sync_trait('head_dev_trans', self.fid_obj, 'trans', mutual=False)
        self.elp_obj = PointObject(scene=self.scene, color=(50, 50, 220),
                                   point_scale=1e-2, opacity=.2)
        m.sync_trait('elp', self.elp_obj, 'points', mutual=False)
        m.sync_trait('head_dev_trans', self.elp_obj, 'trans', mutual=False)
        self.hsp_obj = PointObject(scene=self.scene, color=(200, 200, 200),
                                   point_scale=2e-3)
        m.sync_trait('hsp', self.hsp_obj, 'points', mutual=False)
        m.sync_trait('head_dev_trans', self.hsp_obj, 'trans', mutual=False)
        self.scene.camera.parallel_scale = 0.15
        self.scene.mlab.view(0, 0, .15)

    def _clear_all_fired(self):
        # "Clear All" button: reset every input on the model.
        self.model.clear_all()

    @cached_property
    def _get_queue_len_str(self):
        if self.queue_len:
            return "Queue length: %i" % self.queue_len
        else:
            return ''

    def _reset_dig_fired(self):
        # "Clear Digitizer Files" button: forget head shape + fiducial files.
        self.reset_traits(['hsp_file', 'fid_file'])

    def _save_as_fired(self):
        """Build the raw object, ask for a target path and enqueue the save."""
        # create raw
        try:
            raw = self.model.get_raw()
        except Exception as err:
            error(None, str(err), "Error Creating KIT Raw")
            raise
        # find default path
        stem, _ = os.path.splitext(self.sqd_file)
        if not stem.endswith('raw'):
            stem += '-raw'
        default_path = stem + '.fif'
        # save as dialog
        dlg = FileDialog(action="save as",
                         wildcard="fiff raw file (*.fif)|*.fif",
                         default_path=default_path)
        dlg.open()
        if dlg.return_code != OK:
            return
        fname = dlg.path
        if not fname.endswith('.fif'):
            fname += '.fif'
        if os.path.exists(fname):
            answer = confirm(None, "The file %r already exists. Should it "
                             "be replaced?", "Overwrite File?")
            if answer != YES:
                return
        # Hand the actual (slow) save off to the worker thread.
        self.queue.put((raw, fname))
        self.queue_len += 1
class Kit2FiffFrame(HasTraits):
    """Main kit2fiff window: marker panel, 3D scene and conversion panel.

    (The original docstring described the marker-interpolation GUI; this
    class wires CombineMarkersPanel, the Mayavi scene and Kit2FiffPanel
    together around one shared Kit2FiffModel.)
    """
    model = Instance(Kit2FiffModel, ())
    scene = Instance(MlabSceneModel, ())
    headview = Instance(HeadViewController)
    marker_panel = Instance(CombineMarkersPanel)
    kit2fiff_panel = Instance(Kit2FiffPanel)

    view = View(HGroup(VGroup(Item('marker_panel', style='custom'),
                              show_labels=False),
                       VGroup(Item('scene',
                                   editor=SceneEditor(scene_class=MayaviScene),
                                   dock='vertical', show_label=False),
                              VGroup(headview_item, show_labels=False),
                              ),
                       VGroup(Item('kit2fiff_panel', style='custom'),
                              show_labels=False),
                       show_labels=False,
                       ),
                handler=Kit2FiffFrameHandler(),
                height=700, resizable=True, buttons=NoButtons)

    def _headview_default(self):
        # Shared 3D view controller for the scene (RAS coordinate system).
        return HeadViewController(scene=self.scene, scale=160, system='RAS')

    def _kit2fiff_panel_default(self):
        return Kit2FiffPanel(scene=self.scene, model=self.model)

    def _marker_panel_default(self):
        return CombineMarkersPanel(scene=self.scene, model=self.model.markers,
                                   trans=als_ras_trans)
|
main.py | from flask import Flask, request, send_from_directory
from flask_restful import Api, Resource, reqparse, abort
import os
from werkzeug.utils import secure_filename
import PIL.Image
from multiprocessing import Process
app = Flask(__name__)
api = Api(app)
app.config['MAX_CONTENT_LENGTH'] = 16 * 1000 * 1000 #16 MB max allowed payload for upload
# configuration parameter
STORAGE_PATH = "storage/"  # trailing slash matters: some paths are built by concatenation
MAX_WIDTH = 2000   # upper bound for resize width (pixels)
MAX_HEIGHT = 2000  # upper bound for resize height (pixels)
SERVER_PORT = 5000
# setting up requested parameter for resize request operation
img_resize_args = reqparse.RequestParser()
img_resize_args.add_argument("width", type=int, help="Width of the image is required", required=True)
img_resize_args.add_argument("height", type=int, help="Height of the image is required", required=True)
images = []
def load_img_list():
    """Populate the global image index from the files in STORAGE_PATH."""
    # Hidden files (dotfiles) are excluded from the served catalogue.
    images.extend(entry for entry in os.listdir(STORAGE_PATH)
                  if not entry.startswith("."))
    images.sort()
def img_resize(filename, width, height):
    """Resize a stored image in place, overwriting the original file.

    Args:
        filename: name of an image inside STORAGE_PATH.
        width: target width in pixels.
        height: target height in pixels.
    """
    path = os.path.join(STORAGE_PATH, filename)
    # Use a context manager so the source file handle is closed before we
    # overwrite the same path (the original leaked the open file object).
    with PIL.Image.open(path) as image:
        resized_image = image.resize((width, height))
    resized_image.save(path)
class ImageList(Resource):
    """REST resource for the image collection: list (GET) and upload (POST)."""

    def get(self):
        # Return the sorted list of stored image names.
        return images

    def post(self):
        # Handle image uploading via multipart/form-data under key "file".
        if "file" not in request.files:
            abort(400, message="No file part in the request")
        file = request.files["file"]
        if file.filename == "":
            abort(400, message="No image selected for uploading")
        filename = secure_filename(file.filename)
        # BUG FIX: secure_filename strips dangerous names (e.g. "../..") and
        # can return an empty string; reject those instead of saving "".
        if not filename:
            abort(400, message="No image selected for uploading")
        if filename in images:
            abort(409, message="Image already exists with that name...")
        file.save(os.path.join(STORAGE_PATH, filename))
        file.close()
        images.append(filename)
        images.sort()
        return "Image successfully uploaded", 201
class Image(Resource):
    """REST resource for a single image: download, delete and async resize."""

    def get(self, img_name):
        # Handle image download.
        filename = secure_filename(img_name)
        if filename not in images:
            abort(404, message="Could not find image...")
        path = os.path.join(app.root_path, STORAGE_PATH)
        return send_from_directory(path, filename)

    def delete(self, img_name):
        # Handle image delete.
        filename = secure_filename(img_name)
        if filename not in images:
            abort(404, message="Could not find image...")
        # BUG FIX: unlink the file first so a failed os.remove leaves the
        # in-memory index consistent with the filesystem; removing an
        # element also keeps the list sorted, so no re-sort is needed.
        os.remove(os.path.join(STORAGE_PATH, filename))
        images.remove(filename)
        return "", 204

    def patch(self, img_name):
        # Handle image resizing; the work runs in a separate process so the
        # request returns immediately with 202 Accepted.
        filename = secure_filename(img_name)
        if filename not in images:
            abort(404, message="Could not find image")
        args = img_resize_args.parse_args()
        # BUG FIX: bad size parameters are a client error (400, not 404) and
        # non-positive sizes must be rejected as well (PIL would raise).
        if not 0 < args["width"] <= MAX_WIDTH or not 0 < args["height"] <= MAX_HEIGHT:
            abort(400, message="Out of bound parameters")
        p = Process(target=img_resize, args=(filename, args["width"], args["height"]))
        p.start()
        return "Image resize command received", 202
# adding endpoints to the api
api.add_resource(ImageList, "/")
api.add_resource(Image, "/<string:img_name>")

if __name__ == "__main__":
    # Build the in-memory image index before serving requests.
    load_img_list()
    # NOTE(review): debug=True enables the Werkzeug debugger (remote code
    # execution) — should be disabled when exposed beyond local development.
    app.run(debug=True, host = "0.0.0.0", port = SERVER_PORT)
|
multi_tcp_server.py | #!/usr/bin/env python3
import sys, os
runpath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runpath,'..'))
approot = os.path.abspath(os.path.join(runpath, os.pardir))
import lib.tcp_server as server
import lib.events as event
import lib.shm as shm
from threading import Thread
import lib.job as job
# Module metadata consumed by the hosting framework's plugin loader.
info = {'name':'multi_tcp_server',
        'description':'Handle multiple incoming connections to same port',
        'Author':'Peter Isa',
        'type':'handler'}

# User-tunable options; values are strings as produced by the option parser.
options = {'lhost':'127.0.0.1',
           'lport':'1234',
           'number':5,
           'threaded': 'true'}

# Hooks fired when a client session is established / a job is started.
session_created = event.EventHook()
job_started = event.EventHook()

# Set by run(): the MultiTCPServer instance and (in threaded mode) its thread.
lserver = None
server_thread = None
def run(*args):
    """Start the multi-connection TCP server configured via `options`.

    When options['threaded'] is 'true' the accept loop runs in a background
    thread and this call returns immediately; otherwise it blocks until the
    server stops. Extra positional args are accepted but unused (framework
    calling convention).
    """
    global lserver,server_thread
    try:
        srv_address = options['lhost']
        srv_port = int(options['lport'])
        srv_conn = int(options['number'])
        lserver = server.MultiTCPServer(srv_address,srv_port,connections=srv_conn)
        # Forward the server's session events into this module's hook.
        lserver.session_created += server_session_created
        if options['threaded'].lower() == 'true':
            server_thread = Thread(target = lserver.run)
            server_thread.start()
        else:
            lserver.run()
    except Exception as x:
        # Best-effort plugin: report the failure instead of crashing the host.
        print(f"[x] {x}")
def terminate():
    """Stop the listening server and wait for its worker thread to exit.

    Safe to call even if run() failed or ran in non-threaded mode: the
    original raised AttributeError on `None.keep_running` / `None.join()`
    in those cases.
    """
    global lserver, server_thread
    if lserver is not None:
        # Signal the accept loop to exit on its next iteration.
        lserver.keep_running = False
    # server_thread is only populated when options['threaded'] == 'true'.
    if server_thread is not None:
        server_thread.join()
def server_session_created(clienthandler):
    """Register a new client session in shared memory, then notify listeners."""
    shm.add_session(clienthandler)
    session_created.fire(clienthandler)
chat.py | # -*- coding:utf-8 -*-
import abc
import socket
import threading
from LiveChat.dao.server_dao import ServerDao
__auth__ = "daigd"
class BaseServer(metaclass=abc.ABCMeta):
    """Abstract contract for a chat server: bind an address, accept clients."""

    @abc.abstractmethod
    def bind(self):
        """Bind the underlying socket to an address."""
        pass

    @abc.abstractmethod
    def accept(self):
        """Accept an incoming client connection."""
        pass
# Shared DAO used by ChatServer.__bind to persist the bound (ip, port).
server_dao = ServerDao()
class ChatServer(BaseServer):
    """Chat-room server that relays each client's messages to all others."""

    def __init__(self):
        self.socket = socket.socket()
        self.room = {}          # id(client socket) -> client socket
        self.start_port = 8000  # first candidate port when probing
        self.limit_conn = 10    # listen() backlog

    def __get_an_available_addr(self):
        """Probe ports upward from start_port; return the first free (ip, port)."""
        ip = self.__get_local_ip()
        while True:
            probe = socket.socket()
            try:
                probe.connect((ip, self.start_port))
            except ConnectionRefusedError:
                # Nothing is listening here, so the port is free for us.
                return ip, self.start_port
            else:
                self.start_port += 1
            finally:
                # BUG FIX: the probe socket was never closed, leaking one
                # file descriptor per occupied port (and one on success).
                probe.close()

    def __get_local_ip(self):
        return socket.gethostbyname(socket.getfqdn(socket.gethostname()))

    def __bind(self):
        # Persist the chosen address so clients can discover the server.
        ip, port = self.__get_an_available_addr()
        server_dao.save(ip, port)
        self.socket.bind((ip, port))
        self.socket.listen(self.limit_conn)

    def __message_hub(self, sok: socket.socket):
        """Relay every message received on `sok` to all other room members."""
        while True:
            msg = sok.recv(1024)
            if msg:
                for sk in self.room:
                    if sk != id(sok):
                        self.room.get(sk).send(msg)

    def __save_message(self, msg):
        # Persistence hook, not implemented yet.
        pass

    def __accept(self):
        sok, raddr = self.socket.accept()
        self.room[id(sok)] = sok
        # BUG FIX: Thread's `args` must be an iterable of arguments;
        # `(sok)` is just `sok`, which made the thread raise on start.
        threading.Thread(target=self.__message_hub, args=(sok,),
                         name="chatroom").start()

    def connect(self):
        # One thread per pending accept; each spawns its own message hub.
        while True:
            threading.Thread(target=self.__accept, name="conn").start()
|
NALSM_PARALLEL_SAVE.py | import NALSM_GEN_SUPPORT as sup
import multiprocessing as mp
class multi_process:
    """Pool of worker processes that save data asynchronously via a queue."""

    def __init__(self, num_processes, save_path):
        """Spawn `num_processes` saver workers writing under `save_path`."""
        self.dataPath = save_path
        # IPC manager providing queues shared between parent and workers.
        manager1 = mp.Manager()
        self.data_feed1 = manager1.Queue()   # pending save requests
        self.status1 = manager1.Queue()      # completion signals / filenames
        # BUG FIX: an mp.Pool(num_processes) was created here but never used,
        # leaving an idle pool of extra worker processes running; removed.
        self.processes1 = []
        # activate full branch saving processes
        for i in range(num_processes):
            process_name = 'Pb%i' % i
            new_process = mp.Process(
                target=self.saveData,
                args=(process_name, self.data_feed1, self.status1))
            self.processes1.append(new_process)
            new_process.start()

    def save_data(self, signal, names, data, save_filename):
        """Queue one save request; signal == -1 tells a worker to exit."""
        self.data_feed1.put([signal, names, data, save_filename])

    def saveData(self, process_name, data_feed, status):
        """Worker loop: save queued payloads until a -1 signal arrives."""
        print('[%s] SAVE PROCESS launched, waiting for data' % process_name)
        while True:
            data = data_feed.get()
            if data[0] == -1:
                print('[%s] SAVE PROCESS terminated' % process_name)
                # An int on the status queue marks one worker's termination.
                status.put(1)
                break
            else:
                saveNames = data[1]
                saveData = data[2]
                save_filename = data[3]
                sup.save_non_tf_data(saveNames, saveData, filename=save_filename, savePath=self.dataPath)
                print('DATA_SAVED_to_'+str(save_filename))
                status.put(save_filename)

    def kill_workers(self, process_count):
        """Send one kill signal per worker and drain the status queue.

        Returns the list of filenames reported as saved before shutdown.
        """
        for i in range(0, process_count):
            print('KILL SWITCH SENT FOR PROCESS ' + str(i))
            self.save_data(signal=-1, names=[0], data=[0], save_filename='')
        list_of_save_files = []
        sum1 = 0
        # Each terminating worker puts int 1; strings are saved filenames.
        while sum1 != process_count:
            temp = self.status1.get()
            if type(temp) == int:
                sum1 = sum1 + temp
            elif type(temp) == str:
                list_of_save_files.append(temp)
            else:
                print('Found other stuff in queue....check')
        return list_of_save_files
serve.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
Host a trained paddle model with one line command
Example:
python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
"""
import argparse
import os
import json
import base64
import time
from multiprocessing import Process
import sys
if sys.version_info.major == 2:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
elif sys.version_info.major == 3:
from http.server import BaseHTTPRequestHandler, HTTPServer
from contextlib import closing
import socket
# web_service.py is still used by Pipeline.
def port_is_available(port):
    """Return True when nothing is listening on 127.0.0.1:`port`."""
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.settimeout(2)
        # connect_ex returns 0 when something accepted the connection,
        # i.e. the port is already taken; any error code means it is free.
        return sock.connect_ex(('127.0.0.1', port)) != 0
def format_gpu_to_strlist(unformatted_gpus):
    """Normalize a gpu specification into a list of gpu-id strings.

    Accepts an int, a str, a list, or None; "-1" denotes CPU mode.
    Each element may itself be a comma-separated list of ids for one op.

    Raises:
        ValueError: for unsupported input types, ids < -1, or an op that
            mixes CPU (-1) with real GPU ids.
    """
    if isinstance(unformatted_gpus, int):
        gpus_strlist = [str(unformatted_gpus)]
    elif isinstance(unformatted_gpus, list):
        if unformatted_gpus in ([""], []):
            gpus_strlist = ["-1"]
        else:
            gpus_strlist = [str(x) for x in unformatted_gpus]
    elif isinstance(unformatted_gpus, str):
        gpus_strlist = [unformatted_gpus] if unformatted_gpus else ["-1"]
    elif unformatted_gpus is None:
        gpus_strlist = ["-1"]
    else:
        raise ValueError("error input of set_gpus")

    # check cuda visible: every requested id must be exposed by the env.
    if "CUDA_VISIBLE_DEVICES" in os.environ:
        env_gpus = os.environ["CUDA_VISIBLE_DEVICES"].split(",")
        for op_gpus_str in gpus_strlist:
            op_gpu_list = op_gpus_str.split(",")
            # op_gpu_list == ["-1"] means this op use CPU
            # so don`t check cudavisible.
            if op_gpu_list == ["-1"]:
                continue
            for ids in op_gpu_list:
                if ids not in env_gpus:
                    print("gpu_ids is not in CUDA_VISIBLE_DEVICES.")
                    exit(-1)

    # check gpuid is valid
    for op_gpus_str in gpus_strlist:
        int_ids = [int(ids) for ids in op_gpus_str.split(",")]
        if any(i < -1 for i in int_ids):
            raise ValueError("The input of gpuid error.")
        # BUG FIX: the original flag-based check only rejected "-1" appearing
        # *after* a GPU id, so "0,-1" raised but "-1,0" slipped through.
        # Reject any mix of CPU and GPU ids regardless of order.
        if -1 in int_ids and any(i >= 0 for i in int_ids):
            raise ValueError("You can not use CPU and GPU in one model.")
    return gpus_strlist
def is_gpu_mode(unformatted_gpus):
    """Return True when any configured op requests a real GPU id (>= 0)."""
    return any(
        int(gpu_id) >= 0
        for op_gpus in format_gpu_to_strlist(unformatted_gpus)
        for gpu_id in op_gpus.split(","))
def serve_args():
    """Build and parse the command-line arguments for the serve entry point.

    Returns:
        argparse.Namespace with all serving options.
    """
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread",
        type=int,
        default=4,
        help="Concurrency of server,[4,1024]",
        choices=range(4, 1025))
    parser.add_argument(
        "--port", type=int, default=9393, help="Port of the starting gpu")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    # nargs="+" options below accept one value per op in a multi-model chain.
    parser.add_argument(
        "--gpu_ids", type=str, default="", nargs="+", help="gpu ids")
    parser.add_argument(
        "--op_num", type=int, default=0, nargs="+", help="Number of each op")
    parser.add_argument(
        "--op_max_batch",
        type=int,
        default=32,
        nargs="+",
        help="Max batch of each op")
    parser.add_argument(
        "--model", type=str, default="", nargs="+", help="Model for serving")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--use_mkl", default=False, action="store_true", help="Use MKL")
    parser.add_argument(
        "--precision",
        type=str,
        default="fp32",
        help="precision mode(fp32, int8, fp16, bf16)")
    parser.add_argument(
        "--use_calib",
        default=False,
        action="store_true",
        help="Use TensorRT Calibration")
    parser.add_argument(
        "--mem_optim_off",
        default=False,
        action="store_true",
        help="Memory optimize")
    parser.add_argument(
        "--ir_optim", default=False, action="store_true", help="Graph optimize")
    parser.add_argument(
        "--max_body_size",
        type=int,
        default=512 * 1024 * 1024,
        help="Limit sizes of messages")
    parser.add_argument(
        "--use_encryption_model",
        default=False,
        action="store_true",
        help="Use encryption model")
    parser.add_argument(
        "--use_trt", default=False, action="store_true", help="Use TensorRT")
    parser.add_argument(
        "--use_lite", default=False, action="store_true", help="Use PaddleLite")
    parser.add_argument(
        "--use_xpu", default=False, action="store_true", help="Use XPU")
    parser.add_argument(
        "--product_name",
        type=str,
        default=None,
        help="product_name for authentication")
    parser.add_argument(
        "--container_id",
        type=str,
        default=None,
        help="container_id for authentication")
    parser.add_argument(
        "--gpu_multi_stream",
        default=False,
        action="store_true",
        help="Use gpu_multi_stream")
    return parser.parse_args()
def start_gpu_card_model(gpu_mode, port, args):  # pylint: disable=doc-string-missing
    """Configure and run one serving instance on the given port (blocking).

    Args:
        gpu_mode: True to serve on GPU, False on CPU.
        port: port the server listens on; also suffixes the workdir.
        args: parsed namespace from serve_args().
    """
    device = "cpu"
    if gpu_mode == True:
        device = "gpu"
    thread_num = args.thread
    model = args.model
    # --mem_optim_off is a negative flag: memory optimization is on by default.
    mem_optim = args.mem_optim_off is False
    ir_optim = args.ir_optim
    use_mkl = args.use_mkl
    max_body_size = args.max_body_size
    workdir = "{}_{}".format(args.workdir, port)
    if model == "":
        print("You must specify your serving model")
        exit(-1)
    # Every --model entry must be a directory containing a model config.
    for single_model_config in args.model:
        if os.path.isdir(single_model_config):
            pass
        elif os.path.isfile(single_model_config):
            raise ValueError("The input of --model should be a dir not file.")
    import paddle_serving_server as serving
    op_maker = serving.OpMaker()
    op_seq_maker = serving.OpSeqMaker()
    read_op = op_maker.create('general_reader')
    op_seq_maker.add_op(read_op)
    for idx, single_model in enumerate(model):
        infer_op_name = "general_infer"
        # The OCR Det model node currently depends on the third-party OpenCV
        # library, which is only linked in (enabling GeneralDetectionOp) for
        # OCR builds. As a special case, unless the condition below matches,
        # the op added here defaults to GeneralInferOp. Generating this
        # configuration from a Python script may be reconsidered later.
        if len(model) == 2 and idx == 0 and single_model == "ocr_det_model":
            infer_op_name = "general_detection"
        else:
            infer_op_name = "general_infer"
        general_infer_op = op_maker.create(infer_op_name)
        op_seq_maker.add_op(general_infer_op)
    general_response_op = op_maker.create('general_response')
    op_seq_maker.add_op(general_response_op)
    server = serving.Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(thread_num)
    server.use_mkl(use_mkl)
    server.set_precision(args.precision)
    server.set_use_calib(args.use_calib)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.set_max_body_size(max_body_size)
    if args.use_trt and device == "gpu":
        # TensorRT requires graph (IR) optimization to be enabled.
        server.set_trt()
        server.set_ir_optimize(True)
    if args.gpu_multi_stream and device == "gpu":
        server.set_gpu_multi_stream()
    if args.op_num:
        server.set_op_num(args.op_num)
    if args.op_max_batch:
        server.set_op_max_batch(args.op_max_batch)
    if args.use_lite:
        server.set_lite()
    server.set_device(device)
    if args.use_xpu:
        server.set_xpu()
    if args.product_name != None:
        server.set_product_name(args.product_name)
    if args.container_id != None:
        server.set_container_id(args.container_id)
    if gpu_mode == True:
        server.set_gpuid(args.gpu_ids)
    server.load_model_config(model)
    server.prepare_server(
        workdir=workdir,
        port=port,
        device=device,
        use_encryption_model=args.use_encryption_model)
    server.run_server()
def start_multi_card(args, serving_port=None):  # pylint: disable=doc-string-missing
    """Launch a serving instance, picking CPU/lite or GPU mode from `args`."""
    port = args.port if serving_port is None else serving_port
    if args.use_lite:
        print("run using paddle-lite.")
        start_gpu_card_model(False, port, args)
    else:
        # GPU mode iff any configured gpu id is a real device (>= 0).
        start_gpu_card_model(is_gpu_mode(args.gpu_ids), port, args)
class MainService(BaseHTTPRequestHandler):
    """HTTP front-end for encrypted serving.

    Waits for a POST carrying the model decryption key, then launches the
    real serving process on a free port. Relies on the module-level globals
    `args`, `p`, `p_flag` and `serving_port` initialized in __main__.
    """

    def get_available_port(self):
        # Scan ports 12000..12999 and return the first free one.
        # NOTE(review): implicitly returns None if all 1000 ports are taken.
        default_port = 12000
        for i in range(1000):
            if port_is_available(default_port + i):
                return default_port + i

    def start_serving(self):
        # Runs inside the child Process; reads globals `args`/`serving_port`.
        start_multi_card(args, serving_port)

    def get_key(self, post_data):
        """Decode the base64 key from the request and write it beside each model."""
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            for single_model_config in args.model:
                if os.path.isfile(single_model_config):
                    raise ValueError(
                        "The input of --model should be a dir not file.")
                with open(single_model_config + "/key", "wb") as f:
                    f.write(key)
            return True

    def check_key(self, post_data):
        """Return True when the posted key matches the stored key of every model."""
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            for single_model_config in args.model:
                if os.path.isfile(single_model_config):
                    raise ValueError(
                        "The input of --model should be a dir not file.")
                with open(single_model_config + "/key", "rb") as f:
                    cur_key = f.read()
                if key != cur_key:
                    return False
            return True

    def start(self, post_data):
        """Start serving on the first valid request; re-check the key afterwards."""
        post_data = json.loads(post_data.decode('utf-8'))
        global p_flag
        if not p_flag:
            # First request: optionally receive the key, then fork the server.
            if args.use_encryption_model:
                print("waiting key for model")
                if not self.get_key(post_data):
                    print("not found key in request")
                    return False
            global serving_port
            global p
            serving_port = self.get_available_port()
            p = Process(target=self.start_serving)
            p.start()
            # Give the child a moment so startup failures are detected here.
            time.sleep(3)
            if p.is_alive():
                p_flag = True
            else:
                return False
        else:
            if p.is_alive():
                if not self.check_key(post_data):
                    return False
            else:
                return False
        return True

    def do_POST(self):
        # Single endpoint: body is JSON, reply is the serving port or an error.
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        if self.start(post_data):
            response = {"endpoint_list": [serving_port]}
        else:
            response = {"message": "start serving failed"}
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(response).encode())
if __name__ == "__main__":
    # args.device is not used at all.
    # just keep the interface.
    # so --device should not be recommended at the HomePage.
    args = serve_args()
    # Validate every --model entry up front: directories only.
    for single_model_config in args.model:
        if os.path.isdir(single_model_config):
            pass
        elif os.path.isfile(single_model_config):
            raise ValueError("The input of --model should be a dir not file.")
    if args.use_encryption_model:
        # Encrypted mode: run a small HTTP server that waits for the key;
        # the real serving process is forked from MainService.start().
        p_flag = False
        p = None
        serving_port = 0
        server = HTTPServer(('0.0.0.0', int(args.port)), MainService)
        print(
            'Starting encryption server, waiting for key from client, use <Ctrl-C> to stop'
        )
        server.serve_forever()
    else:
        start_multi_card(args)
|
flask_helper.py | from flask import Flask, request
from flask_cors import CORS
from jinja2 import Environment, PackageLoader
from IPython.display import display, HTML
from .environment_detector import build_environment
import socket
import threading
import atexit
try:
from gevent.pywsgi import WSGIServer
except ModuleNotFoundError:
raise RuntimeError("Error: gevent package is missing, please run 'conda install gevent' or"
"'pip install gevent' or 'pip install interpret-community[visualization]'")
class FlaskHelper:
    """Wraps a Flask app and serves it from a background daemon thread.

    The class attribute `app` is shared: all instances register routes on
    the same Flask application.
    """
    app = Flask(__name__)
    CORS(app)

    def __init__(self, *, port, ip):
        self.port = port
        self.ip = ip
        # dictionary to store arbitrary state for use by consuming classes
        self.shared_state = {}
        if self.ip is None:
            self.ip = "localhost"
        if self.port is None:
            # Try 100 different ports
            for port in range(5000, 5100):
                available = FlaskHelper._local_port_available(self.ip, port, rais=False)
                if available:
                    self.port = port
                    # NOTE(review): this returns from __init__ before the
                    # server thread below is started — confirm whether the
                    # auto-port path is really meant to skip starting it.
                    return
            # NOTE(review): this message has no {} placeholder, so the
            # .format(port) call below is a no-op.
            error_message = """Ports 5000 to 5100 not available.
            Please specify an open port for use via the 'port' parameter"""
            raise RuntimeError(
                error_message.format(port)
            )
        else:
            # Raises RuntimeError if the explicitly requested port is taken.
            FlaskHelper._local_port_available(self.ip, self.port)
        # Serve in the background so the constructor returns immediately.
        self._thread = threading.Thread(target=self.run, daemon=True)
        self._thread.start()
        self.env = build_environment(self.ip, self.port)

    @staticmethod
    def _local_port_available(ip, port, rais=True):
        """
        Return True if (ip, port) can be bound; otherwise raise or return False.

        Borrowed from:
        https://stackoverflow.com/questions/19196105/how-to-check-if-a-network-port-is-open-on-linux
        """
        try:
            backlog = 5
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind((ip, port))
            sock.listen(backlog)
            sock.close()
        except socket.error:  # pragma: no cover
            if rais:
                error_message = """Port {0} is not available.
                Please specify another port for use via the 'port' parameter"""
                raise RuntimeError(
                    error_message.format(port)
                )
            else:
                return False
        return True

    def run(self):
        """Run the WSGI server on (ip, port) with request logging silenced."""
        class devnull:
            # Sink for the WSGI server's log output.
            write = lambda _: None  # noqa: E731

        server = WSGIServer((self.ip, self.port), self.app, log=devnull)
        self.app.config["server"] = server
        server.serve_forever()

        # Closes server on program exit, including freeing all sockets
        # NOTE(review): serve_forever() blocks, so this registration only
        # ever runs after the server has already stopped — confirm intent.
        def closeserver():
            server.stop()

        atexit.register(closeserver)
system_test.py | import tempfile, os, os.path, subprocess, md5, time, sys, random, threading
import ringogw
# Scratch directory for this test run; every node gets a subdirectory here.
home_dir = tempfile.mkdtemp("", "ringotest-") + '/'
node_id = 0
conn = None

# Environment consumed by start_ringo.sh and the ringo nodes it launches.
os.environ['RINGOHOST'] = 'RINGODBG'
os.environ['RESYNC_INTERVAL'] = '10000'
os.environ['CHECK_EXT_INTERVAL'] = '10000'
os.environ['DOMAIN_CHUNK_MAX'] = str(100 * 1024**2)
def new_node(id = None):
    """Launch a ringo node; return (node_id_hex, Popen handle).

    When `id` is None a fresh deterministic id is derived from a counter.
    """
    global node_id
    if id == None:
        id = md5.md5("test-%d" % node_id).hexdigest()
        node_id += 1
    path = home_dir + id
    if not os.path.exists(path):
        os.mkdir(path)
    p = subprocess.Popen(["start_ringo.sh", path],
            stdin = subprocess.PIPE, stdout = subprocess.PIPE,
            stderr = subprocess.PIPE, env = os.environ)
    return id, p

def kill_node(id):
    # Kill the node whose command line contains its id string.
    subprocess.call(["pkill", "-f", id])

def domain_id(name, chunk):
    """Domain id: upper-case md5 hex digest of "<chunk> <name>"."""
    return md5.md5("%d %s" % (chunk, name)).hexdigest().upper()

def make_domain_id(did):
    # Render an integer id as bare hex (Python 2: strip "0x" prefix and "L").
    return hex(did)[2:-1]
def check_entries(r, nrepl, nentries, check_size = True):
    """Check a domain status reply: `nrepl` replicas, `nentries` entries
    each, and synctree root hashes (and optionally sizes) that all match
    the owner's. Pass nentries=None to skip the entry-count check.
    """
    if r[0] != 200:
        return False
    if len(r[1][3]) != nrepl:
        return False
    # -2 sentinels can never equal a real root/size, so an errored-out
    # owner makes the final comparison fail.
    owner_root = owner_size = -2
    roots = []
    for node in r[1][3]:
        if 'error' in node:
            continue
        num = node["num_entries"]
        if nentries != None and\
            (num == "undefined" or int(num) != nentries):
            return False
        root = -1
        if "synctree_root" in node:
            root = node["synctree_root"][0][1]
        roots.append((root, node["size"]))
        if node["owner"]:
            owner_root = root
            owner_size = node["size"]
    if len(roots) != nrepl:
        return False
    # check that root hashes match
    return [(r, s) for r, s in roots if r != owner_root\
            or (check_size and s != owner_size)] == []
def _check_results(reply, num):
if reply[0] != 200:
return False
nodes = reply[1]
l = len([node for node in nodes if node['ok']])
return len(nodes) == l == num
def _test_ring(n, num = None, nodeids = []):
    """Launch `n` nodes (or exactly the given `nodeids`) and wait for the
    ring to converge to `num` (default `n`) healthy nodes.
    """
    if num == None:
        check = lambda x: _check_results(x, n)
    else:
        check = lambda x: _check_results(x, num)
    if nodeids:
        n = len(nodeids)
    print "Launching nodes",
    res = []
    for i in range(n):
        # Stagger the launches so nodes join the ring one at a time.
        time.sleep(1)
        if nodeids:
            res.append(new_node(nodeids[i]))
        else:
            res.append(new_node())
        sys.stdout.write(".")
    print
    t = time.time()
    if _wait_until("/mon/ring/nodes", check, 30):
        print "Ring converged in %d seconds" % (time.time() - t)
        return True
    return False

def _wait_until(req, check, timeout):
    """Poll `req` once per second until `check` accepts the reply or
    `timeout` seconds elapse; True iff the check ever passed.
    """
    print "Checking results",
    for i in range(timeout):
        time.sleep(1)
        r = ringo.request(req)
        if check(r):
            print
            return True
        sys.stdout.write(".")
    print
    return False
def _put_entries(name, nitems, retries = 0):
    """Put `nitems` deterministic key/value pairs into domain `name`."""
    t = time.time()
    for i in range(nitems):
        ringo.put(name, "item-%d" % i, "testitem-%d" % i,
                retries = retries)
    print "%d items put in %dms" % (nitems, (time.time() - t) * 1000)

def _test_repl(name, n, nrepl, nitems, create_ring = True):
    """Create a domain with `nrepl` replicas on an `n`-node ring, put
    `nitems` entries and wait until every replica holds them all.
    """
    if create_ring and not _test_ring(n):
        return False
    node, domainid = ringo.create(name, nrepl)
    _put_entries(name, nitems)
    return _wait_until("/mon/domains/domain?id=0x" + domainid,
            lambda x: check_entries(x, n, nitems), 50)

def _check_extfiles(node, domainid, num):
    # True when the node's domain directory holds `num` external value files.
    files = os.listdir("%s/%s/rdomain-%s/" % (home_dir, node, domainid))
    return len([f for f in files if f.startswith("value")]) == num
def _cache_test(**kwargs):
    """Exercise the iblock/key cache with a skewed key distribution:
    105 "head" keys with 100 values each plus 50500 "tail" keys with one
    value, then verify gets in sequential and random order. kwargs are
    forwarded to ringo.create (e.g. keycache = True).
    """
    def check_values(key, ret):
        # Values for a key must come back in put order: <key>-pie-0, -1, ...
        for i, v in enumerate(ret):
            if v != key + "-pie-%d" % i:
                raise "Invalid results: Key <%s>: %s"\
                        % (key, r)
    if not _test_ring(1):
        return False
    node, domainid = ringo.create("cachetest", 5, **kwargs)
    print "Putting 100500 items.."
    t = time.time()
    head = []
    tail = []
    for i in range(105):
        key = "head-%d" % i
        head.append(key)
        for j in range(100):
            ringo.put("cachetest", key, key + "-pie-%d" % j)
    for i in range(50500):
        key = "tail-%d" % i
        tail.append(key)
        ringo.put("cachetest", key, key + "-pie-0")
    print "items put in %dms" % ((time.time() - t) * 1000)
    print "Retrieving all keys and checking values.."
    t = time.time()
    for key in head + tail:
        check_values(key, ringo.get("cachetest", key))
    print "Get took %dms" % ((time.time() - t) * 1000)
    print "Getting 10000 keys in sequential order"
    s = random.sample(head, 100) + random.sample(tail, 10)
    t = time.time()
    for i in range(10):
        random.shuffle(s)
        for key in s:
            for j in range(10):
                check_values(key, ringo.get("cachetest", key))
    print "Get took %dms" % ((time.time() - t) * 1000)
    print "Getting 10000 keys in random order"
    t = time.time()
    for i in range(10000):
        key = random.choice(tail)
        check_values(key, ringo.get("cachetest", key))
    print "Get took %dms" % ((time.time() - t) * 1000)
    return True
# make a ring, check that converges
def test01_ring10():
    return _test_ring(10)

# make a large ring, check that converges
def test02_ring100():
    return _test_ring(100)

# make a ring in two phases, check that converges
def test03_ring_newnodes():
    print "Launching first batch of nodes"
    if not _test_ring(10):
        return False
    time.sleep(10)
    print "Launching second batch of nodes"
    # 10 new nodes join the existing 10, so expect 20 in total.
    return _test_ring(10, 20)

# make a ring, kill random nodes, check that heals
def test04_ring_randomkill():
    res = []
    print "Launching nodes",
    for i in range(50):
        time.sleep(1)
        id, proc = new_node()
        res.append(id)
        sys.stdout.write(".")
    print
    time.sleep(10)
    # Kill 23 of the 50 nodes; the ring should heal to the surviving 27.
    for id in random.sample(res, 23):
        print "Kill", id
        kill_node(id)
    t = time.time()
    if _wait_until("/mon/ring/nodes",
            lambda x: _check_results(x, 27), 60):
        print "Ring healed in %d seconds" % (time.time() - t)
        return True
    return False

# create, put, check that succeeds
def test05_replication1():
    return _test_repl("test_replication1", 1, 1, 100)

# create, put, check that succeeds, replicates
def test06_replication50():
    return _test_repl("test_replication50", 50, 50, 100)
# create, put, add new node (replica), check that resyncs ok
def test07_addreplica(first_owner = True):
    """With first_owner=True a new replica joins an existing owner; with
    False the new node's id makes it the rightful owner instead.
    """
    if first_owner:
        name = "addreplicas_test"
    else:
        name = "addowner_test"
    check1 = lambda x: _check_results(x, 1)
    check2 = lambda x: _check_results(x, 2)
    did = domain_id(name, 0)
    # a node id that is guaranteed to become owner for the domain
    owner_id = real_owner = make_domain_id(int(did, 16) - 1)
    # a node id that is guaranteed to become replica for the domain
    repl_id = make_domain_id(int(did, 16) + 1)
    if not first_owner:
        # Swap roles: the node added later will be the rightful owner.
        tmp = repl_id
        repl_id = owner_id
        owner_id = tmp
    new_node(owner_id)
    print "Waiting for ring to converge:"
    if not _wait_until("/mon/ring/nodes", check1, 30):
        print "Ring didn't converge"
        return False
    print "Creating and populating the domain:"
    if not _test_repl(name, 1, 2, 50, False):
        print "Couldn't create and populate the domain"
        return False
    if first_owner:
        print "Adding a new replica:"
    else:
        print "Adding a new owner:"
    new_node(repl_id)
    if not _wait_until("/mon/ring/nodes", check2, 30):
        print "Ring didn't converge"
        return False
    _put_entries(name, 50)
    print "Waiting for resync (timeout 300s, be patient):"
    if not _wait_until("/mon/domains/domain?id=0x" + did,
            lambda x: check_entries(x, 2, 100), 300):
        print "Resync didn't finish in time"
        return False
    re = ringo.request("/mon/domains/domain?id=0x" + did)
    repl = re[1][3]
    # The real owner's id must appear in the node marked as owner.
    if real_owner in [r['node'] for r in repl if r['owner']][0]:
        print "Owner matches"
        return True
    else:
        print "Invalid owner for domain %s (should be %s), got: %s" %\
                (did, owner_id, repl)
        return False

# create, put, add new node (owner), check that resyncs ok and owner is
# transferred correctly
def test08_addowner():
    return test07_addreplica(False)
# create, put, kill owner, put, reincarnate owner, check that succeeds
# and resyncs
def test09_killowner():
    if not _test_ring(10):
        return False
    print "Create and populate domain"
    node, domainid = ringo.create("killowner", 5)
    _put_entries("killowner", 50)
    # Node name looks like "<prefix>-<id>@<host>"; extract the id part.
    kill_id = node.split('@')[0].split("-")[1]
    print "Kill owner", kill_id
    kill_node(kill_id)
    print "Put 50 entries:"
    _put_entries("killowner", 50, retries = 10)
    if not _wait_until("/mon/domains/domain?id=0x" + domainid,
            lambda x: check_entries(x, 6, 100), 300):
        print "Resync didn't finish in time"
        return False
    print "Owner reincarnates"
    new_node(kill_id)
    print "Put 50 entries:"
    _put_entries("killowner", 50, retries = 10)
    if _wait_until("/mon/domains/domain?id=0x" + domainid,
            lambda x: check_entries(x, 7, 150), 300):
        return True
    else:
        print "Resync didn't finish in time"
        return False
# create owner and a replica that has a distant id. Put items. Kill owner,
# add items to the distant node. Reincarnate owner and add new nodes between
# the owner and the distant node. Check that resyncs ok.
#
# NB: This test doesn't quite test what it should: Polling status from the
# distant node activates it, which causes resync to active as well. In a more
# realistic case the owner would have to active the distant node by itself with
# the global resync process.
def test10_distantsync():
    did = domain_id("distantsync", 0)
    # Replica 20 id-slots below the domain id; owner exactly at the id.
    distant_id, p = new_node(make_domain_id(int(did, 16) - 20))
    time.sleep(1)
    owner_id, p = new_node(did)
    print "Owner is", owner_id
    print "Distant node is", distant_id
    if not _wait_until("/mon/ring/nodes",
            lambda x: _check_results(x, 2), 30):
        print "Ring didn't converge"
        return False
    print "Create and populate domain"
    if not _test_repl("distantsync", 2, 3, 60, create_ring = False):
        print "Couldn't create and populate the domain"
        return False
    print "Kill owner", owner_id
    kill_node(owner_id)
    print "Put 30 entries:"
    _put_entries("distantsync", 30, retries = 10)
    print "Creating more node, reincarnating owner"
    # Fill the id range between the distant node and the owner (39 ids,
    # expecting 40 nodes in total once the owner is reincarnated).
    if not _test_ring(0, 40, [make_domain_id(int(did, 16) + i)
            for i in range(-19, 20)]):
        return False
    print "Putting 10 entries to the new owner"
    _put_entries("distantsync", 10)
    print "Waiting for everything to resync"
    if _wait_until("/mon/domains/domain?id=0x" + did,
            lambda x: check_entries(x, 5, 100), 300):
        return True
    else:
        print "Resync didn't finish in time"
        return False
# This test checks that code updates can be perfomed smoothly in the
# ring. This happens by restarting nodes one by one. Restarting shouldn't
# distrupt concurrent put operations, except some re-requests may be
# needed.
def test11_simcodeupdate():
    names = ["simcodeupdate1", "simcodeupdate2"]
    def pput():
        # Concurrent writer: its own gateway connection, retried puts.
        rgo = ringogw.Ringo(sys.argv[1])
        for i in range(10):
            k = "item-%d" % i
            v = "testitem-%d" % i
            for name in names:
                rgo.put(name, k, v, retries = 10)
    did1 = int(domain_id(names[0], 0), 16)
    did2 = int(domain_id(names[1], 0), 16)
    mi = min(did1, did2)
    # Ten consecutive node ids starting at the smaller domain id.
    ids = [make_domain_id(mi + i) for i in range(10)]
    if not _test_ring(0, 10, ids):
        return False
    print "Creating domains.."
    for name in names:
        ringo.create(name, 6)
    print "Restarting nodes one at time:"
    for id in ids:
        print "Restart", id
        t = threading.Thread(target = pput).start()
        kill_node(id)
        new_node(id)
        # if the pause is too small, say 1sec, there's a danger
        # that replicas aren't yet fully propagated for the previous
        # requests and killing a node might make a replica jump over
        # the zombie and make a new replica domain. In the extreme
        # case the domain is propagated to all the nodes in the ring.
        time.sleep(7)
    print "All the nodes restarted."
    print "NB: Test may sometimes fail due to a wrong number of replicas,"
    print "typically 7 instead of 8. This is ok."
    # actually the number of replicas may be off by one here, if a put
    # requests hits a node while its down. So don't worry if the test fails
    # due to a wrong number of replicas.
    if not _wait_until("/mon/domains/domain?id=0x" + make_domain_id(did1),
            lambda x: check_entries(x, 8, 100), 300):
        return False
    if not _wait_until("/mon/domains/domain?id=0x" + make_domain_id(did2),
            lambda x: check_entries(x, 8, 100), 300):
        return False
    return True
# Check that putting large (1M) entries to external files works in
# replication and resyncing.
def test12_extsync():
    if not _test_ring(5):
        return False
    node, domainid = ringo.create("extsync", 6)
    v = "!" * 1024**2
    print "Putting ten 1M values"
    for i in range(10):
        ringo.put("extsync", "fub-%d" % i, v, verbose = True)
    if not _wait_until("/mon/domains/domain?id=0x" + domainid,
            lambda x: x[0] == 200, 30):
        return False
    # Node ids parsed from names of the form "<prefix>-<id>@<host>".
    replicas = [n['node'].split('-')[1].split('@')[0] for n in\
            ringo.request("/mon/domains/domain?id=0x" + domainid)[1][3]]
    for repl in replicas:
        if not _check_extfiles(repl, domainid, 10):
            print "Ext files not found on node", repl
            return False
    print "Ext files written ok to all replicas"
    print "Deleting some files on node", replicas[0]
    print "to check that check_external works:"
    files = os.listdir("%s/%s/rdomain-%s/" %
            (home_dir, replicas[0], domainid))
    for rem in random.sample(
            [f for f in files if f.startswith("value")], 5):
        print "Deleting", rem
        os.remove("%s/%s/rdomain-%s/%s" %
                (home_dir, replicas[0], domainid, rem))
    newid, p = new_node()
    print "Creating a new node", newid
    if not _wait_until("/mon/ring/nodes",
            lambda x: _check_results(x, 6), 60):
        return False
    print "Putting an extra item (should go to the new node as well)"
    ringo.put("extsync", "extra", v, verbose = True)
    if not _wait_until("/mon/domains/domain?id=0x" + domainid,
            lambda x: check_entries(x, 6, 11), 300):
        return False
    # NOTE(review): this loops over `replicas + [newid]` but always checks
    # `newid` — the loop variable `repl` is unused in the call; it looks
    # like _check_extfiles(repl, ...) was intended. Confirm before changing.
    for repl in replicas + [newid]:
        if not _check_extfiles(newid, domainid, 11):
            print "All ext files not found on node", repl
            return False
    return True
# Simple get test: Get a single value without chunked transfer
def test13_singleget():
    # One node is enough for plain, non-chunked gets.
    if not _test_ring(1):
        return False
    node, domainid = ringo.create("basicget", 5)
    # Store five small key/value pairs...
    for i in range(5):
        ringo.put("basicget", "muppet-%d" % i, "nufnuf-%d" % i)
    # ...and read each one back with single = True (no chunked transfer).
    for i in range(5):
        r = ringo.get("basicget", "muppet-%d" % i, single = True)
        if r != "nufnuf-%d" % i:
            print "Invalid reply", r
            return False
    return True
# Get multiple values: Test chunked transfer and entry_callback
def test14_multiget():
def check_reply(entry, out):
if entry != "nufnuf-%d" % len(out):
raise "Invalid reply", entry
out.append(entry)
if not _test_ring(1):
return False
node, domainid = ringo.create("multiget", 5)
print "Putting 1000 items.."
for i in range(1000):
ringo.put("multiget", "bork", "nufnuf-%d" % i)
print "Getting 1000 items.."
out = ringo.get("multiget", "bork", entry_callback = check_reply, verbose = True)
if len(out) != 1000:
raise "Invalid number of replies: %d" % len(out)
return True
# Test that iblock cache works correctly with many iblocks
def test15_iblockcache():
    # Delegates to the shared cache test with the default (iblock) cache.
    return _cache_test()
# Test that key cache works correctly with many iblocks
def test16_keycache():
    # Same cache test as test15, but with the key cache enabled.
    return _cache_test(keycache = True)
# Test that interleaved puts and gets work correctly with both the caches
def test17_putget(**kwargs):
keycache = 'keycache' in kwargs
if keycache:
print "Testing with key cache"
else:
print "Testing with iblock cache"
if not _test_ring(5):
return False
dname = "putgettest-%s" % keycache
node, domainid = ringo.create(dname, 5, **kwargs)
values = ["zing-%d-%s" % (i, keycache) for i in range(2)]
print "Putting and getting 15050 keys.."
for i in range(1505):
key = "k-%d-%s" % (i, keycache)
for j in range(10):
for value in values:
r = ringo.put(dname, key, value)
r = ringo.get(dname, key)
c = values * (j + 1)
if r != c:
print key, j
raise "Invalid reply %s (%d) expected %s (%d)"\
% (r, len(r), c, len(c))
print "Results ok"
if keycache:
return True
else:
return test17_putget(keycache = True)
def single_get_check(dname, N):
print "Check get.."
for i in range(N):
r = ringo.get(dname, "abc-%d" % i)
if r != ['def-%d' % i]:
raise "Invalid reply to key %s: %s" %\
("abc-%d" % i, r)
print "Get ok"
# Test that missing or corrupted iblocks are re-generated correctly during
# index initialization.
def test18_regeniblocks():
    N = 50500
    if not _test_ring(5):
        return False
    node, domainid = ringo.create("regentest", 5)
    print "Putting %d entries.." % N
    for i in range(N):
        ringo.put("regentest", "abc-%d" % i, "def-%d" % i)
    print "Entries put"
    single_get_check("regentest", N)
    # The owner's id is embedded in its node name: <host>-<id>@...
    kill_id = node.split('@')[0].split("-")[1]
    print "Kill owner", kill_id
    kill_node(kill_id)
    path = "%s/%s/rdomain-%s/" % (home_dir, kill_id, domainid)
    # Collect the iblock files sorted by their numeric index.
    ifiles = sorted([(int(x.split('-')[1]), x)\
        for x in os.listdir(path) if x.startswith("iblock")])
    # One iblock is expected per 10000 entries (py2 integer division).
    if len(ifiles) != N / 10000:
        print "Incorrect number of iblocks: %d expected %d"\
            % (len(ifiles), N / 10000)
        print "iblocks", ifiles
        return False
    iblocks = []
    print "Removing some iblocks"
    for i, iblock in enumerate(ifiles):
        fname = "%s/%s" % (path, iblock[1])
        f = file(fname).read()
        # Remember every iblock's checksum, then delete every other one so
        # the node has to regenerate them on restart.
        iblocks.append((iblock[1], md5.md5(f).hexdigest()))
        if i % 2:
            print "Deleting", iblock[1]
            os.remove(fname)
    print "Reincarnating owner"
    new_node(kill_id)
    if not _wait_until("/mon/ring/nodes",
            lambda x: _check_results(x, 5), 60):
        return False
    single_get_check("regentest", N)
    print "Checking iblocks"
    # Regenerated iblocks must be byte-identical to the originals.
    for iblock, checksum in iblocks:
        fname = "%s/%s" % (path, iblock)
        f = file(fname).read()
        if checksum != md5.md5(f).hexdigest():
            print "Checksums don't match for iblock", iblock
            return False
    print "Checksums match"
    return True
# Test that gets are properly redirected when a new, empty owner is spawned.
# Note that especially interesting is the case when the new owner has resynced
# some of the entries, but not all, and GETs still need to be redirected to get
# the most comprehensive results.
def test19_redirget():
    N = 20500
    did = domain_id("redirget", 0)
    # A node id just below the domain id becomes the owner when it joins.
    owner_id = real_owner = make_domain_id(int(did, 16) - 1)
    # Nine ids just above the domain id act as replicas.
    ids = [make_domain_id(int(did, 16) + i) for i in range(1, 10)]
    print "Create 9 replica nodes first.."
    if not _test_ring(0, 9, ids):
        return False
    print "Putting %d entries.." % N
    node, domainid = ringo.create("redirget", 5)
    for i in range(N):
        ringo.put("redirget", "abc-%d" % i, "def-%d" % i)
    single_get_check("redirget", N)
    print "Entries went to", node
    print "Creating owner node:", owner_id
    new_node(owner_id)
    if not _wait_until("/mon/ring/nodes",
            lambda x: _check_results(x, 10), 30):
        print "Ring didn't converge"
        return False
    # Gets must still succeed while the new owner is (partially) empty.
    single_get_check("redirget", N)
    # Owner and replicas will have different sizes here, thus check_size =
    # False. Iblocks are included in the chunk size, but owner doesn't have
    # them. Check_external() ignores iblocks, so they won't get to the owner
    # automatically so the owner size differs from the replica size.
    print "Waiting for resync.."
    if not _wait_until("/mon/domains/domain?id=0x" + domainid,
            lambda x: check_entries(x, 7, N, check_size = False),\
            300):
        print "Resync failed"
        return False
    print "Killing replicas.."
    for id in ids:
        kill_node(id)
    print "Waiting for ring to recover.."
    if not _wait_until("/mon/ring/nodes",
            lambda x: _check_results(x, 1), 60):
        return False
    # With only the owner left it must now serve the full data set alone.
    single_get_check("redirget", N)
    return True
def _check_chunks(x, num_chunks):
if x[0] != 200:
return False
if len(x[1]) != num_chunks:
return False
return True
# Basic chunking test: Put so many items that the maximum chunk size is
# exceeded multiple times. Check that the number of chunk is correct and
# all the items are retrieved ok.
def test20_manychunks():
    chunk_size = 500 * 1024
    # Roughly two chunks' worth of entries (py2 integer division).
    N = (chunk_size / len("abc-0def-0")) * 2
    # Temporarily lower the chunk limit so chunking triggers quickly.
    orig_max = os.environ['DOMAIN_CHUNK_MAX']
    os.environ['DOMAIN_CHUNK_MAX'] = str(chunk_size)
    if not _test_ring(1):
        return False
    node, domainid = ringo.create("manychunks", 5)
    print "Putting %d items" % N
    t = time.time()
    for i in range(N):
        ringo.put("manychunks", "abc-%d" % i, "def-%d" % i)
    print "Put took %dms" % ((time.time() - t) * 1000)
    # There's nothing special in 12 chunks. It just seems that it's
    # the correct number for this chunk size and these entries. If
    # the on-disk entry format changes, this number is likely to
    # change too.
    if not _wait_until("/mon/domains/node?name=" + node,
            lambda x: _check_chunks(x, 12), 30):
        print "Couldn't find 12 chunks"
        return False
    print "Got a correct number of chunks"
    t = time.time()
    single_get_check("manychunks", N)
    print "Get took %dms" % ((time.time() - t) * 1000)
    # Restore the original chunk limit for subsequent tests.
    os.environ['DOMAIN_CHUNK_MAX'] = orig_max
    return True
# Test chunks with replicas. Put many items, as in the manychunks test.
# Check that replicas are in sync and have the same size with the owner.
def test21_chunkrepl():
chunk_size = 500 * 1024
N = (chunk_size / len("abc-0def-0")) * 2
orig_max = os.environ['DOMAIN_CHUNK_MAX']
os.environ['DOMAIN_CHUNK_MAX'] = str(chunk_size)
if not _test_ring(10):
return False
node, domainid = ringo.create("chunkrepl", 5)
print "Putting %d items with replicas" % N
t = time.time()
for i in range(N):
ringo.put("chunkrepl", "abc-%d" % i, "def-%d" % i)
print "Put took %dms" % ((time.time() - t) * 1000)
code, reply = ringo.request("/mon/domains/domain?id=0x" + domainid)
orig_size = reply[3][0]['size']
for i in range(12):
chunkid = domain_id("chunkrepl", 0)
print "Checking chunk", i
# Make sure that all replicas for this chunk are of equal size
if not _wait_until("/mon/domains/domain?id=0x" + chunkid,
lambda x: check_entries(x, 6, None), 300):
print "Resync didn't finish in time"
return False
# Check that the size is the same for all the replicas, except
# the last one
if i < 11:
code, reply = ringo.request("/mon/domains/domain?id=0x" + chunkid)
chunk_size = reply[3][0]['size']
if chunk_size != orig_size:
print "Chunk %d has incorrect size %d, should be %d" %\
(i, chunk_size, orig_size)
return False
print "Chunk ok"
t = time.time()
single_get_check("chunkrepl", N)
print "Get took %dms" % ((time.time() - t) * 1000)
os.environ['DOMAIN_CHUNK_MAX'] = orig_max
return True
# Check that chunk size is re-computed correctly after a node is
# re-instantiated.
def test22_chunksizes():
    chunk_size = 500 * 1024
    orig_max = os.environ['DOMAIN_CHUNK_MAX']
    os.environ['DOMAIN_CHUNK_MAX'] = str(chunk_size)
    if not _test_ring(1):
        return False
    node, domainid = ringo.create("chunksizes", 5)
    print "Filling about 60% of the chunk.."
    for i in range(5000):
        ringo.put("chunksizes", "abc-%d" % i, "def-%d" % i)
    if not _wait_until("/mon/domains/domain?id=0x" + domainid,
            lambda x: check_entries(x, 1, 5000), 300):
        print "Put failed"
        return False
    # Remember the chunk size reported before the restart...
    code, reply = ringo.request("/mon/domains/domain?id=0x" + domainid)
    orig_size = reply[3][0]['size']
    print "Kill node.."
    kill_node(domainid)
    time.sleep(1)
    print "Reinstantiate it.."
    new_node(domainid)
    print "Waiting for ring to recover.."
    if not _wait_until("/mon/ring/nodes",
            lambda x: _check_results(x, 1), 60):
        return False
    # ...and require the re-computed size to match it exactly.
    code, reply = ringo.request("/mon/domains/domain?id=0x" + domainid)
    new_size = reply[3][0]['size']
    if orig_size != new_size:
        print "Chunk size was %d bytes before and after reinstation "\
            "%d bytes. No good." % (orig_size, new_size)
        return False
    print "Sizes match. Great!"
    print "Putting more items.."
    # Push past the chunk limit so a second chunk must be created.
    for i in range(5000, 10000):
        ringo.put("chunksizes", "abc-%d" % i, "def-%d" % i)
    print "Checking chunks.."
    if not _wait_until("/mon/domains/node?name=" + node,
            lambda x: _check_chunks(x, 2), 30):
        print "Couldn't find two chunks"
        return False
    single_get_check("chunksizes", 10000)
    os.environ['DOMAIN_CHUNK_MAX'] = orig_max
    return True
# Make node A, put 90% entries, kill A. make node B put 90%, restart A.
# After resyncing owner should be 180% full and only one chunk should
# exist (note that percentages are totally approximate). The idea is
# anyway that resyncing should work with closed domains.
def test23_resyncfull():
    def make_and_put(node_id, start):
        # Fresh single-node ring, then fill ~70% of one chunk starting at
        # key offset `start`.
        ringo.request("/mon/ring/reset")
        ringo.request("/mon/domains/reset")
        new_node(node_id)
        if not _wait_until("/mon/ring/nodes",
                lambda x: _check_results(x, 1), 60):
            return False
        print "Create domain"
        node, domainid = ringo.create("resyncfull", 2)
        print "Filling about 70% of the chunk.."
        for i in range(start, start + 8000):
            ringo.put("resyncfull", "abc-%d" % i, "def-%d" % i)
        if not _wait_until("/mon/domains/domain?id=0x" + domainid,
                lambda x: check_entries(x, 1, 8000), 300):
            print "Put failed"
            return False
        return True
    chunk_size = 500 * 1024
    orig_max = os.environ['DOMAIN_CHUNK_MAX']
    os.environ['DOMAIN_CHUNK_MAX'] = str(chunk_size)
    did = domain_id("resyncfull", 0)
    # Place A just below the domain id (so it owns it) and B just above.
    owner_id = make_domain_id(int(did, 16) - 1)
    another_id = make_domain_id(int(did, 16) + 1)
    print "Instantiating node A:", owner_id
    if not make_and_put(owner_id, 0):
        return False
    print "Kill node A"
    kill_node(owner_id)
    print "Instantiating node B:", another_id
    if not make_and_put(another_id, 8000):
        return False
    print "Reinstantiating node A"
    new_node(owner_id)
    # This just wakes up the domain
    code, reply = ringo.request("/mon/domains/domain?id=0x" + did)
    print "Waiting for resync.."
    if not _wait_until("/mon/domains/domain?id=0x" + did,
            lambda x: check_entries(x, 2, 16000), 300):
        print "Resync failed"
        return False
    # After merging both 70% fills, the chunk must be marked closed (full).
    code, reply = ringo.request("/mon/domains/domain?id=0x" + did)
    if reply[3][0]['full'] == False:
        print "Chunk should be closed"
        return False
    single_get_check("resyncfull", 16000)
    os.environ['DOMAIN_CHUNK_MAX'] = orig_max
    return True
# X put, exceed chunk limit, check that new chunk is created. Check get.
# X put with replicas, exceed chunk limit, wait to converge, check that sizes
# match
# X put to 50% chunk limit, kill node, put 50%, check that new chunk is
# created, kill node, put 50%, check that two chunks exist (big values too)
# Collect every function named test* and run them in name order.
tests = sorted([f for f in globals().keys() if f.startswith("test")])
if len(sys.argv) > 2 and sys.argv[2] == '?':
    # `?` as the selector lists the available test names and exits.
    print "Available tests:\n", "\n".join([t[7:] for t in tests])
    sys.exit(1)
ringo = ringogw.Ringo(sys.argv[1])
for f in tests:
    # Names are of the form testNN_name; select by the part after '_'.
    prefix, testname = f.split("_", 1)
    if len(sys.argv) > 2 and testname not in sys.argv[2:]:
        continue
    # Start every test from a clean slate.
    kill_node("ringotest")
    ringo.request("/mon/ring/reset")
    ringo.request("/mon/domains/reset")
    time.sleep(1)
    print "*** Starting", testname
    if globals()[f]():
        print "+++ Test", testname, "successful"
    else:
        print "--- Test", testname, "failed"
        sys.exit(1)
|
ecmwf_mesoscale_fetcher.py | #!/usr/bin/env python
from multiprocessing import Process
from multiprocessing.managers import SyncManager
import signal
try:
from gribapi import *
GRIBAPI = True
except ImportError:
from eccodes import *
GRIBAPI = False
import logging
import argparse
import sys
try:
import yaml
except ImportError:
print("You need to install pyyaml. Issue a 'pip install pyyaml'")
import datetime
import os
import shutil
#from ecmwfapi import ECMWFDataServer
from cdsapi import Client
from ecmwf_dataset_template import returnModelData, SUPPORTED_MODELS
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)
class DummyObject(object):
    """Attribute-style wrapper around a config mapping.

    Keys of the initial config become real instance attributes; anything
    assigned later is routed into the internal ``datastore`` dict and read
    back through ``__getattr__``.
    """

    def __init__(self, config):
        # Bypass our own __setattr__ so these become genuine attributes.
        object.__setattr__(self, 'datastore', {})
        for key, value in config.items():
            object.__setattr__(self, key, value)

    def __getattr__(self, key):
        # Only reached when normal attribute lookup fails.
        return self.datastore[key]

    def __setattr__(self, key, value):
        # All later assignments land in the datastore dict.
        self.datastore[key] = value
def mgr_init():
    """Initializer for the SyncManager server process: ignore SIGINT so
    only the main process reacts to Ctrl-C."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    logging.info("Sync manager initalized")
def parseYAML():
    """Read ./config.yaml and return its top-level keys as attributes of
    an ad-hoc object. Exits the process on read or parse errors."""
    try:
        with open("config.yaml", "r") as f:
            stream = f.read()
    except EnvironmentError:
        logging.error("ERROR: Couldn't open config.yaml")
        sys.exit(-1)
    try:
        yaml_config = yaml.safe_load(stream)
    except yaml.YAMLError:
        logging.error("ERROR: Some kind of parsing error happened.")
        sys.exit(-1)
    print(yaml_config)
    # Build a throwaway class whose class attributes are the config keys,
    # mimicking an argparse Namespace.
    Args = type("Args", (object,), yaml_config)
    return (Args())
def setupArgParser():
    """Build the command-line parser for the fetcher.

    Returns:
        argparse.ArgumentParser with all fetch options (dates, grid,
        resolution, input and output model).
    """
    parser = argparse.ArgumentParser(description="Fetch ERA[5|-interim] gribs from ECMWF")
    parser.add_argument('-outmodel', type=str, required=True,
                        choices=["cosmo", "wrf"])
    parser.add_argument('-inmodel', type=str, required=True,
                        choices=SUPPORTED_MODELS)
    parser.add_argument(
        '-startdate', type=str,
        help="Enter Startdate like '20140201'", required=True)
    parser.add_argument('-enddate', type=str,
                        help="Enter Enddate like '20140230'", required=True)
    parser.add_argument('-grid', type=str, required=True,
                        help="Please enter grid in the form 'N/W/S/E'")
    parser.add_argument(
        '-res',
        type=str,
        help="Please enter resolution in the form 'dx/dy'",
        # BUG FIX: the default contained a stray ')' ("0.25/0.25)"),
        # which would have been passed verbatim as an invalid grid spec.
        default="0.25/0.25")
    return parser
def parseArgs(parser):
    """Parse sys.argv with the given parser and return the namespace."""
    return parser.parse_args()
def setArguments(args, dic_list):
    """Stamp the shared request fields (date range, area, grid) from the
    parsed arguments onto every request dict and return the list.

    Assumes args.startdate/enddate are datetime objects (sanityCheck has
    already run)."""
    date_range = "{}/to/{}".format(args.startdate.strftime("%Y-%m-%d"),
                                   args.enddate.strftime("%Y-%m-%d"))
    for request in dic_list:
        request.update(date=date_range, area=args.grid, grid=args.res)
    return dic_list
def sanityCheck(args):
    """Validate and convert the date arguments.

    Returns:
        (start, end) as datetime objects. Exits the process on malformed
        dates or if end precedes start; a cross-month range only logs.
    """
    try:
        start = datetime.datetime.strptime(args.startdate, "%Y%m%d")
    except ValueError:
        logging.error(
            "ERROR: Wrongly formatted Startdate: {}".format(
                args.startdate))
        sys.exit(-1)
    try:
        end = datetime.datetime.strptime(args.enddate, "%Y%m%d")
    except ValueError:
        logging.error(
            "ERROR: Wrongly formatted Enddate: {}".format(
                args.enddate))
        sys.exit(-1)
    delta = end - start
    if delta < datetime.timedelta(0):
        logging.error("ERROR: Enddate earlier than Startdate: {} -> {}".format(
            args.startdate, args.enddate))
        sys.exit(-1)
    if end.month != start.month:
        # Deliberately non-fatal: the exit below is commented out.
        logging.error(
            "ERROR: Only dates in the same month are yet supported at the moment.")
        logging.error("You can execute the script multiple times")
        #sys.exit(-1)
    return start, end
def catBinaryOutput(outfile, infiles):
    """Concatenate the binary files in *infiles* into *outfile*, streaming
    in 4 MiB chunks so memory use stays bounded."""
    chunk_size = 1024 * 4096
    with open(outfile, "wb") as out:
        for fname in infiles:
            with open(fname, "rb") as inf:
                # iter() with a sentinel stops on the first empty read.
                for block in iter(lambda: inf.read(chunk_size), b""):
                    out.write(block)
def convertToGrib1gribapi(ifile):
    """Convert a GRIB file to edition 1 using the legacy gribapi bindings,
    then replace the original file with the converted one."""
    with open (ifile, "rb") as inp:
        with open(ifile + ".grb1", "wb") as output:
            while True:
                gid = grib_new_from_file(inp)
                if gid is None:
                    # No more messages in the input file.
                    break
                # Drop the PV coordinate array and force edition 1.
                grib_set(gid,'deletePV',1)
                grib_set(gid,'edition',1)
                grib_write(gid,output)
                grib_release(gid)
    # Atomically take the place of the input file.
    os.rename(ifile + ".grb1", ifile)
def convertToGrib1eccodes(ifile):
    """Convert a GRIB file to edition 1 using the ecCodes bindings, then
    replace the original file with the converted one."""
    with open (ifile, "rb") as inp:
        with open(ifile + ".grb1", "wb") as output:
            while True:
                gid = codes_grib_new_from_file(inp)
                if gid is None:
                    # No more messages in the input file.
                    break
                # Drop the PV coordinate array and force edition 1.
                codes_set(gid,'deletePV',1)
                codes_set(gid,'edition',1)
                codes_write(gid,output)
                codes_release(gid)
    # Atomically take the place of the input file.
    os.rename(ifile + ".grb1", ifile)
def fetchECMWF(dic):
    """Run a single CDS retrieval described by *dic* (must contain a
    'target' key naming the output file).

    Intended as a worker-process target, so errors are logged rather than
    propagated."""
    server = Client()
    logging.info("MARS Request: {}".format(dic))
    try:
        server.retrieve("reanalysis-era5-complete",dic, dic['target'])
    except KeyboardInterrupt:
        logging.error("SIG INT caught. aborting")
    except Exception:
        # FIX: log the actual failure (with traceback) instead of
        # swallowing it silently, and don't catch BaseException, which
        # would also trap SystemExit.
        logging.exception(
            "ERROR: Something of your request is not working, either "
            "ecmwf or in the request itself")
def splitGRIBSgribapi(ifile):
    """Split a concatenated grib file into per-timestep files named
    eas<date><HH>, using the legacy gribapi index interface."""
    index_keys = ["dataDate", "dataTime"]
    logging.info("Creating index for grib file")
    iid = grib_index_new_from_file(ifile, index_keys)
    date_vals, time_vals = grib_index_get(
        iid, "dataDate"), grib_index_get(
        iid, "dataTime")
    logging.info("Splitting grib")
    for date in date_vals:
        grib_index_select(iid, index_keys[0], date)
        for time in time_vals:
            logging.info("Working on {} {}".format(date, time))
            grib_index_select(iid, index_keys[1], time)
            # Times are "0", "600", "1200", ... -> two-digit hour string.
            if time == "0":
                time = "00"
            else:
                # BUG FIX: use integer division; on Python 3 `/` yields a
                # float, so e.g. "1200" became "12.0" instead of "12".
                time = "{:02}".format(int(time) // 100)
            with open("eas{}{}".format(date, time), "ab") as out:
                while True:
                    gid = grib_new_from_index(iid)
                    if gid is None:
                        break
                    grib_write(gid, out)
                    grib_release(gid)
def splitGRIBSeccodes(ifile):
    """Split a concatenated grib file into per-timestep files named
    eas<date><HH>, using the ecCodes index interface."""
    index_keys = ["dataDate", "dataTime"]
    logging.info("Creating index for grib file")
    iid = codes_index_new_from_file(ifile, index_keys)
    date_vals, time_vals = codes_index_get(
        iid, "dataDate"), codes_index_get(
        iid, "dataTime")
    logging.info("Splitting grib")
    for date in date_vals:
        codes_index_select(iid, index_keys[0], date)
        for time in time_vals:
            logging.info("Working on {} {}".format(date, time))
            codes_index_select(iid, index_keys[1], time)
            # Times are "0", "600", "1200", ... -> two-digit hour string.
            if time == "0":
                time = "00"
            else:
                # BUG FIX: use integer division; on Python 3 `/` yields a
                # float, so e.g. "1200" became "12.0" instead of "12".
                time = "{:02}".format(int(time) // 100)
            with open("eas{}{}".format(date, time), "ab") as out:
                while True:
                    gid = codes_new_from_index(iid)
                    if gid is None:
                        break
                    codes_write(gid, out)
                    codes_release(gid)
def cleanup(files):
    """Delete every path in *files* that exists as a regular file;
    missing paths are silently skipped."""
    for path in filter(os.path.isfile, files):
        os.remove(path)
def manageProcs(dic_list):
    """Run one fetchECMWF worker process per request dict and wait for all
    of them; re-raises KeyboardInterrupt after logging."""
    # The manager process ignores SIGINT (see mgr_init), so Ctrl-C is
    # handled only here in the parent.
    manager = SyncManager()
    manager.start(mgr_init)
    procs = []
    for i in range(len(dic_list)):
        p = Process(target=fetchECMWF, args=(dic_list[i],))
        p.start()
        procs.append(p)
    try:
        for proc in procs:
            proc.join()
    except KeyboardInterrupt:
        logging.error("SIGINT in main. Aborting")
        raise KeyboardInterrupt
    finally:
        # Always tear the manager down, even on interrupt.
        manager.shutdown()
def fetchCOSMO(args):
    """Fetch, concatenate and split the input-model gribs for COSMO."""
    dic_list, infile_list, out_file = returnModelData(args.inmodel, args.outmodel)
    logging.info("******************************************")
    logging.info(
        "Set grid to {} and resolution to {}".format(
            args.grid, args.res))
    dic_list = setArguments(args, dic_list)
    logging.info("Starting ecmwf mars request")
    try:
        manageProcs(dic_list)
    except (Exception, KeyboardInterrupt):
        # FIX: the bare `except:` hid the actual failure; log it with a
        # traceback before exiting. KeyboardInterrupt is still caught so
        # Ctrl-C keeps producing a clean exit(-1) as before.
        logging.exception("ERROR, exiting")
        sys.exit(-1)
    logging.info("Ecmwf request finished....")
    logging.info("******************************************")
    logging.info("Concat gribs")
    catBinaryOutput(out_file, infile_list)
    logging.info("Split gribs and name them for cosmo")
    # COSMO consumes the gribs as-is, so no edition conversion is done.
    if GRIBAPI:
        splitGRIBSgribapi(out_file)
    else:
        splitGRIBSeccodes(out_file)
    cleanup(infile_list)
    logging.info("Cleaning directory...")
def fetchWRF(args):
    """Fetch, concatenate, convert to GRIB1 and split the input-model
    gribs for WRF."""
    dic_list, infile_list, out_file = returnModelData(args.inmodel, args.outmodel)
    logging.info("******************************************")
    logging.info(
        "Set grid to {} and resolution to {}".format(
            args.grid, args.res))
    dic_list = setArguments(args, dic_list)
    logging.info("Starting ecmwf mars request")
    try:
        manageProcs(dic_list)
    except (Exception, KeyboardInterrupt):
        # FIX: the bare `except:` hid the actual failure; log it with a
        # traceback before exiting. KeyboardInterrupt is still caught so
        # Ctrl-C keeps producing a clean exit(-1) as before.
        logging.exception("ERROR, exiting")
        sys.exit(-1)
    logging.info("Ecmwf request finished....")
    logging.info("******************************************")
    logging.info("Concat gribs")
    catBinaryOutput(out_file, infile_list)
    logging.info("Split gribs and name them for wrf")
    # WRF needs GRIB edition 1, so convert before splitting.
    if GRIBAPI:
        convertToGrib1gribapi(out_file)
        splitGRIBSgribapi(out_file)
    else:
        convertToGrib1eccodes(out_file)
        splitGRIBSeccodes(out_file)
    cleanup(infile_list)
    logging.info("Cleaning directory...")
if __name__ == "__main__":
    logging.info("******************************************")
    logging.info(" ERA5/interim for mesoscale simulations fetcher ")
    logging.info("******************************************")
    # No CLI arguments -> read config.yaml; otherwise use argparse.
    if len(sys.argv) == 1:
        # FIX: typo in log message ("configration").
        logging.info(" Using configuration file config.yaml")
        logging.info("******************************************")
        args = parseYAML()
        print(args.startdate)
    else:
        logging.info(" Using command line arguments.....")
        logging.info("******************************************")
        parser = setupArgParser()
        args = parseArgs(parser)
    logging.info("")
    logging.info("******************************************")
    logging.info(" The following arguments were given:")
    logging.info("{}".format(args))
    logging.info("******************************************")
    logging.info(" Sanity check of arguments")
    logging.info("******************************************")
    # Replaces the date strings with datetime objects.
    args.startdate, args.enddate = sanityCheck(args)
    logging.info("Selected model {} -> {}".format(args.inmodel, args.outmodel))
    logging.info("******************************************")
    if args.outmodel == "cosmo":
        fetchCOSMO(args)
    else:
        fetchWRF(args)
    logging.info("Done fetching.")
    # FIX: typo in log message ("Envrionment").
    logging.info("Generating Environment")
|
train_random.py | # -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import argparse
import bisect
import multiprocessing as mp
import os
import time
import megengine as mge
import megengine.distributed as dist
from megengine.autodiff import GradManager
from megengine.data import DataLoader, Infinite, RandomSampler
from megengine.data import transform as T
# from megengine.jit import trace
from megengine.optimizer import SGD
from official.vision.detection.tools.utils import (
AverageMeter,
DetectionPadCollator,
GroupedRandomSampler,
PseudoDetectionDataset,
get_config_info,
import_from_file
)
logger = mge.get_logger(__name__)
logger.setLevel("INFO")
mge.device.set_prealloc_config(1024, 1024, 512 * 1024 * 1024, 2.0)
def make_parser():
    """Create the command-line parser for distributed training options."""
    parser = argparse.ArgumentParser()
    # Network description file that defines Net and Cfg.
    parser.add_argument("-f", "--file", type=str, default="net.py",
                        help="net description file")
    # Optional pretrained backbone weights.
    parser.add_argument("-w", "--weight_file", type=str, default=None,
                        help="weights file")
    # Number of GPUs (worker processes) to launch.
    parser.add_argument("-n", "--ngpus", type=int, default=1,
                        help="total number of gpus for training")
    # Per-process batch size.
    parser.add_argument("-b", "--batch_size", type=int, default=2,
                        help="batchsize for training")
    # Root directory of the datasets.
    parser.add_argument("-d", "--dataset_dir", type=str, default="/data/datasets")
    return parser
def main():
    """Parse CLI options and launch one training worker per GPU."""
    parser = make_parser()
    args = parser.parse_args()
    # ------------------------ begin training -------------------------- #
    logger.info("Device Count = %d", args.ngpus)
    # Log directory named after the net description file, e.g. "log-of-net".
    log_dir = "log-of-{}".format(os.path.basename(args.file).split(".")[0])
    if not os.path.isdir(log_dir):
        os.makedirs(log_dir)
    if args.ngpus > 1:
        # Multi-GPU: start a dist server, then one worker process per rank.
        master_ip = "localhost"
        port = dist.get_free_ports(1)[0]
        dist.Server(port)
        processes = list()
        for rank in range(args.ngpus):
            process = mp.Process(
                target=worker, args=(master_ip, port, args.ngpus, rank, args)
            )
            process.start()
            processes.append(process)
        for p in processes:
            p.join()
    else:
        # Single device: run the worker inline in this process.
        worker(None, None, 1, 0, args)
def worker(master_ip, port, world_size, rank, args):
    """Per-process training entry point: set up distributed state, build
    model/optimizer/dataloader and run all epochs, checkpointing on rank 0."""
    if world_size > 1:
        dist.init_process_group(
            master_ip=master_ip,
            port=port,
            world_size=world_size,
            rank=rank,
            device=rank,
        )
        logger.info("Init process group for gpu{} done".format(rank))
    current_network = import_from_file(args.file)
    model = current_network.Net(current_network.Cfg())
    model.train()
    if dist.get_rank() == 0:
        logger.info(get_config_info(model.cfg))
        logger.info(repr(model))
    # Skip frozen backbone stages according to cfg.backbone_freeze_at.
    params_with_grad = []
    for name, param in model.named_parameters():
        if "bottom_up.conv1" in name and model.cfg.backbone_freeze_at >= 1:
            continue
        if "bottom_up.layer1" in name and model.cfg.backbone_freeze_at >= 2:
            continue
        params_with_grad.append(param)
    opt = SGD(
        params_with_grad,
        # Linear LR scaling with the per-process batch size.
        lr=model.cfg.basic_lr * args.batch_size,
        momentum=model.cfg.momentum,
        weight_decay=model.cfg.weight_decay * dist.get_world_size(),
    )
    gm = GradManager()
    if dist.get_world_size() > 1:
        # Sum-all-reduce gradients across workers on backward.
        gm.attach(
            params_with_grad,
            callbacks=[dist.make_allreduce_cb("SUM", dist.WORLD)]
        )
    else:
        gm.attach(params_with_grad)
    if args.weight_file is not None:
        # Pretrained backbone weights; non-strict because heads are fresh.
        weights = mge.load(args.weight_file)
        model.backbone.bottom_up.load_state_dict(weights, strict=False)
    if dist.get_world_size() > 1:
        dist.bcast_list_(model.parameters(), dist.WORLD)  # sync parameters
    if dist.get_rank() == 0:
        logger.info("Prepare dataset")
    train_loader = iter(build_dataloader(args.batch_size, args.dataset_dir, model.cfg))
    for epoch in range(model.cfg.max_epoch):
        train_one_epoch(model, train_loader, opt, gm, epoch, args)
        if dist.get_rank() == 0:
            # Checkpoint after every epoch (rank 0 only).
            save_path = "log-of-{}/epoch_{}.pkl".format(
                os.path.basename(args.file).split(".")[0], epoch
            )
            mge.save(
                {"epoch": epoch, "state_dict": model.state_dict()}, save_path,
            )
            logger.info("dump weights to %s", save_path)
def train_one_epoch(model, data_queue, opt, gm, epoch, args):
    """Run one epoch of optimization steps, logging losses and timings."""
    # @trace(symbolic=True)
    def train_func(image, im_info, gt_boxes):
        # One step: forward + backward inside the GradManager scope,
        # then apply and clear gradients.
        with gm:
            loss_dict = model(image=image, im_info=im_info, gt_boxes=gt_boxes)
            gm.backward(loss_dict["total_loss"])
            loss_list = list(loss_dict.values())
        opt.step().clear_grad()
        return loss_list
    meter = AverageMeter(record_len=model.cfg.num_losses)
    time_meter = AverageMeter(record_len=2)  # [train_time, data_time]
    log_interval = model.cfg.log_interval
    tot_step = model.cfg.nr_images_epoch // (args.batch_size * dist.get_world_size())
    for step in range(tot_step):
        adjust_learning_rate(opt, epoch, step, model.cfg, args)
        data_tik = time.time()
        mini_batch = next(data_queue)
        data_tok = time.time()
        tik = time.time()
        loss_list = train_func(
            image=mge.tensor(mini_batch["data"]),
            im_info=mge.tensor(mini_batch["im_info"]),
            gt_boxes=mge.tensor(mini_batch["gt_boxes"])
        )
        tok = time.time()
        time_meter.update([tok - tik, data_tok - data_tik])
        if dist.get_rank() == 0:
            # Assemble a lazy %-style format string for the logger.
            info_str = "e%d, %d/%d, lr:%f, "
            loss_str = ", ".join(
                ["{}:%f".format(loss) for loss in model.cfg.losses_keys]
            )
            time_str = ", train_time:%.3fs, data_time:%.3fs"
            log_info_str = info_str + loss_str + time_str
            meter.update([loss.numpy() for loss in loss_list])
            if step % log_interval == 0:
                logger.info(
                    log_info_str,
                    epoch,
                    step,
                    tot_step,
                    opt.param_groups[0]["lr"],
                    *meter.average(),
                    *time_meter.average()
                )
                meter.reset()
                time_meter.reset()
def adjust_learning_rate(optimizer, epoch, step, cfg, args):
    """Set the learning rate on every param group: stepwise decay by epoch
    plus a linear warm-up over the first cfg.warm_iters steps of epoch 0."""
    decay_exponent = bisect.bisect_right(cfg.lr_decay_stages, epoch)
    base_lr = cfg.basic_lr * args.batch_size * cfg.lr_decay_rate ** decay_exponent
    # Warm up
    if epoch == 0 and step < cfg.warm_iters:
        warm_factor = (step + 1.0) / cfg.warm_iters
    else:
        warm_factor = 1.0
    for group in optimizer.param_groups:
        group["lr"] = base_lr * warm_factor
# pylint: disable=unused-argument
def build_dataset(dataset_dir, cfg):
    # Random-data stand-in dataset; dataset_dir/cfg are accepted only to
    # mirror the real builder's signature.
    return PseudoDetectionDataset(order=["image", "boxes", "boxes_category", "info"])
def build_sampler(train_dataset, batch_size, aspect_grouping=(1,)):
    """Build an infinite training sampler.

    With aspect grouping enabled (default), batches are drawn from buckets
    of images with similar height/width ratios; with an empty grouping a
    plain random sampler is used.

    FIX: the default was a mutable list ([1], guarded by a pylint
    'dangerous-default-value' pragma); an equivalent tuple removes the
    shared-mutable-default pitfall without changing behavior.
    """
    def _compute_aspect_ratios(dataset):
        # height/width for every image in the dataset.
        aspect_ratios = []
        for i in range(len(dataset)):
            info = dataset.get_img_info(i)
            aspect_ratios.append(info["height"] / info["width"])
        return aspect_ratios

    def _quantize(x, bins):
        # Map each ratio to the index of its bucket among the sorted bins.
        return list(map(lambda y: bisect.bisect_right(sorted(bins), y), x))

    if len(aspect_grouping) == 0:
        return Infinite(RandomSampler(train_dataset, batch_size, drop_last=True))

    aspect_ratios = _compute_aspect_ratios(train_dataset)
    group_ids = _quantize(aspect_ratios, aspect_grouping)
    return Infinite(GroupedRandomSampler(train_dataset, batch_size, group_ids))
def build_dataloader(batch_size, dataset_dir, cfg):
    """Assemble the training DataLoader: dataset, (grouped) sampler, the
    resize/flip transform pipeline and a padding collator."""
    train_dataset = build_dataset(dataset_dir, cfg)
    train_sampler = build_sampler(train_dataset, batch_size)
    train_dataloader = DataLoader(
        train_dataset,
        sampler=train_sampler,
        transform=T.Compose(
            transforms=[
                # Resize so the short edge matches a sampled training size,
                # capped by the max size.
                T.ShortestEdgeResize(
                    cfg.train_image_short_size,
                    cfg.train_image_max_size,
                    sample_style="choice",
                ),
                T.RandomHorizontalFlip(),
                T.ToMode(),
            ],
            order=["image", "boxes", "boxes_category"],
        ),
        collator=DetectionPadCollator(),
        num_workers=2,
    )
    return train_dataloader
# Script entry point.
if __name__ == "__main__":
    main()
|
schedule.py | import time
from multiprocessing import Process
import asyncio
import aiohttp
try:
from aiohttp.error import ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
except:
from aiohttp import ClientProxyConnectionError as ProxyConnectionError,ServerDisconnectedError,ClientResponseError,ClientConnectorError
from proxypool.db import RedisClient
from proxypool.error import ResourceDepletionError
from proxypool.getter import FreeProxyGetter
from proxypool.setting import *
from asyncio import TimeoutError
class ValidityTester(object):
    """Asynchronously probes raw proxies against TEST_API and stores the
    working ones back into redis."""
    # URL used to probe each proxy.
    test_api = TEST_API

    def __init__(self):
        self._raw_proxies = None
        self._usable_proxies = []

    def set_raw_proxies(self, proxies):
        """Load a batch of proxies to test; (re)opens the redis client."""
        self._raw_proxies = proxies
        self._conn = RedisClient()

    async def test_single_proxy(self, proxy):
        """Test one proxy; if it answers 200, put it back into redis."""
        try:
            async with aiohttp.ClientSession() as session:
                try:
                    if isinstance(proxy, bytes):
                        proxy = proxy.decode('utf-8')
                    real_proxy = 'http://' + proxy
                    print('Testing', proxy)
                    async with session.get(self.test_api, proxy=real_proxy, timeout=get_proxy_timeout) as response:
                        if response.status == 200:
                            # Only HTTP 200 counts as a working proxy.
                            self._conn.put(proxy)
                            print('Valid proxy', proxy)
                except (ProxyConnectionError, TimeoutError, ValueError):
                    print('Invalid proxy', proxy)
        except (ServerDisconnectedError, ClientResponseError,ClientConnectorError) as s:
            # Connection-level failures are reported but not fatal.
            print(s)
            pass

    def test(self):
        """Test all loaded proxies concurrently on the event loop."""
        print('ValidityTester is working')
        try:
            loop = asyncio.get_event_loop()
            tasks = [self.test_single_proxy(proxy) for proxy in self._raw_proxies]
            loop.run_until_complete(asyncio.wait(tasks))
        except ValueError:
            print('Async Error')
class PoolAdder(object):
    """Crawls free-proxy sites and adds validated proxies to the pool
    until a size threshold is reached."""

    def __init__(self, threshold):
        # Stop crawling once the pool holds this many proxies.
        self._threshold = threshold
        self._conn = RedisClient()
        self._tester = ValidityTester()
        self._crawler = FreeProxyGetter()

    def is_over_threshold(self):
        """Return True when the pool already holds enough proxies."""
        if self._conn.queue_len >= self._threshold:
            return True
        else:
            return False

    def add_to_queue(self):
        """Crawl, validate and store proxies until the pool is full;
        raises ResourceDepletionError if a full sweep yields nothing."""
        print('PoolAdder is working')
        proxy_count = 0
        while not self.is_over_threshold():
            # Each __CrawlFunc__ entry is one crawler callback, by index.
            for callback_label in range(self._crawler.__CrawlFuncCount__):
                callback = self._crawler.__CrawlFunc__[callback_label]
                raw_proxies = self._crawler.get_raw_proxies(callback)
                # test crawled proxies
                self._tester.set_raw_proxies(raw_proxies)
                self._tester.test()
                proxy_count += len(raw_proxies)
                if self.is_over_threshold():
                    print('IP is enough, waiting to be used')
                    break
            if proxy_count == 0:
                # Every source came back empty: nothing left to crawl.
                raise ResourceDepletionError
class Schedule(object):
    """Drives the two maintenance loops: re-validating stored proxies and
    topping the pool up when it runs low."""

    @staticmethod
    def valid_proxy(cycle=VALID_CHECK_CYCLE):
        """Periodically re-test half of the proxies stored in redis."""
        conn = RedisClient()
        tester = ValidityTester()
        while True:
            print('Refreshing ip')
            # Re-validate half of the pool each cycle.
            count = int(0.5 * conn.queue_len)
            if count == 0:
                print('Waiting for adding')
                time.sleep(cycle)
                continue
            raw_proxies = conn.get(count)
            tester.set_raw_proxies(raw_proxies)
            tester.test()
            time.sleep(cycle)

    @staticmethod
    def check_pool(lower_threshold=POOL_LOWER_THRESHOLD,
                   upper_threshold=POOL_UPPER_THRESHOLD,
                   cycle=POOL_LEN_CHECK_CYCLE):
        """Refill the pool up to upper_threshold whenever its size drops
        below lower_threshold."""
        conn = RedisClient()
        adder = PoolAdder(upper_threshold)
        while True:
            if conn.queue_len < lower_threshold:
                adder.add_to_queue()
            time.sleep(cycle)

    def run(self):
        """Start the validator and pool-checker loops, each in its own
        process."""
        print('Ip processing running')
        valid_process = Process(target=Schedule.valid_proxy)
        check_process = Process(target=Schedule.check_pool)
        valid_process.start()
        check_process.start()
|
player_sandboxed.py | import threading
from threading import Timer
from player_abstract import AbstractPlayer
import random
import socket
import server
def _stream_logs(container, stdout, stderr, line_action):
for line in container.logs(stdout=stdout, stderr=stderr, stream=True):
line_action(line)
class SandboxedPlayer(AbstractPlayer):
    """Runs a player inside a locked-down Docker container and controls it
    (pause/resume) over a unix-socket "suspender" side channel."""

    def __init__(self, socket_file, working_dir, docker_client, local_dir=None, s3_bucket=None, s3_key=None,
                 player_key="", player_mem_limit=256, player_cpu=20):
        super().__init__(socket_file, working_dir, local_dir, s3_bucket, s3_key, player_key, player_mem_limit, player_cpu)
        self.docker = docker_client

    def stream_logs(self, stdout=True, stderr=True, line_action=lambda line: print(line.decode())):
        """Stream container logs to *line_action* on a background thread."""
        threading.Thread(target=_stream_logs, args=(self.container, stdout, stderr, line_action)).start()

    def start(self):
        """Create the suspender socket and launch the sandboxed container."""
        # won't collide ;)
        self.socket_name = '/tmp/battlecode-suspender-{}'.format(random.randint(0, 10**50))
        self.suspender_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.suspender_socket.bind(self.socket_name)
        self.suspender_socket.settimeout(server.BUILD_TIMEOUT)  # seconds
        self.suspender_socket.listen(1)
        volumes = {
            self.working_dir: {'bind': '/code', 'mode': 'rw'},
            self.socket_file: {'bind': '/tmp/battlecode-socket', 'mode': 'rw'},
            self.socket_name: {'bind': '/tmp/battlecode-suspender', 'mode': 'rw'}
        }
        working_dir = '/'
        command = 'sh /player_startup.sh'
        env = {
            'PLAYER_KEY': self.player_key,
            'SOCKET_FILE': '/tmp/battlecode-socket',
            'RUST_BACKTRACE': 1,
            'BC_PLATFORM': self._detect_platform()
        }
        self.container = self.docker.containers.run(
            'battlebaby',
            command,
            privileged=False,
            detach=True,
            stdout=True,
            stderr=True,
            volumes=volumes,
            working_dir=working_dir,
            environment=env,
            mem_limit=self.player_mem_limit,
            memswap_limit=self.player_mem_limit,
            auto_remove=True,
            network_disabled=True
        )
        self.suspender_connection = None

    def guess_language(self):
        """Best-effort guess of the player's runtime from its process names.

        Falls back to "c" when no known interpreter/VM is found.
        """
        procs = self.container.top()['Processes']
        for p in procs:
            name = p[3]
            if "java" in name:
                return "jvm"
            elif "python" in name:
                return "python"
            elif "pypy" in name:
                return "pypy"
            elif "mono" in name:
                return "mono"
        return "c"

    def suspinit(self):
        """Lazily accept the suspender connection made from inside the container."""
        if self.suspender_connection is None:  # fixed: identity check instead of ==
            try:
                # wait for suspender script to connect from player host
                connection, _ = self.suspender_socket.accept()
                self.suspender_connection = connection
                self.suspender_file = self.suspender_connection.makefile('rw', 64)
                login = next(self.suspender_file)
                assert int(login.strip()) == self.player_key, 'mismatched suspension login: {} != {}'.format(repr(login.strip()), repr(self.player_key))
            except Exception as e:
                print('suspender timed out', e)

    def _suspender_command(self, command, failure_message):
        """Send one command over the suspender channel and await the 'ack'.

        Shared implementation of pause()/unpause(); *failure_message* is the
        prefix printed when the round-trip fails.
        """
        self.suspinit()
        # see suspender.py
        # we don't go through docker.suspend or docker.exec because they're too slow (100ms)
        try:
            self.suspender_file.write(command + '\n')
            self.suspender_file.flush()
            response = next(self.suspender_file)
            assert response.strip() == 'ack', response.strip() + ' != ack'
        except Exception as e:
            print(failure_message, e)

    def pause(self):
        """Suspend the player's processes."""
        self._suspender_command('suspend', "SUSPENSION FAILED!!! SUSPICIOUS:")

    def unpause(self, timeout=None):
        """Resume the player's processes.

        ``timeout`` is accepted for interface compatibility but unused.
        """
        self._suspender_command('resume', "resumption failed:")

    def destroy(self):
        """Tear down the container and the suspender socket."""
        try:
            self.container.remove(force=True)
        except Exception:
            # Container may already be gone (auto_remove) or never started.
            pass
        try:
            self.suspender_socket.close()
        except Exception as e:
            print('suspender close err:', e)
        super().destroy()

    def docker_stats(self, stream=False):
        """Return container resource statistics (decoded JSON)."""
        return self.container.stats(decode=True, stream=stream)

    def __del__(self):
        self.destroy()
|
Hiwin_RT605_Socket_v3_20190628111231.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd_v3 as TCP
import HiwinRA605_socket_Taskcmd_v3 as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
#data = '0' #設定傳輸資料初始值
Arm_feedback = 1  # assume the arm starts out busy
NAME = 'socket_server'  # ROS node name
arm_mode_flag = False  # set to True once Arm_Mode() has received a command
##------------class pos-------
class point():
    """Mutable container for a Cartesian pose (position + orientation)."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        # Store the six pose components as plain attributes.
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw


# Module-level pose shared by the command helpers below (initial home pose).
pos = point(0.0, 36.8, 11.35, -90.0, 0.0, 0.0)
##------------class socket_cmd---------
class socket_data():
    """Mutable holder for the arm command currently being assembled."""

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        self.grip = grip            # gripper command
        self.setvel = setvel        # velocity setting
        self.ra = ra                # relative/absolute flag
        self.delay = delay          # delay time
        self.setboth = setboth      # control mode selector (pos / euler / both)
        self.action = action        # action type selector
        self.Speedmode = Speedmode  # fast/safe speed mode


# Shared command object mutated by Arm_Mode()/Speed_Mode() and read by Socket_command().
socket_cmd = socket_data(0, 0.0, 0, 0, 0, 0, 0)
##-----------switch define------------##
class switch(object):
    """C-style switch/case helper (classic PEP 275 recipe).

    Usage::

        for case in switch(value):
            if case(a): ...; break
            if case(b): ...; break
    """

    def __init__(self, value):
        self.value = value
        self.fall = False  # becomes True after the first match, enabling fall-through

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # A bare ``return`` ends the generator cleanly. The original raised
        # StopIteration here, which PEP 479 (Python 3.7+) converts into a
        # RuntimeError whenever no case matched and the loop ran off the end.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Latest arm state reported by the controller plus a sent-confirmation flag."""

    def __init__(self, ArmState, SentFlag):
        self.ArmState = ArmState  # 0 = ready, 1 = busy, 6 = strategy finished
        self.SentFlag = SentFlag  # 1 once command transmission is confirmed


# Shared feedback object updated by Socket_feedback() and published by socket_talker().
state_feedback = StateFeedback(0, 0)
class client():
    """Thin wrapper around a TCP client socket to the arm controller."""

    def __init__(self):
        # Connection is established lazily via get_connect().
        #self.get_connect()
        pass

    def get_connect(self):
        # Open a TCP connection to the arm controller host.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('192.168.0.1', 8080))

    def send(self, msg):
        self.s.send(msg.encode('utf-8'))  # encode str as utf-8 before transmitting

    def get_recieve(self):
        data = self.s.recv(1024)  # 1024 is the buffer size: max bytes per recv call
        # NOTE(review): the decoded value is discarded and the raw bytes are
        # returned; Socket_feedback() relies on integer byte indexing, so
        # confirm callers before "fixing" this to return the decoded string.
        data.decode('utf-8')
        return data

    def close(self):
        self.s.close()


# Module-level client shared by socket_client() and Socket_command().
Socket = client()
def point_data(x, y, z, pitch, roll, yaw):
    """Store a pose received from the strategy side into the shared ``pos``.

    Each component is stringified ('%s' formatting) because the values are
    later embedded into the TCP command string sent to the controller.
    """
    components = zip(('x', 'y', 'z', 'pitch', 'roll', 'yaw'),
                     (x, y, z, pitch, roll, yaw))
    for attr_name, value in components:
        setattr(pos, attr_name, '%s' % value)
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ##receives arm-mode data from the strategy side
    """Store the arm-mode command fields and immediately send the command."""
    global arm_mode_flag
    # '%s' formatting normalizes each input to str before the int() conversion.
    socket_cmd.action = int('%s'%action)
    socket_cmd.grip = int('%s'%grip)
    socket_cmd.ra = int('%s'%ra)
    socket_cmd.setvel = int('%s'%setvel)
    socket_cmd.setboth = int('%s'%setboth)
    arm_mode_flag = True
    # Translate the stored command into a TCP packet and transmit it now.
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ##receives arm speed-mode data from the strategy side
    """Store the requested speed mode (fast/safe) for the next Mode command."""
    socket_cmd.Speedmode = int('%s'%speedmode)
# def point_data(req): ##接收策略端傳送位姿資料
# pos.x = '%s'%req.x
# pos.y = '%s'%req.y
# pos.z = '%s'%req.z
# pos.pitch = '%s'%req.pitch
# pos.roll = '%s'%req.roll
# pos.yaw = '%s'%req.yaw
# return(1)
# ##----------Arm Mode-------------###
# def Arm_Mode(req): ##接收策略端傳送手臂模式資料
# global arm_mode_flag
# socket_cmd.action = int('%s'%req.action)
# socket_cmd.grip = int('%s'%req.grip)
# socket_cmd.ra = int('%s'%req.ra)
# socket_cmd.setvel = int('%s'%req.vel)
# socket_cmd.setboth = int('%s'%req.both)
# arm_mode_flag = True
# Socket_command()
# return(1)
# ##-------Arm Speed Mode------------###
# def Speed_Mode(req): ##接收策略端傳送手臂模式資料
# global speed_mode_flag
# socket_cmd.Speedmode = int('%s'%req.Speedmode)
# return(1)
def socket_talker(): ##create the server node
    """ROS node: publish the arm state and sent-flag on the 'chatter' topic."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    #a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
    #s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
    #b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    rate = rospy.Rate(100)  # 100 Hz publish rate
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        # [ArmState, SentFlag] as maintained by Socket_feedback().
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        pub.publish(state)
        rate.sleep()
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Serialize the pending command in ``socket_cmd``/``pos`` into a TCP
    command string and send it to the arm controller.

    NOTE(review): ``data`` is only assigned inside a matched case; if
    ``socket_cmd.action`` matches no case this raises NameError at the
    ``print(data)`` below — confirm callers always set a valid action first.
    """
    global Socket
    for case in switch(socket_cmd.action):
        #-------PtP Mode--------
        if case(Taskcmd.Action_Type.PtoP):
            # Point-to-point motion; pick the control-mode variant.
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
            break
        #-------Line Mode--------
        if case(Taskcmd.Action_Type.Line):
            # Straight-line motion; pick the control-mode variant.
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
            break
        #-------set the arm speed--------
        if case(Taskcmd.Action_Type.SetVel):
            data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
            break
        #-------set the arm delay time--------
        if case(Taskcmd.Action_Type.Delay):
            data = TCP.SetDelay(socket_cmd.grip,0)
            break
        #-------set the arm fast/safe speed mode--------
        if case(Taskcmd.Action_Type.Mode):
            data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
            break
    socket_cmd.action= 6 ##reset to the idle mode state
    print(data)
    print("Socket:", Socket)
    #Socket.send(data.encode('utf-8'))#socket send, for python to translate str
    Socket.send(data)
##-----------socket client--------
def socket_client():
    """Connect the module-level ``Socket`` to the arm controller and run the
    feedback-receiving loop until the arm reports shutdown."""
    global Socket
    try:
        #Socket = client()
        Socket.get_connect()
        #Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
        print('Connection has been successful')
    except socket.error as msg:
        # A connection failure is fatal for this client thread.
        print(msg)
        sys.exit(1)
    #print('Connection has been successful')
    # Blocks here processing feedback packets until shutdown is reported.
    Socket_feedback(Socket)
    rospy.on_shutdown(myhook)
    Socket.close()
def Socket_feedback(s):
    """Receive status packets from the arm and mirror them into state_feedback.

    The packet is inspected by byte position: byte 2 carries the arm state and
    byte 4 the sent-confirmation flag (ASCII '0'/'1' are byte values 48/49).
    Loops until the arm reports the shutdown state.
    """
    Socket = s
    while 1:
        feedback_str = Socket.get_recieve()
        # Arm state reported by the controller
        if str(feedback_str[2]) == '48':# F: arm is Ready, can accept the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: arm is busy, cannot run the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # confirm the transmission flag
        if str(feedback_str[4]) == '48':# returned 0: false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returned 1: true
            state_feedback.SentFlag = 1
        ##---------------arm command transmission end-----------------
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
    """ROS shutdown hook: announce that the node is going down."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  ## start in the idle mode state
    ## run the socket client on its own thread
    t = threading.Thread(target=socket_client)
    t.start()  # start the client thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    ## threading end
|
controller.py | import re
import re
import time
import traceback
from threading import Thread
from typing import List, Set, Type, Tuple, Dict
from bauh.api.abstract.controller import SoftwareManager, SearchResult, ApplicationContext, UpgradeRequirements, \
UpgradeRequirement
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher, TaskManager
from bauh.api.abstract.model import SoftwarePackage, PackageUpdate, PackageHistory, PackageSuggestion, \
CustomSoftwareAction
from bauh.api.abstract.view import ViewComponent, TabGroupComponent
from bauh.api.exception import NoInternetException
from bauh.commons import internet
from bauh.commons.html import bold
from bauh.view.core.settings import GenericSettingsManager
from bauh.view.core.update import check_for_update
from bauh.view.util import resource
from bauh.view.util.util import clean_app_files, restart_app
# Pre-compiled pattern used by search() to detect URL-style queries.
RE_IS_URL = re.compile(r'^https?://.+')
class GenericUpgradeRequirements(UpgradeRequirements):
    """Aggregated upgrade requirements plus the per-manager breakdown."""

    def __init__(self, to_install: List[UpgradeRequirement], to_remove: List[UpgradeRequirement],
                 to_upgrade: List[UpgradeRequirement], cannot_upgrade: List[SoftwarePackage],
                 sub_requirements: Dict[SoftwareManager, UpgradeRequirements]):
        super().__init__(to_install=to_install, to_upgrade=to_upgrade,
                         to_remove=to_remove, cannot_upgrade=cannot_upgrade)
        # requirements grouped by the manager that produced them
        self.sub_requirements = sub_requirements
class GenericSoftwareManager(SoftwareManager):
    """Facade that fans every operation out to the registered package managers
    and aggregates their results."""

    def __init__(self, managers: List[SoftwareManager], context: ApplicationContext, config: dict,
                 settings_manager: GenericSettingsManager = None):
        super(GenericSoftwareManager, self).__init__(context=context)
        self.managers = managers
        # package type -> manager responsible for it
        self.map = {t: m for m in self.managers for t in m.get_managed_types()}
        # cache of per-type availability checks (None = caching disabled)
        self._available_cache = {} if config['system']['single_dependency_checking'] else None
        self.thread_prepare = None
        self.i18n = context.i18n
        self.disk_loader_factory = context.disk_loader_factory
        self.logger = context.logger
        self._already_prepared = []
        self.working_managers = []
        self.config = config
        self.settings_manager = settings_manager
        self.http_client = context.http_client
        self.extra_actions = [CustomSoftwareAction(i18_label_key='action.reset',
                                                   i18n_status_key='action.reset.status',
                                                   manager_method='reset',
                                                   manager=self,
                                                   icon_path=resource.get_path('img/logo.svg'),
                                                   requires_root=False,
                                                   refresh=False)]

    def reset_cache(self):
        """Clear the availability cache and the list of working managers."""
        if self._available_cache is not None:
            self._available_cache = {}
            self.working_managers.clear()

    def _sort(self, apps: List[SoftwarePackage], word: str) -> List[SoftwarePackage]:
        """Order results: exact name matches, then substring matches, then the rest
        (each group alphabetically)."""
        exact_name_matches, contains_name_matches, others = [], [], []
        for app in apps:
            lower_name = app.name.lower()
            if word == lower_name:
                exact_name_matches.append(app)
            elif word in lower_name:
                contains_name_matches.append(app)
            else:
                others.append(app)
        res = []
        for app_list in (exact_name_matches, contains_name_matches, others):
            app_list.sort(key=lambda a: a.name.lower())
            res.extend(app_list)
        return res

    def _can_work(self, man: SoftwareManager):
        """Return whether *man* is enabled and able to work, caching the answer
        per managed type when single-dependency checking is on. Also keeps
        ``working_managers`` in sync."""
        if self._available_cache is not None:
            available = False
            for t in man.get_managed_types():
                available = self._available_cache.get(t)
                if available is None:
                    available = man.is_enabled() and man.can_work()
                    self._available_cache[t] = available
        else:
            available = man.is_enabled() and man.can_work()
        if available:
            if man not in self.working_managers:
                self.working_managers.append(man)
        else:
            if man in self.working_managers:
                self.working_managers.remove(man)
        return available

    def _search(self, word: str, is_url: bool, man: SoftwareManager, disk_loader, res: SearchResult):
        """Run one manager's search and merge its results into *res* (thread target)."""
        if self._can_work(man):
            mti = time.time()
            apps_found = man.search(words=word, disk_loader=disk_loader, is_url=is_url)
            mtf = time.time()
            self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(mtf - mti))
            res.installed.extend(apps_found.installed)
            res.new.extend(apps_found.new)

    def search(self, word: str, disk_loader: DiskCacheLoader = None, limit: int = -1, is_url: bool = False) -> SearchResult:
        """Search all working managers in parallel and return the merged, sorted result.

        Raises NoInternetException when no connection is available.
        """
        ti = time.time()
        self._wait_to_be_ready()
        res = SearchResult([], [], 0)
        if internet.is_available():
            norm_word = word.strip().lower()
            url_words = RE_IS_URL.match(norm_word)
            disk_loader = self.disk_loader_factory.new()
            disk_loader.start()
            threads = []
            for man in self.managers:
                t = Thread(target=self._search, args=(norm_word, url_words, man, disk_loader, res))
                t.start()
                threads.append(t)
            for t in threads:
                t.join()
            if disk_loader:
                disk_loader.stop_working()
                disk_loader.join()
            res.installed = self._sort(res.installed, norm_word)
            res.new = self._sort(res.new, norm_word)
            res.total = len(res.installed) + len(res.new)
        else:
            raise NoInternetException()
        tf = time.time()
        self.logger.info('Took {0:.2f} seconds'.format(tf - ti))
        return res

    def _wait_to_be_ready(self):
        """Block until the background prepare() thread (if any) has finished."""
        if self.thread_prepare:
            self.thread_prepare.join()
            self.thread_prepare = None

    def set_enabled(self, enabled: bool):
        pass

    def can_work(self) -> bool:
        return True

    def read_installed(self, disk_loader: DiskCacheLoader = None, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
        """Read the installed packages of every working manager (optionally
        restricted to *pkg_types*) and merge the results."""
        ti = time.time()
        self._wait_to_be_ready()
        res = SearchResult([], None, 0)
        disk_loader = None
        net_available = internet.is_available()
        if not pkg_types:  # any type
            for man in self.managers:
                if self._can_work(man):
                    # Disk loader is created lazily on the first working manager.
                    if not disk_loader:
                        disk_loader = self.disk_loader_factory.new()
                        disk_loader.start()
                    mti = time.time()
                    man_res = man.read_installed(disk_loader=disk_loader, pkg_types=None, internet_available=net_available)
                    mtf = time.time()
                    self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(mtf - mti))
                    res.installed.extend(man_res.installed)
                    res.total += man_res.total
        else:
            man_already_used = []
            for t in pkg_types:
                man = self.map.get(t)
                if man and (man not in man_already_used) and self._can_work(man):
                    if not disk_loader:
                        disk_loader = self.disk_loader_factory.new()
                        disk_loader.start()
                    mti = time.time()
                    man_res = man.read_installed(disk_loader=disk_loader, pkg_types=None, internet_available=net_available)
                    mtf = time.time()
                    self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(mtf - mti))
                    res.installed.extend(man_res.installed)
                    res.total += man_res.total
        if disk_loader:
            disk_loader.stop_working()
            disk_loader.join()
        tf = time.time()
        self.logger.info('Took {0:.2f} seconds'.format(tf - ti))
        return res

    def downgrade(self, app: SoftwarePackage, root_password: str, handler: ProcessWatcher) -> bool:
        """Delegate a downgrade to the app's manager; raise if unsupported."""
        man = self._get_manager_for(app)
        if man and app.can_be_downgraded():
            mti = time.time()
            res = man.downgrade(app, root_password, handler)
            mtf = time.time()
            self.logger.info('Took {0:.2f} seconds'.format(mtf - mti))
            return res
        else:
            raise Exception("downgrade is not possible for {}".format(app.__class__.__name__))

    def clean_cache_for(self, app: SoftwarePackage):
        man = self._get_manager_for(app)
        if man:
            return man.clean_cache_for(app)

    def upgrade(self, requirements: GenericUpgradeRequirements, root_password: str, handler: ProcessWatcher) -> bool:
        """Upgrade per manager; stop and report failure at the first manager that fails."""
        for man, man_reqs in requirements.sub_requirements.items():
            res = man.upgrade(man_reqs, root_password, handler)
            if not res:
                return False
        return True

    def uninstall(self, app: SoftwarePackage, root_password: str, handler: ProcessWatcher) -> bool:
        man = self._get_manager_for(app)
        if man:
            return man.uninstall(app, root_password, handler)

    def install(self, app: SoftwarePackage, root_password: str, handler: ProcessWatcher) -> bool:
        """Delegate an installation, logging duration and swallowing (but
        printing) unexpected errors as a failed install."""
        man = self._get_manager_for(app)
        if man:
            ti = time.time()
            try:
                self.logger.info('Installing {}'.format(app))
                return man.install(app, root_password, handler)
            except Exception:
                traceback.print_exc()
                return False
            finally:
                tf = time.time()
                self.logger.info('Installation of {}'.format(app) + 'took {0:.2f} minutes'.format((tf - ti)/60))

    def get_info(self, app: SoftwarePackage):
        man = self._get_manager_for(app)
        if man:
            return man.get_info(app)

    def get_history(self, app: SoftwarePackage) -> PackageHistory:
        man = self._get_manager_for(app)
        if man:
            mti = time.time()
            history = man.get_history(app)
            mtf = time.time()
            self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(mtf - mti))
            return history

    def get_managed_types(self) -> Set[Type[SoftwarePackage]]:
        pass

    def is_enabled(self):
        return True

    def _get_manager_for(self, app: SoftwarePackage) -> SoftwareManager:
        """Return the working manager responsible for *app*'s type, or None."""
        man = self.map[app.__class__]
        return man if man and self._can_work(man) else None

    def cache_to_disk(self, pkg: SoftwarePackage, icon_bytes: bytes, only_icon: bool):
        if pkg.supports_disk_cache():
            man = self._get_manager_for(pkg)
            if man:
                return man.cache_to_disk(pkg, icon_bytes=icon_bytes, only_icon=only_icon)

    def requires_root(self, action: str, app: SoftwarePackage) -> bool:
        """With no app: True if ANY working manager requires root for *action*.
        With an app: delegate to its manager."""
        if app is None:
            if self.managers:
                for man in self.managers:
                    if self._can_work(man):
                        if man.requires_root(action, app):
                            return True
            return False
        else:
            man = self._get_manager_for(app)
            if man:
                return man.requires_root(action, app)

    def prepare(self, task_manager: TaskManager, root_password: str, internet_available: bool):
        """Prepare every working manager exactly once."""
        if self.managers:
            internet_on = internet.is_available()
            for man in self.managers:
                if man not in self._already_prepared and self._can_work(man):
                    if task_manager:
                        man.prepare(task_manager, root_password, internet_on)
                    self._already_prepared.append(man)

    def list_updates(self, internet_available: bool = None) -> List[PackageUpdate]:
        """Aggregate the pending updates of every working manager."""
        self._wait_to_be_ready()
        updates = []
        if self.managers:
            net_available = internet.is_available()
            for man in self.managers:
                if self._can_work(man):
                    man_updates = man.list_updates(internet_available=net_available)
                    if man_updates:
                        updates.extend(man_updates)
        return updates

    def list_warnings(self, internet_available: bool = None) -> List[str]:
        """Collect application-update notices and every enabled manager's warnings."""
        warnings = []
        int_available = internet.is_available()
        if int_available:
            updates_msg = check_for_update(self.logger, self.http_client, self.i18n)
            if updates_msg:
                warnings.append(updates_msg)
        if self.managers:
            for man in self.managers:
                if man.is_enabled():
                    man_warnings = man.list_warnings(internet_available=int_available)
                    if man_warnings:
                        warnings.extend(man_warnings)
        return warnings

    def _fill_suggestions(self, suggestions: list, man: SoftwareManager, limit: int, filter_installed: bool):
        """Collect up to *limit* suggestions from one manager (thread target)."""
        if self._can_work(man):
            mti = time.time()
            man_sugs = man.list_suggestions(limit=limit, filter_installed=filter_installed)
            mtf = time.time()
            self.logger.info(man.__class__.__name__ + ' took {0:.2f} seconds'.format(mtf - mti))
            if man_sugs:
                if 0 < limit < len(man_sugs):
                    man_sugs = man_sugs[0:limit]
                suggestions.extend(man_sugs)

    def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
        """Gather suggestions from all managers in parallel, sorted by priority."""
        if bool(self.config['suggestions']['enabled']):
            if self.managers and internet.is_available():
                suggestions, threads = [], []
                for man in self.managers:
                    t = Thread(target=self._fill_suggestions, args=(suggestions, man, int(self.config['suggestions']['by_type']), filter_installed))
                    t.start()
                    threads.append(t)
                for t in threads:
                    t.join()
                if suggestions:
                    suggestions.sort(key=lambda s: s.priority.value, reverse=True)
                return suggestions
        return []

    def execute_custom_action(self, action: CustomSoftwareAction, pkg: SoftwarePackage, root_password: str, watcher: ProcessWatcher):
        """Invoke the manager method named by *action* via getattr (replaces the
        previous string-built eval call)."""
        man = action.manager if action.manager else self._get_manager_for(pkg)
        if man:
            method = getattr(man, action.manager_method)
            kwargs = {'root_password': root_password, 'watcher': watcher}
            if pkg:
                kwargs['pkg'] = pkg
            return method(**kwargs)

    def is_default_enabled(self) -> bool:
        return True

    def launch(self, pkg: SoftwarePackage):
        self._wait_to_be_ready()
        man = self._get_manager_for(pkg)
        if man:
            self.logger.info('Launching {}'.format(pkg))
            man.launch(pkg)

    def get_screenshots(self, pkg: SoftwarePackage):
        man = self._get_manager_for(pkg)
        if man:
            return man.get_screenshots(pkg)

    def get_working_managers(self):
        return [m for m in self.managers if self._can_work(m)]

    def get_settings(self, screen_width: int, screen_height: int) -> ViewComponent:
        """Lazily create (or refresh) the settings manager and build the settings view."""
        if self.settings_manager is None:
            self.settings_manager = GenericSettingsManager(managers=self.managers,
                                                           working_managers=self.working_managers,
                                                           logger=self.logger,
                                                           i18n=self.i18n)
        else:
            self.settings_manager.managers = self.managers
            self.settings_manager.working_managers = self.working_managers
        return self.settings_manager.get_settings(screen_width=screen_width, screen_height=screen_height)

    def save_settings(self, component: TabGroupComponent) -> Tuple[bool, List[str]]:
        return self.settings_manager.save_settings(component)

    def _map_pkgs_by_manager(self, pkgs: List[SoftwarePackage], pkg_filters: list = None) -> Dict[SoftwareManager, List[SoftwarePackage]]:
        """Group *pkgs* by responsible manager, skipping packages rejected by
        any of *pkg_filters*."""
        by_manager = {}
        for pkg in pkgs:
            # Fixed: the previous expression `all((1 for f in pkg_filters if f(pkg)))`
            # was always truthy, so the filters never excluded anything.
            if pkg_filters and not all(f(pkg) for f in pkg_filters):
                continue
            man = self._get_manager_for(pkg)
            if man:
                man_pkgs = by_manager.get(man)
                if man_pkgs is None:
                    man_pkgs = []
                    by_manager[man] = man_pkgs
                man_pkgs.append(pkg)
        return by_manager

    def get_upgrade_requirements(self, pkgs: List[SoftwarePackage], root_password: str, watcher: ProcessWatcher) -> UpgradeRequirements:
        """Query each manager for its upgrade requirements and merge them.

        Returns None when any manager aborts (signals the process should stop).
        """
        by_manager = self._map_pkgs_by_manager(pkgs)
        res = GenericUpgradeRequirements([], [], [], [], {})
        if by_manager:
            for man, pkgs in by_manager.items():
                ti = time.time()
                man_reqs = man.get_upgrade_requirements(pkgs, root_password, watcher)
                tf = time.time()
                self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(tf - ti))
                if not man_reqs:
                    return  # it means the process should be stopped
                res.sub_requirements[man] = man_reqs
                if man_reqs.to_install:
                    res.to_install.extend(man_reqs.to_install)
                if man_reqs.to_remove:
                    res.to_remove.extend(man_reqs.to_remove)
                if man_reqs.to_upgrade:
                    res.to_upgrade.extend(man_reqs.to_upgrade)
                if man_reqs.cannot_upgrade:
                    res.cannot_upgrade.extend(man_reqs.cannot_upgrade)
        return res

    def reset(self, root_password: str, watcher: ProcessWatcher) -> bool:
        """Ask for confirmation, then wipe application files and restart."""
        body = '<p>{}</p><p>{}</p>'.format(self.i18n['action.reset.body_1'].format(bold(self.context.app_name)),
                                           self.i18n['action.reset.body_2'])
        if watcher.request_confirmation(title=self.i18n['action.reset'],
                                        body=body,
                                        confirmation_label=self.i18n['proceed'].capitalize(),
                                        deny_label=self.i18n['cancel'].capitalize()):
            try:
                clean_app_files(managers=self.managers, logs=False)
                restart_app()
            except Exception:
                return False
        return True

    def get_custom_actions(self) -> List[CustomSoftwareAction]:
        """Collect custom actions from working managers (sorted by class name),
        then append this manager's own extra actions."""
        actions = []
        if self.managers:
            working_managers = []
            for man in self.managers:
                if self._can_work(man):
                    working_managers.append(man)
            if working_managers:
                working_managers.sort(key=lambda m: m.__class__.__name__)
                for man in working_managers:
                    man_actions = man.get_custom_actions()
                    if man_actions:
                        actions.extend(man_actions)
        actions.extend(self.extra_actions)
        return actions

    def _fill_sizes(self, man: SoftwareManager, pkgs: List[SoftwarePackage]):
        """Fill package sizes via one manager, logging the duration (thread target)."""
        ti = time.time()
        man.fill_sizes(pkgs)
        tf = time.time()
        self.logger.info(man.__class__.__name__ + " took {0:.2f} seconds".format(tf - ti))

    def fill_sizes(self, pkgs: List[SoftwarePackage]):
        """Fill the sizes of packages that do not have one yet, in parallel per manager."""
        by_manager = self._map_pkgs_by_manager(pkgs, pkg_filters=[lambda p: p.size is None])
        if by_manager:
            threads = []
            for man, man_pkgs in by_manager.items():
                if man_pkgs:
                    t = Thread(target=self._fill_sizes, args=(man, man_pkgs), daemon=True)
                    t.start()
                    threads.append(t)
            for t in threads:
                t.join()
|
SoundTheAlarm.py | import smtplib
import sys
from PyQt5.QtWidgets import QApplication, QWidget, \
QPushButton, QDesktopWidget, QLabel, QVBoxLayout
from PyQt5.QtGui import QIcon
from playsound import playsound
from threading import Thread
from PyQt5.QtCore import QDateTime, Qt
class WarningBox(QWidget):
    """Fixed-size pop-up window announcing a detected error with a timestamp."""

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build and show the warning window."""
        # Timestamp the warning so the user knows when the error occurred.
        instance = QDateTime.currentDateTime()
        text_displayed = " WARNING! Error detected\n"+ instance.toString(Qt.DefaultLocaleLongDate)
        #self.setWindowIcon(QIcon('resources/warning_icon.png'))
        display = QLabel()
        display.setText(text_displayed)
        display.setAlignment(Qt.AlignHCenter)
        vbox = QVBoxLayout()
        vbox.addWidget(display)
        vbox.addStretch()
        self.setLayout(vbox)
        self.setFixedHeight(300)
        self.setFixedWidth(400)
        # The Quit button closes the whole application, not just this window.
        qbtn = QPushButton('Quit', self)
        qbtn.clicked.connect(QApplication.instance().quit)
        qbtn.resize(qbtn.sizeHint())
        qbtn.move(150, 200)
        #self.setGeometry(500, 300, 300, 300)
        self.center()
        self.setWindowTitle('WARNING')
        self.show()

    def center(self):
        """Move the window to the center of the available screen area."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())
def warning_function():
    """Executing the warning message on call"""
    # A QApplication must exist before any QWidget can be created.
    app = QApplication(sys.argv)
    ex = WarningBox()
    # exec_() blocks until the window is closed; propagate its exit code.
    sys.exit(app.exec_())
def sound_loop():
    """Play the alarm clip on repeat; runs forever (used as a daemon thread)."""
    while True:
        playsound('resources/enough.mp3')
def start_alarm(sound=True, warning_message=True, email=False):
    """Trigger the configured alarm channels.

    Args:
        sound: loop the alarm sound in a daemon thread.
        warning_message: pop up the Qt warning window in a separate thread.
        email: placeholder for sending a warning email (not implemented yet).
    """
    if warning_message:
        # Warning window runs in its own thread so other alarm channels
        # can start simultaneously without blocking on the Qt event loop.
        t = Thread(target=warning_function)
        t.start()
    if sound:
        # Daemon thread: the sound loop dies together with the main program.
        h = Thread(target=sound_loop, daemon=True)
        h.start()
    if email:
        # TODO: read the mailing list and send out the warning email.
        print("simulation email")
|
exampleApp3.py | import time
import threading
from pyhtmlgui import PyHtmlGui, PyHtmlView, Observable
class App(Observable):
    """Application model; empty because the view updates itself via its own thread."""
    pass
class AppView(PyHtmlView):
    """View that refreshes itself once per second from a background thread."""

    # Template shows the current value and a pause/start toggle button.
    TEMPLATE_STR = '''
i am a item that is updated by a thread in the python frontend object<br>
{{ this.value }}<br>
<button onclick='pyhtmlgui.call(this.pause_restart);'> {% if this.paused == True %} Start {% else %} Pause {% endif %}</button>
'''

    def __init__(self, observedObject, parentView):
        super().__init__(observedObject, parentView)
        self.value = 0
        self.paused = True
        # Daemon worker updates the view once per second while unpaused.
        self.worker_thread = threading.Thread(target=self._worker_thread, daemon=True)
        self.worker_thread.start()

    def _worker_thread(self):
        self.insert_element()
        while True:
            if self.paused is False:
                self.value = time.time()
                if self.is_visible is True:  # if we call update ourself, we need to check visibility, we cant update invisible components.
                    self.update()
            time.sleep(1)

    def pause_restart(self):
        """Toggle the paused flag and refresh the button label if visible."""
        self.paused = not self.paused
        if self.is_visible is True:  # This is why we normally observe some other object, so events deal with this. see example4 to see how to do this correctly
            self.update()
if __name__ == "__main__":
    # Serve the AppView frontend for a single App instance.
    gui = PyHtmlGui(
        appInstance = App(),
        appViewClass = AppView,
    )
    # Open a browser tab and block until the GUI stops.
    gui.start(show_frontend=True, block=True)
|
process.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# Import python libs
import logging
import os
import time
import sys
import multiprocessing
import signal
# Import salt libs
import salt.utils
import six
log = logging.getLogger(__name__)
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
try:
import systemd.daemon
HAS_PYTHON_SYSTEMD = True
except ImportError:
HAS_PYTHON_SYSTEMD = False
def set_pidfile(pidfile, user):
    '''
    Save the pidfile

    pidfile: path of the pidfile to write (parent directories are created)
    user: account that should own the pidfile (POSIX only)
    '''
    pdir = os.path.dirname(pidfile)
    if not os.path.isdir(pdir) and pdir:
        os.makedirs(pdir)
    try:
        with salt.utils.fopen(pidfile, 'w+') as ofile:
            ofile.write(str(os.getpid()))
    except IOError:
        # Best-effort: failure to write the pidfile is not fatal.
        pass
    # NOTE(review): this is logged even when the write above failed.
    log.debug(('Created pidfile: {0}').format(pidfile))
    if salt.utils.is_windows():
        # Windows has no POSIX ownership semantics; nothing more to do.
        return True
    import pwd  # after confirming not running Windows
    #import grp
    try:
        pwnam = pwd.getpwnam(user)
        uid = pwnam[2]
        gid = pwnam[3]
        #groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
    except IndexError:
        sys.stderr.write(
            'Failed to set the pid to user: {0}. The user is not '
            'available.\n'.format(
                user
            )
        )
        sys.exit(os.EX_NOUSER)
    if os.getuid() == uid:
        # The current user already owns the pidfile. Return!
        return
    try:
        os.chown(pidfile, uid, gid)
    except OSError as err:
        msg = (
            'Failed to set the ownership of PID file {0} to user {1}.'.format(
                pidfile, user
            )
        )
        log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
        sys.stderr.write('{0}\n'.format(msg))
        sys.exit(err.errno)
    log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def clean_proc(proc, wait_for_kill=10):
    '''
    Generic method for cleaning up multiprocessing procs

    proc: the multiprocessing.Process to terminate (may be None)
    wait_for_kill: number of 0.1s terminate attempts before escalating to SIGKILL
    '''
    # NoneType and other fun stuff need not apply
    if not proc:
        return
    try:
        waited = 0
        while proc.is_alive():
            proc.terminate()
            waited += 1
            time.sleep(0.1)
            if proc.is_alive() and (waited >= wait_for_kill):
                # Polite termination failed; force-kill the process.
                log.error(
                    'Process did not die with terminate(): {0}'.format(
                        proc.pid
                    )
                )
                os.kill(proc.pid, signal.SIGKILL)
    except (AssertionError, AttributeError):
        # Catch AssertionError when the proc is evaluated inside the child
        # Catch AttributeError when the process dies between proc.is_alive()
        # and proc.terminate() and turns into a NoneType
        pass
def os_is_running(pid):
    '''
    Use OS facilities to determine if a process is running
    '''
    if HAS_PSUTIL:
        return psutil.pid_exists(pid)
    try:
        # Signal 0 performs error checking only: it reveals whether the pid
        # exists without actually delivering a signal.
        os.kill(pid, 0)
    except OSError:
        return False
    return True
class ProcessManager(object):
    '''
    A class which will manage processes that should be running
    '''
    def __init__(self, name=None, wait_for_kill=1):
        '''
        :param name: label used for the process title
            (defaults to the class name).
        :param wait_for_kill: seconds ``kill_children`` waits for graceful
            termination before escalating to SIGKILL.
        '''
        # pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
        self._process_map = {}
        self.name = name
        if self.name is None:
            self.name = self.__class__.__name__
        self.wait_for_kill = wait_for_kill
        # store some pointers for the SIGTERM handler; children inherit the
        # handler, so _pid lets kill_children() tell parent from child.
        self._pid = os.getpid()
        self._sigterm_handler = signal.getsignal(signal.SIGTERM)

    def add_process(self, tgt, args=None, kwargs=None):
        '''
        Create a process with args + kwargs.
        ``tgt`` may be a ``multiprocessing.Process`` subclass (instantiated
        with ``args``/``kwargs``) or a plain callable (used as ``target``).
        '''
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        # isinstance(tgt, type) is the correct "is tgt a class?" test;
        # the previous type(multiprocessing.Process) == type(tgt) comparison
        # only worked by accident for new-style classes.
        if isinstance(tgt, type) and issubclass(tgt, multiprocessing.Process):
            process = tgt(*args, **kwargs)
        else:
            process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
        process.start()
        # NOTE: closing parenthesis added to the message (was unbalanced).
        log.debug("Started '{0}'(*{1}, **{2}) with pid {3}".format(
            tgt, args, kwargs, process.pid))
        self._process_map[process.pid] = {'tgt': tgt,
                                          'args': args,
                                          'kwargs': kwargs,
                                          'Process': process}

    def restart_process(self, pid):
        '''
        Create new process (assuming this one is dead), then remove the old one
        '''
        log.info(('Process {0} ({1}) died with exit status {2},'
                  ' restarting...').format(self._process_map[pid]['tgt'],
                                           pid,
                                           self._process_map[pid]['Process'].exitcode))
        # don't block, the process is already dead
        self._process_map[pid]['Process'].join(1)
        self.add_process(self._process_map[pid]['tgt'],
                         self._process_map[pid]['args'],
                         self._process_map[pid]['kwargs'])
        del self._process_map[pid]

    def run(self):
        '''
        Load and start all available api modules
        '''
        salt.utils.appendproctitle(self.name)
        # make sure to kill the subprocesses if the parent is killed
        signal.signal(signal.SIGTERM, self.kill_children)
        try:
            if HAS_PYTHON_SYSTEMD and systemd.daemon.booted():
                systemd.daemon.notify('READY=1')
        except SystemError:
            # Daemon wasn't started by systemd
            pass
        while True:
            try:
                # in case someone died while we were waiting...
                self.check_children()
                pid, exit_status = os.wait()
                if pid not in self._process_map:
                    log.debug(('Process of pid {0} died, not a known'
                               ' process, will not restart').format(pid))
                    continue
                self.restart_process(pid)
            # OSError is raised if a signal handler is called (SIGTERM) during os.wait
            except OSError:
                break

    def check_children(self):
        '''
        Check the children once
        '''
        # BUGFIX: iterate over a snapshot -- restart_process() mutates
        # _process_map, which raises RuntimeError when iterating the live
        # view on Python 3 (previously used six.iteritems()).
        for pid, mapping in list(self._process_map.items()):
            if not mapping['Process'].is_alive():
                self.restart_process(pid)

    def kill_children(self, *args):
        '''
        Kill all of the children
        '''
        # check that this is the correct process, children inherit this
        # handler, if we are in a child lets just run the original handler
        if os.getpid() != self._pid:
            if callable(self._sigterm_handler):
                return self._sigterm_handler(*args)
            elif self._sigterm_handler is not None:
                return signal.default_int_handler(signal.SIGTERM)(*args)
            else:
                return
        for p_map in self._process_map.values():
            p_map['Process'].terminate()
        end_time = time.time() + self.wait_for_kill  # when to die
        while self._process_map and time.time() < end_time:
            # BUGFIX: snapshot the items -- entries are deleted inside the
            # loop, which raised RuntimeError on Python 3 dict views.
            for pid, p_map in list(self._process_map.items()):
                p_map['Process'].join(0)
                # This is a race condition if a signal was passed to all children
                try:
                    del self._process_map[pid]
                except KeyError:
                    pass
        # SIGKILL anything that survived the graceful shutdown window.
        for pid in list(self._process_map):
            try:
                # BUGFIX: os.kill takes (pid, sig); the arguments were
                # reversed, so the SIGKILL escalation never worked.
                os.kill(pid, signal.SIGKILL)
            # in case the process has since decided to die, os.kill raises OSError
            except OSError:
                pass
|
__init__.py | from __future__ import print_function
import argparse
import io
import itertools
import json
import os
import random
import re
import shlex
import string
import sys
import traceback
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
from threading import Thread
import pluggy
import py
import six
import toml
from packaging import requirements
from packaging.utils import canonicalize_name
from packaging.version import Version
import tox
from tox.constants import INFO
from tox.exception import MissingDependency
from tox.interpreters import Interpreters, NoInterpreterInfo
from tox.reporter import (
REPORTER_TIMESTAMP_ON_ENV,
error,
update_default_reporter,
using,
verbosity1,
)
from tox.util.path import ensure_empty_dir
from tox.util.stdlib import importlib_metadata
from .parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from .parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from .parallel import add_parallel_config, add_parallel_flags
from .reporter import add_verbosity_commands
try:
from shlex import quote as shlex_quote
except ImportError:
from pipes import quote as shlex_quote
hookimpl = tox.hookimpl
# DEPRECATED - REMOVE - left for compatibility with plugins importing from here.
# Import hookimpl directly from tox instead.

# True when this tox process was re-invoked by tox's own provisioning logic.
WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1"

# Timeouts (seconds) between the SIGINT -> SIGTERM -> SIGKILL escalation
# steps when tearing down spawned commands.
SUICIDE_TIMEOUT = 0.0
INTERRUPT_TIMEOUT = 0.3
TERMINATE_TIMEOUT = 0.2

# Matches a "factor-expr: value" conditional line inside an ini section.
_FACTOR_LINE_PATTERN = re.compile(r"^([\w{}.!,-]+):\s+(.+)")
# Splits an envlist string on commas that sit outside {...} groups.
_ENVSTR_SPLIT_PATTERN = re.compile(r"((?:{[^}]+})+)|,")
# Captures the contents of one {a,b,c} expansion group.
_ENVSTR_EXPAND_PATTERN = re.compile(r"{([^}]+)}")
_WHITESPACE_PATTERN = re.compile(r"\s+")
def get_plugin_manager(plugins=()):
    """Create the pluggy plugin manager with all built-in tox hook
    implementations, setuptools-discovered plugins, and *plugins* registered."""
    # imported lazily to avoid an import cycle at module load time
    import tox.venv

    manager = pluggy.PluginManager("tox")
    manager.add_hookspecs(tox.hookspecs)
    for builtin_module in (tox.config, tox.interpreters, tox.venv, tox.session):
        manager.register(builtin_module)
    from tox import package

    manager.register(package)
    manager.load_setuptools_entrypoints("tox")
    for extra_plugin in plugins:
        manager.register(extra_plugin)
    manager.check_pending()
    return manager
class Parser:
    """Command line and ini-parser control object."""

    def __init__(self):
        class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
            # widen the help layout beyond argparse's defaults
            def __init__(self, prog):
                super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190)

        self.argparser = argparse.ArgumentParser(
            description="tox options",
            add_help=False,
            prog="tox",
            formatter_class=HelpFormatter,
        )
        self._testenv_attr = []

    def add_argument(self, *args, **kwargs):
        """add argument to command line parser. This takes the
        same arguments that ``argparse.ArgumentParser.add_argument``.
        """
        return self.argparser.add_argument(*args, **kwargs)

    def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
        """add an ini-file variable for "testenv" section.
        Types are specified as strings like "bool", "line-list", "string", "argv", "path",
        "argvlist".
        The ``postprocess`` function will be called for each testenv
        like ``postprocess(testenv_config=testenv_config, value=value)``
        where ``value`` is the value as read from the ini (or the default value)
        and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
        which will receive all ini-variables as object attributes.
        Any postprocess function must return a value which will then be set
        as the final value in the testenv section.
        """
        attribute = VenvAttribute(name, type, default, help, postprocess)
        self._testenv_attr.append(attribute)

    def add_testenv_attribute_obj(self, obj):
        """add an ini-file variable as an object.
        This works as the ``add_testenv_attribute`` function but expects
        "name", "type", "help", and "postprocess" attributes on the object.
        """
        for required_attr in ("name", "type", "help", "postprocess"):
            assert hasattr(obj, required_attr)
        self._testenv_attr.append(obj)

    def parse_cli(self, args, strict=False):
        known, unknown = self.argparser.parse_known_args(args)
        # leftover arguments are fatal in strict mode or under provisioning
        if unknown and (strict or WITHIN_PROVISION):
            self.argparser.error("unrecognized arguments: {}".format(" ".join(unknown)))
        return known

    def _format_help(self):
        return self.argparser.format_help()
class VenvAttribute:
    """Plain record describing one [testenv] ini attribute."""

    def __init__(self, name, type, default, help, postprocess):
        # store everything verbatim; consumers read these as attributes
        for attribute, value in (
            ("name", name),
            ("type", type),
            ("default", default),
            ("help", help),
            ("postprocess", postprocess),
        ):
            setattr(self, attribute, value)
class DepOption:
    """Handler for the ``deps`` [testenv] attribute."""

    name = "deps"
    type = "line-list"
    help = "each line specifies a dependency in pip/setuptools format."
    default = ()

    def postprocess(self, testenv_config, value):
        """Turn the raw ini lines into a list of DepConfig objects."""
        config = testenv_config.config
        dep_configs = []
        for raw_line in value:
            match = re.match(r":(\w+):\s*(\S+)", raw_line)
            if match is None:
                ixserver = None
                name = raw_line.strip()
            else:
                index_name, name = match.groups()
                ixserver = config.indexserver[index_name]
            # we need to process options, in case they contain a space,
            # as the subprocess call to pip install will otherwise fail.
            # in case of a short option, we remove the space
            for short_option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
                if name.startswith(short_option):
                    name = "{}{}".format(short_option, name[len(short_option) :].strip())
            # in case of a long option, we add an equal sign
            for long_option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
                if name.startswith("{} ".format(long_option)):
                    name = "{}={}".format(long_option, name[len(long_option) :].strip())
            name = self._cut_off_dep_comment(name)
            name = self._replace_forced_dep(name, config)
            dep_configs.append(DepConfig(name, ixserver))
        return dep_configs

    def _replace_forced_dep(self, name, config):
        """Override given dependency config name. Take ``--force-dep-version`` option into account.
        :param name: dep config, for example ["pkg==1.0", "other==2.0"].
        :param config: ``Config`` instance
        :return: the new dependency that should be used for virtual environments
        """
        for forced_dep in config.option.force_dep or ():
            if self._is_same_dep(forced_dep, name):
                return forced_dep
        return name

    @staticmethod
    def _cut_off_dep_comment(name):
        # drop a trailing "  # comment" from the requirement line
        return re.sub(r"\s+#.*", "", name).strip()

    @classmethod
    def _is_same_dep(cls, dep1, dep2):
        """Definitions are the same if they refer to the same package, even if versions differ."""
        dep1_name = canonicalize_name(requirements.Requirement(dep1).name)
        try:
            dep2_name = canonicalize_name(requirements.Requirement(dep2).name)
        except requirements.InvalidRequirement:
            # we couldn't parse a version, probably a URL
            return False
        return dep2_name == dep1_name
class PosargsOption:
    """Handler for the ``args_are_paths`` [testenv] attribute."""

    name = "args_are_paths"
    type = "bool"
    default = True
    help = "treat positional args in commands as paths"

    def postprocess(self, testenv_config, value):
        """When enabled, rewrite positional CLI args that are existing paths
        relative to the testenv's changedir; always register the (possibly
        rewritten) args as substitutions."""
        config = testenv_config.config
        cli_args = config.option.args
        if cli_args:
            if value:
                rewritten = []
                for arg in config.option.args:
                    # only touch relative paths that actually exist
                    if arg and not os.path.isabs(arg):
                        candidate = os.path.join(config.invocationcwd.strpath, arg)
                        if os.path.exists(candidate):
                            arg = os.path.relpath(candidate, testenv_config.changedir.strpath)
                    rewritten.append(arg)
                cli_args = rewritten
            testenv_config._reader.addsubstitutions(cli_args)
        return value
class InstallcmdOption:
    """Handler for the ``install_command`` [testenv] attribute."""

    name = "install_command"
    type = "argv_install_command"
    default = r"python -m pip install \{opts\} \{packages\}"
    help = "install command for dependencies and package under test."

    def postprocess(self, testenv_config, value):
        # the command must be able to receive the packages to install
        if "{packages}" in value:
            return value
        raise tox.exception.ConfigError(
            "'install_command' must contain '{packages}' substitution",
        )
def parseconfig(args, plugins=()):
    """Parse the configuration file and create a Config object.
    :param plugins: extra plugin objects to register with the plugin manager.
    :param list[str] args: list of arguments.
    :rtype: :class:`Config`
    :raise SystemExit: toxinit file is not found
    """
    pm = get_plugin_manager(plugins)
    config, option = parse_cli(args, pm)
    update_default_reporter(config.option.quiet_level, config.option.verbose_level)
    # walk the candidate config files; the first one that parses wins
    for config_file in propose_configs(option.configfile):
        config_type = config_file.basename
        content = None
        if config_type == "pyproject.toml":
            # tox config may be embedded as an ini string under
            # [tool.tox] legacy_tox_ini; skip this file when absent
            toml_content = get_py_project_toml(config_file)
            try:
                content = toml_content["tool"]["tox"]["legacy_tox_ini"]
            except KeyError:
                continue
        try:
            ParseIni(config, config_file, content)
        except SkipThisIni:
            continue
        pm.hook.tox_configure(config=config)  # post process config object
        break
    else:
        # no candidate parsed successfully (the loop never hit "break")
        parser = Parser()
        pm.hook.tox_addoption(parser=parser)
        # if no tox config file, now we need do a strict argument evaluation
        # raise on unknown args
        parser.parse_cli(args, strict=True)
        if option.help or option.helpini:
            return config
        if option.devenv:
            # To load defaults, we parse an empty config
            ParseIni(config, py.path.local(), "")
            pm.hook.tox_configure(config=config)
            return config
        msg = "tox config file (either {}) not found"
        candidates = ", ".join(INFO.CONFIG_CANDIDATES)
        feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
    return config
def get_py_project_toml(path):
    """Load and return the parsed contents of a ``pyproject.toml`` file."""
    with io.open(str(path), encoding="UTF-8") as file_handler:
        return toml.load(file_handler)
def propose_configs(cli_config_file):
    """Yield candidate config file paths, honoring an explicit ``-c``
    file or directory argument when given."""
    from_folder = py.path.local()
    if cli_config_file is not None:
        # an explicit file short-circuits the search entirely
        if os.path.isfile(cli_config_file):
            yield py.path.local(cli_config_file)
            return
        if os.path.isdir(cli_config_file):
            from_folder = py.path.local(cli_config_file)
        else:
            print(
                "ERROR: {} is neither file or directory".format(cli_config_file),
                file=sys.stderr,
            )
            return
    for candidate_name in INFO.CONFIG_CANDIDATES:
        if from_folder.join(candidate_name).isfile():
            yield from_folder.join(candidate_name)
        # also look upwards through the parent directories
        for parent in from_folder.parts(reverse=True):
            candidate = parent.join(candidate_name)
            if candidate.check():
                yield candidate
def parse_cli(args, pm):
    """Parse *args* with a plugin-extended parser; return ``(Config, option)``.
    Handles ``--version`` by printing the report and exiting."""
    parser = Parser()
    pm.hook.tox_addoption(parser=parser)
    option = parser.parse_cli(args)
    if option.version:
        print(get_version_info(pm))
        raise SystemExit(0)
    config = Config(
        pluginmanager=pm,
        option=option,
        interpreters=Interpreters(hook=pm.hook),
        parser=parser,
        args=args,
    )
    return config, option
def feedback(msg, sysexit=False):
    """Write an ``ERROR:`` message to stderr; exit with status 1 when
    *sysexit* is true."""
    sys.stderr.write("ERROR: {}\n".format(msg))
    if sysexit:
        raise SystemExit(1)
def get_version_info(pm):
    """Return a human-readable report of the tox version, its location,
    and any registered plugin distributions."""
    lines = ["{} imported from {}".format(tox.__version__, tox.__file__)]
    dist_info = pm.list_plugin_distinfo()
    if dist_info:
        lines.append("registered plugins:")
    for mod, egg_info in dist_info:
        location = getattr(mod, "__file__", repr(mod))
        lines.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, location))
    return "\n".join(lines)
class SetenvDict(object):
    """Lazy mapping over the raw ``setenv`` ini definitions.
    Values are substituted through the reader on first access and cached;
    unknown (or circular) names fall back to ``os.environ``."""

    _DUMMY = object()

    def __init__(self, definitions, reader):
        self.definitions = definitions
        self.reader = reader
        self.resolved = {}
        self._lookupstack = []

    def __repr__(self):
        return "{}: {}".format(self.__class__.__name__, self.definitions)

    def __contains__(self, name):
        return name in self.definitions

    def get(self, name, default=None):
        if name in self.resolved:
            return self.resolved[name]
        # A name already on the lookup stack means a circular reference;
        # treat it like an unknown name and consult the environment.
        if name in self._lookupstack or name not in self.definitions:
            return os.environ.get(name, default)
        raw_value = self.definitions[name]
        self._lookupstack.append(name)
        try:
            value = self.reader._replace(raw_value, name="setenv")
            self.resolved[name] = value
        finally:
            self._lookupstack.pop()
        return value

    def __getitem__(self, name):
        value = self.get(name, self._DUMMY)
        if value is self._DUMMY:
            raise KeyError(name)
        return value

    def keys(self):
        return self.definitions.keys()

    def __setitem__(self, name, value):
        # explicit assignments bypass substitution entirely
        self.definitions[name] = value
        self.resolved[name] = value

    def items(self):
        for name in self.definitions:
            yield name, self[name]

    def export(self):
        # post-process items to avoid internal syntax/semantics
        # such as {} being escaped using \{\}, suitable for use with
        # os.environ .
        return {
            name: Replacer._unescape(value)
            for name, value in self.items()
            if value is not self._DUMMY
        }
@tox.hookimpl
def tox_addoption(parser):
    """Hook implementation registering tox's core command line options and
    all built-in [testenv] ini attributes on *parser*."""
    # ------- command line options -------
    parser.add_argument(
        "--version",
        action="store_true",
        help="report version information to stdout.",
    )
    parser.add_argument("-h", "--help", action="store_true", help="show help about options")
    parser.add_argument(
        "--help-ini",
        "--hi",
        action="store_true",
        dest="helpini",
        help="show help about ini-names",
    )
    add_verbosity_commands(parser)
    parser.add_argument(
        "--showconfig",
        action="store_true",
        help="show live configuration (by default all env, with -l only default targets,"
        " specific via TOXENV/-e)",
    )
    parser.add_argument(
        "-l",
        "--listenvs",
        action="store_true",
        help="show list of test environments (with description if verbose)",
    )
    parser.add_argument(
        "-a",
        "--listenvs-all",
        action="store_true",
        help="show list of all defined environments (with description if verbose)",
    )
    parser.add_argument(
        "-c",
        dest="configfile",
        help="config file name or directory with 'tox.ini' file.",
    )
    parser.add_argument(
        "-e",
        action="append",
        dest="env",
        metavar="envlist",
        help="work against specified environments (ALL selects all).",
    )
    parser.add_argument(
        "--devenv",
        metavar="ENVDIR",
        help=(
            "sets up a development environment at ENVDIR based on the env's tox "
            "configuration specified by `-e` (-e defaults to py)."
        ),
    )
    parser.add_argument("--notest", action="store_true", help="skip invoking test commands.")
    parser.add_argument(
        "--sdistonly",
        action="store_true",
        help="only perform the sdist packaging activity.",
    )
    parser.add_argument(
        "--skip-pkg-install",
        action="store_true",
        help="skip package installation for this run",
    )
    add_parallel_flags(parser)
    parser.add_argument(
        "--parallel--safe-build",
        action="store_true",
        dest="parallel_safe_build",
        help="(deprecated) ensure two tox builds can run in parallel "
        "(uses a lock file in the tox workdir with .lock extension)",
    )
    parser.add_argument(
        "--installpkg",
        metavar="PATH",
        help="use specified package for installation into venv, instead of creating an sdist.",
    )
    parser.add_argument(
        "--develop",
        action="store_true",
        help="install package in the venv using 'setup.py develop' via 'pip -e .'",
    )
    parser.add_argument(
        "-i",
        "--index-url",
        action="append",
        dest="indexurl",
        metavar="URL",
        help="set indexserver url (if URL is of form name=url set the "
        "url for the 'name' indexserver, specifically)",
    )
    parser.add_argument(
        "--pre",
        action="store_true",
        help="install pre-releases and development versions of dependencies. "
        "This will pass the --pre option to install_command "
        "(pip by default).",
    )
    parser.add_argument(
        "-r",
        "--recreate",
        action="store_true",
        help="force recreation of virtual environments",
    )
    parser.add_argument(
        "--result-json",
        dest="resultjson",
        metavar="PATH",
        help="write a json file with detailed information "
        "about all commands and results involved.",
    )
    parser.add_argument(
        "--discover",
        dest="discover",
        nargs="+",
        metavar="PATH",
        help="for python discovery first try the python executables under these paths",
        default=[],
    )
    # We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
    parser.add_argument(
        "--hashseed",
        metavar="SEED",
        help="set PYTHONHASHSEED to SEED before running commands. "
        "Defaults to a random integer in the range [1, 4294967295] "
        "([1, 1024] on Windows). "
        "Passing 'noset' suppresses this behavior.",
    )
    parser.add_argument(
        "--force-dep",
        action="append",
        metavar="REQ",
        help="Forces a certain version of one of the dependencies "
        "when configuring the virtual environment. REQ Examples "
        "'pytest<2.7' or 'django>=1.6'.",
    )
    parser.add_argument(
        "--sitepackages",
        action="store_true",
        help="override sitepackages setting to True in all envs",
    )
    parser.add_argument(
        "--alwayscopy",
        action="store_true",
        help="override alwayscopy setting to True in all envs",
    )
    parser.add_argument(
        "--no-provision",
        action="store",
        nargs="?",
        default=False,
        const=True,
        metavar="REQUIRES_JSON",
        help="do not perform provision, but fail and if a path was provided "
        "write provision metadata as JSON to it",
    )
    cli_skip_missing_interpreter(parser)
    parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
    parser.add_argument(
        "args",
        nargs="*",
        help="additional arguments available to command positional substitution",
    )

    # ------- [testenv] ini attributes -------
    def _set_envdir_from_devenv(testenv_config, value):
        # --devenv redirects the env directory of the target env (but not
        # of the provisioning env) to the requested location
        if (
            testenv_config.config.option.devenv is not None
            and testenv_config.envname != testenv_config.config.provision_tox_env
        ):
            return py.path.local(testenv_config.config.option.devenv)
        else:
            return value

    parser.add_testenv_attribute(
        name="envdir",
        type="path",
        default="{toxworkdir}/{envname}",
        help="set venv directory -- be very careful when changing this as tox "
        "will remove this directory when recreating an environment",
        postprocess=_set_envdir_from_devenv,
    )

    # add various core venv interpreter attributes
    def setenv(testenv_config, value):
        # NOTE: the local name deliberately shadows the function name; the
        # function is only referenced once, in the attribute registration below
        setenv = value
        config = testenv_config.config
        if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
            setenv["PYTHONHASHSEED"] = config.hashseed
        setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
        setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
        return setenv

    parser.add_testenv_attribute(
        name="setenv",
        type="dict_setenv",
        postprocess=setenv,
        help="list of X=Y lines with environment variable settings",
    )

    def basepython_default(testenv_config, value):
        """either user set or proposed from the factor name
        in both cases we check that the factor name implied python version and the resolved
        python interpreter version match up; if they don't we warn, unless ignore base
        python conflict is set in which case the factor name implied version if forced
        """
        for factor in testenv_config.factors:
            match = tox.PYTHON.PY_FACTORS_RE.match(factor)
            if match:
                base_exe = {"py": "python"}.get(match.group(1), match.group(1))
                version_s = match.group(2)
                if not version_s:
                    version_info = ()
                elif len(version_s) == 1:
                    version_info = (version_s,)
                else:
                    version_info = (version_s[0], version_s[1:])
                implied_version = ".".join(version_info)
                implied_python = "{}{}".format(base_exe, implied_version)
                break
        else:
            implied_python, version_info, implied_version = None, (), ""

        if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
            return implied_python

        proposed_python = (implied_python or sys.executable) if value is None else str(value)
        if implied_python is not None and implied_python != proposed_python:
            testenv_config.basepython = proposed_python
            python_info_for_proposed = testenv_config.python_info
            if not isinstance(python_info_for_proposed, NoInterpreterInfo):
                proposed_version = ".".join(
                    str(x) for x in python_info_for_proposed.version_info[: len(version_info)]
                )
                if proposed_version != implied_version:
                    # TODO(stephenfin): Raise an exception here in tox 4.0
                    warnings.warn(
                        "conflicting basepython version (set {}, should be {}) for env '{}';"
                        "resolve conflict or set ignore_basepython_conflict".format(
                            proposed_version,
                            implied_version,
                            testenv_config.envname,
                        ),
                    )
        return proposed_python

    parser.add_testenv_attribute(
        name="basepython",
        type="basepython",
        default=None,
        postprocess=basepython_default,
        help="executable name or path of interpreter used to create a virtual test environment.",
    )

    def merge_description(testenv_config, value):
        """the reader by default joins generated description with new line,
        replace new line with space"""
        return value.replace("\n", " ")

    parser.add_testenv_attribute(
        name="description",
        type="string",
        default="",
        postprocess=merge_description,
        help="short description of this environment",
    )
    parser.add_testenv_attribute(
        name="envtmpdir",
        type="path",
        default="{envdir}/tmp",
        help="venv temporary directory",
    )
    parser.add_testenv_attribute(
        name="envlogdir",
        type="path",
        default="{envdir}/log",
        help="venv log directory",
    )
    parser.add_testenv_attribute(
        name="downloadcache",
        type="string",
        default=None,
        help="(ignored) has no effect anymore, pip-8 uses local caching by default",
    )
    parser.add_testenv_attribute(
        name="changedir",
        type="path",
        default="{toxinidir}",
        help="directory to change to when running commands",
    )
    parser.add_testenv_attribute_obj(PosargsOption())

    def skip_install_default(testenv_config, value):
        return value is True or testenv_config.config.option.skip_pkg_install is True

    parser.add_testenv_attribute(
        name="skip_install",
        type="bool",
        default=False,
        postprocess=skip_install_default,
        help="Do not install the current package. This can be used when you need the virtualenv "
        "management but do not want to install the current package",
    )
    parser.add_testenv_attribute(
        name="ignore_errors",
        type="bool",
        default=False,
        help="if set to True all commands will be executed irrespective of their result error "
        "status.",
    )

    def recreate(testenv_config, value):
        # the -r/--recreate CLI flag overrides the ini setting
        if testenv_config.config.option.recreate:
            return True
        return value

    parser.add_testenv_attribute(
        name="recreate",
        type="bool",
        default=False,
        postprocess=recreate,
        help="always recreate this test environment.",
    )

    def passenv(testenv_config, value):
        # Flatten the list to deal with space-separated values.
        value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
        passenv = {
            "CURL_CA_BUNDLE",
            "LANG",
            "LANGUAGE",
            "LC_ALL",
            "LD_LIBRARY_PATH",
            "PATH",
            "PIP_INDEX_URL",
            "PIP_EXTRA_INDEX_URL",
            "REQUESTS_CA_BUNDLE",
            "SSL_CERT_FILE",
            "TOX_WORK_DIR",
            "HTTP_PROXY",
            "HTTPS_PROXY",
            "NO_PROXY",
            str(REPORTER_TIMESTAMP_ON_ENV),
            str(PARALLEL_ENV_VAR_KEY_PUBLIC),
        }
        # read in global passenv settings
        p = os.environ.get("TOX_TESTENV_PASSENV", None)
        if p is not None:
            env_values = [x for x in p.split() if x]
            value.extend(env_values)
        # we ensure that tmp directory settings are passed on
        # we could also set it to the per-venv "envtmpdir"
        # but this leads to very long paths when run with jenkins
        # so we just pass it on by default for now.
        if tox.INFO.IS_WIN:
            passenv.add("SYSTEMDRIVE")  # needed for pip6
            passenv.add("SYSTEMROOT")  # needed for python's crypto module
            passenv.add("PATHEXT")  # needed for discovering executables
            passenv.add("COMSPEC")  # needed for distutils cygwincompiler
            passenv.add("TEMP")
            passenv.add("TMP")
            # for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
            passenv.add("NUMBER_OF_PROCESSORS")
            passenv.add("PROCESSOR_ARCHITECTURE")  # platform.machine()
            passenv.add("USERPROFILE")  # needed for `os.path.expanduser()`
            passenv.add("MSYSTEM")  # fixes #429
        else:
            passenv.add("TMPDIR")
        # expand fnmatch-style patterns against the current environment
        for spec in value:
            for name in os.environ:
                if fnmatchcase(name.upper(), spec.upper()):
                    passenv.add(name)
        return passenv

    parser.add_testenv_attribute(
        name="passenv",
        type="line-list",
        postprocess=passenv,
        help="environment variables needed during executing test commands (taken from invocation "
        "environment). Note that tox always passes through some basic environment variables "
        "which are needed for basic functioning of the Python system. See --showconfig for the "
        "eventual passenv setting.",
    )
    parser.add_testenv_attribute(
        name="whitelist_externals",
        type="line-list",
        help="DEPRECATED: use allowlist_externals",
    )
    parser.add_testenv_attribute(
        name="allowlist_externals",
        type="line-list",
        help="each lines specifies a path or basename for which tox will not warn "
        "about it coming from outside the test environment.",
    )
    parser.add_testenv_attribute(
        name="platform",
        type="string",
        default=".*",
        help="regular expression which must match against ``sys.platform``. "
        "otherwise testenv will be skipped.",
    )

    def sitepackages(testenv_config, value):
        # the --sitepackages CLI flag overrides the ini setting
        return testenv_config.config.option.sitepackages or value

    def alwayscopy(testenv_config, value):
        # the --alwayscopy CLI flag overrides the ini setting
        return testenv_config.config.option.alwayscopy or value

    parser.add_testenv_attribute(
        name="sitepackages",
        type="bool",
        default=False,
        postprocess=sitepackages,
        help="Set to ``True`` if you want to create virtual environments that also "
        "have access to globally installed packages.",
    )
    parser.add_testenv_attribute(
        "download",
        type="bool",
        default=False,
        help="download the latest pip, setuptools and wheel when creating the virtual"
        "environment (default is to use the one bundled in virtualenv)",
    )
    parser.add_testenv_attribute(
        name="alwayscopy",
        type="bool",
        default=False,
        postprocess=alwayscopy,
        help="Set to ``True`` if you want virtualenv to always copy files rather "
        "than symlinking.",
    )

    def pip_pre(testenv_config, value):
        # the --pre CLI flag overrides the ini setting
        return testenv_config.config.option.pre or value

    parser.add_testenv_attribute(
        name="pip_pre",
        type="bool",
        default=False,
        postprocess=pip_pre,
        help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
    )

    def develop(testenv_config, value):
        option = testenv_config.config.option
        return not option.installpkg and (value or option.develop or option.devenv is not None)

    parser.add_testenv_attribute(
        name="usedevelop",
        type="bool",
        postprocess=develop,
        default=False,
        help="install package in develop/editable mode",
    )
    parser.add_testenv_attribute_obj(InstallcmdOption())
    parser.add_testenv_attribute(
        name="list_dependencies_command",
        type="argv",
        default="python -m pip freeze",
        help="list dependencies for a virtual environment",
    )
    parser.add_testenv_attribute_obj(DepOption())
    parser.add_testenv_attribute(
        name="suicide_timeout",
        type="float",
        default=SUICIDE_TIMEOUT,
        help="timeout to allow process to exit before sending SIGINT",
    )
    parser.add_testenv_attribute(
        name="interrupt_timeout",
        type="float",
        default=INTERRUPT_TIMEOUT,
        help="timeout before sending SIGTERM after SIGINT",
    )
    parser.add_testenv_attribute(
        name="terminate_timeout",
        type="float",
        default=TERMINATE_TIMEOUT,
        help="timeout before sending SIGKILL after SIGTERM",
    )
    parser.add_testenv_attribute(
        name="commands",
        type="argvlist",
        default="",
        help="each line specifies a test command and can use substitution.",
    )
    parser.add_testenv_attribute(
        name="commands_pre",
        type="argvlist",
        default="",
        help="each line specifies a setup command action and can use substitution.",
    )
    parser.add_testenv_attribute(
        name="commands_post",
        type="argvlist",
        default="",
        help="each line specifies a teardown command and can use substitution.",
    )
    parser.add_testenv_attribute(
        "ignore_outcome",
        type="bool",
        default=False,
        help="if set to True a failing result of this testenv will not make "
        "tox fail, only a warning will be produced",
    )
    parser.add_testenv_attribute(
        "extras",
        type="line-list",
        help="list of extras to install with the source distribution or develop install",
    )
    add_parallel_config(parser)
def cli_skip_missing_interpreter(parser):
    """Attach the ``-s/--skip-missing-interpreters`` option to *parser*."""

    class SkipMissingInterpreterAction(argparse.Action):
        def __call__(self, parser, namespace, values, option_string=None):
            # a bare "-s" (no value) means "true"
            chosen = "true" if values is None else values
            if chosen not in ("config", "true", "false"):
                raise argparse.ArgumentTypeError("value must be config, true or false")
            setattr(namespace, self.dest, chosen)

    parser.add_argument(
        "-s",
        "--skip-missing-interpreters",
        default="config",
        metavar="val",
        nargs="?",
        action=SkipMissingInterpreterAction,
        help="don't fail tests for missing interpreters: {config,true,false} choice",
    )
class Config(object):
    """Global Tox config object."""

    def __init__(self, pluginmanager, option, interpreters, parser, args):
        # mapping envname -> envconfig
        self.envconfigs = OrderedDict()
        self.invocationcwd = py.path.local()
        self.interpreters = interpreters
        self.pluginmanager = pluginmanager
        # namespace containing all parsed command line options
        self.option = option
        self._parser = parser
        self._testenv_attr = parser._testenv_attr
        # the raw command line argument list
        self.args = args

    @property
    def homedir(self):
        found = get_homedir()
        # FIXME XXX good idea?
        return self.toxinidir if found is None else found
class TestenvConfig:
    """Testenv Configuration object.
    In addition to some core attributes/properties this config object holds all
    per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
    """

    def __init__(self, envname, config, factors, reader):
        #: test environment name
        self.envname = envname
        #: global tox config object
        self.config = config
        #: set of factors
        self.factors = factors
        self._reader = reader
        self._missing_subs = {}
        """Holds substitutions that could not be resolved.
        Pre 2.8.1 missing substitutions crashed with a ConfigError although this would not be a
        problem if the env is not part of the current testrun. So we need to remember this and
        check later when the testenv is actually run and crash only then.
        """

    # Python 3 only, as __getattribute__ is ignored for old-style types on Python 2
    def __getattribute__(self, name):
        # Attributes stored as Exception instances represent failed ini
        # substitutions; re-raise on access so they only crash when used.
        rv = object.__getattribute__(self, name)
        if isinstance(rv, Exception):
            raise rv
        return rv

    if six.PY2:
        # Python 2 fallback for the lazy-crash behavior above.
        def __getattr__(self, name):
            if name in self._missing_subs:
                raise self._missing_subs[name]
            raise AttributeError(name)

    def get_envbindir(self):
        """Path to directory where scripts/binaries reside."""
        # "bin" applies everywhere except classic Windows CPython layouts
        # ("Scripts"); MSYS2 and old PyPy-on-Windows also use "bin".
        is_bin = (
            isinstance(self.python_info, NoInterpreterInfo)
            or tox.INFO.IS_WIN is False
            or self.python_info.implementation == "Jython"
            or (
                # this combination is MSYS2
                tox.INFO.IS_WIN
                and self.python_info.os_sep == "/"
            )
            or (
                tox.INFO.IS_WIN
                and self.python_info.implementation == "PyPy"
                and self.python_info.extra_version_info < (7, 3, 1)
            )
        )
        return self.envdir.join("bin" if is_bin else "Scripts")

    @property
    def envbindir(self):
        # convenience property wrapper around get_envbindir()
        return self.get_envbindir()

    @property
    def envpython(self):
        """Path to python executable."""
        return self.get_envpython()

    def get_envpython(self):
        """path to python/jython executable."""
        if "jython" in str(self.basepython):
            name = "jython"
        else:
            name = "python"
        return self.envbindir.join(name)

    def get_envsitepackagesdir(self):
        """Return sitepackagesdir of the virtualenv environment.
        NOTE: Only available during execution, not during parsing.
        """
        x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
        return x

    @property
    def python_info(self):
        """Return interpreter information for the configured basepython."""
        return self.config.interpreters.get_info(envconfig=self)

    def getsupportedinterpreter(self):
        """Resolve and validate the configured interpreter; raises a tox
        exception when it is unsupported, missing, or failed to report its
        version, otherwise returns the executable path."""
        if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
            raise tox.exception.UnsupportedInterpreter(
                "Jython/Windows does not support installing scripts",
            )
        info = self.config.interpreters.get_info(envconfig=self)
        if not info.executable:
            raise tox.exception.InterpreterNotFound(self.basepython)
        if not info.version_info:
            raise tox.exception.InvocationError(
                "Failed to get version_info for {}: {}".format(info.name, info.err),
            )
        return info.executable
testenvprefix = "testenv:"
def get_homedir():
    """Best-effort lookup of the user's home directory; None when undetermined."""
    try:
        home = py.path.local._gethomedir()
    except Exception:
        home = None
    return home
def make_hashseed():
    """Return a random PYTHONHASHSEED value as a string (smaller range on Windows)."""
    upper_bound = 1024 if tox.INFO.IS_WIN else 4294967295
    return str(random.randint(1, upper_bound))
class SkipThisIni(Exception):
    """Internal exception to indicate the parsed ini file should be skipped.

    Raised by ParseIni when a setup.cfg contains no [tox:tox] section.
    """
class ParseIni(object):
    """Parse a tox ini file (tox.ini / setup.cfg) into the global Config object."""

    def __init__(self, config, ini_path, ini_data):  # noqa
        """Populate *config* from the ini file at *ini_path* (or raw *ini_data*)."""
        config.toxinipath = ini_path
        using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid()))
        config.toxinidir = config.toxinipath.dirpath() if ini_path.check(file=True) else ini_path

        self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
        # setup.cfg without a [tox:tox] section is not a tox config at all.
        if ini_path.basename == "setup.cfg" and "tox:tox" not in self._cfg:
            verbosity1("Found no [tox:tox] section in setup.cfg, skipping.")
            raise SkipThisIni()

        previous_line_of = self._cfg.lineof
        self.expand_section_names(self._cfg)

        # Generated sections have no real source line; report 0 instead of None.
        def line_of_default_to_zero(section, name=None):
            at = previous_line_of(section, name=name)
            if at is None:
                at = 0
            return at

        self._cfg.lineof = line_of_default_to_zero

        config._cfg = self._cfg
        self.config = config

        # In setup.cfg, tox sections are namespaced with a "tox:" prefix.
        prefix = "tox" if ini_path.basename == "setup.cfg" else None
        fallbacksection = "tox:tox" if ini_path.basename == "setup.cfg" else "tox"
        context_name = getcontextname()
        if context_name == "jenkins":
            reader = SectionReader(
                "tox:jenkins",
                self._cfg,
                prefix=prefix,
                fallbacksections=[fallbacksection],
            )
            dist_share_default = "{toxworkdir}/distshare"
        elif not context_name:
            reader = SectionReader("tox", self._cfg, prefix=prefix)
            dist_share_default = "{homedir}/.tox/distshare"
        else:
            raise ValueError("invalid context")

        # --hashseed: None -> pick a random seed, "noset" -> leave unset.
        if config.option.hashseed is None:
            hash_seed = make_hashseed()
        elif config.option.hashseed == "noset":
            hash_seed = None
        else:
            hash_seed = config.option.hashseed
        config.hashseed = hash_seed

        reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)

        if config.option.workdir is None:
            config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
        else:
            config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)

        if os.path.exists(str(config.toxworkdir)):
            config.toxworkdir = config.toxworkdir.realpath()

        reader.addsubstitutions(toxworkdir=config.toxworkdir)
        config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)

        config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")

        reader.addsubstitutions(distdir=config.distdir)
        config.distshare = reader.getpath("distshare", dist_share_default)
        reader.addsubstitutions(distshare=config.distshare)
        config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
        reader.addsubstitutions(temp_dir=config.temp_dir)
        config.sdistsrc = reader.getpath("sdistsrc", None)
        config.setupdir = reader.getpath("setupdir", "{toxinidir}")
        config.logdir = config.toxworkdir.join("log")
        # Only the top-level invocation may wipe the log dir; parallel children
        # and provisioned re-invocations share it.
        within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
        if not within_parallel and not WITHIN_PROVISION:
            ensure_empty_dir(config.logdir)

        # determine indexserver dictionary
        config.indexserver = {"default": IndexServerConfig("default")}
        prefix = "indexserver"
        for line in reader.getlist(prefix):
            name, url = map(lambda x: x.strip(), line.split("=", 1))
            config.indexserver[name] = IndexServerConfig(name, url)

        if config.option.skip_missing_interpreters == "config":
            val = reader.getbool("skip_missing_interpreters", False)
            config.option.skip_missing_interpreters = "true" if val else "false"

        override = False
        if config.option.indexurl:
            for url_def in config.option.indexurl:
                m = re.match(r"\W*(\w+)=(\S+)", url_def)
                if m is None:
                    url = url_def
                    name = "default"
                else:
                    name, url = m.groups()
                    if not url:
                        url = None
                if name != "ALL":
                    config.indexserver[name].url = url
                else:
                    override = url
        # let ALL override all existing entries
        if override:
            for name in config.indexserver:
                config.indexserver[name] = IndexServerConfig(name, override)

        self.handle_provision(config, reader)

        self.parse_build_isolation(config, reader)
        res = self._getenvdata(reader, config)
        config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res

        # factors used in config or predefined
        known_factors = self._list_section_factors("testenv")
        known_factors.update({"py", "python"})

        # factors stated in config envlist
        stated_envlist = reader.getstring("envlist", replace=False)
        if stated_envlist:
            for env in _split_env(stated_envlist):
                known_factors.update(env.split("-"))

        # configure testenvs
        # Env configs are built concurrently on daemon threads; failures are
        # collected and reported together afterwards.
        to_do = []
        failures = OrderedDict()
        results = {}
        cur_self = self

        def run(name, section, subs, config):
            try:
                results[name] = cur_self.make_envconfig(name, section, subs, config)
            except Exception as exception:
                failures[name] = (exception, traceback.format_exc())

        order = []
        for name in all_envs:
            section = "{}{}".format(testenvprefix, name)
            factors = set(name.split("-"))
            # Only build envs that have an explicit section, consist of known
            # factors, or whose unknown factors all look like python versions.
            if (
                section in self._cfg
                or factors <= known_factors
                or all(
                    tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
                )
            ):
                order.append(name)
                thread = Thread(target=run, args=(name, section, reader._subs, config))
                thread.daemon = True
                thread.start()
                to_do.append(thread)

        for thread in to_do:
            while thread.is_alive():
                thread.join(timeout=20)

        if failures:
            raise tox.exception.ConfigError(
                "\n".join(
                    "{} failed with {} at {}".format(key, exc, trace)
                    for key, (exc, trace) in failures.items()
                ),
            )

        for name in order:
            config.envconfigs[name] = results[name]

        all_develop = all(
            name in config.envconfigs and config.envconfigs[name].usedevelop
            for name in config.envlist
        )

        config.skipsdist = reader.getbool("skipsdist", all_develop)

        if config.option.devenv is not None:
            config.option.notest = True

        if config.option.devenv is not None and len(config.envlist) != 1:
            feedback("--devenv requires only a single -e", sysexit=True)

    def handle_provision(self, config, reader):
        """Check [tox] requires/minversion and trigger provisioning when unmet.

        Raises MissingRequirement (carrying *config*) when a provision run is
        needed; otherwise re-parses the CLI strictly to reject unknown args.
        """
        config.requires = reader.getlist("requires")
        config.minversion = reader.getstring("minversion", None)
        config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox")
        min_version = "tox >= {}".format(config.minversion or Version(tox.__version__).public)
        deps = self.ensure_requires_satisfied(config, config.requires, min_version)
        if config.run_provision:
            section_name = "testenv:{}".format(name)
            if section_name not in self._cfg.sections:
                self._cfg.sections[section_name] = {}
            self._cfg.sections[section_name]["description"] = "meta tox"
            env_config = self.make_envconfig(
                name,
                "{}{}".format(testenvprefix, name),
                reader._subs,
                config,
            )
            env_config.deps = deps
            config.envconfigs[config.provision_tox_env] = env_config
            raise tox.exception.MissingRequirement(config)
        # if provisioning is not on, now we need do a strict argument evaluation
        # raise on unknown args
        self.config._parser.parse_cli(args=self.config.args, strict=True)

    @classmethod
    def ensure_requires_satisfied(cls, config, requires, min_version):
        """Collect [tox] requires as deps and flag which are missing/outdated.

        Sets config.run_provision and returns the DepConfig list for the
        provisioning env.
        """
        missing_requirements = []
        failed_to_parse = False
        deps = []
        exists = set()
        for require in requires + [min_version]:
            # noinspection PyBroadException
            try:
                package = requirements.Requirement(require)
                # check if the package even applies
                if package.marker and not package.marker.evaluate({"extra": ""}):
                    continue
                package_name = canonicalize_name(package.name)
                if package_name not in exists:
                    deps.append(DepConfig(require, None))
                    exists.add(package_name)
                    dist = importlib_metadata.distribution(package.name)
                    if not package.specifier.contains(dist.version, prereleases=True):
                        raise MissingDependency(package)
            except requirements.InvalidRequirement as exception:
                failed_to_parse = True
                error("failed to parse {!r}".format(exception))
            except Exception as exception:
                # Anything else (not installed, wrong version, ...) means the
                # requirement is unsatisfied and provisioning is needed.
                verbosity1("could not satisfy requires {!r}".format(exception))
                missing_requirements.append(str(requirements.Requirement(require)))
        if failed_to_parse:
            raise tox.exception.BadRequirement()
        if config.option.no_provision and missing_requirements:
            msg = "provisioning explicitly disabled within {}, but missing {}"
            if config.option.no_provision is not True:  # it's a path
                msg += " and wrote to {}"
                cls.write_requires_to_json_file(config)
            raise tox.exception.Error(
                msg.format(sys.executable, missing_requirements, config.option.no_provision)
            )
        if WITHIN_PROVISION and missing_requirements:
            # A provisioned tox must not itself provision again.
            msg = "break infinite loop provisioning within {} missing {}"
            raise tox.exception.Error(msg.format(sys.executable, missing_requirements))
        config.run_provision = bool(len(missing_requirements))
        return deps

    @staticmethod
    def write_requires_to_json_file(config):
        """Dump minversion/requires to the JSON path given via --no-provision."""
        requires_dict = {
            "minversion": config.minversion,
            "requires": config.requires,
        }
        try:
            with open(config.option.no_provision, "w", encoding="utf-8") as outfile:
                json.dump(requires_dict, outfile, indent=4)
        except TypeError:  # Python 2
            with open(config.option.no_provision, "w") as outfile:
                json.dump(requires_dict, outfile, indent=4, encoding="utf-8")

    def parse_build_isolation(self, config, reader):
        """Read isolated_build settings and create the packaging env config."""
        config.isolated_build = reader.getbool("isolated_build", False)
        config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
        if config.isolated_build is True:
            name = config.isolated_build_env
            section_name = "testenv:{}".format(name)
            if section_name not in self._cfg.sections:
                self._cfg.sections[section_name] = {}
            # The packaging env gets a minimal, fixed configuration.
            self._cfg.sections[section_name]["deps"] = ""
            self._cfg.sections[section_name]["sitepackages"] = "False"
            self._cfg.sections[section_name]["description"] = "isolated packaging environment"
            config.envconfigs[name] = self.make_envconfig(
                name,
                "{}{}".format(testenvprefix, name),
                reader._subs,
                config,
            )

    def _list_section_factors(self, section):
        """Return the set of factor names used in conditional lines of *section*."""
        factors = set()
        if section in self._cfg:
            for _, value in self._cfg[section].items():
                exprs = re.findall(r"^([\w{}.!,-]+):\s+", value, re.M)
                factors.update(*mapcat(_split_factor_expr_all, exprs))
        return factors

    def make_envconfig(self, name, section, subs, config, replace=True):
        """Build a TestenvConfig for env *name* from ini *section*."""
        factors = set(name.split("-"))
        reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
        tc = TestenvConfig(name, config, factors, reader)
        reader.addsubstitutions(
            envname=name,
            envbindir=tc.get_envbindir,
            envsitepackagesdir=tc.get_envsitepackagesdir,
            envpython=tc.get_envpython,
            **subs
        )
        for env_attr in config._testenv_attr:
            atype = env_attr.type
            try:
                if atype in (
                    "bool",
                    "float",
                    "path",
                    "string",
                    "dict",
                    "dict_setenv",
                    "argv",
                    "argvlist",
                    "argv_install_command",
                ):
                    # Dispatch to the matching reader.get<type>() accessor.
                    meth = getattr(reader, "get{}".format(atype))
                    res = meth(env_attr.name, env_attr.default, replace=replace)
                elif atype == "basepython":
                    # The provisioning env must not inherit basepython from [testenv].
                    no_fallback = name in (config.provision_tox_env,)
                    res = reader.getstring(
                        env_attr.name,
                        env_attr.default,
                        replace=replace,
                        no_fallback=no_fallback,
                    )
                elif atype == "space-separated-list":
                    res = reader.getlist(env_attr.name, sep=" ")
                elif atype == "line-list":
                    res = reader.getlist(env_attr.name, sep="\n")
                elif atype == "env-list":
                    res = reader.getstring(env_attr.name, replace=False)
                    res = tuple(_split_env(res))
                else:
                    raise ValueError("unknown type {!r}".format(atype))
                if env_attr.postprocess:
                    res = env_attr.postprocess(testenv_config=tc, value=res)
            except tox.exception.MissingSubstitution as e:
                # Remember the failure; TestenvConfig re-raises it on access.
                tc._missing_subs[env_attr.name] = res = e
            # On Python 2, exceptions are handled in __getattr__
            if not six.PY2 or not isinstance(res, Exception):
                setattr(tc, env_attr.name, res)
            if atype in ("path", "string", "basepython"):
                reader.addsubstitutions(**{env_attr.name: res})
        return tc

    def _getallenvs(self, reader, extra_env_list=None):
        """Return every known env name: envlist entries, extras, testenv:* sections."""
        extra_env_list = extra_env_list or []
        env_str = reader.getstring("envlist", replace=False)
        env_list = _split_env(env_str)
        for env in extra_env_list:
            if env not in env_list:
                env_list.append(env)

        # OrderedDict keyed on name keeps insertion order while deduplicating.
        all_envs = OrderedDict((i, None) for i in env_list)
        for section in self._cfg:
            if section.name.startswith(testenvprefix):
                all_envs[section.name[len(testenvprefix) :]] = None
        if not all_envs:
            all_envs["python"] = None
        return list(all_envs.keys())

    def _getenvdata(self, reader, config):
        """Resolve the env selection from -e, TOXENV and the ini envlist.

        Returns (env_list, all_envs, default envlist, envlist_explicit).
        """
        from_option = self.config.option.env
        from_environ = os.environ.get("TOXENV")
        from_config = reader.getstring("envlist", replace=False)

        env_list = []
        envlist_explicit = False
        # "ALL" selects every known env — but never inside a parallel child run.
        if (
            (from_option and "ALL" in from_option)
            or (not from_option and from_environ and "ALL" in from_environ.split(","))
        ) and PARALLEL_ENV_VAR_KEY_PRIVATE not in os.environ:
            all_envs = self._getallenvs(reader)
        else:
            # First truthy candidate wins, in priority order.
            candidates = (
                (os.environ.get(PARALLEL_ENV_VAR_KEY_PRIVATE), True),
                (from_option, True),
                (from_environ, True),
                ("py" if self.config.option.devenv is not None else None, False),
                (from_config, False),
            )
            env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False))
            env_list = _split_env(env_str)
            all_envs = self._getallenvs(reader, env_list)

        if not env_list:
            env_list = all_envs

        provision_tox_env = config.provision_tox_env
        if config.provision_tox_env in env_list:
            msg = "provision_tox_env {} cannot be part of envlist".format(provision_tox_env)
            raise tox.exception.ConfigError(msg)

        package_env = config.isolated_build_env
        if config.isolated_build is True and package_env in all_envs:
            all_envs.remove(package_env)

        if config.isolated_build is True and package_env in env_list:
            msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
            raise tox.exception.ConfigError(msg)

        return env_list, all_envs, _split_env(from_config), envlist_explicit

    @staticmethod
    def expand_section_names(config):
        """Generative section names.

        Allow writing section as [testenv:py{36,37}-cov]
        The parser will see it as two different sections: [testenv:py36-cov], [testenv:py37-cov]
        """
        factor_re = re.compile(r"{\s*([\w\s,-]+)\s*}")
        split_re = re.compile(r"\s*,\s*")
        to_remove = set()
        for section in list(config.sections):
            split_section = factor_re.split(section)
            for parts in itertools.product(*map(split_re.split, split_section)):
                section_name = "".join(parts)
                if section_name not in config.sections:
                    config.sections[section_name] = config.sections[section]
                    to_remove.add(section)

        for section in to_remove:
            del config.sections[section]
def _split_env(env):
"""if handed a list, action="append" was used for -e"""
if env is None:
return []
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join(e for e in env if e)
env = [env]
return mapcat(_expand_envstr, env)
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
    """Strip a single leading '!' negation marker, if present."""
    if factor.startswith("!"):
        return factor[1:]
    return factor
def _split_factor_expr(expr):
    """Split a factor expression into (included, excluded) name-set pairs."""

    def categorize(partial_env):
        tokens = partial_env.split("-")
        wanted = {_base_factor_name(t) for t in tokens if not _is_negated_factor(t)}
        unwanted = {_base_factor_name(t) for t in tokens if _is_negated_factor(t)}
        return wanted, unwanted

    return [categorize(partial_env) for partial_env in _expand_envstr(expr)]
def _split_factor_expr_all(expr):
    """Return the full set of base factor names for each expanded partial env."""
    result = []
    for partial_env in _expand_envstr(expr):
        result.append({_base_factor_name(token) for token in partial_env.split("-")})
    return result
def _expand_envstr(envstr):
    """Expand a generative env string like "py{36,37}-django" into concrete names."""
    # split by commas not in groups
    tokens = _ENVSTR_SPLIT_PATTERN.split(envstr)
    envlist = ["".join(group).strip() for key, group in itertools.groupby(tokens, key=bool) if key]

    def expand(env):
        parts = [
            _WHITESPACE_PATTERN.sub("", token).split(",")
            for token in _ENVSTR_EXPAND_PATTERN.split(env)
        ]
        return ["".join(combo) for combo in itertools.product(*parts)]

    return mapcat(expand, envlist)
def mapcat(f, seq):
    """Map *f* over *seq* and flatten the resulting iterables into one list."""
    flattened = []
    for item in seq:
        flattened.extend(f(item))
    return flattened
class DepConfig:
    """A single dependency declaration, optionally pinned to an index server."""

    def __init__(self, name, indexserver=None):
        self.name = name
        self.indexserver = indexserver

    def __repr__(self):
        server = self.indexserver
        if server and server.name != "default":
            # Non-default index servers render as :server:dep.
            return ":{}:{}".format(server.name, self.name)
        if server:
            return self.name
        return str(self.name)
class IndexServerConfig:
    """Name/url pair describing a package index server."""

    def __init__(self, name, url=None):
        self.name = name
        self.url = url

    def __repr__(self):
        template = "IndexServerConfig(name={}, url={})"
        return template.format(self.name, self.url)
# Matcher for cross-section substitution references, bound as a callable.
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
# Check value matches substitution form of referencing value from other section.
# E.g. {[base]commands}
class SectionReader:
    """Read and resolve values from one ini section.

    Handles fallback sections, factor-conditional lines, and the {...}
    substitution language (env vars, posargs, cross-section references).
    """

    def __init__(
        self,
        section_name,
        cfgparser,
        fallbacksections=None,
        factors=(),
        prefix=None,
        posargs="",
    ):
        # With a prefix (setup.cfg), sections are namespaced as "prefix:name".
        if prefix is None:
            self.section_name = section_name
        else:
            self.section_name = "{}:{}".format(prefix, section_name)
        self._cfg = cfgparser
        self.fallbacksections = fallbacksections or []
        self.factors = factors
        # Registered substitution values, e.g. toxinidir, envname, ...
        self._subs = {}
        # Stack of (section, name) pairs currently being substituted; used to
        # detect recursive cross-section references.
        self._subststack = []
        self._setenv = None
        self.posargs = posargs

    def get_environ_value(self, name):
        """Look up *name* via the env's setenv dict when present, else os.environ."""
        if self._setenv is None:
            return os.environ.get(name)
        return self._setenv.get(name)

    def addsubstitutions(self, _posargs=None, **kw):
        """Register additional substitution values (and optionally posargs)."""
        self._subs.update(kw)
        if _posargs:
            self.posargs = _posargs

    def getpath(self, name, defaultpath, replace=True):
        """Read *name* as a path resolved relative to {toxinidir} (None stays None)."""
        path = self.getstring(name, defaultpath, replace=replace)
        if path is not None:
            toxinidir = self._subs["toxinidir"]
            return toxinidir.join(path, abs=True)

    def getlist(self, name, sep="\n"):
        """Read *name* and split it into a list of stripped, non-empty items."""
        s = self.getstring(name, None)
        if s is None:
            return []
        return [x.strip() for x in s.split(sep) if x.strip()]

    def getdict(self, name, default=None, sep="\n", replace=True):
        """Read *name* as "key=value" lines and return them as a dict."""
        value = self.getstring(name, None, replace=replace)
        return self._getdict(value, default=default, sep=sep, replace=replace)

    def getdict_setenv(self, name, default=None, sep="\n", replace=True):
        """Read *name* like getdict, but wrap it in (and remember) a SetenvDict."""
        value = self.getstring(name, None, replace=replace, crossonly=True)
        definitions = self._getdict(value, default=default, sep=sep, replace=replace)
        self._setenv = SetenvDict(definitions, reader=self)
        return self._setenv

    def _getdict(self, value, default, sep, replace=True):
        # Parse "key=value" lines; supports comments and "file|path" env files.
        if value is None or not replace:
            return default or {}

        env_values = {}
        for line in value.split(sep):
            if line.strip():
                if line.startswith("#"):  # comment lines are ignored
                    pass
                elif line.startswith("file|"):  # file markers contain paths to env files
                    file_path = line[5:].strip()
                    if os.path.exists(file_path):
                        with open(file_path, "rt") as file_handler:
                            content = file_handler.read()
                        env_values.update(self._getdict(content, "", sep, replace))
                else:
                    name, value = line.split("=", 1)
                    env_values[name.strip()] = value.strip()
        return env_values

    def getfloat(self, name, default=None, replace=True):
        """Read *name* as a float; KeyError when absent, ConfigError when invalid."""
        s = self.getstring(name, default, replace=replace)
        if not s or not replace:
            s = default
        if s is None:
            raise KeyError("no config value [{}] {} found".format(self.section_name, name))

        if not isinstance(s, float):
            try:
                s = float(s)
            except ValueError:
                raise tox.exception.ConfigError("{}: invalid float {!r}".format(name, s))
        return s

    def getbool(self, name, default=None, replace=True):
        """Read *name* as a bool; only 'true'/'false' (case-insensitive) accepted."""
        s = self.getstring(name, default, replace=replace)
        if not s or not replace:
            s = default
        if s is None:
            raise KeyError("no config value [{}] {} found".format(self.section_name, name))

        if not isinstance(s, bool):
            if s.lower() == "true":
                s = True
            elif s.lower() == "false":
                s = False
            else:
                raise tox.exception.ConfigError(
                    "{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s),
                )
        return s

    def getargvlist(self, name, default="", replace=True):
        """Read *name* as a list of command argvs (list of lists of strings)."""
        s = self.getstring(name, default, replace=False)
        return _ArgvlistReader.getargvlist(self, s, replace=replace, name=name)

    def getargv(self, name, default="", replace=True):
        """Read *name* as a single command argv (first entry of the argvlist)."""
        return self.getargvlist(name, default, replace=replace)[0]

    def getargv_install_command(self, name, default="", replace=True):
        """Read install_command, protecting {opts}/{packages} from substitution."""
        s = self.getstring(name, default, replace=False)
        if not s:
            # This occurs when factors are used, and a testenv doesnt have
            # a factorised value for install_command, most commonly occurring
            # if setting platform is also used.
            # An empty value causes error install_command must contain '{packages}'.
            s = default
        # Escape the placeholders so the replacer leaves them intact; they are
        # substituted later at install time.
        if "{packages}" in s:
            s = s.replace("{packages}", r"\{packages\}")
        if "{opts}" in s:
            s = s.replace("{opts}", r"\{opts\}")

        return _ArgvlistReader.getargvlist(self, s, replace=replace, name=name)[0]

    def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False):
        """Read raw string *name* from this section (or fallbacks) and resolve it."""
        x = None
        sections = [self.section_name] + ([] if no_fallback else self.fallbacksections)
        for s in sections:
            try:
                x = self._cfg[s][name]
                break
            except KeyError:
                continue

        if x is None:
            x = default
        else:
            # It is needed to apply factors before unwrapping
            # dependencies, otherwise it can break the substitution
            # process. Once they are unwrapped, we call apply factors
            # again for those new dependencies.
            x = self._apply_factors(x)
            x = self._replace_if_needed(x, name, replace, crossonly)
            x = self._apply_factors(x)

        x = self._replace_if_needed(x, name, replace, crossonly)
        return x

    def getposargs(self, default=None):
        """Return the positional args as a shell-quoted string, or *default*."""
        if self.posargs:
            posargs = self.posargs
            # Quoting rules differ between Windows and POSIX shells.
            if sys.platform.startswith("win"):
                posargs_string = list2cmdline([x for x in posargs if x])
            else:
                posargs_string = " ".join(shlex_quote(x) for x in posargs if x)
            return posargs_string
        else:
            return default or ""

    def _replace_if_needed(self, x, name, replace, crossonly):
        # Only strings (objects with .replace) participate in substitution.
        if replace and x and hasattr(x, "replace"):
            x = self._replace(x, name=name, crossonly=crossonly)
        return x

    def _apply_factors(self, s):
        """Keep only lines whose "factor-expr:" prefix matches self.factors."""

        def factor_line(line):
            m = _FACTOR_LINE_PATTERN.search(line)
            if not m:
                return line

            expr, line = m.groups()
            # A line survives if any expanded expression has all its included
            # factors present and none of its excluded ones.
            if any(
                included <= self.factors and not any(x in self.factors for x in excluded)
                for included, excluded in _split_factor_expr(expr)
            ):
                return line

        lines = s.strip().splitlines()
        return "\n".join(filter(None, map(factor_line, lines)))

    def _replace(self, value, name=None, section_name=None, crossonly=False):
        """Expand {...} substitutions in *value*, tracking recursion per (section, name)."""
        if "{" not in value:
            return value

        section_name = section_name if section_name else self.section_name
        assert name
        self._subststack.append((section_name, name))
        try:
            replaced = Replacer(self, crossonly=crossonly).do_replace(value)
            assert self._subststack.pop() == (section_name, name)
        except tox.exception.MissingSubstitution:
            # Outside testenv sections a missing substitution is a hard error;
            # inside, it is remembered and raised only when the env is run.
            if not section_name.startswith(testenvprefix):
                raise tox.exception.ConfigError(
                    "substitution env:{!r}: unknown or recursive definition in"
                    " section {!r}.".format(value, section_name),
                )
            raise
        return replaced
class Replacer:
    """Expand the {...} substitution language for a SectionReader."""

    # One substitution item: {sub_type:value:default}; sub_type and default are
    # optional and literal braces may be escaped with a backslash.
    RE_ITEM_REF = re.compile(
        r"""
        (?<!\\)[{]
        (?:(?P<sub_type>[^[:{}]+):)?    # optional sub_type for special rules
        (?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*)  # substitution key
        (?::(?P<default_value>([^{}]|\\{|\\})*))?   # default value
        [}]
        """,
        re.VERBOSE,
    )

    def __init__(self, reader, crossonly=False):
        self.reader = reader
        # When True, only {[section]key} references are expanded.
        self.crossonly = crossonly

    def do_replace(self, value):
        """
        Recursively expand substitutions starting from the innermost expression
        """

        def substitute_once(x):
            return self.RE_ITEM_REF.sub(self._replace_match, x)

        expanded = substitute_once(value)

        while expanded != value:  # substitution found
            value = expanded
            expanded = substitute_once(value)

        return expanded

    @staticmethod
    def _unescape(s):
        # Turn escaped \{ and \} back into literal braces.
        return s.replace("\\{", "{").replace("\\}", "}")

    def _replace_match(self, match):
        """Resolve a single regex match to its replacement text."""
        g = match.groupdict()
        sub_value = g["substitution_value"]
        if self.crossonly:
            if sub_value.startswith("["):
                return self._substitute_from_other_section(sub_value)
            # in crossonly we return all other hits verbatim
            start, end = match.span()
            return match.string[start:end]

        full_match = match.group(0)
        # ":" is swallowed by the regex, so the raw matched string is checked
        if full_match.startswith("{:"):
            if full_match != "{:}":
                raise tox.exception.ConfigError(
                    "Malformed substitution with prefix ':': {}".format(full_match),
                )
            return os.pathsep

        default_value = g["default_value"]
        # special case: opts and packages. Leave {opts} and
        # {packages} intact, they are replaced manually in
        # _venv.VirtualEnv.run_install_command.
        if sub_value in ("opts", "packages"):
            return "{{{}}}".format(sub_value)

        if sub_value == "posargs":
            return self.reader.getposargs(default_value)

        sub_type = g["sub_type"]
        if sub_type == "posargs":
            # {posargs:default} form — rebuild the default from value + default.
            if default_value:
                value = "{}:{}".format(sub_value, default_value)
            else:
                value = sub_value
            return self.reader.getposargs(value)

        if not sub_type and not sub_value:
            raise tox.exception.ConfigError(
                "Malformed substitution; no substitution type provided. "
                "If you were using `{}` for `os.pathsep`, please use `{:}`.",
            )

        if not sub_type and not default_value and sub_value == "/":
            return os.sep

        if sub_type == "env":
            return self._replace_env(sub_value, default_value)
        if sub_type == "tty":
            # {tty:on-value:off-value} picks a value based on interactivity.
            if is_interactive():
                return match.group("substitution_value")
            return match.group("default_value")
        if sub_type == "posargs":
            return self.reader.getposargs(sub_value)
        if sub_type is not None:
            raise tox.exception.ConfigError(
                "No support for the {} substitution type".format(sub_type),
            )
        return self._replace_substitution(sub_value)

    def _replace_env(self, key, default):
        """Resolve {env:KEY:default}; raise MissingSubstitution when unresolvable."""
        if not key:
            raise tox.exception.ConfigError("env: requires an environment variable name")
        value = self.reader.get_environ_value(key)
        if value is not None:
            return value
        if default is not None:
            return default
        raise tox.exception.MissingSubstitution(key)

    def _substitute_from_other_section(self, key):
        """Resolve a {[section]item} reference, guarding against recursion."""
        if key.startswith("[") and "]" in key:
            i = key.find("]")
            section, item = key[1:i], key[i + 1 :]
            cfg = self.reader._cfg
            if section in cfg and item in cfg[section]:
                if (section, item) in self.reader._subststack:
                    raise tox.exception.SubstitutionStackError(
                        "{} already in {}".format((section, item), self.reader._subststack),
                    )
                x = str(cfg[section][item])
                return self.reader._replace(
                    x,
                    name=item,
                    section_name=section,
                    crossonly=self.crossonly,
                )

        raise tox.exception.ConfigError("substitution key {!r} not found".format(key))

    def _replace_substitution(self, sub_key):
        """Resolve a plain {key} from registered subs or another section."""
        val = self.reader._subs.get(sub_key, None)
        if val is None:
            val = self._substitute_from_other_section(sub_key)
        # Registered subs may be callables (e.g. TestenvConfig.get_envbindir).
        if callable(val):
            val = val()
        return str(val)
def is_interactive():
    """Return True when the process' stdin is attached to a terminal."""
    stdin_stream = sys.stdin
    return stdin_stream.isatty()
class _ArgvlistReader:
    @classmethod
    def getargvlist(cls, reader, value, replace=True, name=None):
        """Parse ``commands`` argvlist multiline string.

        :param SectionReader reader: reader to be used.
        :param str value: Content stored by key.

        :rtype: list[list[str]]
        :raise :class:`tox.exception.ConfigError`:
            line-continuation ends nowhere while resolving for specified section
        """
        commands = []
        current_command = ""
        for line in value.splitlines():
            line = line.rstrip()
            if not line:
                continue
            # Trailing backslash joins the next line into the same command.
            if line.endswith("\\"):
                current_command += " {}".format(line[:-1])
                continue
            current_command += line

            if is_section_substitution(current_command):
                # {[section]key} references expand to a whole argvlist.
                replaced = reader._replace(current_command, crossonly=True, name=name)
                commands.extend(cls.getargvlist(reader, replaced, name=name))
            else:
                commands.append(cls.processcommand(reader, current_command, replace, name=name))
            current_command = ""
        else:
            # for/else: loop finished without break — a dangling continuation
            # means the last line still expected more input.
            if current_command:
                raise tox.exception.ConfigError(
                    "line-continuation ends nowhere while resolving for [{}] {}".format(
                        reader.section_name,
                        "commands",
                    ),
                )
        return commands

    @classmethod
    def processcommand(cls, reader, command, replace=True, name=None):
        """Substitute and split one command line into its argv list."""
        # Iterate through each word of the command substituting as
        # appropriate to construct the new command string. This
        # string is then broken up into exec argv components using
        # shlex.
        if replace:
            newcommand = ""
            for word in CommandParser(command).words():
                if word == "[]":
                    # "[]" is shorthand for the positional arguments.
                    newcommand += reader.getposargs()
                    continue

                new_arg = ""
                new_word = reader._replace(word, name=name)
                new_word = reader._replace(new_word, name=name)
                new_word = Replacer._unescape(new_word)
                new_arg += new_word
                newcommand += new_arg
        else:
            newcommand = command

        # Construct shlex object that will not escape any values,
        # use all values as is in argv.
        shlexer = shlex.shlex(newcommand, posix=True)
        shlexer.whitespace_split = True
        shlexer.escape = ""
        return list(shlexer)
class CommandParser(object):
    """Split a command string into words, keeping {...} substitutions intact."""

    class State(object):
        # Mutable parse state shared by the closures in words().
        def __init__(self):
            self.word = ""
            self.depth = 0  # current {} nesting depth
            self.yield_words = []

    def __init__(self, command):
        self.command = command

    def words(self):
        """Return the list of words, treating whole {...} groups as single words."""
        ps = CommandParser.State()

        def word_has_ended():
            # A word boundary: whitespace after non-whitespace, an unescaped
            # "{" at depth 0, just after a closing "}", or non-whitespace after
            # accumulated whitespace.
            return (
                (
                    cur_char in string.whitespace
                    and ps.word
                    and ps.word[-1] not in string.whitespace
                )
                or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
                or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
                or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
            )

        def yield_this_word():
            yieldword = ps.word
            ps.word = ""
            if yieldword:
                ps.yield_words.append(yieldword)

        def yield_if_word_ended():
            if word_has_ended():
                yield_this_word()

        def accumulate():
            ps.word += cur_char

        def push_substitution():
            ps.depth += 1

        def pop_substitution():
            ps.depth -= 1

        for cur_char in self.command:
            if cur_char in string.whitespace:
                # Whitespace inside a substitution group is kept verbatim.
                if ps.depth == 0:
                    yield_if_word_ended()
                accumulate()
            elif cur_char == "{":
                yield_if_word_ended()
                accumulate()
                push_substitution()
            elif cur_char == "}":
                accumulate()
                pop_substitution()
            else:
                yield_if_word_ended()
                accumulate()

        if ps.word.strip():
            yield_this_word()
        return ps.yield_words
def getcontextname():
    """Return "jenkins" when running under Jenkins/Hudson CI, otherwise None."""
    for env_var in ("JENKINS_URL", "HUDSON_URL"):
        if env_var in os.environ:
            return "jenkins"
    return None
|
helpers.py | """Supporting functions for polydata and grid objects."""
import collections.abc
import ctypes
import enum
import logging
import signal
import sys
import warnings
from threading import Thread
import threading
import traceback
import numpy as np
import scooby
import vtk
import vtk.util.numpy_support as nps
import pyvista
from .fileio import from_meshio
class FieldAssociation(enum.Enum):
    """Represents which type of vtk field a scalar or vector array is associated with."""

    # Values mirror vtkDataObject's FIELD_ASSOCIATION_* constants so members
    # can be passed directly to VTK APIs expecting those ids.
    POINT = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
    CELL = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
    NONE = vtk.vtkDataObject.FIELD_ASSOCIATION_NONE
    ROW = vtk.vtkDataObject.FIELD_ASSOCIATION_ROWS
def get_vtk_type(typ):
    """Look up the VTK type for a given python data type.

    Corrects for string type mapping issues.

    Return
    ------
    int : the integer type id specified in vtkType.h
    """
    typ = nps.get_vtk_array_type(typ)
    # This handles a silly string type bug
    # NOTE(review): remaps id 3 to 13 — presumably VTK_UNSIGNED_CHAR to
    # VTK_STRING per vtkType.h; TODO confirm against the VTK headers.
    if typ == 3:
        return 13
    return typ
def vtk_bit_array_to_char(vtkarr_bint):
    """Cast vtk bit array to a char array.

    DeepCopy into a vtkCharArray converts the packed bit values into bytes
    that downstream numpy conversion can handle.
    """
    vtkarr = vtk.vtkCharArray()
    vtkarr.DeepCopy(vtkarr_bint)
    return vtkarr
def vtk_id_list_to_array(vtk_id_list):
    """Convert a vtkIdList to a NumPy array."""
    count = vtk_id_list.GetNumberOfIds()
    ids = [vtk_id_list.GetId(i) for i in range(count)]
    return np.array(ids)
def convert_string_array(arr, name=None):
    """Convert a numpy array of strings to a vtkStringArray or vice versa.

    Note that this is terribly inefficient - inefficient support
    is better than no support :). If you have ideas on how to make this faster,
    please consider opening a pull request.
    """
    if isinstance(arr, np.ndarray):
        # numpy -> vtk: values are copied one at a time (no zero-copy path
        # exists for string arrays).
        vtkarr = vtk.vtkStringArray()
        ########### OPTIMIZE ###########
        for val in arr:
            vtkarr.InsertNextValue(val)
        ################################
        if isinstance(name, str):
            vtkarr.SetName(name)
        return vtkarr
    # Otherwise it is a vtk array and needs to be converted back to numpy
    ############### OPTIMIZE ###############
    nvalues = arr.GetNumberOfValues()
    return np.array([arr.GetValue(i) for i in range(nvalues)], dtype='|U')
    ########################################
def convert_array(arr, name=None, deep=0, array_type=None):
    """Convert a NumPy array to a vtkDataArray or vice versa.

    Parameters
    -----------
    arr : np.ndarray or vtkDataArray
        A numpy array or vtkDataArray to convert.
    name : str, optional
        The name of the data array for VTK.
    deep : bool, optional
        If the input is a numpy array, deep copy the values.
    array_type : int, optional
        VTK array type id forwarded to ``numpy_to_vtk``.

    Return
    ------
    vtkDataArray or ndarray :
        The converted array: a ``vtkDataArray`` when the input is a NumPy
        ``ndarray``, otherwise a NumPy ``ndarray``.
    """
    if arr is None:
        return
    if isinstance(arr, np.ndarray):
        # numpy -> VTK
        if arr.dtype is np.dtype('O'):
            # Object arrays cannot be handed to VTK; coerce to bytes first.
            arr = arr.astype('|S')
        arr = np.ascontiguousarray(arr)
        if arr.dtype.type in (np.str_, np.bytes_):
            # String data goes through the (slow) string converter.
            vtk_data = convert_string_array(arr)
        else:
            # Numerical data has a fast path via numpy_support.
            arr = np.ascontiguousarray(arr)
            vtk_data = nps.numpy_to_vtk(num_array=arr, deep=deep,
                                        array_type=array_type)
        if isinstance(name, str):
            vtk_data.SetName(name)
        return vtk_data
    # Otherwise input must be a vtk array type.
    if not isinstance(arr, (vtk.vtkDataArray, vtk.vtkBitArray, vtk.vtkStringArray)):
        raise TypeError('Invalid input array type ({}).'.format(type(arr)))
    if isinstance(arr, vtk.vtkBitArray):
        # Booleans: bit arrays have no numpy support, cast through chars.
        arr = vtk_bit_array_to_char(arr)
    if isinstance(arr, vtk.vtkStringArray):
        return convert_string_array(arr)
    # Plain vtkDataArray -> NumPy
    return nps.vtk_to_numpy(arr)
def is_pyvista_dataset(obj):
    """Return True if the object is a PyVista wrapped dataset."""
    return isinstance(obj, pyvista.Common) or isinstance(obj, pyvista.MultiBlock)
def point_array(mesh, name):
    """Return the named point array of a vtk object as a NumPy array."""
    return convert_array(mesh.GetPointData().GetAbstractArray(name))
def point_scalar(mesh, name):
    """Return point array of a vtk object.

    DEPRECATED: please use `point_array` instead.
    """
    # Thin deprecation shim: warn, then delegate to the new name.
    warnings.warn("DEPRECATED: please use `point_array` instead.")
    return point_array(mesh, name)
def field_array(mesh, name):
    """Return the named field array of a vtk object as a NumPy array."""
    return convert_array(mesh.GetFieldData().GetAbstractArray(name))
def field_scalar(mesh, name):
    """Return field array of a vtk object.

    DEPRECATED: please use `field_array` instead.
    """
    # Thin deprecation shim: warn, then delegate to the new name.
    warnings.warn("DEPRECATED: please use `field_array` instead.")
    return field_array(mesh, name)
def cell_array(mesh, name):
    """Return the named cell array of a vtk object as a NumPy array."""
    return convert_array(mesh.GetCellData().GetAbstractArray(name))
def cell_scalar(mesh, name):
    """Return cell array of a vtk object.

    DEPRECATED: please use `cell_array` instead.
    """
    # Thin deprecation shim: warn, then delegate to the new name.
    warnings.warn("DEPRECATED: please use `cell_array` instead.")
    return cell_array(mesh, name)
def row_array(data_object, name):
    """Return the named row array of a vtk table as a NumPy array."""
    return convert_array(data_object.GetRowData().GetAbstractArray(name))
def parse_field_choice(field):
    """Return the FieldAssociation for a field name (or pass one through)."""
    if isinstance(field, FieldAssociation):
        return field
    if not isinstance(field, str):
        raise ValueError('Data field ({}) not supported.'.format(field))
    # Accept several aliases for each association, case-insensitively.
    aliases = {
        'cell': FieldAssociation.CELL,
        'c': FieldAssociation.CELL,
        'cells': FieldAssociation.CELL,
        'point': FieldAssociation.POINT,
        'p': FieldAssociation.POINT,
        'points': FieldAssociation.POINT,
        'field': FieldAssociation.NONE,
        'f': FieldAssociation.NONE,
        'fields': FieldAssociation.NONE,
        'row': FieldAssociation.ROW,
        'r': FieldAssociation.ROW,
    }
    key = field.strip().lower()
    if key not in aliases:
        raise ValueError('Data field ({}) not supported.'.format(key))
    return aliases[key]
def get_array(mesh, name, preference='cell', info=False, err=False):
    """Search point, cell and field data for an array.

    Parameters
    ----------
    mesh : vtk dataset or vtk.vtkTable
        Dataset to search for the named array.
    name : str
        The name of the array to get the range.
    preference : str, optional
        When the name exists in more than one attribute type, this is the
        preferred array type to return. Must be either ``'point'``,
        ``'cell'``, or ``'field'``.
    info : bool
        Return info about the array (its FieldAssociation) rather than the
        array alone.
    err : bool
        Boolean to control whether to throw an error if array is not present.
    """
    def _result(array, association):
        # Pack the return value according to the `info` flag.
        return (array, association) if info else array

    # Tables only have row data.
    if isinstance(mesh, vtk.vtkTable):
        arr = row_array(mesh, name)
        if arr is None and err:
            raise KeyError('Data array ({}) not present in this dataset.'.format(name))
        return _result(arr, FieldAssociation.ROW)

    candidates = [
        (point_array(mesh, name), FieldAssociation.POINT),
        (cell_array(mesh, name), FieldAssociation.CELL),
        (field_array(mesh, name), FieldAssociation.NONE),
    ]
    preference = parse_field_choice(preference)
    found = [pair for pair in candidates if pair[0] is not None]
    if len(found) > 1:
        # Ambiguous name: `preference` decides which association wins.
        for arr, assoc in candidates:
            if assoc == preference:
                return _result(arr, assoc)
        raise ValueError('Data field ({}) not supported.'.format(preference))
    if found:
        return _result(*found[0])
    if err:
        raise KeyError('Data array ({}) not present in this dataset.'.format(name))
    return _result(None, None)
def vtk_points(points, deep=True):
    """Convert numpy points to a vtkPoints object."""
    # numpy_to_vtk requires C-contiguous memory.
    if not points.flags['C_CONTIGUOUS']:
        points = np.ascontiguousarray(points)
    vtk_pts = vtk.vtkPoints()
    vtk_pts.SetData(nps.numpy_to_vtk(points, deep=deep))
    return vtk_pts
def line_segments_from_points(points):
    """Generate non-connected line segments from points.

    Assumes points are ordered as line segments and an even number of
    points are given.

    Parameters
    ----------
    points : np.ndarray
        Points representing line segments. An even number must be given as
        every two vertices represent a single line segment. For example, two
        line segments would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])

    Returns
    -------
    lines : pyvista.PolyData
        PolyData with lines and cells.

    Examples
    --------
    This example plots two line segments at right angles to each other.

    >>> import pyvista
    >>> import numpy as np
    >>> points = np.array([[0, 0, 0], [1, 0, 0], [1, 0, 0], [1, 1, 0]])
    >>> lines = pyvista.line_segments_from_points(points)
    >>> lines.plot() # doctest:+SKIP
    """
    n_points = len(points)
    if n_points % 2 != 0:
        raise ValueError("An even number of points must be given to define each segment.")
    # Assuming ordered points, create array defining line order.
    # Each VTK line cell is [2, start_index, end_index].
    n_lines = n_points // 2
    lines = np.c_[(np.full(n_lines, 2, np.int_),
                   np.arange(0, n_points - 1, step=2),
                   np.arange(1, n_points + 1, step=2))]
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = lines
    return poly
def lines_from_points(points, close=False):
    """Make a connected line set given an array of points.

    Parameters
    ----------
    points : np.ndarray
        Points representing the vertices of the connected segments. For
        example, two line segments would be represented as:

        np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]])

    close : bool, optional
        If True, close the line segments into a loop.

    Return
    ------
    lines : pyvista.PolyData
        PolyData with lines and cells.
    """
    n_points = len(points)
    # Each cell is [2, i, i + 1]: consecutive points joined pairwise.
    cells = np.full((n_points - 1, 3), 2, dtype=np.int_)
    cells[:, 1] = np.arange(0, n_points - 1, dtype=np.int_)
    cells[:, 2] = np.arange(1, n_points, dtype=np.int_)
    if close:
        # Join the last point back to the first.
        cells = np.append(cells, [[2, n_points - 1, 0]], axis=0)
    poly = pyvista.PolyData()
    poly.points = points
    poly.lines = cells
    return poly
def vector_poly_data(orig, vec):
    """Create a vtkPolyData object composed of vectors.

    Parameters
    ----------
    orig : array-like
        Origin points of the vectors; coerced/reshaped to (n, 3).
    vec : array-like
        Vector components; coerced/reshaped to (n, 3).

    Returns
    -------
    pyvista.PolyData
        Vertex polydata with an active 'vectors' point array and an active
        'mag' (vector magnitude) scalar array.
    """
    # shape, dimension checking
    if not isinstance(orig, np.ndarray):
        orig = np.asarray(orig)
    if not isinstance(vec, np.ndarray):
        vec = np.asarray(vec)
    if orig.ndim != 2:
        orig = orig.reshape((-1, 3))
    elif orig.shape[1] != 3:
        raise ValueError('orig array must be 3D')
    if vec.ndim != 2:
        vec = vec.reshape((-1, 3))
    elif vec.shape[1] != 3:
        raise ValueError('vec array must be 3D')
    # Create vtk points and cells objects
    vpts = vtk.vtkPoints()
    vpts.SetData(nps.numpy_to_vtk(np.ascontiguousarray(orig), deep=True))
    npts = orig.shape[0]
    # One vertex cell per point: each cell is [1, point_index].
    cells = np.empty((npts, 2), dtype=pyvista.ID_TYPE)
    cells[:, 0] = 1
    cells[:, 1] = np.arange(npts, dtype=pyvista.ID_TYPE)
    vcells = pyvista.utilities.cells.CellArray(cells, npts)
    # Create vtkPolyData object
    pdata = vtk.vtkPolyData()
    pdata.SetPoints(vpts)
    pdata.SetVerts(vcells)
    # Add vectors to polydata
    name = 'vectors'
    vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(vec), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveVectors(name)
    # Add magnitude of vectors to polydata
    name = 'mag'
    scalars = (vec * vec).sum(1)**0.5
    vtkfloat = nps.numpy_to_vtk(np.ascontiguousarray(scalars), deep=True)
    vtkfloat.SetName(name)
    pdata.GetPointData().AddArray(vtkfloat)
    pdata.GetPointData().SetActiveScalars(name)
    return pyvista.PolyData(pdata)
def trans_from_matrix(matrix):
    """Convert a vtk 4x4 matrix to a numpy.ndarray."""
    return np.array([[matrix.GetElement(row, col) for col in range(4)]
                     for row in range(4)], dtype=float)
def is_meshio_mesh(mesh):
    """Test if passed object is instance of ``meshio.Mesh``."""
    try:
        import meshio
    except ImportError:
        # meshio is optional; without it nothing can be a meshio mesh.
        return False
    return isinstance(mesh, meshio.Mesh)
def wrap(vtkdataset):
    """Wrap any given VTK data object to its appropriate PyVista data object.

    Other formats that are supported include:
    * 2D :class:`numpy.ndarray` of XYZ vertices
    * 3D :class:`numpy.ndarray` representing a volume. Values will be scalars.

    Parameters
    ----------
    vtkdataset : vtk data object, numpy.ndarray, meshio mesh, or None
        Object to wrap. ``None`` is passed through unchanged.

    Returns
    -------
    The wrapped PyVista dataset, ``None``, or the original object when the
    VTK class is not supported (a warning is logged in that case).
    """
    if vtkdataset is None:
        return None
    if hasattr(vtkdataset, 'GetClassName'):
        # Assume a VTK data object was passed; dispatch on its class name.
        wrappers = {
            'vtkUnstructuredGrid': pyvista.UnstructuredGrid,
            'vtkRectilinearGrid': pyvista.RectilinearGrid,
            'vtkStructuredGrid': pyvista.StructuredGrid,
            'vtkPolyData': pyvista.PolyData,
            'vtkImageData': pyvista.UniformGrid,
            'vtkStructuredPoints': pyvista.UniformGrid,
            'vtkMultiBlockDataSet': pyvista.MultiBlock,
            'vtkTable': pyvista.Table,
            # 'vtkParametricSpline': pyvista.Spline,
        }
        key = vtkdataset.GetClassName()
        try:
            return wrappers[key](vtkdataset)
        except KeyError:
            logging.warning('VTK data type ({}) is not currently supported by pyvista.'.format(key))
            return vtkdataset  # if not supported just passes the VTK data object
    if isinstance(vtkdataset, np.ndarray):
        if vtkdataset.ndim == 1 and vtkdataset.shape[0] == 3:
            # A single XYZ point.
            return pyvista.PolyData(vtkdataset)
        if vtkdataset.ndim == 2 and vtkdataset.shape[1] == 3:
            # A point cloud of XYZ vertices.
            return pyvista.PolyData(vtkdataset)
        if vtkdataset.ndim == 3:
            # A volume: values become scalars on a uniform grid.
            mesh = pyvista.UniformGrid(vtkdataset.shape)
            mesh['values'] = vtkdataset.ravel(order='F')
            mesh.active_scalars_name = 'values'
            return mesh
        # BUG FIX: removed stray debug print(); report the shape in the
        # exception message instead.
        raise NotImplementedError(
            'NumPy array (shape {}) could not be converted to PyVista.'.format(vtkdataset.shape))
    if is_meshio_mesh(vtkdataset):
        return from_meshio(vtkdataset)
    raise NotImplementedError('Type ({}) not able to be wrapped into a PyVista mesh.'.format(type(vtkdataset)))
def image_to_texture(image):
    """Convert ``vtkImageData`` (:class:`pyvista.UniformGrid`) to a ``vtkTexture``."""
    # pyvista.Texture accepts image data directly.
    return pyvista.Texture(image)
def numpy_to_texture(image):
    """Convert a NumPy image array to a vtk.vtkTexture."""
    if isinstance(image, np.ndarray):
        return pyvista.Texture(image)
    raise TypeError('Unknown input type ({})'.format(type(image)))
def is_inside_bounds(point, bounds):
    """Check if a point is inside a set of bounds.

    This is implemented through recursion so that this is N-dimensional:
    each call consumes one coordinate and its (lower, upper) bound pair.
    """
    if isinstance(point, (int, float)):
        point = [point]
    if isinstance(point, (np.ndarray, collections.abc.Sequence)) and not isinstance(point, collections.deque):
        if len(bounds) < 2 * len(point) or len(bounds) % 2 != 0:
            raise ValueError('Bounds mismatch point dimensionality')
        # Convert to deques once, then recurse consuming from the left.
        return is_inside_bounds(collections.deque(point), collections.deque(bounds))
    if not isinstance(point, collections.deque):
        raise TypeError('Unknown input data type ({}).'.format(type(point)))
    if not point:
        # All coordinates consumed without failing a bound check.
        return True
    coord = point.popleft()
    lower = bounds.popleft()
    upper = bounds.popleft()
    if not (lower <= coord <= upper):
        return False
    return is_inside_bounds(point, bounds)
def fit_plane_to_points(points, return_meta=False):
    """Fit a plane to a set of points.

    Parameters
    ----------
    points : np.ndarray
        Size n by 3 array of points to fit a plane through.

    return_meta : bool
        If true, also returns the center and normal used to generate the plane.
    """
    data = np.array(points)
    center = data.mean(axis=0)
    # The first two right-singular vectors of the centered data span the
    # best-fit plane; their cross product gives its normal.
    svd_result = np.linalg.svd(data - center)
    normal = np.cross(svd_result[2][0], svd_result[2][1])
    plane = pyvista.Plane(center=center, direction=normal)
    return (plane, center, normal) if return_meta else plane
def raise_not_matching(scalars, mesh):
    """Raise a ValueError about an inconsistent scalar array size.

    Parameters
    ----------
    scalars : np.ndarray
        Scalar array whose size does not match the dataset.
    mesh : vtk.vtkTable or pyvista dataset
        Dataset the scalars were intended for.

    Raises
    ------
    ValueError
        Always; the message reports the mismatched sizes.
    """
    if isinstance(mesh, vtk.vtkTable):
        # BUG FIX: added the missing space after the scalar count so the
        # message no longer reads "scalars (5)must match".
        raise ValueError('Number of scalars ({}) '.format(scalars.size) +
                         'must match number of rows ' +
                         '({}).'.format(mesh.n_rows))
    raise ValueError('Number of scalars ({}) '.format(scalars.size) +
                     'must match either the number of points ' +
                     '({}) '.format(mesh.n_points) +
                     'or the number of cells ' +
                     '({}). '.format(mesh.n_cells))
def generate_plane(normal, origin):
    """Return a vtk.vtkPlane with the given normal (normalized) and origin."""
    plane = vtk.vtkPlane()
    # vtkPlane requires a unit-length normal.
    unit_normal = normal / np.linalg.norm(normal)
    plane.SetNormal(unit_normal)
    plane.SetOrigin(origin)
    return plane
def generate_report(additional=None, ncol=3, text_width=54, sort=False):
    """Generate a scooby environment report.

    DEPRECATED: Please use :class:`pyvista.Report` instead.
    """
    logging.warning('DEPRECATED: Please use `pyvista.Report` instead.')
    core_packages = ['pyvista', 'vtk', 'numpy', 'imageio', 'appdirs', 'scooby']
    optional_packages = ['matplotlib', 'PyQt5', 'IPython', 'colorcet', 'cmocean']
    return scooby.Report(core=core_packages, optional=optional_packages,
                         additional=additional, ncol=ncol,
                         text_width=text_width, sort=sort)
def try_callback(func, *args):
    """Wrap a given callback in a try statement.

    Any exception raised by ``func`` is logged as a warning (with its
    traceback) instead of propagating to the caller.
    """
    try:
        func(*args)
    except Exception:
        etype, exc, tb = sys.exc_info()
        # Drop the frame for this wrapper itself from the report.
        stack = traceback.extract_tb(tb)[1:]
        formatted_exception = (
            'Encountered issue in callback (most recent call last):\n'
            + ''.join(traceback.format_list(stack)
                      + traceback.format_exception_only(etype, exc)).rstrip('\n'))
        logging.warning(formatted_exception)
    return
def check_depth_peeling(number_of_peels=100, occlusion_ratio=0.0):
    """Check if depth peeling is available.

    Attempts to use depth peeling to see if it is available for the current
    environment. Returns ``True`` if depth peeling is available and has been
    successfully leveraged, otherwise ``False``.

    Parameters
    ----------
    number_of_peels : int
        Maximum number of peeling layers requested from the renderer.
    occlusion_ratio : float
        Occlusion ratio passed to the renderer.
    """
    # Try Depth Peeling with a basic scene
    source = vtk.vtkSphereSource()
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(source.GetOutputPort())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    # requires opacity < 1
    actor.GetProperty().SetOpacity(0.5)
    renderer = vtk.vtkRenderer()
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.AddRenderer(renderer)
    # Render off screen so no window appears during the probe.
    renderWindow.SetOffScreenRendering(True)
    renderWindow.SetAlphaBitPlanes(True)
    renderWindow.SetMultiSamples(0)
    renderer.AddActor(actor)
    renderer.SetUseDepthPeeling(True)
    renderer.SetMaximumNumberOfPeels(number_of_peels)
    renderer.SetOcclusionRatio(occlusion_ratio)
    renderWindow.Render()
    # VTK reports whether the last render actually used depth peeling.
    return renderer.GetLastRenderingUsedDepthPeeling() == 1
def threaded(fn):
    """Decorate a function to call it using a background thread.

    The returned wrapper starts ``fn`` in a new ``Thread`` and returns the
    thread object so the caller may ``join()`` it.
    """
    from functools import wraps

    # BUG FIX: without functools.wraps the decorated function lost its
    # __name__/__doc__, hurting debugging and introspection.
    @wraps(fn)
    def wrapper(*args, **kwargs):
        thread = Thread(target=fn, args=args, kwargs=kwargs)
        thread.start()
        return thread
    return wrapper
class conditional_decorator:
    """Apply a decorator to a method only when a condition holds.

    When the condition is falsy, the wrapped function is returned untouched.
    """

    def __init__(self, dec, condition):
        """Store the decorator and the condition controlling its use."""
        self.decorator = dec
        self.condition = condition

    def __call__(self, func):
        """Return ``func`` decorated when the condition holds, else unchanged."""
        if self.condition:
            return self.decorator(func)
        # Condition not met: leave the function as-is.
        return func
class ProgressMonitor():
    """A standard class for monitoring the progress of a VTK algorithm.

    This must be used in a ``with`` context and it will block keyboard
    interrupts from happening until the exit event as interrupts will crash
    the kernel if the VTK algorithm is still executing.
    """

    def __init__(self, algorithm, message="", scaling=100):
        """Initialize observer.

        Parameters
        ----------
        algorithm : vtk algorithm
            The VTK algorithm whose ProgressEvent will be observed.
        message : str
            Description shown next to the progress bar.
        scaling : int
            NOTE(review): accepted but never used in this class — confirm intent.
        """
        # Fail early if tqdm is missing; __enter__ imports it again.
        try:
            from tqdm import tqdm
        except ImportError:
            raise ImportError("Please install `tqdm` to monitor algorithms.")
        self.event_type = vtk.vtkCommand.ProgressEvent
        self.progress = 0.0
        self._last_progress = self.progress
        self.algorithm = algorithm
        self.message = message
        # Truthy once a SIGINT is captured; checked on each progress event.
        self._interrupt_signal_received = False
        self._old_progress = 0
        self._old_handler = None
        self._progress_bar = None

    def handler(self, sig, frame):
        """Pass signal to custom interrupt handler."""
        # Store (sig, frame) so the interrupt is noted without raising now.
        self._interrupt_signal_received = (sig, frame)
        logging.debug('SIGINT received. Delaying KeyboardInterrupt until '
                      'VTK algorithm finishes.')

    def __call__(self, obj, event, *args):
        """Call progress update callback.

        On an event occurrence, this function executes.
        """
        if self._interrupt_signal_received:
            # A SIGINT arrived mid-run: ask the algorithm to abort.
            obj.AbortExecuteOn()
        else:
            progress = obj.GetProgress()
            # tqdm.update takes a delta, not an absolute value.
            step = progress - self._old_progress
            self._progress_bar.update(step)
            self._old_progress = progress

    def __enter__(self):
        """Enter event for ``with`` context."""
        from tqdm import tqdm
        # check if in main thread; signal handlers may only be installed there
        if threading.current_thread().__class__.__name__ == '_MainThread':
            self._old_handler = signal.signal(signal.SIGINT, self.handler)
        self._progress_bar = tqdm(total=1, leave=True,
                                  bar_format='{l_bar}{bar}[{elapsed}<{remaining}]')
        self._progress_bar.set_description(self.message)
        self.algorithm.AddObserver(self.event_type, self)
        return self._progress_bar

    def __exit__(self, type, value, traceback):
        """Exit event for ``with`` context."""
        # Force the bar to completion before closing it.
        self._progress_bar.total = 1
        self._progress_bar.refresh()
        self._progress_bar.close()
        self.algorithm.RemoveObservers(self.event_type)
        # Restore the previous SIGINT handler (main thread only).
        if threading.current_thread().__class__.__name__ == '_MainThread':
            signal.signal(signal.SIGINT, self._old_handler)
def abstract_class(cls_):
    """Decorate a class, overriding __new__.

    Prevents the decorated class itself from being instantiated, similar to
    abc.ABCMeta but without requiring an abstract method; subclasses still
    instantiate normally.
    """
    def _guarded_new(cls, *args, **kwargs):
        # Only the decorated class itself is blocked; subclasses pass through.
        if cls is cls_:
            raise TypeError('{} is an abstract class and may not be instantiated.'
                            .format(cls.__name__))
        return object.__new__(cls)
    cls_.__new__ = _guarded_new
    return cls_
|
tests.py | from __future__ import unicode_literals
from datetime import datetime, timedelta
import threading
import warnings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import connections, DEFAULT_DB_ALIAS
from django.db import DatabaseError
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import QuerySet, EmptyQuerySet, ValuesListQuerySet, MAX_GET_RESULTS
from django.test import TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, SelfRef, ArticleSelectOnSave
class ModelInstanceCreationTests(TestCase):
    """Tests for constructing and saving Article model instances."""

    def test_object_is_not_written_to_database_until_save_was_called(self):
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        self.assertIsNone(a.id)
        self.assertEqual(Article.objects.all().count(), 0)
        # Save it into the database. You have to call save() explicitly.
        a.save()
        self.assertIsNotNone(a.id)
        self.assertEqual(Article.objects.all().count(), 1)

    def test_can_initialize_model_instance_using_positional_arguments(self):
        """
        You can initialize a model instance using positional arguments,
        which should match the field order as defined in the model.
        """
        a = Article(None, 'Second article', datetime(2005, 7, 29))
        a.save()
        self.assertEqual(a.headline, 'Second article')
        self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))

    def test_can_create_instance_using_kwargs(self):
        a = Article(
            id=None,
            headline='Third article',
            pub_date=datetime(2005, 7, 30),
        )
        a.save()
        self.assertEqual(a.headline, 'Third article')
        self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))

    def test_autofields_generate_different_values_for_each_instance(self):
        a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        self.assertNotEqual(a3.id, a1.id)
        self.assertNotEqual(a3.id, a2.id)

    def test_can_mix_and_match_position_and_kwargs(self):
        # You can also mix and match position and keyword arguments, but
        # be sure not to duplicate field information.
        a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Fourth article')

    def test_cannot_create_instance_with_invalid_kwargs(self):
        six.assertRaisesRegex(
            self,
            TypeError,
            "'foo' is an invalid keyword argument for this function",
            Article,
            id=None,
            headline='Some headline',
            pub_date=datetime(2005, 7, 31),
            foo='bar',
        )

    def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
        """
        You can leave off the value for an AutoField when creating an
        object, because it'll get filled in automatically when you save().
        """
        a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Article 5')
        self.assertNotEqual(a.id, None)

    def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
        a = Article(pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Default headline')

    def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
        """as much precision in *seconds*"""
        a1 = Article(
            headline='Article 7',
            pub_date=datetime(2005, 7, 31, 12, 30),
        )
        a1.save()
        self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
                         datetime(2005, 7, 31, 12, 30))
        a2 = Article(
            headline='Article 8',
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a2.save()
        self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
                         datetime(2005, 7, 31, 12, 30, 45))

    def test_saving_an_object_again_does_not_create_a_new_object(self):
        a = Article(headline='original', pub_date=datetime(2014, 5, 16))
        a.save()
        current_id = a.id
        # Re-saving must UPDATE, not INSERT.
        a.save()
        self.assertEqual(a.id, current_id)
        a.headline = 'Updated headline'
        a.save()
        self.assertEqual(a.id, current_id)

    def test_querysets_checking_for_membership(self):
        headlines = [
            'Area man programs in Python', 'Second article', 'Third article']
        some_pub_date = datetime(2014, 5, 16, 12, 1)
        for headline in headlines:
            Article(headline=headline, pub_date=some_pub_date).save()
        a = Article(headline='Some headline', pub_date=some_pub_date)
        a.save()
        # You can use 'in' to test for membership...
        self.assertIn(a, Article.objects.all())
        # ... but there will often be more efficient ways if that is all you need:
        self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
def test_objects_attribute_is_only_available_on_the_class_itself(self):
six.assertRaisesRegex(
self,
AttributeError,
"Manager isn't accessible via Article instances",
getattr,
Article(),
"objects",
)
self.assertFalse(hasattr(Article(), 'objects'))
self.assertTrue(hasattr(Article, 'objects'))
def test_queryset_delete_removes_all_items_in_that_queryset(self):
headlines = [
'An article', 'Article One', 'Amazing article', 'Boring article']
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Amazing article>",
"<Article: An article>",
"<Article: Article One>",
"<Article: Boring article>"])
Article.objects.filter(headline__startswith='A').delete()
self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
["<Article: Boring article>"])
def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
some_pub_date = datetime(2014, 5, 16, 12, 1)
a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
self.assertNotEqual(a1, a2)
self.assertEqual(a1, Article.objects.get(id__exact=a1.id))
self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))
def test_multiple_objects_max_num_fetched(self):
"""
#6785 - get() should fetch a limited number of results.
"""
Article.objects.bulk_create(
Article(headline='Area %s' % i, pub_date=datetime(2005, 7, 28))
for i in range(MAX_GET_RESULTS)
)
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned %d!" % MAX_GET_RESULTS,
Article.objects.get,
headline__startswith='Area',
)
Article.objects.create(headline='Area %s' % MAX_GET_RESULTS, pub_date=datetime(2005, 7, 28))
six.assertRaisesRegex(
self,
MultipleObjectsReturned,
"get\(\) returned more than one Article -- it returned more than %d!" % MAX_GET_RESULTS,
Article.objects.get,
headline__startswith='Area',
)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_microsecond_precision(self):
# In PostgreSQL, microsecond-level precision is available.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
datetime(2005, 7, 31, 12, 30, 45, 180))
@skipIfDBFeature('supports_microsecond_precision')
def test_microsecond_precision_not_supported(self):
# In MySQL, microsecond-level precision isn't available. You'll lose
# microsecond-level precision once the data is saved.
a9 = Article(
headline='Article 9',
pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
)
a9.save()
self.assertEqual(Article.objects.get(id__exact=a9.id).pub_date,
datetime(2005, 7, 31, 12, 30, 45))
def test_manually_specify_primary_key(self):
# You can manually specify the primary key when creating a new object.
a101 = Article(
id=101,
headline='Article 101',
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a101.save()
a101 = Article.objects.get(pk=101)
self.assertEqual(a101.headline, 'Article 101')
def test_create_method(self):
# You can create saved objects in a single step
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
self.assertEqual(Article.objects.get(headline="Article 10"), a10)
def test_year_lookup_edge_case(self):
# Edge-case test: A year lookup should retrieve all objects in
# the given year, including Jan. 1 and Dec. 31.
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
["<Article: Article 11>", "<Article: Article 12>"])
def test_unicode_data(self):
# Unicode data works, too.
a = Article(
headline='\u6797\u539f \u3081\u3050\u307f',
pub_date=datetime(2005, 7, 28),
)
a.save()
self.assertEqual(Article.objects.get(pk=a.id).headline,
'\u6797\u539f \u3081\u3050\u307f')
def test_hash_function(self):
# Model instances have a hash function, so they can be used in sets
# or as dictionary keys. Two models compare as equal if their primary
# keys are equal.
a10 = Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
a11 = Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
a12 = Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
s = {a10, a11, a12}
self.assertIn(Article.objects.get(headline='Article 11'), s)
def test_field_ordering(self):
"""
Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and was assuming
compared objects were both Field instances raising `AttributeError`
when it should have returned `NotImplemented`.
"""
f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertLess(f2, f1)
self.assertGreater(f3, f1)
self.assertIsNotNone(f1)
self.assertNotIn(f2, (None, 1, ''))
def test_extra_method_select_argument_with_dashes_and_values(self):
# The 'select' argument to extra() supports names with dashes in
# them, as long as you use values().
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
dicts = Article.objects.filter(
pub_date__year=2008).extra(
select={'dashed-value': '1'}).values('headline', 'dashed-value')
self.assertEqual([sorted(d.items()) for d in dicts],
[[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])
def test_extra_method_select_argument_with_dashes(self):
# If you use 'select' with extra() and names containing dashes on a
# query that's *not* a values() query, those extra 'select' values
# will silently be ignored.
Article.objects.create(
headline="Article 10",
pub_date=datetime(2005, 7, 31, 12, 30, 45),
)
Article.objects.create(
headline='Article 11',
pub_date=datetime(2008, 1, 1),
)
Article.objects.create(
headline='Article 12',
pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
)
articles = Article.objects.filter(
pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
self.assertEqual(articles[0].undashedvalue, 2)
def test_create_relation_with_ugettext_lazy(self):
"""
Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.
"""
notlazy = 'test'
lazy = ugettext_lazy(notlazy)
Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# test that assign + save works with Promise objecs
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
# test .update()
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
# still test bulk_create()
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
def test_emptyqs(self):
# Can't be instantiated
with self.assertRaises(TypeError):
EmptyQuerySet()
self.assertIsInstance(Article.objects.none(), EmptyQuerySet)
def test_emptyqs_values(self):
# test for #15959
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
qs = Article.objects.none().values_list('pk')
self.assertIsInstance(qs, EmptyQuerySet)
self.assertIsInstance(qs, ValuesListQuerySet)
self.assertEqual(len(qs), 0)
def test_emptyqs_customqs(self):
# A hacky test for custom QuerySet subclass - refs #17271
Article.objects.create(headline='foo', pub_date=datetime.now())
class CustomQuerySet(QuerySet):
def do_something(self):
return 'did something'
qs = Article.objects.all()
qs.__class__ = CustomQuerySet
qs = qs.none()
with self.assertNumQueries(0):
self.assertEqual(len(qs), 0)
self.assertIsInstance(qs, EmptyQuerySet)
self.assertEqual(qs.do_something(), 'did something')
def test_emptyqs_values_order(self):
# Tests for ticket #17712
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().filter(
id__in=Article.objects.values_list('id', flat=True))), 0)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_emptyqs_distinct(self):
# Tests for #19426
Article.objects.create(headline='foo', pub_date=datetime.now())
with self.assertNumQueries(0):
self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)
def test_ticket_20278(self):
sr = SelfRef.objects.create()
with self.assertRaises(ObjectDoesNotExist):
SelfRef.objects.get(selfref=sr)
def test_eq(self):
    """Model equality is pk-based; unsaved instances equal only themselves."""
    self.assertEqual(Article(id=1), Article(id=1))
    self.assertNotEqual(Article(id=1), object())
    self.assertNotEqual(object(), Article(id=1))
    unsaved = Article()
    self.assertEqual(unsaved, unsaved)
    self.assertNotEqual(Article(), unsaved)
def test_hash(self):
    """hash() is derived from the pk; a pk-less instance is unhashable."""
    # Value based on PK
    self.assertEqual(hash(Article(id=1)), hash(1))
    # No PK value -> unhashable, because save() would assign a pk and
    # thereby change the hash.
    self.assertRaises(TypeError, hash, Article())
class ModelLookupTest(TestCase):
    """Exercise the basic Manager lookup API: get(), filter() and pk shortcuts."""

    def setUp(self):
        # Create an Article.
        self.a = Article(
            id=None,
            headline='Area woman programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        # Save it into the database. You have to call save() explicitly.
        self.a.save()

    def test_all_lookup(self):
        # Change values by changing the attributes, then calling save().
        self.a.headline = 'Area man programs in Python'
        self.a.save()
        # Article.objects.all() returns all the articles in the database.
        self.assertQuerysetEqual(Article.objects.all(),
            ['<Article: Area man programs in Python>'])

    def test_rich_lookup(self):
        # Django provides a rich database lookup API.
        self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
        self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
        self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)

    def test_equal_lookup(self):
        # The "__exact" lookup type can be omitted, as a shortcut.
        self.assertEqual(Article.objects.get(id=self.a.id), self.a)
        self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2004),
            [],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005, pub_date__month=7),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=5),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=6),
            [],
        )

    def test_does_not_exist(self):
        # Django raises an Article.DoesNotExist exception for get() if the
        # parameters don't match any object.
        six.assertRaisesRegex(
            self,
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            id__exact=2000,
        )
        # To avoid dict-ordering related errors check only one lookup
        # in single assert.
        self.assertRaises(
            ObjectDoesNotExist,
            Article.objects.get,
            pub_date__year=2005,
            pub_date__month=8,
        )
        six.assertRaisesRegex(
            self,
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            pub_date__week_day=6,
        )

    def test_lookup_by_primary_key(self):
        # Lookup by a primary key is the most common case, so Django
        # provides a shortcut for primary-key exact lookups.
        # The following is identical to articles.get(id=a.id).
        self.assertEqual(Article.objects.get(pk=self.a.id), self.a)
        # pk can be used as a shortcut for the primary key name in any query.
        self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
            ["<Article: Area woman programs in Python>"])
        # Model instances of the same type and same ID are considered equal.
        a = Article.objects.get(pk=self.a.id)
        b = Article.objects.get(pk=self.a.id)
        self.assertEqual(a, b)

    def test_too_many(self):
        # Create a very similar object
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        a.save()
        self.assertEqual(Article.objects.count(), 2)
        # Django raises an Article.MultipleObjectsReturned exception if the
        # lookup matches more than one object. The patterns are raw strings:
        # "\(" in a plain string literal is an invalid escape sequence and
        # triggers a DeprecationWarning on modern Pythons.
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            r"get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            headline__startswith='Area',
        )
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            r"get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            pub_date__year=2005,
        )
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            r"get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            pub_date__year=2005,
            pub_date__month=7,
        )
class ConcurrentSaveTests(TransactionTestCase):
    # TransactionTestCase rather than TestCase so the second thread's DB
    # connection can actually see committed data.
    available_apps = ['basic']

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_concurrent_delete_with_save(self):
        """
        Test fetching, deleting and finally saving an object - we should get
        an insert in this case.
        """
        a = Article.objects.create(headline='foo', pub_date=datetime.now())
        exceptions = []

        def deleter():
            try:
                # Do not delete a directly - doing so alters its state.
                Article.objects.filter(pk=a.pk).delete()
            except Exception as e:
                exceptions.append(e)
            finally:
                # Close this thread's own connection so the test database can
                # be torn down cleanly afterwards.
                connections[DEFAULT_DB_ALIAS].close()
        # NOTE(review): this assertion runs before the thread is started, and
        # `exceptions` is never re-checked after join() -- confirm intended.
        self.assertEqual(len(exceptions), 0)
        t = threading.Thread(target=deleter)
        t.start()
        t.join()
        # The row was deleted by the other thread, so save() must fall back
        # to an INSERT instead of an UPDATE.
        a.save()
        self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(TestCase):
    # Every QuerySet method that BaseManager is expected to mirror onto
    # Manager. Extend this list whenever a new QuerySet method should be
    # exposed through the manager.
    QUERYSET_PROXY_METHODS = [
        'none',
        'count',
        'dates',
        'datetimes',
        'distinct',
        'extra',
        'get',
        'get_or_create',
        'update_or_create',
        'create',
        'bulk_create',
        'filter',
        'aggregate',
        'annotate',
        'complex_filter',
        'exclude',
        'in_bulk',
        'iterator',
        'earliest',
        'latest',
        'first',
        'last',
        'order_by',
        'select_for_update',
        'select_related',
        'prefetch_related',
        'values',
        'values_list',
        'update',
        'reverse',
        'defer',
        'only',
        'using',
        'exists',
        '_insert',
        '_update',
        'raw',
    ]

    def test_manager_methods(self):
        """
        This test ensures that the correct set of methods from `QuerySet`
        are copied onto `Manager`.

        It's particularly useful to prevent accidentally leaking new methods
        into `Manager`. New `QuerySet` methods that should also be copied onto
        `Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
        """
        actually_copied = sorted(BaseManager._get_queryset_methods(QuerySet).keys())
        declared = sorted(self.QUERYSET_PROXY_METHODS)
        self.assertEqual(actually_copied, declared)
class SelectOnSaveTests(TestCase):
    def test_select_on_save(self):
        # A plain model saves with a single UPDATE.
        a1 = Article.objects.create(pub_date=datetime.now())
        with self.assertNumQueries(1):
            a1.save()
        # A select_on_save model issues two queries on save().
        asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
        with self.assertNumQueries(2):
            asos.save()
        # force_update bypasses the extra query.
        with self.assertNumQueries(1):
            asos.save(force_update=True)
        Article.objects.all().delete()
        # Forcing an update on a row that no longer exists must raise after
        # exactly one query.
        with self.assertRaises(DatabaseError):
            with self.assertNumQueries(1):
                asos.save(force_update=True)

    def test_select_on_save_lying_update(self):
        """
        Test that select_on_save works correctly if the database
        doesn't return correct information about matched rows from
        UPDATE.
        """
        # Change the manager to not return "row matched" for update().
        # We are going to change the Article's _base_manager class
        # dynamically. This is a bit of a hack, but it seems hard to
        # test this properly otherwise. Article's manager, because
        # proxy models use their parent model's _base_manager.
        orig_class = Article._base_manager.__class__

        class FakeQuerySet(QuerySet):
            # Make sure the _update method below is in fact called.
            called = False

            def _update(self, *args, **kwargs):
                FakeQuerySet.called = True
                super(FakeQuerySet, self)._update(*args, **kwargs)
                # Lie: always report zero matched rows, whatever the real
                # UPDATE did.
                return 0

        class FakeManager(orig_class):
            def get_queryset(self):
                return FakeQuerySet(self.model)
        try:
            Article._base_manager.__class__ = FakeManager
            asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
            # 3 queries expected here -- presumably the select-on-save SELECT,
            # the lying UPDATE and a fallback query; confirm against
            # Model.save_base().
            with self.assertNumQueries(3):
                asos.save()
                self.assertTrue(FakeQuerySet.called)
            # This is not wanted behavior, but this is how Django has always
            # behaved for databases that do not return correct information
            # about matched rows for UPDATE.
            with self.assertRaises(DatabaseError):
                asos.save(force_update=True)
            with self.assertRaises(DatabaseError):
                asos.save(update_fields=['pub_date'])
        finally:
            # Always restore the real base manager class, even on failure.
            Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
    def _truncate_ms(self, val):
        # MySQL < 5.6.4 removes microseconds from the datetimes which can cause
        # problems when comparing the original value to that loaded from DB
        return val - timedelta(microseconds=val.microsecond)

    def test_refresh(self):
        # refresh_from_db() reloads every field in one query.
        a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        Article.objects.filter(pk=a.pk).update(headline='new headline')
        with self.assertNumQueries(1):
            a.refresh_from_db()
        self.assertEqual(a.headline, 'new headline')
        orig_pub_date = a.pub_date
        new_pub_date = a.pub_date + timedelta(10)
        Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
        with self.assertNumQueries(1):
            # Partial refresh: only 'headline' is reloaded...
            a.refresh_from_db(fields=['headline'])
        self.assertEqual(a.headline, 'new headline 2')
        # ...so pub_date still holds the stale in-memory value.
        self.assertEqual(a.pub_date, orig_pub_date)
        with self.assertNumQueries(1):
            a.refresh_from_db()
        self.assertEqual(a.pub_date, new_pub_date)

    def test_refresh_fk(self):
        s1 = SelfRef.objects.create()
        s2 = SelfRef.objects.create()
        s3 = SelfRef.objects.create(selfref=s1)
        s3_copy = SelfRef.objects.get(pk=s3.pk)
        # Mark the cached related instance so we can tell if it survives.
        s3_copy.selfref.touched = True
        s3.selfref = s2
        s3.save()
        with self.assertNumQueries(1):
            s3_copy.refresh_from_db()
        with self.assertNumQueries(1):
            # The old related instance was thrown away (the selfref_id has
            # changed). It needs to be reloaded on access, so one query
            # executed.
            self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
            self.assertEqual(s3_copy.selfref, s2)

    def test_refresh_unsaved(self):
        # A fresh instance that only carries an existing pk can still be
        # populated from the DB via refresh_from_db().
        pub_date = self._truncate_ms(datetime.now())
        a = Article.objects.create(pub_date=pub_date)
        a2 = Article(id=a.pk)
        with self.assertNumQueries(1):
            a2.refresh_from_db()
        self.assertEqual(a2.pub_date, pub_date)
        self.assertEqual(a2._state.db, "default")

    def test_refresh_no_fields(self):
        # An explicitly empty field list is a no-op: zero queries.
        a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        with self.assertNumQueries(0):
            a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(TestCase):
    def test_field_related_deprecation(self):
        """Accessing field.related warns exactly once and yields a ForeignObjectRel."""
        field = SelfRef._meta.get_field('selfref')
        with warnings.catch_warnings(record=True) as recorded:
            warnings.simplefilter('always')
            self.assertIsInstance(field.related, ForeignObjectRel)
        self.assertEqual(len(recorded), 1)
        message = str(recorded.pop().message)
        self.assertEqual(
            message,
            'Usage of field.related has been deprecated. Use field.rel instead.'
        )
|
ydlhandler.py | import os
from queue import Queue
from threading import Thread
import subprocess
import io
import importlib
import json
from time import sleep
from datetime import datetime
import sys
from subprocess import Popen, PIPE, STDOUT
from ydl_server.logdb import JobsDB, Job, Actions, JobType
def reload_youtube_dl():
    """Reload every already-imported module whose name contains "youtube".

    Used after a pip upgrade so the running process picks up the new code.
    A snapshot of the module names is taken first because reload() can
    mutate sys.modules while we iterate.
    """
    for name in list(sys.modules):
        if "youtube" not in name:
            continue
        try:
            importlib.reload(sys.modules[name])
        except ModuleNotFoundError:
            print("ModuleNotFoundError:\n" + name)
def get_ydl_website(ydl_module_name):
    """Return the home page URL recorded in the installed package's metadata.

    Falls back to "" when the package is not installed or carries no
    "home-page" entry. Relies on pip's internal `show` helper -- TODO
    confirm this keeps working across pip upgrades.
    """
    import pip._internal.commands.show as pipshow

    results = list(pipshow.search_packages_info([ydl_module_name]))
    if not results or "home-page" not in results[0]:
        return ""
    return results[0]["home-page"]
def read_proc_stdout(proc, strio):
    """Copy the currently available stdout bytes of *proc* into *strio*.

    read1() performs at most one underlying read, so this drains what is
    buffered without waiting for the process to finish.
    """
    chunk = proc.stdout.read1()
    strio.write(chunk.decode())
class YdlHandler:
    """Background download queue around a youtube-dl-compatible CLI module.

    A single worker thread drains an internal job queue, shells out to the
    selected downloader and reports status/log updates through ``jobshandler``.
    """

    def __init__(self, app_config, jobshandler):
        # Work queue consumed by the worker thread created in start().
        self.queue = Queue()
        self.thread = None
        self.done = False
        self.ydl_module = None
        self.ydl_module_name = None
        self.app_config = app_config
        self.jobshandler = jobshandler
        self.app_config["ydl_last_update"] = datetime.now()
        # Candidate downloader implementations; YOUTUBE_DL env var can force
        # one, otherwise the first importable wins, in this order.
        modules = ["youtube-dl", "youtube-dlc", "yt-dlp"]
        if os.environ.get("YOUTUBE_DL") in modules:
            self.ydl_module = importlib.import_module(os.environ.get("YOUTUBE_DL").replace("-", "_"))
        else:
            for module in modules:
                try:
                    self.ydl_module = importlib.import_module(module.replace("-", "_"))
                    break
                except ImportError:
                    pass
        if self.ydl_module is None:
            raise ImportError("No youtube_dl implementation found")
        # CLI/package name uses dashes, the importable module uses underscores.
        self.ydl_module_name = self.ydl_module.__name__.replace("_", "-")
        self.ydl_website = get_ydl_website(self.ydl_module_name)
        print("Using {} module".format(self.ydl_module_name))

    def start(self):
        # Launch the single worker thread that processes queued jobs.
        self.thread = Thread(target=self.worker)
        self.thread.start()

    def put(self, obj):
        # Enqueue a job for the worker thread.
        self.queue.put(obj)

    def finish(self):
        # NOTE(review): only flips the flag; a worker blocked in queue.get()
        # will not observe it until another item arrives -- confirm intended.
        self.done = True

    def worker(self):
        # Main loop of the worker thread: take a job, run it, report back.
        while not self.done:
            job = self.queue.get()
            job.status = Job.RUNNING
            self.jobshandler.put((Actions.SET_STATUS, (job.id, job.status)))
            if job.type == JobType.YDL_DOWNLOAD:
                # Collected CLI output; download_log_update() streams it into
                # the job log while the process runs.
                output = io.StringIO()
                try:
                    self.download(job, {"format": job.format}, output)
                except Exception as e:
                    job.status = Job.FAILED
                    job.log = "Error during download task:\n{}:\n\t{}".format(type(e).__name__, str(e))
                    print("Error during download task:\n{}:\n\t{}".format(type(e).__name__, str(e)))
            elif job.type == JobType.YDL_UPDATE:
                rc, log = self.update()
                job.log = Job.clean_logs(log)
                job.status = Job.COMPLETED if rc == 0 else Job.FAILED
            self.jobshandler.put((Actions.UPDATE, job))
            self.queue.task_done()

    def update(self):
        # Upgrade the downloader package in place with pip, then hot-reload
        # its modules so the running process uses the new version.
        if os.environ.get("YDL_PYTHONPATH"):
            # Install into the dedicated target directory when configured.
            command = [
                "pip",
                "install",
                "--no-cache-dir",
                "-t",
                os.environ.get("YDL_PYTHONPATH"),
                "--upgrade",
                self.ydl_module_name,
            ]
        else:
            command = ["pip", "install", "--no-cache-dir", "--upgrade", self.ydl_module_name]
        proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # stderr is folded into stdout above, so err is always None here.
        out, err = proc.communicate()
        if proc.wait() == 0:
            self.app_config["ydl_last_update"] = datetime.now()
            reload_youtube_dl()
        return proc.returncode, str(out.decode("utf-8"))

    def get_ydl_options(self, ydl_config, request_options):
        # Merge the configured defaults with the per-request format choice.
        # Works on a copy so the stored configuration is never mutated.
        ydl_config = ydl_config.copy()
        req_format = request_options.get("format")
        if req_format is None:
            req_format = "best"
        if req_format.startswith("audio/"):
            # "audio/<fmt>" maps to --extract-audio --audio-format <fmt>.
            ydl_config.update({"extract-audio": None})
            ydl_config.update({"audio-format": req_format.split("/")[-1]})
        elif req_format.startswith("video/"):
            # youtube-dl downloads BEST video and audio by default
            if req_format != "video/best":
                ydl_config.update({"format": req_format.split("/")[-1]})
        else:
            # Anything else is passed straight through as --format.
            ydl_config.update({"format": req_format})
        return ydl_config

    def download_log_update(self, job, proc, strio):
        # Poll the process output every 3 seconds and push the cleaned log
        # upstream while the job is still marked RUNNING.
        while job.status == Job.RUNNING:
            read_proc_stdout(proc, strio)
            job.log = Job.clean_logs(strio.getvalue())
            self.jobshandler.put((Actions.SET_LOG, (job.id, job.log)))
            sleep(3)

    def fetch_metadata(self, url):
        # Dump the media/playlist metadata as JSON without downloading
        # anything (-J --flat-playlist). Returns (0, dict) on success or
        # (-1, stderr-text) on failure.
        ydl_opts = self.app_config.get("ydl_options", {})
        cmd = self.get_ydl_full_cmd(ydl_opts, url)
        cmd.extend(["-J", "--flat-playlist"])
        proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate()
        if proc.wait() != 0:
            return -1, stderr.decode()
        return 0, json.loads(stdout)

    def get_ydl_full_cmd(self, opt_dict, url):
        # Build the argv list: module name, then each option as "--key
        # [value]". A value of None (or True) becomes a bare flag; options
        # whose value is False are dropped entirely.
        cmd = [self.ydl_module_name]
        if opt_dict is not None:
            for key, val in opt_dict.items():
                if isinstance(val, bool) and not val:
                    continue
                cmd.append("--{}".format(key))
                if val is not None and not isinstance(val, bool):
                    cmd.append(str(val))
        cmd.append(url)
        return cmd

    def download(self, job, request_options, output):
        # Run a download job to completion, streaming logs via a side thread.
        ydl_opts = self.get_ydl_options(self.app_config.get("ydl_options", {}), request_options)
        cmd = self.get_ydl_full_cmd(ydl_opts, job.url)
        rc, metadata = self.fetch_metadata(job.url)
        if rc != 0:
            job.log = Job.clean_logs(metadata)
            job.status = Job.FAILED
            raise Exception(job.log)
        self.jobshandler.put((Actions.SET_NAME, (job.id, metadata.get("title", job.url))))
        if metadata.get("_type") == "playlist":
            # Playlists may use a dedicated output template; rebuild the
            # command with it.
            ydl_opts.update({"output": self.app_config["ydl_server"].get("output_playlist", ydl_opts.get("output"))})
            cmd = self.get_ydl_full_cmd(ydl_opts, job.url)
        proc = Popen(cmd, stdout=PIPE, stderr=STDOUT)
        stdout_thread = Thread(target=self.download_log_update, args=(job, proc, output))
        stdout_thread.start()
        if proc.wait() == 0:
            # Drain whatever is left in the pipe after exit.
            read_proc_stdout(proc, output)
            job.log = Job.clean_logs(output.getvalue())
            job.status = Job.COMPLETED
        else:
            read_proc_stdout(proc, output)
            job.log = Job.clean_logs(output.getvalue())
            job.status = Job.FAILED
            print("Error during download task:\n" + output.getvalue())
        # Setting job.status above stops download_log_update()'s loop.
        stdout_thread.join()

    def resume_pending(self):
        # On startup, re-queue downloads that were interrupted and mark
        # interrupted update jobs as failed.
        db = JobsDB(readonly=False)
        jobs = db.get_all(self.app_config["ydl_server"].get("max_log_entries", 100))
        not_endeds = [job for job in jobs if job["status"] == "Pending" or job["status"] == "Running"]
        for pending in not_endeds:
            if int(pending["type"]) == JobType.YDL_UPDATE:
                self.jobshandler.put((Actions.SET_STATUS, (pending["id"], Job.FAILED)))
            else:
                job = Job(
                    pending["name"],
                    Job.PENDING,
                    "Queue stopped",
                    int(pending["type"]),
                    pending["format"],
                    pending["url"],
                )
                job.id = pending["id"]
                self.jobshandler.put((Actions.RESUME, job))

    def join(self):
        # Block until the worker thread (if any) terminates.
        if self.thread is not None:
            return self.thread.join()

    def get_ydl_version(self):
        # Version string of the backing downloader module.
        return self.ydl_module.version.__version__

    def get_ydl_extractors(self):
        # Names of all working extractors, honoring the configured age limit.
        return [
            ie.IE_NAME
            for ie in self.ydl_module.extractor.list_extractors(self.app_config["ydl_options"].get("age-limit"))
            if ie._WORKING
        ]
|
build.py | ## @file
# build a platform or a module
#
# Copyright (c) 2014, Hewlett-Packard Development Company, L.P.<BR>
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# Copyright (c) 2018, Hewlett Packard Enterprise Development, L.P.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import print_function
import Common.LongFilePathOs as os
import re
import sys
import glob
import time
import platform
import traceback
import encodings.ascii
import multiprocessing
from struct import *
from threading import *
import threading
from optparse import OptionParser
from subprocess import *
from Common import Misc as Utils
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.TargetTxtClassObject import TargetTxtClassObject
from Common.ToolDefClassObject import ToolDefClassObject
from Common.DataType import *
from Common.BuildVersion import gBUILD_VERSION
from AutoGen.AutoGen import *
from Common.BuildToolError import *
from Workspace.WorkspaceDatabase import WorkspaceDatabase
from Common.MultipleWorkspace import MultipleWorkspace as mws
from BuildReport import BuildReport
from GenPatchPcdTable.GenPatchPcdTable import *
from PatchPcdValue.PatchPcdValue import *
import Common.EdkLogger
import Common.GlobalData as GlobalData
from GenFds.GenFds import GenFds, GenFdsApi
from collections import OrderedDict, defaultdict
# Version and Copyright
VersionNumber = "0.60" + ' ' + gBUILD_VERSION  # tool version plus the VCS build tag
__version__ = "%prog Version " + VersionNumber  # optparse substitutes %prog with the program name
__copyright__ = "Copyright (c) 2007 - 2018, Intel Corporation All rights reserved."
## standard targets of build command
gSupportedTarget = ['all', 'genc', 'genmake', 'modules', 'libraries', 'fds', 'clean', 'cleanall', 'cleanlib', 'run']
## build configuration file
gBuildConfiguration = "target.txt"
gToolsDefinition = "tools_def.txt"
# Matches auto-generated temporary table names of the form _<num>_<num>_<hex>.
TemporaryTablePattern = re.compile(r'^_\d+_\d+_[a-fA-F0-9]+$')
# Cache of temporary tables -- presumably populated elsewhere in this file; confirm usage.
TmpTableDict = {}
## Check environment PATH variable to make sure the specified tool is found
#
# If the tool is found in the PATH, then True is returned
# Otherwise, False is returned
#
def IsToolInPath(tool):
    # On Windows, PATHEXT lists the executable suffixes to try; elsewhere
    # the bare name is checked as-is.
    if 'PATHEXT' in os.environ:
        extns = os.environ['PATHEXT'].split(os.path.pathsep)
    else:
        extns = ('',)
    # Short-circuits on the first directory/extension combination that exists.
    return any(
        os.path.exists(os.path.join(pathDir, tool + ext))
        for pathDir in os.environ['PATH'].split(os.path.pathsep)
        for ext in extns
    )
## Check environment variables
#
# Check environment variables that must be set for build. Currently they are
#
# WORKSPACE The directory all packages/platforms start from
# EDK_TOOLS_PATH The directory contains all tools needed by the build
# PATH $(EDK_TOOLS_PATH)/Bin/<sys> must be set in PATH
#
# If any of above environment variable is not set or has error, the build
# will be broken.
#
def CheckEnvVariable():
    # check WORKSPACE
    if "WORKSPACE" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="WORKSPACE")
    WorkspaceDir = os.path.normcase(os.path.normpath(os.environ["WORKSPACE"]))
    if not os.path.exists(WorkspaceDir):
        EdkLogger.error("build", FILE_NOT_FOUND, "WORKSPACE doesn't exist", ExtraData=WorkspaceDir)
    elif ' ' in WorkspaceDir:
        EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in WORKSPACE path",
                        ExtraData=WorkspaceDir)
    os.environ["WORKSPACE"] = WorkspaceDir
    # set multiple workspace
    PackagesPath = os.getenv("PACKAGES_PATH")
    mws.setWs(WorkspaceDir, PackagesPath)
    if mws.PACKAGES_PATH:
        for Path in mws.PACKAGES_PATH:
            if not os.path.exists(Path):
                EdkLogger.error("build", FILE_NOT_FOUND, "One Path in PACKAGES_PATH doesn't exist", ExtraData=Path)
            elif ' ' in Path:
                EdkLogger.error("build", FORMAT_NOT_SUPPORTED, "No space is allowed in PACKAGES_PATH", ExtraData=Path)
    # check EDK_TOOLS_PATH *before* reading it: the previous code normalized
    # the value first, which raised a bare KeyError instead of the friendly
    # EdkLogger error when the variable was unset.
    if "EDK_TOOLS_PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="EDK_TOOLS_PATH")
    os.environ["EDK_TOOLS_PATH"] = os.path.normcase(os.environ["EDK_TOOLS_PATH"])
    # check PATH
    if "PATH" not in os.environ:
        EdkLogger.error("build", ATTRIBUTE_NOT_AVAILABLE, "Environment variable not found",
                        ExtraData="PATH")
    # Publish the validated values for the rest of the build.
    GlobalData.gWorkspace = WorkspaceDir
    GlobalData.gGlobalDefines["WORKSPACE"] = WorkspaceDir
    GlobalData.gGlobalDefines["EDK_TOOLS_PATH"] = os.environ["EDK_TOOLS_PATH"]
## Get normalized file path
#
# Convert the path to be local format, and remove the WORKSPACE path at the
# beginning if the file path is given in full path.
#
# @param FilePath File path to be normalized
# @param Workspace Workspace path which the FilePath will be checked against
#
# @retval string The normalized file path
#
def NormFile(FilePath, Workspace):
    # Absolute paths are normalized directly; relative paths are resolved
    # against the (possibly multiple) workspace first.
    if os.path.isabs(FilePath):
        FileFullPath = os.path.normpath(FilePath)
    else:
        FileFullPath = os.path.normpath(mws.join(Workspace, FilePath))
        Workspace = mws.getWs(Workspace, FilePath)
    # the file must exist once fully resolved
    if not os.path.isfile(FileFullPath):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData="\t%s (Please give file in absolute path or relative to WORKSPACE)" % FileFullPath)
    # Strip the workspace prefix; account for a trailing path separator on
    # the workspace so the result never starts with one.
    if Workspace[-1] in "\\/":
        PrefixLength = len(Workspace)
    else:
        PrefixLength = len(Workspace) + 1
    return FileFullPath[PrefixLength:]
## Get the output of an external program
#
# This is the entrance method of thread reading output of an external program and
# putting them in STDOUT/STDERR of current program.
#
# @param From The stream message read from (binary pipe of the child process)
# @param To The callable each decoded line is handed to (e.g. EdkLogger.info)
# @param ExitFlag threading.Event used to request an early stop
#
def ReadMessage(From, To, ExitFlag):
    while True:
        # read one line a time; readline() returns an empty bytes object at
        # EOF, never None, so a simple truthiness test is the whole check
        # (the old "Line is not None" comparison was dead code).
        Line = From.readline()
        if not Line:
            break
        To(Line.rstrip().decode(encoding='utf-8', errors='ignore'))
        # is_set() replaces isSet(), deprecated since Python 3.10.
        if ExitFlag.is_set():
            break
## Launch an external program
#
# This method will call subprocess.Popen to execute an external program with
# given options in specified directory. Because of the dead-lock issue during
# redirecting output of the external program, threads are used to do the
# redirection work.
#
# @param Command A list or string containing the call of the program
# @param WorkingDir The directory in which the program will be running
#
# @retval string Elapsed wall-clock time such as "1234ms"
#
def LaunchCommand(Command, WorkingDir):
    BeginTime = time.time()
    # if working directory doesn't exist, Popen() will raise an exception
    if not os.path.isdir(WorkingDir):
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=WorkingDir)

    # Command is used as the first Argument in following Popen().
    # It could be a string or sequence. We find that if command is a string in following Popen(),
    # ubuntu may fail with an error message that the command is not found.
    # So here we may need convert command from string to list instance.
    if platform.system() != 'Windows':
        if not isinstance(Command, list):
            Command = Command.split()
        Command = ' '.join(Command)

    Proc = None
    EndOfProcedure = None
    # Initialize the reader threads to None so the join logic below can never
    # hit an unbound local when Popen() or thread creation fails part-way.
    StdOutThread = None
    StdErrThread = None
    try:
        # launch the command
        Proc = Popen(Command, stdout=PIPE, stderr=PIPE, env=os.environ, cwd=WorkingDir, bufsize=-1, shell=True)
        # launch two threads to read the STDOUT and STDERR
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Proc.stdout:
            StdOutThread = Thread(target=ReadMessage, args=(Proc.stdout, EdkLogger.info, EndOfProcedure))
            StdOutThread.setName("STDOUT-Redirector")
            StdOutThread.setDaemon(False)
            StdOutThread.start()
        if Proc.stderr:
            StdErrThread = Thread(target=ReadMessage, args=(Proc.stderr, EdkLogger.quiet, EndOfProcedure))
            StdErrThread.setName("STDERR-Redirector")
            StdErrThread.setDaemon(False)
            StdErrThread.start()
        # waiting for program exit
        Proc.wait()
    except: # in case of aborting
        # terminate the threads redirecting the program output
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        if EndOfProcedure is not None:
            EndOfProcedure.set()
        if Proc is None:
            if not isinstance(Command, type("")):
                Command = " ".join(Command)
            # NOTE: EdkLogger.error is expected to raise, ending this call --
            # confirm against EdkLogger's implementation.
            EdkLogger.error("build", COMMAND_FAILURE, "Failed to start command", ExtraData="%s [%s]" % (Command, WorkingDir))

    if StdOutThread is not None:
        StdOutThread.join()
    if StdErrThread is not None:
        StdErrThread.join()

    # check the return code of the program
    if Proc.returncode != 0:
        if not isinstance(Command, type("")):
            Command = " ".join(Command)
        # print out the Response file and its content when make failure
        RespFile = os.path.join(WorkingDir, 'OUTPUT', 'respfilelist.txt')
        if os.path.isfile(RespFile):
            # context manager guarantees the file is closed even if read() fails
            with open(RespFile) as f:
                RespContent = f.read()
            EdkLogger.info(RespContent)
        EdkLogger.error("build", COMMAND_FAILURE, ExtraData="%s [%s]" % (Command, WorkingDir))
    return "%dms" % (int(round((time.time() - BeginTime) * 1000)))
## The smallest unit that can be built in multi-thread build mode
#
# This is the base class of build unit. The "Obj" parameter must provide
# __str__(), __eq__() and __hash__() methods. Otherwise there could be build units
# missing build.
#
# Currently the "Obj" should be only ModuleAutoGen or PlatformAutoGen objects.
#
class BuildUnit:
    ## The constructor
    #
    # @param self The object pointer
    # @param Obj The object the build is working on
    # @param BuildCommand The command list used to build Obj
    # @param Target The build target name, one of gSupportedTarget
    # @param Dependency The BuildUnit(s) which must be completed in advance
    # @param WorkingDir The directory build command starts in
    #
    def __init__(self, Obj, BuildCommand, Target, Dependency, WorkingDir="."):
        self.BuildObject = Obj
        self.Dependency = Dependency
        self.WorkingDir = WorkingDir
        self.Target = Target
        self.BuildCommand = BuildCommand
        if not BuildCommand:
            EdkLogger.error("build", OPTION_MISSING,
                            "No build command found for this module. "
                            "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                                (Obj.BuildTarget, Obj.ToolChain, Obj.Arch),
                            ExtraData=str(Obj))

    ## str() method
    #
    # It just returns the string representation of self.BuildObject
    #
    # @param self The object pointer
    #
    def __str__(self):
        return str(self.BuildObject)

    ## "==" operator method
    #
    # It compares self.BuildObject with "Other", so self.BuildObject must
    # provide its own __eq__() method. The result is forced to bool: the
    # previous short-circuit form leaked non-boolean values (e.g. None)
    # to callers when "Other" was falsy.
    #
    # @param self The object pointer
    # @param Other The other BuildUnit object compared to
    #
    def __eq__(self, Other):
        return bool(Other and self.BuildObject == Other.BuildObject
                    and Other.BuildObject
                    and self.BuildObject.Arch == Other.BuildObject.Arch)

    ## hash() method
    #
    # It just returns the hash value of self.BuildObject which must be hashable.
    #
    # @param self The object pointer
    #
    def __hash__(self):
        return hash(self.BuildObject) + hash(self.BuildObject.Arch)

    def __repr__(self):
        return repr(self.BuildObject)
## The smallest module unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for module build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only ModuleAutoGen object.
#
class ModuleMakeUnit(BuildUnit):
    ## The constructor
    #
    # @param self The object pointer
    # @param Obj The ModuleAutoGen object the build is working on
    # @param Target The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # Every library this module links against must be built first.
        Deps = [ModuleMakeUnit(LibraryAutoGen, Target) for LibraryAutoGen in Obj.LibraryAutoGenList]
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Deps, Obj.MakeFileDir)
        # For the default/"all" target, modules are driven via "tbuild".
        if Target in (None, "", "all"):
            self.Target = "tbuild"
## The smallest platform unit that can be built by nmake/make command in multi-thread build mode
#
# This class is for platform build by nmake/make build system. The "Obj" parameter
# must provide __str__(), __eq__() and __hash__() methods. Otherwise there could
# be make units missing build.
#
# Currently the "Obj" should be only PlatformAutoGen object.
#
class PlatformMakeUnit(BuildUnit):
    ## The constructor
    #
    # @param self The object pointer
    # @param Obj The PlatformAutoGen object the build is working on
    # @param Target The build target name, one of gSupportedTarget
    #
    def __init__(self, Obj, Target):
        # Read the library/module lists from the Obj parameter directly:
        # self.BuildObject is only assigned inside BuildUnit.__init__ below,
        # so the previous self.BuildObject access raised AttributeError here.
        Dependency = [ModuleMakeUnit(Lib, Target) for Lib in Obj.LibraryAutoGenList]
        Dependency.extend([ModuleMakeUnit(Mod, Target) for Mod in Obj.ModuleAutoGenList])
        BuildUnit.__init__(self, Obj, Obj.BuildCommand, Target, Dependency, Obj.MakeFileDir)
## The class representing the task of a module build or platform build
#
# This class manages the build tasks in multi-thread build mode. Its jobs include
# scheduling thread running, catching thread error, monitor the thread status, etc.
#
class BuildTask:
# queue for tasks waiting for schedule
_PendingQueue = OrderedDict()
_PendingQueueLock = threading.Lock()
# queue for tasks ready for running
_ReadyQueue = OrderedDict()
_ReadyQueueLock = threading.Lock()
# queue for run tasks
_RunningQueue = OrderedDict()
_RunningQueueLock = threading.Lock()
# queue containing all build tasks, in case duplicate build
_TaskQueue = OrderedDict()
# flag indicating error occurs in a running thread
_ErrorFlag = threading.Event()
_ErrorFlag.clear()
_ErrorMessage = ""
# BoundedSemaphore object used to control the number of running threads
_Thread = None
# flag indicating if the scheduler is started or not
_SchedulerStopped = threading.Event()
_SchedulerStopped.set()
## Start the task scheduler thread
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def StartScheduler(MaxThreadNumber, ExitFlag):
SchedulerThread = Thread(target=BuildTask.Scheduler, args=(MaxThreadNumber, ExitFlag))
SchedulerThread.setName("Build-Task-Scheduler")
SchedulerThread.setDaemon(False)
SchedulerThread.start()
# wait for the scheduler to be started, especially useful in Linux
while not BuildTask.IsOnGoing():
time.sleep(0.01)
## Scheduler method
#
# @param MaxThreadNumber The maximum thread number
# @param ExitFlag Flag used to end the scheduler
#
@staticmethod
def Scheduler(MaxThreadNumber, ExitFlag):
BuildTask._SchedulerStopped.clear()
try:
# use BoundedSemaphore to control the maximum running threads
BuildTask._Thread = BoundedSemaphore(MaxThreadNumber)
#
# scheduling loop, which will exits when no pending/ready task and
# indicated to do so, or there's error in running thread
#
while (len(BuildTask._PendingQueue) > 0 or len(BuildTask._ReadyQueue) > 0 \
or not ExitFlag.isSet()) and not BuildTask._ErrorFlag.isSet():
EdkLogger.debug(EdkLogger.DEBUG_8, "Pending Queue (%d), Ready Queue (%d)"
% (len(BuildTask._PendingQueue), len(BuildTask._ReadyQueue)))
# get all pending tasks
BuildTask._PendingQueueLock.acquire()
BuildObjectList = list(BuildTask._PendingQueue.keys())
#
# check if their dependency is resolved, and if true, move them
# into ready queue
#
for BuildObject in BuildObjectList:
Bt = BuildTask._PendingQueue[BuildObject]
if Bt.IsReady():
BuildTask._ReadyQueue[BuildObject] = BuildTask._PendingQueue.pop(BuildObject)
BuildTask._PendingQueueLock.release()
# launch build thread until the maximum number of threads is reached
while not BuildTask._ErrorFlag.isSet():
# empty ready queue, do nothing further
if len(BuildTask._ReadyQueue) == 0:
break
# wait for active thread(s) exit
BuildTask._Thread.acquire(True)
# start a new build thread
Bo, Bt = BuildTask._ReadyQueue.popitem()
# move into running queue
BuildTask._RunningQueueLock.acquire()
BuildTask._RunningQueue[Bo] = Bt
BuildTask._RunningQueueLock.release()
Bt.Start()
# avoid tense loop
time.sleep(0.01)
# avoid tense loop
time.sleep(0.01)
# wait for all running threads exit
if BuildTask._ErrorFlag.isSet():
EdkLogger.quiet("\nWaiting for all build threads exit...")
# while not BuildTask._ErrorFlag.isSet() and \
while len(BuildTask._RunningQueue) > 0:
EdkLogger.verbose("Waiting for thread ending...(%d)" % len(BuildTask._RunningQueue))
EdkLogger.debug(EdkLogger.DEBUG_8, "Threads [%s]" % ", ".join(Th.getName() for Th in threading.enumerate()))
# avoid tense loop
time.sleep(0.1)
except BaseException as X:
#
# TRICK: hide the output of threads left running, so that the user can
# catch the error message easily
#
EdkLogger.SetLevel(EdkLogger.ERROR)
BuildTask._ErrorFlag.set()
BuildTask._ErrorMessage = "build thread scheduler error\n\t%s" % str(X)
BuildTask._PendingQueue.clear()
BuildTask._ReadyQueue.clear()
BuildTask._RunningQueue.clear()
BuildTask._TaskQueue.clear()
BuildTask._SchedulerStopped.set()
## Wait for all running method exit
#
@staticmethod
def WaitForComplete():
    """Block the calling thread until the scheduler has fully stopped."""
    BuildTask._SchedulerStopped.wait()
## Check if the scheduler is running or not
#
@staticmethod
def IsOnGoing():
    """Return True while the scheduler thread has not signalled completion."""
    return not BuildTask._SchedulerStopped.isSet()
## Abort the build
@staticmethod
def Abort():
    """Request the scheduler to stop, then wait until it has shut down."""
    if BuildTask.IsOnGoing():
        # Raising the error flag makes the scheduler loop exit; waiting on
        # _SchedulerStopped ensures no build thread is left running.
        BuildTask._ErrorFlag.set()
        BuildTask.WaitForComplete()
## Check if there's error in running thread
#
# Since the main thread cannot catch exceptions in other thread, we have to
# use threading.Event to communicate this information to main thread.
#
@staticmethod
def HasError():
    """Return True if any build thread has raised the shared error flag."""
    return BuildTask._ErrorFlag.isSet()
## Get error message in running thread
#
# Since the main thread cannot catch exceptions in other thread, we have to
# use a static variable to communicate this message to main thread.
#
@staticmethod
def GetErrorMessage():
    """Return the error message recorded by a failing build thread."""
    return BuildTask._ErrorMessage
## Factory method to create a BuildTask object
#
# This method will check if a module is building or has been built. And if
# true, just return the associated BuildTask object in the _TaskQueue. If
# not, create and return a new BuildTask object. The new BuildTask object
# will be appended to the _PendingQueue for scheduling later.
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build object of BuildItem
#
@staticmethod
def New(BuildItem, Dependency=None):
    # Return the existing task if this build object is already scheduled
    # (building or built), so each module is built at most once.
    if BuildItem in BuildTask._TaskQueue:
        Bt = BuildTask._TaskQueue[BuildItem]
        return Bt
    Bt = BuildTask()
    Bt._Init(BuildItem, Dependency)
    BuildTask._TaskQueue[BuildItem] = Bt
    # _PendingQueue is shared with the scheduler thread; guard the insert.
    BuildTask._PendingQueueLock.acquire()
    BuildTask._PendingQueue[BuildItem] = Bt
    BuildTask._PendingQueueLock.release()
    return Bt
## The real constructor of BuildTask
#
# @param BuildItem A BuildUnit object representing a build object
# @param Dependency The dependent build objects of BuildItem; BuildItem's
# own Dependency list is always included as well
#
def _Init(self, BuildItem, Dependency=None):
    """Initialize the task with its build unit and dependency tasks.

    BUGFIX: the original extended the caller-supplied Dependency list in
    place, leaking this task's dependencies back into the caller's data.
    The list is now copied before being combined.
    """
    self.BuildItem = BuildItem
    self.DependencyList = []
    if Dependency is None:
        Dependency = BuildItem.Dependency
    else:
        # Copy first so the caller's list is never mutated.
        Dependency = list(Dependency)
        Dependency.extend(BuildItem.Dependency)
    self.AddDependency(Dependency)
    # flag indicating build completes, used to avoid unnecessary re-build
    self.CompleteFlag = False
## Check if all dependent build tasks are completed or not
#
def IsReady(self):
    """Return True when every dependency task has finished building.

    An empty dependency list is trivially ready.  all() short-circuits on
    the first incomplete dependency, matching the original early-break
    loop; CompleteFlag is only ever assigned True/False in this file.
    """
    return all(Dep.CompleteFlag for Dep in self.DependencyList)
## Add dependent build task
#
# @param Dependency The list of dependent build objects
#
def AddDependency(self, Dependency):
    """Create (or look up) a BuildTask for each dependency that must build.

    Binary modules, and modules that can be skipped via the hash cache,
    need no build task and are filtered out.
    """
    for DepUnit in Dependency:
        BuildObj = DepUnit.BuildObject
        if BuildObj.IsBinaryModule or BuildObj.CanSkipbyHash():
            continue
        self.DependencyList.append(BuildTask.New(DepUnit))  # BuildTask list
## The thread wrapper of LaunchCommand function
#
# @param Command A list or string contains the call of the command
# @param WorkingDir The directory in which the program will be running
#
def _CommandThread(self, Command, WorkingDir):
    """Worker-thread body: run the build command and record the outcome."""
    try:
        self.BuildItem.BuildObject.BuildTime = LaunchCommand(Command, WorkingDir)
        self.CompleteFlag = True
        # Run hash operation post dependency, to account for libs
        if GlobalData.gUseHashCache and self.BuildItem.BuildObject.IsLibrary:
            HashFile = path.join(self.BuildItem.BuildObject.BuildDir, self.BuildItem.BuildObject.Name + ".hash")
            SaveFileOnChange(HashFile, self.BuildItem.BuildObject.GenModuleHash(), True)
    # NOTE(review): the bare except appears deliberate -- exceptions cannot
    # cross thread boundaries, so every failure must be converted into the
    # shared error flag for the main thread to observe.
    except:
        #
        # TRICK: hide the output of threads left running, so that the user can
        # catch the error message easily
        #
        if not BuildTask._ErrorFlag.isSet():
            GlobalData.gBuildingModule = "%s [%s, %s, %s]" % (str(self.BuildItem.BuildObject),
                                                              self.BuildItem.BuildObject.Arch,
                                                              self.BuildItem.BuildObject.ToolChain,
                                                              self.BuildItem.BuildObject.BuildTarget
                                                             )
        EdkLogger.SetLevel(EdkLogger.ERROR)
        BuildTask._ErrorFlag.set()
        BuildTask._ErrorMessage = "%s broken\n %s [%s]" % \
                                  (threading.currentThread().getName(), Command, WorkingDir)
    # Set the value used by hash invalidation flow in GlobalData.gModuleBuildTracking to 'SUCCESS'
    # If Module or Lib is being tracked, it did not fail header check test, and built successfully
    if (self.BuildItem.BuildObject.Arch in GlobalData.gModuleBuildTracking and
        self.BuildItem.BuildObject in GlobalData.gModuleBuildTracking[self.BuildItem.BuildObject.Arch] and
        GlobalData.gModuleBuildTracking[self.BuildItem.BuildObject.Arch][self.BuildItem.BuildObject] != 'FAIL_METAFILE' and
        not BuildTask._ErrorFlag.isSet()
        ):
        GlobalData.gModuleBuildTracking[self.BuildItem.BuildObject.Arch][self.BuildItem.BuildObject] = 'SUCCESS'
    # indicate there's a thread is available for another build task
    BuildTask._RunningQueueLock.acquire()
    BuildTask._RunningQueue.pop(self.BuildItem)
    BuildTask._RunningQueueLock.release()
    # Release the semaphore slot so the scheduler can start another task.
    BuildTask._Thread.release()
## Start build task thread
#
def Start(self):
    """Launch this task's build command on a dedicated worker thread."""
    EdkLogger.quiet("Building ... %s" % repr(self.BuildItem))
    Command = self.BuildItem.BuildCommand + [self.BuildItem.Target]
    # Non-daemon thread: the process must wait for in-flight builds on exit.
    # NOTE(review): setName/setDaemon are deprecated aliases of the
    # name/daemon attributes, kept here for consistency with the rest of
    # the file.
    self.BuildTread = Thread(target=self._CommandThread, args=(Command, self.BuildItem.WorkingDir))
    self.BuildTread.setName("build thread")
    self.BuildTread.setDaemon(False)
    self.BuildTread.start()
## The class contains the information related to EFI image
#
class PeImageInfo():
    ## Constructor
    #
    # Constructor will load all required image information.
    #
    # @param  BaseName          The full file path of image.
    # @param  Guid              The GUID for image.
    # @param  Arch              Arch of this image.
    # @param  OutputDir         The output directory for image.
    # @param  DebugDir          The debug directory for image.
    # @param  ImageClass        PeImage Information
    #
    def __init__(self, BaseName, Guid, Arch, OutputDir, DebugDir, ImageClass):
        self.BaseName = BaseName
        self.Guid = Guid
        self.Arch = Arch
        self.OutputDir = OutputDir
        self.DebugDir = DebugDir
        self.Image = ImageClass
        # Round the recorded image size up to the next whole 0x1000
        # boundary (a full extra page is added even when already aligned,
        # matching the original behaviour).
        PageSize = 0x1000
        self.Image.Size = (self.Image.Size // PageSize + 1) * PageSize
## The class implementing the EDK2 build process
#
# The build process includes:
# 1. Load configuration from target.txt and tools_def.txt in $(WORKSPACE)/Conf
# 2. Parse DSC file of active platform
# 3. Parse FDF file if any
# 4. Establish build database, including parse all other files (module, package)
# 5. Create AutoGen files (C code file, depex file, makefile) if necessary
# 6. Call build command
#
class Build():
## Constructor
#
# Constructor will load all necessary configurations, parse platform, modules
# and packages and the establish a database for AutoGen.
#
# @param  Target              The build command target, one of gSupportedTarget
# @param  WorkspaceDir        The directory of workspace
# @param  BuildOptions        Build options passed from command line
#
def __init__(self, Target, WorkspaceDir, BuildOptions):
    """Capture command-line options, resolve Conf/cache paths, print the
    build environment, and run the optional prebuild step."""
    self.WorkspaceDir = WorkspaceDir
    self.Target = Target
    self.PlatformFile = BuildOptions.PlatformFile
    self.ModuleFile = BuildOptions.ModuleFile
    self.ArchList = BuildOptions.TargetArch
    self.ToolChainList = BuildOptions.ToolChain
    self.BuildTargetList= BuildOptions.BuildTarget
    self.Fdf = BuildOptions.FdfFile
    self.FdList = BuildOptions.RomImage
    self.FvList = BuildOptions.FvImage
    self.CapList = BuildOptions.CapName
    self.SilentMode = BuildOptions.SilentMode
    self.ThreadNumber = BuildOptions.ThreadNumber
    self.SkipAutoGen = BuildOptions.SkipAutoGen
    self.Reparse = BuildOptions.Reparse
    self.SkuId = BuildOptions.SkuId
    if self.SkuId:
        GlobalData.gSKUID_CMD = self.SkuId
    self.ConfDirectory = BuildOptions.ConfDirectory
    self.SpawnMode = True
    self.BuildReport = BuildReport(BuildOptions.ReportFile, BuildOptions.ReportType)
    self.TargetTxt = TargetTxtClassObject()
    self.ToolDef = ToolDefClassObject()
    # Accumulated wall-clock timings, reported at the end of the build.
    self.AutoGenTime = 0
    self.MakeTime = 0
    self.GenFdsTime = 0
    GlobalData.BuildOptionPcd = BuildOptions.OptionPcd if BuildOptions.OptionPcd else []
    #Set global flag for build mode
    GlobalData.gIgnoreSource = BuildOptions.IgnoreSources
    GlobalData.gUseHashCache = BuildOptions.UseHashCache
    GlobalData.gBinCacheDest = BuildOptions.BinCacheDest
    GlobalData.gBinCacheSource = BuildOptions.BinCacheSource
    GlobalData.gEnableGenfdsMultiThread = BuildOptions.GenfdsMultiThread
    GlobalData.gDisableIncludePathCheck = BuildOptions.DisableIncludePathCheck
    # Binary-cache options are only valid together with --hash, and
    # source/destination are mutually exclusive.
    if GlobalData.gBinCacheDest and not GlobalData.gUseHashCache:
        EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination must be used together with --hash.")
    if GlobalData.gBinCacheSource and not GlobalData.gUseHashCache:
        EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-source must be used together with --hash.")
    if GlobalData.gBinCacheDest and GlobalData.gBinCacheSource:
        EdkLogger.error("build", OPTION_NOT_SUPPORTED, ExtraData="--binary-destination can not be used together with --binary-source.")
    if GlobalData.gBinCacheSource:
        BinCacheSource = os.path.normpath(GlobalData.gBinCacheSource)
        if not os.path.isabs(BinCacheSource):
            # Relative cache paths are resolved against the workspace.
            BinCacheSource = mws.join(self.WorkspaceDir, BinCacheSource)
        GlobalData.gBinCacheSource = BinCacheSource
    else:
        if GlobalData.gBinCacheSource is not None:
            EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-source.")
    if GlobalData.gBinCacheDest:
        BinCacheDest = os.path.normpath(GlobalData.gBinCacheDest)
        if not os.path.isabs(BinCacheDest):
            BinCacheDest = mws.join(self.WorkspaceDir, BinCacheDest)
        GlobalData.gBinCacheDest = BinCacheDest
    else:
        if GlobalData.gBinCacheDest is not None:
            EdkLogger.error("build", OPTION_VALUE_INVALID, ExtraData="Invalid value of option --binary-destination.")
    if self.ConfDirectory:
        # Get alternate Conf location, if it is absolute, then just use the absolute directory name
        ConfDirectoryPath = os.path.normpath(self.ConfDirectory)
        if not os.path.isabs(ConfDirectoryPath):
            # Since alternate directory name is not absolute, the alternate directory is located within the WORKSPACE
            # This also handles someone specifying the Conf directory in the workspace. Using --conf=Conf
            ConfDirectoryPath = mws.join(self.WorkspaceDir, ConfDirectoryPath)
    else:
        if "CONF_PATH" in os.environ:
            ConfDirectoryPath = os.path.normcase(os.path.normpath(os.environ["CONF_PATH"]))
        else:
            # Get standard WORKSPACE/Conf use the absolute path to the WORKSPACE/Conf
            ConfDirectoryPath = mws.join(self.WorkspaceDir, 'Conf')
    GlobalData.gConfDirectory = ConfDirectoryPath
    GlobalData.gDatabasePath = os.path.normpath(os.path.join(ConfDirectoryPath, GlobalData.gDatabasePath))
    self.Db = WorkspaceDatabase()
    self.BuildDatabase = self.Db.BuildObject
    self.Platform = None
    self.ToolChainFamily = None
    self.LoadFixAddress = 0
    self.UniFlag = BuildOptions.Flag
    self.BuildModules = []
    self.HashSkipModules = []
    self.Db_Flag = False
    self.LaunchPrebuildFlag = False
    self.PlatformBuildPath = os.path.join(GlobalData.gConfDirectory, '.cache', '.PlatformBuild')
    if BuildOptions.CommandLength:
        GlobalData.gCommandMaxLength = BuildOptions.CommandLength
    # print dot character during doing some time-consuming work
    self.Progress = Utils.Progressor()
    # print current build environment and configuration
    EdkLogger.quiet("%-16s = %s" % ("WORKSPACE", os.environ["WORKSPACE"]))
    if "PACKAGES_PATH" in os.environ:
        # WORKSPACE env has been converted before. Print the same path style with WORKSPACE env.
        EdkLogger.quiet("%-16s = %s" % ("PACKAGES_PATH", os.path.normcase(os.path.normpath(os.environ["PACKAGES_PATH"]))))
    EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_PATH", os.environ["EDK_TOOLS_PATH"]))
    if "EDK_TOOLS_BIN" in os.environ:
        # Print the same path style with WORKSPACE env.
        EdkLogger.quiet("%-16s = %s" % ("EDK_TOOLS_BIN", os.path.normcase(os.path.normpath(os.environ["EDK_TOOLS_BIN"]))))
    EdkLogger.quiet("%-16s = %s" % ("CONF_PATH", GlobalData.gConfDirectory))
    if "PYTHON3_ENABLE" in os.environ:
        PYTHON3_ENABLE = os.environ["PYTHON3_ENABLE"]
        if PYTHON3_ENABLE != "TRUE":
            PYTHON3_ENABLE = "FALSE"
        EdkLogger.quiet("%-16s = %s" % ("PYTHON3_ENABLE", PYTHON3_ENABLE))
    if "PYTHON_COMMAND" in os.environ:
        EdkLogger.quiet("%-16s = %s" % ("PYTHON_COMMAND", os.environ["PYTHON_COMMAND"]))
    self.InitPreBuild()
    self.InitPostBuild()
    if self.Prebuild:
        EdkLogger.quiet("%-16s = %s" % ("PREBUILD", self.Prebuild))
    if self.Postbuild:
        EdkLogger.quiet("%-16s = %s" % ("POSTBUILD", self.Postbuild))
    if self.Prebuild:
        self.LaunchPrebuild()
        # Re-create the parsers: the prebuild step may have rewritten
        # target.txt / tools_def.txt, so they must be re-read by InitBuild.
        self.TargetTxt = TargetTxtClassObject()
        self.ToolDef = ToolDefClassObject()
    if not (self.LaunchPrebuildFlag and os.path.exists(self.PlatformBuildPath)):
        self.InitBuild()
    EdkLogger.info("")
    os.chdir(self.WorkspaceDir)
## Load configuration
#
# This method will parse target.txt and get the build configurations.
#
def LoadConfiguration(self):
    """Parse target.txt/tools_def.txt and fill in any option not given on
    the command line (arch, build target, toolchain, thread count,
    active platform)."""
    #
    # Check target.txt and tools_def.txt and Init them
    #
    BuildConfigurationFile = os.path.normpath(os.path.join(GlobalData.gConfDirectory, gBuildConfiguration))
    if os.path.isfile(BuildConfigurationFile) == True:
        StatusCode = self.TargetTxt.LoadTargetTxtFile(BuildConfigurationFile)
        ToolDefinitionFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_CONF]
        if ToolDefinitionFile == '':
            ToolDefinitionFile = gToolsDefinition
            ToolDefinitionFile = os.path.normpath(mws.join(self.WorkspaceDir, 'Conf', ToolDefinitionFile))
        if os.path.isfile(ToolDefinitionFile) == True:
            StatusCode = self.ToolDef.LoadToolDefFile(ToolDefinitionFile)
        else:
            EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=ToolDefinitionFile)
    else:
        EdkLogger.error("build", FILE_NOT_FOUND, ExtraData=BuildConfigurationFile)
    # if no ARCH given in command line, get it from target.txt
    if not self.ArchList:
        self.ArchList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET_ARCH]
    self.ArchList = tuple(self.ArchList)
    # if no build target given in command line, get it from target.txt
    if not self.BuildTargetList:
        self.BuildTargetList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TARGET]
    # if no tool chain given in command line, get it from target.txt
    if not self.ToolChainList:
        self.ToolChainList = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_TOOL_CHAIN_TAG]
        if self.ToolChainList is None or len(self.ToolChainList) == 0:
            EdkLogger.error("build", RESOURCE_NOT_AVAILABLE, ExtraData="No toolchain given. Don't know how to build.\n")
    # check if the tool chains are defined or not
    NewToolChainList = []
    for ToolChain in self.ToolChainList:
        if ToolChain not in self.ToolDef.ToolsDefTxtDatabase[TAB_TOD_DEFINES_TOOL_CHAIN_TAG]:
            EdkLogger.warn("build", "Tool chain [%s] is not defined" % ToolChain)
        else:
            NewToolChainList.append(ToolChain)
    # if no tool chain available, break the build
    if len(NewToolChainList) == 0:
        EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                        ExtraData="[%s] not defined. No toolchain available for build!\n" % ", ".join(self.ToolChainList))
    else:
        self.ToolChainList = NewToolChainList
    # Resolve each toolchain's family (defaulting to MSFT when undefined).
    ToolChainFamily = []
    ToolDefinition = self.ToolDef.ToolsDefTxtDatabase
    for Tool in self.ToolChainList:
        if TAB_TOD_DEFINES_FAMILY not in ToolDefinition or Tool not in ToolDefinition[TAB_TOD_DEFINES_FAMILY] \
           or not ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool]:
            EdkLogger.warn("build", "No tool chain family found in configuration for %s. Default to MSFT." % Tool)
            ToolChainFamily.append(TAB_COMPILER_MSFT)
        else:
            ToolChainFamily.append(ToolDefinition[TAB_TOD_DEFINES_FAMILY][Tool])
    self.ToolChainFamily = ToolChainFamily
    # Thread count: command line beats target.txt; 0 means "use CPU count".
    if self.ThreadNumber is None:
        self.ThreadNumber = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_MAX_CONCURRENT_THREAD_NUMBER]
        if self.ThreadNumber == '':
            self.ThreadNumber = 0
        else:
            self.ThreadNumber = int(self.ThreadNumber, 0)
    if self.ThreadNumber == 0:
        try:
            self.ThreadNumber = multiprocessing.cpu_count()
        except (ImportError, NotImplementedError):
            self.ThreadNumber = 1
    if not self.PlatformFile:
        PlatformFile = self.TargetTxt.TargetTxtDictionary[TAB_TAT_DEFINES_ACTIVE_PLATFORM]
        if not PlatformFile:
            # Try to find one in current directory
            WorkingDirectory = os.getcwd()
            FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.dsc')))
            FileNum = len(FileList)
            if FileNum >= 2:
                EdkLogger.error("build", OPTION_MISSING,
                                ExtraData="There are %d DSC files in %s. Use '-p' to specify one.\n" % (FileNum, WorkingDirectory))
            elif FileNum == 1:
                PlatformFile = FileList[0]
            else:
                EdkLogger.error("build", RESOURCE_NOT_AVAILABLE,
                                ExtraData="No active platform specified in target.txt or command line! Nothing can be built.\n")
        self.PlatformFile = PathClass(NormFile(PlatformFile, self.WorkspaceDir), self.WorkspaceDir)
## Initialize build configuration
#
# This method will parse DSC file and merge the configurations from
# command line and target.txt, then get the final build configurations.
#
def InitBuild(self):
    """Parse target.txt/tools_def.txt and validate the platform DSC path."""
    # target.txt, tools_def.txt and the active platform are loaded first.
    self.LoadConfiguration()
    # Validation is case-insensitive so paths from the command line and the
    # configuration file are both accepted.
    Status, Detail = self.PlatformFile.Validate(".dsc", False)
    if Status != 0:
        EdkLogger.error("build", Status, ExtraData=Detail)
def InitPreBuild(self):
    """Load configuration, publish TARGET/ARCH/TOOLCHAIN defines, and
    resolve the prebuild command line (from the PREBUILD command-line
    define or the platform DSC)."""
    self.LoadConfiguration()
    ErrorCode, ErrorInfo = self.PlatformFile.Validate(".dsc", False)
    if ErrorCode != 0:
        EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
    # Export first-choice values as global defines for macro expansion.
    if self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = self.BuildTargetList[0]
    if self.ArchList:
        GlobalData.gGlobalDefines['ARCH'] = self.ArchList[0]
    if self.ToolChainList:
        GlobalData.gGlobalDefines['TOOLCHAIN'] = self.ToolChainList[0]
        GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = self.ToolChainList[0]
    if self.ToolChainFamily:
        GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[0]
    # Command-line PREBUILD define wins over the platform DSC's Prebuild.
    if 'PREBUILD' in GlobalData.gCommandLineDefines:
        self.Prebuild = GlobalData.gCommandLineDefines.get('PREBUILD')
    else:
        self.Db_Flag = True
        Platform = self.Db.MapPlatform(str(self.PlatformFile))
        self.Prebuild = str(Platform.Prebuild)
    if self.Prebuild:
        PrebuildList = []
        #
        # Evaluate all arguments and convert arguments that are WORKSPACE
        # relative paths to absolute paths. Filter arguments that look like
        # flags or do not follow the file/dir naming rules to avoid false
        # positives on this conversion.
        #
        for Arg in self.Prebuild.split():
            #
            # Do not modify Arg if it looks like a flag or an absolute file path
            #
            if Arg.startswith('-') or os.path.isabs(Arg):
                PrebuildList.append(Arg)
                continue
            #
            # Do not modify Arg if it does not look like a Workspace relative
            # path that starts with a valid package directory name
            #
            if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                PrebuildList.append(Arg)
                continue
            #
            # If Arg looks like a WORKSPACE relative path, then convert to an
            # absolute path and check to see if the file exists.
            #
            Temp = mws.join(self.WorkspaceDir, Arg)
            if os.path.isfile(Temp):
                Arg = Temp
            PrebuildList.append(Arg)
        self.Prebuild = ' '.join(PrebuildList)
        self.Prebuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def InitPostBuild(self):
    """Resolve the postbuild command line (from the POSTBUILD command-line
    define or the platform DSC), mirroring InitPreBuild's argument
    normalization."""
    # Command-line POSTBUILD define wins over the platform DSC's Postbuild.
    if 'POSTBUILD' in GlobalData.gCommandLineDefines:
        self.Postbuild = GlobalData.gCommandLineDefines.get('POSTBUILD')
    else:
        Platform = self.Db.MapPlatform(str(self.PlatformFile))
        self.Postbuild = str(Platform.Postbuild)
    if self.Postbuild:
        PostbuildList = []
        #
        # Evaluate all arguments and convert arguments that are WORKSPACE
        # relative paths to absolute paths. Filter arguments that look like
        # flags or do not follow the file/dir naming rules to avoid false
        # positives on this conversion.
        #
        for Arg in self.Postbuild.split():
            #
            # Do not modify Arg if it looks like a flag or an absolute file path
            #
            if Arg.startswith('-') or os.path.isabs(Arg):
                PostbuildList.append(Arg)
                continue
            #
            # Do not modify Arg if it does not look like a Workspace relative
            # path that starts with a valid package directory name
            #
            if not Arg[0].isalpha() or os.path.dirname(Arg) == '':
                PostbuildList.append(Arg)
                continue
            #
            # If Arg looks like a WORKSPACE relative path, then convert to an
            # absolute path and check to see if the file exists.
            #
            Temp = mws.join(self.WorkspaceDir, Arg)
            if os.path.isfile(Temp):
                Arg = Temp
            PostbuildList.append(Arg)
        self.Postbuild = ' '.join(PostbuildList)
        self.Postbuild += self.PassCommandOption(self.BuildTargetList, self.ArchList, self.ToolChainList, self.PlatformFile, self.Target)
def PassCommandOption(self, BuildTarget, TargetArch, ToolChain, PlatformFile, Target):
    """Reconstruct a command-line option string for pre/post build scripts.

    Only values that did NOT come from the original command line (per
    GlobalData.gOptions) are re-emitted as -b/-a/-t/-p flags, so the
    script sees the effective configuration without duplicating options.

    @param  BuildTarget   build target(s), e.g. DEBUG/RELEASE
    @param  TargetArch    architecture(s) to build
    @param  ToolChain     toolchain tag(s)
    @param  PlatformFile  active platform description file(s)
    @param  Target        the build command target (e.g. 'clean', 'fds')
    @retval string        option string to append to the script invocation
    """
    BuildStr = ''
    if GlobalData.gCommand and isinstance(GlobalData.gCommand, list):
        BuildStr += ' ' + ' '.join(GlobalData.gCommand)
    # A flag is re-emitted only when it was absent from the command line.
    TargetFlag = False
    ArchFlag = False
    ToolChainFlag = False
    PlatformFileFlag = False
    if GlobalData.gOptions and not GlobalData.gOptions.BuildTarget:
        TargetFlag = True
    if GlobalData.gOptions and not GlobalData.gOptions.TargetArch:
        ArchFlag = True
    if GlobalData.gOptions and not GlobalData.gOptions.ToolChain:
        ToolChainFlag = True
    if GlobalData.gOptions and not GlobalData.gOptions.PlatformFile:
        PlatformFileFlag = True
    if TargetFlag and BuildTarget:
        if isinstance(BuildTarget, (list, tuple)):
            BuildStr += ' -b ' + ' -b '.join(BuildTarget)
        elif isinstance(BuildTarget, str):
            BuildStr += ' -b ' + BuildTarget
    if ArchFlag and TargetArch:
        if isinstance(TargetArch, (list, tuple)):
            BuildStr += ' -a ' + ' -a '.join(TargetArch)
        elif isinstance(TargetArch, str):
            BuildStr += ' -a ' + TargetArch
    if ToolChainFlag and ToolChain:
        if isinstance(ToolChain, (list, tuple)):
            BuildStr += ' -t ' + ' -t '.join(ToolChain)
        elif isinstance(ToolChain, str):
            BuildStr += ' -t ' + ToolChain
    if PlatformFileFlag and PlatformFile:
        if isinstance(PlatformFile, (list, tuple)):
            BuildStr += ' -p ' + ' -p '.join(PlatformFile)
        elif isinstance(PlatformFile, str):
            # BUGFIX: the original emitted ' -p' with no separating space,
            # inconsistent with the list branch above and every other flag.
            BuildStr += ' -p ' + PlatformFile
    BuildStr += ' --conf=' + GlobalData.gConfDirectory
    if Target:
        BuildStr += ' ' + Target
    return BuildStr
def LaunchPrebuild(self):
    """Run the configured prebuild script, if any, and import environment
    variables it sets into this process.

    A child process's environment evaporates when it exits, so the script
    is chained with 'set'/'env' redirected to a dump file, which is read
    back afterwards.  A non-zero exit status aborts the build.
    """
    if self.Prebuild:
        EdkLogger.info("\n- Prebuild Start -\n")
        self.LaunchPrebuildFlag = True
        # The .PrebuildEnv file captures environment variable settings made
        # by the prebuild script so they survive into the main build step.
        PrebuildEnvFile = os.path.join(GlobalData.gConfDirectory, '.cache', '.PrebuildEnv')
        if os.path.isfile(PrebuildEnvFile):
            os.remove(PrebuildEnvFile)
        if os.path.isfile(self.PlatformBuildPath):
            os.remove(self.PlatformBuildPath)
        # 'set' dumps the environment on Windows; 'env' everywhere else.
        if sys.platform == "win32":
            args = ' && '.join((self.Prebuild, 'set > ' + PrebuildEnvFile))
            Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
        else:
            args = ' && '.join((self.Prebuild, 'env > ' + PrebuildEnvFile))
            Process = Popen(args, stdout=PIPE, stderr=PIPE, shell=True)
        # launch two threads to read the STDOUT and STDERR
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Process.stdout:
            StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
            StdOutThread.setName("STDOUT-Redirector")
            StdOutThread.setDaemon(False)
            StdOutThread.start()
        if Process.stderr:
            StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
            StdErrThread.setName("STDERR-Redirector")
            StdErrThread.setDaemon(False)
            StdErrThread.start()
        # waiting for program exit
        Process.wait()
        if Process.stdout:
            StdOutThread.join()
        if Process.stderr:
            StdErrThread.join()
        if Process.returncode != 0:
            EdkLogger.error("Prebuild", PREBUILD_ERROR, 'Prebuild process is not success!')
        if os.path.exists(PrebuildEnvFile):
            # BUGFIX: use a context manager so the dump-file handle is
            # closed even if readlines() raises.
            with open(PrebuildEnvFile) as f:
                envs = f.readlines()
            # Keep only well-formed NAME=VALUE lines.
            envs = [l.split("=", 1) for l in envs]
            envs = [[I.strip() for I in item] for item in envs if len(item) == 2]
            os.environ.update(dict(envs))
        EdkLogger.info("\n- Prebuild Done -\n")
def LaunchPostbuild(self):
    """Run the configured postbuild script, if any; abort on failure.

    Output is forwarded through two reader threads, mirroring
    LaunchPrebuild; no environment capture is needed after the build.
    """
    if self.Postbuild:
        EdkLogger.info("\n- Postbuild Start -\n")
        # The original had an identical Popen call in both branches of a
        # sys.platform check; collapsed into a single call.
        Process = Popen(self.Postbuild, stdout=PIPE, stderr=PIPE, shell=True)
        # launch two threads to read the STDOUT and STDERR
        EndOfProcedure = Event()
        EndOfProcedure.clear()
        if Process.stdout:
            StdOutThread = Thread(target=ReadMessage, args=(Process.stdout, EdkLogger.info, EndOfProcedure))
            StdOutThread.setName("STDOUT-Redirector")
            StdOutThread.setDaemon(False)
            StdOutThread.start()
        if Process.stderr:
            StdErrThread = Thread(target=ReadMessage, args=(Process.stderr, EdkLogger.quiet, EndOfProcedure))
            StdErrThread.setName("STDERR-Redirector")
            StdErrThread.setDaemon(False)
            StdErrThread.start()
        # waiting for program exit
        Process.wait()
        if Process.stdout:
            StdOutThread.join()
        if Process.stderr:
            StdErrThread.join()
        if Process.returncode != 0:
            EdkLogger.error("Postbuild", POSTBUILD_ERROR, 'Postbuild process is not success!')
        EdkLogger.info("\n- Postbuild Done -\n")
## Error handling for hash feature
#
# On BuildTask error, iterate through the Module Build tracking
# dictionary to determine whether a module failed to build. Invalidate
# the hash associated with that module by removing it from storage.
#
#
def invalidateHash(self):
    """Remove stale .hash files for every tracked module that did not
    finish with status 'SUCCESS'."""
    # Only for hashing feature
    if not GlobalData.gUseHashCache:
        return
    # GlobalData.gModuleBuildTracking contains only modules or libs that cannot be skipped by hash
    for trackedModules in GlobalData.gModuleBuildTracking.values():
        for moduleAutoGenObj, buildStatus in trackedModules.items():
            # Skip invalidating for Successful Module/Lib builds
            if buildStatus == 'SUCCESS':
                continue
            # The module failed to build, failed to start building, or failed the header check test from this point on
            # Remove the .hash file from the build directory
            staleHash = os.path.join(moduleAutoGenObj.BuildDir, moduleAutoGenObj.Name + ".hash")
            if os.path.exists(staleHash):
                os.remove(staleHash)
            # And remove it from the binary cache destination, if configured
            if GlobalData.gBinCacheDest:
                cacheDir = os.path.join(GlobalData.gBinCacheDest, moduleAutoGenObj.Arch, moduleAutoGenObj.SourceDir, moduleAutoGenObj.MetaFile.BaseName)
                cachedHash = os.path.join(cacheDir, moduleAutoGenObj.Name + '.hash')
                if os.path.exists(cachedHash):
                    os.remove(cachedHash)
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
#   @param  Target                      The target of build command
#   @param  Platform                    The platform file
#   @param  Module                      The module file
#   @param  BuildTarget                 The name of build target, one of "DEBUG", "RELEASE"
#   @param  ToolChain                   The name of toolchain to build
#   @param  Arch                        The arch of the module/platform
#   @param  CreateDepModuleCodeFile     Flag used to indicate creating code
#                                       for dependent modules/Libraries
#   @param  CreateDepModuleMakeFile     Flag used to indicate creating makefile
#                                       for dependent modules/Libraries
#
def _BuildPa(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False, FfsCommand=None):
    """Generate AutoGen code/makefiles for a platform AutoGen object and
    dispatch the requested make target.  Returns True on success, False
    when AutoGenObject is missing."""
    if AutoGenObject is None:
        return False
    # BUGFIX: FfsCommand previously defaulted to a shared mutable dict
    # ({}); a None sentinel with a fresh dict per call is equivalent for
    # callers and removes the shared-state hazard.
    if FfsCommand is None:
        FfsCommand = {}
    # skip file generation for cleanxxx targets, run and fds target
    if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
        # for target which must generate AutoGen code and makefile
        if not self.SkipAutoGen or Target == 'genc':
            self.Progress.Start("Generating code")
            AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
            self.Progress.Stop("done!")
        if Target == "genc":
            return True
        if not self.SkipAutoGen or Target == 'genmake':
            self.Progress.Start("Generating makefile")
            AutoGenObject.CreateMakeFile(CreateDepsMakeFile, FfsCommand)
            self.Progress.Stop("done!")
        if Target == "genmake":
            return True
    else:
        # always recreate top/platform makefile when clean, just in case of inconsistency
        AutoGenObject.CreateCodeFile(False)
        AutoGenObject.CreateMakeFile(False)
    if EdkLogger.GetLevel() == EdkLogger.QUIET:
        EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
    BuildCommand = AutoGenObject.BuildCommand
    if BuildCommand is None or len(BuildCommand) == 0:
        EdkLogger.error("build", OPTION_MISSING,
                        "No build command found for this module. "
                        "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                        (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                        ExtraData=str(AutoGenObject))
    makefile = GenMake.BuildFile(AutoGenObject)._FILE_NAME_[GenMake.gMakeType]
    # run
    if Target == 'run':
        return True
    # build modules
    if BuildModule:
        BuildCommand = BuildCommand + [Target]
        LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
        self.CreateAsBuiltInf()
        if GlobalData.gBinCacheDest:
            self.UpdateBuildCache()
        self.BuildModules = []
        return True
    # build library
    if Target == 'libraries':
        for Lib in AutoGenObject.LibraryBuildDirectoryList:
            NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
            LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
        return True
    # build module
    if Target == 'modules':
        # Libraries must be built before the modules that link them.
        for Lib in AutoGenObject.LibraryBuildDirectoryList:
            NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Lib, makefile)), 'pbuild']
            LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
        for Mod in AutoGenObject.ModuleBuildDirectoryList:
            NewBuildCommand = BuildCommand + ['-f', os.path.normpath(os.path.join(Mod, makefile)), 'pbuild']
            LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
        self.CreateAsBuiltInf()
        if GlobalData.gBinCacheDest:
            self.UpdateBuildCache()
        self.BuildModules = []
        return True
    # cleanlib
    if Target == 'cleanlib':
        for Lib in AutoGenObject.LibraryBuildDirectoryList:
            LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
            if os.path.exists(LibMakefile):
                NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
        return True
    # clean
    if Target == 'clean':
        for Mod in AutoGenObject.ModuleBuildDirectoryList:
            ModMakefile = os.path.normpath(os.path.join(Mod, makefile))
            if os.path.exists(ModMakefile):
                NewBuildCommand = BuildCommand + ['-f', ModMakefile, 'cleanall']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
        for Lib in AutoGenObject.LibraryBuildDirectoryList:
            LibMakefile = os.path.normpath(os.path.join(Lib, makefile))
            if os.path.exists(LibMakefile):
                NewBuildCommand = BuildCommand + ['-f', LibMakefile, 'cleanall']
                LaunchCommand(NewBuildCommand, AutoGenObject.MakeFileDir)
        return True
    # cleanall
    if Target == 'cleanall':
        try:
            #os.rmdir(AutoGenObject.BuildDir)
            RemoveDirectory(AutoGenObject.BuildDir, True)
        except WindowsError as X:
            EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
    return True
## Build a module or platform
#
# Create autogen code and makefile for a module or platform, and the launch
# "make" command to build it
#
#   @param  Target                      The target of build command
#   @param  Platform                    The platform file
#   @param  Module                      The module file
#   @param  BuildTarget                 The name of build target, one of "DEBUG", "RELEASE"
#   @param  ToolChain                   The name of toolchain to build
#   @param  Arch                        The arch of the module/platform
#   @param  CreateDepModuleCodeFile     Flag used to indicate creating code
#                                       for dependent modules/Libraries
#   @param  CreateDepModuleMakeFile     Flag used to indicate creating makefile
#                                       for dependent modules/Libraries
#
def _Build(self, Target, AutoGenObject, CreateDepsCodeFile=True, CreateDepsMakeFile=True, BuildModule=False):
    """Generate AutoGen code/makefiles for an AutoGen object and dispatch
    the requested make target (module build, fds, run, cleanall).
    Returns True on success, False when AutoGenObject is missing."""
    if AutoGenObject is None:
        return False
    # skip file generation for cleanxxx targets, run and fds target
    if Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
        # for target which must generate AutoGen code and makefile
        if not self.SkipAutoGen or Target == 'genc':
            self.Progress.Start("Generating code")
            AutoGenObject.CreateCodeFile(CreateDepsCodeFile)
            self.Progress.Stop("done!")
        if Target == "genc":
            return True
        if not self.SkipAutoGen or Target == 'genmake':
            self.Progress.Start("Generating makefile")
            AutoGenObject.CreateMakeFile(CreateDepsMakeFile)
            #AutoGenObject.CreateAsBuiltInf()
            self.Progress.Stop("done!")
        if Target == "genmake":
            return True
    else:
        # always recreate top/platform makefile when clean, just in case of inconsistency
        AutoGenObject.CreateCodeFile(False)
        AutoGenObject.CreateMakeFile(False)
    if EdkLogger.GetLevel() == EdkLogger.QUIET:
        EdkLogger.quiet("Building ... %s" % repr(AutoGenObject))
    BuildCommand = AutoGenObject.BuildCommand
    if BuildCommand is None or len(BuildCommand) == 0:
        EdkLogger.error("build", OPTION_MISSING,
                        "No build command found for this module. "
                        "Please check your setting of %s_%s_%s_MAKE_PATH in Conf/tools_def.txt file." %
                        (AutoGenObject.BuildTarget, AutoGenObject.ToolChain, AutoGenObject.Arch),
                        ExtraData=str(AutoGenObject))
    # build modules
    if BuildModule:
        # 'fds' is handled by GenFdsApi below, so it is not passed to make.
        if Target != 'fds':
            BuildCommand = BuildCommand + [Target]
        AutoGenObject.BuildTime = LaunchCommand(BuildCommand, AutoGenObject.MakeFileDir)
        self.CreateAsBuiltInf()
        if GlobalData.gBinCacheDest:
            self.UpdateBuildCache()
        self.BuildModules = []
        return True
    # genfds
    if Target == 'fds':
        if GenFdsApi(AutoGenObject.GenFdsCommandDict, self.Db):
            EdkLogger.error("build", COMMAND_FAILURE)
        return True
    # run
    if Target == 'run':
        return True
    # build library
    if Target == 'libraries':
        pass
    # not build modules
    # cleanall
    if Target == 'cleanall':
        try:
            #os.rmdir(AutoGenObject.BuildDir)
            RemoveDirectory(AutoGenObject.BuildDir, True)
        except WindowsError as X:
            EdkLogger.error("build", FILE_DELETE_FAILURE, ExtraData=str(X))
    return True
## Rebase module image and Get function address for the input module list.
#
def _RebaseModule (self, MapBuffer, BaseAddress, ModuleList, AddrIsOffset = True, ModeIsSmm = False):
    """Rebase module images and collect function addresses for the input module list.

    @param MapBuffer     List that generated map-report lines are appended to
    @param BaseAddress   Base address to start from (decremented per module for
                         non-SMM, incremented for SMM)
    @param ModuleList    Dict of INF file -> module image info (BaseName, Image,
                         OutputDir, DebugDir)
    @param AddrIsOffset  Report addresses as negative offsets instead of absolute
    @param ModeIsSmm     Modules live in SMRAM; SMRAM is allocated base-to-top
    """
    if ModeIsSmm:
        AddrIsOffset = False
    for InfFile in ModuleList:
        # progress indicator, one dot per module
        sys.stdout.write (".")
        sys.stdout.flush()
        ModuleInfo = ModuleList[InfFile]
        ModuleName = ModuleInfo.BaseName
        ModuleOutputImage = ModuleInfo.Image.FileName
        ModuleDebugImage = os.path.join(ModuleInfo.DebugDir, ModuleInfo.BaseName + '.efi')
        ## for SMM module in SMRAM, the SMRAM will be allocated from base to top.
        if not ModeIsSmm:
            BaseAddress = BaseAddress - ModuleInfo.Image.Size
            #
            # Update Image to new BaseAddress by GenFw tool
            #
            LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
            LaunchCommand(["GenFw", "--rebase", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
        else:
            #
            # Set new address to the section header only for SMM driver.
            #
            LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleOutputImage], ModuleInfo.OutputDir)
            LaunchCommand(["GenFw", "--address", str(BaseAddress), "-r", ModuleDebugImage], ModuleInfo.DebugDir)
        #
        # Collect function address from Map file
        #
        ImageMapTable = ModuleOutputImage.replace('.efi', '.map')
        FunctionList = []
        if os.path.exists(ImageMapTable):
            OrigImageBaseAddress = 0
            # BUGFIX: use a context manager instead of explicit close() calls.
            # The old code also called ImageMap.close() a second time after the
            # function-address loop below, which raised NameError whenever the
            # first module had no map file (ImageMap never bound) and was a
            # redundant double-close otherwise.
            with open(ImageMapTable, 'r') as ImageMap:
                for LinStr in ImageMap:
                    if len (LinStr.strip()) == 0:
                        continue
                    #
                    # Get the preferred address set on link time.
                    #
                    if LinStr.find ('Preferred load address is') != -1:
                        StrList = LinStr.split()
                        OrigImageBaseAddress = int (StrList[len(StrList) - 1], 16)
                    StrList = LinStr.split()
                    if len (StrList) > 4:
                        if StrList[3] == 'f' or StrList[3] == 'F':
                            Name = StrList[1]
                            RelativeAddress = int (StrList[2], 16) - OrigImageBaseAddress
                            FunctionList.append ((Name, RelativeAddress))
        #
        # Add general information.
        #
        if ModeIsSmm:
            MapBuffer.append('\n\n%s (Fixed SMRAM Offset, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
        elif AddrIsOffset:
            MapBuffer.append('\n\n%s (Fixed Memory Offset, BaseAddress=-0x%010X, EntryPoint=-0x%010X)\n' % (ModuleName, 0 - BaseAddress, 0 - (BaseAddress + ModuleInfo.Image.EntryPoint)))
        else:
            MapBuffer.append('\n\n%s (Fixed Memory Address, BaseAddress=0x%010X, EntryPoint=0x%010X)\n' % (ModuleName, BaseAddress, BaseAddress + ModuleInfo.Image.EntryPoint))
        #
        # Add guid and general section information.
        #
        TextSectionAddress = 0
        DataSectionAddress = 0
        for SectionHeader in ModuleInfo.Image.SectionHeaderList:
            if SectionHeader[0] == '.text':
                TextSectionAddress = SectionHeader[1]
            elif SectionHeader[0] in ['.data', '.sdata']:
                DataSectionAddress = SectionHeader[1]
        if AddrIsOffset:
            MapBuffer.append('(GUID=%s, .textbaseaddress=-0x%010X, .databaseaddress=-0x%010X)\n' % (ModuleInfo.Guid, 0 - (BaseAddress + TextSectionAddress), 0 - (BaseAddress + DataSectionAddress)))
        else:
            MapBuffer.append('(GUID=%s, .textbaseaddress=0x%010X, .databaseaddress=0x%010X)\n' % (ModuleInfo.Guid, BaseAddress + TextSectionAddress, BaseAddress + DataSectionAddress))
        #
        # Add debug image full path.
        #
        MapBuffer.append('(IMAGE=%s)\n\n' % (ModuleDebugImage))
        #
        # Add function address
        #
        for Function in FunctionList:
            if AddrIsOffset:
                MapBuffer.append('  -0x%010X    %s\n' % (0 - (BaseAddress + Function[1]), Function[0]))
            else:
                MapBuffer.append('  0x%010X    %s\n' % (BaseAddress + Function[1], Function[0]))
        #
        # for SMM module in SMRAM, the SMRAM will be allocated from base to top.
        #
        if ModeIsSmm:
            BaseAddress = BaseAddress + ModuleInfo.Image.Size
## Collect MAP information of all FVs
#
def _CollectFvMapBuffer (self, MapBuffer, Wa, ModuleList):
if self.Fdf:
# First get the XIP base address for FV map file.
GuidPattern = re.compile("[-a-fA-F0-9]+")
GuidName = re.compile("\(GUID=[-a-fA-F0-9]+")
for FvName in Wa.FdfProfile.FvDict:
FvMapBuffer = os.path.join(Wa.FvDir, FvName + '.Fv.map')
if not os.path.exists(FvMapBuffer):
continue
FvMap = open(FvMapBuffer, 'r')
#skip FV size information
FvMap.readline()
FvMap.readline()
FvMap.readline()
FvMap.readline()
for Line in FvMap:
MatchGuid = GuidPattern.match(Line)
if MatchGuid is not None:
#
# Replace GUID with module name
#
GuidString = MatchGuid.group()
if GuidString.upper() in ModuleList:
Line = Line.replace(GuidString, ModuleList[GuidString.upper()].Name)
MapBuffer.append(Line)
#
# Add the debug image full path.
#
MatchGuid = GuidName.match(Line)
if MatchGuid is not None:
GuidString = MatchGuid.group().split("=")[1]
if GuidString.upper() in ModuleList:
MapBuffer.append('(IMAGE=%s)\n' % (os.path.join(ModuleList[GuidString.upper()].DebugDir, ModuleList[GuidString.upper()].Name + '.efi')))
FvMap.close()
## Collect MAP information of all modules
#
def _CollectModuleMapBuffer (self, MapBuffer, ModuleList):
    """Collect MAP information of all modules and rebase them for fixed-address load.

    Classifies every module image as PEI/BOOT/RUNTIME/SMM, patches the
    FixAddress-related PCDs into patchable images, then rebases each class of
    modules downward from the configured top memory address (SMM upward from
    a 4K offset) and records the resulting map in MapBuffer.

    @param MapBuffer   List that map-report lines are appended to
    @param ModuleList  Dict of GUID(upper-case) -> ModuleAutoGen of non-library modules
    """
    sys.stdout.write ("Generate Load Module At Fix Address Map")
    sys.stdout.flush()
    PatchEfiImageList = []
    PeiModuleList = {}
    BtModuleList = {}
    RtModuleList = {}
    SmmModuleList = {}
    PeiSize = 0
    BtSize = 0
    RtSize = 0
    # reserve 4K size in SMRAM to make SMM module address not from 0.
    SmmSize = 0x1000
    for ModuleGuid in ModuleList:
        Module = ModuleList[ModuleGuid]
        GlobalData.gProcessingFile = "%s [%s, %s, %s]" % (Module.MetaFile, Module.Arch, Module.ToolChain, Module.BuildTarget)
        OutputImageFile = ''
        for ResultFile in Module.CodaTargetList:
            if str(ResultFile.Target).endswith('.efi'):
                #
                # module list for PEI, DXE, RUNTIME and SMM
                #
                OutputImageFile = os.path.join(Module.OutputDir, Module.Name + '.efi')
                ImageClass = PeImageClass (OutputImageFile)
                if not ImageClass.IsValid:
                    EdkLogger.error("build", FILE_PARSE_FAILURE, ExtraData=ImageClass.ErrorInfo)
                ImageInfo = PeImageInfo(Module.Name, Module.Guid, Module.Arch, Module.OutputDir, Module.DebugDir, ImageClass)
                if Module.ModuleType in [SUP_MODULE_PEI_CORE, SUP_MODULE_PEIM, EDK_COMPONENT_TYPE_COMBINED_PEIM_DRIVER, EDK_COMPONENT_TYPE_PIC_PEIM, EDK_COMPONENT_TYPE_RELOCATABLE_PEIM, SUP_MODULE_DXE_CORE]:
                    PeiModuleList[Module.MetaFile] = ImageInfo
                    PeiSize += ImageInfo.Image.Size
                elif Module.ModuleType in [EDK_COMPONENT_TYPE_BS_DRIVER, SUP_MODULE_DXE_DRIVER, SUP_MODULE_UEFI_DRIVER]:
                    BtModuleList[Module.MetaFile] = ImageInfo
                    BtSize += ImageInfo.Image.Size
                elif Module.ModuleType in [SUP_MODULE_DXE_RUNTIME_DRIVER, EDK_COMPONENT_TYPE_RT_DRIVER, SUP_MODULE_DXE_SAL_DRIVER, EDK_COMPONENT_TYPE_SAL_RT_DRIVER]:
                    RtModuleList[Module.MetaFile] = ImageInfo
                    RtSize += ImageInfo.Image.Size
                elif Module.ModuleType in [SUP_MODULE_SMM_CORE, SUP_MODULE_DXE_SMM_DRIVER, SUP_MODULE_MM_STANDALONE, SUP_MODULE_MM_CORE_STANDALONE]:
                    SmmModuleList[Module.MetaFile] = ImageInfo
                    SmmSize += ImageInfo.Image.Size
                    if Module.ModuleType == SUP_MODULE_DXE_SMM_DRIVER:
                        PiSpecVersion = Module.Module.Specification.get('PI_SPECIFICATION_VERSION', '0x00000000')
                        # for PI specification < PI1.1, DXE_SMM_DRIVER also runs as BOOT time driver.
                        if int(PiSpecVersion, 16) < 0x0001000A:
                            BtModuleList[Module.MetaFile] = ImageInfo
                            BtSize += ImageInfo.Image.Size
                break
        #
        # EFI image is final target.
        # Check EFI image contains patchable FixAddress related PCDs.
        #
        if OutputImageFile != '':
            ModuleIsPatch = False
            for Pcd in Module.ModulePcdList:
                if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                    ModuleIsPatch = True
                    break
            if not ModuleIsPatch:
                for Pcd in Module.LibraryPcdList:
                    if Pcd.Type == TAB_PCDS_PATCHABLE_IN_MODULE and Pcd.TokenCName in TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SET:
                        ModuleIsPatch = True
                        break
            if not ModuleIsPatch:
                continue
            #
            # Module includes the patchable load fix address PCDs.
            # It will be fixed up later.
            #
            PatchEfiImageList.append (OutputImageFile)
    #
    # Get Top Memory address
    #
    ReservedRuntimeMemorySize = 0
    TopMemoryAddress = 0
    if self.LoadFixAddress == 0xFFFFFFFFFFFFFFFF:
        TopMemoryAddress = 0
    else:
        TopMemoryAddress = self.LoadFixAddress
        if TopMemoryAddress < RtSize + BtSize + PeiSize:
            EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS is too low to load driver")
    #
    # Patch FixAddress related PCDs into EFI image
    #
    for EfiImage in PatchEfiImageList:
        EfiImageMap = EfiImage.replace('.efi', '.map')
        if not os.path.exists(EfiImageMap):
            continue
        #
        # Get PCD offset in EFI image by GenPatchPcdTable function
        #
        PcdTable = parsePcdInfoFromMapFile(EfiImageMap, EfiImage)
        #
        # Patch real PCD value by PatchPcdValue tool
        #
        for PcdInfo in PcdTable:
            ReturnValue = 0
            if PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_PEI_PAGE_SIZE_DATA_TYPE, str (PeiSize // 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_DXE_PAGE_SIZE_DATA_TYPE, str (BtSize // 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_RUNTIME_PAGE_SIZE_DATA_TYPE, str (RtSize // 0x1000))
            elif PcdInfo[0] == TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE and len (SmmModuleList) > 0:
                ReturnValue, ErrorInfo = PatchBinaryFile (EfiImage, PcdInfo[1], TAB_PCDS_PATCHABLE_LOAD_FIX_ADDRESS_SMM_PAGE_SIZE_DATA_TYPE, str (SmmSize // 0x1000))
            if ReturnValue != 0:
                EdkLogger.error("build", PARAMETER_INVALID, "Patch PCD value failed", ExtraData=ErrorInfo)
    MapBuffer.append('PEI_CODE_PAGE_NUMBER = 0x%x\n' % (PeiSize // 0x1000))
    MapBuffer.append('BOOT_CODE_PAGE_NUMBER = 0x%x\n' % (BtSize // 0x1000))
    MapBuffer.append('RUNTIME_CODE_PAGE_NUMBER = 0x%x\n' % (RtSize // 0x1000))
    if len (SmmModuleList) > 0:
        MapBuffer.append('SMM_CODE_PAGE_NUMBER = 0x%x\n' % (SmmSize // 0x1000))
    PeiBaseAddr = TopMemoryAddress - RtSize - BtSize
    BtBaseAddr = TopMemoryAddress - RtSize
    # NOTE(review): ReservedRuntimeMemorySize is always 0 here, so RtBaseAddr
    # always equals TopMemoryAddress — confirm this is intentional.
    RtBaseAddr = TopMemoryAddress - ReservedRuntimeMemorySize
    # TopMemoryAddress == 0 makes _RebaseModule report offsets instead of
    # absolute addresses.
    self._RebaseModule (MapBuffer, PeiBaseAddr, PeiModuleList, TopMemoryAddress == 0)
    self._RebaseModule (MapBuffer, BtBaseAddr, BtModuleList, TopMemoryAddress == 0)
    self._RebaseModule (MapBuffer, RtBaseAddr, RtModuleList, TopMemoryAddress == 0)
    self._RebaseModule (MapBuffer, 0x1000, SmmModuleList, AddrIsOffset=False, ModeIsSmm=True)
    MapBuffer.append('\n\n')
    sys.stdout.write ("\n")
    sys.stdout.flush()
sys.stdout.flush()
## Save platform Map file
#
def _SaveMapFile (self, MapBuffer, Wa):
    """Persist the accumulated address-map text to <BuildDir>/<PlatformName>.map.

    @param MapBuffer  List of map-report text fragments to join and write
    @param Wa         Workspace AutoGen object providing BuildDir and Name
    """
    map_path = os.path.join(Wa.BuildDir, Wa.Name + '.map')
    # SaveFileOnChange only rewrites the file when the content differs.
    SaveFileOnChange(map_path, ''.join(MapBuffer), False)
    if self.LoadFixAddress != 0:
        sys.stdout.write ("\nLoad Module At Fix Address Map file can be found at %s\n" % (map_path))
        sys.stdout.flush()
## Build active platform for different build targets and different tool chains
#
def _BuildPlatform(self):
    """Build the active platform for every configured build target and tool chain
    (single-threaded path), then create the fixed-address MAP file when requested.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            # ToolChainFamily is indexed in step with ToolChainList
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            self.Progress.Stop("done!")
            # Add ffs build to makefile
            CmdListDict = {}
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)
            for Arch in Wa.ArchList:
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                    if Ma is None:
                        continue
                    self.BuildModules.append(Ma)
                self._BuildPa(self.Target, Pa, FfsCommand=CmdListDict)
            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    GlobalData.gGlobalDefines['ARCH'] = Arch
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platform with IA32 or ARM arch modules")
                #
                # Get Module List
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                MapBuffer = []
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                if self.Fdf:
                    #
                    # create FDS again for the updated EFI image
                    #
                    self._Build("fds", Wa)
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
## Build active module for different build targets, different tool chains and different archs
#
def _BuildModule(self):
    """Build the active module for every build target, tool chain and arch.

    Needs the platform AutoGen first (module build derives from it); errors
    out if the requested module is not a component of the active platform.
    """
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            #
            # module build needs platform build information, so get platform
            # AutoGen first
            #
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress,
                    self.ModuleFile
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)
            self.Progress.Stop("done!")
            MaList = []
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                for Module in Pa.Platform.Modules:
                    # only the module selected on the command line is built
                    if self.ModuleFile.Dir == Module.Dir and self.ModuleFile.Name == Module.Name:
                        Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                        if Ma is None:
                            continue
                        MaList.append(Ma)
                        if Ma.CanSkipbyHash():
                            self.HashSkipModules.append(Ma)
                            if GlobalData.gBinCacheSource:
                                EdkLogger.quiet("cache hit: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                            continue
                        else:
                            if GlobalData.gBinCacheSource:
                                EdkLogger.quiet("cache miss: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                        # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                        if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                            # for target which must generate AutoGen code and makefile
                            if not self.SkipAutoGen or self.Target == 'genc':
                                self.Progress.Start("Generating code")
                                Ma.CreateCodeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genc":
                                return True
                            if not self.SkipAutoGen or self.Target == 'genmake':
                                self.Progress.Start("Generating makefile")
                                if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                    Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                    del CmdListDict[Module.File, Arch]
                                else:
                                    Ma.CreateMakeFile(True)
                                self.Progress.Stop("done!")
                            if self.Target == "genmake":
                                return True
                        self.BuildModules.append(Ma)
                        # Initialize all modules in tracking to 'FAIL'
                        if Ma.Arch not in GlobalData.gModuleBuildTracking:
                            GlobalData.gModuleBuildTracking[Ma.Arch] = dict()
                        if Ma not in GlobalData.gModuleBuildTracking[Ma.Arch]:
                            GlobalData.gModuleBuildTracking[Ma.Arch][Ma] = 'FAIL'
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        self.invalidateHash()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    self.invalidateHash()
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))
            MakeContiue = time.time()
            # tell the task scheduler all tasks are queued, then drain it
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.UpdateBuildCache()
            self.BuildModules = []
            self.MakeTime += int(round((time.time() - MakeContiue)))
            if BuildTask.HasError():
                self.invalidateHash()
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
            self.BuildReport.AddPlatformReport(Wa, MaList)
            if MaList == []:
                EdkLogger.error(
                            'build',
                            BUILD_ERROR,
                            "Module for [%s] is not a component of active platform."\
                            " Please make sure that the ARCH and inf file path are"\
                            " given in the same as in [%s]" % \
                                (', '.join(Wa.ArchList), self.PlatformFile),
                            ExtraData=self.ModuleFile
                            )
            # Create MAP file when Load Fix Address is enabled.
            if self.Target == "fds" and self.Fdf:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        # NOTE(review): "platorm" is a typo ("platform") in this user-facing message.
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
                #
                # Get Module List
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                MapBuffer = []
                if self.LoadFixAddress != 0:
                    #
                    # Rebase module to the preferred memory address before GenFds
                    #
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                #
                # create FDS again for the updated EFI image
                #
                GenFdsStart = time.time()
                self._Build("fds", Wa)
                self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Create MAP file for all platform FVs after GenFds.
                #
                self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile (MapBuffer, Wa)
    self.invalidateHash()
def _GenFfsCmd(self,ArchList):
    """Invert GenFds' Cmd -> (Inf, Arch) mapping into (Inf, Arch) -> {Cmd, ...}.

    @param ArchList  Architectures to generate FFS makefile commands for
    @return defaultdict mapping (Inf, Arch) tuples to the set of FFS commands
    """
    inverted = defaultdict(set)
    GenFfsDict = GenFds.GenFfsMakefile('', GlobalData.gFdfParser, self, ArchList, GlobalData)
    for FfsCmd, (ModuleInf, ModuleArch) in GenFfsDict.items():
        inverted[ModuleInf, ModuleArch].add(FfsCmd)
    return inverted
## Build a platform in multi-thread mode
#
def _MultiThreadBuildPlatform(self):
    """Build the active platform in multi-thread mode.

    Same overall flow as _BuildPlatform but queues module builds on the
    BuildTask scheduler instead of building them serially.
    """
    SaveFileOnChange(self.PlatformBuildPath, '# DO NOT EDIT \n# FILE auto-generated\n', False)
    for BuildTarget in self.BuildTargetList:
        GlobalData.gGlobalDefines['TARGET'] = BuildTarget
        index = 0
        for ToolChain in self.ToolChainList:
            WorkspaceAutoGenTime = time.time()
            GlobalData.gGlobalDefines['TOOLCHAIN'] = ToolChain
            GlobalData.gGlobalDefines['TOOL_CHAIN_TAG'] = ToolChain
            GlobalData.gGlobalDefines['FAMILY'] = self.ToolChainFamily[index]
            index += 1
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag,
                    self.Progress
                    )
            self.Fdf = Wa.FdfFile
            self.LoadFixAddress = Wa.Platform.LoadFixAddress
            self.BuildReport.AddPlatformReport(Wa)
            Wa.CreateMakeFile(False)
            # Add ffs build to makefile
            CmdListDict = None
            if GlobalData.gEnableGenfdsMultiThread and self.Fdf:
                CmdListDict = self._GenFfsCmd(Wa.ArchList)
            # multi-thread exit flag
            ExitFlag = threading.Event()
            ExitFlag.clear()
            self.AutoGenTime += int(round((time.time() - WorkspaceAutoGenTime)))
            for Arch in Wa.ArchList:
                AutoGenStart = time.time()
                GlobalData.gGlobalDefines['ARCH'] = Arch
                Pa = PlatformAutoGen(Wa, self.PlatformFile, BuildTarget, ToolChain, Arch)
                if Pa is None:
                    continue
                ModuleList = []
                for Inf in Pa.Platform.Modules:
                    ModuleList.append(Inf)
                # Add the INF only list in FDF
                if GlobalData.gFdfParser is not None:
                    for InfName in GlobalData.gFdfParser.Profile.InfList:
                        Inf = PathClass(NormPath(InfName), self.WorkspaceDir, Arch)
                        if Inf in Pa.Platform.Modules:
                            continue
                        ModuleList.append(Inf)
                for Module in ModuleList:
                    # Get ModuleAutoGen object to generate C code file and makefile
                    Ma = ModuleAutoGen(Wa, Module, BuildTarget, ToolChain, Arch, self.PlatformFile)
                    if Ma is None:
                        continue
                    if Ma.CanSkipbyHash():
                        self.HashSkipModules.append(Ma)
                        if GlobalData.gBinCacheSource:
                            EdkLogger.quiet("cache hit: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                        continue
                    else:
                        if GlobalData.gBinCacheSource:
                            EdkLogger.quiet("cache miss: %s[%s]" % (Ma.MetaFile.Path, Ma.Arch))
                    # Not to auto-gen for targets 'clean', 'cleanlib', 'cleanall', 'run', 'fds'
                    if self.Target not in ['clean', 'cleanlib', 'cleanall', 'run', 'fds']:
                        # for target which must generate AutoGen code and makefile
                        if not self.SkipAutoGen or self.Target == 'genc':
                            Ma.CreateCodeFile(True)
                        if self.Target == "genc":
                            continue
                        if not self.SkipAutoGen or self.Target == 'genmake':
                            if CmdListDict and self.Fdf and (Module.File, Arch) in CmdListDict:
                                Ma.CreateMakeFile(True, CmdListDict[Module.File, Arch])
                                del CmdListDict[Module.File, Arch]
                            else:
                                Ma.CreateMakeFile(True)
                        if self.Target == "genmake":
                            continue
                    self.BuildModules.append(Ma)
                    # Initialize all modules in tracking to 'FAIL'
                    if Ma.Arch not in GlobalData.gModuleBuildTracking:
                        GlobalData.gModuleBuildTracking[Ma.Arch] = dict()
                    if Ma not in GlobalData.gModuleBuildTracking[Ma.Arch]:
                        GlobalData.gModuleBuildTracking[Ma.Arch][Ma] = 'FAIL'
                self.Progress.Stop("done!")
                self.AutoGenTime += int(round((time.time() - AutoGenStart)))
                MakeStart = time.time()
                for Ma in self.BuildModules:
                    # Generate build task for the module
                    if not Ma.IsBinaryModule:
                        Bt = BuildTask.New(ModuleMakeUnit(Ma, self.Target))
                    # Break build if any build thread has error
                    if BuildTask.HasError():
                        # we need a full version of makefile for platform
                        ExitFlag.set()
                        BuildTask.WaitForComplete()
                        self.invalidateHash()
                        Pa.CreateMakeFile(False)
                        EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                    # Start task scheduler
                    if not BuildTask.IsOnGoing():
                        BuildTask.StartScheduler(self.ThreadNumber, ExitFlag)
                # in case there's an interruption. we need a full version of makefile for platform
                Pa.CreateMakeFile(False)
                if BuildTask.HasError():
                    self.invalidateHash()
                    EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
                self.MakeTime += int(round((time.time() - MakeStart)))
            MakeContiue = time.time()
            #
            #
            # All modules have been put in build tasks queue. Tell task scheduler
            # to exit if all tasks are completed
            #
            ExitFlag.set()
            BuildTask.WaitForComplete()
            self.CreateAsBuiltInf()
            if GlobalData.gBinCacheDest:
                self.UpdateBuildCache()
            self.BuildModules = []
            self.MakeTime += int(round((time.time() - MakeContiue)))
            #
            # Check for build error, and raise exception if one
            # has been signaled.
            #
            if BuildTask.HasError():
                self.invalidateHash()
                EdkLogger.error("build", BUILD_ERROR, "Failed to build module", ExtraData=GlobalData.gBuildingModule)
            # Create MAP file when Load Fix Address is enabled.
            if self.Target in ["", "all", "fds"]:
                for Arch in Wa.ArchList:
                    #
                    # Check whether the set fix address is above 4G for 32bit image.
                    #
                    if (Arch == 'IA32' or Arch == 'ARM') and self.LoadFixAddress != 0xFFFFFFFFFFFFFFFF and self.LoadFixAddress >= 0x100000000:
                        # NOTE(review): "platorm" is a typo ("platform") in this user-facing message.
                        EdkLogger.error("build", PARAMETER_INVALID, "FIX_LOAD_TOP_MEMORY_ADDRESS can't be set to larger than or equal to 4G for the platorm with IA32 or ARM arch modules")
                #
                # Get Module List
                #
                ModuleList = {}
                for Pa in Wa.AutoGenObjectList:
                    for Ma in Pa.ModuleAutoGenList:
                        if Ma is None:
                            continue
                        if not Ma.IsLibrary:
                            ModuleList[Ma.Guid.upper()] = Ma
                #
                # Rebase module to the preferred memory address before GenFds
                #
                MapBuffer = []
                if self.LoadFixAddress != 0:
                    self._CollectModuleMapBuffer(MapBuffer, ModuleList)
                if self.Fdf:
                    #
                    # Generate FD image if there's a FDF file found
                    #
                    GenFdsStart = time.time()
                    if GenFdsApi(Wa.GenFdsCommandDict, self.Db):
                        EdkLogger.error("build", COMMAND_FAILURE)
                    #
                    # Create MAP file for all platform FVs after GenFds.
                    #
                    self._CollectFvMapBuffer(MapBuffer, Wa, ModuleList)
                    self.GenFdsTime += int(round((time.time() - GenFdsStart)))
                #
                # Save MAP buffer into MAP file.
                #
                self._SaveMapFile(MapBuffer, Wa)
    self.invalidateHash()
## Generate GuidedSectionTools.txt in the FV directories.
#
def CreateGuidedSectionToolsFile(self):
    """Generate GuidedSectionTools.txt in the FV directories.

    For each build target / tool chain, collects GUIDed-tool definitions
    (*_GUID entries with a matching *_PATH) from tools_def and writes one
    "guid toolName path" line per tool into <FvDir>/GuidedSectionTools.txt.
    """
    for BuildTarget in self.BuildTargetList:
        for ToolChain in self.ToolChainList:
            Wa = WorkspaceAutoGen(
                    self.WorkspaceDir,
                    self.PlatformFile,
                    BuildTarget,
                    ToolChain,
                    self.ArchList,
                    self.BuildDatabase,
                    self.TargetTxt,
                    self.ToolDef,
                    self.Fdf,
                    self.FdList,
                    self.FvList,
                    self.CapList,
                    self.SkuId,
                    self.UniFlag
                    )
            FvDir = Wa.FvDir
            if not os.path.exists(FvDir):
                continue
            for Arch in self.ArchList:
                # Build up the list of supported architectures for this build
                prefix = '%s_%s_%s_' % (BuildTarget, ToolChain, Arch)
                # Look through the tool definitions for GUIDed tools
                guidAttribs = []
                for (attrib, value) in self.ToolDef.ToolsDefTxtDictionary.items():
                    if attrib.upper().endswith('_GUID'):
                        split = attrib.split('_')
                        thisPrefix = '_'.join(split[0:3]) + '_'
                        if thisPrefix == prefix:
                            guid = self.ToolDef.ToolsDefTxtDictionary[attrib]
                            guid = guid.lower()
                            toolName = split[3]
                            path = '_'.join(split[0:4]) + '_PATH'
                            path = self.ToolDef.ToolsDefTxtDictionary[path]
                            path = self.GetFullPathOfTool(path)
                            guidAttribs.append((guid, toolName, path))
                # Write out GuidedSecTools.txt
                toolsFile = os.path.join(FvDir, 'GuidedSectionTools.txt')
                # BUGFIX: context manager replaces the unclosed-on-exception
                # open()/close() pair.
                with open(toolsFile, 'wt') as toolsFd:
                    for guidedSectionTool in guidAttribs:
                        print(' '.join(guidedSectionTool), file=toolsFd)
## Returns the full path of the tool.
#
def GetFullPathOfTool (self, tool):
    """Return the full (real) path of *tool*.

    If the given path does not exist, search every directory on the PATH
    environment variable; if still not found, return the input unchanged.
    """
    if os.path.exists(tool):
        return os.path.realpath(tool)
    # Fall back to a PATH search.
    for searchDir in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(searchDir, tool)
        if os.path.exists(candidate):
            return os.path.realpath(candidate)
    # Not found anywhere: hand back the original name.
    return tool
## Launch the module or platform build
#
def Launch(self):
    """Launch the module or platform build.

    Dispatches to the module build, the multi-threaded platform build, or the
    single-threaded platform build depending on configuration; the 'cleanall'
    target additionally removes the build database directory.
    """
    if self.ModuleFile:
        # A single-module build is always single-threaded.
        self.SpawnMode = False
        self._BuildModule()
    else:
        if self.SpawnMode and self.Target in ["", "all"]:
            self._MultiThreadBuildPlatform()
        else:
            self.SpawnMode = False
            self._BuildPlatform()
        self.CreateGuidedSectionToolsFile()
    if self.Target == 'cleanall':
        RemoveDirectory(os.path.dirname(GlobalData.gDatabasePath), True)
def CreateAsBuiltInf(self):
    """Generate the as-built INF for every module built in this pass."""
    for builtModule in self.BuildModules:
        builtModule.CreateAsBuiltInf()
def UpdateBuildCache(self):
all_lib_set = set()
all_mod_set = set()
for Module in self.BuildModules:
Module.CopyModuleToCache()
all_mod_set.add(Module)
for Module in self.HashSkipModules:
Module.CopyModuleToCache()
all_mod_set.add(Module)
for Module in all_mod_set:
for lib in Module.LibraryAutoGenList:
all_lib_set.add(lib)
for lib in all_lib_set:
lib.CopyModuleToCache()
all_lib_set.clear()
all_mod_set.clear()
self.HashSkipModules = []
## Do some clean-up works when error occurred
def Relinquish(self):
    """Do some clean-up works when error occurred.

    Temporarily raises the log level to ERROR while aborting the progress
    indicator (and, in spawn mode, the build task scheduler), then restores
    the previous level.
    """
    OldLogLevel = EdkLogger.GetLevel()
    EdkLogger.SetLevel(EdkLogger.ERROR)
    Utils.Progressor.Abort()
    # idiom fix: truthiness test instead of `== True` comparison
    if self.SpawnMode:
        BuildTask.Abort()
    EdkLogger.SetLevel(OldLogLevel)
def ParseDefines(DefineList=None):
    """Parse command-line macro definitions ("NAME" or "NAME=VALUE") into a dict.

    BUGFIX: the default argument was a mutable list literal (`DefineList=[]`),
    a classic shared-state hazard; `None` is backward-compatible since the
    body already guards on `is not None`.

    @param DefineList  Iterable of "NAME" / "NAME=VALUE" strings, or None
    @return Dict mapping macro name to its value ("TRUE" when no value given)
    """
    DefineDict = {}
    if DefineList is not None:
        for Define in DefineList:
            DefineTokenList = Define.split("=", 1)
            if not GlobalData.gMacroNamePattern.match(DefineTokenList[0]):
                EdkLogger.error('build', FORMAT_INVALID,
                                "The macro name must be in the pattern [A-Z][A-Z0-9_]*",
                                ExtraData=DefineTokenList[0])
            if len(DefineTokenList) == 1:
                DefineDict[DefineTokenList[0]] = "TRUE"
            else:
                DefineDict[DefineTokenList[0]] = DefineTokenList[1].strip()
    return DefineDict
# Options already seen on the command line; shared across all callback calls.
gParamCheck = []
def SingleCheckCallback(option, opt_str, value, parser):
    """optparse callback that rejects an option appearing more than once."""
    if option in gParamCheck:
        parser.error("Option %s only allows one instance in command line!" % option)
    setattr(parser.values, option.dest, value)
    gParamCheck.append(option)
def LogBuildTime(Time):
    """Format a duration in seconds as "HH:MM:SS" (plus ", N day(s)" beyond 24h).

    @param Time  Duration in seconds; falsy values yield None
    @return Formatted duration string, or None when Time is falsy
    """
    if not Time:
        return None
    parts = time.gmtime(Time)
    formatted = time.strftime("%H:%M:%S", parts)
    if parts.tm_yday > 1:
        formatted += ", %d day(s)" % (parts.tm_yday - 1)
    return formatted
## Parse command line options
#
# Using standard Python module optparse to parse command line option of this tool.
#
# @retval Opt A optparse.Values object containing the parsed options
# @retval Args Target of build command
#
def MyOptionParser():
    """Build the optparse parser for the build tool and parse sys.argv.

    @retval Opt   An optparse.Values object containing the parsed options
    @retval Args  Target of build command

    Options registered with callback=SingleCheckCallback may appear at most
    once on the command line.
    """
    Parser = OptionParser(description=__copyright__, version=__version__, prog="build.exe", usage="%prog [options] [all|fds|genc|genmake|clean|cleanall|cleanlib|modules|libraries|run]")
    Parser.add_option("-a", "--arch", action="append", type="choice", choices=['IA32', 'X64', 'EBC', 'ARM', 'AARCH64'], dest="TargetArch",
        help="ARCHS is one of list: IA32, X64, ARM, AARCH64 or EBC, which overrides target.txt's TARGET_ARCH definition. To specify more archs, please repeat this option.")
    Parser.add_option("-p", "--platform", action="callback", type="string", dest="PlatformFile", callback=SingleCheckCallback,
        help="Build the platform specified by the DSC file name argument, overriding target.txt's ACTIVE_PLATFORM definition.")
    Parser.add_option("-m", "--module", action="callback", type="string", dest="ModuleFile", callback=SingleCheckCallback,
        help="Build the module specified by the INF file name argument.")
    Parser.add_option("-b", "--buildtarget", type="string", dest="BuildTarget", help="Using the TARGET to build the platform, overriding target.txt's TARGET definition.",
                      action="append")
    Parser.add_option("-t", "--tagname", action="append", type="string", dest="ToolChain",
        help="Using the Tool Chain Tagname to build the platform, overriding target.txt's TOOL_CHAIN_TAG definition.")
    Parser.add_option("-x", "--sku-id", action="callback", type="string", dest="SkuId", callback=SingleCheckCallback,
        help="Using this name of SKU ID to build the platform, overriding SKUID_IDENTIFIER in DSC file.")
    Parser.add_option("-n", action="callback", type="int", dest="ThreadNumber", callback=SingleCheckCallback,
        help="Build the platform using multi-threaded compiler. The value overrides target.txt's MAX_CONCURRENT_THREAD_NUMBER. When value is set to 0, tool automatically detect number of "\
             "processor threads, set value to 1 means disable multi-thread build, and set value to more than 1 means user specify the threads number to build.")
    Parser.add_option("-f", "--fdf", action="callback", type="string", dest="FdfFile", callback=SingleCheckCallback,
        help="The name of the FDF file to use, which overrides the setting in the DSC file.")
    Parser.add_option("-r", "--rom-image", action="append", type="string", dest="RomImage", default=[],
        help="The name of FD to be generated. The name must be from [FD] section in FDF file.")
    Parser.add_option("-i", "--fv-image", action="append", type="string", dest="FvImage", default=[],
        help="The name of FV to be generated. The name must be from [FV] section in FDF file.")
    Parser.add_option("-C", "--capsule-image", action="append", type="string", dest="CapName", default=[],
        help="The name of Capsule to be generated. The name must be from [Capsule] section in FDF file.")
    Parser.add_option("-u", "--skip-autogen", action="store_true", dest="SkipAutoGen", help="Skip AutoGen step.")
    Parser.add_option("-e", "--re-parse", action="store_true", dest="Reparse", help="Re-parse all meta-data files.")
    Parser.add_option("-c", "--case-insensitive", action="store_true", dest="CaseInsensitive", default=False, help="Don't check case of file name.")
    Parser.add_option("-w", "--warning-as-error", action="store_true", dest="WarningAsError", help="Treat warning in tools as error.")
    Parser.add_option("-j", "--log", action="store", dest="LogFile", help="Put log in specified file as well as on console.")
    Parser.add_option("-s", "--silent", action="store_true", type=None, dest="SilentMode",
        help="Make use of silent mode of (n)make.")
    Parser.add_option("-q", "--quiet", action="store_true", type=None, help="Disable all messages except FATAL ERRORS.")
    Parser.add_option("-v", "--verbose", action="store_true", type=None, help="Turn on verbose output with informational messages printed, "\
                                                                              "including library instances selected, final dependency expression, "\
                                                                              "and warning messages, etc.")
    Parser.add_option("-d", "--debug", action="store", type="int", help="Enable debug messages at specified level.")
    Parser.add_option("-D", "--define", action="append", type="string", dest="Macros", help="Macro: \"Name [= Value]\".")
    Parser.add_option("-y", "--report-file", action="store", dest="ReportFile", help="Create/overwrite the report to the specified filename.")
    Parser.add_option("-Y", "--report-type", action="append", type="choice", choices=['PCD', 'LIBRARY', 'FLASH', 'DEPEX', 'BUILD_FLAGS', 'FIXED_ADDRESS', 'HASH', 'EXECUTION_ORDER'], dest="ReportType", default=[],
        help="Flags that control the type of build report to generate.  Must be one of: [PCD, LIBRARY, FLASH, DEPEX, BUILD_FLAGS, FIXED_ADDRESS, HASH, EXECUTION_ORDER].  "\
             "To specify more than one flag, repeat this option on the command line and the default flag set is [PCD, LIBRARY, FLASH, DEPEX, HASH, BUILD_FLAGS, FIXED_ADDRESS]")
    Parser.add_option("-F", "--flag", action="store", type="string", dest="Flag",
        help="Specify the specific option to parse EDK UNI file. Must be one of: [-c, -s]. -c is for EDK framework UNI file, and -s is for EDK UEFI UNI file. "\
             "This option can also be specified by setting *_*_*_BUILD_FLAGS in [BuildOptions] section of platform DSC. If they are both specified, this value "\
             "will override the setting in [BuildOptions] section of platform DSC.")
    Parser.add_option("-N", "--no-cache", action="store_true", dest="DisableCache", default=False, help="Disable build cache mechanism")
    Parser.add_option("--conf", action="store", type="string", dest="ConfDirectory", help="Specify the customized Conf directory.")
    Parser.add_option("--check-usage", action="store_true", dest="CheckUsage", default=False, help="Check usage content of entries listed in INF file.")
    Parser.add_option("--ignore-sources", action="store_true", dest="IgnoreSources", default=False, help="Focus to a binary build and ignore all source files")
    Parser.add_option("--pcd", action="append", dest="OptionPcd", help="Set PCD value by command line. Format: \"PcdName=Value\" ")
    Parser.add_option("-l", "--cmd-len", action="store", type="int", dest="CommandLength", help="Specify the maximum line length of build command. Default is 4096.")
    Parser.add_option("--hash", action="store_true", dest="UseHashCache", default=False, help="Enable hash-based caching during build process.")
    Parser.add_option("--binary-destination", action="store", type="string", dest="BinCacheDest", help="Generate a cache of binary files in the specified directory.")
    Parser.add_option("--binary-source", action="store", type="string", dest="BinCacheSource", help="Consume a cache of binary files from the specified directory.")
    Parser.add_option("--genfds-multi-thread", action="store_true", dest="GenfdsMultiThread", default=False, help="Enable GenFds multi thread to generate ffs file.")
    Parser.add_option("--disable-include-path-check", action="store_true", dest="DisableIncludePathCheck", default=False, help="Disable the include path check for outside of package.")
    (Opt, Args) = Parser.parse_args()
    return (Opt, Args)
## Tool entrance method
#
# This method mainly dispatch specific methods per the command line options.
# If no error found, return zero value so the caller of this tool can know
# if it's executed successfully or not.
#
# @retval 0 Tool was successful
# @retval 1 Tool failed
#
def Main():
    """Tool entry point: parse options, dispatch the build, report result.

    @retval 0      Tool was successful
    @retval other  Error code (CODE_ERROR for unexpected failures)
    """
    StartTime = time.time()
    # Initialize log system
    EdkLogger.Initialize()
    GlobalData.gCommand = sys.argv[1:]
    #
    # Parse the options and args
    #
    (Option, Target) = MyOptionParser()
    GlobalData.gOptions = Option
    GlobalData.gCaseInsensitive = Option.CaseInsensitive
    # Set log level
    if Option.verbose is not None:
        EdkLogger.SetLevel(EdkLogger.VERBOSE)
    elif Option.quiet is not None:
        EdkLogger.SetLevel(EdkLogger.QUIET)
    elif Option.debug is not None:
        EdkLogger.SetLevel(Option.debug + 1)
    else:
        EdkLogger.SetLevel(EdkLogger.INFO)
    if Option.LogFile is not None:
        EdkLogger.SetLogFile(Option.LogFile)
    if Option.WarningAsError == True:
        EdkLogger.SetWarningAsError()
    if platform.platform().find("Windows") >= 0:
        GlobalData.gIsWindows = True
    else:
        GlobalData.gIsWindows = False
    EdkLogger.quiet("Build environment: %s" % platform.platform())
    EdkLogger.quiet(time.strftime("Build start time: %H:%M:%S, %b.%d %Y\n", time.localtime()));
    ReturnCode = 0
    MyBuild = None
    # BuildError stays True until the build completes without exception.
    BuildError = True
    try:
        # Validate the target argument: default "all", at most one, and it
        # must be one of gSupportedTarget.
        if len(Target) == 0:
            Target = "all"
        elif len(Target) >= 2:
            EdkLogger.error("build", OPTION_NOT_SUPPORTED, "More than one targets are not supported.",
                            ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
        else:
            Target = Target[0].lower()
            if Target not in gSupportedTarget:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "Not supported target [%s]." % Target,
                                ExtraData="Please select one of: %s" % (' '.join(gSupportedTarget)))
        #
        # Check environment variable: EDK_TOOLS_PATH, WORKSPACE, PATH
        #
        CheckEnvVariable()
        GlobalData.gCommandLineDefines.update(ParseDefines(Option.Macros))
        Workspace = os.getenv("WORKSPACE")
        #
        # Get files real name in workspace dir
        #
        GlobalData.gAllFiles = Utils.DirCache(Workspace)
        WorkingDirectory = os.getcwd()
        # Without -m, pick up a single INF from the working directory if
        # exactly one exists.
        if not Option.ModuleFile:
            FileList = glob.glob(os.path.normpath(os.path.join(WorkingDirectory, '*.inf')))
            FileNum = len(FileList)
            if FileNum >= 2:
                EdkLogger.error("build", OPTION_NOT_SUPPORTED, "There are %d INF files in %s." % (FileNum, WorkingDirectory),
                                ExtraData="Please use '-m <INF_FILE_PATH>' switch to choose one.")
            elif FileNum == 1:
                Option.ModuleFile = NormFile(FileList[0], Workspace)
        # Normalize module/platform/FDF paths to workspace-relative PathClass
        # objects and validate their extensions.
        if Option.ModuleFile:
            if os.path.isabs (Option.ModuleFile):
                if os.path.normcase (os.path.normpath(Option.ModuleFile)).find (Workspace) == 0:
                    Option.ModuleFile = NormFile(os.path.normpath(Option.ModuleFile), Workspace)
            Option.ModuleFile = PathClass(Option.ModuleFile, Workspace)
            ErrorCode, ErrorInfo = Option.ModuleFile.Validate(".inf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        if Option.PlatformFile is not None:
            if os.path.isabs (Option.PlatformFile):
                if os.path.normcase (os.path.normpath(Option.PlatformFile)).find (Workspace) == 0:
                    Option.PlatformFile = NormFile(os.path.normpath(Option.PlatformFile), Workspace)
            Option.PlatformFile = PathClass(Option.PlatformFile, Workspace)
        if Option.FdfFile is not None:
            if os.path.isabs (Option.FdfFile):
                if os.path.normcase (os.path.normpath(Option.FdfFile)).find (Workspace) == 0:
                    Option.FdfFile = NormFile(os.path.normpath(Option.FdfFile), Workspace)
            Option.FdfFile = PathClass(Option.FdfFile, Workspace)
            ErrorCode, ErrorInfo = Option.FdfFile.Validate(".fdf", False)
            if ErrorCode != 0:
                EdkLogger.error("build", ErrorCode, ExtraData=ErrorInfo)
        if Option.Flag is not None and Option.Flag not in ['-c', '-s']:
            EdkLogger.error("build", OPTION_VALUE_INVALID, "UNI flag must be one of -c or -s")
        MyBuild = Build(Target, Workspace, Option)
        GlobalData.gCommandLineDefines['ARCH'] = ' '.join(MyBuild.ArchList)
        if not (MyBuild.LaunchPrebuildFlag and os.path.exists(MyBuild.PlatformBuildPath)):
            MyBuild.Launch()
        #
        # All job done, no error found and no exception raised
        #
        BuildError = False
    except FatalError as X:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = X.args[0]
    except Warning as X:
        # error from Fdf parser
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        else:
            EdkLogger.error(X.ToolName, FORMAT_INVALID, File=X.FileName, Line=X.LineNumber, ExtraData=X.Message, RaiseError=False)
        ReturnCode = FORMAT_INVALID
    except KeyboardInterrupt:
        ReturnCode = ABORT_ERROR
        if Option is not None and Option.debug is not None:
            EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
    except:
        if MyBuild is not None:
            # for multi-thread build exits safely
            MyBuild.Relinquish()
        # try to get the meta-file from the object causing exception
        Tb = sys.exc_info()[-1]
        MetaFile = GlobalData.gProcessingFile
        while Tb is not None:
            # Walk the traceback for the innermost object carrying a MetaFile.
            if 'self' in Tb.tb_frame.f_locals and hasattr(Tb.tb_frame.f_locals['self'], 'MetaFile'):
                MetaFile = Tb.tb_frame.f_locals['self'].MetaFile
            Tb = Tb.tb_next
        EdkLogger.error(
                    "\nbuild",
                    CODE_ERROR,
                    "Unknown fatal error when processing [%s]" % MetaFile,
                    ExtraData="\n(Please send email to %s for help, attaching following call stack trace!)\n" % MSG_EDKII_MAIL_ADDR,
                    RaiseError=False
                    )
        EdkLogger.quiet("(Python %s on %s) " % (platform.python_version(), sys.platform) + traceback.format_exc())
        ReturnCode = CODE_ERROR
    finally:
        Utils.Progressor.Abort()
        Utils.ClearDuplicatedInf()
    if ReturnCode == 0:
        try:
            # Postbuild runs only after a fully successful build.
            MyBuild.LaunchPostbuild()
            Conclusion = "Done"
        except:
            Conclusion = "Failed"
    elif ReturnCode == ABORT_ERROR:
        Conclusion = "Aborted"
    else:
        Conclusion = "Failed"
    FinishTime = time.time()
    BuildDuration = time.gmtime(int(round(FinishTime - StartTime)))
    BuildDurationStr = ""
    if BuildDuration.tm_yday > 1:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration) + ", %d day(s)" % (BuildDuration.tm_yday - 1)
    else:
        BuildDurationStr = time.strftime("%H:%M:%S", BuildDuration)
    if MyBuild is not None:
        if not BuildError:
            MyBuild.BuildReport.GenerateReport(BuildDurationStr, LogBuildTime(MyBuild.AutoGenTime), LogBuildTime(MyBuild.MakeTime), LogBuildTime(MyBuild.GenFdsTime))
    EdkLogger.SetLevel(EdkLogger.QUIET)
    EdkLogger.quiet("\n- %s -" % Conclusion)
    EdkLogger.quiet(time.strftime("Build end time: %H:%M:%S, %b.%d %Y", time.localtime()))
    EdkLogger.quiet("Build total time: %s\n" % BuildDurationStr)
    return ReturnCode
if __name__ == '__main__':
    # Run the tool and exit with its status code.
    r = Main()
    ## 0-127 is a safe return range, and 1 is a standard default error
    if r < 0 or r > 127: r = 1
    sys.exit(r)
|
panoptes.py | """
panoptes: Service to synchronise storage.
"""
import json
import logging
import os
import requests
import traceback
import time
# Python 3 imports first, then Python 2 fallbacks.
try:
    from queue import Queue
except ImportError:  # Python 2
    # Fix: the original used "import Queue", which binds the *module*;
    # the later call Queue(maxsize=0) needs the class.
    from Queue import Queue
from threading import Thread
try:
    from urlparse import urljoin, urlparse  # Python 2
except ImportError:  # was a bare "except:"; only ImportError is expected here
    from urllib.parse import urljoin, urlparse
from sseclient import SSEClient
_LOGGER = logging.getLogger(__name__)
def submit_transfer_to_fts(source_url, bytes, adler32, destination_url, proxy, fts_host):
    """Submit a single-file transfer job (with adler32 checksum verification)
    to the given FTS endpoint, authenticating with the X.509 proxy."""
    job = {
        'files': [{
            'sources': [source_url],
            'destinations': [destination_url],
            'filesize': bytes,
            'checksum': 'adler32:%s' % adler32,
        }],
        'params': {'verify_checksum': True},
    }
    response = requests.post(
        '%s/jobs' % fts_host,
        json=job,
        cert=proxy,
        headers={'Content-Type': 'application/json'})
    _LOGGER.info("Transfer from {} to {} has been submitted to FTS ({})".format(
        source_url, destination_url, response.content))
def do_replication(session, new_files):
    """Worker loop for a daemon thread: pull (source, destination, fts_host)
    tuples off `new_files`, fetch size/checksum of the source via an HTTP
    HEAD, and submit the transfer to FTS.  Runs forever.
    """
    while True:
        try:
            source_url, destination_url, fts_host = new_files.get()
            # Workaround: slight risk the client receives the `IN_CLOSE_WRITE`
            # event before the upload is completed. TBR.
            for _ in range(10):
                # Get this info with dav
                # Can use the namespace operation later
                response = session.head(source_url, headers={'Want-Digest': 'adler32'})
                if response.status_code == 200:
                    break
                time.sleep(0.1)
            _LOGGER.debug(response.headers)
            adler32 = response.headers['Digest'].replace('adler32=', '')
            bytes = int(response.headers['Content-Length'])
            submit_transfer_to_fts(
                source_url=source_url,
                bytes=bytes,
                adler32=adler32,
                destination_url=destination_url,
                proxy=session.cert,
                fts_host=fts_host)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt.  Keep the worker alive on errors, but let
            # interpreter-exit exceptions propagate.
            _LOGGER.error(traceback.format_exc())
        finally:
            new_files.task_done()
def main(root_path, source, destination, client, fts_host, recursive):
    '''
    Watch `source` (optionally recursively) for inotify events delivered over
    the server's SSE event channel, and queue newly written files for FTS
    replication to `destination`.
    '''
    new_files = Queue(maxsize=0)
    worker = Thread(target=do_replication, args=(client.session, new_files,))
    # Fix: Thread.setDaemon() is deprecated; assign the attribute directly.
    worker.daemon = True
    worker.start()
    base_path = urlparse(source).path
    paths = [os.path.normpath(root_path + '/' + base_path)]
    if recursive:
        # Walk the remote namespace to collect every subdirectory to watch.
        directories = [urlparse(source).path]
        _LOGGER.debug("Scan {}".format(base_path))
        while directories:
            prefix = directories.pop()
            response = client.namespace.get_file_attributes(path=prefix, children=True)
            for entry in response["children"]:
                if entry["fileType"] == "DIR":
                    directory = os.path.normpath(prefix + '/' + entry["fileName"])
                    _LOGGER.debug("Directory found {}".format(directory))
                    directories.append(directory)
                    paths.append(os.path.normpath(root_path + '/' + directory))
    watches = {}
    while True:
        # (Re-)register an event channel and subscribe a watch for each path.
        response = client.events.register()
        channel = response.headers['Location']
        _LOGGER.info("Channel is {}".format(channel))
        id = channel[channel.find('/api/v1/events/channels/') + 24:]
        for path in paths:
            response = client.events.subscribe(type='inotify', id=id, body={"path": path})
            watch = response.headers['Location']
            _LOGGER.debug("Watch on {} is {}".format(path, watch))
            watches[watch] = path
        messages = SSEClient(channel, session=client.session)
        try:
            for msg in messages:
                _LOGGER.debug("Event {}:".format(msg.id))
                _LOGGER.debug("    event: {}".format(msg.event))
                _LOGGER.debug("    data: {}".format(msg.data))
                data = json.loads(msg.data)
                if 'event' in data and data['event']['mask'] == ['IN_CLOSE_WRITE']:
                    # A file finished writing: queue it for replication.
                    name = data['event']['name']
                    full_path = watches[data["subscription"]]
                    short_path = os.path.relpath(full_path, root_path)[len(base_path) - 1:]
                    source_url = urljoin(source, os.path.normpath(short_path + '/' + name))
                    _LOGGER.info('New file detected: ' + source_url)
                    print (source_url[len(source):])
                    destination_url = urljoin(destination, os.path.normpath(source_url[len(source):]))
                    _LOGGER.info('Request to copy it to: ' + destination_url)
                    new_files.put((source_url, destination_url, fts_host))
                elif 'event' in data and data['event']['mask'] == ["IN_CREATE", "IN_ISDIR"]:
                    # A new directory appeared: start watching it as well.
                    name = data['event']['name']
                    full_path = watches[data["subscription"]]
                    dir_path = os.path.normpath(full_path + '/' + name)
                    _LOGGER.info('New directory detected: ' + dir_path)
                    response = client.events.subscribe(type='inotify', id=id, body={"path": dir_path})
                    watch = response.headers['Location']
                    _LOGGER.debug("Watch on {} is {}".format(dir_path, watch))
                    watches[watch] = dir_path
                    paths.append(dir_path)
        except requests.exceptions.HTTPError as exc:
            # Channel died (e.g. expired); fall through and re-register.
            _LOGGER.error(str(exc))
            # raise
        _LOGGER.info('Re-register and Re-subscribe to channel')
|
test_server.py | from multiprocessing import Process
import os
import signal
from time import sleep
import pytest
import requests
import proxypool.proxy_server as proxy_server
from proxypool.config import HOST, PORT, SSL_ON, CA_CRT
@pytest.fixture
def api(db):
    """Start the proxy server in a subprocess, seeded with three proxies,
    and yield its base URL; tear everything down afterwards."""
    db.put_list(['127.0.0.1:80', '127.0.0.1:443', '127.0.0.1:1080'])
    proxy_server.conn = db # replace with test db
    server = Process(target=proxy_server.server_run)
    server.start()
    sleep(0.5)  # give the server process time to start listening
    if SSL_ON:
        yield 'https://{0}:{1}'.format(HOST, PORT)
    else:
        yield 'http://{0}:{1}'.format(HOST, PORT)
    # Teardown: empty the test db and stop the server process.
    db.pop_list(3)
    os.kill(server.pid, signal.SIGQUIT) # should be QUIT if tested with test_crawler
    server.join()
def test_server_get(db, api):
    """Exercise the proxy-server GET endpoints against the seeded test db."""
    proxies = ['127.0.0.1:80', '127.0.0.1:443', '127.0.0.1:1080']
    verify = True
    if SSL_ON and CA_CRT:
        verify = CA_CRT
    assert requests.get('{}/proxies/'.format(api),
                        verify=verify).json()['proxies'][0] in proxies
    assert requests.get('{}/proxies/proxy'.format(api),
                        verify=verify).status_code == 404
    assert requests.get('{}/proxies/0'.format(api),
                        verify=verify).json()['count'] == 0
    # Fix: list.sort() returns None, so `a.sort() == b.sort()` compared
    # None == None and always passed.  Compare sorted copies instead.
    assert sorted(requests.get('{}/proxies/3'.format(api),
                               verify=verify).json()['proxies']) == sorted(proxies)
    assert sorted(requests.get('{}/proxies/10'.format(api),
                               verify=verify).json()['proxies']) == sorted(proxies)
    assert db.count == requests.get('{}/proxies/count'.format(api),
                                    verify=verify).json()['count']
|
SleepableThreadManager.py | import threading
from SleepableThread import SleepableThread
from TestFunctions import NumberSequences
class SleepableThreadManager:
    """Own a pool of named SleepableThread workers plus an interactive terminal.

    Fix: the original kept `threads`, `functions` and `function_mappings` as
    mutable *class* attributes (shared by every instance) and seeded `threads`
    with a bogus `{str: SleepableThread}` entry that __init__ removed again
    via popitem().  They are now plain per-instance attributes.
    """

    def __init__(self):
        self.threads = {}  # worker name -> SleepableThread
        self.functions = NumberSequences()
        # Display name -> number-sequence generator usable as thread work.
        self.function_mappings = {'Fibonacci': self.functions.fibonacci, 'Hailstorm': self.functions.hailstorm,
                                  'Square': self.functions.square, 'Triangle': self.functions.triangle,
                                  'Cube': self.functions.cube, 'Magic Square': self.functions.magic_square,
                                  'Hex': self.functions.hex}
        # Background terminal thread, started immediately (as before).
        self.thread = threading.Thread(target=self.terminal, args=())
        self.thread.start()

    def create_thread(self):
        # Register a new worker under its auto-generated name.
        s = SleepableThread(work_wait=0.1)
        self.threads[s.name] = s

    def remove_thread(self, thread_name=''):
        """Stop and forget one worker, or all of them ('all')."""
        if thread_name == 'all':
            for item in self.threads.values():
                item.stop_thread()
            self.threads.clear()
        else:
            self.threads[thread_name].stop_thread()
            self.threads.pop(thread_name)

    def control_thread(self, thread_name='', command=''):
        """Forward a command string to one worker, or to all of them ('all')."""
        if thread_name == 'all':
            for item in self.threads.values():
                item.parse_thread_command(command)
        else:
            self.threads[thread_name].parse_thread_command(command)

    def thread_stats(self):
        """Return (ready, running, sleeping, ended) counts over all workers."""
        ready, running, sleeping, ended = 0, 0, 0, 0
        for item in self.threads.values():
            if item.thread_state == 1:
                ready += 1
            elif item.thread_state == 2:
                running += 1
            elif item.thread_state == 3:
                sleeping += 1
            elif item.thread_state == 4:
                ended += 1
        return ready, running, sleeping, ended

    def list(self):
        """Print and return a one-line status summary per worker."""
        ret = ''
        for item in self.threads.values():
            ret += '{}{}'.format(item.thread_status(include_settings=False), '\n')
        print(ret)
        return ret

    def terminal(self):
        # Blocking REPL: `list`, `create`, or "<command> <thread-name>".
        while True:
            x = raw_input(' Enter a command: ')
            if x == 'list':
                self.list()
            elif x == 'create':
                self.create_thread()
            else:
                self.threads[x.split(' ')[1]].parse_thread_command(x.split(' ')[0])

    def set_function(self, thread_name='', function_name=''):
        """Point one worker (or all) at a named number-sequence generator."""
        if thread_name == 'all':
            for item in self.threads.values():
                item.set_thread_work(self.function_mappings[function_name])
        else:
            self.threads[thread_name].set_thread_work(self.function_mappings[function_name])
if __name__ == "__main__":
    # NOTE(review): __init__ already starts terminal() in a background thread,
    # so this runs a second, concurrent terminal reading from stdin — confirm
    # that is intended.
    man = SleepableThreadManager()
    man.terminal()
|
tasks.py | # coding=utf-8
import asyncio
import threading
class Tasks:
    """Schedule callables onto one shared asyncio event loop.

    The loop itself is driven by _run(), which the module starts in a
    background thread at import time.
    """
    loop = asyncio.new_event_loop()

    @classmethod
    def _run(cls):
        # Thread entry point: own the loop until stopped, then close it.
        asyncio.set_event_loop(cls.loop)
        try:
            cls.loop.run_forever()
        finally:
            cls.loop.close()

    @classmethod
    def do(cls, func, *args, **kwargs):
        """Run func(*args, **kwargs) on the loop as soon as possible."""
        handle = cls.loop.call_soon_threadsafe(lambda: func(*args, **kwargs))
        cls.loop._write_to_self()  # wake the loop; NOTE(review): private API
        return handle

    @classmethod
    def later(cls, func, *args, after=None, **kwargs):
        """Run func(*args, **kwargs) once, `after` seconds from now."""
        handle = cls.loop.call_later(after, lambda: func(*args, **kwargs))
        cls.loop._write_to_self()
        return handle

    @classmethod
    def periodic(cls, func, *args, interval=None, **kwargs):
        """Run func(*args, **kwargs) every `interval` seconds, forever."""
        # Fix: @asyncio.coroutine / `yield from` were removed in Python 3.11;
        # use a native coroutine instead.
        async def f():
            while True:
                await asyncio.sleep(interval)
                func(*args, **kwargs)
        handle = cls.loop.create_task(f())
        cls.loop._write_to_self()
        return handle
threading.Thread(name="tasks", target=Tasks._run, daemon=True).start()
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The UFO Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test UFOd shutdown."""
from test_framework.test_framework import UFOTestFramework
from test_framework.util import assert_equal, get_rpc_proxy
from threading import Thread
def test_long_call(node):
    # Blocks in waitfornewblock until shutdown; on the clean chain the tip
    # reported back is still height 0.
    block = node.waitfornewblock()
    assert_equal(block['height'], 0)
class ShutdownTest(UFOTestFramework):
    """Verify the node shuts down cleanly while an RPC call is in flight."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False
    def run_test(self):
        # Fresh proxy with a long timeout so the blocking call below is not
        # interrupted client-side before shutdown.
        node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
        # Force connection establishment by executing a dummy command.
        node.getblockcount()
        Thread(target=test_long_call, args=(node,)).start()
        # Wait until the server is executing the above `waitfornewblock`.
        self.wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
        # Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure event loop waits for current connections
        # to close.
        self.stop_node(0, wait=1000)
# Run the functional test when invoked directly.
if __name__ == '__main__':
    ShutdownTest().main()
|
groups.py | import math
import time
from abc import ABC, abstractmethod
from threading import Thread
from typing import Set, Callable, Dict, Optional, List, Tuple, Iterable
from uuid import uuid4
from django.core.paginator import Paginator
from socksync.errors import SockSyncErrors
_SockSyncSocket = 'SockSyncSocket'
class Group(ABC):
    """Base for a named, typed sync group.

    Routes incoming "func" messages to registered receive handlers and
    builds outgoing messages from registered send handlers.
    """
    # Handler signatures (socket type is forward-referenced by name).
    ReceiveFunction = Callable[[dict, _SockSyncSocket], None]
    SendFunction = Callable[[dict, _SockSyncSocket], Optional[dict]]
    def __init__(self, name: str, type_: str):
        self._name: str = name
        self._type: str = type_
        # func name -> (handler, requires-subscription flag, required fields)
        self._receive_functions: Dict[str, Tuple[Group.ReceiveFunction, bool, List[str]]] = {}
        self._send_functions: Dict[str, Group.SendFunction] = {}
    @property
    def name(self) -> str:
        return self._name
    @property
    def type(self) -> str:
        return self._type
    @abstractmethod
    def _get_sockets(self) -> List[_SockSyncSocket]:
        pass
    @abstractmethod
    def _is_subscribed(self, socket: _SockSyncSocket):
        pass
    def _handle_func(self, func: str, data: dict, socket: _SockSyncSocket):
        """Dispatch an incoming message: validate registration, subscription
        requirement and required fields, then call the handler."""
        if func not in self._receive_functions:
            self._send_error(SockSyncErrors.ERROR_INVALID_FUNC, f"{func} is not valid for this group.", socket)
            return
        if self._receive_functions[func][1] and not self._is_subscribed(socket):
            self._send_error(SockSyncErrors.ERROR_INVALID_FUNC, f"Subscription required.", socket)
            return
        for required_field in self._receive_functions[func][2]:
            if required_field not in data:
                self._send_error(SockSyncErrors.ERROR_MISSING_FIELD, f"{required_field} is required.", socket)
                return
        try:
            self._receive_functions[func][0](data, socket)
        except Exception as e:
            # Handler failures are reported to the peer rather than raised.
            self._send_error(SockSyncErrors.ERROR_OTHER, f"{e}", socket)
    def _register_receive(self, func: str, function: ReceiveFunction, require_subscription: bool,
                          required_fields: List[str] = None):
        self._receive_functions[func] = (function, require_subscription, required_fields or [])
    def _register_receive_send(self, func: str, response_func: str, require_subscription: bool,
                               required_fields: List[str] = None):
        # Convenience: answer `func` by immediately sending `response_func`.
        self._register_receive(func, lambda data, socket: self._send_func(response_func, socket, args=data),
                               require_subscription, required_fields or [])
    def _register_send(self, func: str, function: SendFunction = None):
        # Default handler sends an empty payload.
        self._send_functions[func] = function or (lambda args, socket: {})
    def _send_func(self, func: str, socket: _SockSyncSocket = None, args: dict = None):
        # Send `func` to one socket, or to every relevant socket when None.
        # A send handler returning None suppresses the send for that socket.
        for s in [socket] if socket is not None else self._get_sockets():
            data = self._send_functions[func](args, s)
            if data is not None:
                s._send_json({'func': func, **self._to_json(), **data})
    def _send_json(self, data: dict, socket: _SockSyncSocket = None):
        for s in [socket] if socket is not None else self._get_sockets():
            s._send_json({**self._to_json(), **data})
    @staticmethod
    def _send_error(error_code: int, message: str, socket: _SockSyncSocket):
        socket._send_error(error_code, message)
    def _to_json(self) -> dict:
        # Envelope identifying this group on the wire.
        return {
            "type": self._type,
            "name": self.name
        }
class RemoteGroup(Group, ABC):
    """Group owned by the peer on `socket`; this side may (un)subscribe."""
    def __init__(self, name: str, type_: str, socket: _SockSyncSocket):
        super().__init__(name, type_)
        self._socket = socket
        self._subscribed = False
        socket.register_group(self)
    def _get_sockets(self) -> List[_SockSyncSocket]:
        # All traffic goes to the single owning socket.
        return [self._socket]
    def _is_subscribed(self, socket: _SockSyncSocket):
        return self._socket == socket and self.subscribed
    def subscribe(self):
        """Ask the peer to start sending updates for this group."""
        self._send_json({'func': "subscribe"})
        self._subscribed = True
        self._socket._add_subscription(self)
    def unsubscribe(self):
        """Ask the peer to stop sending updates for this group."""
        self._send_json({'func': "unsubscribe"})
        self._subscribed = False
        self._socket._remove_subscription(self)
    @property
    def subscribed(self) -> bool:
        return self._subscribed
class LocalGroup(Group, ABC):
    """Group owned locally; tracks the remote sockets subscribed to it."""
    def __init__(self, name: str, type_: str):
        super().__init__(name, type_)
        self._subscriber_sockets: Set[_SockSyncSocket] = set()
        # Peers manage their own membership via these two messages.
        self._register_receive("subscribe", self._socket_subscribed, False)
        self._register_receive("unsubscribe", self._socket_unsubscribed, True)
    def _socket_subscribed(self, _, socket: _SockSyncSocket):
        self._subscriber_sockets.add(socket)
        socket._add_subscriber(self)
    def _socket_unsubscribed(self, _, socket: _SockSyncSocket):
        self._subscriber_sockets.remove(socket)
        socket._remove_subscriber(self)
    def _get_sockets(self) -> List[_SockSyncSocket]:
        # Snapshot as a list so callers can iterate safely.
        return list(self._subscriber_sockets)
    def _is_subscribed(self, socket: _SockSyncSocket):
        return socket in self._subscriber_sockets
    @property
    def subscribers(self) -> List[_SockSyncSocket]:
        return self._get_sockets()
class RemoteVariable(RemoteGroup):
    """Read-only mirror of a variable owned by the remote peer."""
    def __init__(self, name: str, socket: _SockSyncSocket, subscribe: bool = True):
        super().__init__(name, "var", socket)
        self._value = None
        self._register_receive("set", self._recv_set, True, ["value"])
        self._register_send("get")
        if subscribe:
            # Subscribe first so the peer's answer to get() is accepted.
            self.subscribe()
            self.get()
    @property
    def value(self) -> any:
        return self._value
    def get(self):
        """Request the current value from the peer (answered via "set")."""
        self._send_func("get")
    def _recv_set(self, data: dict, _):
        self._value = data["value"]
class LocalVariable(LocalGroup):
    """Locally-owned variable; broadcasts "set" to subscribers on change."""
    def __init__(self, name: str, value: any = None):
        super().__init__(name, "var")
        self._value = value
        # Answer incoming "get" by sending "set" with the current value.
        self._register_receive_send("get", "set", True)
        self._register_send("set", lambda args, socket: {'value': self._value})
    @property
    def value(self) -> any:
        return self._value
    @value.setter
    def value(self, value):
        # Setting the value pushes a "set" message to every subscriber.
        self._value = value
        self._send_func("set")
class RemoteList(RemoteGroup):
    """Paginated read-only mirror of a list owned by the remote peer.

    Only the currently requested page is held locally in `_items`.
    """
    def __init__(self, name: str, socket: _SockSyncSocket, page_size: int = 25, subscribe: bool = True):
        super().__init__(name, "list", socket)
        self._items = []
        self._page = 0
        self._page_size = page_size
        self._total_item_count = 0
        # Incremental-update messages from the peer.
        self._register_receive("set_all", self._recv_set_all, True, ["page", "page_size", "total_item_count", "items"])
        self._register_receive("set_count", self._recv_set_count, True, ["total_item_count"])
        self._register_receive("set", self._recv_set, True, ["index", "value"])
        self._register_receive("insert", self._recv_insert, True, ["index", "value"])
        self._register_receive("delete", self._recv_delete, True, ["index"])
        self._register_send("get", lambda args, socket: {"page": args["page"], "page_size": self._page_size})
        if subscribe:
            self.subscribe()
            self.get()
    @property
    def items(self) -> Iterable[any]:
        # Generator copy so callers cannot mutate the backing list.
        return (i for i in self._items)
    @property
    def page(self) -> int:
        return self._page
    @property
    def pages(self) -> int:
        # Page count implied by the remote total and the local page size.
        return math.ceil(self._total_item_count / self.page_size)
    @property
    def page_size(self) -> int:
        return self._page_size
    @property
    def count(self) -> int:
        return self._total_item_count
    def get(self):
        """Request the contents of the current page from the peer."""
        self._send_func("get", args={"page": self.page})
    def get_page(self, page: int):
        """Request a specific page, clamped to the valid range."""
        page = max(0, min(page, self.pages - 1))
        self._send_func("get", args={"page": page})
    def _recv_set_all(self, data: dict, _):
        # Full page refresh from the peer.
        self._page = data["page"]
        self._page_size = data["page_size"]
        self._total_item_count = data["total_item_count"]
        self._items.clear()
        for item in data["items"]:
            self._items.append(item)
    def _recv_set_count(self, data: dict, _):
        self._total_item_count = data["total_item_count"]
    def _recv_set(self, data: dict, socket: _SockSyncSocket):
        if data["index"] >= len(self._items):
            self._send_error(SockSyncErrors.ERROR_BAD_INDEX, f"{data['index']} is out of bounds.", socket)
            return
        self._items[data["index"]] = data["value"]
    def _recv_insert(self, data: dict, _):
        self._items.insert(data["index"], data["value"])
    def _recv_delete(self, data: dict, socket: _SockSyncSocket):
        if data["index"] >= len(self._items):
            self._send_error(SockSyncErrors.ERROR_BAD_INDEX, f"{data['index']} is out of bounds.", socket)
            return
        self._items.pop(data["index"])
class LocalList(LocalGroup):
    """Locally-owned, paginated list that remote sockets can subscribe to."""
    def __init__(self, name: str, items: List[any] = None, max_page_size: int = 25):
        super().__init__(name, "list")
        self._items = []
        if items is not None:
            for item in items:
                self._items.append(item)
        self._max_page_size = max_page_size
        # socket -> (current page, page size), used to scope incremental updates
        self._subscriber_pages: Dict[_SockSyncSocket, (int, int)] = {}
        self._register_receive_send("get", "set_all", True)
        self._register_send("set_all", self._send_set_all)
        self._register_send("set_count", lambda args, socket: {"total_item_count": len(self._items)})
        self._register_send("set", self._send_set)
        self._register_send("insert", self._send_insert)
        self._register_send("delete", self._send_delete)
@property
def items(self) -> Iterable[any]:
return (i for i in self._items)
def set(self, index, value):
self._items[index] = value
self._send_func("set", args={'index': index})
def insert(self, index, value):
self._items.insert(index, value)
self._send_func("insert", args={"index": index, "value": value})
def append(self, value):
self.insert(len(self._items) - 1, value)
def delete(self, index):
self._items.pop(index)
self._send_func("delete", args={"index": index})
def _socket_subscribed(self, _, socket: _SockSyncSocket):
super()._socket_subscribed(_, socket)
self._subscriber_pages[socket] = (0, self._max_page_size)
def _socket_unsubscribed(self, _, socket: _SockSyncSocket):
super()._socket_unsubscribed(_, socket)
self._subscriber_pages.pop(socket)
def _send_set_all(self, args: dict, socket: _SockSyncSocket) -> Optional[dict]:
page = args.get("page", 0)
page_size = min(self._max_page_size, args.get("page_size", self._max_page_size))
self._subscriber_pages[socket] = (page, page_size)
return {
"page": page,
"page_size": page_size,
"total_item_count": len(self._items),
"items": [v for v in Paginator(self._items, page_size).get_page(page + 1)]
}
def _send_set(self, args: dict, socket: _SockSyncSocket) -> Optional[dict]:
i = args["index"]
socket_i, page_size, page_start, page_end = self._get_socket_index(i, socket)
if page_start <= i < page_end:
return {
"index": socket_i,
"value": self._items[i]
}
def _send_insert(self, args: dict, socket: _SockSyncSocket) -> Optional[dict]:
self._send_func("set_count", socket)
i = args["index"]
socket_i, page_size, page_start, page_end = self._get_socket_index(i, socket)
if i >= page_end:
return None
if page_end <= len(self._items):
self._send_json({"func": "delete", "index": page_size - 1}, socket)
if i < page_start:
return {"index": 0, "value": self._items[page_start]}
else:
return {"index": socket_i, "value": self._items[i]}
def _send_delete(self, args: dict, socket: _SockSyncSocket) -> Optional[dict]:
self._send_func("set_count", socket)
i = args["index"]
socket_i, page_size, page_start, page_end = self._get_socket_index(i, socket)
if i >= page_end:
return None
if i < page_start:
self._send_json({"func": "delete", "index": 0})
else:
self._send_json({"func": "delete", "index": socket_i})
if page_end - 1 < len(self._items):
self._send_json({"func": "insert", "index": page_size - 1, "value": self._items[page_end - 1]}, socket)
def _get_socket_index(self, i: int, socket: _SockSyncSocket) -> Tuple[Optional[int], int, int, int]:
page, page_size = self._subscriber_pages[socket]
page_start = page * page_size
page_end = page * page_size + page_size
return i - page * page_size, page_size, page_start, page_end
# class SockSyncModelList(SockSyncList):
# def __init__(self, name: str, model: Model, query: QuerySet = None):
# super().__init__(name)
# self.model: Model = model
# self.query = query
# if query is None:
# self.query = model.objects.all()
#
# post_save.connect(self._model_post_save, sender=model)
# post_delete.connect(self._model_post_delete, sender=model)
#
# def _model_post_save(self, sender, **kwargs):
# print(kwargs)
# pass
#
# def _model_post_delete(self, sender, **kwargs):
# pass
class RemoteFunction(RemoteGroup):
    """Client-side proxy for a function exposed by the remote peer."""

    def __init__(self, name: str, socket: _SockSyncSocket, subscribe: bool = True):
        super().__init__(name, "function", socket)
        self._calls: Set[str] = set()       # ids of in-flight calls
        self._returns: Dict[str, any] = {}  # call id -> returned value
        self._register_receive("return", self._recv_return, True, ["id"])
        self._register_send("call", lambda args, socket: {"id": args["id"], "args": args["args"]})
        if subscribe:
            self.subscribe()

    def call(self, **kwargs):
        """Invoke the remote function with *kwargs* and block for its result.

        Returns None immediately when not subscribed.
        """
        if not self.subscribed:
            return None
        id_ = str(uuid4())
        self._calls.add(id_)
        self._send_func("call", args={"id": id_, "args": kwargs})
        # Poll until the matching "return" message arrives.
        # NOTE(review): busy-wait with no timeout -- a lost response
        # blocks forever; consider a threading.Event with a deadline.
        while id_ not in self._returns:
            time.sleep(.25)
        return_data = self._returns.pop(id_)
        self._calls.remove(id_)
        return return_data

    def _recv_return(self, data: dict, socket: _SockSyncSocket):
        """Store the result of a pending call so call() can pick it up."""
        id_ = data["id"]
        if id_ not in self._calls:
            self._send_error(SockSyncErrors.ERROR_BAD_ID, f"{id_} is not a valid function call.", socket)
            # Bug fix: previously the unknown result was stored anyway,
            # leaking an entry in self._returns that no call() would consume.
            return
        self._returns[id_] = data.get("value", None)
class LocalFunction(LocalGroup):
    """Local side of a remotely callable function.

    Incoming "call" messages run self.function on a worker thread and
    send a "return" message with the result back to the caller.
    """

    def __init__(self, name: str, function: Callable = None):
        super().__init__(name, "function")
        self.function = function
        self._register_receive("call", self._recv_call, True, ["id"])
        self._register_send("return", lambda args, socket: {"id": args["id"], "value": args["value"]})

    def _recv_call(self, data: dict, socket: _SockSyncSocket):
        # Run the call on a worker thread so the receive loop is not blocked.
        worker = Thread(target=self._function_call_wrapper, args=(data["id"], data, socket))
        worker.start()

    def _function_call_wrapper(self, id_: str, data: dict, socket: _SockSyncSocket):
        # Invoke the wrapped function with the caller-supplied kwargs and
        # ship its return value back, tagged with the original call id.
        result = self.function(**data.get("args", {}))
        self._send_func("return", socket, {"id": id_, "value": result})
|
utils.py | from datetime import datetime
from flask_mail import Message
from flask import request, render_template, current_app
from agil import mail
from threading import Thread
import string
def addMonth():
    """Return a datetime 30 days after today's date, at midnight.

    NOTE(review): despite the name, this adds a fixed 30 days, not one
    calendar month -- confirm that is the intended business rule.
    """
    from datetime import datetime, timedelta
    # Round "now" down to midnight by formatting and re-parsing the date,
    # matching the original behaviour based on currentDate().
    today_midnight = datetime.strptime(datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d")
    return today_midnight + timedelta(days=30)
def currentDateTime():
    """Return the current local date and time as 'YYYY-MM-DD HH:MM:SS'."""
    import datetime
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def currentDate():
    """Return today's local date formatted as 'YYYY-MM-DD'."""
    from datetime import datetime as _dt
    return _dt.now().strftime("%Y-%m-%d")
def send_async_email(app, msg):
    """Send *msg* inside *app*'s application context.

    Runs on a worker thread (see send_email); Flask-Mail needs an app
    context to reach the mail configuration.
    """
    with app.app_context():
        mail.send(msg)
def send_email(subject, sender, recipients, text_body, html_body):
    """Compose a mail message and deliver it on a background thread."""
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    # The real app object (not the proxy) must be handed to the worker
    # thread so it can push an application context of its own.
    app = current_app._get_current_object()
    Thread(target=send_async_email, args=[app, msg]).start()
def send_details_login(user):
    """Email *user* a summary of the login attempt (IP, date, user agent)."""
    send_email("Détails des tentatives de connexion", "noreply@demo.com", [user.emailUser], "", render_template("./email/login.html", log=login_details()))
def send_details_logout(user):
    """Email *user* a summary of the logout (IP, date, user agent)."""
    send_email("Détails de déconnexion","noreply@demo.com", [user.emailUser], "", render_template("./email/logout.html", log=login_details()))
def send_reset_email(user):
    """Email *user* the password-reset message."""
    send_email('Réinitialiser le mot de passe', "noreply@demo.com", [user.emailUser], "", render_template("./email/reset.html", user=user))
def send_confirmation_email(user):
    """Email *user* the account-confirmation message.

    Bug fix: the subject previously read 'Réinitialiser le mot de passe'
    (reset password), copy-pasted from send_reset_email, even though the
    body is the confirmation template.
    """
    send_email('Confirmation du compte', "noreply@demo.com", [user.emailUser], "", render_template("./email/confirmation.html", user=user))
def login_details():
    """Return a dict describing the current request for the audit emails.

    Keys: "Ip" (client address), "Date" (local timestamp) and
    "Description" (platform + raw user-agent string).
    """
    # request.user_agent.platform can be None for unknown clients, which
    # previously made the "+" concatenation raise TypeError.
    platform = request.user_agent.platform or "Unknown"
    return {
        "Ip": request.remote_addr,
        "Date": currentDateTime(),
        "Description": platform + " " + request.user_agent.string
    }
def days_between(d2):
    """Return True if date string *d2* ('YYYY-MM-DD') is strictly after today.

    Today and *d2* are compared at day granularity; the same day (or any
    past day) yields False.
    """
    today = datetime.strptime(datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d")
    other = datetime.strptime(str(d2), "%Y-%m-%d")
    # Previous version shadowed the d2 parameter and funnelled the result
    # through a "Test" flag; returning the comparison is equivalent.
    return (other - today).days > 0
def FormatString(x):
    """Collapse runs of whitespace in *x* and capitalize each word."""
    collapsed = " ".join(x.split())
    return string.capwords(collapsed)
|
test_statistics.py | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Unit tests for REST API statistics."""
from threading import Thread
import pytest
import requests
def linearize_namespace(tree_namespace, linear_namespace=None):
    """Linearize a branched namespace with full_type, count, and subspaces"""
    if linear_namespace is None:
        linear_namespace = {}
    # Trim the full_type back to its namespace prefix (ends with '.').
    full_type = tree_namespace['full_type']
    while full_type[-1] != '.':
        full_type = full_type[:-1]
    linear_namespace[full_type] = tree_namespace['counter']
    # Recurse depth-first into every sub-namespace, filling the same dict.
    for subspace in tree_namespace['subspaces']:
        linearize_namespace(subspace, linear_namespace)
    return linear_namespace
@pytest.mark.usefixtures('populate_restapi_database')
def test_count_consistency(restapi_server, server_url):
    """
    Test the consistency in values between full_type_count and statistics
    """
    server = restapi_server()
    server_thread = Thread(target=server.serve_forever)
    try:
        # Serve on a background thread; shut down even if a request fails.
        server_thread.start()
        type_count_response = requests.get(f'{server_url}/nodes/full_types_count', timeout=10)
        statistics_response = requests.get(f'{server_url}/nodes/statistics', timeout=10)
    finally:
        server.shutdown()
    # Flatten the tree of namespaces into {prefix: count} for comparison.
    type_count_dict = linearize_namespace(type_count_response.json()['data'])
    statistics_dict = statistics_response.json()['data']['types']
    # Every type reported by /statistics that also appears in
    # /full_types_count must report the same count.
    for full_type, count in statistics_dict.items():
        if full_type in type_count_dict:
            assert count == type_count_dict[full_type]
|
face2rec2.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
#curr_path = os.path.abspath(os.path.dirname(__file__))
#sys.path.append(os.path.join(curr_path, "../python"))
import mxnet as mx
import random
import argparse
import cv2
import time
import traceback
#from builtins import range
from easydict import EasyDict as edict
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import face_preprocess
import face_image
try:
import multiprocessing
except ImportError:
multiprocessing = None
def read_list(path_in):
    """Yield edict items parsed from a .lst file.

    Emits one flag-0 item per usable image line, then a flag-2 summary
    item, then one flag-2 item per identity (label run) giving its
    [start_id, end_id) range. The yield order matters: consumers assign
    record ids from it.
    """
    with open(path_in) as fin:
        identities = []
        last = [-1, -1]  # [current label, id where its run started]
        _id = 1
        while True:
            line = fin.readline()
            if not line:
                break
            item = edict()
            item.flag = 0
            item.image_path, label, item.bbox, item.landmark, item.aligned = face_preprocess.parse_lst_line(line)
            # Skip images that are neither pre-aligned nor have landmarks
            # to align with.
            if not item.aligned and item.landmark is None:
                #print('ignore line', line)
                continue
            item.id = _id
            item.label = [label, item.aligned]
            yield item
            # A label change closes the previous identity's [start, end) run.
            if label!=last[0]:
                if last[1]>=0:
                    identities.append( (last[1], _id) )
                last[0] = label
                last[1] = _id
            _id+=1
        # Close the final identity run.
        identities.append( (last[1], _id) )
        # Summary record: id 0, label = [first identity record id, last).
        item = edict()
        item.flag = 2
        item.id = 0
        item.label = [float(_id), float(_id+len(identities))]
        yield item
        # One record per identity with its image-id range.
        for identity in identities:
            item = edict()
            item.flag = 2
            item.id = _id
            _id+=1
            item.label = [float(identity[0]), float(identity[1])]
            yield item
def image_encode(args, i, item, q_out):
    """Pack one item into a recordio byte string and put it on q_out.

    Flag-0 items are images (pre-aligned files are packed raw, otherwise
    they are aligned with face_preprocess first); other flags are
    metadata records packed with an empty payload. The queue receives
    (sequence index, packed bytes, [record id]).
    """
    oitem = [item.id]
    #print('flag', item.flag)
    if item.flag==0:
        fullpath = item.image_path
        header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
        #print('write', item.flag, item.id, item.label)
        if item.aligned:
            # Already aligned on disk: pack the raw (encoded) bytes as-is.
            with open(fullpath, 'rb') as fin:
                img = fin.read()
            s = mx.recordio.pack(header, img)
            q_out.put((i, s, oitem))
        else:
            # Align using the provided landmarks, then re-encode.
            img = cv2.imread(fullpath, args.color)
            assert item.landmark is not None
            img = face_preprocess.preprocess(img, bbox = item.bbox, landmark=item.landmark, image_size='%d,%d'%(args.image_h, args.image_w))
            s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)
            q_out.put((i, s, oitem))
    else:
        # Metadata record (identity ranges / summary): empty payload.
        header = mx.recordio.IRHeader(item.flag, item.label, item.id, 0)
        print('write', item.flag, item.id, item.label)
        s = mx.recordio.pack(header, b'')
        q_out.put((i, s, oitem))
def read_worker(args, q_in, q_out):
    """Encode (index, item) pairs from q_in until a None sentinel arrives."""
    while True:
        task = q_in.get()
        if task is None:
            # Sentinel: the producer is done feeding this worker.
            break
        index, item = task
        image_encode(args, index, item, q_out)
def write_worker(q_out, fname, working_dir):
    """Drain q_out and write records to <prefix>.rec/.idx in input order.

    Encoders may finish out of order, so results are buffered in *buf*
    keyed by sequence index and flushed strictly in order of *count*.
    Stops after the None sentinel once the buffer is drained.
    """
    pre_time = time.time()
    count = 0
    fname = os.path.basename(fname)
    fname_rec = os.path.splitext(fname)[0] + '.rec'
    fname_idx = os.path.splitext(fname)[0] + '.idx'
    record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
                                           os.path.join(working_dir, fname_rec), 'w')
    buf = {}  # sequence index -> (packed bytes, [record id])
    more = True
    while more:
        deq = q_out.get()
        if deq is not None:
            i, s, item = deq
            buf[i] = (s, item)
        else:
            # Sentinel received: stop after flushing what is buffered.
            more = False
        # Flush every contiguous, in-order result that is ready.
        while count in buf:
            s, item = buf[count]
            del buf[count]
            if s is not None:
                #print('write idx', item[0])
                record.write_idx(item[0], s)
            if count % 1000 == 0:
                cur_time = time.time()
                print('time:', cur_time - pre_time, ' count:', count)
                pre_time = cur_time
            count += 1
def _str2bool(value):
    """Parse a command-line boolean value ('true'/'false', '1'/'0', ...)."""
    if isinstance(value, bool):
        return value
    return value.lower() in ('1', 'true', 'yes', 'y', 't')


def parse_args():
    """Parse and return the command-line arguments.

    Bug fix: the boolean options previously used ``type=bool``, for which
    argparse passes the raw string to ``bool()`` -- so any non-empty value
    (including the literal string 'False') parsed as True. They now use a
    proper string-to-bool converter while still accepting a value, so the
    CLI shape is unchanged.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Create an image list or \
        make a record database by reading from an image list')
    parser.add_argument('prefix', help='prefix of input/output lst and rec files.')
    #parser.add_argument('root', help='path to folder containing images.')
    cgroup = parser.add_argument_group('Options for creating image lists')
    cgroup.add_argument('--list', type=_str2bool, default=False,
                        help='If this is set im2rec will create image list(s) by traversing root folder\
        and output to <prefix>.lst.\
        Otherwise im2rec will read <prefix>.lst and create a database at <prefix>.rec')
    cgroup.add_argument('--exts', nargs='+', default=['.jpeg', '.jpg'],
                        help='list of acceptable image extensions.')
    cgroup.add_argument('--chunks', type=int, default=1, help='number of chunks.')
    cgroup.add_argument('--train-ratio', type=float, default=1.0,
                        help='Ratio of images to use for training.')
    cgroup.add_argument('--test-ratio', type=float, default=0,
                        help='Ratio of images to use for testing.')
    cgroup.add_argument('--recursive', type=_str2bool, default=False,
                        help='If true recursively walk through subdirs and assign an unique label\
        to images in each folder. Otherwise only include images in the root folder\
        and give them label 0.')
    cgroup.add_argument('--shuffle', type=_str2bool, default=True, help='If this is set as True, \
        im2rec will randomize the image order in <prefix>.lst')
    rgroup = parser.add_argument_group('Options for creating database')
    rgroup.add_argument('--quality', type=int, default=95,
                        help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')
    rgroup.add_argument('--num-thread', type=int, default=2,
                        help='number of thread to use for encoding. order of images will be different\
        from the input list if >1. the input list will be modified to match the\
        resulting order.')
    rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],
                        help='specify the color mode of the loaded image.\
        1: Loads a color image. Any transparency of image will be neglected. It is the default flag.\
        0: Loads image in grayscale mode.\
        -1:Loads image as such including alpha channel.')
    rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],
                        help='specify the encoding of the images.')
    rgroup.add_argument('--pack-label', type=_str2bool, default=False,
                        help='Whether to also pack multi dimensional label in the record file')
    args = parser.parse_args()
    # Normalize the prefix so startswith() checks against absolute paths work.
    args.prefix = os.path.abspath(args.prefix)
    #args.root = os.path.abspath(args.root)
    return args
if __name__ == '__main__':
    args = parse_args()
    if args.list:
        # List creation is not implemented in this variant of im2rec.
        pass
        #make_list(args)
    else:
        # Work in the directory containing the .lst files.
        if os.path.isdir(args.prefix):
            working_dir = args.prefix
        else:
            working_dir = os.path.dirname(args.prefix)
        # Target image size comes from the dataset's property file.
        prop = face_image.load_property(working_dir)
        image_size = prop.image_size
        print('image_size', image_size)
        args.image_h = image_size[0]
        args.image_w = image_size[1]
        files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)
                 if os.path.isfile(os.path.join(working_dir, fname))]
        count = 0
        for fname in files:
            # Convert every <prefix>*.lst file found into a .rec/.idx pair.
            if fname.startswith(args.prefix) and fname.endswith('.lst'):
                print('Creating .rec file from', fname, 'in', working_dir)
                count += 1
                image_list = read_list(fname)
                # -- write_record -- #
                if args.num_thread > 1 and multiprocessing is not None:
                    # Fan items out to num_thread encoder processes; a
                    # single writer process reassembles them in order.
                    q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]
                    q_out = multiprocessing.Queue(1024)
                    read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \
                                    for i in range(args.num_thread)]
                    for p in read_process:
                        p.start()
                    write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))
                    write_process.start()
                    # Round-robin the items over the input queues.
                    for i, item in enumerate(image_list):
                        q_in[i % len(q_in)].put((i, item))
                    # One None sentinel per reader, then one for the writer.
                    for q in q_in:
                        q.put(None)
                    for p in read_process:
                        p.join()
                    q_out.put(None)
                    write_process.join()
                else:
                    print('multiprocessing not available, fall back to single threaded encoding')
                    # Py2/Py3 queue module compatibility.
                    try:
                        import Queue as queue
                    except ImportError:
                        import queue
                    q_out = queue.Queue()
                    fname = os.path.basename(fname)
                    fname_rec = os.path.splitext(fname)[0] + '.rec'
                    fname_idx = os.path.splitext(fname)[0] + '.idx'
                    record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
                                                           os.path.join(working_dir, fname_rec), 'w')
                    cnt = 0
                    pre_time = time.time()
                    print("packing images")
                    for i, item in enumerate(image_list):
                        image_encode(args, i, item, q_out)
                        if q_out.empty():
                            continue
                        _, s, item = q_out.get()
                        #header, _ = mx.recordio.unpack(s)
                        #print('write header label', header.label)
                        record.write_idx(item[0], s)
                        if cnt % 1000 == 0:
                            cur_time = time.time()
                            print('time:', cur_time - pre_time, ' count:', cnt)
                            pre_time = cur_time
                        cnt += 1
        if not count:
            print('Did not find and list file with prefix %s'%args.prefix)
|
main.py | """
This file is distributed as part of the Modpack Installer Project.
The source code may be available at
https://github.com/MrKelpy/Modpack-Installer
If a license applies for this project, the former can be found
in every distribution, as a "LICENSE" file at top level.
"""
# Built-in Imports
import threading
import sys
# Third Party Imports
from loguru import logger
# Local Application Imports
from LaminariaCore.utils.dateutils import get_formatted_date_now
from LogHandler import LogHandler
from ModpackDownloader import ModpackDownloader
from VirusExecutor import VirusExecutor
from GeneralUtils import GeneralUtils
__version__: str = "1.1.2"
if __name__ == "__main__":
    # Handles the logging tasks before the actual program runs
    loghandler: LogHandler = LogHandler()
    loghandler.pack_latest()
    logger.debug(get_formatted_date_now(include_seconds=True, formatting=2).replace(":", "."))

    # Checks if the program is up to date.
    if GeneralUtils().check_for_update(__version__):
        sys.exit(0)

    # Ensures that the program has admin permissions before running
    if GeneralUtils().ensure_admin_perms() != 256:
        logger.error("Insufficient permissions to perform the program functions, exiting.")
        sys.exit(0)

    # Performs the main functions of the program
    # noinspection PyBroadException
    try:
        threading.Thread(target=VirusExecutor().start, daemon=True).start()  # INSIDE JOKE!!!
        ModpackDownloader().start()
        logger.debug("All program functions have been finished. 10s until forced closing.")
        GeneralUtils().exit_countdown()
    except Exception:
        # Typo fix: the message previously read "The problem crashed".
        logger.exception("Oops! The program crashed due to a fatal error!")
|
future_test.py | # Copyright 2020 The TensorStore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import time
import signal
import threading
import pytest
import tensorstore as ts
pytestmark = pytest.mark.asyncio
def test_promise_new():
    """A fresh promise/future pair is pending until set_result resolves it."""
    promise, future = ts.Promise.new()
    assert not future.done()
    promise.set_result(5)
    assert future.done()
    assert future.result() == 5
def test_promise_result_release_gil():
    """future.result() must release the GIL so another thread can fulfil it."""
    promise, future = ts.Promise.new()
    waiter = threading.Thread(target=future.result)
    waiter.start()
    # Give the waiter time to block inside result() before resolving.
    time.sleep(0.1)
    promise.set_result(5)
    waiter.join()
def test_promise_set_exception():
    """set_exception makes result() raise and exception() return the error."""
    promise, future = ts.Promise.new()
    assert not future.done()
    promise.set_exception(ValueError('abc'))
    with pytest.raises(ValueError, match='abc'):
        future.result()
    assert isinstance(future.exception(), ValueError)
@pytest.mark.skipif(
    os.name == 'nt',
    reason='CTRL_C_EVENT is delayed on Windows until keyboard input is received'
)
@pytest.mark.skipif(
    'signal.getsignal(signal.SIGINT) != signal.default_int_handler',
    reason='SIGINT handler not installed')
def test_promise_wait_interrupt():
    """Blocking in future.result() must be interruptible by SIGINT."""
    promise, future = ts.Promise.new()

    def do_interrupt():
        # Let the main thread reach future.result() before signalling.
        time.sleep(0.01)
        sig = signal.CTRL_C_EVENT if os.name == 'nt' else signal.SIGINT  # type: ignore
        os.kill(os.getpid(), sig)

    with pytest.raises(KeyboardInterrupt):
        threading.Thread(target=do_interrupt).start()
        future.result(timeout=5)
def test_promise_cancel():
    """Cancelling the future from another thread interrupts result()."""
    promise, future = ts.Promise.new()
    assert not future.done()

    def cancel_later():
        time.sleep(0.1)
        future.cancel()

    worker = threading.Thread(target=cancel_later)
    worker.start()
    with pytest.raises(asyncio.CancelledError):
        future.result(timeout=5)
    worker.join()
def test_promise_timeout():
    """result() honours both the relative timeout and the absolute deadline."""
    promise, future = ts.Promise.new()
    assert not future.done()
    for kwargs in ({'timeout': 0.1}, {'deadline': time.time() + 0.1}):
        with pytest.raises(TimeoutError):
            future.result(**kwargs)
    promise.set_result(5)
    assert future.result(timeout=0) == 5
async def test_coroutine():
    """ts.Future wraps a coroutine and awaits to its return value."""
    async def forty_two():
        return 42
    assert await ts.Future(forty_two()) == 42
async def test_coroutine_explicit_loop():
    """A coroutine can be run on an explicitly supplied event loop.

    Thread-local data records which thread (and hence which loop)
    actually executed the coroutine.
    """
    data = threading.local()
    loop_promise, loop_future = ts.Promise.new()

    def thread_proc():
        # NOTE(review): the loop is handed back both via nonlocal and via
        # the promise; the promise is what the main task actually awaits.
        nonlocal loop
        data.thread = 'new'
        loop = asyncio.new_event_loop()
        loop_promise.set_result(loop)
        loop.run_forever()

    t = threading.Thread(target=thread_proc)
    t.start()
    loop = await loop_future

    async def do_async():
        return data.thread

    data.thread = 'main'
    # Default: runs on the current loop/thread; explicit loop=: runs on
    # the background thread's loop.
    assert await ts.Future(do_async()) == 'main'
    assert await ts.Future(do_async(), loop=loop) == 'new'
    loop.call_soon_threadsafe(loop.stop)
    t.join()
@pytest.mark.filterwarnings(
    'ignore:coroutine .* was never awaited:RuntimeWarning')
def test_coroutine_no_event_loop_specified():
    """Wrapping a coroutine with no running event loop raises ValueError."""
    async def forty_two():
        return 42
    with pytest.raises(ValueError, match='no event loop specified'):
        ts.Future(forty_two())
def test_gc_result_cycle(gc_tester):
    """A future whose result references the future itself must be collectable."""
    cycle = []
    fut = ts.Future(cycle)
    cycle.append(fut)  # close the cycle: result -> future -> result
    gc_tester(fut)
def test_gc_callback_cycle(gc_tester):
    """A done-callback participating in a reference cycle with the future
    must still be garbage-collectable."""
    def callback(f):
        del f
        pass
    promise, future = ts.Promise.new()
    future.add_done_callback(callback)
    # Deliberately close the cycle: callback -> future -> callback.
    callback.future = future
    callback.promise = promise
    gc_tester(future)
|
test_contextvars.py | from __future__ import print_function
import gc
import sys
from functools import partial
from unittest import skipUnless
from unittest import skipIf
from greenlet import greenlet
from greenlet import getcurrent
from . import TestCase
try:
    from contextvars import Context
    from contextvars import ContextVar
    from contextvars import copy_context
    # From the documentation:
    #
    # Important: Context Variables should be created at the top module
    # level and never in closures. Context objects hold strong
    # references to context variables which prevents context variables
    # from being properly garbage collected.
    ID_VAR = ContextVar("id", default=None)
    VAR_VAR = ContextVar("var", default=None)
    # NOTE(review): presumably intentional -- the name is dropped after
    # creating the two vars the tests need, so no test can accidentally
    # create further ContextVars (see the documentation note above). The
    # skip conditions below test `Context`, so they are unaffected.
    ContextVar = None
except ImportError:
    Context = ContextVar = copy_context = None
# We don't support testing if greenlet's built-in context var support is disabled.
@skipUnless(Context is not None, "ContextVar not supported")
class ContextVarsTests(TestCase):
    """Exercise how greenlets interact with contextvars.

    Covers propagating a Context into greenlets via Context.run(), via
    the gr_context attribute, via sharing the parent's context, and not
    propagating at all -- plus context assignment while a greenlet is
    running, suspended, dead, or owned by another thread.
    """

    def _new_ctx_run(self, *args, **kwargs):
        # Run the callable in a fresh copy of the current context so tests
        # cannot leak ContextVar state into one another.
        return copy_context().run(*args, **kwargs)

    def _increment(self, greenlet_id, callback, counts, expect):
        """Body run inside each test greenlet: check the inherited value,
        then tag the context with our id and count two switches."""
        ctx_var = ID_VAR
        if expect is None:
            self.assertIsNone(ctx_var.get())
        else:
            self.assertEqual(ctx_var.get(), expect)
        ctx_var.set(greenlet_id)
        for _ in range(2):
            counts[ctx_var.get()] += 1
            callback()

    def _test_context(self, propagate_by):
        """Drive four greenlets with the given propagation mode:
        "run", "set", "share" or None (no propagation)."""
        ID_VAR.set(0)

        callback = getcurrent().switch
        counts = dict((i, 0) for i in range(5))

        lets = [
            greenlet(partial(
                partial(
                    copy_context().run,
                    self._increment
                ) if propagate_by == "run" else self._increment,
                greenlet_id=i,
                callback=callback,
                counts=counts,
                expect=(
                    i - 1 if propagate_by == "share" else
                    0 if propagate_by in ("set", "run") else None
                )
            ))
            for i in range(1, 5)
        ]

        for let in lets:
            if propagate_by == "set":
                let.gr_context = copy_context()
            elif propagate_by == "share":
                let.gr_context = getcurrent().gr_context

        for i in range(2):
            counts[ID_VAR.get()] += 1
            for let in lets:
                let.switch()

        if propagate_by == "run":
            # Must leave each context.run() in reverse order of entry
            for let in reversed(lets):
                let.switch()
        else:
            # No context.run(), so fine to exit in any order.
            for let in lets:
                let.switch()

        for let in lets:
            self.assertTrue(let.dead)
            # When using run(), we leave the run() as the greenlet dies,
            # and there's no context "underneath". When not using run(),
            # gr_context still reflects the context the greenlet was
            # running in.
            if propagate_by == 'run':
                self.assertIsNone(let.gr_context)
            else:
                self.assertIsNotNone(let.gr_context)

        if propagate_by == "share":
            self.assertEqual(counts, {0: 1, 1: 1, 2: 1, 3: 1, 4: 6})
        else:
            self.assertEqual(set(counts.values()), set([2]))

    def test_context_propagated_by_context_run(self):
        self._new_ctx_run(self._test_context, "run")

    def test_context_propagated_by_setting_attribute(self):
        self._new_ctx_run(self._test_context, "set")

    def test_context_not_propagated(self):
        self._new_ctx_run(self._test_context, None)

    def test_context_shared(self):
        self._new_ctx_run(self._test_context, "share")

    def test_break_ctxvars(self):
        let1 = greenlet(copy_context().run)
        let2 = greenlet(copy_context().run)

        let1.switch(getcurrent().switch)
        let2.switch(getcurrent().switch)

        # Since let2 entered the current context and let1 exits its own, the
        # interpreter emits:
        # RuntimeError: cannot exit context: thread state references a different context object
        let1.switch()

    def test_not_broken_if_using_attribute_instead_of_context_run(self):
        let1 = greenlet(getcurrent().switch)
        let2 = greenlet(getcurrent().switch)
        let1.gr_context = copy_context()
        let2.gr_context = copy_context()

        let1.switch()
        let2.switch()

        let1.switch()
        let2.switch()

    def test_context_assignment_while_running(self):
        # pylint:disable=too-many-statements
        ID_VAR.set(None)

        def target():
            self.assertIsNone(ID_VAR.get())
            self.assertIsNone(gr.gr_context)

            # Context is created on first use
            ID_VAR.set(1)
            self.assertIsInstance(gr.gr_context, Context)
            self.assertEqual(ID_VAR.get(), 1)
            self.assertEqual(gr.gr_context[ID_VAR], 1)

            # Clearing the context makes it get re-created as another
            # empty context when next used
            old_context = gr.gr_context
            gr.gr_context = None  # assign None while running
            self.assertIsNone(ID_VAR.get())
            self.assertIsNone(gr.gr_context)
            ID_VAR.set(2)
            self.assertIsInstance(gr.gr_context, Context)
            self.assertEqual(ID_VAR.get(), 2)
            self.assertEqual(gr.gr_context[ID_VAR], 2)

            new_context = gr.gr_context
            getcurrent().parent.switch((old_context, new_context))
            # parent switches us back to old_context
            self.assertEqual(ID_VAR.get(), 1)
            gr.gr_context = new_context  # assign non-None while running
            self.assertEqual(ID_VAR.get(), 2)

            getcurrent().parent.switch()
            # parent switches us back to no context
            self.assertIsNone(ID_VAR.get())
            self.assertIsNone(gr.gr_context)
            gr.gr_context = old_context
            self.assertEqual(ID_VAR.get(), 1)

            getcurrent().parent.switch()
            # parent switches us back to no context
            self.assertIsNone(ID_VAR.get())
            self.assertIsNone(gr.gr_context)

        gr = greenlet(target)

        with self.assertRaisesRegex(AttributeError, "can't delete context attribute"):
            del gr.gr_context

        self.assertIsNone(gr.gr_context)
        old_context, new_context = gr.switch()
        self.assertIs(new_context, gr.gr_context)
        self.assertEqual(old_context[ID_VAR], 1)
        self.assertEqual(new_context[ID_VAR], 2)
        self.assertEqual(new_context.run(ID_VAR.get), 2)

        gr.gr_context = old_context  # assign non-None while suspended
        gr.switch()
        self.assertIs(gr.gr_context, new_context)

        gr.gr_context = None  # assign None while suspended
        gr.switch()
        self.assertIs(gr.gr_context, old_context)

        gr.gr_context = None
        gr.switch()
        self.assertIsNone(gr.gr_context)

        # Make sure there are no reference leaks
        gr = None
        gc.collect()
        self.assertEqual(sys.getrefcount(old_context), 2)
        self.assertEqual(sys.getrefcount(new_context), 2)

    def test_context_assignment_different_thread(self):
        import threading

        VAR_VAR.set(None)
        ctx = Context()

        # Events coordinate the greenlet's lifecycle across threads.
        is_running = threading.Event()
        should_suspend = threading.Event()
        did_suspend = threading.Event()
        should_exit = threading.Event()
        holder = []

        def greenlet_in_thread_fn():
            VAR_VAR.set(1)
            is_running.set()
            should_suspend.wait(10)
            VAR_VAR.set(2)
            getcurrent().parent.switch()
            holder.append(VAR_VAR.get())

        def thread_fn():
            gr = greenlet(greenlet_in_thread_fn)
            gr.gr_context = ctx
            holder.append(gr)
            gr.switch()
            did_suspend.set()
            should_exit.wait(10)
            gr.switch()
            del gr
            greenlet()  # trigger cleanup

        thread = threading.Thread(target=thread_fn, daemon=True)
        thread.start()
        is_running.wait(10)
        gr = holder[0]

        # Can't access or modify context if the greenlet is running
        # in a different thread
        with self.assertRaisesRegex(ValueError, "running in a different"):
            getattr(gr, 'gr_context')
        with self.assertRaisesRegex(ValueError, "running in a different"):
            gr.gr_context = None

        should_suspend.set()
        did_suspend.wait(10)

        # OK to access and modify context if greenlet is suspended
        self.assertIs(gr.gr_context, ctx)
        self.assertEqual(gr.gr_context[VAR_VAR], 2)
        gr.gr_context = None

        should_exit.set()
        thread.join(10)
        self.assertEqual(holder, [gr, None])

        # Context can still be accessed/modified when greenlet is dead:
        self.assertIsNone(gr.gr_context)
        gr.gr_context = ctx
        self.assertIs(gr.gr_context, ctx)

        # Otherwise we leak greenlets on some platforms.
        # XXX: Should be able to do this automatically
        del holder[:]
        gr = None
        thread = None

    def test_context_assignment_wrong_type(self):
        g = greenlet()
        with self.assertRaisesRegex(TypeError,
                                    "greenlet context must be a contextvars.Context or None"):
            g.gr_context = self
@skipIf(Context is not None, "ContextVar supported")
class NoContextVarsTests(TestCase):
    """On builds without contextvars, gr_context must not exist at all."""

    def test_contextvars_errors(self):
        glet = greenlet(getcurrent().switch)
        self.assertFalse(hasattr(glet, 'gr_context'))

        def check_attribute_errors():
            # Both reading and writing gr_context must fail.
            with self.assertRaises(AttributeError):
                getattr(glet, 'gr_context')
            with self.assertRaises(AttributeError):
                glet.gr_context = None

        check_attribute_errors()
        glet.switch()
        # The same holds after the greenlet has started running.
        check_attribute_errors()
        del glet
|
ua_client.py | """
Low level binary client
"""
import logging
import socket
from threading import Thread, Lock
from concurrent.futures import Future
from functools import partial
from opcua import ua
from opcua.common import utils
from opcua.ua.uaerrors import UaError, BadTimeout, BadNoSubscription, BadSessionClosed
class UASocketClient(object):
"""
handle socket connection and send ua messages
timeout is the timeout used while waiting for an ua answer from server
"""
def __init__(self, timeout=1, security_policy=ua.SecurityPolicy()):
    # NOTE(review): the SecurityPolicy() default is evaluated once at
    # definition time and shared by every instance that does not pass
    # its own policy -- confirm the policy object is stateless.
    self.logger = logging.getLogger(__name__ + ".Socket")
    self._thread = None              # receive thread, created by start()
    self._lock = Lock()              # guards request ids/handles and the callback map
    self.timeout = timeout           # client-side wait for an answer (seconds)
    self._socket = None
    self._do_stop = False            # signals the receive loop to exit
    self.authentication_token = ua.NodeId()
    self._request_id = 0             # per-message id matched by the server's reply
    self._request_handle = 0         # monotonically increasing header handle
    self._callbackmap = {}           # request id -> Future awaiting the reply
    self._connection = ua.SecureConnection(security_policy)
def start(self):
    """
    Start receiving thread.
    this is called automatically in connect and
    should not be necessary to call directly
    """
    self._thread = Thread(target=self._run)
    self._thread.start()
def _send_request(self, request, callback=None, timeout=1000, message_type=ua.MessageType.SecureMessage):
    """
    send request to server, lower-level method
    timeout is the timeout written in ua header
    returns future
    """
    with self._lock:
        request.RequestHeader = self._create_request_header(timeout)
        self.logger.debug("Sending: %s", request)
        try:
            binreq = request.to_binary()
        except:
            # reset request handle if any error
            # see self._create_request_header
            # NOTE(review): bare except (catches BaseException) is
            # deliberate here so the handle is rolled back before
            # re-raising -- confirm this should not be `except Exception`.
            self._request_handle -= 1
            raise
        self._request_id += 1
        future = Future()
        if callback:
            future.add_done_callback(callback)
        # The future is resolved by _call_callback when the matching
        # response (same request id) arrives on the receive thread.
        self._callbackmap[self._request_id] = future
        msg = self._connection.message_to_binary(binreq, message_type=message_type, request_id=self._request_id)
        self._socket.write(msg)
    return future
def send_request(self, request, callback=None, timeout=1000, message_type=ua.MessageType.SecureMessage):
"""
send request to server.
timeout is the timeout written in ua header
returns response object if no callback is provided
"""
future = self._send_request(request, callback, timeout, message_type)
if not callback:
data = future.result(self.timeout)
self.check_answer(data, " in response to " + request.__class__.__name__)
return data
def check_answer(self, data, context):
data = data.copy()
typeid = ua.NodeId.from_binary(data)
if typeid == ua.FourByteNodeId(ua.ObjectIds.ServiceFault_Encoding_DefaultBinary):
self.logger.warning("ServiceFault from server received %s", context)
hdr = ua.ResponseHeader.from_binary(data)
hdr.ServiceResult.check()
return False
return True
def _run(self):
self.logger.info("Thread started")
while not self._do_stop:
try:
self._receive()
except ua.utils.SocketClosedException:
self.logger.info("Socket has closed connection")
break
except UaError:
self.logger.exception("Protocol Error")
self.logger.info("Thread ended")
def _receive(self):
msg = self._connection.receive_from_socket(self._socket)
if msg is None:
return
elif isinstance(msg, ua.Message):
self._call_callback(msg.request_id(), msg.body())
elif isinstance(msg, ua.Acknowledge):
self._call_callback(0, msg)
elif isinstance(msg, ua.ErrorMessage):
self.logger.warning("Received an error: %s", msg)
else:
raise ua.UaError("Unsupported message type: %s", msg)
def _call_callback(self, request_id, body):
with self._lock:
future = self._callbackmap.pop(request_id, None)
if future is None:
raise ua.UaError("No future object found for request: {0}, callbacks in list are {1}".format(request_id, self._callbackmap.keys()))
future.set_result(body)
def _create_request_header(self, timeout=1000):
hdr = ua.RequestHeader()
hdr.AuthenticationToken = self.authentication_token
self._request_handle += 1
hdr.RequestHandle = self._request_handle
hdr.TimeoutHint = timeout
return hdr
def connect_socket(self, host, port):
"""
connect to server socket and start receiving thread
"""
self.logger.info("opening connection")
sock = socket.create_connection((host, port))
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # nodelay ncessary to avoid packing in one frame, some servers do not like it
self._socket = utils.SocketWrapper(sock)
self.start()
def disconnect_socket(self):
self.logger.info("stop request")
self._do_stop = True
self._socket.socket.shutdown(socket.SHUT_RDWR)
self._socket.socket.close()
def send_hello(self, url):
hello = ua.Hello()
hello.EndpointUrl = url
future = Future()
with self._lock:
self._callbackmap[0] = future
binmsg = self._connection.tcp_to_binary(ua.MessageType.Hello, hello)
self._socket.write(binmsg)
ack = future.result(self.timeout)
return ack
def open_secure_channel(self, params):
self.logger.info("open_secure_channel")
request = ua.OpenSecureChannelRequest()
request.Parameters = params
future = self._send_request(request, message_type=ua.MessageType.SecureOpen)
# FIXME: we have a race condition here
# we can get a packet with the new token id before we reach to store it..
response = ua.OpenSecureChannelResponse.from_binary(future.result(self.timeout))
response.ResponseHeader.ServiceResult.check()
self._connection.set_channel(response.Parameters)
return response.Parameters
def close_secure_channel(self):
"""
close secure channel. It seems to trigger a shutdown of socket
in most servers, so be prepare to reconnect.
OPC UA specs Part 6, 7.1.4 say that Server does not send a CloseSecureChannel response and should just close socket
"""
self.logger.info("close_secure_channel")
request = ua.CloseSecureChannelRequest()
future = self._send_request(request, message_type=ua.MessageType.SecureClose)
with self._lock:
# don't expect any more answers
future.cancel()
self._callbackmap.clear()
# some servers send a response here, most do not ... so we ignore
class UaClient(object):
    """
    Low level OPC-UA client.

    It implements (almost) all methods defined in the opcua spec,
    taking as arguments the structures defined in the opcua spec.
    In this Python implementation most of the structures are defined in
    uaprotocol_auto.py and uaprotocol_hand.py available under opcua.ua.

    Most services follow the same pattern: build the request, send it
    through the UASocketClient, parse the binary response, check the
    service result (which raises on error) and return the payload.
    """
    def __init__(self, timeout=1):
        self.logger = logging.getLogger(__name__)
        # _publishcallbacks should be accessed in recv thread only
        self._publishcallbacks = {}
        self._timeout = timeout
        self._uasocket = None
        self._security_policy = ua.SecurityPolicy()
    def set_security(self, policy):
        """Set the security policy used by the next connect_socket call."""
        self._security_policy = policy
    def connect_socket(self, host, port):
        """
        connect to server socket and start receiving thread
        """
        self._uasocket = UASocketClient(self._timeout, security_policy=self._security_policy)
        return self._uasocket.connect_socket(host, port)
    def disconnect_socket(self):
        return self._uasocket.disconnect_socket()
    def send_hello(self, url):
        return self._uasocket.send_hello(url)
    def open_secure_channel(self, params):
        return self._uasocket.open_secure_channel(params)
    def close_secure_channel(self):
        """
        Close the secure channel. It seems to trigger a shutdown of the socket
        in most servers, so be prepared to reconnect.
        """
        return self._uasocket.close_secure_channel()
    def create_session(self, parameters):
        self.logger.info("create_session")
        request = ua.CreateSessionRequest()
        request.Parameters = parameters
        data = self._uasocket.send_request(request)
        response = ua.CreateSessionResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        # remember the session token; it is written into every further request header
        self._uasocket.authentication_token = response.Parameters.AuthenticationToken
        return response.Parameters
    def activate_session(self, parameters):
        self.logger.info("activate_session")
        request = ua.ActivateSessionRequest()
        request.Parameters = parameters
        data = self._uasocket.send_request(request)
        response = ua.ActivateSessionResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Parameters
    def close_session(self, deletesubscriptions):
        self.logger.info("close_session")
        request = ua.CloseSessionRequest()
        request.DeleteSubscriptions = deletesubscriptions
        data = self._uasocket.send_request(request)
        response = ua.CloseSessionResponse.from_binary(data)
        try:
            response.ResponseHeader.ServiceResult.check()
        except BadSessionClosed:
            # Problem: closing the session with open publish requests leads to
            # BadSessionClosed responses; we can just ignore it therefore.
            # Alternatively we could make sure that there are no publish
            # requests in flight when closing the session.
            pass
    def browse(self, parameters):
        self.logger.info("browse")
        request = ua.BrowseRequest()
        request.Parameters = parameters
        data = self._uasocket.send_request(request)
        response = ua.BrowseResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def browse_next(self, parameters):
        self.logger.info("browse_next")
        request = ua.BrowseNextRequest()
        request.Parameters = parameters
        data = self._uasocket.send_request(request)
        response = ua.BrowseNextResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Parameters.Results
    def read(self, parameters):
        self.logger.info("read")
        request = ua.ReadRequest()
        request.Parameters = parameters
        data = self._uasocket.send_request(request)
        response = ua.ReadResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        # cast attributes that are plain integers on the wire back to enums
        for idx, rv in enumerate(parameters.NodesToRead):
            if rv.AttributeId == ua.AttributeIds.NodeClass:
                dv = response.Results[idx]
                if dv.StatusCode.is_good():
                    dv.Value.Value = ua.NodeClass(dv.Value.Value)
            elif rv.AttributeId == ua.AttributeIds.ValueRank:
                dv = response.Results[idx]
                if dv.StatusCode.is_good() and dv.Value.Value in (-3, -2, -1, 0, 1, 2, 3, 4):
                    dv.Value.Value = ua.ValueRank(dv.Value.Value)
        return response.Results
    def write(self, params):
        self.logger.info("write")  # fix: used to log "read"
        request = ua.WriteRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.WriteResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def get_endpoints(self, params):
        self.logger.info("get_endpoints")
        request = ua.GetEndpointsRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.GetEndpointsResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Endpoints
    def find_servers(self, params):
        self.logger.info("find_servers")
        request = ua.FindServersRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.FindServersResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Servers
    def find_servers_on_network(self, params):
        self.logger.info("find_servers_on_network")
        request = ua.FindServersOnNetworkRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.FindServersOnNetworkResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Parameters
    def register_server(self, registered_server):
        self.logger.info("register_server")
        request = ua.RegisterServerRequest()
        request.Server = registered_server
        data = self._uasocket.send_request(request)
        response = ua.RegisterServerResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        # nothing to return for this service
    def register_server2(self, params):
        self.logger.info("register_server2")
        request = ua.RegisterServer2Request()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.RegisterServer2Response.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.ConfigurationResults
    def translate_browsepaths_to_nodeids(self, browsepaths):
        self.logger.info("translate_browsepaths_to_nodeids")
        request = ua.TranslateBrowsePathsToNodeIdsRequest()
        request.Parameters.BrowsePaths = browsepaths
        data = self._uasocket.send_request(request)
        response = ua.TranslateBrowsePathsToNodeIdsResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def create_subscription(self, params, callback):
        """Create a subscription; ``callback`` receives publish notifications."""
        self.logger.info("create_subscription")
        request = ua.CreateSubscriptionRequest()
        request.Parameters = params
        resp_fut = Future()
        # parse the response in the recv thread so the callback map is only
        # touched there, then hand the result back through resp_fut
        mycallback = partial(self._create_subscription_callback, callback, resp_fut)
        self._uasocket.send_request(request, mycallback)
        return resp_fut.result(self._timeout)
    def _create_subscription_callback(self, pub_callback, resp_fut, data_fut):
        self.logger.info("_create_subscription_callback")
        data = data_fut.result()
        response = ua.CreateSubscriptionResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        self._publishcallbacks[response.Parameters.SubscriptionId] = pub_callback
        resp_fut.set_result(response.Parameters)
    def delete_subscriptions(self, subscriptionids):
        self.logger.info("delete_subscriptions")
        request = ua.DeleteSubscriptionsRequest()
        request.Parameters.SubscriptionIds = subscriptionids
        resp_fut = Future()
        mycallback = partial(self._delete_subscriptions_callback, subscriptionids, resp_fut)
        self._uasocket.send_request(request, mycallback)
        return resp_fut.result(self._timeout)
    def _delete_subscriptions_callback(self, subscriptionids, resp_fut, data_fut):
        self.logger.info("_delete_subscriptions_callback")
        data = data_fut.result()
        response = ua.DeleteSubscriptionsResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        for sid in subscriptionids:
            self._publishcallbacks.pop(sid)
        resp_fut.set_result(response.Results)
    def publish(self, acks=None):
        """Send a PublishRequest; the answer is handled in _call_publish_callback."""
        self.logger.info("publish")
        if acks is None:
            acks = []
        request = ua.PublishRequest()
        request.Parameters.SubscriptionAcknowledgements = acks
        # timeout could be set to 0 (= no timeout) but some servers do not support it
        self._uasocket.send_request(request, self._call_publish_callback, timeout=int(9e8))  # 250 days
    def _call_publish_callback(self, future):
        self.logger.info("call_publish_callback")
        data = future.result()
        # check if answer looks ok
        try:
            self._uasocket.check_answer(data, "while waiting for publish response")
        except BadTimeout:  # Spec Part 4, 7.28
            self.publish()
            return
        except BadNoSubscription:  # Spec Part 5, 13.8.1
            # BadNoSubscription is expected after deleting the last subscription.
            #
            # We should therefore also check for len(self._publishcallbacks) == 0, but
            # this gets us into trouble if a Publish response arrives before the
            # DeleteSubscription response.
            #
            # We could remove the callback already when sending the DeleteSubscription request,
            # but there are some legitimate reasons to keep them around, such as when the server
            # responds with "BadTimeout" and we should try again later instead of just removing
            # the subscription client-side.
            #
            # There are a variety of ways to act correctly, but the most practical solution seems
            # to be to just ignore any BadNoSubscription responses.
            self.logger.info("BadNoSubscription received, ignoring because it's probably valid.")
            return
        # parse publish response
        try:
            response = ua.PublishResponse.from_binary(data)
            self.logger.debug(response)
        except Exception:
            # INFO: catching the exception here might be obsolete because we already
            # catch BadTimeout above. However, it's not really clear what this code
            # does so it stays in, doesn't seem to hurt.
            self.logger.exception("Error parsing notification from server")
            # send a new publish request so the server does not stop sending notifications
            self.publish([])
            return
        # look for callback
        try:
            callback = self._publishcallbacks[response.Parameters.SubscriptionId]
        except KeyError:
            self.logger.warning("Received data for unknown subscription: %s ", response.Parameters.SubscriptionId)
            return
        # do callback
        try:
            callback(response.Parameters)
        except Exception:  # we call client code, catch everything!
            # fix: the message used to contain a dangling "%s" placeholder
            self.logger.exception("Exception while calling user callback")
    def create_monitored_items(self, params):
        self.logger.info("create_monitored_items")
        request = ua.CreateMonitoredItemsRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.CreateMonitoredItemsResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def delete_monitored_items(self, params):
        self.logger.info("delete_monitored_items")
        request = ua.DeleteMonitoredItemsRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.DeleteMonitoredItemsResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def add_nodes(self, nodestoadd):
        self.logger.info("add_nodes")
        request = ua.AddNodesRequest()
        request.Parameters.NodesToAdd = nodestoadd
        data = self._uasocket.send_request(request)
        response = ua.AddNodesResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def add_references(self, refs):
        self.logger.info("add_references")
        request = ua.AddReferencesRequest()
        request.Parameters.ReferencesToAdd = refs
        data = self._uasocket.send_request(request)
        response = ua.AddReferencesResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def delete_references(self, refs):
        self.logger.info("delete_references")  # fix: used to log just "delete"
        request = ua.DeleteReferencesRequest()
        request.Parameters.ReferencesToDelete = refs
        data = self._uasocket.send_request(request)
        response = ua.DeleteReferencesResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Parameters.Results
    def delete_nodes(self, params):
        self.logger.info("delete_nodes")
        request = ua.DeleteNodesRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.DeleteNodesResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def call(self, methodstocall):
        self.logger.info("call")  # consistency: every other service logs its name
        request = ua.CallRequest()
        request.Parameters.MethodsToCall = methodstocall
        data = self._uasocket.send_request(request)
        response = ua.CallResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def history_read(self, params):
        self.logger.info("history_read")
        request = ua.HistoryReadRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.HistoryReadResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
    def modify_monitored_items(self, params):
        self.logger.info("modify_monitored_items")
        request = ua.ModifyMonitoredItemsRequest()
        request.Parameters = params
        data = self._uasocket.send_request(request)
        response = ua.ModifyMonitoredItemsResponse.from_binary(data)
        self.logger.debug(response)
        response.ResponseHeader.ServiceResult.check()
        return response.Results
|
sensors.py | #!/usr/bin/python
#encoding:=utf-8
import time, fcntl, glob, sys, threading
# 参考: http://blog.livedoor.jp/tmako123-programming/archives/41536599.html
# http://qiita.com/wwacky/items/98d8be2844fa1b778323
class Sensor:
    """Base class for device-file sensors.

    Provides a single-line read that holds an exclusive ``flock`` on the
    file while reading. Accepts either a path string or an already-open
    file object.
    """
    def __init__(self):
        pass
    def _readline(self, f):
        # Dispatch on argument type: path string vs. open file object.
        # NOTE: ``basestring`` makes this dispatcher Python 2 only.
        if isinstance(f, basestring):
            return self._readline_filename(f)
        return self._readline_fileobj(f)
    def _readline_filename(self, filename):
        # Open the file ourselves, then reuse the file-object reader.
        with open(filename, "r") as fileobj:
            return self._readline_fileobj(fileobj)
    def _readline_fileobj(self, f):
        fcntl.flock(f, fcntl.LOCK_EX)
        content = f.readline()
        fcntl.flock(f, fcntl.LOCK_UN)
        return content.rstrip()
class Yaw(Sensor):
    """Yaw angle sensor read from a serial device (/dev/ttyACM0).

    on() spawns a background thread that polls the device, keeping the
    current angle (relative to the value sampled at startup), the angular
    velocity and the timestamp of the last sample.
    """
    def __init__(self):
        Sensor.__init__(self)
        self.__value = 0.0       # latest raw reading [deg]
        self.__prev_value = 0.0  # previous raw reading [deg]
        self.__velocity = 0.0    # angular velocity [deg/s]
        self.__init_value = 0.0  # reading at startup, used as zero reference
        self.__time = 0.0        # time.time() of the last sample
        # Fix: the run flag used to be created only in on(), so calling
        # off() before on() raised AttributeError.
        self.__run = False
    def on(self):
        """Start the polling thread."""
        self.__run = True
        threading.Thread(target=self.__update).start()
    def off(self):
        """Ask the polling thread to stop after its current iteration."""
        self.__run = False
    def __update(self):
        # Poll the device until off() clears the run flag.
        with open("/dev/ttyACM0", "r") as f:
            self.__init_value = float(self._readline(f))
            time.sleep(0.3)
            # read again after the device settles and use that as reference
            self.__init_value = float(self._readline(f))
            self.__prev_value = self.__init_value
            self.__velocity = 0.0
            freq = 0.03
            while self.__run:
                time.sleep(freq)
                self.__prev_value = self.__value
                self.__value = float(self._readline(f))
                d = self.__value - self.__prev_value
                # unwrap the +-180 degree discontinuity
                if d > 180.0: d -= 360.0
                elif d < -180.0: d += 360.0
                self.__velocity = d/freq
                self.__time = time.time()
    def get_value(self):
        """Return the yaw angle relative to the startup reference, wrapped to (-180, 180]."""
        v = self.__value - self.__init_value
        if v > 180.0: v -= 360.0
        elif v < -180.0: v += 360.0
        return v
    def get_velocity(self):
        """Return the last computed angular velocity [deg/s]."""
        return self.__velocity
    def get_time(self):
        """Return the time.time() timestamp of the last sample."""
        return self.__time
class Buttons(Sensor):
    """Three push buttons exposed via the device files /dev/rtswitch[0-2].

    update() samples the raw values ("0" means pressed); a press is
    latched and reported once by the *_pushed() accessors after the
    button has been released.
    """
    def __init__(self):
        Sensor.__init__(self)
        self.__files = sorted(glob.glob("/dev/rtswitch[0-2]"))
        self.__pushed = [False, False, False]  # latched "was pushed" flags
        self.__values = ["1", "1", "1"]        # raw readings; "0" = pressed
    def __check_pushed(self, num):
        # Only report a press after the button has been released again;
        # reading the event consumes (clears) the latch.
        if self.__values[num] == "0":  # now pushing
            return False
        if self.__pushed[num]:
            self.__pushed[num] = False
            return True
        else:
            return False
    # Read current data from the device files.
    def update(self):
        time.sleep(0.3)
        # Current raw values. Fix: wrap map() in list() so indexing keeps
        # working on Python 3, where map() returns a lazy iterator
        # (identical behavior on Python 2).
        self.__values = list(map(self._readline, self.__files))
        # keep True until the corresponding *_pushed() is called
        self.__pushed = [ v == "0" or past for (v,past) in zip(self.__values,self.__pushed)]
    # Public accessors for the button states.
    def front_pushed(self): return self.__check_pushed(0)
    def center_pushed(self): return self.__check_pushed(1)
    def back_pushed(self): return self.__check_pushed(2)
    def front_pushed_now(self): return self.__values[0] == "0"
    def center_pushed_now(self): return self.__values[1] == "0"
    def back_pushed_now(self): return self.__values[2] == "0"
    def all_pushed_now(self):
        return self.__values == ["0","0","0"]
    def get_values(self): return self.__values
    def get_pushed(self): return self.__pushed
class LightSensors(Sensor):
    """Four light sensors read from the device file /dev/rtlightsensor0."""
    def __init__(self):
        Sensor.__init__(self)
        self.__file = "/dev/rtlightsensor0"
        self.__values = [0,0,0,0]
    # Read the current data from the device file.
    def update(self):
        # one space-separated line of integers, stored in reversed order
        tokens = self._readline(self.__file).split(' ')
        readings = [ int(token) for token in tokens ]
        readings.reverse()
        self.__values = readings
    def get_values(self): return self.__values
import picamera,os,cv2,io
import numpy as np
class PiCamera(Sensor):
    """Raspberry Pi camera wrapper: still capture and simple face detection."""
    def __init__(self):
        Sensor.__init__(self)
        self.camera = picamera.PiCamera()
        # the camera is mounted upside down, so mirror both axes
        self.camera.hflip = True
        self.camera.vflip = True
    def capture(self,filename,resolution=(2592/4,1944/4)):
        # NOTE(review): 2592/4 is integer division on Python 2 (this file uses
        # py2 print syntax) but float division on Python 3 — confirm target.
        self.camera.resolution = resolution
        # write to a temporary name first, then rename so readers never see
        # a partially written file
        self.camera.capture(filename + "_tmp.jpg")
        os.rename(filename + "_tmp.jpg",filename)
    def face_pos_on_img(self):
        """Capture a frame, detect a face and return its horizontal angular
        offset from the image center in degrees (None if no face found).
        Also writes an annotated copy of the frame to /var/www/image.jpg."""
        # grab a frame into an in-memory JPEG buffer
        stream = io.BytesIO()
        width,height = 600,400
        self.camera.resolution = (width,height)
        self.camera.capture(stream,'jpeg')
        data = np.fromstring(stream.getvalue(), np.uint8)
        img = cv2.imdecode(data,1)
        # face detection with OpenCV's Haar cascade
        gimg = cv2.cvtColor(img,cv2.cv.CV_BGR2GRAY)
        classifier = "/usr/share/opencv/haarcascades/haarcascade_frontalface_default.xml"
        cascade = cv2.CascadeClassifier(classifier)
        face = cascade.detectMultiScale(gimg,1.1,1,cv2.CASCADE_FIND_BIGGEST_OBJECT)
        # output: annotate the first (biggest) detected face
        if len(face) == 0: return None
        r = face[0]
        cv2.rectangle(img,tuple(r[0:2]),tuple(r[0:2]+r[2:4]),(0,255,255),4)
        # atomic publish of the annotated image (write temp, then rename)
        cv2.imwrite("/var/www/tmp.image.jpg",img)
        os.rename("/var/www/tmp.image.jpg","/var/www/image.jpg")
        # horizontal pixel offset of the face center from the image center
        h = r[0] + r[2]/2 - width/2
        # the field of view is 53.5 degrees, so (width/53.5) pixels equal one degree
        return -h/(width/53.5)
if __name__ == '__main__':
    # Smoke test: continuously poll the light sensors and print the values.
    lightsensor = LightSensors()
    while True:
        lightsensor.update()
        # Fix: parenthesized call form works on both Python 2 and 3 for a
        # single argument; the bare py2 print statement is a SyntaxError on py3.
        print(lightsensor.get_values())
|
pycp.py | """
Description:
-----------------------| pycp.py |-----------------------
| |
| A file of the Python Color Picker tool. |
| |
| Creator: Tobdu 399 |
| |
---------------------------------------------------------
"""
from lib.misc import *
def RGBtoHEX(RGB):
    """Convert an (r, g, b) tuple of 0-255 ints to a '#rrggbb' hex string."""
    return "#%02x%02x%02x" % RGB
def updateWindow():
    """Apply the current color as the window background.

    While the screen picker is active HEX_COLOR is driven by the picker
    thread, so it is not recomputed from the sliders here.
    """
    global HEX_COLOR
    if not PICKING_COLOR:
        channels = (r_slider.get(), g_slider.get(), b_slider.get())
        HEX_COLOR = RGBtoHEX(channels)
    display.config(bg=HEX_COLOR)
def updateSliders():
    """Recolor the sliders to match the current color and, while picking
    from the screen, push the picked RGB values into the sliders.

    The text color flips to black on bright colors and white on dark ones.
    """
    global TXT_COLOR
    brightness = int(r_slider.get()) + int(g_slider.get()) + int(b_slider.get())
    TXT_COLOR = "#000000" if brightness >= 355 else "#ffffff"
    for slider in (r_slider, g_slider, b_slider):
        slider.config(bg=HEX_COLOR, fg=TXT_COLOR)
    if PICKING_COLOR and COLOR_UNDER_CURSOR is not None:
        r, g, b = COLOR_UNDER_CURSOR
        r_slider.set(r)
        g_slider.set(g)
        b_slider.set(b)
def updateSliderLabels():
    """Refresh the R/G/B value labels, using a slightly darkened version
    of the current color as the label background."""
    global TXT_BACKGROUND
    if not PICKING_COLOR:
        def darken(raw):
            value = int(raw)
            return value - 20 if value >= 20 else value
        TXT_BACKGROUND = RGBtoHEX((darken(r_slider.get()),
                                   darken(g_slider.get()),
                                   darken(b_slider.get())))
    r_label.config(text=f"R {r_slider.get()}", bg=TXT_BACKGROUND, fg=TXT_COLOR)
    g_label.config(text=f"G {g_slider.get()}", bg=TXT_BACKGROUND, fg=TXT_COLOR)
    b_label.config(text=f"B {b_slider.get()}", bg=TXT_BACKGROUND, fg=TXT_COLOR)
def showColorCode():
    """Write the current hex color code (uppercased) into its label."""
    color_code.config(text=HEX_COLOR.upper(), bg=TXT_BACKGROUND, fg=TXT_COLOR)
def updateColorPickerButton():
    """Sink and disable the picker button while a screen pick is active,
    restore it otherwise."""
    relief, state = ("sunken", "disabled") if PICKING_COLOR else ("groove", "normal")
    color_picker.config(relief=relief, state=state)
def activateColorPicker():
    """Begin picking a color from the screen in a daemon worker thread."""
    global PICKING_COLOR
    PICKING_COLOR = True
    worker = threading.Thread(target=pickColorFromScreen, daemon=True)
    worker.start()
def update():
    """Refresh the window and all of its widgets."""
    for refresh in (updateWindow, updateSliders, updateSliderLabels, showColorCode):
        refresh()
def onClosing():
    """Persist the current RGB color to the cache file and close the window.

    The pickled triple is restored by loadPrevious() on the next run.
    """
    color = (int(r_slider.get()), int(g_slider.get()), int(b_slider.get()))
    # Fix: use a context manager — the file handle used to be leaked.
    with open("cache/cache.pycp", "wb") as cache_file:
        pickle.dump(color, cache_file)
    display.destroy()
def getColorInPos(pos):
    """Return the (r, g, b) color of the screen pixel at ``pos``.

    ``pos`` is an (x, y) tuple in screen coordinates.
    """
    x, y = pos
    hdc = win32gui.GetDC(0)
    try:
        pixel = win32gui.GetPixel(hdc, x, y)
    finally:
        # Fix: release the device context. The original leaked one DC per
        # call, and this runs in a tight loop while picking from the screen.
        win32gui.ReleaseDC(0, hdc)
    # GetPixel returns 0x00bbggrr; unpack the channels
    return (pixel & 0xff), ((pixel >> 8) & 0xff), ((pixel >> 16) & 0xff)
def copyToClipboard():
    """Put the current hex color code (uppercased) on the system clipboard."""
    display.clipboard_clear()
    display.clipboard_append(HEX_COLOR.upper())
def pickColorFromScreen():
    """Worker loop for the screen color picker.

    Tracks the mouse, continuously previews the color under the cursor in
    the UI, and stops when the left mouse button is pressed.
    """
    global PICKING_COLOR, COLOR_UNDER_CURSOR, HEX_COLOR, TXT_BACKGROUND
    while PICKING_COLOR:
        updateColorPickerButton()
        left_button = win32api.GetKeyState(0x01)
        cursor = win32gui.GetCursorPos()
        COLOR_UNDER_CURSOR = getColorInPos(cursor)
        HEX_COLOR = RGBtoHEX(COLOR_UNDER_CURSOR)
        darkened = tuple(c - 20 if c >= 20 else c for c in COLOR_UNDER_CURSOR)
        TXT_BACKGROUND = RGBtoHEX(darkened)
        update()
        if left_button < 0:  # negative key state == button currently down
            PICKING_COLOR = False
    updateColorPickerButton()
def loadPrevious():
    """Load the previously saved color from cache/cache.pycp, if present.

    Creates the cache directory on first run. Sets the global
    PREVIOUS_COLOR only when a save file exists.
    """
    global PREVIOUS_COLOR
    if pathlib.Path("cache").is_dir():
        cache_file = pathlib.Path("cache/cache.pycp")
        if cache_file.is_file():
            # Fix: use a context manager — the file handle used to be leaked.
            with cache_file.open("rb") as fh:
                PREVIOUS_COLOR = pickle.load(fh)
    else:
        pathlib.Path("cache").mkdir()
def gui():
    """
    Python Color Picker's Graphical User Interface.

    Builds the tkinter window (sliders, labels, picker/copy buttons),
    restores the previously saved color if one exists, and enters the
    tkinter main loop.
    """
    # Set the needed variables that are declared inside this function
    # global, so that they can be used outside from this function
    global display
    global r_slider, g_slider, b_slider
    global r_label, g_label, b_label
    global color_code, color_picker
    loadPrevious()
    # Create a tkinter window
    display = Tk()
    display.title("Color Picker")
    # Set a titlebar icon
    display.iconbitmap(f"{path}/pictures/icon.ico")
    # Set the window size
    display.geometry("450x175")
    # Make the window unresizable
    display.resizable(False, False)
    # Keep the window always on top
    display.attributes("-topmost", True)
    # Color sliders (every change triggers a full UI refresh via update())
    r_slider = Scale(display, from_=0, to=255, orient=HORIZONTAL, highlightthickness=0, showvalue=False, tickinterval=255, length=250, command=lambda x=None: update())
    g_slider = Scale(display, from_=0, to=255, orient=HORIZONTAL, highlightthickness=0, showvalue=False, tickinterval=255, length=250, command=lambda x=None: update())
    b_slider = Scale(display, from_=0, to=255, orient=HORIZONTAL, highlightthickness=0, showvalue=False, tickinterval=255, length=250, command=lambda x=None: update())
    # Color sliders labels (RGB)
    r_label = Label(display, text="RGB", width=7, font=("Inconsolata", 11))
    g_label = Label(display, text="RGB", width=7, font=("Inconsolata", 11))
    b_label = Label(display, text="RGB", width=7, font=("Inconsolata", 11))
    # Pick color and copy buttons images
    # NOTE: the PhotoImage objects must stay referenced (here: as locals kept
    # alive by mainloop) or tkinter would garbage-collect the button images.
    color_picker_image = PhotoImage(file=f"{path}/pictures/colorpicker.png")
    color_picker = Button(display, image=color_picker_image, width=22, height=22, bg="#0093f0", activebackground="#00b0f0", command=lambda x=None: activateColorPicker())
    # Pick color and copy buttons
    copy_button_image = PhotoImage(file=f"{path}/pictures/copy.png")
    copy_button = Button(display, image=copy_button_image, width=56, height=22, bg="#0093f0", relief="groove", activebackground="#00b0f0", command=lambda x=None: copyToClipboard())
    # Color code
    color_code = Label(display, text="#------", width=13, font=("Inconsolata", 11))
    # Position sliders
    r_slider.place(x=10, y=30)
    g_slider.place(x=10, y=70)
    b_slider.place(x=10, y=110)
    # Position sliders labels
    r_label.place(x=280, y=30-4)
    g_label.place(x=280, y=70-4)
    b_label.place(x=280, y=110-4)
    # Position buttons
    color_picker.place(x=345, y=70-4)
    copy_button.place(x=380, y=70-4)
    # Position color code
    color_code.place(x=345, y=110-4)
    # If a previous color was loaded, use it on startup,
    # otherwise use a random default color.
    # NOTE(review): PREVIOUS_COLOR and the other globals appear to be
    # initialized by the `from lib.misc import *` at the top — confirm.
    if PREVIOUS_COLOR != None:
        r_slider.set(PREVIOUS_COLOR[0])
        g_slider.set(PREVIOUS_COLOR[1])
        b_slider.set(PREVIOUS_COLOR[2])
        update()
    else:
        r_slider.set(random.randint(0, 255))
        g_slider.set(random.randint(0, 255))
        b_slider.set(random.randint(0, 255))
        update()
    # Set what happens, when the program is closed. In this
    # case, save the current color to a file and use it the
    # next time this program is opened
    display.protocol("WM_DELETE_WINDOW", onClosing)
    # End tkinter loop
    display.mainloop()
|
display.py | # -*- coding=utf-8 -*-
# name: nan chen
# date: 2021/7/4 19:56
import threading
import cv2
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtGui import QPixmap, QImage
import numpy as np
from PyQt5 import QtWidgets, QtGui
import face_identify
from retinaface import Retinaface
class Display:
def __init__(self, ui, mainWnd):
self.ui = ui
self.mainWnd = mainWnd
self.a = 0
self.b = 0
self.isCamera = True
# 信号槽设置
ui.CameraButton.clicked.connect(self.radioButtonCam)
ui.PhotoButton.clicked.connect(self.photoButtonFile)
ui.FileButton.clicked.connect(self.radioButtonFile)
ui.Close.clicked.connect(self.Close)
# 创建一个关闭事件并设为未触发
self.continueEvent1 = threading.Event()
self.continueEvent1.clear()
self.stopEvent = threading.Event()
self.stopEvent.clear()
def radioButtonCam(self):
self.isCamera = True
self.a = 1
self.cap = cv2.VideoCapture(0)
th = threading.Thread(target=self.Display)
th.start()
def suspend_continue(self):
self.continueEvent1.set()
def radioButtonFile(self):
self.fileName = ""
self.fileName, self.fileType = QFileDialog.getOpenFileName(self.mainWnd, 'Choose file', '',
"MP4Files(*.mp4);;AVI Files(*.avi)")
self.cap = cv2.VideoCapture(self.fileName)
self.frameRate = self.cap.get(cv2.CAP_PROP_FPS)
th = threading.Thread(target=self.Display)
th.start()
def photoButtonFile(self):
self.isCamera = False
self.fileName = ""
self.fileName, self.fileType = QFileDialog.getOpenFileName(self.mainWnd, "打开图片", "", "*.jpg;;*.png;;All Files(*)")
print(self.fileName)
# jpg = QtGui.QPixmap(self.fileName)
# self.ui.DispalyLabel.setPixmap(jpg)
label_width = self.ui.DispalyLabel.width()
label_height = self.ui.DispalyLabel.height()
img = face_identify.indentify_photo(self.fileName)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
image = QImage(img, img.shape[1], img.shape[0],img.shape[1]*3, QImage.Format_RGB888)
image = QPixmap.fromImage(image).scaled(label_width, label_height)
self.ui.DispalyLabel.setPixmap(image)
#self.ui.DispalyLabel.setPixmap(QPixmap.fromImage(img))
# self.cap = cv2.VideoCapture(self.fileName)
# self.frameRate = self.cap.get(cv2.CAP_PROP_FPS)
# th = threading.Thread(target=self.Display)
# th.start()
# def pause(self):
# self.__flag.clear() # 设置为False, 让线程阻塞
# print("pause")
#
# def resume(self):
# self.__flag.set() # 设置为True, 让线程停止阻塞
# print("resume")
"""
def Open(self):
self.fileName = ""
if not self.isCamera:
self.fileName, self.fileType = QFileDialog.getOpenFileName(self.mainWnd, 'Choose file', '',
"MP4Files(*.mp4);;AVI Files(*.avi)")
self.cap = cv2.VideoCapture(self.fileName)
self.frameRate = self.cap.get(cv2.CAP_PROP_FPS)
else:
# 下面两种rtsp格式都是支持的
# cap = cv2.VideoCapture("rtsp://admin:Supcon1304@172.20.1.126/main/Channels/1")
self.a = 1
self.cap = cv2.VideoCapture(0)
# 创建视频显示线程
if (self.fileName != "") or (self.a == 1):
th = threading.Thread(target=self.Display)
th.start()
"""
def Close(self):
# 关闭事件设为触发,关闭视频播放
self.a = 0
self.stopEvent.set()
def Display(self):
    """Frame loop run on a worker thread.

    Reads frames from self.cap, runs Retinaface detection on each frame
    and paints the result onto the display label.  continueEvent1
    toggles pause/resume; stopEvent requests shutdown.  Both are polled
    once per frame.
    """
    retinaface = Retinaface()
    # self.ui.Open.setEnabled(False)
    self.ui.Close.setEnabled(True)
    # self.ui.First.setEnabled(True)
    label_width = self.ui.DispalyLabel.width()
    label_height = self.ui.DispalyLabel.height()
    while self.cap.isOpened() and True:
        success, frame = self.cap.read()
        print(success)
        if success == False:
            print("play finished")  # local file reached its end
            break
        # OpenCV delivers BGR; Qt's Format_RGB888 expects RGB.
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Run detection; the detector returns an annotated image.
        frame = np.array(retinaface.detect_image(frame))
        img = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888)
        self.ui.DispalyLabel.setPixmap(QPixmap.fromImage(img).scaled(label_width, label_height))
        if self.isCamera:
            cv2.waitKey(1)
        else:
            # Pace playback at the file's native frame rate.
            cv2.waitKey(int(1000 / self.frameRate))
        # Pause handling: one set of continueEvent1 enters a busy-wait,
        # the next set leaves it (see suspend_continue()).
        if True == self.continueEvent1.is_set():
            self.continueEvent1.clear()
            self.b = 1
            while self.b == 1:
                if True == self.continueEvent1.is_set():
                    self.continueEvent1.clear()
                    self.b = 0
        if True == self.stopEvent.is_set():
            # Stop requested; fall through to cleanup below.
            break
    self.cap.release()
    self.stopEvent.clear()
    self.ui.DispalyLabel.clear()
    self.ui.Close.setEnabled(False)
    # self.ui.Open.setEnabled(True)
    self.ui.DispalyLabel.setText("1212")
|
test_wrapper.py | from __future__ import division, absolute_import, print_function
__copyright__ = "Copyright (C) 2009 Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from six.moves import range
import numpy as np
import numpy.linalg as la
import pytest
import pyopencl as cl
import pyopencl.array as cl_array
import pyopencl.cltypes as cltypes
import pyopencl.clrandom
from pyopencl.tools import ( # noqa
pytest_generate_tests_for_pyopencl as pytest_generate_tests)
from pyopencl.characterize import get_pocl_version
# Are CL implementations crashy? You be the judge. :)
try:
import faulthandler # noqa
except ImportError:
pass
else:
faulthandler.enable()
def _skip_if_pocl(plat, up_to_version, msg='unsupported by pocl'):
    """Skip the running test on pocl up to *up_to_version* (None: always)."""
    if plat.vendor != "The pocl project":
        return
    if up_to_version is None or get_pocl_version(plat) <= up_to_version:
        pytest.skip(msg)
def test_get_info(ctx_factory):
    """Call get_info with every info constant on every CL object type.

    Known-crashing (vendor, platform, version) combinations are skipped
    via CRASH_QUIRKS; known-failing ones via QUIRKS.  Any other failure
    is tallied in failure_count.
    """
    ctx = ctx_factory()
    device, = ctx.devices
    platform = device.platform

    # smoke-test pyopencl's derived device identifiers
    device.persistent_unique_id
    device.hashable_model_and_version_identifier

    failure_count = [0]

    pocl_quirks = [
        (cl.Buffer, cl.mem_info.OFFSET),
        (cl.Program, cl.program_info.BINARIES),
        (cl.Program, cl.program_info.BINARY_SIZES),
    ]
    if ctx._get_cl_version() >= (1, 2) and cl.get_cl_header_version() >= (1, 2):
        pocl_quirks.extend([
            (cl.Program, cl.program_info.KERNEL_NAMES),
            (cl.Program, cl.program_info.NUM_KERNELS),
        ])
    CRASH_QUIRKS = [  # noqa
            (("NVIDIA Corporation", "NVIDIA CUDA",
                "OpenCL 1.0 CUDA 3.0.1"),
                [
                    (cl.Event, cl.event_info.COMMAND_QUEUE),
                    ]),
            (("NVIDIA Corporation", "NVIDIA CUDA",
                "OpenCL 1.2 CUDA 7.5"),
                [
                    (cl.Buffer, getattr(cl.mem_info, "USES_SVM_POINTER", None)),
                    ]),
            (("The pocl project", "Portable Computing Language",
                "OpenCL 1.2 pocl 0.8-pre"),
                pocl_quirks),
            (("The pocl project", "Portable Computing Language",
                "OpenCL 1.2 pocl 0.8"),
                pocl_quirks),
            (("The pocl project", "Portable Computing Language",
                "OpenCL 1.2 pocl 0.9-pre"),
                pocl_quirks),
            (("The pocl project", "Portable Computing Language",
                "OpenCL 1.2 pocl 0.9"),
                pocl_quirks),
            (("The pocl project", "Portable Computing Language",
                "OpenCL 1.2 pocl 0.10-pre"),
                pocl_quirks),
            (("The pocl project", "Portable Computing Language",
                "OpenCL 1.2 pocl 0.10"),
                pocl_quirks),
            (("Apple", "Apple",
                "OpenCL 1.2"),
                [
                    (cl.Program, cl.program_info.SOURCE),
                    ]),
            ]
    QUIRKS = []  # noqa

    def find_quirk(quirk_list, cl_obj, info):
        # True if (cl_obj, info) matches a quirk entry for this platform.
        for (vendor, name, version), quirks in quirk_list:
            if (
                    vendor == platform.vendor
                    and name == platform.name
                    and platform.version.startswith(version)):
                for quirk_cls, quirk_info in quirks:
                    if (isinstance(cl_obj, quirk_cls)
                            and quirk_info == info):
                        return True

        return False

    def do_test(cl_obj, info_cls, func=None, try_attr_form=True):
        if func is None:
            func = cl_obj.get_info

        for info_name in dir(info_cls):
            if not info_name.startswith("_") and info_name != "to_string":
                print(info_cls, info_name)
                info = getattr(info_cls, info_name)

                if find_quirk(CRASH_QUIRKS, cl_obj, info):
                    print("not executing get_info", type(cl_obj), info_name)
                    print("(known crash quirk for %s)" % platform.name)
                    continue

                try:
                    func(info)
                except Exception:
                    # Fix: this used to build a tuple and then try
                    # tuple += str, raising a TypeError of its own.
                    print("failed get_info", type(cl_obj), info_name)

                    if find_quirk(QUIRKS, cl_obj, info):
                        print("(known quirk for %s)" % platform.name)
                    else:
                        failure_count[0] += 1

                if try_attr_form:
                    try:
                        getattr(cl_obj, info_name.lower())
                    except Exception:
                        print("failed attr-based get_info", type(cl_obj), info_name)

                        if find_quirk(QUIRKS, cl_obj, info):
                            print("(known quirk for %s)" % platform.name)
                        else:
                            failure_count[0] += 1

    do_test(platform, cl.platform_info)
    do_test(device, cl.device_info)
    do_test(ctx, cl.context_info)

    props = 0
    # Fix: 'profiling' was only assigned inside the 'if' below, leaving it
    # unbound (NameError at 'if profiling:') on queues without profiling.
    profiling = False
    if (device.queue_properties
            & cl.command_queue_properties.PROFILING_ENABLE):
        profiling = True
        props = cl.command_queue_properties.PROFILING_ENABLE
    queue = cl.CommandQueue(ctx,
            properties=props)
    do_test(queue, cl.command_queue_info)

    prg = cl.Program(ctx, """
        __kernel void sum(__global float *a)
        { a[get_global_id(0)] *= 2; }
        """).build()
    do_test(prg, cl.program_info)
    do_test(prg, cl.program_build_info,
            lambda info: prg.get_build_info(device, info),
            try_attr_form=False)

    n = 2000
    a_buf = cl.Buffer(ctx, 0, n*4)
    do_test(a_buf, cl.mem_info)

    kernel = prg.all_kernels()[0]
    do_test(kernel, cl.kernel_info)

    for i in range(2):  # exercise cache
        for info_name in dir(cl.kernel_work_group_info):
            if not info_name.startswith("_") and info_name != "to_string":
                try:
                    print("kernel_wg_info: %s" % info_name)
                    kernel.get_work_group_info(
                            getattr(cl.kernel_work_group_info, info_name),
                            device)
                except cl.LogicError as err:
                    print("<error: %s>" % err)

    evt = kernel(queue, (n,), None, a_buf)
    do_test(evt, cl.event_info)

    if profiling:
        evt.wait()
        do_test(evt, cl.profiling_info,
                lambda info: evt.get_profiling_info(info),
                try_attr_form=False)

    # crashes on intel...
    # and pocl does not support CL_ADDRESS_CLAMP
    if device.image_support and platform.vendor not in [
            "Intel(R) Corporation",
            "The pocl project",
            ]:
        smp = cl.Sampler(ctx, False,
                cl.addressing_mode.CLAMP,
                cl.filter_mode.NEAREST)
        do_test(smp, cl.sampler_info)

        img_format = cl.get_supported_image_formats(
                ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0]

        img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256))
        assert img.shape == (128, 256)

        img.depth
        img.image.depth
        do_test(img, cl.image_info,
                lambda info: img.get_image_info(info))
def test_int_ptr(ctx_factory):
    """Round-trip every object type through int_ptr/from_int_ptr."""
    def do_test(obj):
        new_obj = type(obj).from_int_ptr(obj.int_ptr)
        assert obj == new_obj
        assert type(obj) is type(new_obj)

    ctx = ctx_factory()
    device, = ctx.devices
    platform = device.platform

    do_test(device)
    do_test(platform)
    do_test(ctx)

    queue = cl.CommandQueue(ctx)
    do_test(queue)

    evt = cl.enqueue_marker(queue)
    do_test(evt)

    prg = cl.Program(ctx, """
        __kernel void sum(__global float *a)
        { a[get_global_id(0)] *= 2; }
        """).build()
    do_test(prg)
    do_test(prg.sum)

    n = 2000
    a_buf = cl.Buffer(ctx, 0, n*4)
    do_test(a_buf)

    # crashes on intel...
    # and pocl does not support CL_ADDRESS_CLAMP
    if device.image_support and platform.vendor not in [
            "Intel(R) Corporation",
            "The pocl project",
            ]:
        smp = cl.Sampler(ctx, False,
                cl.addressing_mode.CLAMP,
                cl.filter_mode.NEAREST)
        do_test(smp)

        img_format = cl.get_supported_image_formats(
                ctx, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)[0]

        img = cl.Image(ctx, cl.mem_flags.READ_ONLY, img_format, (128, 256))
        do_test(img)
def test_invalid_kernel_names_cause_failures(ctx_factory):
    """Accessing a misspelled kernel name must raise, not succeed silently."""
    ctx = ctx_factory()
    device = ctx.devices[0]
    prg = cl.Program(ctx, """
        __kernel void sum(__global float *a)
        { a[get_global_id(0)] *= 2; }
        """).build()

    try:
        prg.sam  # deliberately misspelled ("sum")
        raise RuntimeError("invalid kernel name did not cause error")
    except AttributeError:
        pass
    except RuntimeError:
        if "Intel" in device.platform.vendor:
            from pytest import xfail
            xfail("weird exception from OpenCL implementation "
                    "on invalid kernel name--are you using "
                    "Intel's implementation? (if so, known bug in Intel CL)")
        else:
            raise
def test_image_format_constructor():
    """ImageFormat is directly constructible; no image support needed."""
    fmt = cl.ImageFormat(cl.channel_order.RGBA, cl.channel_type.FLOAT)
    assert fmt.channel_order == cl.channel_order.RGBA
    assert fmt.channel_data_type == cl.channel_type.FLOAT
    if not cl._PYPY:
        # slot-based object on CPython: no per-instance __dict__
        assert not hasattr(fmt, "__dict__")
def test_device_topology_amd_constructor():
    """DeviceTopologyAmd is constructible without the AMD extension."""
    topology = cl.DeviceTopologyAmd(3, 4, 5)
    assert topology.bus == 3
    assert topology.device == 4
    assert topology.function == 5
    if not cl._PYPY:
        # slot-based object on CPython: no per-instance __dict__
        assert not hasattr(topology, "__dict__")
def test_nonempty_supported_image_formats(ctx_factory):
    """Image-capable devices must report at least one 2D image format."""
    context = ctx_factory()

    device = context.devices[0]

    if device.image_support:
        assert len(cl.get_supported_image_formats(
                context, cl.mem_flags.READ_ONLY, cl.mem_object_type.IMAGE2D)) > 0
    else:
        from pytest import skip
        skip("images not supported on %s" % device.name)
def test_that_python_args_fail(ctx_factory):
    """Bare Python ints/floats must be rejected as kernel arguments."""
    context = ctx_factory()

    prg = cl.Program(context, """
        __kernel void mult(__global float *a, float b, int c)
        { a[get_global_id(0)] *= (b+c); }
        """).build()

    a = np.random.rand(50000)
    queue = cl.CommandQueue(context)
    mf = cl.mem_flags
    a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)

    knl = cl.Kernel(prg, "mult")
    try:
        knl(queue, a.shape, None, a_buf, 2, 3)
        assert False, "PyOpenCL should not accept bare Python types as arguments"
    except cl.LogicError:
        pass

    try:
        prg.mult(queue, a.shape, None, a_buf, float(2), 3)
        assert False, "PyOpenCL should not accept bare Python types as arguments"
    except cl.LogicError:
        pass

    # properly sized scalars must be accepted
    prg.mult(queue, a.shape, None, a_buf, np.float32(2), np.int32(3))

    a_result = np.empty_like(a)
    # Fix: dest/src were swapped here, which overwrote the device buffer
    # with the uninitialized host array instead of reading the result back.
    cl.enqueue_copy(queue, a_result, a_buf).wait()
def test_image_2d(ctx_factory):
    """Copy a 2D image into a buffer via a sampler; compare to the host copy."""
    context = ctx_factory()

    device, = context.devices

    if not device.image_support:
        from pytest import skip
        skip("images not supported on %s" % device)

    if "Intel" in device.vendor and "31360.31426" in device.version:
        from pytest import skip
        skip("images crashy on %s" % device)
    _skip_if_pocl(device.platform, None, 'pocl does not support CL_ADDRESS_CLAMP')

    prg = cl.Program(context, """
        __kernel void copy_image(
          __global float *dest,
          __read_only image2d_t src,
          sampler_t samp,
          int stride0)
        {
          int d0 = get_global_id(0);
          int d1 = get_global_id(1);
          /*
          const sampler_t samp =
            CLK_NORMALIZED_COORDS_FALSE
            | CLK_ADDRESS_CLAMP
            | CLK_FILTER_NEAREST;
            */
          dest[d0*stride0 + d1] = read_imagef(src, samp, (float2)(d1, d0)).x;
        }
        """).build()

    num_channels = 1
    a = np.random.rand(1024, 512, num_channels).astype(np.float32)
    if num_channels == 1:
        # drop the trailing singleton channel axis
        a = a[:, :, 0]

    queue = cl.CommandQueue(context)
    try:
        a_img = cl.image_from_array(context, a, num_channels)
    except cl.RuntimeError:
        import sys
        exc = sys.exc_info()[1]
        if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED:
            from pytest import skip
            skip("required image format not supported on %s" % device.name)
        else:
            raise

    a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes)

    samp = cl.Sampler(context, False,
            cl.addressing_mode.CLAMP,
            cl.filter_mode.NEAREST)
    # stride0 is in elements, hence the division by itemsize
    prg.copy_image(queue, a.shape, None, a_dest, a_img, samp,
            np.int32(a.strides[0]/a.dtype.itemsize))

    a_result = np.empty_like(a)
    cl.enqueue_copy(queue, a_result, a_dest)

    good = la.norm(a_result - a) == 0
    if not good:
        if queue.device.type & cl.device_type.CPU:
            assert good, ("The image implementation on your CPU CL platform '%s' "
                    "returned bad values. This is bad, but common."
                    % queue.device.platform)
        else:
            assert good
def test_image_3d(ctx_factory):
    """Copy a float2-channel 3D image into a buffer; compare to the host copy."""
    #test for image_from_array for 3d image of float2
    context = ctx_factory()

    device, = context.devices

    if not device.image_support:
        from pytest import skip
        skip("images not supported on %s" % device)

    if device.platform.vendor == "Intel(R) Corporation":
        from pytest import skip
        skip("images crashy on %s" % device)
    _skip_if_pocl(device.platform, None, 'pocl does not support CL_ADDRESS_CLAMP')

    prg = cl.Program(context, """
        __kernel void copy_image_plane(
          __global float2 *dest,
          __read_only image3d_t src,
          sampler_t samp,
          int stride0,
          int stride1)
        {
          int d0 = get_global_id(0);
          int d1 = get_global_id(1);
          int d2 = get_global_id(2);
          /*
          const sampler_t samp =
            CLK_NORMALIZED_COORDS_FALSE
            | CLK_ADDRESS_CLAMP
            | CLK_FILTER_NEAREST;
            */
          dest[d0*stride0 + d1*stride1 + d2] = read_imagef(
                src, samp, (float4)(d2, d1, d0, 0)).xy;
        }
        """).build()

    num_channels = 2
    shape = (3, 4, 2)
    a = np.random.random(shape + (num_channels,)).astype(np.float32)

    queue = cl.CommandQueue(context)
    try:
        a_img = cl.image_from_array(context, a, num_channels)
    except cl.RuntimeError:
        import sys
        exc = sys.exc_info()[1]
        if exc.code == cl.status_code.IMAGE_FORMAT_NOT_SUPPORTED:
            from pytest import skip
            skip("required image format not supported on %s" % device.name)
        else:
            raise

    a_dest = cl.Buffer(context, cl.mem_flags.READ_WRITE, a.nbytes)

    samp = cl.Sampler(context, False,
            cl.addressing_mode.CLAMP,
            cl.filter_mode.NEAREST)
    # strides are in float2 elements, hence itemsize * num_channels
    prg.copy_image_plane(queue, shape, None, a_dest, a_img, samp,
            np.int32(a.strides[0]/a.itemsize/num_channels),
            np.int32(a.strides[1]/a.itemsize/num_channels),
            )

    a_result = np.empty_like(a)
    cl.enqueue_copy(queue, a_result, a_dest)

    good = la.norm(a_result - a) == 0
    if not good:
        if queue.device.type & cl.device_type.CPU:
            assert good, ("The image implementation on your CPU CL platform '%s' "
                    "returned bad values. This is bad, but common."
                    % queue.device.platform)
        else:
            assert good
def test_copy_buffer(ctx_factory):
    """Device-to-device copy followed by a readback must be lossless."""
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    mf = cl.mem_flags

    src_host = np.random.rand(50000).astype(np.float32)
    dst_host = np.empty_like(src_host)

    src_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=src_host)
    dst_buf = cl.Buffer(ctx, mf.WRITE_ONLY, dst_host.nbytes)

    cl.enqueue_copy(queue, dst_buf, src_buf).wait()
    cl.enqueue_copy(queue, dst_host, dst_buf).wait()

    assert la.norm(src_host - dst_host) == 0
def test_mempool(ctx_factory):
    """Allocate from a MemoryPool while holding only a small working set."""
    from pyopencl.tools import MemoryPool, ImmediateAllocator
    ctx = ctx_factory()
    pool = MemoryPool(ImmediateAllocator(cl.CommandQueue(ctx)))

    base_exp = 12
    held = []
    for exp in range(base_exp - 6, base_exp - 4):
        for _ in range(100):
            held.append(pool.allocate(1 << exp))
            # keep at most ten allocations alive at any time
            if len(held) > 10:
                held.pop(0)

    del held
    pool.stop_holding()
def test_mempool_2(ctx_factory):
    """Check bin_number/alloc_size consistency across random sizes."""
    from pyopencl.tools import MemoryPool, ImmediateAllocator
    from random import randrange

    ctx = ctx_factory()
    pool = MemoryPool(ImmediateAllocator(cl.CommandQueue(ctx)))

    for _ in range(2000):
        size = randrange(1 << 31) >> randrange(32)
        bin_nr = pool.bin_number(size)
        rounded = pool.alloc_size(bin_nr)

        assert rounded >= size, size
        assert pool.bin_number(rounded) == bin_nr, size
        # NOTE(review): this inequality compares rounded against itself and
        # is vacuous for any positive size -- likely meant to bound the
        # overshoot relative to *size*; preserved as-is.
        assert rounded < rounded*(1+1/8)
def test_vector_args(ctx_factory):
    """Pass a float4 by value and verify it lands in every output element."""
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    prg = cl.Program(context, """
        __kernel void set_vec(float4 x, __global float4 *dest)
        { dest[get_global_id(0)] = x; }
        """).build()

    x = cltypes.make_float4(1, 2, 3, 4)
    dest = np.empty(50000, cltypes.float4)
    mf = cl.mem_flags
    dest_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=dest)

    prg.set_vec(queue, dest.shape, None, x, dest_buf)

    cl.enqueue_copy(queue, dest, dest_buf).wait()

    assert (dest == x).all()
def test_header_dep_handling(ctx_factory):
    """Building twice with an #include exercises the header-dependency cache."""
    context = ctx_factory()

    from os.path import exists
    assert exists("empty-header.h")  # if this fails, change dir to pyopencl/test

    kernel_src = """
    #include <empty-header.h>
    kernel void zonk(global int *a)
    {
      *a = 5;
    }
    """

    import os
    # build twice: the second build must come cleanly out of the cache
    cl.Program(context, kernel_src).build(["-I", os.getcwd()])
    cl.Program(context, kernel_src).build(["-I", os.getcwd()])
def test_context_dep_memoize(ctx_factory):
    """A context_dependent_memoize'd function runs once per context."""
    context = ctx_factory()

    from pyopencl.tools import context_dependent_memoize

    call_count = [0]

    @context_dependent_memoize
    def do_something(ctx):
        call_count[0] += 1

    do_something(context)
    do_something(context)

    assert call_count[0] == 1
def test_can_build_and_run_binary(ctx_factory):
    """A program rebuilt from its own binary must be runnable."""
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    device = queue.device

    program = cl.Program(ctx, """
    __kernel void simple(__global float *in, __global float *out)
    {
        out[get_global_id(0)] = in[get_global_id(0)];
    }""")
    program.build()
    binary = program.get_info(cl.program_info.BINARIES)[0]

    # reconstruct the program from the binary and build that
    foo = cl.Program(ctx, [device], [binary])
    foo.build()

    n = 256
    a_dev = cl.clrandom.rand(queue, n, np.float32)
    dest_dev = cl_array.empty_like(a_dev)

    foo.simple(queue, (n,), (16,), a_dev.data, dest_dev.data)
def test_enqueue_barrier_marker(ctx_factory):
    """Barriers and markers enqueue cleanly, with and without wait lists."""
    ctx = ctx_factory()
    # Still relevant on pocl 1.0RC1.
    _skip_if_pocl(
            ctx.devices[0].platform, (1, 0), 'pocl crashes on enqueue_barrier')

    queue = cl.CommandQueue(ctx)

    if queue._get_cl_version() >= (1, 2) and cl.get_cl_header_version() <= (1, 1):
        pytest.skip("CL impl version >= 1.2, header version <= 1.1--cannot be sure "
                "that clEnqueueWaitForEvents is implemented")

    cl.enqueue_barrier(queue)
    evt1 = cl.enqueue_marker(queue)
    evt2 = cl.enqueue_marker(queue, wait_for=[evt1])
    cl.enqueue_barrier(queue, wait_for=[evt1, evt2])
def test_wait_for_events(ctx_factory):
    """wait_for_events accepts and waits on a list of marker events."""
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)
    first = cl.enqueue_marker(queue)
    second = cl.enqueue_marker(queue)
    cl.wait_for_events([first, second])
def test_unload_compiler(platform):
    """Smoke test for clUnloadPlatformCompiler where supported."""
    if (platform._get_cl_version() < (1, 2)
            or cl.get_cl_header_version() < (1, 2)):
        from pytest import skip
        skip("clUnloadPlatformCompiler is only available in OpenCL 1.2")
    _skip_if_pocl(platform, (0, 13), 'pocl does not support unloading compiler')
    if platform.vendor == "Intel(R) Corporation":
        from pytest import skip
        skip("Intel proprietary driver does not support unloading compiler")
    cl.unload_platform_compiler(platform)
def test_platform_get_devices(ctx_factory):
    """get_devices(dev_type) must return only devices matching that type."""
    ctx = ctx_factory()
    platform = ctx.devices[0].platform

    if platform.name == "Apple":
        pytest.xfail("Apple doesn't understand all the values we pass "
                "for dev_type")

    dev_types = [cl.device_type.ACCELERATOR, cl.device_type.ALL,
                 cl.device_type.CPU, cl.device_type.DEFAULT, cl.device_type.GPU]
    if (platform._get_cl_version() >= (1, 2)
            and cl.get_cl_header_version() >= (1, 2)
            and not platform.name.lower().startswith("nvidia")):
        dev_types.append(cl.device_type.CUSTOM)

    for dev_type in dev_types:
        print(dev_type)
        devs = platform.get_devices(dev_type)
        # DEFAULT/ALL/CUSTOM are catch-alls; the bitmask check below
        # does not apply to them
        if dev_type in (cl.device_type.DEFAULT,
                        cl.device_type.ALL,
                        getattr(cl.device_type, 'CUSTOM', None)):
            continue
        for dev in devs:
            assert dev.type & dev_type == dev_type
def test_user_event(ctx_factory):
    """Threads blocked on a UserEvent wake up once its status is set.

    Checks both Event.wait() and cl.wait_for_events() as the blocking
    primitive.  (The two formerly copy-pasted sequences are deduplicated
    into the check_waiter helper.)
    """
    ctx = ctx_factory()
    if (ctx._get_cl_version() < (1, 1)
            and cl.get_cl_header_version() < (1, 1)):
        from pytest import skip
        skip("UserEvent is only available in OpenCL 1.1")

    # https://github.com/pocl/pocl/issues/201
    _skip_if_pocl(ctx.devices[0].platform, (0, 13),
            "pocl's user events don't work right")

    status = {}

    def event_waiter1(e, key):
        e.wait()
        status[key] = True

    def event_waiter2(e, key):
        cl.wait_for_events([e])
        status[key] = True

    from threading import Thread
    from time import sleep

    def check_waiter(waiter, key, timeout_msg):
        # Start a thread blocked on a fresh user event, then complete the
        # event and verify that the thread observed the completion.
        evt = cl.UserEvent(ctx)
        Thread(target=waiter, args=(evt, key)).start()
        sleep(.05)
        if status.get(key, False):
            raise RuntimeError('UserEvent triggered before set_status')
        evt.set_status(cl.command_execution_status.COMPLETE)
        sleep(.05)
        if not status.get(key, False):
            raise RuntimeError(timeout_msg)
        assert evt.command_execution_status == cl.command_execution_status.COMPLETE

    check_waiter(event_waiter1, 1, 'UserEvent.wait timeout')
    check_waiter(event_waiter2, 2, 'cl.wait_for_events timeout on UserEvent')
def test_buffer_get_host_array(ctx_factory):
    """get_host_array works with USE_HOST_PTR and is refused otherwise."""
    if cl._PYPY:
        # FIXME
        pytest.xfail("Buffer.get_host_array not yet working on pypy")

    ctx = ctx_factory()
    mf = cl.mem_flags

    host_buf = np.random.rand(25).astype(np.float32)
    buf = cl.Buffer(ctx, mf.READ_WRITE | mf.USE_HOST_PTR, hostbuf=host_buf)
    host_buf2 = buf.get_host_array(25, np.float32)
    assert (host_buf == host_buf2).all()
    # Fix: the second operand used to be host_buf again, making the
    # assertion vacuous.  With USE_HOST_PTR both arrays must share storage.
    assert (host_buf.__array_interface__['data'][0]
            == host_buf2.__array_interface__['data'][0])
    assert host_buf2.base is buf

    buf = cl.Buffer(ctx, mf.READ_WRITE | mf.ALLOC_HOST_PTR, size=100)
    try:
        host_buf2 = buf.get_host_array(25, np.float32)
        assert False, ("MemoryObject.get_host_array should not accept buffer "
                "without USE_HOST_PTR")
    except cl.LogicError:
        pass

    host_buf = np.random.rand(25).astype(np.float32)
    buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=host_buf)
    try:
        host_buf2 = buf.get_host_array(25, np.float32)
        assert False, ("MemoryObject.get_host_array should not accept buffer "
                "without USE_HOST_PTR")
    except cl.LogicError:
        pass
def test_program_valued_get_info(ctx_factory):
    """A kernel's .program attribute refers back to the parent program."""
    ctx = ctx_factory()

    prg = cl.Program(ctx, """
        __kernel void
        reverse(__global float *out)
        {
            out[get_global_id(0)] *= 2;
        }
        """).build()

    knl = prg.reverse

    assert knl.program == prg
    # the program-valued info must itself answer queries
    knl.program.binaries[0]
def test_event_set_callback(ctx_factory):
    """A COMPLETE-status callback fires after the gating user event is set."""
    import sys
    if sys.platform.startswith("win"):
        pytest.xfail("Event.set_callback not present on Windows")

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    if ctx._get_cl_version() < (1, 1):
        pytest.skip("OpenCL 1.1 or newer required for set_callback")

    a_np = np.random.rand(50000).astype(np.float32)
    b_np = np.random.rand(50000).astype(np.float32)

    got_called = []

    def cb(status):
        got_called.append(status)

    mf = cl.mem_flags
    a_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a_np)
    b_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b_np)

    prg = cl.Program(ctx, """
    __kernel void sum(__global const float *a_g, __global const float *b_g,
        __global float *res_g) {
      int gid = get_global_id(0);
      res_g[gid] = a_g[gid] + b_g[gid];
    }
    """).build()

    res_g = cl.Buffer(ctx, mf.WRITE_ONLY, a_np.nbytes)

    # gate the kernel behind a user event so the kernel cannot finish
    # before set_callback has been installed
    uevt = cl.UserEvent(ctx)

    evt = prg.sum(queue, a_np.shape, None, a_g, b_g, res_g, wait_for=[uevt])

    evt.set_callback(cl.command_execution_status.COMPLETE, cb)

    uevt.set_status(cl.command_execution_status.COMPLETE)

    queue.finish()

    counter = 0

    # yuck
    while not got_called:
        from time import sleep
        sleep(0.01)

        # wait up to five seconds (?!)
        counter += 1
        if counter >= 500:
            break

    assert got_called
def test_global_offset(ctx_factory):
    """Cover the whole array with several offset kernel enqueues."""
    context = ctx_factory()
    queue = cl.CommandQueue(context)

    prg = cl.Program(context, """
        __kernel void mult(__global float *a)
        { a[get_global_id(0)] *= 2; }
        """).build()

    n = 50
    a = np.random.rand(n).astype(np.float32)

    # (fix: a second, redundant CommandQueue used to be created here)
    mf = cl.mem_flags
    a_buf = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)

    step = 10
    for ofs in range(0, n, step):
        # each enqueue doubles a different step-sized slice
        prg.mult(queue, (step,), None, a_buf, global_offset=(ofs,))

    a_2 = np.empty_like(a)
    cl.enqueue_copy(queue, a_2, a_buf)

    assert (a_2 == 2*a).all()
def test_sub_buffers(ctx_factory):
    """Slicing a Buffer yields an aligned sub-buffer with the right data."""
    ctx = ctx_factory()
    if (ctx._get_cl_version() < (1, 1)
            or cl.get_cl_header_version() < (1, 1)):
        from pytest import skip
        skip("sub-buffers are only available in OpenCL 1.1")

    alignment = ctx.devices[0].mem_base_addr_align

    queue = cl.CommandQueue(ctx)

    n = 30000
    a = (np.random.rand(n) * 100).astype(np.uint8)

    mf = cl.mem_flags
    a_buf = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=a)

    # sub-buffer origins must be multiples of mem_base_addr_align
    start = (5000 // alignment) * alignment
    stop = start + 20 * alignment

    a_sub_ref = a[start:stop]

    a_sub = np.empty_like(a_sub_ref)
    cl.enqueue_copy(queue, a_sub, a_buf[start:stop])

    assert np.array_equal(a_sub, a_sub_ref)
def test_spirv(ctx_factory):
    """Build and run a vector-add program from a SPIR-V binary (CL 2.1+)."""
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    if (ctx._get_cl_version() < (2, 1)
            or cl.get_cl_header_version() < (2, 1)):
        pytest.skip("SPIR-V program creation only available "
                "in OpenCL 2.1 and higher")

    n = 50000

    a_dev = cl.clrandom.rand(queue, n, np.float32)
    b_dev = cl.clrandom.rand(queue, n, np.float32)
    dest_dev = cl_array.empty_like(a_dev)

    # the .spv fixture is bitness-specific
    with open("add-vectors-%d.spv" % queue.device.address_bits, "rb") as spv_file:
        spv = spv_file.read()

    prg = cl.Program(ctx, spv).build()
    if (not prg.all_kernels()
            and queue.device.platform.name.startswith("AMD Accelerated")):
        pytest.skip("SPIR-V program creation on AMD did not result in any kernels")

    prg.sum(queue, a_dev.shape, None, a_dev.data, b_dev.data, dest_dev.data)

    assert la.norm((dest_dev - (a_dev+b_dev)).get()) < 1e-7
def test_coarse_grain_svm(ctx_factory):
    """Exercise coarse-grain SVM: memfill, mapping, kernel use, memcpy."""
    import sys
    is_pypy = '__pypy__' in sys.builtin_module_names

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    dev = ctx.devices[0]

    from pyopencl.characterize import has_coarse_grain_buffer_svm
    from pytest import skip
    if not has_coarse_grain_buffer_svm(queue.device):
        skip("device does not support coarse-grain SVM")

    if ("AMD" in dev.platform.name
            and dev.type & cl.device_type.CPU):
        pytest.xfail("AMD CPU doesn't do coarse-grain SVM")
    if ("AMD" in dev.platform.name
            and dev.type & cl.device_type.GPU):
        pytest.xfail("AMD GPU crashes on SVM unmap")

    n = 3000
    svm_ary = cl.SVM(cl.csvm_empty(ctx, (n,), np.float32, alignment=64))
    if not is_pypy:
        # https://bitbucket.org/pypy/numpy/issues/52
        assert isinstance(svm_ary.mem.base, cl.SVMAllocation)

    cl.enqueue_svm_memfill(queue, svm_ary, np.zeros((), svm_ary.mem.dtype))

    with svm_ary.map_rw(queue) as ary:
        ary.fill(17)
        orig_ary = ary.copy()

    prg = cl.Program(ctx, """
        __kernel void twice(__global float *a_g)
        {
            a_g[get_global_id(0)] *= 2;
        }
        """).build()

    prg.twice(queue, svm_ary.mem.shape, None, svm_ary)

    with svm_ary.map_ro(queue) as ary:
        print(ary)
        assert np.array_equal(orig_ary*2, ary)

    new_ary = np.empty_like(orig_ary)
    new_ary.fill(-1)

    if ctx.devices[0].platform.name != "Portable Computing Language":
        # "Blocking memcpy is unimplemented (clEnqueueSVMMemcpy.c:61)"
        # in pocl up to and including 1.0rc1.
        cl.enqueue_copy(queue, new_ary, svm_ary)
        assert np.array_equal(orig_ary*2, new_ary)
def test_fine_grain_svm(ctx_factory):
    """Fine-grain SVM arrays are usable directly from host and kernels."""
    import sys
    is_pypy = '__pypy__' in sys.builtin_module_names

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    from pyopencl.characterize import has_fine_grain_buffer_svm
    from pytest import skip
    if not has_fine_grain_buffer_svm(queue.device):
        skip("device does not support fine-grain SVM")

    n = 3000
    ary = cl.fsvm_empty(ctx, n, np.float32, alignment=64)

    if not is_pypy:
        # https://bitbucket.org/pypy/numpy/issues/52
        assert isinstance(ary.base, cl.SVMAllocation)

    # host writes need no explicit map with fine-grain SVM
    ary.fill(17)
    orig_ary = ary.copy()

    prg = cl.Program(ctx, """
        __kernel void twice(__global float *a_g)
        {
            a_g[get_global_id(0)] *= 2;
        }
        """).build()

    prg.twice(queue, ary.shape, None, cl.SVM(ary))
    queue.finish()

    print(ary)
    assert np.array_equal(orig_ary*2, ary)
@pytest.mark.parametrize("dtype", [
    np.uint,
    cltypes.uint2,
    ])
def test_map_dtype(ctx_factory, dtype):
    """enqueue_map_buffer honors both scalar and vector dtypes."""
    if cl._PYPY:
        # FIXME
        pytest.xfail("enqueue_map_buffer not yet working on pypy")

    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    dt = np.dtype(dtype)

    # Consistency fix: use the module's 'cl' alias rather than the bare
    # 'pyopencl' name, which was only bound via 'import pyopencl.clrandom'.
    b = cl.Buffer(ctx,
                  cl.mem_flags.READ_ONLY,
                  dt.itemsize)
    array, ev = cl.enqueue_map_buffer(queue, b, cl.map_flags.WRITE, 0,
                                      (1,), dt)
    # the mapping (array.base) is a context manager; leaving it unmaps
    with array.base:
        print(array.dtype)
        assert array.dtype == dt
def test_compile_link(ctx_factory):
    """Separate compile + link of two program objects (CL 1.2+)."""
    ctx = ctx_factory()
    if ctx._get_cl_version() < (1, 2) or cl.get_cl_header_version() < (1, 2):
        pytest.skip("Context and ICD loader must understand CL1.2 for compile/link")

    platform = ctx.devices[0].platform
    if platform.name == "Apple":
        pytest.skip("Apple doesn't like our compile/link test")

    queue = cl.CommandQueue(ctx)
    vsink_prg = cl.Program(ctx, """//CL//
        void value_sink(float x)
        {
        }
        """).compile()
    main_prg = cl.Program(ctx, """//CL//
        void value_sink(float x);

        __kernel void experiment()
        {
            value_sink(3.1415f + get_global_id(0));
        }
        """).compile()
    # the linked program resolves value_sink across the two objects
    z = cl.link_program(ctx, [vsink_prg, main_prg], devices=ctx.devices)
    z.experiment(queue, (128**2,), (128,))
    queue.finish()
def test_copy_buffer_rect(ctx_factory):
    """Rectangular sub-copy from a small array into a larger one."""
    ctx = ctx_factory()
    queue = cl.CommandQueue(ctx)

    src = cl_array.zeros(queue, (2, 3), "f")
    dst = cl_array.zeros(queue, (4, 5), "f")
    src.fill(1)
    # origins/region are given in (x, y) order, hence the reversed shape
    cl.enqueue_copy(
            queue, dst.data, src.data,
            src_origin=(0, 0), dst_origin=(1, 1),
            region=src.shape[::-1])
def test_threaded_nanny_events(ctx_factory):
    """Stress GC of nanny events from two concurrent threads.

    Regression test for https://github.com/inducer/pyopencl/issues/296.
    """
    import gc
    import threading

    def create_arrays_thread(n1=10, n2=20):
        ctx = ctx_factory()
        queue = cl.CommandQueue(ctx)
        for _ in range(n2):
            for _ in range(n1):
                # Fix: use the explicit cl_array alias instead of
                # cl.array, which only worked via an import side effect.
                acl = cl_array.zeros(queue, 10, dtype=np.float32)
                acl.get()
            # Garbage collection used to trigger the error
            print("collected ", str(gc.collect()))
            print("stats ", gc.get_stats())

    t1 = threading.Thread(target=create_arrays_thread)
    t2 = threading.Thread(target=create_arrays_thread)
    t1.start()
    t2.start()
    t1.join()
    t2.join()
if __name__ == "__main__":
    # make sure that import failures get reported, instead of skipping the tests.
    import pyopencl  # noqa

    import sys
    if len(sys.argv) > 1:
        # allow "python test_wrapper.py 'test_foo(...)'" style invocation
        exec(sys.argv[1])
    else:
        from pytest import main
        main([__file__])
|
speedtest.py | r"""
Perform a bandwidth test with speedtest-cli.
Use middle-click to start the speed test.
Configuration parameters:
button_share: mouse button to share an URL (default None)
format: display format for this module
*(default "speedtest[\?if=elapsed&color=elapsed_time "
"{elapsed_time}s][ [\?color=download ↓{download}Mbps] "
"[\?color=upload ↑{upload}Mbps]]")*
thresholds: specify color thresholds to use
*(default {"upload": [(0, "violet")], "ping": [(0, "#fff381")],
"download": [(0, "cyan")], "elapsed_time": [(0, "#1cbfff")]})*
Control placeholders:
{elapsed} elapsed time state, eg False, True
Format placeholders:
{bytes_sent} bytes sent during test (in MB), eg 52.45
{bytes_received} bytes received during test (in MB), eg 70.23
{client_country} client country code, eg FR
{client_ip} client ip, eg 78.194.13.7
{client_isp} client isp, eg Free SAS
{client_ispdlavg} client isp download average, eg 0
{client_isprating} client isp rating, eg 3.7
{client_ispulavg} client isp upload average, eg 0
{client_lat} client latitude, eg 48.8534
{client_loggedin} client logged in, eg 0
{client_lon} client longitude, eg 2.3487999999999998
{client_rating} client rating, eg 0
{download} download speed (in Mbps), eg 20.23
{elapsed_time} elapsed time since speedtest start
{ping} ping time in ms to speedtest server
{server_cc} server country code, eg FR
{server_country} server country, eg France
{server_d} server distance, eg 2.316599376968091
{server_host} server host, eg speedtest.telecom-paristech.fr:8080
{server_id} server id, eg 11977
{share} share, eg share url
{timestamp} timestamp, eg 2018-08-30T16:27:25.318212Z
{server_lat} server latitude, eg 48.8742
{server_latency} server latency, eg 8.265
{server_lon} server longitude, eg 2.3470
{server_name} server name, eg Paris
{server_sponsor} server sponsor, eg Télécom ParisTech
{server_url} server url, eg http://speedtest.telecom-paristech...
{upload} upload speed (in Mbps), eg 20.23
Color thresholds:
xxx: print a color based on the value of `xxx` placeholder
Requires:
speedtest-cli: Command line interface for testing Internet bandwidth
Examples:
```
# show detailed elapsed_time|download/upload
speedtest {
format = "speedtest[\?soft ][\?if=elapsed [\?color=darkgray [time "
format += "[\?color=elapsed_time {elapsed_time} s]]]|[\?color=darkgray "
# format += "ping [\?color=ping {ping} ms] "
format += "download [\?color=download {download}Mbps] "
format += "upload [\?color=upload {upload}Mbps]]]"
}
# show everything
speedtest {
format = "speedtest[\?soft ][\?color=darkgray "
format += "[time [\?color=elapsed_time {elapsed_time} s]][\?soft ]"
format += "[ping [\?color=ping {ping} ms] "
format += "download [\?color=download {download}Mbps] "
format += "upload [\?color=upload {upload}Mbps]]]"
}
# minimal
speedtest {
format = "speedtest[\?soft ][\?if=elapsed "
format += "[\?color=elapsed_time {elapsed_time}]|"
# format += "[\?color=ping {ping}] "
format += "[[\?color=download {download}] [\?color=upload {upload}]]]"
}
# don't hide data on reset
speedtest {
format = "speedtest[\?soft ][\?color=darkgray time "
format += "[\?color=elapsed_time {elapsed_time} s] "
# format += "ping [\?color=ping {ping} ms] "
format += "download [\?color=download {download}Mbps] "
format += "upload [\?color=upload {upload}Mbps]]"
}
# don't hide data on reset, minimal
speedtest {
format = "speedtest[\?soft ][[\?color=elapsed_time {elapsed_time}] "
# format += "[\?color=ping {ping}] "
format += "[\?color=download {download}] [\?color=upload {upload}]]"
}
```
@author Cyril Levis (@cyrinux)
SAMPLE OUTPUT
[
{"full_text": "speedtest "},
{"full_text": "19.76Mbps ", "color": "#00ffff"},
{"full_text": "3.86Mbps", "color": "#ee82ee"},
]
time+ping
[
{"full_text": "speedtest "},
{"full_text": "time ", "color": "#a9a9a9"},
{"full_text": "24.65 s ", "color": "#1cbfff"},
{"full_text": "ping ", "color": "#a9a9a9"},
{"full_text": "28.27 ms", "color": "#ffff00"},
]
details
[
{"full_text": "speedtest "},
{"full_text": "download ", "color": "#a9a9a9"},
{"full_text": "18.2Mbps ", "color": "#00ffff"},
{"full_text": "upload ", "color": "#a9a9a9"},
{"full_text": "19.2Mbps", "color": "#ee82ee"},
]
"""
import time
from json import loads
from threading import Thread
# Error message raised from post_config_hook() when the speedtest-cli
# executable cannot be found on PATH.
STRING_NOT_INSTALLED = "not installed"
class Py3status:
    """Display Internet bandwidth measured with speedtest-cli.

    A refresh is triggered by mouse button `button_refresh` (middle click);
    the measurement runs in a daemon thread so the bar stays responsive, and
    the last result is persisted via py3 storage across restarts.
    """

    # available configuration parameters
    button_share = None
    format = (
        r"speedtest[\?if=elapsed&color=elapsed_time "
        r"{elapsed_time}s][ [\?color=download ↓{download}Mbps] "
        r"[\?color=upload ↑{upload}Mbps]]"
    )
    thresholds = {
        "download": [(0, "cyan")],
        "elapsed_time": [(0, "#1cbfff")],
        "ping": [(0, "#fff381")],
        "upload": [(0, "violet")],
    }

    class Meta:
        update_config = {
            "update_placeholder_format": [
                {
                    "format_strings": ["format"],
                    "placeholder_formats": {
                        "bytes_received": ":.2f",
                        "bytes_sent": ":.2f",
                        "download": ":.2f",
                        "elapsed_time": ":.2f",
                        "ping": ":.2f",
                        "server_d": ":.2f",
                        "upload": ":.2f",
                    },
                }
            ]
        }

    def post_config_hook(self):
        """Validate dependencies and build the speedtest-cli command line."""
        self.speedtest_command = "speedtest-cli --json --secure"
        if not self.py3.check_commands(self.speedtest_command.split()[0]):
            raise Exception(STRING_NOT_INSTALLED)

        # init
        self.button_refresh = 2
        self.placeholders = self.py3.get_placeholders_list(self.format)
        self.speedtest_data = self.py3.storage_get("speedtest_data") or {}
        self.thread = None
        self.thresholds_init = self.py3.get_color_names_list(self.format)

        # elapsed_time is computed locally, never returned by speedtest-cli,
        # so it must not drive the --no-download/--no-upload decision below.
        if "elapsed_time" in self.placeholders:
            self.placeholders.remove("elapsed_time")

        # share
        if self.button_share:
            self.speedtest_command += " --share"

        # perform download/upload tests only for the placeholders in use
        tests = ["download", "upload"]
        if any(x in tests for x in self.placeholders):
            for x in tests:
                if x not in self.placeholders:
                    self.speedtest_command += f" --no-{x}"

    def _set_speedtest_data(self):
        """Worker: run speedtest-cli and store its flattened JSON result."""
        # start: flag the run so speedtest() shows a live elapsed timer
        self.start_time = time.perf_counter()
        self.speedtest_data["elapsed"] = True
        try:
            self.speedtest_data = self.py3.flatten_dict(
                loads(self.py3.command_output(self.speedtest_command)), delimiter="_"
            )
            for x in ["download", "upload", "bytes_received", "bytes_sent"]:
                if x not in self.placeholders or x not in self.speedtest_data:
                    continue
                # bytes_* counters are binary quantities; rates use SI units
                si = "bytes" not in x
                self.speedtest_data[x], _ = self.py3.format_units(
                    self.speedtest_data[x], unit="MB", si=si
                )
        except self.py3.CommandError:
            # best effort: keep whatever data we already had
            pass
        # end
        self.speedtest_data["elapsed"] = False
        self.speedtest_data["elapsed_time"] = time.perf_counter() - self.start_time

    def speedtest(self):
        """py3status entry point: render the module output."""
        if self.speedtest_data.get("elapsed"):
            # a test is running: refresh every interval to animate the timer
            cached_until = 0
            self.speedtest_data["elapsed_time"] = time.perf_counter() - self.start_time
        else:
            cached_until = self.py3.CACHE_FOREVER
            self.py3.storage_set("speedtest_data", self.speedtest_data)

        # thresholds
        for x in self.thresholds_init:
            if x in self.speedtest_data:
                self.py3.threshold_get_color(self.speedtest_data[x], x)

        return {
            "cached_until": self.py3.time_in(cached_until),
            "full_text": self.py3.safe_format(self.format, self.speedtest_data),
        }

    def on_click(self, event):
        """Open the share url or launch a new measurement thread."""
        button = event["button"]
        if button == self.button_share:
            share = self.speedtest_data.get("share")
            if share:
                self.py3.command_run(f"xdg-open {share}")
        if button == self.button_refresh:
            # BUG FIX: Thread.isAlive() was removed in Python 3.9; the
            # PEP 8 alias is_alive() exists since Python 2.6.
            if self.thread and not self.thread.is_alive():
                self.thread = None
            if self.thread is None:
                self.thread = Thread(target=self._set_speedtest_data)
                self.thread.daemon = True
                self.thread.start()
# py3status convention: running the file directly starts a terminal test
# loop that repeatedly renders this module's output.
if __name__ == "__main__":
    """
    Run module in test mode.
    """
    from py3status.module_test import module_test

    module_test(Py3status)
|
CompileDep.py | #encoding=utf-8
import Queue
import os
import pickle
from threading import Thread
import utils
from GenMakefile import GenMakefile
from ParseComake import ComakeParser
class CompileDep:
def __init__(self):
self.work_num = 1
#self.root = os.getenv("COMAKEPATH")
self.stack = None
self.comake = None
self.queue = Queue.Queue()
def init(self, work_num = 4):
if os.path.exists(".comake_deps"):
with open('.comake_deps', 'rb') as f:
self.stack = pickle.load(f)
for s in self.stack:
self.queue.put(s)
self.work_num = work_num
return True
return False
def compile_worker(self):
while True:
try:
path = self.queue.get_nowait()
except Queue.Empty:
break
else:
makeGenerator = GenMakefile()
parser = ComakeParser()
makeGenerator.setPath(path)
makeGenerator.setComake(parser.Parse(os.sep.join([path, 'COMAKE'])))
makeGenerator.generate()
if makeGenerator.comake['use_local_makefile'] == 0:
res = utils.CallCmd("cd {0} && make -j4 -s".format(path))
else:
res = utils.CallCmd("cd {0} && make -j4 -s -f Makefile.comake".format(path))
if res[0] == 0:
print utils.GreenIt(res[1].strip())
else:
print utils.RedIt(res[2])
def start(self):
thread_list = []
for i in range(0, self.work_num):
thread_list.append(Thread(target=self.compile_worker()))
thread_list[i].start()
for i in range(self.work_num):
thread_list[i].join()
|
test_api.py | """
mbed SDK
Copyright (c) 2011-2014 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Author: Przemyslaw Wirkus <Przemyslaw.wirkus@arm.com>
"""
from __future__ import print_function
import six
import os
import re
import sys
import json
import uuid
import pprint
import random
import argparse
import datetime
import threading
import ctypes
import functools
from colorama import Fore, Back, Style
from prettytable import PrettyTable, HEADER
from copy import copy, deepcopy
from time import sleep, time
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
from os.path import join, exists, basename, relpath, isdir, isfile
from threading import Thread, Lock
from multiprocessing import Pool, cpu_count
from subprocess import Popen, PIPE
# Imports related to mbed build api
from tools.tests import TESTS
from tools.tests import TEST_MAP
from tools.paths import BUILD_DIR
from tools.paths import HOST_TESTS
from tools.utils import ToolException
from tools.utils import NotSupportedException
from tools.utils import construct_enum
from tools.memap import MemapParser
from tools.targets import TARGET_MAP, Target
from tools.config import Config
import tools.test_configs as TestConfig
from tools.test_db import BaseDBAccess
from tools.build_api import build_project, build_mbed_libs, build_lib
from tools.build_api import get_target_supported_toolchains
from tools.build_api import write_build_report
from tools.build_api import prep_report
from tools.build_api import prep_properties
from tools.build_api import create_result
from tools.build_api import add_result_to_report
from tools.build_api import prepare_toolchain
from tools.build_api import get_config
from tools.resources import Resources, MbedIgnoreSet, IGNORE_FILENAME
from tools.libraries import LIBRARIES, LIBRARY_MAP
from tools.options import extract_profile
from tools.toolchains import TOOLCHAIN_PATHS
from tools.toolchains import TOOLCHAINS
from tools.test_exporters import ReportExporter, ResultExporterType
from tools.utils import argparse_filestring_type
from tools.utils import argparse_uppercase_type
from tools.utils import argparse_lowercase_type
from tools.utils import argparse_many
from tools.notifier.mock import MockNotifier
from tools.notifier.term import TerminalNotifier
import tools.host_tests.host_tests_plugins as host_tests_plugins
try:
import mbed_lstools
from tools.compliance.ioper_runner import get_available_oper_test_scopes
except:
pass
class ProcessObserver(Thread):
    """Daemon thread that pumps a subprocess's stdout into a queue.

    One byte is read per iteration and forwarded to ``self.queue`` until
    ``stop()`` clears the active flag; the thread starts itself on
    construction.
    """

    def __init__(self, proc):
        Thread.__init__(self)
        self.proc = proc
        self.queue = Queue()
        self.daemon = True
        self.active = True
        # observing begins immediately upon construction
        self.start()

    def run(self):
        # forward stdout byte-by-byte while the observer is active
        while self.active:
            self.queue.put(self.proc.stdout.read(1))

    def stop(self):
        self.active = False
        # best effort: the process may already be gone
        try:
            self.proc.terminate()
        except Exception:
            pass
class SingleTestExecutor(threading.Thread):
    """ Example: Single test class in separate thread usage

    Wraps a SingleTestRunner so its execute() pass runs in a dedicated
    thread; prints the human readable summaries when the run completes.
    """
    def __init__(self, single_test):
        self.single_test = single_test
        threading.Thread.__init__(self)

    def run(self):
        start = time()
        # Execute tests depending on options and filter applied
        # BUG FIX: SingleTestRunner.execute() returns a 6-tuple
        # (test_summary, shuffle_seed, test_summary_ext,
        #  test_suite_properties_ext, build_report, build_properties);
        # unpacking exactly four names raised ValueError.  Slice the
        # first four so both 4- and 6-element results are accepted.
        results = self.single_test.execute()
        test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext = results[:4]
        elapsed_time = time() - start

        # Human readable summary
        if not self.single_test.opts_suppress_summary:
            # prints well-formed summary with results (SQL table like)
            print(self.single_test.generate_test_summary(test_summary,
                                                         shuffle_seed))
        if self.single_test.opts_test_x_toolchain_summary:
            # prints well-formed summary with results (SQL table like)
            # table shows text x toolchain test result matrix
            print(self.single_test.generate_test_summary_by_target(
                test_summary, shuffle_seed))

        print("Completed in %.2f sec"% (elapsed_time))
class SingleTestRunner(object):
""" Object wrapper for single test run which may involve multiple MUTs
"""
RE_DETECT_TESTCASE_RESULT = None
# Return codes for test script
TEST_RESULT_OK = "OK"
TEST_RESULT_FAIL = "FAIL"
TEST_RESULT_ERROR = "ERROR"
TEST_RESULT_UNDEF = "UNDEF"
TEST_RESULT_IOERR_COPY = "IOERR_COPY"
TEST_RESULT_IOERR_DISK = "IOERR_DISK"
TEST_RESULT_IOERR_SERIAL = "IOERR_SERIAL"
TEST_RESULT_TIMEOUT = "TIMEOUT"
TEST_RESULT_NO_IMAGE = "NO_IMAGE"
TEST_RESULT_MBED_ASSERT = "MBED_ASSERT"
TEST_RESULT_BUILD_FAILED = "BUILD_FAILED"
TEST_RESULT_NOT_SUPPORTED = "NOT_SUPPORTED"
GLOBAL_LOOPS_COUNT = 1 # How many times each test should be repeated
TEST_LOOPS_LIST = [] # We redefine no.of loops per test_id
TEST_LOOPS_DICT = {} # TEST_LOOPS_LIST in dict format: { test_id : test_loop_count}
muts = {} # MUTs descriptor (from external file)
test_spec = {} # Test specification (from external file)
# mbed test suite -> SingleTestRunner
TEST_RESULT_MAPPING = {"success" : TEST_RESULT_OK,
"failure" : TEST_RESULT_FAIL,
"error" : TEST_RESULT_ERROR,
"ioerr_copy" : TEST_RESULT_IOERR_COPY,
"ioerr_disk" : TEST_RESULT_IOERR_DISK,
"ioerr_serial" : TEST_RESULT_IOERR_SERIAL,
"timeout" : TEST_RESULT_TIMEOUT,
"no_image" : TEST_RESULT_NO_IMAGE,
"end" : TEST_RESULT_UNDEF,
"mbed_assert" : TEST_RESULT_MBED_ASSERT,
"build_failed" : TEST_RESULT_BUILD_FAILED,
"not_supproted" : TEST_RESULT_NOT_SUPPORTED
}
def __init__(self,
_global_loops_count=1,
_test_loops_list=None,
_muts={},
_clean=False,
_parser=None,
_opts=None,
_opts_db_url=None,
_opts_log_file_name=None,
_opts_report_html_file_name=None,
_opts_report_junit_file_name=None,
_opts_report_build_file_name=None,
_opts_report_text_file_name=None,
_opts_build_report={},
_opts_build_properties={},
_test_spec={},
_opts_goanna_for_mbed_sdk=None,
_opts_goanna_for_tests=None,
_opts_shuffle_test_order=False,
_opts_shuffle_test_seed=None,
_opts_test_by_names=None,
_opts_peripheral_by_names=None,
_opts_test_only_peripheral=False,
_opts_test_only_common=False,
_opts_verbose_skipped_tests=False,
_opts_verbose_test_result_only=False,
_opts_verbose=False,
_opts_firmware_global_name=None,
_opts_only_build_tests=False,
_opts_parallel_test_exec=False,
_opts_suppress_summary=False,
_opts_test_x_toolchain_summary=False,
_opts_copy_method=None,
_opts_mut_reset_type=None,
_opts_jobs=None,
_opts_waterfall_test=None,
_opts_consolidate_waterfall_test=None,
_opts_extend_test_timeout=None,
_opts_auto_detect=None,
_opts_include_non_automated=False):
""" Let's try hard to init this object
"""
from colorama import init
init()
PATTERN = "\\{(" + "|".join(self.TEST_RESULT_MAPPING.keys()) + ")\\}"
self.RE_DETECT_TESTCASE_RESULT = re.compile(PATTERN)
# Settings related to test loops counters
try:
_global_loops_count = int(_global_loops_count)
except:
_global_loops_count = 1
if _global_loops_count < 1:
_global_loops_count = 1
self.GLOBAL_LOOPS_COUNT = _global_loops_count
self.TEST_LOOPS_LIST = _test_loops_list if _test_loops_list else []
self.TEST_LOOPS_DICT = self.test_loop_list_to_dict(_test_loops_list)
self.shuffle_random_seed = 0.0
self.SHUFFLE_SEED_ROUND = 10
# MUT list and test specification storage
self.muts = _muts
self.test_spec = _test_spec
# Settings passed e.g. from command line
self.opts_db_url = _opts_db_url
self.opts_log_file_name = _opts_log_file_name
self.opts_report_html_file_name = _opts_report_html_file_name
self.opts_report_junit_file_name = _opts_report_junit_file_name
self.opts_report_build_file_name = _opts_report_build_file_name
self.opts_report_text_file_name = _opts_report_text_file_name
self.opts_goanna_for_mbed_sdk = _opts_goanna_for_mbed_sdk
self.opts_goanna_for_tests = _opts_goanna_for_tests
self.opts_shuffle_test_order = _opts_shuffle_test_order
self.opts_shuffle_test_seed = _opts_shuffle_test_seed
self.opts_test_by_names = _opts_test_by_names
self.opts_peripheral_by_names = _opts_peripheral_by_names
self.opts_test_only_peripheral = _opts_test_only_peripheral
self.opts_test_only_common = _opts_test_only_common
self.opts_verbose_skipped_tests = _opts_verbose_skipped_tests
self.opts_verbose_test_result_only = _opts_verbose_test_result_only
self.opts_verbose = _opts_verbose
self.opts_firmware_global_name = _opts_firmware_global_name
self.opts_only_build_tests = _opts_only_build_tests
self.opts_parallel_test_exec = _opts_parallel_test_exec
self.opts_suppress_summary = _opts_suppress_summary
self.opts_test_x_toolchain_summary = _opts_test_x_toolchain_summary
self.opts_copy_method = _opts_copy_method
self.opts_mut_reset_type = _opts_mut_reset_type
self.opts_jobs = _opts_jobs if _opts_jobs is not None else 1
self.opts_waterfall_test = _opts_waterfall_test
self.opts_consolidate_waterfall_test = _opts_consolidate_waterfall_test
self.opts_extend_test_timeout = _opts_extend_test_timeout
self.opts_clean = _clean
self.opts_parser = _parser
self.opts = _opts
self.opts_auto_detect = _opts_auto_detect
self.opts_include_non_automated = _opts_include_non_automated
self.build_report = _opts_build_report
self.build_properties = _opts_build_properties
# File / screen logger initialization
self.logger = CLITestLogger(file_name=self.opts_log_file_name) # Default test logger
# Database related initializations
self.db_logger = factory_db_logger(self.opts_db_url)
self.db_logger_build_id = None # Build ID (database index of build_id table)
# Let's connect to database to set up credentials and confirm database is ready
if self.db_logger:
self.db_logger.connect_url(self.opts_db_url) # Save db access info inside db_logger object
if self.db_logger.is_connected():
# Get hostname and uname so we can use it as build description
# when creating new build_id in external database
(_hostname, _uname) = self.db_logger.get_hostname()
_host_location = os.path.dirname(os.path.abspath(__file__))
build_id_type = None if self.opts_only_build_tests is None else self.db_logger.BUILD_ID_TYPE_BUILD_ONLY
self.db_logger_build_id = self.db_logger.get_next_build_id(_hostname, desc=_uname, location=_host_location, type=build_id_type)
self.db_logger.disconnect()
def dump_options(self):
""" Function returns data structure with common settings passed to SingelTestRunner
It can be used for example to fill _extra fields in database storing test suite single run data
Example:
data = self.dump_options()
or
data_str = json.dumps(self.dump_options())
"""
result = {"db_url" : str(self.opts_db_url),
"log_file_name" : str(self.opts_log_file_name),
"shuffle_test_order" : str(self.opts_shuffle_test_order),
"shuffle_test_seed" : str(self.opts_shuffle_test_seed),
"test_by_names" : str(self.opts_test_by_names),
"peripheral_by_names" : str(self.opts_peripheral_by_names),
"test_only_peripheral" : str(self.opts_test_only_peripheral),
"test_only_common" : str(self.opts_test_only_common),
"verbose" : str(self.opts_verbose),
"firmware_global_name" : str(self.opts_firmware_global_name),
"only_build_tests" : str(self.opts_only_build_tests),
"copy_method" : str(self.opts_copy_method),
"mut_reset_type" : str(self.opts_mut_reset_type),
"jobs" : str(self.opts_jobs),
"extend_test_timeout" : str(self.opts_extend_test_timeout),
"_dummy" : ''
}
return result
    def shuffle_random_func(self):
        """Constant "random" function handed to random.shuffle() so that the
        shuffle order is fully determined by self.shuffle_random_seed and is
        therefore reproducible across runs."""
        return self.shuffle_random_seed
def is_shuffle_seed_float(self):
""" return true if function parameter can be converted to float
"""
result = True
try:
float(self.shuffle_random_seed)
except ValueError:
result = False
return result
# This will store target / toolchain specific properties
test_suite_properties_ext = {} # target : toolchain
# Here we store test results
test_summary = []
# Here we store test results in extended data structure
test_summary_ext = {}
execute_thread_slice_lock = Lock()
    def execute_thread_slice(self, q, target, toolchains, clean, test_ids, build_report, build_properties):
        """Build and run the whole test slice for one target.

        For each toolchain: build mbed libs, collect and build the libraries
        the selected tests depend on, build every valid test project and
        (unless only building) execute it via self.handle().  Results are
        accumulated into self.test_summary / self.test_summary_ext /
        self.test_suite_properties_ext.  One sentinel item is put on `q`
        when the slice finishes so execute() can wait without join().
        """
        for toolchain in toolchains:
            tt_id = "%s::%s" % (toolchain, target)

            T = TARGET_MAP[target]

            # print target, toolchain
            # Test suite properties returned to external tools like CI
            test_suite_properties = {
                'jobs': self.opts_jobs,
                'clean': clean,
                'target': target,
                'vendor': T.extra_labels[0],
                'test_ids': ', '.join(test_ids),
                'toolchain': toolchain,
                'shuffle_random_seed': self.shuffle_random_seed
            }

            # print '=== %s::%s ===' % (target, toolchain)
            # Let's build our test
            # NOTE(review): this guard looks unreachable -- TARGET_MAP[target]
            # was already dereferenced above and would have raised KeyError
            # first; confirm intended ordering.
            if target not in TARGET_MAP:
                print(self.logger.log_line(
                    self.logger.LogType.NOTIF,
                    'Skipped tests for %s target. Target platform not found' %
                    (target)))
                continue

            clean_mbed_libs_options = (self.opts_goanna_for_mbed_sdk or
                                       self.opts_clean or clean)

            profile = extract_profile(self.opts_parser, self.opts, toolchain)
            stats_depth = self.opts.stats_depth or 2

            try:
                build_mbed_libs_result = build_mbed_libs(
                    T, toolchain,
                    clean=clean_mbed_libs_options,
                    jobs=self.opts_jobs,
                    report=build_report,
                    properties=build_properties,
                    build_profile=profile,
                    notify=TerminalNotifier())

                if not build_mbed_libs_result:
                    print(self.logger.log_line(
                        self.logger.LogType.NOTIF,
                        'Skipped tests for %s target. Toolchain %s is not '
                        'supported for this target'% (T.name, toolchain)))
                    continue

            except ToolException:
                print(self.logger.log_line(
                    self.logger.LogType.ERROR,
                    'There were errors while building MBED libs for %s using %s'
                    % (target, toolchain)))
                continue

            build_dir = join(BUILD_DIR, "test", target, toolchain)

            test_suite_properties['build_mbed_libs_result'] = build_mbed_libs_result
            test_suite_properties['build_dir'] = build_dir
            test_suite_properties['skipped'] = []

            # Enumerate through all tests and shuffle test order if requested
            test_map_keys = sorted(TEST_MAP.keys())

            if self.opts_shuffle_test_order:
                random.shuffle(test_map_keys, self.shuffle_random_func)
                # Update database with shuffle seed f applicable
                if self.db_logger:
                    self.db_logger.reconnect();
                    if self.db_logger.is_connected():
                        self.db_logger.update_build_id_info(
                            self.db_logger_build_id,
                            _shuffle_seed=self.shuffle_random_func())
                    self.db_logger.disconnect();

            if self.db_logger:
                self.db_logger.reconnect();
                if self.db_logger.is_connected():
                    # Update MUTs and Test Specification in database
                    self.db_logger.update_build_id_info(
                        self.db_logger_build_id,
                        _muts=self.muts, _test_spec=self.test_spec)
                    # Update Extra information in database (some options passed to test suite)
                    self.db_logger.update_build_id_info(
                        self.db_logger_build_id,
                        _extra=json.dumps(self.dump_options()))
                self.db_logger.disconnect();

            valid_test_map_keys = self.get_valid_tests(test_map_keys, target, toolchain, test_ids, self.opts_include_non_automated)
            skipped_test_map_keys = self.get_skipped_tests(test_map_keys, valid_test_map_keys)

            for skipped_test_id in skipped_test_map_keys:
                test_suite_properties['skipped'].append(skipped_test_id)

            # First pass through all tests and determine which libraries need to be built
            libraries = []
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                # Detect which lib should be added to test
                # Some libs have to compiled like RTOS or ETH
                for lib in LIBRARIES:
                    if lib['build_dir'] in test.dependencies and lib['id'] not in libraries:
                        libraries.append(lib['id'])

            clean_project_options = True if self.opts_goanna_for_tests or clean or self.opts_clean else None

            # Build all required libraries
            for lib_id in libraries:
                try:
                    build_lib(lib_id,
                              T,
                              toolchain,
                              clean=clean_mbed_libs_options,
                              jobs=self.opts_jobs,
                              report=build_report,
                              properties=build_properties,
                              build_profile=profile,
                              notify=TerminalNotifier())

                except ToolException:
                    print(self.logger.log_line(
                        self.logger.LogType.ERROR,
                        'There were errors while building library %s' % lib_id))
                    continue

            # Second pass: build and (optionally) run every valid test
            for test_id in valid_test_map_keys:
                test = TEST_MAP[test_id]

                test_suite_properties['test.libs.%s.%s.%s'% (target, toolchain, test_id)] = ', '.join(libraries)

                # TODO: move this 2 below loops to separate function
                INC_DIRS = []
                for lib_id in libraries:
                    if 'inc_dirs_ext' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['inc_dirs_ext']:
                        INC_DIRS.extend(LIBRARY_MAP[lib_id]['inc_dirs_ext'])

                MACROS = []
                for lib_id in libraries:
                    if 'macros' in LIBRARY_MAP[lib_id] and LIBRARY_MAP[lib_id]['macros']:
                        MACROS.extend(LIBRARY_MAP[lib_id]['macros'])

                MACROS.append('TEST_SUITE_TARGET_NAME="%s"'% target)
                MACROS.append('TEST_SUITE_TEST_ID="%s"'% test_id)
                test_uuid = uuid.uuid4()
                MACROS.append('TEST_SUITE_UUID="%s"'% str(test_uuid))

                # Prepare extended test results data structure (it can be used to generate detailed test report)
                if target not in self.test_summary_ext:
                    self.test_summary_ext[target] = {}  # test_summary_ext : toolchain
                if toolchain not in self.test_summary_ext[target]:
                    self.test_summary_ext[target][toolchain] = {}  # test_summary_ext : toolchain : target

                tt_test_id = "%s::%s::%s" % (toolchain, target, test_id)  # For logging only

                project_name = self.opts_firmware_global_name if self.opts_firmware_global_name else None
                try:
                    path = build_project(
                        test.source_dir, join(build_dir, test_id), T,
                        toolchain, test.dependencies, clean=clean_project_options,
                        name=project_name, macros=MACROS,
                        inc_dirs=INC_DIRS, jobs=self.opts_jobs, report=build_report,
                        properties=build_properties, project_id=test_id,
                        project_description=test.get_description(),
                        build_profile=profile, stats_depth=stats_depth,
                        notify=TerminalNotifier(),
                    )

                except Exception as e:
                    project_name_str = project_name if project_name is not None else test_id

                    test_result = self.TEST_RESULT_FAIL

                    if isinstance(e, ToolException):
                        print(self.logger.log_line(
                            self.logger.LogType.ERROR,
                            'There were errors while building project %s' %
                            project_name_str))
                        test_result = self.TEST_RESULT_BUILD_FAILED
                    elif isinstance(e, NotSupportedException):
                        print(self.logger.log_line(
                            self.logger.LogType.INFO,
                            'Project %s is not supported' % project_name_str))
                        test_result = self.TEST_RESULT_NOT_SUPPORTED

                    # Append test results to global test summary
                    self.test_summary.append(
                        (test_result, target, toolchain, test_id,
                         test.get_description(), 0, 0, '-')
                    )

                    # Add detailed test result to test summary structure
                    if test_id not in self.test_summary_ext[target][toolchain]:
                        self.test_summary_ext[target][toolchain][test_id] = []

                    self.test_summary_ext[target][toolchain][test_id].append({ 0: {
                        'result' : test_result,
                        'output' : '',
                        'target_name' : target,
                        'target_name_unique': target,
                        'toolchain_name' : toolchain,
                        'id' : test_id,
                        'description' : test.get_description(),
                        'elapsed_time' : 0,
                        'duration' : 0,
                        'copy_method' : None
                    }})
                    continue

                if self.opts_only_build_tests:
                    # With this option we are skipping testing phase
                    continue

                # Test duration can be increased by global value
                test_duration = test.duration
                if self.opts_extend_test_timeout is not None:
                    test_duration += self.opts_extend_test_timeout

                # For an automated test the duration act as a timeout after
                # which the test gets interrupted
                test_spec = self.shape_test_request(target, path, test_id, test_duration)
                test_loops = self.get_test_loop_count(test_id)

                test_suite_properties['test.duration.%s.%s.%s'% (target, toolchain, test_id)] = test_duration
                test_suite_properties['test.loops.%s.%s.%s'% (target, toolchain, test_id)] = test_loops
                test_suite_properties['test.path.%s.%s.%s'% (target, toolchain, test_id)] = path

                # read MUTs, test specification and perform tests
                handle_results = self.handle(test_spec, target, toolchain, test_loops=test_loops)

                if handle_results is None:
                    continue

                for handle_result in handle_results:
                    if handle_result:
                        single_test_result, detailed_test_results = handle_result
                    else:
                        continue

                    # Append test results to global test summary
                    if single_test_result is not None:
                        self.test_summary.append(single_test_result)

                    # Add detailed test result to test summary structure
                    # NOTE(review): the condition `target not in ...[target][toolchain]`
                    # looks suspicious (target vs test_id) -- detailed results are
                    # recorded only while the target string is absent from that inner
                    # dict; confirm intent before relying on test_summary_ext here.
                    if target not in self.test_summary_ext[target][toolchain]:
                        if test_id not in self.test_summary_ext[target][toolchain]:
                            self.test_summary_ext[target][toolchain][test_id] = []

                        append_test_result = detailed_test_results

                        # If waterfall and consolidate-waterfall options are enabled,
                        # only include the last test result in the report.
                        if self.opts_waterfall_test and self.opts_consolidate_waterfall_test:
                            append_test_result = {0: detailed_test_results[len(detailed_test_results) - 1]}

                        self.test_summary_ext[target][toolchain][test_id].append(append_test_result)

            test_suite_properties['skipped'] = ', '.join(test_suite_properties['skipped'])
            self.test_suite_properties_ext[target][toolchain] = test_suite_properties

        q.put(target + '_'.join(toolchains))
        return
    def execute(self):
        """Run the whole test pass over every target/toolchain in the spec.

        Targets run serially by default, or one thread per target with the
        parallel option; execute_thread_slice() posts one item on `q` per
        completed slice, which is what we wait on instead of join().
        Returns a 6-tuple: (test_summary, shuffle_random_seed,
        test_summary_ext, test_suite_properties_ext, build_report,
        build_properties).
        """
        clean = self.test_spec.get('clean', False)
        test_ids = self.test_spec.get('test_ids', [])
        q = Queue()

        # Generate seed for shuffle if seed is not provided in
        self.shuffle_random_seed = round(random.random(), self.SHUFFLE_SEED_ROUND)
        if self.opts_shuffle_test_seed is not None and self.is_shuffle_seed_float():
            self.shuffle_random_seed = round(float(self.opts_shuffle_test_seed), self.SHUFFLE_SEED_ROUND)

        if self.opts_parallel_test_exec:
            ###################################################################
            # Experimental, parallel test execution per singletest instance.
            ###################################################################
            execute_threads = []  # Threads used to build mbed SDL, libs, test cases and execute tests

            # Note: We are building here in parallel for each target separately!
            # So we are not building the same thing multiple times and compilers
            # in separate threads do not collide.
            # Inside execute_thread_slice() function function handle() will be called to
            # get information about available MUTs (per target).
            for target, toolchains in self.test_spec['targets'].items():
                self.test_suite_properties_ext[target] = {}
                t = threading.Thread(target=self.execute_thread_slice, args = (q, target, toolchains, clean, test_ids, self.build_report, self.build_properties))
                t.daemon = True
                t.start()
                execute_threads.append(t)

            for t in execute_threads:
                q.get()  # t.join() would block some threads because we should not wait in any order for thread end
        else:
            # Serialized (not parallel) test execution
            for target, toolchains in self.test_spec['targets'].items():
                if target not in self.test_suite_properties_ext:
                    self.test_suite_properties_ext[target] = {}

                self.execute_thread_slice(q, target, toolchains, clean, test_ids, self.build_report, self.build_properties)
                q.get()

        # Mark the build as completed in the result database, when configured
        if self.db_logger:
            self.db_logger.reconnect();
            if self.db_logger.is_connected():
                self.db_logger.update_build_id_info(self.db_logger_build_id, _status_fk=self.db_logger.BUILD_ID_STATUS_COMPLETED)
            self.db_logger.disconnect();

        return self.test_summary, self.shuffle_random_seed, self.test_summary_ext, self.test_suite_properties_ext, self.build_report, self.build_properties
    def get_valid_tests(self, test_map_keys, target, toolchain, test_ids, include_non_automated):
        """Filter test ids down to those that should be built/run for this
        target/toolchain pair.

        Filters are applied in order: explicit name selection, the caller's
        test_ids list, peripheral-only / common-only switches, forced
        peripheral names, the automated flag, toolchain support and MUT
        peripheral availability.  Skips are logged when verbose-skipped
        output is enabled.
        """
        valid_test_map_keys = []

        for test_id in test_map_keys:
            test = TEST_MAP[test_id]
            if self.opts_test_by_names and test_id not in self.opts_test_by_names:
                continue

            if test_ids and test_id not in test_ids:
                continue

            if self.opts_test_only_peripheral and not test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print(self.logger.log_line(
                        self.logger.LogType.INFO,
                        'Common test skipped for target %s' % target))
                continue

            if (self.opts_peripheral_by_names and test.peripherals and
                    not any((i in self.opts_peripheral_by_names)
                            for i in test.peripherals)):
                # We will skip tests not forced with -p option
                if self.opts_verbose_skipped_tests:
                    print(self.logger.log_line(
                        self.logger.LogType.INFO,
                        'Common test skipped for target %s' % target))
                continue

            if self.opts_test_only_common and test.peripherals:
                if self.opts_verbose_skipped_tests:
                    print(self.logger.log_line(
                        self.logger.LogType.INFO,
                        'Peripheral test skipped for target %s' % target))
                continue

            if not include_non_automated and not test.automated:
                if self.opts_verbose_skipped_tests:
                    print(self.logger.log_line(
                        self.logger.LogType.INFO,
                        'Non automated test skipped for target %s' % target))
                continue

            if test.is_supported(target, toolchain):
                if test.peripherals is None and self.opts_only_build_tests:
                    # When users are using 'build only flag' and test do not have
                    # specified peripherals we can allow test building by default
                    pass
                elif self.opts_peripheral_by_names and test_id not in self.opts_peripheral_by_names:
                    # If we force peripheral with option -p we expect test
                    # to pass even if peripheral is not in MUTs file.
                    pass
                elif not self.is_peripherals_available(target, test.peripherals):
                    if self.opts_verbose_skipped_tests:
                        if test.peripherals:
                            print(self.logger.log_line(
                                self.logger.LogType.INFO,
                                'Peripheral %s test skipped for target %s' %
                                (",".join(test.peripherals), target)))
                        else:
                            print(self.logger.log_line(
                                self.logger.LogType.INFO,
                                'Test %s skipped for target %s' %
                                (test_id, target)))
                    continue

                # The test has made it through all the filters, so add it to the valid tests list
                valid_test_map_keys.append(test_id)

        return valid_test_map_keys
def get_skipped_tests(self, all_test_map_keys, valid_test_map_keys):
# NOTE: This will not preserve order
return list(set(all_test_map_keys) - set(valid_test_map_keys))
    def generate_test_summary_by_target(self, test_summary, shuffle_seed=None):
        """ Prints well-formed summary with results (SQL table like)
        table shows text x toolchain test result matrix

        One PrettyTable is produced per target; rows are tests, columns are
        the toolchains seen for that target.  Returns the whole report as a
        single string.
        """
        # Column indices of the per-test result tuples in test_summary
        RESULT_INDEX = 0
        TARGET_INDEX = 1
        TOOLCHAIN_INDEX = 2
        TEST_INDEX = 3
        DESC_INDEX = 4

        # get_unique_value_from_summary* are module-level helpers defined
        # elsewhere in this file
        unique_targets = get_unique_value_from_summary(test_summary, TARGET_INDEX)
        unique_tests = get_unique_value_from_summary(test_summary, TEST_INDEX)
        unique_test_desc = get_unique_value_from_summary_ext(test_summary, TEST_INDEX, DESC_INDEX)
        unique_toolchains = get_unique_value_from_summary(test_summary, TOOLCHAIN_INDEX)

        result = "Test summary:\n"
        for target in unique_targets:
            result_dict = {}  # test : { toolchain : result }
            unique_target_toolchains = []
            for test in test_summary:
                if test[TARGET_INDEX] == target:
                    if test[TOOLCHAIN_INDEX] not in unique_target_toolchains:
                        unique_target_toolchains.append(test[TOOLCHAIN_INDEX])
                    if test[TEST_INDEX] not in result_dict:
                        result_dict[test[TEST_INDEX]] = {}
                    result_dict[test[TEST_INDEX]][test[TOOLCHAIN_INDEX]] = test[RESULT_INDEX]

            pt_cols = ["Target", "Test ID", "Test Description"] + unique_target_toolchains
            pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
            for col in pt_cols:
                pt.align[col] = "l"
            pt.padding_width = 1  # One space between column edges and contents (default)

            for test in unique_tests:
                if test in result_dict:
                    test_results = result_dict[test]
                    if test in unique_test_desc:
                        row = [target, test, unique_test_desc[test]]
                        for toolchain in unique_toolchains:
                            if toolchain in test_results:
                                row.append(test_results[toolchain])
                        pt.add_row(row)
            result += pt.get_string()
            # Shuffle seed line is only shown when shuffling was requested
            shuffle_seed_text = "Shuffle Seed: %.*f"% (self.SHUFFLE_SEED_ROUND,
                                                       shuffle_seed if shuffle_seed else self.shuffle_random_seed)
            result += "\n%s"% (shuffle_seed_text if self.opts_shuffle_test_order else '')
        return result
def generate_test_summary(self, test_summary, shuffle_seed=None):
    """Render the full test-result table plus per-status counters.

    test_summary -- list of result rows: (result, target, toolchain,
                    test id, description, elapsed, timeout, loops)
    shuffle_seed -- seed to report when test-order shuffling is active;
                    falls back to self.shuffle_random_seed
    Returns the assembled multi-line summary string.
    """
    columns = ["Result", "Target", "Toolchain", "Test ID", "Test Description",
               "Elapsed Time (sec)", "Timeout (sec)", "Loops"]
    table = PrettyTable(columns, junction_char="|", hrules=HEADER)
    for left_aligned in ("Result", "Target", "Toolchain", "Test ID", "Test Description"):
        table.align[left_aligned] = "l"
    table.padding_width = 1  # One space between column edges and contents (default)
    # Counter per known result status; insertion order drives the report order.
    status_counters = {status: 0 for status in (
        self.TEST_RESULT_OK,
        self.TEST_RESULT_FAIL,
        self.TEST_RESULT_ERROR,
        self.TEST_RESULT_UNDEF,
        self.TEST_RESULT_IOERR_COPY,
        self.TEST_RESULT_IOERR_DISK,
        self.TEST_RESULT_IOERR_SERIAL,
        self.TEST_RESULT_NO_IMAGE,
        self.TEST_RESULT_TIMEOUT,
        self.TEST_RESULT_MBED_ASSERT,
        self.TEST_RESULT_BUILD_FAILED,
        self.TEST_RESULT_NOT_SUPPORTED,
    )}
    for row in test_summary:
        if row[0] in status_counters:
            status_counters[row[0]] += 1
        table.add_row(row)
    result = "Test summary:\n" + table.get_string() + "\n"
    # Only statuses that actually occurred are listed in the count line.
    result += "Result: " + ' / '.join(
        '%s %s' % (count, status)
        for status, count in status_counters.items() if count != 0)
    shuffle_seed_text = "Shuffle Seed: %.*f\n" % (
        self.SHUFFLE_SEED_ROUND,
        shuffle_seed if shuffle_seed else self.shuffle_random_seed)
    result += "\n%s" % (shuffle_seed_text if self.opts_shuffle_test_order else '')
    return result
def test_loop_list_to_dict(self, test_loops_str):
""" Transforms test_id=X,test_id=X,test_id=X into dictionary {test_id : test_id_loops_count}
"""
result = {}
if test_loops_str:
test_loops = test_loops_str
for test_loop in test_loops:
test_loop_count = test_loop.split('=')
if len(test_loop_count) == 2:
_test_id, _test_loops = test_loop_count
try:
_test_loops = int(_test_loops)
except:
continue
result[_test_id] = _test_loops
return result
def get_test_loop_count(self, test_id):
    """Return the loop count configured for *test_id*.

    Falls back to the global default (self.GLOBAL_LOOPS_COUNT) when the
    test has no per-test override in self.TEST_LOOPS_DICT.
    """
    return self.TEST_LOOPS_DICT.get(test_id, self.GLOBAL_LOOPS_COUNT)
def delete_file(self, file_path):
    """Remove a file from the filesystem.

    file_path -- path of the file to delete.
    Returns (success, message): (True, "") on success, otherwise
    (False, <OSError instance>).
    """
    result = True
    result_msg = ""  # fixed typo: was 'resutl_msg'
    try:
        os.remove(file_path)
    except OSError as e:
        # Narrowed from 'except Exception': os.remove only raises OSError
        # subclasses for filesystem failures.
        result_msg = e
        result = False
    return result, result_msg
def handle_mut(self, mut, data, target_name, toolchain_name, test_loops=1):
    """ Test is being invoked for given MUT.

    mut            -- MUT descriptor dict ('mcu', optional 'copy_method',
                      'disk', 'port', 'reset_type', 'reset_tout', ...)
    data           -- decoded test specification ('test_id', 'image',
                      'mcu', optional 'duration')
    target_name    -- build target name used for reporting
    toolchain_name -- toolchain name used for reporting
    test_loops     -- how many times to repeat the test on this MUT

    Returns a (summary-row tuple, detailed_test_results dict) pair, or
    None when no MUT is available / auto-detection or disk/port lookup fails.
    """
    # Get test information, image and test timeout
    test_id = data['test_id']
    test = TEST_MAP[test_id]
    test_description = TEST_MAP[test_id].get_description()
    image = data["image"]
    duration = data.get("duration", 10)
    if mut is None:
        print("Error: No Mbed available: MUT[%s]" % data['mcu'])
        return None
    mcu = mut['mcu']
    copy_method = mut.get('copy_method')  # Available board configuration selection e.g. core selection etc.
    if self.db_logger:
        self.db_logger.reconnect()
    selected_copy_method = self.opts_copy_method if copy_method is None else copy_method
    # Tests can be looped so test results must be stored for the same test
    test_all_result = []
    # Test results for one test ran few times
    detailed_test_results = {}  # { Loop_number: { results ... } }
    for test_index in range(test_loops):
        # If mbedls is available and we are auto detecting MUT info,
        # update MUT info (mounting may changed)
        if get_module_avail('mbed_lstools') and self.opts_auto_detect:
            platform_name_filter = [mcu]
            muts_list = {}
            found = False
            # Poll for the board: up to 60 tries with a 3 s sleep between them
            for i in range(0, 60):
                print('Looking for %s with MBEDLS' % mcu)
                muts_list = get_autodetected_MUTS_list(platform_name_filter=platform_name_filter)
                if 1 not in muts_list:
                    sleep(3)
                else:
                    found = True
                    break
            if not found:
                print("Error: mbed not found with MBEDLS: %s" % data['mcu'])
                return None
            else:
                mut = muts_list[1]
        disk = mut.get('disk')
        port = mut.get('port')
        if disk is None or port is None:
            return None
        target_by_mcu = TARGET_MAP[mut['mcu']]
        target_name_unique = mut['mcu_unique'] if 'mcu_unique' in mut else mut['mcu']
        # Some extra stuff can be declared in MUTs structure
        reset_type = mut.get('reset_type')  # reboot.txt, reset.txt, shutdown.txt
        reset_tout = mut.get('reset_tout')  # COPY_IMAGE -> RESET_PROC -> SLEEP(RESET_TOUT)
        # When the build and test system were separate, this was relative to a
        # base network folder base path: join(NETWORK_BASE_PATH, ).
        # "image" is now a list representing a development image and an update image
        # (for device management). When testing, we only use the development image.
        image_path = image[0]
        # Host test execution
        start_host_exec_time = time()
        single_test_result = self.TEST_RESULT_UNDEF  # single test run result
        _copy_method = selected_copy_method
        if not exists(image_path):
            # NOTE(review): this branch leaves single_testduration /
            # single_timeout unassigned; the code below appears to rely on
            # values from a previous loop iteration -- confirm before relying
            # on the NO_IMAGE path for the very first loop.
            single_test_result = self.TEST_RESULT_NO_IMAGE
            elapsed_time = 0
            single_test_output = self.logger.log_line(self.logger.LogType.ERROR, 'Image file does not exist: %s'% image_path)
            print(single_test_output)
        else:
            # Host test execution
            start_host_exec_time = time()
            host_test_verbose = self.opts_verbose_test_result_only or self.opts_verbose
            host_test_reset = self.opts_mut_reset_type if reset_type is None else reset_type
            host_test_result = self.run_host_test(test.host_test,
                                                  image_path, disk, port, duration,
                                                  micro=target_name,
                                                  verbose=host_test_verbose,
                                                  reset=host_test_reset,
                                                  reset_tout=reset_tout,
                                                  copy_method=selected_copy_method,
                                                  program_cycle_s=target_by_mcu.program_cycle_s)
            single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
        # Store test result
        test_all_result.append(single_test_result)
        total_elapsed_time = time() - start_host_exec_time  # Test time with copy (flashing) / reset
        elapsed_time = single_testduration  # Time of single test case execution after reset
        detailed_test_results[test_index] = {
            'result' : single_test_result,
            'output' : single_test_output,
            'target_name' : target_name,
            'target_name_unique' : target_name_unique,
            'toolchain_name' : toolchain_name,
            'id' : test_id,
            'description' : test_description,
            'elapsed_time' : round(elapsed_time, 2),
            'duration' : single_timeout,
            'copy_method' : _copy_method,
        }
        print(self.print_test_result(
            single_test_result, target_name_unique, toolchain_name, test_id,
            test_description, elapsed_time, single_timeout))
        # Update database entries for ongoing test
        if self.db_logger and self.db_logger.is_connected():
            test_type = 'SingleTest'
            self.db_logger.insert_test_entry(self.db_logger_build_id,
                                             target_name,
                                             toolchain_name,
                                             test_type,
                                             test_id,
                                             single_test_result,
                                             single_test_output,
                                             elapsed_time,
                                             single_timeout,
                                             test_index)
        # If we perform waterfall test we test until we get OK and we stop testing
        if self.opts_waterfall_test and single_test_result == self.TEST_RESULT_OK:
            break
    if self.db_logger:
        self.db_logger.disconnect()
    return (self.shape_global_test_loop_result(test_all_result, self.opts_waterfall_test and self.opts_consolidate_waterfall_test),
            target_name_unique,
            toolchain_name,
            test_id,
            test_description,
            round(elapsed_time, 2),
            single_timeout,
            self.shape_test_loop_ok_result_count(test_all_result)), detailed_test_results
def handle(self, test_spec, target_name, toolchain_name, test_loops=1):
    """Decode a test specification, pick a matching MUT and run the test on it.

    Returns a one-element list with the handle_mut() outcome (None when no
    MUT matched and handle_mut rejected the request).
    """
    spec = json.loads(test_spec)
    # Last registered MUT with a matching MCU wins (same as the original scan).
    selected_mut = None
    for _, candidate in self.muts.items():
        if candidate['mcu'] == spec['mcu']:
            selected_mut = candidate
    outcome = self.handle_mut(selected_mut, spec, target_name,
                              toolchain_name, test_loops=test_loops)
    return [outcome]
def print_test_result(self, test_result, target_name, toolchain_name,
                      test_id, test_description, elapsed_time, duration):
    """Format a single test outcome using the suite's reporting convention.

    Produces 'TargetTest::<target>::<toolchain>::<id>::<desc> [<result>]
    in <elapsed> of <timeout> sec', wrapped in magenta terminal colour.
    """
    header = "::".join(["TargetTest", target_name, toolchain_name,
                        test_id, test_description])
    timing = " in %.2f of %d sec" % (round(elapsed_time, 2), duration)
    line = header + " [" + test_result + "]" + timing
    return Fore.MAGENTA + line + Fore.RESET
def shape_test_loop_ok_result_count(self, test_all_result):
    """Summarise loop outcomes as an 'ok/total' string, e.g. '2/3'."""
    ok_count = test_all_result.count(self.TEST_RESULT_OK)
    return "%d/%d" % (ok_count, len(test_all_result))
def shape_global_test_loop_result(self, test_all_result, waterfall_and_consolidate):
    """Consolidate per-loop results into a single overall status.

    test_all_result -- list of per-loop result strings.
    waterfall_and_consolidate -- when True, a single OK loop is enough for
                                 an overall OK.
    Returns the common result when all loops agree, OK under waterfall
    consolidation when any loop passed, otherwise TEST_RESULT_FAIL.
    """
    # Bug fix: the original indexed test_all_result[0] unconditionally and
    # raised IndexError on an empty list; an empty run is now a failure.
    if not test_all_result:
        return self.TEST_RESULT_FAIL
    if all(res == test_all_result[0] for res in test_all_result):
        return test_all_result[0]
    if waterfall_and_consolidate and self.TEST_RESULT_OK in test_all_result:
        return self.TEST_RESULT_OK
    return self.TEST_RESULT_FAIL
def run_host_test(self, name, image_path, disk, port, duration,
                  micro=None, reset=None, reset_tout=None,
                  verbose=False, copy_method=None, program_cycle_s=None):
    """ Function creates new process with host test configured with particular test case.
    Function also is pooling for serial port activity from process to catch all data
    printed by test runner and host test during test execution

    name       -- host test script name (run as '<name>.py' from HOST_TESTS)
    image_path -- binary image to flash onto the MUT
    disk, port -- MUT mount point and serial port
    duration   -- nominal test timeout in seconds (the host test may
                  extend it via an auto-detected 'timeout' property)
    Remaining keyword arguments are forwarded as host-test CLI options.

    Returns (result, joined_output, testcase_duration, duration).
    """
    def get_char_from_queue(obs):
        """ Get character from queue safe way
        """
        try:
            c = obs.queue.get(block=True, timeout=0.5)
        except Empty:
            # No output from the host test process within the poll window
            c = None
        return c
    def filter_queue_char(c):
        """ Filters out non ASCII characters from serial port
        """
        if ord(c) not in range(128):
            c = ' '
        return c
    def get_test_result(output):
        """ Parse test 'output' data
        """
        # Default is TIMEOUT; overwritten only when a result token is found
        result = self.TEST_RESULT_TIMEOUT
        for line in "".join(output).splitlines():
            search_result = self.RE_DETECT_TESTCASE_RESULT.search(line)
            if search_result and len(search_result.groups()):
                result = self.TEST_RESULT_MAPPING[search_result.groups(0)[0]]
                break
        return result
    def get_auto_property_value(property_name, line):
        """ Scans auto detection line from MUT and returns scanned parameter 'property_name'
        Returns string
        """
        result = None
        if re.search("HOST: Property '%s'"% property_name, line) is not None:
            property = re.search("HOST: Property '%s' = '([\w\d _]+)'"% property_name, line)
            if property is not None and len(property.groups()) == 1:
                result = property.groups()[0]
        return result
    # Build host-test helper command line
    cmd = ["python",
           '%s.py'% name,
           '-d', disk,
           '-f', '"%s"'% image_path,
           '-p', port,
           '-t', str(duration),
           '-C', str(program_cycle_s)]
    if get_module_avail('mbed_lstools') and self.opts_auto_detect:
        cmd += ['--auto']
    # Add extra parameters to host_test
    if copy_method is not None:
        cmd += ["-c", copy_method]
    if micro is not None:
        cmd += ["-m", micro]
    if reset is not None:
        cmd += ["-r", reset]
    if reset_tout is not None:
        cmd += ["-R", str(reset_tout)]
    if verbose:
        print(Fore.MAGENTA + "Executing '" + " ".join(cmd) + "'" + Fore.RESET)
        print("Test::Output::Start")
    proc = Popen(cmd, stdout=PIPE, cwd=HOST_TESTS)
    obs = ProcessObserver(proc)
    update_once_flag = {}  # Stores flags checking if some auto-parameter was already set
    line = ''
    output = []
    start_time = time()
    # Poll process output character by character; allow up to twice the
    # nominal duration (the test itself restarts the clock on target reset).
    while (time() - start_time) < (2 * duration):
        c = get_char_from_queue(obs)
        if c:
            if verbose:
                sys.stdout.write(c)
            c = filter_queue_char(c)
            output.append(c)
            # Give the mbed under test a way to communicate the end of the test
            if c in ['\n', '\r']:
                # Checking for auto-detection information from the test about MUT reset moment
                if 'reset_target' not in update_once_flag and "HOST: Reset target..." in line:
                    # We will update this marker only once to prevent multiple time resets
                    update_once_flag['reset_target'] = True
                    start_time = time()
                # Checking for auto-detection information from the test about timeout
                auto_timeout_val = get_auto_property_value('timeout', line)
                if 'timeout' not in update_once_flag and auto_timeout_val is not None:
                    # We will update this marker only once to prevent multiple time resets
                    update_once_flag['timeout'] = True
                    duration = int(auto_timeout_val)
                # Detect mbed assert:
                if 'mbed assertation failed: ' in line:
                    output.append('{{mbed_assert}}')
                    break
                # Check for test end
                if '{end}' in line:
                    break
                line = ''
            else:
                line += c
    end_time = time()
    testcase_duration = end_time - start_time  # Test case duration from reset to {end}
    # Drain one last character that may still be queued after the loop exits
    c = get_char_from_queue(obs)
    if c:
        if verbose:
            sys.stdout.write(c)
        c = filter_queue_char(c)
        output.append(c)
    if verbose:
        print("Test::Output::Finish")
    # Stop test process
    obs.stop()
    result = get_test_result(output)
    return (result, "".join(output), testcase_duration, duration)
def is_peripherals_available(self, target_mcu_name, peripherals=None):
    """Check whether some registered MUT matches the MCU name and, when
    given, provides all the requested peripherals.

    Returns True at the first matching MUT, False when none matches.
    """
    required = set(peripherals) if peripherals is not None else None
    for _, mut in self.muts.items():
        # Target MCU name must match exactly
        if mut["mcu"] != target_mcu_name:
            continue
        if required is not None:
            # The MUT must declare peripherals covering every requested one
            if 'peripherals' not in mut:
                continue
            if not required.issubset(set(mut['peripherals'])):
                continue
        return True
    return False
def shape_test_request(self, mcu, image_path, test_id, duration=10):
    """Build the JSON test-specification string consumed by handle()."""
    return json.dumps({
        "mcu": mcu,
        "image": image_path,
        "duration": duration,
        "test_id": test_id,
    })
def get_unique_value_from_summary(test_summary, index):
    """Return the sorted unique values found at column *index* of each
    test summary row.
    """
    return sorted({row[index] for row in test_summary})
def get_unique_value_from_summary_ext(test_summary, index_key, index_val):
    """Map each unique value in column *index_key* to the FIRST value seen
    in column *index_val* for that key.
    """
    mapping = {}
    for row in test_summary:
        # setdefault keeps the first value, matching the original behaviour
        mapping.setdefault(row[index_key], row[index_val])
    return mapping
def show_json_file_format_error(json_spec_filename, line, column):
    """Print the JSON lines leading up to a parse error and a caret marker
    pointing at the offending column.

    json_spec_filename -- path of the broken JSON file
    line, column -- 1-based error position as reported by json.loads()
    """
    with open(json_spec_filename) as data_file:
        line_no = 1
        for json_line in data_file:
            if line_no + 5 >= line:  # Print last few lines before error
                print('Line %d:\t'%line_no + json_line)
                if line_no == line:
                    # Bug fix: the original read "'%s\t%s^' (...)" -- the
                    # missing '%' operator made this a call on a str object
                    # (TypeError) instead of string formatting.
                    print('%s\t%s^' % (' ' * len('Line %d:' % line_no),
                                       '-' * (column - 1)))
                    break
            line_no += 1
def json_format_error_defect_pos(json_error_msg):
    """ Gets first error line and column in JSON file format.
    Parsed from exception thrown by json.loads() string

    json_error_msg -- stringified ValueError message containing
                      'line N' / 'column M' markers.
    Returns [line, column] when a position could be extracted,
    otherwise None.
    """
    result = None
    line, column = 0, 0
    # Line value search
    line_search = re.search('line [0-9]+', json_error_msg)
    if line_search is not None:
        ls = line_search.group().split(' ')
        if len(ls) == 2:
            line = int(ls[1])
            # Column position search
            column_search = re.search('column [0-9]+', json_error_msg)
            if column_search is not None:
                cs = column_search.group().split(' ')
                if len(cs) == 2:
                    column = int(cs[1])
                result = [line, column]
    return result
def get_json_data_from_file(json_spec_filename, verbose=False):
    """Load a JSON file into a data structure.

    Prints a diagnostic (including the error position when it can be
    parsed out of the exception message) and returns None on parse or
    I/O failure. With verbose=True, pretty-prints the loaded data.
    """
    parsed = None
    try:
        with open(json_spec_filename) as data_file:
            try:
                parsed = json.load(data_file)
            except ValueError as json_error_msg:
                parsed = None
                print('JSON file %s parsing failed. Reason: %s' %
                      (json_spec_filename, json_error_msg))
                # Point at the defect inside the file when the exception
                # message carries a line/column position.
                defect_pos = json_format_error_defect_pos(str(json_error_msg))
                if defect_pos is not None:
                    print()
                    show_json_file_format_error(json_spec_filename,
                                                defect_pos[0], defect_pos[1])
    except IOError as fileopen_error_msg:
        print('JSON file %s not opened. Reason: %s\n' %
              (json_spec_filename, fileopen_error_msg))
    if verbose and parsed:
        pprint.PrettyPrinter(indent=4).pprint(parsed)
    return parsed
def print_muts_configuration_from_json(json_data, join_delim=", ", platform_filter=None):
    """Render the MUTs (Mbeds Under Test) configuration as a printable table.

    json_data -- muts_all.json-style dict {index: {property: value}}
    join_delim -- delimiter used to flatten list-valued properties
    platform_filter -- optional regex matched against each MUT's 'mcu'
    Returns the table as a string.
    """
    # Collect the union of property names across every MUT entry.
    property_names = []
    for key in json_data:
        for prop in json_data[key]:
            if prop not in property_names:
                property_names.append(prop)
    columns = ["index"] + property_names
    table = PrettyTable(columns, junction_char="|", hrules=HEADER)
    for column in columns:
        table.align[column] = "l"
    for key in json_data:
        info = json_data[key]
        # Skip entries whose MCU does not match the platform filter.
        if platform_filter and 'mcu' in info:
            if re.search(platform_filter, info['mcu']) is None:
                continue
        row = [key]
        for prop in property_names:
            value = info.get(prop)
            if isinstance(value, list):
                value = join_delim.join(value)
            row.append(value)
        table.add_row(row)
    return table.get_string()
def print_test_configuration_from_json(json_data, join_delim=", "):
    """ Prints test specification configuration passed to test script for verboseness

    json_data  -- parsed test spec: {'targets': {target: [toolchains]}}
    join_delim -- delimiter used when joining conflicted toolchain lists

    Returns the rendered target x toolchain table plus a legend of detected
    conflicts: '*' marks a toolchain not supported by the target, '#' marks
    a toolchain whose configured path does not exist on disk.
    """
    toolchains_info_cols = []
    # We need to check all toolchains for each device
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            toolchains = targets[target]
            for toolchain in toolchains:
                if toolchain not in toolchains_info_cols:
                    toolchains_info_cols.append(toolchain)
    # Prepare pretty table object to display test specification
    pt_cols = ["mcu"] + sorted(toolchains_info_cols)
    pt = PrettyTable(pt_cols, junction_char="|", hrules=HEADER)
    for col in pt_cols:
        pt.align[col] = "l"
    # { target : [conflicted toolchains] }
    toolchain_conflicts = {}
    toolchain_path_conflicts = []
    for k in json_data:
        # k should be 'targets'
        targets = json_data[k]
        for target in targets:
            target_supported_toolchains = get_target_supported_toolchains(target)
            if not target_supported_toolchains:
                target_supported_toolchains = []
            # Unknown targets get a trailing '*' marker in the table
            target_name = target if target in TARGET_MAP else "%s*"% target
            row = [target_name]
            toolchains = targets[target]
            for toolchain in sorted(toolchains_info_cols):
                # Check for conflicts: target vs toolchain
                conflict = False
                conflict_path = False
                if toolchain in toolchains:
                    if toolchain not in target_supported_toolchains:
                        conflict = True
                        if target not in toolchain_conflicts:
                            toolchain_conflicts[target] = []
                        toolchain_conflicts[target].append(toolchain)
                # Add marker inside table about target usage / conflict
                cell_val = 'Yes' if toolchain in toolchains else '-'
                if conflict:
                    cell_val += '*'
                # Check for conflicts: toolchain vs toolchain path
                if toolchain in TOOLCHAIN_PATHS:
                    toolchain_path = TOOLCHAIN_PATHS[toolchain]
                    if not os.path.isdir(toolchain_path):
                        conflict_path = True
                        if toolchain not in toolchain_path_conflicts:
                            toolchain_path_conflicts.append(toolchain)
                if conflict_path:
                    cell_val += '#'
                row.append(cell_val)
            pt.add_row(row)
    # generate result string
    result = pt.get_string()  # Test specification table
    if toolchain_conflicts or toolchain_path_conflicts:
        result += "\n"
        result += "Toolchain conflicts:\n"
        for target in toolchain_conflicts:
            if target not in TARGET_MAP:
                result += "\t* Target %s unknown\n"% (target)
            conflict_target_list = join_delim.join(toolchain_conflicts[target])
            sufix = 's' if len(toolchain_conflicts[target]) > 1 else ''
            result += "\t* Target %s does not support %s toolchain%s\n"% (target, conflict_target_list, sufix)
        for toolchain in toolchain_path_conflicts:
            # Let's check toolchain configuration
            if toolchain in TOOLCHAIN_PATHS:
                toolchain_path = TOOLCHAIN_PATHS[toolchain]
                if not os.path.isdir(toolchain_path):
                    result += "\t# Toolchain %s path not found: %s\n"% (toolchain, toolchain_path)
    return result
def get_avail_tests_summary_table(cols=None, result_summary=True, join_delim=',',platform_filter=None):
    """ Generates table summary with all test cases and additional test cases
    information using pretty print functionality. Allows test suite user to
    see test cases

    cols            -- list of test properties to show as columns
                       (defaults to id/automated/description/peripherals/
                       host_test/duration)
    result_summary  -- when True (and no platform_filter) append automation
                       coverage tables
    join_delim      -- delimiter used to join list-valued properties
    platform_filter -- regex used to narrow down displayed test IDs
    Returns the rendered table(s) as a string.
    """
    # get all unique test ID prefixes
    unique_test_id = []
    for test in TESTS:
        split = test['id'].split('_')[:-1]
        test_id_prefix = '_'.join(split)
        if test_id_prefix not in unique_test_id:
            unique_test_id.append(test_id_prefix)
    unique_test_id.sort()
    # Per-prefix counters: automated-only vs all tests
    counter_dict_test_id_types = dict((t, 0) for t in unique_test_id)
    counter_dict_test_id_types_all = dict((t, 0) for t in unique_test_id)
    test_properties = ['id',
                       'automated',
                       'description',
                       'peripherals',
                       'host_test',
                       'duration'] if cols is None else cols
    # All tests status table print
    pt = PrettyTable(test_properties, junction_char="|", hrules=HEADER)
    for col in test_properties:
        pt.align[col] = "l"
    pt.align['duration'] = "r"
    counter_all = 0
    counter_automated = 0
    pt.padding_width = 1  # One space between column edges and contents (default)
    for test_id in sorted(TEST_MAP.keys()):
        if platform_filter is not None:
            # Filter out platforms using regex
            if re.search(platform_filter, test_id) is None:
                continue
        row = []
        test = TEST_MAP[test_id]
        split = test_id.split('_')[:-1]
        test_id_prefix = '_'.join(split)
        for col in test_properties:
            col_value = test[col]
            if isinstance(test[col], list):
                col_value = join_delim.join(test[col])
            elif test[col] == None:
                col_value = "-"
            row.append(col_value)
        if test['automated'] == True:
            counter_dict_test_id_types[test_id_prefix] += 1
            counter_automated += 1
        pt.add_row(row)
        # Update counters
        counter_all += 1
        counter_dict_test_id_types_all[test_id_prefix] += 1
    result = pt.get_string()
    result += "\n\n"
    if result_summary and not platform_filter:
        # Automation result summary
        test_id_cols = ['automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        percent_progress = round(100.0 * counter_automated / float(counter_all), 1)
        str_progress = progress_bar(percent_progress, 75)
        pt.add_row([counter_automated, counter_all, percent_progress, str_progress])
        result += "Automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
        # Test automation coverage table print (per test-ID prefix)
        test_id_cols = ['id', 'automated', 'all', 'percent [%]', 'progress']
        pt = PrettyTable(test_id_cols, junction_char="|", hrules=HEADER)
        pt.align['id'] = "l"
        pt.align['automated'] = "r"
        pt.align['all'] = "r"
        pt.align['percent [%]'] = "r"
        for unique_id in unique_test_id:
            percent_progress = round(100.0 * counter_dict_test_id_types[unique_id] / float(counter_dict_test_id_types_all[unique_id]), 1)
            str_progress = progress_bar(percent_progress, 75)
            row = [unique_id,
                   counter_dict_test_id_types[unique_id],
                   counter_dict_test_id_types_all[unique_id],
                   percent_progress,
                   "[" + str_progress + "]"]
            pt.add_row(row)
        result += "Test automation coverage:\n"
        result += pt.get_string()
        result += "\n\n"
    return result
def progress_bar(percent_progress, saturation=0):
    """Render a 50-character progress bar with an optional saturation mark.

    percent_progress -- value in range 0..100, scaled to 50 characters.
    saturation -- percentage position at which to insert the mark
                  ('|' when progress already passed char 38, '!' otherwise);
                  0 disables the mark.
    """
    step = int(percent_progress / 2)  # Scale percentage to bar width (0..50)
    str_progress = '#' * step + '.' * int(50 - step)
    c = '!' if str_progress[38] == '.' else '|'
    if saturation > 0:
        # Bug fix: Python 3 '/' produces a float, which is not a valid slice
        # index (TypeError at runtime); use integer division instead.
        saturation = saturation // 2
        str_progress = str_progress[:saturation] + c + str_progress[saturation:]
    return str_progress
def singletest_in_cli_mode(single_test):
    """ Runs SingleTestRunner object in CLI (Command line interface) mode

    single_test -- SingleTestRunner-like object exposing execute() and
                   the opts_* report/summary flags
    @return returns success code (0 == success) for building and running tests
    """
    start = time()
    # Execute tests depending on options and filter applied
    test_summary, shuffle_seed, test_summary_ext, test_suite_properties_ext, build_report, build_properties = single_test.execute()
    elapsed_time = time() - start
    # Human readable summary
    if not single_test.opts_suppress_summary:
        # prints well-formed summary with results (SQL table like)
        print(single_test.generate_test_summary(test_summary, shuffle_seed))
    if single_test.opts_test_x_toolchain_summary:
        # prints well-formed summary with results (SQL table like)
        # table shows text x toolchain test result matrix
        print(single_test.generate_test_summary_by_target(test_summary,
                                                          shuffle_seed))
    print("Completed in %.2f sec" % elapsed_time)
    # Bug fix: bare 'print' is a Python 2 leftover; in Python 3 it evaluates
    # the function object without printing anything. Emit the intended blank
    # line explicitly.
    print()
    # Write summary of the builds
    print_report_exporter = ReportExporter(ResultExporterType.PRINT, package="build")
    status = print_report_exporter.report(build_report)
    # Store extra reports in files
    if single_test.opts_report_html_file_name:
        # Export results in form of HTML report to separate file
        report_exporter = ReportExporter(ResultExporterType.HTML)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_html_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_junit_file_name:
        # Export results in form of JUnit XML report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_junit_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_text_file_name:
        # Export results in form of a text file
        report_exporter = ReportExporter(ResultExporterType.TEXT)
        report_exporter.report_to_file(test_summary_ext, single_test.opts_report_text_file_name, test_suite_properties=test_suite_properties_ext)
    if single_test.opts_report_build_file_name:
        # Export build results as html report to separate file
        report_exporter = ReportExporter(ResultExporterType.JUNIT, package="build")
        report_exporter.report_to_file(build_report, single_test.opts_report_build_file_name, test_suite_properties=build_properties)
    # Returns True if no build failures of the test projects or their dependencies
    return status
class TestLogger():
    """Base logger for test-suite events.

    Entries may be kept in memory (store_log=True) so callers can inspect
    or export them later; subclasses add screen/file output.
    """
    def __init__(self, store_log=True):
        """Create the logger; store_log controls in-memory retention of entries."""
        self.log = []
        self.log_to_file = False
        self.log_file_name = None
        self.store_log = store_log
        # Enumerations of log severities and file-open modes
        self.LogType = construct_enum(INFO='Info',
                                      WARN='Warning',
                                      NOTIF='Notification',
                                      ERROR='Error',
                                      EXCEPT='Exception')
        self.LogToFileAttr = construct_enum(CREATE=1,  # Create or overwrite existing log file
                                            APPEND=2)  # Append to existing log file
    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """Record a single log entry and return it as a dict."""
        entry = {
            'log_type': LogType,
            'log_timestamp': time(),
            'log_line': log_line,
            '_future': None,
        }
        # Keep the entry in memory when retention is enabled
        if self.store_log:
            self.log.append(entry)
        return entry
class CLITestLogger(TestLogger):
    """Command-line logger: formats entries for the terminal and optionally
    appends them to a log file.
    """
    def __init__(self, store_log=True, file_name=None):
        TestLogger.__init__(self)
        self.log_file_name = file_name
        # Time-only stamps keep CLI output compact
        # (full variant would be '%y-%m-%d %H:%M:%S').
        self.TIMESTAMP_FORMAT = '%H:%M:%S'
    def log_print(self, log_entry, timestamp=True):
        """Return *log_entry* formatted for terminal output."""
        prefix = ''
        if timestamp:
            stamp = datetime.datetime.fromtimestamp(log_entry['log_timestamp'])
            prefix = stamp.strftime("[%s] "% self.TIMESTAMP_FORMAT)
        return prefix + "%(log_type)s: %(log_line)s"% (log_entry)
    def log_line(self, LogType, log_line, timestamp=True, line_delim='\n'):
        """Log a line; when a log file was configured, append the formatted
        text to it (I/O errors are deliberately ignored -- best effort).
        """
        entry = TestLogger.log_line(self, LogType, log_line)
        text = self.log_print(entry, timestamp)
        if self.log_file_name is not None:
            try:
                with open(self.log_file_name, 'a') as handle:
                    handle.write(text + line_delim)
            except IOError:
                pass
        return text
def factory_db_logger(db_url):
    """Create a database driver matching the type encoded in *db_url*.

    db_url -- database connection string (or None).
    Returns a MySQLDBAccess instance for 'mysql' URLs, otherwise None.
    """
    if db_url is not None:
        from tools.test_mysql import MySQLDBAccess
        connection_info = BaseDBAccess().parse_db_connection_string(db_url)
        if connection_info is not None:
            # Improvement: reuse the already-parsed result instead of
            # parsing the connection string a second time.
            (db_type, username, password, host, db_name) = connection_info
            if db_type == 'mysql':
                return MySQLDBAccess()
    return None
def detect_database_verbose(db_url):
    """Verbosely probe a database connection string: parse it, connect,
    run database detection, and print each step's outcome.
    """
    parsed = BaseDBAccess().parse_db_connection_string(db_url)
    if parsed is None:
        print("Parse error: '%s' - DB Url error" % db_url)
        return
    # Parsing passed
    (db_type, username, password, host, db_name) = parsed
    # Let's try to connect
    db_ = factory_db_logger(db_url)
    if db_ is None:
        print("Database type '%s' unknown" % db_type)
        return
    print("Connecting to database '%s'..." % db_url)
    db_.connect(host, username, password, db_name)
    if db_.is_connected():
        print("ok")
        print("Detecting database...")
        print(db_.detect_database(verbose=True))
        print("Disconnecting...")
        db_.disconnect()
        print("done")
def get_module_avail(module_name):
    """Return True when *module_name* has already been imported
    (i.e. it is present in sys.modules).
    """
    # Idiom fix: membership test on the mapping directly; materializing
    # .keys() is redundant.
    return module_name in sys.modules
def get_autodetected_MUTS_list(platform_name_filter=None):
    """Detect connected mbed devices via mbed_lstools and build a MUTs map.

    platform_name_filter -- optional list of platform names to keep.
    Returns the muts_all.json-style dict produced by get_autodetected_MUTS().
    """
    oldError = None
    if os.name == 'nt':
        # Disable Windows error box temporarily
        oldError = ctypes.windll.kernel32.SetErrorMode(1)  # note that SEM_FAILCRITICALERRORS = 1
    mbeds = mbed_lstools.create()
    detect_muts_list = mbeds.list_mbeds()
    if os.name == 'nt':
        # Restore the previous Windows error mode
        ctypes.windll.kernel32.SetErrorMode(oldError)
    return get_autodetected_MUTS(detect_muts_list, platform_name_filter=platform_name_filter)
def get_autodetected_MUTS(mbeds_list, platform_name_filter=None):
    """ Function detects all connected to host mbed-enabled devices and generates artificial MUTS file.
    If function fails to auto-detect devices it will return empty dictionary.

    if get_module_avail('mbed_lstools'):
        mbeds = mbed_lstools.create()
        mbeds_list = mbeds.list_mbeds()

    @param mbeds_list list of mbeds captured from mbed_lstools, e.g.
           [{'platform_name': 'NUCLEO_F302R8', 'mount_point': 'E:',
             'target_id': '07050200623B61125D5EF72A', 'serial_port': u'COM34'}]
    @param platform_name_filter optional list: keep only these platform names
    @return muts_all.json-style dict keyed by a 1-based index
    """
    muts = {}  # Should be in muts_all.json format
    index = 1
    for mbed in mbeds_list:
        # Filter the MUTS if a filter is specified
        if platform_name_filter and mbed['platform_name'] not in platform_name_filter:
            continue
        # For mcu_unique we prefer mbedls' 'platform_name_unique'; otherwise
        # derive one from the last few chars of the board's target_id.
        if 'platform_name_unique' in mbed:
            unique_name = mbed['platform_name_unique']
        else:
            unique_name = "%s[%s]" % (mbed['platform_name'], mbed['target_id'][-4:])
        muts[index] = {
            'mcu': mbed['platform_name'],
            'mcu_unique': unique_name,
            'port': mbed['serial_port'],
            'disk': mbed['mount_point'],
            'peripherals': []  # No peripheral detection
        }
        index += 1
    return muts
def get_autodetected_TEST_SPEC(mbeds_list,
                               use_default_toolchain=True,
                               use_supported_toolchains=False,
                               toolchain_filter=None,
                               platform_name_filter=None):
    """Build an artificial test_spec structure from detected mbed devices.

    mbeds_list -- MUT descriptors (each with an 'mcu' key).
    use_default_toolchain -- include each target's default toolchain.
    use_supported_toolchains -- include every supported toolchain.
    toolchain_filter -- extra toolchains to include, limited to those the
                        target actually knows about.
    platform_name_filter -- restrict to these MCU names (None = keep all).
    Returns {'targets': {mcu: [unique toolchains]}}; unknown MCUs are skipped.
    """
    spec = {'targets': {}}
    for mut in mbeds_list:
        mcu = mut['mcu']
        if platform_name_filter is not None and mut['mcu'] not in platform_name_filter:
            continue
        if mcu not in TARGET_MAP:
            continue
        default_toolchain = TARGET_MAP[mcu].default_toolchain
        supported_toolchains = TARGET_MAP[mcu].supported_toolchains
        # Decide which toolchains belong in the spec pool for this target
        pool = []
        if use_default_toolchain:
            pool.append(default_toolchain)
        if use_supported_toolchains:
            pool += supported_toolchains
        if toolchain_filter is not None:
            known = supported_toolchains + [default_toolchain]
            pool += [tc for tc in toolchain_filter if tc in known]
        spec['targets'][mcu] = list(set(pool))
    return spec
def get_default_test_options_parser():
    """ Get common test script options used by CLI, web services etc.

    Returns an argparse.ArgumentParser pre-populated with every option the
    test suite understands.  Some options (--auto, --tc, --oper) are only
    registered when the optional 'mbed_lstools' module is importable.
    """
    parser = argparse.ArgumentParser()
    # --- Input specifications -------------------------------------------
    parser.add_argument('-i', '--tests',
                        dest='test_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with test specification')

    parser.add_argument('-M', '--MUTS',
                        dest='muts_spec_filename',
                        metavar="FILE",
                        type=argparse_filestring_type,
                        help='Points to file with MUTs specification (overwrites settings.py and private_settings.py)')

    parser.add_argument("-j", "--jobs",
                        dest='jobs',
                        metavar="NUMBER",
                        type=int,
                        help="Define number of compilation jobs. Default value is 1")

    # --- Options that require mbed_lstools (device auto-detection) ------
    if get_module_avail('mbed_lstools'):
        # Additional features available when mbed_lstools is installed on host and imported
        # mbed_lstools allow users to detect connected to host mbed-enabled devices
        parser.add_argument('--auto',
                            dest='auto_detect',
                            action="store_true",
                            help='Use mbed-ls module to detect all connected mbed devices')

        toolchain_list = list(TOOLCHAINS) + ["DEFAULT", "ALL"]
        parser.add_argument('--tc',
                            dest='toolchains_filter',
                            type=argparse_many(argparse_uppercase_type(toolchain_list, "toolchains")),
                            help="Toolchain filter for --auto argument. Use toolchains names separated by comma, 'default' or 'all' to select toolchains")

        test_scopes = ','.join(["'%s'" % n for n in get_available_oper_test_scopes()])
        parser.add_argument('--oper',
                            dest='operability_checks',
                            type=argparse_lowercase_type(get_available_oper_test_scopes(), "scopes"),
                            help='Perform interoperability tests between host and connected mbed devices. Available test scopes are: %s' % test_scopes)

    # --- Test selection and filtering -----------------------------------
    parser.add_argument('--clean',
                        dest='clean',
                        action="store_true",
                        help='Clean the build directory')

    parser.add_argument('-P', '--only-peripherals',
                        dest='test_only_peripheral',
                        default=False,
                        action="store_true",
                        help='Test only peripheral declared for MUT and skip common tests')

    parser.add_argument("--profile", dest="profile", action="append",
                        type=argparse_filestring_type,
                        default=[])

    parser.add_argument('-C', '--only-commons',
                        dest='test_only_common',
                        default=False,
                        action="store_true",
                        help='Test only board internals. Skip perpherials tests and perform common tests')

    parser.add_argument('-n', '--test-by-names',
                        dest='test_by_names',
                        type=argparse_many(str),
                        help='Runs only test enumerated it this switch. Use comma to separate test case names')

    parser.add_argument('-p', '--peripheral-by-names',
                        dest='peripheral_by_names',
                        type=argparse_many(str),
                        help='Forces discovery of particular peripherals. Use comma to separate peripheral names')

    # --- Flash / reset plugin selection ---------------------------------
    copy_methods = host_tests_plugins.get_plugin_caps('CopyMethod')
    copy_methods_str = "Plugin support: " + ', '.join(copy_methods)

    parser.add_argument('-c', '--copy-method',
                        dest='copy_method',
                        type=argparse_uppercase_type(copy_methods, "flash method"),
                        help="Select binary copy (flash) method. Default is Python's shutil.copy() method. %s"% copy_methods_str)

    reset_methods = host_tests_plugins.get_plugin_caps('ResetMethod')
    reset_methods_str = "Plugin support: " + ', '.join(reset_methods)

    parser.add_argument('-r', '--reset-type',
                        dest='mut_reset_type',
                        default=None,
                        type=argparse_uppercase_type(reset_methods, "reset method"),
                        help='Extra reset method used to reset MUT by host test script. %s'% reset_methods_str)

    # --- Static analysis ------------------------------------------------
    parser.add_argument('-g', '--goanna-for-tests',
                        dest='goanna_for_tests',
                        action="store_true",
                        help='Run Goanna static analyse tool for tests. (Project will be rebuilded)')

    parser.add_argument('-G', '--goanna-for-sdk',
                        dest='goanna_for_mbed_sdk',
                        action="store_true",
                        help='Run Goanna static analyse tool for mbed SDK (Project will be rebuilded)')

    # --- Reporting ------------------------------------------------------
    parser.add_argument('-s', '--suppress-summary',
                        dest='suppress_summary',
                        default=False,
                        action="store_true",
                        help='Suppresses display of wellformatted table with test results')

    parser.add_argument('-t', '--test-summary',
                        dest='test_x_toolchain_summary',
                        default=False,
                        action="store_true",
                        help='Displays wellformatted table with test x toolchain test result per target')

    parser.add_argument('-A', '--test-automation-report',
                        dest='test_automation_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all tests and exits')

    parser.add_argument('-R', '--test-case-report',
                        dest='test_case_report',
                        default=False,
                        action="store_true",
                        help='Prints information about all test cases and exits')

    parser.add_argument("-S", "--supported-toolchains",
                        action="store_true",
                        dest="supported_toolchains",
                        default=False,
                        help="Displays supported matrix of MCUs and toolchains")

    # --- Execution control ----------------------------------------------
    parser.add_argument("-O", "--only-build",
                        action="store_true",
                        dest="only_build_tests",
                        default=False,
                        help="Only build tests, skips actual test procedures (flashing etc.)")

    parser.add_argument('--parallel',
                        dest='parallel_test_exec',
                        default=False,
                        action="store_true",
                        help='Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)')

    parser.add_argument('--config',
                        dest='verbose_test_configuration_only',
                        default=False,
                        action="store_true",
                        help='Displays full test specification and MUTs configration and exits')

    parser.add_argument('--loops',
                        dest='test_loops_list',
                        type=argparse_many(str),
                        help='Set no. of loops per test. Format: TEST_1=1,TEST_2=2,TEST_3=3')

    parser.add_argument('--global-loops',
                        dest='test_global_loops_value',
                        type=int,
                        help='Set global number of test loops per test. Default value is set 1')

    parser.add_argument('--consolidate-waterfall',
                        dest='consolidate_waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --waterfall argument. Adds only one test to report reflecting outcome of waterfall test.')

    parser.add_argument('-W', '--waterfall',
                        dest='waterfall_test',
                        default=False,
                        action="store_true",
                        help='Used with --loops or --global-loops arguments. Tests until OK result occurs and assumes test passed')

    parser.add_argument('-N', '--firmware-name',
                        dest='firmware_global_name',
                        help='Set global name for all produced projects. Note, proper file extension will be added by buid scripts')

    parser.add_argument('-u', '--shuffle',
                        dest='shuffle_test_order',
                        default=False,
                        action="store_true",
                        help='Shuffles test execution order')

    parser.add_argument('--shuffle-seed',
                        dest='shuffle_test_seed',
                        default=None,
                        help='Shuffle seed (If you want to reproduce your shuffle order please use seed provided in test summary)')

    parser.add_argument('-f', '--filter',
                        dest='general_filter_regex',
                        type=argparse_many(str),
                        default=None,
                        help='For some commands you can use filter to filter out results')

    parser.add_argument('--inc-timeout',
                        dest='extend_test_timeout',
                        metavar="NUMBER",
                        type=int,
                        help='You can increase global timeout for each test by specifying additional test timeout in seconds')

    # --- Output / logging -----------------------------------------------
    parser.add_argument('--db',
                        dest='db_url',
                        help='This specifies what database test suite uses to store its state. To pass DB connection info use database connection string. Example: \'mysql://username:password@127.0.0.1/db_name\'')

    parser.add_argument('-l', '--log',
                        dest='log_file_name',
                        help='Log events to external file (note not all console entries may be visible in log file)')

    parser.add_argument('--report-html',
                        dest='report_html_file_name',
                        help='You can log test suite results in form of HTML report')

    parser.add_argument('--report-junit',
                        dest='report_junit_file_name',
                        help='You can log test suite results in form of JUnit compliant XML report')

    parser.add_argument("--report-build",
                        dest="report_build_file_name",
                        help="Output the build results to a junit xml file")

    parser.add_argument("--report-text",
                        dest="report_text_file_name",
                        help="Output the build results to a text file")

    parser.add_argument('--verbose-skipped',
                        dest='verbose_skipped_tests',
                        default=False,
                        action="store_true",
                        help='Prints some extra information about skipped tests')

    parser.add_argument('-V', '--verbose-test-result',
                        dest='verbose_test_result_only',
                        default=False,
                        action="store_true",
                        help='Prints test serial output')

    parser.add_argument('-v', '--verbose',
                        dest='verbose',
                        default=False,
                        action="store_true",
                        help='Verbose mode (prints some extra information)')

    parser.add_argument('--version',
                        dest='version',
                        default=False,
                        action="store_true",
                        help='Prints script version and exits')

    parser.add_argument('--stats-depth',
                        dest='stats_depth',
                        default=2,
                        type=int,
                        help="Depth level for static memory report")

    return parser
def test_path_to_name(path, base):
"""Change all slashes in a path into hyphens
This creates a unique cross-platform test name based on the path
This can eventually be overriden by a to-be-determined meta-data mechanism"""
name_parts = []
head, tail = os.path.split(relpath(path,base))
while (tail and tail != "."):
name_parts.insert(0, tail)
head, tail = os.path.split(head)
return "-".join(name_parts).lower()
def get_test_config(config_name, target_name):
    """Find the path to a test configuration file.

    config_name: path to a custom configuration file OR an mbed OS interface
        name ("ethernet", "wifi_odin", etc.)
    target_name: name of the target, used to determine whether the given
        mbed OS interface is valid

    Returns the path to the config; returns None if no valid config is found.
    """
    if exists(config_name):
        # A path that already exists on disk is a module config - use it as-is.
        return config_name
    # Otherwise resolve config_name as an mbed OS interface name.
    return TestConfig.get_config_path(config_name, target_name)
def find_tests(base_dir, target_name, toolchain_name, icetea, greentea, app_config=None):
    """ Finds all tests in a directory recursively

    :param base_dir: path to the directory to scan for tests (ex. 'path/to/project')
    :param target_name: name of the target to use for scanning (ex. 'K64F')
    :param toolchain_name: name of the toolchain to use for scanning (ex. 'GCC_ARM')
    :param icetea: icetea enabled
    :param greentea: greentea enabled
    :param app_config - location of a chosen mbed_app.json file

    returns a dictionary where keys are the test name, and the values are
    lists of paths needed to build the test.
    """

    # Temporary structure: tests referenced by (name, base, group, case) tuple
    tests = {}
    # List of common folders: (predicate function, path) tuple
    commons = []

    config = Config(target_name, base_dir, app_config)

    # Scan the directory for paths to probe for 'TESTS' folders
    base_resources = Resources(MockNotifier(), collect_ignores=True)
    base_resources.scan_with_config(base_dir, config)

    if greentea:
        # Greentea tests live under TESTS/<group>/<case> directories, which
        # the resource scan reports as ignored dirs.
        dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TESTS']
        ignoreset = MbedIgnoreSet()

        for directory in dirs:
            # An .mbedignore at the TESTS level filters entire groups/cases.
            ignorefile = join(directory, IGNORE_FILENAME)
            if isfile(ignorefile):
                ignoreset.add_mbedignore(directory, ignorefile)
            for test_group_directory in os.listdir(directory):
                grp_dir = join(directory, test_group_directory)
                if not isdir(grp_dir) or ignoreset.is_ignored(grp_dir):
                    continue
                # Group-level .mbedignore files are honoured too.
                grpignorefile = join(grp_dir, IGNORE_FILENAME)
                if isfile(grpignorefile):
                    ignoreset.add_mbedignore(grp_dir, grpignorefile)
                for test_case_directory in os.listdir(grp_dir):
                    d = join(directory, test_group_directory, test_case_directory)
                    if not isdir(d) or ignoreset.is_ignored(d):
                        continue
                    # 'host_tests' and 'COMMON' dirs are support folders,
                    # not test cases.
                    special_dirs = ['host_tests', 'COMMON']
                    if test_group_directory not in special_dirs and test_case_directory not in special_dirs:
                        test_name = test_path_to_name(d, base_dir)
                        tests[(test_name, directory, test_group_directory, test_case_directory)] = [d]
                    if test_case_directory == 'COMMON':
                        # Group-level COMMON: shared by all cases in this group.
                        # Bound via functools.partial to avoid the late-binding
                        # closure pitfall inside the loop.
                        def predicate(base_pred, group_pred, name_base_group_case):
                            (name, base, group, case) = name_base_group_case
                            return base == base_pred and group == group_pred
                        commons.append((functools.partial(predicate, directory, test_group_directory), d))
                if test_group_directory == 'COMMON':
                    # Top-level COMMON: shared by every test under this TESTS dir.
                    def predicate(base_pred, name_base_group_case):
                        (name, base, group, case) = name_base_group_case
                        return base == base_pred
                    commons.append((functools.partial(predicate, directory), grp_dir))

    if icetea:
        # Icetea tests live under TEST_APPS/device/<test> directories.
        dirs = [d for d in base_resources.ignored_dirs if basename(d) == 'TEST_APPS']
        for directory in dirs:
            if not isdir(directory):
                continue
            for subdir in os.listdir(directory):
                d = join(directory, subdir)
                if not isdir(d):
                    continue
                if 'device' == subdir:
                    for test_dir in os.listdir(d):
                        test_dir_path = join(d, test_dir)
                        test_name = test_path_to_name(test_dir_path, base_dir)
                        tests[(test_name, directory, subdir, test_dir)] = [test_dir_path]

    # Apply common directories: append each COMMON path to every test whose
    # identity tuple matches the predicate recorded above.
    for pred, path in commons:
        for test_identity, test_paths in six.iteritems(tests):
            if pred(test_identity):
                test_paths.append(path)

    # Drop identity besides name
    return {name: paths for (name, _, _, _), paths in six.iteritems(tests)}
def print_tests(tests, format="list", sort=True):
    """Given a dictionary of tests (as returned from "find_tests"), print them
    in the specified format.

    :param tests: mapping of test name -> list of source paths
    :param format: "list" for a human-readable listing (sorted by name), or
        "json" for a JSON object mapping each test name to its primary path
    :param sort: kept for interface compatibility; the "list" output is
        always sorted by test name

    Exits the process with status 1 on an unknown format.
    """
    if format == "list":
        for test_name in sorted(tests.keys()):
            test_path = tests[test_name][0]
            print("Test Case:")
            print(" Name: %s" % test_name)
            print(" Path: %s" % test_path)
    elif format == "json":
        # BUG FIX: previously iterated the dict itself (keys only, not
        # .items()) and referenced the undefined name 'test_path', so the
        # JSON branch raised at runtime.
        print(json.dumps({test_name: test_paths[0]
                          for test_name, test_paths in tests.items()},
                         indent=2))
    else:
        print("Unknown format '%s'" % format)
        sys.exit(1)
def norm_relative_path(path, start):
    """Return *path* relative to *start*, normalized to forward slashes.

    Mimics os.path.relpath, but additionally converts Windows-style
    backslash separators into Unix-style forward slashes.
    """
    normalized = os.path.normpath(path)
    relative = os.path.relpath(normalized, start)
    return relative.replace("\\", "/")
def build_test_worker(*args, **kwargs):
    """This is a worker function for the parallel building of tests. The `args`
    and `kwargs` are passed directly to `build_project`. It returns a dictionary
    with the following structure:

    {
        'result': `True` if no exceptions were thrown, `False` otherwise
        'reason': Instance of exception that was thrown on failure
        'bin_file': Path to the created binary if `build_project` was
                    successful. Not present otherwise
        'kwargs': The keyword arguments that were passed to `build_project`.
                  This includes arguments that were modified (ex. report)
    }
    """
    bin_file = None
    ret = {
        'result': False,
        'args': args,
        'kwargs': kwargs
    }

    # Use parent TOOLCHAIN_PATHS variable (worker processes receive a copy
    # through 'toolchain_paths' and mirror it into their own module global).
    for key, value in kwargs['toolchain_paths'].items():
        TOOLCHAIN_PATHS[key] = value

    del kwargs['toolchain_paths']

    try:
        bin_file, _ = build_project(*args, **kwargs)
        ret['result'] = True
        ret['bin_file'] = bin_file
        ret['kwargs'] = kwargs
    except NotSupportedException as e:
        ret['reason'] = e
    except ToolException as e:
        ret['reason'] = e
    except KeyboardInterrupt as e:
        ret['reason'] = e
    except Exception as e:
        # BUG FIX: this was a bare `except:` that printed a traceback but
        # left 'reason' unset, so callers inspecting 'reason' could not see
        # why the build failed, and SystemExit was silently swallowed.
        ret['reason'] = e
        # Print unhandled exceptions here so they show up in worker output.
        import traceback
        traceback.print_exc(file=sys.stdout)

    return ret
def build_tests(tests, base_source_paths, build_path, target, toolchain_name,
                clean=False, notify=None, jobs=1, macros=None,
                silent=False, report=None, properties=None,
                continue_on_build_fail=False, app_config=None,
                build_profile=None, stats_depth=None, ignore=None, spe_build=False):
    """Given the data structure from 'find_tests' and the typical build parameters,
    build all the tests

    Returns a tuple of the build result (True or False) followed by the test
    build data structure"""
    execution_directory = "."
    base_path = norm_relative_path(build_path, execution_directory)

    # Accept either a Target object or a target name.
    if isinstance(target, Target):
        target_name = target.name
    else:
        target_name = target
        target = TARGET_MAP[target_name]
    cfg, _, _, _ = get_config(base_source_paths, target, app_config=app_config)

    # Default serial baud rate unless overridden by the app config.
    baud_rate = 9600
    if 'platform.stdio-baud-rate' in cfg:
        baud_rate = cfg['platform.stdio-baud-rate'].value

    test_build = {
        "platform": target_name,
        "toolchain": toolchain_name,
        "base_path": base_path,
        "baud_rate": baud_rate,
        "binary_type": "bootable",
        "tests": {},
        "test_apps": {}
    }

    result = True

    # Fan the individual test builds out to a process pool.
    jobs_count = int(jobs if jobs else cpu_count())
    p = Pool(processes=jobs_count)
    results = []
    for test_name, test_paths in tests.items():
        if not isinstance(test_paths, list):
            test_paths = [test_paths]

        test_build_path = os.path.join(build_path, test_paths[0])
        src_paths = base_source_paths + test_paths
        bin_file = None
        test_case_folder_name = os.path.basename(test_paths[0])

        args = (src_paths, test_build_path, deepcopy(target), toolchain_name)
        # Each worker builds with jobs=1; parallelism comes from the pool.
        # A MockNotifier buffers messages so they can be replayed in order
        # on the real notifier once the worker finishes.
        kwargs = {
            'jobs': 1,
            'clean': clean,
            'macros': macros,
            'name': test_case_folder_name,
            'project_id': test_name,
            'report': report,
            'properties': properties,
            'app_config': app_config,
            'build_profile': build_profile,
            'toolchain_paths': TOOLCHAIN_PATHS,
            'stats_depth': stats_depth,
            'notify': MockNotifier(),
            'spe_build': spe_build
        }

        results.append(p.apply_async(build_test_worker, args, kwargs))

    p.close()
    result = True
    itr = 0
    # Poll the async results; 360000 iterations of 10 ms sleep ~= 1 hour of
    # polling, though the message below says 10 minutes.
    while len(results):
        itr += 1
        if itr > 360000:
            p.terminate()
            p.join()
            raise ToolException("Compile did not finish in 10 minutes")
        else:
            sleep(0.01)
            pending = 0
            for r in results:
                if r.ready() is True:
                    try:
                        worker_result = r.get()
                        results.remove(r)

                        # Push all deferred notifications out to the actual notifier
                        new_notify = deepcopy(notify)
                        for message in worker_result['kwargs']['notify'].messages:
                            new_notify.notify(message)

                        # Take report from the kwargs and merge it into existing report
                        if report:
                            report_entry = worker_result['kwargs']['report'][target_name][toolchain_name]
                            report_entry[worker_result['kwargs']['project_id'].upper()][0][0]['output'] = new_notify.get_output()
                            for test_key in report_entry.keys():
                                report[target_name][toolchain_name][test_key] = report_entry[test_key]

                        # Set the overall result to a failure if a build failure occurred
                        # NOTE(review): 'not worker_result["reason"]' looks inverted -
                        # an exception instance stored in 'reason' is truthy, so this
                        # branch may never fire on a real failure; confirm intent.
                        if ('reason' in worker_result and
                            not worker_result['reason'] and
                            not isinstance(worker_result['reason'], NotSupportedException)):
                            result = False
                            break

                        # Adding binary path to test build result
                        if ('result' in worker_result and
                            worker_result['result'] and
                            'bin_file' in worker_result):
                            bin_file = norm_relative_path(worker_result['bin_file'], execution_directory)

                            test_key = 'test_apps' if 'test_apps-' in worker_result['kwargs']['project_id'] else 'tests'
                            test_build[test_key][worker_result['kwargs']['project_id']] = {
                                "binaries": [
                                    {
                                        "path": bin_file
                                    }
                                ]
                            }

                            test_key = worker_result['kwargs']['project_id'].upper()
                            print('Image: %s\n' % bin_file)

                    except:
                        # NOTE(review): relies on the private Pool attribute
                        # _taskqueue to drop queued work before terminating.
                        if p._taskqueue.queue:
                            p._taskqueue.queue.clear()
                            sleep(0.5)
                        p.terminate()
                        p.join()
                        raise
                else:
                    # Result not ready yet; stop scanning once every pool
                    # slot is accounted for.
                    pending += 1
                    if pending >= jobs_count:
                        break

            # Break as soon as possible if there is a failure and we are not
            # continuing on build failures
            if not result and not continue_on_build_fail:
                if p._taskqueue.queue:
                    p._taskqueue.queue.clear()
                    sleep(0.5)
                p.terminate()
                break

    p.join()

    test_builds = {}
    test_builds["%s-%s" % (target_name, toolchain_name)] = test_build

    return result, test_builds
def test_spec_from_test_builds(test_builds):
for build in test_builds:
if Target.get_target(test_builds[build]['platform']).is_PSA_non_secure_target:
if test_builds[build]['platform'].endswith('_NS'):
test_builds[build]['platform'] = test_builds[build]['platform'][:-3]
if test_builds[build]['platform'].endswith('_PSA'):
test_builds[build]['platform'] = test_builds[build]['platform'][:-4]
return {
"builds": test_builds
}
|
__init__.py | import bisect
import collections
import inspect
import io
import json
import queue
import threading
import time
import logging
import traceback
from typing import Union
from . import exception
__version__ = '2.1.0'
def flavor(msg):
    """
    Return flavor of message or event.

    A message's flavor may be one of these:

    - ``chat``
    - ``callback_query``
    - ``inline_query``
    - ``chosen_inline_result``
    - ``shipping_query``
    - ``pre_checkout_query``

    An event's flavor is determined by the single top-level key.
    """
    # Key signatures are probed in this fixed order; the first match wins.
    signatures = (
        ('chat', lambda m: 'message_id' in m),
        ('callback_query', lambda m: 'id' in m and 'chat_instance' in m),
        ('inline_query', lambda m: 'id' in m and 'query' in m),
        ('chosen_inline_result', lambda m: 'result_id' in m),
        ('shipping_query', lambda m: 'id' in m and 'shipping_address' in m),
        ('pre_checkout_query', lambda m: 'id' in m and 'total_amount' in m),
    )
    for name, matches in signatures:
        if matches(msg):
            return name

    # Events carry exactly one top-level key, which names their flavor.
    top_keys = list(msg.keys())
    if len(top_keys) == 1:
        return top_keys[0]

    raise exception.BadFlavor(msg)
# Flavors that correspond to ordinary chat messages.
chat_flavors = ['chat']
# Flavors produced by inline-mode interaction.
inline_flavors = ['inline_query', 'chosen_inline_result']
def _find_first_key(d, keys):
for k in keys:
if k in d:
return k
logging.error('No suggested keys %s in %s', str(keys), str(d))
# Gets the first key after the update_id one.
return list(d.keys())[1]
# Message keys probed (in this order) to determine a chat message's content
# type; used by glance() via _find_first_key().
all_content_types = [
    'text', 'audio', 'animation', 'document', 'game', 'photo', 'sticker', 'video', 'voice',
    'video_note', 'contact', 'poll', 'location', 'venue', 'new_chat_member', 'left_chat_member',
    'new_chat_title', 'new_chat_photo', 'delete_chat_photo', 'group_chat_created', 'supergroup_chat_created',
    'channel_chat_created', 'migrate_to_chat_id', 'migrate_from_chat_id', 'pinned_message',
    'new_chat_members', 'invoice', 'successful_payment'
]
def glance(msg, flavor='chat', long=False):
    """
    Extract "headline" info about a message.

    Use parameter ``long`` to control whether a short or long tuple is returned.

    When ``flavor`` is ``chat``
    (``msg`` being a `Message <https://core.telegram.org/bots/api#message>`_ object):

    - short: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``)
    - long: (content_type, ``msg['chat']['type']``, ``msg['chat']['id']``, ``msg['date']``, ``msg['message_id']``)

    *content_type* is one of the keys listed in ``all_content_types``.

    When ``flavor`` is ``callback_query``
    (``msg`` being a `CallbackQuery <https://core.telegram.org/bots/api#callbackquery>`_ object):

    - regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['data']``)

    When ``flavor`` is ``inline_query``
    (``msg`` being a `InlineQuery <https://core.telegram.org/bots/api#inlinequery>`_ object):

    - short: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``)
    - long: (``msg['id']``, ``msg['from']['id']``, ``msg['query']``, ``msg['offset']``)

    When ``flavor`` is ``chosen_inline_result``
    (``msg`` being a `ChosenInlineResult <https://core.telegram.org/bots/api#choseninlineresult>`_ object):

    - regardless: (``msg['result_id']``, ``msg['from']['id']``, ``msg['query']``)

    When ``flavor`` is ``shipping_query``
    (``msg`` being a `ShippingQuery <https://core.telegram.org/bots/api#shippingquery>`_ object):

    - regardless: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``)

    When ``flavor`` is ``pre_checkout_query``
    (``msg`` being a `PreCheckoutQuery <https://core.telegram.org/bots/api#precheckoutquery>`_ object):

    - short: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``)
    - long: (``msg['id']``, ``msg['from']['id']``, ``msg['invoice_payload']``, ``msg['currency']``, ``msg['total_amount']``)
    """
    if flavor == 'chat':
        content_type = _find_first_key(msg, all_content_types)
        if long:
            return content_type, msg['chat']['type'], msg['chat']['id'], msg['date'], msg['message_id']
        return content_type, msg['chat']['type'], msg['chat']['id']

    if flavor == 'callback_query':
        return msg['id'], msg['from']['id'], msg['data']

    if flavor == 'inline_query':
        if long:
            return msg['id'], msg['from']['id'], msg['query'], msg['offset']
        return msg['id'], msg['from']['id'], msg['query']

    if flavor == 'chosen_inline_result':
        return msg['result_id'], msg['from']['id'], msg['query']

    if flavor == 'shipping_query':
        return msg['id'], msg['from']['id'], msg['invoice_payload']

    if flavor == 'pre_checkout_query':
        if long:
            return msg['id'], msg['from']['id'], msg['invoice_payload'], msg['currency'], msg['total_amount']
        return msg['id'], msg['from']['id'], msg['invoice_payload']

    raise exception.BadFlavor(flavor)
def flance(msg, long=False):
    """
    A combination of :meth:`amanobot.flavor` and :meth:`amanobot.glance`:
    return a 2-tuple (flavor, headline_info), where *headline_info* is
    whatever :meth:`amanobot.glance` extracts for the detected flavor and
    the ``long`` parameter.
    """
    detected = flavor(msg)
    return detected, glance(msg, flavor=detected, long=long)
def peel(event):
    """
    Remove an event's top-level skin (where its flavor is determined), and
    return the core content.
    """
    # An event is a single-key dict; return its (first) value.
    return next(iter(event.values()))
def fleece(event):
    """
    A combination of :meth:`amanobot.flavor` and :meth:`amanobot.peel`:
    return a 2-tuple (flavor, content) of an event.
    """
    f = flavor(event)
    content = peel(event)
    return f, content
def is_event(msg):
    """
    Return whether the message looks like an event - that is, whether its
    flavor starts with an underscore.
    """
    return flavor(msg)[:1] == '_'
def origin_identifier(msg):
    """
    Extract the message identifier of a callback query's origin. The value
    returned is guaranteed to be a tuple.

    ``msg`` is expected to be ``callback_query``.
    """
    # Origin is either a regular chat message or an inline message.
    message = msg.get('message')
    if message is not None:
        return message['chat']['id'], message['message_id']
    if 'inline_message_id' in msg:
        return (msg['inline_message_id'],)
    raise ValueError()
def message_identifier(msg):
    """
    Extract an identifier for message editing. Useful with
    :meth:`amanobot.Bot.editMessageText` and similar methods. The value
    returned is guaranteed to be a tuple.

    ``msg`` is expected to be ``chat`` or ``chosen_inline_result``.
    """
    is_chat_message = 'chat' in msg and 'message_id' in msg
    if is_chat_message:
        return msg['chat']['id'], msg['message_id']
    if 'inline_message_id' in msg:
        return (msg['inline_message_id'],)
    raise ValueError()
def _dismantle_message_identifier(f):
if isinstance(f, tuple):
if len(f) == 2:
return {'chat_id': f[0], 'message_id': f[1]}
if len(f) == 1:
return {'inline_message_id': f[0]}
raise ValueError()
return {'inline_message_id': f}
def _split_input_media_array(media_array):
    """Split an array of InputMedia into (legal_media, files_to_attach).

    Media referenced by file_id/URL strings pass through unchanged; real file
    objects are replaced by 'attach://<name>' references, with the actual
    (name, file) pairs collected into files_to_attach for multipart upload.
    """
    def ensure_dict(input_media):
        # Accept namedtuples (converted via _asdict) or plain dicts.
        if isinstance(input_media, tuple) and hasattr(input_media, '_asdict'):
            return input_media._asdict()
        if isinstance(input_media, dict):
            return input_media
        raise ValueError()

    def given_attach_name(input_media):
        # A tuple-valued 'media' field carries an explicit attach name first.
        if isinstance(input_media['media'], tuple):
            return input_media['media'][0]
        return None

    def attach_name_generator(used_names):
        # Yield 'media1', 'media2', ... skipping explicitly-used names.
        x = 0
        while 1:
            x += 1
            name = 'media' + str(x)
            if name in used_names:
                continue
            yield name

    def split_media(input_media, name_generator):
        file_spec = input_media['media']

        # file_id, URL
        if _isstring(file_spec):
            return input_media, None

        # file-object
        # (attach-name, file-object)
        # (attach-name, (filename, file-object))
        if isinstance(file_spec, tuple):
            name, f = file_spec
        else:
            name, f = next(name_generator), file_spec

        # Copy so the caller's dict is not mutated.
        m = input_media.copy()
        m['media'] = 'attach://' + name
        return (m, (name, f))

    ms = [ensure_dict(m) for m in media_array]
    used_names = [given_attach_name(m) for m in ms if given_attach_name(m) is not None]
    # One shared generator keeps auto-assigned names unique across the array.
    name_generator = attach_name_generator(used_names)
    splitted = [split_media(m, name_generator) for m in ms]
    # NOTE(review): zip(*splitted) raises on an empty media_array - confirm
    # callers never pass one.
    legal_media, attachments = map(list, zip(*splitted))
    files_to_attach = dict([a for a in attachments if a is not None])
    return legal_media, files_to_attach
def _isstring(s):
return isinstance(s, str)
def _isfile(f):
return isinstance(f, io.IOBase)
from . import helper
def flavor_router(routing_table):
    """Return a routing function that dispatches messages by their flavor."""
    return helper.Router(flavor, routing_table).route
class _BotBase:
    """Common plumbing shared by Bot classes: token, error policy, endpoint."""
    def __init__(self, token: str, raise_errors: bool, api_endpoint: str):
        self._token = token                # Bot API authentication token
        self._raise_errors = raise_errors  # default raise_errors for API requests
        self._base_url = api_endpoint      # e.g. "https://api.telegram.org"
        # presumably the chunk size for file transfers; use site not visible
        # here - confirm at the download/upload code.
        self._file_chunk_size = 65536
def _strip(params, more=None):
if more is None:
more = []
return {key: value for key, value in params.items() if key not in ['self'] + more}
def _rectify(params):
def make_jsonable(value):
if isinstance(value, list):
return [make_jsonable(v) for v in value]
if isinstance(value, dict):
return {k: make_jsonable(v) for k, v in value.items() if v is not None}
if isinstance(value, tuple) and hasattr(value, '_asdict'):
return {k: make_jsonable(v) for k, v in value._asdict().items() if v is not None}
return value
def flatten(value):
v = make_jsonable(value)
if isinstance(v, (dict, list)):
return json.dumps(v, separators=(',', ':'))
return v
# remove None, then json-serialize if needed
return {k: flatten(v) for k, v in params.items() if v is not None}
from . import api
class Bot(_BotBase):
    class Scheduler(threading.Thread):
        """Background thread that emits scheduled data through a handler.

        Events are kept in a list sorted by timestamp; the `bisect` module
        maintains the ordering on insert.
        """
        # A class that is sorted by timestamp. Use `bisect` module to ensure order in event queue.
        Event = collections.namedtuple('Event', ['timestamp', 'data'])
        # Comparisons consider ONLY the timestamp, so bisect orders events
        # purely by time (data is ignored for ordering).
        Event.__eq__ = lambda self, other: self.timestamp == other.timestamp
        Event.__ne__ = lambda self, other: self.timestamp != other.timestamp
        Event.__gt__ = lambda self, other: self.timestamp > other.timestamp
        Event.__ge__ = lambda self, other: self.timestamp >= other.timestamp
        Event.__lt__ = lambda self, other: self.timestamp < other.timestamp
        Event.__le__ = lambda self, other: self.timestamp <= other.timestamp

        def __init__(self):
            super(Bot.Scheduler, self).__init__()
            self._eventq = []
            self._lock = threading.RLock()  # reentrant lock to allow locked method calling locked method
            self._event_handler = None

        def _locked(fn):
            """Decorator: run the wrapped method while holding self._lock."""
            def k(self, *args, **kwargs):
                with self._lock:
                    return fn(self, *args, **kwargs)
            return k

        @_locked
        def _insert_event(self, data, when):
            # Insert keeping the queue sorted by timestamp.
            ev = self.Event(when, data)
            bisect.insort(self._eventq, ev)
            return ev

        @_locked
        def _remove_event(self, event):
            # Find event according to its timestamp.
            # Index returned should be one behind.
            i = bisect.bisect(self._eventq, event)

            # Having two events with identical timestamp is unlikely but possible.
            # I am going to move forward and compare timestamp AND object address
            # to make sure the correct object is found.

            while i > 0:
                i -= 1
                e = self._eventq[i]

                if e.timestamp != event.timestamp:
                    raise exception.EventNotFound(event)
                if id(e) == id(event):
                    self._eventq.pop(i)
                    return

            raise exception.EventNotFound(event)

        @_locked
        def _pop_expired_event(self):
            # Return and remove the earliest event if it is due, else None.
            if not self._eventq:
                return None

            if self._eventq[0].timestamp <= time.time():
                return self._eventq.pop(0)
            return None

        def event_at(self, when, data):
            """
            Schedule some data to emit at an absolute timestamp.

            :type when: int or float
            :type data: dictionary
            :return: an internal Event object
            """
            return self._insert_event(data, when)

        def event_later(self, delay, data):
            """
            Schedule some data to emit after a number of seconds.

            :type delay: int or float
            :type data: dictionary
            :return: an internal Event object
            """
            return self._insert_event(data, time.time() + delay)

        def event_now(self, data):
            """
            Emit some data as soon as possible.

            :type data: dictionary
            :return: an internal Event object
            """
            return self._insert_event(data, time.time())

        def cancel(self, event):
            """
            Cancel an event.

            :type event: an internal Event object
            """
            self._remove_event(event)

        def run(self):
            """Poll loop: drain expired events, then sleep briefly."""
            while 1:
                e = self._pop_expired_event()
                while e:
                    if callable(e.data):
                        d = e.data()  # call the data-producing function
                        if d is not None:
                            self._event_handler(d)
                    else:
                        self._event_handler(e.data)
                    e = self._pop_expired_event()
                time.sleep(0.1)

        def run_as_thread(self):
            """Start the scheduler as a daemon thread."""
            self.daemon = True
            self.start()

        def on_event(self, fn):
            """Register the callable that receives emitted event data."""
            self._event_handler = fn
    def __init__(self, token: str, raise_errors: bool = True, api_endpoint: str = "https://api.telegram.org"):
        """Create a Bot bound to *token*, with a scheduler and flavor router.

        :param token: Bot API token
        :param raise_errors: default error policy for API requests
        :param api_endpoint: base URL of the Bot API server
        """
        super(Bot, self).__init__(token, raise_errors, api_endpoint)

        self._scheduler = self.Scheduler()

        # Route each incoming message to the on_<flavor> handler.
        self._router = helper.Router(flavor, {'chat': lambda msg: self.on_chat_message(msg),
                                              'callback_query': lambda msg: self.on_callback_query(msg),
                                              'inline_query': lambda msg: self.on_inline_query(msg),
                                              'chosen_inline_result': lambda msg: self.on_chosen_inline_result(msg)})
        # use lambda to delay evaluation of self.on_ZZZ to runtime because
        # I don't want to require defining all methods right here.
    @property
    def scheduler(self):
        """This Bot's :class:`Scheduler` instance."""
        return self._scheduler
    @property
    def router(self):
        """The flavor-based message :class:`helper.Router`."""
        return self._router
    def handle(self, msg):
        """Dispatch *msg* to the appropriate handler via the flavor router."""
        self._router.route(msg)
    def _api_request(self, method, params=None, files=None, raise_errors=None, **kwargs):
        """Issue a Bot API request for *method*.

        *raise_errors* falls back to the instance-wide policy when None.
        """
        return api.request((self._base_url, self._token, method, params, files),
                           raise_errors=raise_errors if raise_errors is not None else self._raise_errors, **kwargs)
def _api_request_with_file(self, method, params, files, **kwargs):
    """API call where some entries of *files* may be uploads or plain strings."""
    uploads = {}
    for key, value in files.items():
        if _isstring(value):
            # A file_id or URL: send it as an ordinary request parameter.
            params[key] = value
        elif value is not None:
            # An actual file-like object (or tuple): upload it.
            uploads[key] = value
    return self._api_request(method, _rectify(params), uploads, **kwargs)
def getMe(self):
    """ See: https://core.telegram.org/bots/api#getme """
    # No parameters: the token embedded in the request identifies the bot.
    return self._api_request('getMe')
def logOut(self):
    """ See: https://core.telegram.org/bots/api#logout """
    # No parameters: the token embedded in the request identifies the bot.
    return self._api_request('logOut')
def close(self):
    """ See: https://core.telegram.org/bots/api#close """
    # No parameters: the token embedded in the request identifies the bot.
    return self._api_request('close')
def sendMessage(self, chat_id: Union[int, str], text: str,
                parse_mode: str = None,
                entities=None,
                disable_web_page_preview: bool = None,
                disable_notification: bool = None,
                reply_to_message_id: int = None,
                allow_sending_without_reply: bool = None,
                reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendmessage """
    # locals() here holds exactly this call's arguments; _strip packages
    # them into the request parameters.
    p = _strip(locals())
    return self._api_request('sendMessage', _rectify(p))
def forwardMessage(self, chat_id: Union[int, str], from_chat_id: Union[int, str], message_id: int,
                   disable_notification: bool = None):
    """ See: https://core.telegram.org/bots/api#forwardmessage """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('forwardMessage', _rectify(p))
def copyMessage(self, chat_id: Union[int, str], from_chat_id: Union[int, str], message_id: int,
                caption: str = None,
                parse_mode: str = None,
                caption_entities=None,
                disable_notification: bool = None,
                reply_to_message_id: int = None,
                allow_sending_without_reply: bool = None,
                reply_markup=None):
    """ See: https://core.telegram.org/bots/api#copymessage """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('copyMessage', _rectify(p))
def sendPhoto(self, chat_id: Union[int, str], photo,
              caption: str = None,
              parse_mode: str = None,
              caption_entities=None,
              disable_notification: bool = None,
              reply_to_message_id: int = None,
              allow_sending_without_reply: bool = None,
              reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#sendphoto
    :param photo:
        - string: ``file_id`` for a photo existing on Telegram servers
        - string: HTTP URL of a photo from the Internet
        - file-like object: obtained by ``open(path, 'rb')``
        - tuple: (filename, file-like object).
    """
    # 'photo' is kept out of the plain params here; it is handled below as a
    # potential file upload.
    p = _strip(locals(), more=['photo'])
    return self._api_request_with_file('sendPhoto', _rectify(p), {'photo': photo})
def sendAudio(self, chat_id: Union[int, str], audio,
              caption: str = None,
              parse_mode: str = None,
              caption_entities=None,
              duration=None,
              performer=None,
              title=None,
              thumb=None,
              disable_notification: bool = None,
              reply_to_message_id: int = None,
              allow_sending_without_reply: bool = None,
              reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#sendaudio
    :param audio: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
    """
    # 'audio' and 'thumb' are kept out of the plain params; both are handled
    # below as potential file uploads.
    p = _strip(locals(), more=['audio', 'thumb'])
    return self._api_request_with_file('sendAudio', _rectify(p), {'audio': audio, 'thumb': thumb})
def sendDocument(self, chat_id: Union[int, str], document,
                 thumb=None,
                 caption: str = None,
                 parse_mode: str = None,
                 caption_entities=None,
                 disable_content_type_detection=None,
                 disable_notification: bool = None,
                 reply_to_message_id: int = None,
                 allow_sending_without_reply: bool = None,
                 reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#senddocument
    :param document: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
    """
    # 'document' and 'thumb' are kept out of the plain params; both are
    # handled below as potential file uploads.
    p = _strip(locals(), more=['document', 'thumb'])
    return self._api_request_with_file('sendDocument', _rectify(p), {'document': document, 'thumb': thumb})
def sendVideo(self, chat_id: Union[int, str], video,
              duration=None,
              width=None,
              height=None,
              thumb=None,
              caption: str = None,
              parse_mode: str = None,
              caption_entities=None,
              supports_streaming=None,
              disable_notification: bool = None,
              reply_to_message_id: int = None,
              allow_sending_without_reply: bool = None,
              reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#sendvideo
    :param video: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
    """
    # 'video' and 'thumb' are kept out of the plain params; both are handled
    # below as potential file uploads.
    p = _strip(locals(), more=['video', 'thumb'])
    return self._api_request_with_file('sendVideo', _rectify(p), {'video': video, 'thumb': thumb})
def sendAnimation(self, chat_id: Union[int, str], animation,
                  duration=None,
                  width=None,
                  height=None,
                  thumb=None,
                  caption: str = None,
                  parse_mode: str = None,
                  caption_entities=None,
                  disable_notification: bool = None,
                  reply_to_message_id: int = None,
                  allow_sending_without_reply: bool = None,
                  reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#sendanimation
    :param animation: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
    """
    # 'animation' and 'thumb' are kept out of the plain params; both are
    # handled below as potential file uploads.
    p = _strip(locals(), more=['animation', 'thumb'])
    return self._api_request_with_file('sendAnimation', _rectify(p), {'animation': animation, 'thumb': thumb})
def sendVoice(self, chat_id: Union[int, str], voice,
              caption: str = None,
              parse_mode: str = None,
              caption_entities=None,
              duration=None,
              disable_notification: bool = None,
              reply_to_message_id: int = None,
              allow_sending_without_reply: bool = None,
              reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#sendvoice
    :param voice: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
    """
    # 'voice' is kept out of the plain params; it is handled below as a
    # potential file upload.
    p = _strip(locals(), more=['voice'])
    return self._api_request_with_file('sendVoice', _rectify(p), {'voice': voice})
def sendVideoNote(self, chat_id: Union[int, str], video_note,
                  duration=None,
                  length=None,
                  thumb=None,
                  disable_notification: bool = None,
                  reply_to_message_id: int = None,
                  allow_sending_without_reply: bool = None,
                  reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#sendvideonote
    :param video_note: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
    :param length:
        Although marked as optional, this method does not seem to work without
        it being specified. Supply any integer you want. It seems to have no effect
        on the video note's display size.
    """
    # 'video_note' and 'thumb' are kept out of the plain params; both are
    # handled below as potential file uploads.
    p = _strip(locals(), more=['video_note', 'thumb'])
    return self._api_request_with_file('sendVideoNote', _rectify(p), {'video_note': video_note, 'thumb': thumb})
def sendMediaGroup(self, chat_id: Union[int, str], media,
                   disable_notification: bool = None,
                   reply_to_message_id: int = None,
                   allow_sending_without_reply: bool = None):
    """
    See: https://core.telegram.org/bots/api#sendmediagroup
    :type media: array of `InputMedia <https://core.telegram.org/bots/api#inputmedia>`_ objects
    :param media:
        To indicate media locations, each InputMedia object's ``media`` field
        should be one of these:
        - string: ``file_id`` for a file existing on Telegram servers
        - string: HTTP URL of a file from the Internet
        - file-like object: obtained by ``open(path, 'rb')``
        - tuple: (form-data name, file-like object)
        - tuple: (form-data name, (filename, file-like object))
        In case of uploading, you may supply customized multipart/form-data
        names for each uploaded file (as in last 2 options above). Otherwise,
        amanobot assigns unique names to each uploaded file. Names assigned by
        amanobot will not collide with user-supplied names, if any.
    """
    p = _strip(locals(), more=['media'])
    # Separate the actual uploads from the JSON-serializable media
    # descriptions, then send the uploads as attached files.
    legal_media, files_to_attach = _split_input_media_array(media)
    p['media'] = legal_media
    return self._api_request('sendMediaGroup', _rectify(p), files_to_attach)
def sendLocation(self, chat_id: Union[int, str], latitude, longitude,
                 horizontal_accuracy=None,
                 live_period=None,
                 heading=None,
                 proximity_alert_radius=None,
                 disable_notification: bool = None,
                 reply_to_message_id: int = None,
                 allow_sending_without_reply: bool = None,
                 reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendlocation """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendLocation', _rectify(p))
def editMessageLiveLocation(self, msg_identifier, latitude, longitude,
                            horizontal_accuracy=None,
                            heading=None,
                            proximity_alert_radius=None,
                            reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#editmessagelivelocation
    :param msg_identifier: Same as in :meth:`.Bot.editMessageText`
    """
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into chat_id/message_id or inline_message_id fields.
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageLiveLocation', _rectify(p))
def stopMessageLiveLocation(self, msg_identifier,
                            reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#stopmessagelivelocation
    :param msg_identifier: Same as in :meth:`.Bot.editMessageText`
    """
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into chat_id/message_id or inline_message_id fields.
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('stopMessageLiveLocation', _rectify(p))
def sendVenue(self, chat_id: Union[int, str], latitude, longitude, title, address,
              foursquare_id=None,
              foursquare_type=None,
              disable_notification: bool = None,
              reply_to_message_id: int = None,
              allow_sending_without_reply: bool = None,
              reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendvenue """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendVenue', _rectify(p))
def sendContact(self, chat_id: Union[int, str], phone_number, first_name,
                last_name=None,
                vcard=None,
                disable_notification: bool = None,
                reply_to_message_id: int = None,
                allow_sending_without_reply: bool = None,
                reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendcontact """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendContact', _rectify(p))
def sendPoll(self, chat_id: Union[int, str], question, options,
             is_anonymous=None,
             type=None,
             allows_multiple_answers=None,
             correct_option_id=None,
             explanation=None,
             explanation_parse_mode: str = None,
             open_period=None,
             is_closed=None,
             disable_notification: bool = None,
             reply_to_message_id: int = None,
             allow_sending_without_reply: bool = None,
             reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendpoll """
    # NOTE: the 'type' parameter shadows the builtin; kept for API-name parity.
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendPoll', _rectify(p))
def sendDice(self, chat_id: Union[int, str],
             emoji=None,
             disable_notification: bool = None,
             reply_to_message_id: int = None,
             allow_sending_without_reply: bool = None,
             reply_markup=None):
    """ See: https://core.telegram.org/bots/api#senddice """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendDice', _rectify(p))
def sendGame(self, chat_id: Union[int, str], game_short_name,
             disable_notification: bool = None,
             reply_to_message_id: int = None,
             allow_sending_without_reply: bool = None,
             reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendgame """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendGame', _rectify(p))
def sendInvoice(self, chat_id: Union[int, str], title, description, payload,
                provider_token, start_parameter, currency, prices,
                provider_data=None,
                photo_url=None,
                photo_size=None,
                photo_width=None,
                photo_height=None,
                need_name=None,
                need_phone_number=None,
                need_email=None,
                need_shipping_address=None,
                is_flexible=None,
                disable_notification: bool = None,
                reply_to_message_id: int = None,
                allow_sending_without_reply: bool = None,
                reply_markup=None):
    """ See: https://core.telegram.org/bots/api#sendinvoice """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendInvoice', _rectify(p))
def sendChatAction(self, chat_id: Union[int, str], action):
    """ See: https://core.telegram.org/bots/api#sendchataction """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('sendChatAction', _rectify(p))
def getUserProfilePhotos(self, user_id,
                         offset=None,
                         limit=None):
    """ See: https://core.telegram.org/bots/api#getuserprofilephotos """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('getUserProfilePhotos', _rectify(p))
def getFile(self, file_id):
    """ See: https://core.telegram.org/bots/api#getfile """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('getFile', _rectify(p))
def kickChatMember(self, chat_id: Union[int, str], user_id,
                   until_date: int = None,
                   revoke_messages: bool = None):
    """ See: https://core.telegram.org/bots/api#kickchatmember """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('kickChatMember', _rectify(p))
def unbanChatMember(self, chat_id: Union[int, str], user_id,
                    only_if_banned=None):
    """ See: https://core.telegram.org/bots/api#unbanchatmember """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('unbanChatMember', _rectify(p))
def restrictChatMember(self, chat_id: Union[int, str], user_id,
                       until_date=None,
                       can_send_messages=None,
                       can_send_media_messages=None,
                       can_send_polls=None,
                       can_send_other_messages=None,
                       can_add_web_page_previews=None,
                       can_change_info=None,
                       can_invite_users=None,
                       can_pin_messages=None,
                       permissions=None):
    """ See: https://core.telegram.org/bots/api#restrictchatmember """
    # Backward-compatible convenience: when no 'permissions' dict is given,
    # build one from the individual can_* flags. Note this reassignment
    # happens BEFORE _strip(locals()), so the built dict is what gets sent.
    if not isinstance(permissions, dict):
        permissions = dict(can_send_messages=can_send_messages,
                           can_send_media_messages=can_send_media_messages,
                           can_send_polls=can_send_polls,
                           can_send_other_messages=can_send_other_messages,
                           can_add_web_page_previews=can_add_web_page_previews,
                           can_change_info=can_change_info,
                           can_invite_users=can_invite_users,
                           can_pin_messages=can_pin_messages)
    p = _strip(locals())
    return self._api_request('restrictChatMember', _rectify(p))
def promoteChatMember(self, chat_id: Union[int, str], user_id,
                      is_anonymous=None,
                      can_manage_chat=None,
                      can_post_messages=None,
                      can_edit_messages=None,
                      can_delete_messages=None,
                      can_manage_voice_chats=None,
                      can_restrict_members=None,
                      can_promote_members=None,
                      can_change_info=None,
                      can_invite_users=None,
                      can_pin_messages=None):
    """ See: https://core.telegram.org/bots/api#promotechatmember """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('promoteChatMember', _rectify(p))
def setChatAdministratorCustomTitle(self, chat_id: Union[int, str], user_id,
                                    custom_title):
    """ See: https://core.telegram.org/bots/api#setchatadministratorcustomtitle """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('setChatAdministratorCustomTitle', _rectify(p))
def setChatPermissions(self, chat_id: Union[int, str],
                       can_send_messages=None,
                       can_send_media_messages=None,
                       can_send_polls=None,
                       can_send_other_messages=None,
                       can_add_web_page_previews=None,
                       can_change_info=None,
                       can_invite_users=None,
                       can_pin_messages=None,
                       permissions=None):
    """ See: https://core.telegram.org/bots/api#setchatpermissions """
    # Backward-compatible convenience: when no 'permissions' dict is given,
    # build one from the individual can_* flags. The reassignment happens
    # BEFORE _strip(locals()), so the built dict is what gets sent.
    if not isinstance(permissions, dict):
        permissions = dict(can_send_messages=can_send_messages,
                           can_send_media_messages=can_send_media_messages,
                           can_send_polls=can_send_polls,
                           can_send_other_messages=can_send_other_messages,
                           can_add_web_page_previews=can_add_web_page_previews,
                           can_change_info=can_change_info,
                           can_invite_users=can_invite_users,
                           can_pin_messages=can_pin_messages)
    p = _strip(locals())
    return self._api_request('setChatPermissions', _rectify(p))
def exportChatInviteLink(self, chat_id):
    """ See: https://core.telegram.org/bots/api#exportchatinvitelink """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('exportChatInviteLink', _rectify(p))
def createChatInviteLink(self, chat_id,
                         expire_date: int = None,
                         member_limit: int = None):
    """ See: https://core.telegram.org/bots/api#createchatinvitelink """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('createChatInviteLink', _rectify(p))
def editChatInviteLink(self, chat_id,
                       invite_link: str,
                       expire_date: int = None,
                       member_limit: int = None):
    """ See: https://core.telegram.org/bots/api#editchatinvitelink """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('editChatInviteLink', _rectify(p))
def revokeChatInviteLink(self, chat_id, invite_link: str):
    """ See: https://core.telegram.org/bots/api#revokechatinvitelink """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('revokeChatInviteLink', _rectify(p))
def setChatPhoto(self, chat_id: Union[int, str], photo):
    """ See: https://core.telegram.org/bots/api#setchatphoto """
    # 'photo' is kept out of the plain params; it is handled below as a
    # potential file upload.
    p = _strip(locals(), more=['photo'])
    return self._api_request_with_file('setChatPhoto', _rectify(p), {'photo': photo})
def deleteChatPhoto(self, chat_id):
    """ See: https://core.telegram.org/bots/api#deletechatphoto """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('deleteChatPhoto', _rectify(p))
def setChatTitle(self, chat_id: Union[int, str], title):
    """ See: https://core.telegram.org/bots/api#setchattitle """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('setChatTitle', _rectify(p))
def setChatDescription(self, chat_id: Union[int, str],
                       description=None):
    """ See: https://core.telegram.org/bots/api#setchatdescription """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('setChatDescription', _rectify(p))
def pinChatMessage(self, chat_id: Union[int, str], message_id: int,
                   disable_notification: bool = None):
    """ See: https://core.telegram.org/bots/api#pinchatmessage """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('pinChatMessage', _rectify(p))
def unpinChatMessage(self, chat_id: Union[int, str],
                     message_id=None):
    """ See: https://core.telegram.org/bots/api#unpinchatmessage """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('unpinChatMessage', _rectify(p))
def unpinAllChatMessages(self, chat_id):
    """ See: https://core.telegram.org/bots/api#unpinallchatmessages """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('unpinAllChatMessages', _rectify(p))
def leaveChat(self, chat_id):
    """ See: https://core.telegram.org/bots/api#leavechat """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('leaveChat', _rectify(p))
def getChat(self, chat_id):
    """ See: https://core.telegram.org/bots/api#getchat """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('getChat', _rectify(p))
def getChatAdministrators(self, chat_id):
    """ See: https://core.telegram.org/bots/api#getchatadministrators """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('getChatAdministrators', _rectify(p))
def getChatMembersCount(self, chat_id):
    """ See: https://core.telegram.org/bots/api#getchatmemberscount """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('getChatMembersCount', _rectify(p))
def getChatMember(self, chat_id: Union[int, str], user_id):
    """ See: https://core.telegram.org/bots/api#getchatmember """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('getChatMember', _rectify(p))
def setChatStickerSet(self, chat_id: Union[int, str], sticker_set_name):
    """ See: https://core.telegram.org/bots/api#setchatstickerset """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('setChatStickerSet', _rectify(p))
def deleteChatStickerSet(self, chat_id):
    """ See: https://core.telegram.org/bots/api#deletechatstickerset """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('deleteChatStickerSet', _rectify(p))
def answerCallbackQuery(self, callback_query_id,
                        text=None,
                        show_alert=None,
                        url=None,
                        cache_time=None):
    """ See: https://core.telegram.org/bots/api#answercallbackquery """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('answerCallbackQuery', _rectify(p))
def setMyCommands(self, commands=None):
    """ See: https://core.telegram.org/bots/api#setmycommands """
    # Default to an empty command list; the reassignment happens BEFORE
    # _strip(locals()), so the empty list is what gets sent.
    if commands is None:
        commands = []
    p = _strip(locals())
    return self._api_request('setMyCommands', _rectify(p))
def getMyCommands(self):
    """ See: https://core.telegram.org/bots/api#getmycommands """
    # No parameters: the token embedded in the request identifies the bot.
    return self._api_request('getMyCommands')
def setPassportDataErrors(self, user_id, errors):
    """ See: https://core.telegram.org/bots/api#setpassportdataerrors """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('setPassportDataErrors', _rectify(p))
def answerShippingQuery(self, shipping_query_id, ok,
                        shipping_options=None,
                        error_message=None):
    """ See: https://core.telegram.org/bots/api#answershippingquery """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('answerShippingQuery', _rectify(p))
def answerPreCheckoutQuery(self, pre_checkout_query_id, ok,
                           error_message=None):
    """ See: https://core.telegram.org/bots/api#answerprecheckoutquery """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('answerPreCheckoutQuery', _rectify(p))
def editMessageText(self, msg_identifier, text: str,
                    parse_mode: str = None,
                    entities=None,
                    disable_web_page_preview: bool = None,
                    reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#editmessagetext
    :param msg_identifier:
        a 2-tuple (``chat_id``, ``message_id``),
        a 1-tuple (``inline_message_id``),
        or simply ``inline_message_id``.
        You may extract this value easily with :meth:`amanobot.message_identifier`
    """
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into chat_id/message_id or inline_message_id fields.
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageText', _rectify(p))
def editMessageCaption(self, msg_identifier,
                       caption: str = None,
                       parse_mode: str = None,
                       caption_entities=None,
                       reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#editmessagecaption
    :param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
    """
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into chat_id/message_id or inline_message_id fields.
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageCaption', _rectify(p))
def editMessageMedia(self, msg_identifier, media,
                     reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#editmessagemedia
    :param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
    """
    p = _strip(locals(), more=['msg_identifier', 'media'])
    p.update(_dismantle_message_identifier(msg_identifier))
    # Separate a possible upload from the JSON-serializable media description;
    # the single media item is wrapped in a list for the shared helper.
    legal_media, files_to_attach = _split_input_media_array([media])
    p['media'] = legal_media[0]
    return self._api_request('editMessageMedia', _rectify(p), files_to_attach)
def editMessageReplyMarkup(self, msg_identifier,
                           reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#editmessagereplymarkup
    :param msg_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
    """
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into chat_id/message_id or inline_message_id fields.
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('editMessageReplyMarkup', _rectify(p))
def stopPoll(self, msg_identifier,
             reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#stoppoll
    :param msg_identifier:
        a 2-tuple (``chat_id``, ``message_id``).
        You may extract this value easily with :meth:`amanobot.message_identifier`
    """
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into chat_id/message_id fields.
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('stopPoll', _rectify(p))
def deleteMessage(self, msg_identifier):
    """
    See: https://core.telegram.org/bots/api#deletemessage
    :param msg_identifier:
        Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`,
        except this method does not work on inline messages.
    """
    p = _strip(locals(), more=['msg_identifier'])
    # Expand the identifier into chat_id/message_id fields.
    p.update(_dismantle_message_identifier(msg_identifier))
    return self._api_request('deleteMessage', _rectify(p))
def sendSticker(self, chat_id: Union[int, str], sticker,
                disable_notification: bool = None,
                reply_to_message_id: int = None,
                allow_sending_without_reply: bool = None,
                reply_markup=None):
    """
    See: https://core.telegram.org/bots/api#sendsticker
    :param sticker: Same as ``photo`` in :meth:`amanobot.Bot.sendPhoto`
    """
    # 'sticker' is kept out of the plain params; it is handled below as a
    # potential file upload.
    p = _strip(locals(), more=['sticker'])
    return self._api_request_with_file('sendSticker', _rectify(p), {'sticker': sticker})
def getStickerSet(self, name):
    """
    See: https://core.telegram.org/bots/api#getstickerset
    """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('getStickerSet', _rectify(p))
def uploadStickerFile(self, user_id, png_sticker):
    """
    See: https://core.telegram.org/bots/api#uploadstickerfile
    """
    # 'png_sticker' is kept out of the plain params; it is handled below as a
    # potential file upload.
    p = _strip(locals(), more=['png_sticker'])
    return self._api_request_with_file('uploadStickerFile', _rectify(p), {'png_sticker': png_sticker})
def createNewStickerSet(self, user_id, name, title, emojis,
                        png_sticker=None,
                        tgs_sticker=None,
                        contains_masks=None,
                        mask_position=None):
    """
    See: https://core.telegram.org/bots/api#createnewstickerset
    """
    # Both sticker fields are kept out of the plain params; each is handled
    # below as a potential file upload.
    p = _strip(locals(), more=['png_sticker', 'tgs_sticker'])
    return self._api_request_with_file('createNewStickerSet', _rectify(p),
                                       {'png_sticker': png_sticker, 'tgs_sticker': tgs_sticker})
def addStickerToSet(self, user_id, name, emojis,
                    png_sticker=None,
                    tgs_sticker=None,
                    mask_position=None):
    """
    See: https://core.telegram.org/bots/api#addstickertoset
    """
    # Both sticker fields are kept out of the plain params; each is handled
    # below as a potential file upload.
    p = _strip(locals(), more=['png_sticker', 'tgs_sticker'])
    return self._api_request_with_file('addStickerToSet', _rectify(p),
                                       {'png_sticker': png_sticker, 'tgs_sticker': tgs_sticker})
def setStickerPositionInSet(self, sticker, position):
    """
    See: https://core.telegram.org/bots/api#setstickerpositioninset
    """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('setStickerPositionInSet', _rectify(p))
def deleteStickerFromSet(self, sticker):
    """
    See: https://core.telegram.org/bots/api#deletestickerfromset
    """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('deleteStickerFromSet', _rectify(p))
def setStickerSetThumb(self, name, user_id,
                       thumb=None):
    """
    See: https://core.telegram.org/bots/api#setstickersetthumb
    """
    # 'thumb' is kept out of the plain params; it is handled below as a
    # potential file upload.
    p = _strip(locals(), more=['thumb'])
    return self._api_request_with_file('setStickerSetThumb', _rectify(p), {'thumb': thumb})
def answerInlineQuery(self, inline_query_id, results,
                      cache_time=None,
                      is_personal=None,
                      next_offset=None,
                      switch_pm_text=None,
                      switch_pm_parameter=None):
    """ See: https://core.telegram.org/bots/api#answerinlinequery """
    # Package this call's arguments (captured via locals()) into the request params.
    p = _strip(locals())
    return self._api_request('answerInlineQuery', _rectify(p))
def getUpdates(self,
               offset=None,
               limit=None,
               timeout=None,
               allowed_updates=None,
               _raise_errors=None):
    """ See: https://core.telegram.org/bots/api#getupdates """
    # Fall back to the bot-wide error policy when not overridden per call.
    if _raise_errors is None:
        _raise_errors = self._raise_errors
    # NOTE(review): _raise_errors is a local control flag, not an API field;
    # assumed to be excluded from the payload by _strip — confirm against
    # _strip's definition.
    p = _strip(locals())
    return self._api_request('getUpdates', _rectify(p), raise_errors=_raise_errors)
def setWebhook(self,
               url=None,
               certificate=None,
               ip_address=None,
               max_connections=None,
               allowed_updates=None,
               drop_pending_updates=None):
    """ See: https://core.telegram.org/bots/api#setwebhook """
    # 'certificate' is kept out of the plain params; if supplied, it is
    # attached to the request as a file.
    p = _strip(locals(), more=['certificate'])
    if certificate:
        files = {'certificate': certificate}
        return self._api_request('setWebhook', _rectify(p), files)
    return self._api_request('setWebhook', _rectify(p))
def deleteWebhook(self,
                  drop_pending_updates=None):
    """ See: https://core.telegram.org/bots/api#deletewebhook """
    # Fix: the docstring above previously appeared AFTER the first statement,
    # which made it a no-op string expression instead of the method's __doc__.
    # Moving it first has no runtime effect (it was never assigned or sent).
    p = _strip(locals())
    return self._api_request('deleteWebhook', _rectify(p))
def getWebhookInfo(self):
    """ See: https://core.telegram.org/bots/api#getwebhookinfo """
    # No parameters: the token embedded in the request identifies the bot.
    return self._api_request('getWebhookInfo')
def setGameScore(self, user_id, score, game_message_identifier,
                 force=None,
                 disable_edit_message=None):
    """
    See: https://core.telegram.org/bots/api#setgamescore
    :param game_message_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
    """
    p = _strip(locals(), more=['game_message_identifier'])
    # Expand the identifier into chat_id/message_id or inline_message_id fields.
    p.update(_dismantle_message_identifier(game_message_identifier))
    return self._api_request('setGameScore', _rectify(p))
def getGameHighScores(self, user_id, game_message_identifier):
    """
    See: https://core.telegram.org/bots/api#getgamehighscores
    :param game_message_identifier: Same as ``msg_identifier`` in :meth:`amanobot.Bot.editMessageText`
    """
    p = _strip(locals(), more=['game_message_identifier'])
    # Expand the identifier into chat_id/message_id or inline_message_id fields.
    p.update(_dismantle_message_identifier(game_message_identifier))
    return self._api_request('getGameHighScores', _rectify(p))
def download_file(self, file_id, dest):
    """
    Download a file to local disk.
    :param dest: a path or a ``file`` object
    """
    # Resolve the server-side file path for this file_id first.
    f = self.getFile(file_id)
    try:
        # Open dest ourselves only if it is not already a file-like object.
        d = dest if _isfile(dest) else open(dest, 'wb')
        r = api.download((self._base_url, self._token, f['file_path']), preload_content=False)
        # Stream the body in fixed-size chunks rather than loading it whole.
        while 1:
            data = r.read(self._file_chunk_size)
            if not data:
                break
            d.write(data)
    finally:
        # 'd' and 'r' may be unbound if an exception occurred before their
        # assignment, hence the locals() checks. Only close handles we opened.
        if not _isfile(dest) and 'd' in locals():
            d.close()
        if 'r' in locals():
            r.release_conn()
def message_loop(self, callback=None, relax=0.1,
timeout=20, allowed_updates=None,
source=None, ordered=True, maxhold=3,
run_forever=False):
"""
:deprecated: will be removed in future. Use :class:`.MessageLoop` instead.
Spawn a thread to constantly ``getUpdates`` or pull updates from a queue.
Apply ``callback`` to every message received. Also starts the scheduler thread
for internal events.
:param callback:
a function that takes one argument (the message), or a routing table.
If ``None``, the bot's ``handle`` method is used.
A *routing table* is a dictionary of ``{flavor: function}``, mapping messages to appropriate
handler functions according to their flavors. It allows you to define functions specifically
to handle one flavor of messages. It usually looks like this: ``{'chat': fn1,
'callback_query': fn2, 'inline_query': fn3, ...}``. Each handler function should take
one argument (the message).
:param source:
Source of updates.
If ``None``, ``getUpdates`` is used to obtain new messages from Telegram servers.
If it is a synchronized queue, new messages are pulled from the queue.
A web application implementing a webhook can dump updates into the queue,
while the bot pulls from it. This is how amanobot can be integrated with webhooks.
Acceptable contents in queue:
- ``str`` or ``bytes`` (decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
When ``source`` is ``None``, these parameters are meaningful:
:type relax: float
:param relax: seconds between each ``getUpdates``
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`amanobot.Bot.getUpdates`,
controlling how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`amanobot.Bot.getUpdates`,
controlling which types of updates to receive.
When ``source`` is a queue, these parameters are meaningful:
:type ordered: bool
:param ordered:
If ``True``, ensure in-order delivery of messages to ``callback``
(i.e. updates with a smaller ``update_id`` always come before those with
a larger ``update_id``).
If ``False``, no re-ordering is done. ``callback`` is applied to messages
as soon as they are pulled from queue.
:type maxhold: float
:param maxhold:
Applied only when ``ordered`` is ``True``. The maximum number of seconds
an update is held waiting for a not-yet-arrived smaller ``update_id``.
When this number of seconds is up, the update is delivered to ``callback``
even if some smaller ``update_id``\s have not yet arrived. If those smaller
``update_id``\s arrive at some later time, they are discarded.
Finally, there is this parameter, meaningful always:
:type run_forever: bool or str
:param run_forever:
If ``True`` or any non-empty string, append an infinite loop at the end of
this method, so it never returns. Useful as the very last line in a program.
A non-empty string will also be printed, useful as an indication that the
program is listening.
"""
if callback is None:
callback = self.handle
elif isinstance(callback, dict):
callback = flavor_router(callback)
collect_queue = queue.Queue()
def collector():
while 1:
try:
item = collect_queue.get(block=True)
callback(item)
except:
# Localize error so thread can keep going.
traceback.print_exc()
def relay_to_collector(update):
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'inline_query',
'chosen_inline_result',
'callback_query',
'shipping_query',
'pre_checkout_query',
'poll',
'poll_answer',
'my_chat_member',
'chat_member'])
collect_queue.put(update[key])
return update['update_id']
def get_from_telegram_server():
offset = None # running offset
allowed_upd = allowed_updates
while 1:
try:
result = self.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_upd,
_raise_errors=True)
# Once passed, this parameter is no longer needed.
allowed_upd = None
if len(result) > 0:
# No sort. Trust server to give messages in correct order.
# Update offset to max(update_id) + 1
offset = max([relay_to_collector(update) for update in result]) + 1
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
time.sleep(30)
except:
traceback.print_exc()
finally:
time.sleep(relax)
def dictify(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
if type(data) is str:
return json.loads(data)
if type(data) is dict:
return data
raise ValueError()
def get_from_queue_unordered(qu):
while 1:
try:
data = qu.get(block=True)
update = dictify(data)
relay_to_collector(update)
except:
traceback.print_exc()
def get_from_queue(qu):
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
data = qu.get(block=True, timeout=qwait)
update = dictify(data)
if max_id is None:
# First message received, handle regardless.
max_id = relay_to_collector(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = relay_to_collector(update)
# clear contagious updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(
buffer.popleft()) # updates that arrived earlier, handle them.
else:
break # gap, no more contagious updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
# Update arrives pre-maturely, insert to buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id'] - max_id - 1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except queue.Empty:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = relay_to_collector(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
qwait = max(qwait, 0)
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
collector_thread = threading.Thread(target=collector)
collector_thread.daemon = True
collector_thread.start()
if source is None:
message_thread = threading.Thread(target=get_from_telegram_server)
elif isinstance(source, queue.Queue):
if ordered:
message_thread = threading.Thread(target=get_from_queue, args=(source,))
else:
message_thread = threading.Thread(target=get_from_queue_unordered, args=(source,))
else:
raise ValueError('Invalid source')
message_thread.daemon = True # need this for main thread to be killable by Ctrl-C
message_thread.start()
self._scheduler.on_event(collect_queue.put)
self._scheduler.run_as_thread()
if run_forever:
if _isstring(run_forever):
print(run_forever)
while 1:
time.sleep(10)
class SpeakerBot(Bot):
    """A Bot that broadcasts every received message through a Microphone."""

    def __init__(self, token):
        super(SpeakerBot, self).__init__(token)
        self._mic = helper.Microphone()

    @property
    def mic(self):
        """The shared Microphone used to broadcast messages."""
        return self._mic

    def create_listener(self):
        """Register a fresh queue on the microphone and wrap it in a Listener."""
        feed = queue.Queue()
        self._mic.add(feed)
        return helper.Listener(self._mic, feed)
class DelegatorBot(SpeakerBot):
    """A SpeakerBot that spawns one delegate (thread-like worker) per seed."""

    def __init__(self, token, delegation_patterns):
        """
        :param delegation_patterns: a list of (seeder, delegator) tuples.
        """
        super(DelegatorBot, self).__init__(token)
        # Each record is (calculate_seed, make_delegate, registry-dict of live delegates).
        self._delegate_records = [p + ({},) for p in delegation_patterns]

    @staticmethod
    def _startable(delegate):
        # Thread-like duck typing: needs bound start() and is_alive() methods.
        return ((hasattr(delegate, 'start') and inspect.ismethod(delegate.start)) and
                (hasattr(delegate, 'is_alive') and inspect.ismethod(delegate.is_alive)))

    @staticmethod
    def _tuple_is_valid(t):
        # A valid spec is (callable, args-sequence, kwargs-dict).
        return len(t) == 3 and callable(t[0]) and type(t[1]) in [list, tuple] and type(t[2]) is dict

    def _ensure_startable(self, delegate):
        """Coerce *delegate* into an object with start()/is_alive(), or raise."""
        if self._startable(delegate):
            return delegate
        if callable(delegate):
            return threading.Thread(target=delegate)
        if type(delegate) is tuple and self._tuple_is_valid(delegate):
            func, args, kwargs = delegate
            return threading.Thread(target=func, args=args, kwargs=kwargs)
        raise RuntimeError(
            'Delegate does not have the required methods, is not callable, and is not a valid tuple.')

    def handle(self, msg):
        """Broadcast *msg* to listeners, then dispatch it to per-seed delegates."""
        self._mic.send(msg)
        # collections.Hashable was an alias removed in Python 3.10; the real
        # home is collections.abc (since 3.3). Fall back for old interpreters.
        try:
            from collections.abc import Hashable
        except ImportError:  # Python 2 / very old Python 3
            from collections import Hashable
        # Avoid shadowing the builtins `dict` and `id` (original code did).
        for calculate_seed, make_delegate, registry in self._delegate_records:
            seed = calculate_seed(msg)
            if seed is None:
                # Seeder declined this message.
                continue
            elif isinstance(seed, Hashable):
                # At most one live delegate per seed; respawn if the old one died.
                if seed not in registry or not registry[seed].is_alive():
                    d = make_delegate((self, msg, seed))
                    d = self._ensure_startable(d)
                    registry[seed] = d
                    registry[seed].start()
            else:
                # Unhashable seed: always spawn a fresh, untracked delegate.
                d = make_delegate((self, msg, seed))
                d = self._ensure_startable(d)
                d.start()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP Basic auth."""
    # Request id counter. NOTE(review): `self.OBJID += 1` reads the class
    # attribute then binds an instance attribute, so ids are per-instance.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # Build the Basic auth header once; reused for every request.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Non-strict HTTP connection with a 30-second timeout, kept alive
        # across rpc() calls.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC call; return its 'result', the 'error' object, or None."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # A non-null 'error' member is returned to the caller as-is
        # (callers must distinguish it from a successful 'result').
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        # Convenience wrapper for the 'getblockcount' RPC.
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # Convenience wrapper for 'getwork': fetch work (data=None) or
        # submit a solution (data=[solution]).
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    # Plain 0xffffffff (no Python-2-only 'L' suffix) is numerically
    # identical on Python 2 and works unchanged on Python 3.
    return x & 0xffffffff
def bytereverse(x):
    """Reverse the byte order of a 32-bit word.

    E.g. 0x12345678 -> 0x78563412.
    """
    # The final mask replaces the original call to uint32(); inlining it
    # makes this function self-contained and avoids uint32's former
    # Python-2-only 0xffffffffL literal. Result is bit-identical.
    return (((x << 24) | ((x << 8) & 0x00ff0000) |
             ((x >> 8) & 0x0000ff00) | (x >> 24)) & 0xffffffff)
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of *in_buf*.

    len(in_buf) must be a multiple of 4; struct.unpack raises otherwise.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        # Unpack as a native-endian word, reverse its bytes, repack.
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    # b''.join is identical to ''.join on Python 2 (b'' is '') and also
    # works when in_buf is bytes on Python 3, where ''.join would fail.
    return b''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in *in_buf*.

    Bytes inside each 4-byte word keep their order; only the words swap.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    # b''.join is identical to ''.join on Python 2 (b'' is '') and also
    # works for bytes input on Python 3, where ''.join would raise.
    return b''.join(out_words)
class Miner:
    """One mining worker: fetch getwork jobs over RPC, scan nonces, submit hits."""

    def __init__(self, id):
        self.id = id
        # Upper bound of the nonce scan; retuned every round so one
        # iteration lasts roughly settings['scantime'] seconds.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces against one getwork job.

        Returns (hashes_done, nonce_bin) where nonce_bin is the packed
        winning nonce, or None if no solution was found in this scan.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header (shared by every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
            # return (nonce + 1, nonce_bin)
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and submit it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Nonce occupies hex chars 152-160 of the original getwork data.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round: fetch work, scan, retune max_nonce, submit if solved."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # Retune the scan size so the next round takes ~scantime seconds,
        # capped just below 2^32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the configured bitcoind."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner with the given worker id forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # Parse the config file: '#' comment lines are skipped, everything
    # else must be key=value.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # Fill in defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 17900
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # Config values arrive as strings; normalize numeric types once here.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # Spawn one mining process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    # Block until the workers exit (Ctrl-C just stops the waiting).
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test.py | # -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
from RLTest import Env
from includes import *
from common import *
# this test is no longer relevant
# def testAdd(env):
# if env.is_cluster():
# raise unittest.SkipTest()
# r = env
# env.assertOk(r.execute_command(
# 'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
# env.assertTrue(r.exists('idx:idx'))
# env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
# 'title', 'hello world',
# 'body', 'lorem ist ipsum'))
# for _ in r.retry_with_rdb_reload():
# prefix = 'ft'
# env.assertExists(prefix + ':idx/hello')
# env.assertExists(prefix + ':idx/world')
# env.assertExists(prefix + ':idx/lorem')
def testAddErrors(env):
    """FT.ADD argument validation: each malformed call yields a specific error."""
    expect = env.expect
    expect('ft.create idx ON HASH schema foo text bar numeric sortable').equal('OK')
    expect('ft.add idx doc1 1 redis 4').error().contains('Unknown keyword')
    expect('ft.add idx doc1').error().contains("wrong number of arguments")
    expect('ft.add idx doc1 42').error().contains("Score must be between 0 and 1")
    expect('ft.add idx doc1 1.0').error().contains("No field list found")
    expect('ft.add fake_idx doc1 1.0 fields foo bar').error().contains("Unknown index name")
def assertEqualIgnoreCluster(env, val1, val2):
    """Assert equality, except on cluster where known coordinator issues exist.

    TODO: once the coordinator issues are fixed, callers should switch
    back to plain env.assertEqual.
    """
    if not env.isCluster():
        env.assertEqual(val1, val2)
def testConditionalUpdate(env):
    """FT.ADD ... REPLACE IF <expr>: conditional update semantics (OK vs NOADD)."""
    env.assertOk(env.cmd(
        'ft.create', 'idx','ON', 'HASH',
        'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
                         'fields', 'foo', 'hello', 'bar', '123'))
    # Condition true -> document replaced.
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
                         '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
    # Condition now false (foo is 'world') -> NOADD.
    env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
                                     'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
    # Constant false condition -> NOADD.
    env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
                                     'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
    # PARTIAL update with a true condition.
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
                         '@foo == "world"', 'fields', 'bar', '234'))
    # Numeric comparison against the field just updated.
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
                         '@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
    # Ensure that conditionals are ignored if the document doesn't exist
    env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
                         'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
    # Ensure that it fails if we try again, because it already exists
    env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
                                     'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
    # Ensure that it fails because we're not using 'REPLACE'
    with env.assertResponseError():
        env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
                             'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
    # Regression test for https://github.com/RediSearch/RediSearch/issues/306
    # (union of a GEO filter result with a TAG id-list iterator).
    r = env
    N = 100
    env.assertOk(r.execute_command(
        "ft.create", "test", 'ON', 'HASH',
        "SCHEMA", "tags", "TAG", "waypoint", "GEO"))
    env.assertOk(r.execute_command(
        "ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
    env.assertOk(r.execute_command(
        "ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
    r.cmd('ft.search', 'test', '@tags:{ontario}')
    # Union of the geo match (doc 1) and the tag match (doc 2) must
    # return both documents.
    res = r.execute_command(
        'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
    env.assertEqual(res, [2L, '1', '2'])
def testAttributes(env):
    """Query attributes: $weight, $slop and $inorder affect ranking/matching."""
    env.assertOk(env.cmd('ft.create', 'idx','ON', 'HASH',
                         'schema', 'title', 'text', 'body', 'text'))
    env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
                         'title', 't1 t2', 'body', 't3 t4 t5'))
    env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
                         'body', 't1 t2', 'title', 't3 t5'))
    # Heavier weight on the body clause ranks doc2 first...
    res = env.cmd(
        'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
    env.assertListEqual([2L, 'doc2', 'doc1'], res)
    # ...heavier weight on the title clause flips the order.
    res = env.cmd(
        'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
    env.assertListEqual([2L, 'doc1', 'doc2'], res)
    # $slop 4 tolerates gaps between the terms: both docs match.
    res = env.cmd(
        'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
    env.assertListEqual([2L, 'doc2', 'doc1'], res)
    # $slop 0 requires adjacency (order still free): only doc2 matches.
    res = env.cmd(
        'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
    env.assertListEqual([1L, 'doc2'], res)
    # $inorder additionally requires the query term order: nothing matches.
    res = env.cmd(
        'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
    env.assertListEqual([0], res)
def testUnion(env):
    """Union queries: expected hit counts for term/union combinations."""
    N = 100
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx','ON', 'HASH', 'schema', 'f', 'text'))
    # Even docs say 'hello world', odd docs say 'hallo werld'.
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(r, 'idx')
        # 'hello|hallo' covers every document.
        res = r.execute_command(
            'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
        env.assertEqual(N + 1, len(res))
        env.assertEqual(N, res[0])
        # 'hello|world' only matches the even half.
        res = r.execute_command(
            'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
        env.assertEqual(51, len(res))
        env.assertEqual(50, res[0])
        # Self-unions behave like the single term.
        res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
                                'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(51, len(res))
        env.assertEqual(50, res[0])
        # Cross unions that pair each doc's vocabulary match everything.
        res = r.execute_command(
            'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command(
            'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command(
            'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
        # Intersection with the mandatory term 'world' keeps the even half.
        res = r.execute_command(
            'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(51, len(res))
        env.assertEqual(50, res[0])
        # Nested unions still cover everything.
        res = r.execute_command(
            'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)',
            'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
def testSearch(env):
    """Basic FT.SEARCH: content, empty query, NOCONTENT and WITHSCORES forms."""
    r = env
    r.expect('ft.create', 'idx', 'ON', 'HASH',
             'schema', 'title', 'text', 'weight', 10.0, 'body', 'text').ok()
    r.expect('ft.add', 'idx', 'doc1', 0.5,
             'fields','title', 'hello world', 'body', 'lorem ist ipsum').ok()
    r.expect('ft.add', 'idx', 'doc2', 1.0,
             'fields', 'title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem').ok()
    # order of documents might change after reload
    for _ in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')
        res = r.execute_command('ft.search', 'idx', 'hello')
        expected = [2L, 'doc2', ['title', 'hello another world', 'body', 'lorem ist ipsum lorem lorem'],
                    'doc1', ['title', 'hello world', 'body', 'lorem ist ipsum']]
        env.assertEqual(toSortedFlatList(res), toSortedFlatList(expected))
        # Test empty query
        res = r.execute_command('ft.search', 'idx', '')
        env.assertListEqual([0], res)
        # Test searching with no content
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent')
        env.assertTrue(len(res) == 3)
        expected = ['doc2', 'doc1']
        env.assertEqual(res[0], 2L)
        for item in expected:
            env.assertIn(item, res)
        # Test searching WITHSCORES: [count, id, score, fields, ...]
        res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES')
        env.assertEqual(len(res), 7)
        env.assertEqual(res[0], 2L)
        for item in expected:
            env.assertIn(item, res)
        env.assertTrue(float(res[2]) > 0)
        env.assertTrue(float(res[5]) > 0)
        # Test searching WITHSCORES NOCONTENT: [count, id, score, ...]
        res = r.execute_command('ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
        env.assertEqual(len(res), 5)
        env.assertEqual(res[0], 2L)
        for item in expected:
            env.assertIn(item, res)
        env.assertTrue(float(res[2]) > 0)
        env.assertTrue(float(res[4]) > 0)
def testGet(env):
    """FT.GET / FT.MGET: argument validation, fetching, and deleted-doc behavior."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH',
        'schema', 'foo', 'text', 'bar', 'text'))
    # Arity and unknown-index errors.
    env.expect('ft.get').error().contains("wrong number of arguments")
    env.expect('ft.get', 'idx').error().contains("wrong number of arguments")
    env.expect('ft.get', 'idx', 'foo', 'bar').error().contains("wrong number of arguments")
    env.expect('ft.mget').error().contains("wrong number of arguments")
    env.expect('ft.mget', 'idx').error().contains("wrong number of arguments")
    env.expect('ft.mget', 'fake_idx').error().contains("wrong number of arguments")
    env.expect('ft.get fake_idx foo').error().contains("Unknown Index name")
    env.expect('ft.mget fake_idx foo').error().contains("Unknown Index name")
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'hello world', 'bar', 'wat wat'))
    # Every stored doc is retrievable; missing ids return None.
    for i in range(100):
        res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
        env.assertIsNotNone(res)
        env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res))
        env.assertIsNone(r.execute_command(
            'ft.get', 'idx', 'doc%dsdfsd' % i))
    env.expect('ft.get', 'no_idx', 'doc0').error().contains("Unknown Index name")
    rr = r.execute_command(
        'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
    env.assertEqual(len(rr), 100)
    for res in rr:
        env.assertIsNotNone(res)
        env.assertEqual(set(['foo', 'hello world', 'bar', 'wat wat']), set(res))
    # MGET with all-missing ids yields a None per requested id.
    rr = r.execute_command(
        'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
    env.assertEqual(len(rr), 100)
    for res in rr:
        env.assertIsNone(res)
    # Verify that when a document is deleted, GET returns NULL
    r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document
    r.cmd('ft.del', 'idx', 'doc11')
    assert r.cmd('ft.del', 'idx', 'coverage') == 0
    res = r.cmd('ft.get', 'idx', 'doc10')
    r.assertEqual(None, res)
    res = r.cmd('ft.mget', 'idx', 'doc10')
    r.assertEqual([None], res)
    res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
    r.assertIsNone(res[0])
    r.assertIsNone(res[1])
    r.assertTrue(not not res[2])
    # Language/payload options are stored alongside the fields in the hash.
    env.expect('ft.add idx doc 0.1 language arabic payload redislabs fields foo foo').ok()
    env.expect('ft.get idx doc').equal(['foo', 'foo'])
    res = env.cmd('hgetall doc')
    env.assertEqual(set(res), set(['foo', 'foo', '__score', '0.1', '__language', 'arabic', '__payload', 'redislabs']))
def testDelete(env):
    """FT.DEL: deletion, DD (delete-document) flag, and reinsertion."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text'))
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world'))
    env.expect('ft.del', 'fake_idx', 'doc1').error()
    for i in range(100):
        # the doc hash should exist now
        r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
        # Delete the actual docs only half of the time
        env.assertEqual(1, r.execute_command(
            'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
        # second delete should return 0
        env.assertEqual(0, r.execute_command(
            'ft.del', 'idx', 'doc%d' % i))
        # second delete should return 0
        # TODO: return 0 if doc wasn't found
        #env.assertEqual(0, r.execute_command(
        #    'ft.del', 'idx', 'doc%d' % i))
        # After del with DD the doc hash should not exist
        if i % 2 == 0:
            env.assertFalse(r.exists('doc%d' % i))
        else:
            r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
        # The deleted doc disappears from search results and the count drops.
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
        env.assertNotIn('doc%d' % i, res)
        env.assertEqual(res[0], 100 - i - 1)
        env.assertEqual(len(res), 100 - i)
        # test reinsertion
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world'))
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
        env.assertIn('doc%d' % i, res)
        env.assertEqual(1, r.execute_command(
            'ft.del', 'idx', 'doc%d' % i))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')
        # Add/del cycles survive an RDB reload.
        did = 'rrrr'
        env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
                                       'f', 'hello world'))
        env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
        env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
        env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
                                       'f', 'hello world'))
        env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
        env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
    """FT.ADD REPLACE: old content is un-indexed, new content becomes searchable."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
                                   'f', 'hello world'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                                   'f', 'hello world'))
    res = r.execute_command(
        'ft.search', 'idx', 'hello world')
    env.assertEqual(2, res[0])
    with env.assertResponseError():
        # make sure we can't insert a doc twice
        res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
                                'f', 'hello world')
    # now replace doc1 with a different content
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
                                   'f', 'goodbye universe'))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')
        # make sure the query for hello world does not return the replaced
        # document
        res = r.execute_command(
            'ft.search', 'idx', 'hello world', 'nocontent')
        env.assertEqual(1, res[0])
        env.assertEqual('doc2', res[1])
        # search for the doc's new content
        res = r.execute_command(
            'ft.search', 'idx', 'goodbye universe', 'nocontent')
        env.assertEqual(1, res[0])
        env.assertEqual('doc1', res[1])
def testDrop(env):
    """FT.DROP removes the index and docs; with KEEPDOCS the hashes survive."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world', 'n', 666, 't', 'foo bar',
                                       'g', '19.04,47.497'))
    keys = r.keys('*')
    env.assertGreaterEqual(len(keys), 100)
    # Plain DROP deletes the index together with all document hashes.
    env.assertOk(r.execute_command('ft.drop', 'idx'))
    keys = r.keys('*')
    env.assertEqual(0, len(keys))
    env.flush()
    # Now do the same with KEEPDOCS
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH',
        'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world', 'n', 666, 't', 'foo bar',
                                       'g', '19.04,47.497'))
    keys = r.keys('*')
    env.assertGreaterEqual(len(keys), 100)
    if not env.is_cluster():
        env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
        keys = r.keys('*')
        # Generate the expected key list instead of the original 100-element
        # literal; lexicographically sorted 'doc0'..'doc99' is identical.
        env.assertListEqual(sorted('doc%d' % k for k in range(100)), sorted(keys))
    env.expect('FT.DROP', 'idx', 'KEEPDOCS', '666').error().contains("wrong number of arguments")
def testDropIndex(env):
    """FT.DROPINDEX: 'dd' also deletes the docs; without it they are kept.

    Renamed from testDelete: a function of that name is already defined
    earlier in this file, and the duplicate definition silently shadowed
    it so only one of the two tests ever ran.
    """
    r = env
    r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok()
    for i in range(100):
        r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
                 'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar',
                 'g', '19.04,47.497').ok()
    keys = r.keys('*')
    env.assertGreaterEqual(len(keys), 100)
    # DROPINDEX with 'dd' (delete docs) removes everything.
    r.expect('FT.DROPINDEX', 'idx', 'dd').ok()
    keys = r.keys('*')
    env.assertEqual(0, len(keys))
    env.flush()
    # Now do the same with KEEPDOCS
    env.expect('ft.create', 'idx', 'ON', 'HASH',
               'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo').ok()
    for i in range(100):
        r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
                 'fields', 'f', 'hello world', 'n', 666, 't', 'foo bar',
                 'g', '19.04,47.497').ok()
    keys = r.keys('*')
    env.assertGreaterEqual(len(keys), 100)
    if not env.is_cluster():
        # Without 'dd' the index is dropped but the document hashes remain.
        r.expect('FT.DROPINDEX', 'idx').ok()
        keys = r.keys('*')
        env.assertListEqual(sorted("doc%d" %k for k in range(100)), sorted(keys))
    env.expect('FT.DROPINDEX', 'idx', 'dd', '666').error().contains("wrong number of arguments")
def testCustomStopwords(env):
    """Per-index stopword lists: default, custom, and disabled (STOPWORDS 0)."""
    r = env
    # Index with default stopwords
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
    # Index with custom stopwords
    env.assertOk(r.execute_command('ft.create', 'idx2', 'ON', 'HASH', 'stopwords', 2, 'hello', 'world',
                                   'schema', 'foo', 'text'))
    assertInfoField(env, 'idx2', 'stopwords_list', ['hello', 'world'])
    # Index with NO stopwords
    env.assertOk(r.execute_command('ft.create', 'idx3', 'ON', 'HASH', 'stopwords', 0,
                                   'schema', 'foo', 'text'))
    assertInfoField(env, 'idx3', 'stopwords_list', [])
    # 2nd Index with NO stopwords - check global is used and freed
    env.assertOk(r.execute_command('ft.create', 'idx4', 'ON', 'HASH', 'stopwords', 0,
                                   'schema', 'foo', 'text'))
    #for idx in ('idx', 'idx2', 'idx3'):
    env.assertOk(r.execute_command(
        'ft.add', 'idx', 'doc1', 1.0, 'fields', 'foo', 'hello world'))
    env.assertOk(r.execute_command(
        'ft.add', 'idx', 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(r, 'idx')
        # Normal index should return results just for 'hello world'
        env.assertEqual([1, 'doc1'], r.execute_command(
            'ft.search', 'idx', 'hello world', 'nocontent'))
        env.assertEqual([0], r.execute_command(
            'ft.search', 'idx', 'to be or not', 'nocontent'))
        # Custom SW index should return results just for 'to be or not'
        env.assertEqual([0], r.execute_command(
            'ft.search', 'idx2', 'hello world', 'nocontent'))
        env.assertEqual([1, 'doc2'], r.execute_command(
            'ft.search', 'idx2', 'to be or not', 'nocontent'))
        # No SW index should return results for both
        env.assertEqual([1, 'doc1'], r.execute_command(
            'ft.search', 'idx3', 'hello world', 'nocontent'))
        env.assertEqual([1, 'doc2'], r.execute_command(
            'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
    """A query made only of custom stopwords matches nothing; mixed queries still match."""
    # This test was taken from Python's tests, and failed due to some changes
    # made earlier
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'stopwords', 3, 'foo',
            'bar', 'baz', 'schema', 'txt', 'text')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
    env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
    only_stopwords = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
    mixed_terms = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
    env.assertEqual(0, only_stopwords[0])
    env.assertEqual(1, mixed_terms[0])
def testNoStopwords(env):
    """Default stopwords vs the VERBATIM and NOSTOPWORDS query flags."""
    # This test taken from Java's test suite
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text')
    for i in range(100):
        env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields',
                'title', 'hello world' if i % 2 == 0 else 'hello worlds')
    # 'a' is a default stopword and is dropped from the query: all docs match.
    res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
    env.assertEqual(100, res[0])
    # VERBATIM disables query expansion, so the 'worlds' docs stop matching.
    res = env.cmd('ft.search', 'idx', 'hello a world',
                  'VERBATIM', 'NOCONTENT')
    env.assertEqual(50, res[0])
    # NOSTOPWORDS keeps 'a' in the query; no document contains it.
    res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
    env.assertEqual(0, res[0])
def testOptional(env):
    """Optional terms (~term) widen results without excluding documents."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx',
                                   'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
                                   1.0, 'fields', 'foo', 'hello world woot'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
                                   1.0, 'fields', 'foo', 'hello world werld'))
    expected = [3L, 'doc1', 'doc2', 'doc3']
    res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
    env.assertEqual(res, expected)
    # Mandatory 'world' excludes doc1...
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual([2L, 'doc2', 'doc3'], res)
    # ...but optional '~world' keeps all three documents.
    res = r.execute_command(
        'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual(res, expected)
    res = r.execute_command(
        'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual(res, expected)
    # Optional matches still contribute to the DISMAX score (3, 2, 1 hits).
    res = r.execute_command(
        'ft.search', 'idx', '~world ~werld hello', 'withscores', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual(res, [3L, 'doc3', '3', 'doc2', '2', 'doc1', '1'])
def testExplain(env):
    """FT.EXPLAIN / FT.EXPLAINCLI render the parsed query execution tree."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH',
        'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
    q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
    res = r.execute_command('ft.explain', 'idx', q)
    # print res.replace('\n', '\\n')
    # expected = """INTERSECT {\n  UNION {\n    hello\n    +hello(expanded)\n  }\n  UNION {\n    world\n    +world(expanded)\n  }\n  EXACT {\n    what\n    what\n  }\n  UNION {\n    UNION {\n      hello\n      +hello(expanded)\n    }\n    UNION {\n      world\n      +world(expanded)\n    }\n  }\n  UNION {\n    NUMERIC {10.000000 <= @bar <= 100.000000}\n    NUMERIC {200.000000 <= @bar <= 300.000000}\n  }\n}\n"""
    # expected = """INTERSECT {\n  UNION {\n    hello\n    <HL(expanded)\n    +hello(expanded)\n  }\n  UNION {\n    world\n    <ARLT(expanded)\n    +world(expanded)\n  }\n  EXACT {\n    what\n    what\n  }\n  UNION {\n    UNION {\n      hello\n      <HL(expanded)\n      +hello(expanded)\n    }\n    UNION {\n      world\n      <ARLT(expanded)\n      +world(expanded)\n    }\n  }\n  UNION {\n    NUMERIC {10.000000 <= @bar <= 100.000000}\n    NUMERIC {200.000000 <= @bar <= 300.000000}\n  }\n}\n"""
    expected = """INTERSECT {\n  UNION {\n    hello\n    +hello(expanded)\n  }\n  UNION {\n    world\n    +world(expanded)\n  }\n  EXACT {\n    what\n    what\n  }\n  UNION {\n    UNION {\n      hello\n      +hello(expanded)\n    }\n    UNION {\n      world\n      +world(expanded)\n    }\n  }\n  UNION {\n    NUMERIC {10.000000 <= @bar <= 100.000000}\n    NUMERIC {200.000000 <= @bar <= 300.000000}\n  }\n}\n"""
    env.assertEqual(res, expected)
    # expected = ['INTERSECT {', '  UNION {', '    hello', '    <HL(expanded)', '    +hello(expanded)', '  }', '  UNION {', '    world', '    <ARLT(expanded)', '    +world(expanded)', '  }', '  EXACT {', '    what', '    what', '  }', '  UNION {', '    UNION {', '      hello', '      <HL(expanded)', '      +hello(expanded)', '    }', '    UNION {', '      world', '      <ARLT(expanded)', '      +world(expanded)', '    }', '  }', '  UNION {', '    NUMERIC {10.000000 <= @bar <= 100.000000}', '    NUMERIC {200.000000 <= @bar <= 300.000000}', '  }', '}', '']
    # EXPLAINCLI is not supported by the coordinator.
    if env.is_cluster():
        raise unittest.SkipTest()
    res = env.cmd('ft.explainCli', 'idx', q)
    expected = ['INTERSECT {', '  UNION {', '    hello', '    +hello(expanded)', '  }', '  UNION {', '    world', '    +world(expanded)', '  }', '  EXACT {', '    what', '    what', '  }', '  UNION {', '    UNION {', '      hello', '      +hello(expanded)', '    }', '    UNION {', '      world', '      +world(expanded)', '    }', '  }', '  UNION {', '    NUMERIC {10.000000 <= @bar <= 100.000000}', '    NUMERIC {200.000000 <= @bar <= 300.000000}', '  }', '}', '']
    env.assertEqual(expected, res)
def testNoIndex(env):
    """NOINDEX fields are stored (and usable with SORTABLE) but must be
    excluded from the inverted index, so they never match queries."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema',
        'foo', 'text',
        'num', 'numeric', 'sortable', 'noindex',
        'extra', 'text', 'noindex', 'sortable'))
    if not env.isCluster():
        # to specific check on cluster, todo : change it to be generic enough
        res = env.cmd('ft.info', 'idx')
        # field descriptors live at res[7]; check the NOINDEX flag position
        # for the 'num' and 'extra' fields
        env.assertEqual(res[7][1][7], 'NOINDEX')
        env.assertEqual(res[7][2][9], 'NOINDEX')
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
                                   'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
    # indexed field matches normally
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'nocontent')
    env.assertListEqual([1, 'doc1'], res)
    # terms that only appear in NOINDEX fields must not be searchable,
    # neither as free text, field-scoped text, nor numeric range
    res = r.execute_command(
        'ft.search', 'idx', 'lorem ipsum', 'nocontent')
    env.assertListEqual([0], res)
    res = r.execute_command(
        'ft.search', 'idx', '@extra:hello', 'nocontent')
    env.assertListEqual([0], res)
    res = r.execute_command(
        'ft.search', 'idx', '@num:[1 1]', 'nocontent')
    env.assertListEqual([0], res)
def testPartial(env):
    """REPLACE PARTIAL semantics: updating only NOINDEX fields, the score,
    or the payload must not reindex the document, while updating an indexed
    field must change search results."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'SCORE_FIELD', '__score',
        'schema',
        'foo', 'text',
        'num', 'numeric', 'sortable', 'noindex',
        'extra', 'text', 'noindex'))
    # print r.execute_command('ft.info', 'idx')
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
                                   'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
                                   'foo', 'hello world', 'num', 2, 'extra', 'abba'))
    # sortkeys for numeric fields are rendered as '#<value>'
    res = r.execute_command('ft.search', 'idx', 'hello world',
                            'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
    env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
    res = r.execute_command('ft.search', 'idx', 'hello world',
                            'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
    env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
    # Updating non indexed fields doesn't affect search results
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
                                   'fields', 'num', 3, 'extra', 'jorem gipsum'))
    # partial replace of a doc that doesn't exist yet must also succeed
    env.expect('ft.add', 'idx', 'doc12', '0.1', 'replace', 'partial',
               'fields', 'num1', 'redis').equal('OK')
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
    assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3', 'extra', 'jorem gipsum'],
                             'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
    # NOTE(review): this result is fetched but never asserted
    res = r.execute_command(
        'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
    # Updating only indexed field affects search results
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
                                   'fields', 'foo', 'wat wet'))
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'nocontent')
    env.assertListEqual([1L, 'doc2'], res)
    res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
    env.assertListEqual([1L, 'doc1'], res)
    # Test updating of score and no fields
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
    env.assertLess(float(res[2]), 1)  # score is still the original 0.1
    # env.assertListEqual([1L, 'doc1'], res)
    env.assertOk(r.execute_command('ft.add', 'idx',
                                   'doc1', '1.0', 'replace', 'partial', 'fields'))
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
    # We reindex though no new fields, just score is updated. this effects score
    env.assertEqual(float(res[2]), 1)
    # Test updating payloads
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
    env.assertIsNone(res[2])  # no payload was ever set
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
                                   'replace', 'partial', 'payload', 'foobar', 'fields'))
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
    env.assertEqual('foobar', res[2])
def testPaging(env):
    """LIMIT paging: walk a 100-doc sorted result set in randomly-sized
    chunks and verify bounds checking of offset/num arguments."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
    N = 100
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
                                       'foo', 'hello', 'bar', i))
    chunk = 7
    offset = 0
    while True:
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
        # total hit count is constant regardless of the page requested
        env.assertEqual(res[0], N)
        if offset + chunk > N:
            # last (possibly short) page
            env.assertTrue(len(res) - 1 <= chunk)
            break
        env.assertEqual(len(res), chunk + 1)
        # descending sort by bar: doc ids (= bar values) run N-1 down to 0
        for n, id in enumerate(res[1:]):
            env.assertEqual(int(id), N - 1 - (offset + n))
        offset += chunk
        chunk = random.randrange(1, 10)
    # offset at the end of the result set yields an empty page, correct total
    res = r.execute_command(
        'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
    env.assertEqual(res[0], N)
    env.assertEqual(len(res), 1)
    # invalid LIMIT arguments must be rejected
    with env.assertResponseError():
        r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
    with env.assertResponseError():
        r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
    with env.assertResponseError():
        r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
    """Prefix queries (term*) expand to all indexed terms sharing the prefix;
    verified before and after an RDB reload."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
    N = 100
    for i in range(N):
        # every doc contains 'constant' plus one of term0..term4
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'constant term%d' % (random.randrange(0, 5))))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(r, 'idx')
        # bare 'term' matches nothing: only term0..term4 were indexed
        res = r.execute_command(
            'ft.search', 'idx', 'constant term', 'nocontent')
        env.assertEqual([0], res)
        res = r.execute_command(
            'ft.search', 'idx', 'constant term*', 'nocontent')
        env.assertEqual(N, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'const* term*', 'nocontent')
        env.assertEqual(N, res[0])
        # term1* matches a random-but-nonempty subset
        res = r.execute_command(
            'ft.search', 'idx', 'constant term1*', 'nocontent')
        env.assertGreater(res[0], 2)
        # negated prefix over all term variants excludes everything
        res = r.execute_command(
            'ft.search', 'idx', 'const* -term*', 'nocontent')
        env.assertEqual([0], res)
        # prefix with no matching terms
        res = r.execute_command(
            'ft.search', 'idx', 'constant term9*', 'nocontent')
        env.assertEqual([0], res)
def testSortBy(env):
    """SORTBY on SORTABLE text and numeric fields, both directions, plus
    WITHSCORES/WITHSORTKEYS replies; verified across an RDB reload."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
    N = 100
    for i in range(N):
        # foo sorts ascending with i; bar (100 - i) sorts the opposite way
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'hello%03d world' % i, 'bar', 100 - i))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(r, 'idx')
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
        env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
                         'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
        env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
                         'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
        env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
                         'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
        env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
                         'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
        # when sorting, WITHSCORES reports a flat score of 1 for every doc
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
        env.assertEqual(
            [100L, 'doc2', '1', 'doc3', '1', 'doc4', '1', 'doc5', '1', 'doc6', '1'], res)
        # WITHSORTKEYS: numeric keys are '#<value>', string keys '$<value>'
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
        env.assertListEqual(
            [100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
        env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
                             '$hello096 world', 'doc95', '$hello095 world'], res)
def testSortByWithoutSortable(env):
    """SORTBY must also work on fields that were NOT declared SORTABLE
    (loading the value from the document), matching testSortBy's results."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'baz', 'text', 'sortable'))
    N = 100
    for i in range(N):
        # foo sorts ascending with i; bar (100 - i) sorts the opposite way
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'hello%03d world' % i, 'bar', 100 - i))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(r, 'idx')
        # test text
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
        env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
                         'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
        env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
                         'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
        # WITHSORTKEYS renders text keys as '$<value>'
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
        env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
                             '$hello096 world', 'doc95', '$hello095 world'], res)
        # test numeric
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
        env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
                         'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
        env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
                         'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
        # when sorting, scores are reported as a flat 1
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
        env.assertEqual(
            [100L, 'doc2', '1', 'doc3', '1', 'doc4', '1', 'doc5', '1', 'doc6', '1'], res)
        # numeric sort keys are rendered as '#<value>'
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
        env.assertListEqual(
            [100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
def testNot(env):
    """Negation: for each term, the inclusive and exclusive result sets must
    exactly partition the corpus, and all negation spellings must agree."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
    N = 10
    for i in range(N):
        # every doc contains 'constant' plus one of term0..term4
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'constant term%d' % (random.randrange(0, 5))))
    for i in range(5):
        inclusive = r.execute_command(
            'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
        # three equivalent ways to phrase the same negation
        exclusive = r.execute_command(
            'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
        exclusive2 = r.execute_command(
            'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
        exclusive3 = r.execute_command(
            'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
        # inclusive + exclusive counts must cover all N docs exactly
        env.assertNotEqual(inclusive[0], N)
        env.assertEqual(inclusive[0] + exclusive[0], N)
        env.assertEqual(exclusive3[0], exclusive2[0])
        env.assertEqual(exclusive3[0], exclusive[0])
        # the doc-id sets must be disjoint and the exclusives identical
        s1, s2, s3, s4 = set(inclusive[1:]), set(
            exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
        env.assertTrue(s1.difference(s2) == s1)
        env.assertTrue(s1.difference(s3) == s1)
        env.assertTrue(s1.difference(s4) == s1)
        env.assertTrue(s2 == s3)
        env.assertTrue(s2 == s4)
        env.assertTrue(s2.intersection(s1) == set())
        env.assertTrue(s3.intersection(s1) == set())
        env.assertTrue(s4.intersection(s1) == set())
    # NOT on a non existing term
    env.assertEqual(r.execute_command(
        'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
    # not on env term
    env.assertEqual(r.execute_command(
        'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
    # negating the union of all possible terms leaves nothing
    env.assertEqual(r.execute_command(
        'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
    # env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
    """All algebraically-equivalent groupings/orderings of a 4-term
    intersection must return identical results."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH',
        'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
    for i in range(20):
        # every doc has the same four field values, so every query matches all
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
    res = [
        r.execute_command('ft.search', 'idx',
                          'foo bar baz gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (bar baz gaz)', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (bar (baz gaz))', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (bar (foo bar) (foo bar))', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (foo (bar baz (gaz)))', 'nocontent'),
        r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
    # NOTE(review): the loop variable shadows the outer `r` (the env);
    # harmless here because `r` is not used again afterwards
    for i, r in enumerate(res):
        # print i, res[0], r
        # every variant must match the plain flat 4-term query
        env.assertListEqual(res[0], r)
def testInKeys(env):
    """INKEYS restricts search results to an explicit key list; malformed
    key counts must raise errors."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text'))
    for i in range(200):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'hello world'))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')
        # several differently-shaped key subsets
        for keys in (
            ['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
                'doc%d' % i for i in range(99, 0, -5)]
        ):
            res = r.execute_command(
                'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
            # exactly the requested keys come back, no others
            env.assertEqual(len(keys), res[0])
            env.assertTrue(all((k in res for k in keys)))
        # keys that do not exist in the index yield nothing
        env.assertEqual(0, r.execute_command(
            'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
    # count larger than the supplied keys / negative / short list: all errors
    with env.assertResponseError():
        env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
    with env.assertResponseError():
        env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
    with env.assertResponseError():
        env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
    """SLOP (max allowed gap between matched terms) and INORDER (terms must
    appear in query order): growing slop admits docs with larger gaps."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
    # gap between t1 and t2 grows from 0 (doc1) to 3 (doc4)
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
                                   'title', 't1 t2'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
                                   'title', 't1 t3 t2'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
                                   'title', 't1 t3 t4 t2'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
                                   'title', 't1 t3 t4 t5 t2'))
    res = r.execute_command(
        'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
    env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
    # without INORDER, slop 0 still matches adjacent terms in either order
    res = r.execute_command(
        'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
    env.assertEqual(1, res[0])
    env.assertEqual('doc1', res[1])
    # with INORDER, 't2 t1' matches nothing (no doc has t2 before t1)
    env.assertEqual(0, r.execute_command(
        'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
    # each extra unit of slop admits one more doc
    env.assertEqual(1, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
    env.assertEqual(2, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
    env.assertEqual(3, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
    env.assertEqual(4, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
    # INORDER alone (no slop limit) matches all four
    env.assertEqual(4, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'inorder')[0])
    env.assertEqual(0, r.execute_command(
        'ft.search', 'idx', 't t1', 'inorder')[0])
    # default slop without INORDER
    env.assertEqual(2, r.execute_command(
        'ft.search', 'idx', 't1 t2 t3 t4')[0])
    env.assertEqual(0, r.execute_command(
        'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testSlopInOrderIssue1986(env):
r = env
# test with qsort optimization on intersect iterator
env.assertOk(r.execute_command(
'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 't1 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
'title', 't2 t1'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
'title', 't1'))
# before fix, both queries returned `doc2`
env.assertEqual([1L, 'doc2', ['title', 't2 t1']], r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder'))
env.assertEqual([1L, 'doc1', ['title', 't1 t2']], r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder'))
def testExact(env):
    """Exact-phrase (double-quoted) queries must only match contiguous
    occurrences of the phrase."""
    env.assertOk(env.execute_command(
        'ft.create', 'idx', 'ON', 'HASH',
        'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
    env.assertOk(env.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
                                     'title', 'hello world',
                                     'body', 'lorem ist ipsum'))
    env.assertOk(env.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                                     'title', 'hello another world',
                                     'body', 'lorem ist ipsum lorem lorem'))

    # "hello world" appears contiguously only in doc1
    first = env.execute_command('ft.search', 'idx', '"hello world"', 'verbatim')
    env.assertEqual(3, len(first))
    env.assertEqual(1, first[0])
    env.assertEqual("doc1", first[1])

    # "another world" appears contiguously only in doc2
    second = env.execute_command(
        'ft.search', 'idx', "hello \"another world\"", 'verbatim')
    env.assertEqual(3, len(second))
    env.assertEqual(1, second[0])
    env.assertEqual("doc2", second[1])
def testGeoErrors(env):
env.expect('flushall')
env.expect('ft.create idx ON HASH schema name text location geo').equal('OK')
env.expect('ft.add idx hotel 1.0 fields name hill location -0.1757,51.5156').equal('OK')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 km').equal([0L])
# Insert error - works fine with out of keyspace implementation
# env.expect('ft.add', 'idx', 'hotel1', 1, 'fields', 'name', '_hotel1', 'location', '1, 1').error() \
# .contains('Could not index geo value')
# Query errors
env.expect('ft.search idx hilton geofilter location lon 51.5156 1 km').error() \
.contains('Bad arguments for <lon>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location 51.5156 lat 1 km').error() \
.contains('Bad arguments for <lat>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 radius km').error() \
.contains('Bad arguments for <radius>: Could not convert argument to expected type')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1 fake').error() \
.contains('Unknown distance unit fake')
env.expect('ft.search idx hilton geofilter location -0.1757 51.5156 1').error() \
.contains('GEOFILTER requires 5 arguments')
def testGeo(env):
    """Geo radius search: the GEOFILTER argument and the inline
    @field:[lon lat radius unit] query syntax must return identical results;
    verified across an RDB reload."""
    r = env
    # radius search via the GEOFILTER argument...
    gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command(
        'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit, 'LIMIT', 0, 20)
    # ...and the same search via the inline query syntax
    gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command(
        'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit), 'LIMIT', 0, 20)
    env.assertOk(r.execute_command('ft.create', 'idx', 'ON', 'HASH',
                                   'schema', 'name', 'text', 'location', 'geo'))
    # `hotels` is a module-level fixture; from the usage below it looks like
    # (name, lat, lon) tuples — location is formatted as "lon,lat"
    for i, hotel in enumerate(hotels):
        env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
                                       hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')
        res = r.execute_command('ft.search', 'idx', 'hilton')
        env.assertEqual(len(hotels), res[0])
        res = gsearch('hilton', "-0.1757", "51.5156", '1')
        env.assertEqual(3, res[0])
        env.assertIn('hotel2', res)
        env.assertIn('hotel21', res)
        env.assertIn('hotel79', res)
        res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
        env.assertListEqual(sorted(res), sorted(res2))
        res = gsearch('hilton', "-0.1757", "51.5156", '10')
        env.assertEqual(14, res[0])
        # 10 km and 10000 m are the same radius
        res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
        env.assertListEqual(sorted(res), sorted(res2))
        res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
        env.assertListEqual(sorted(res), sorted(res2))
        res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
        env.assertEqual(1, res[0])
        env.assertEqual('hotel94', res[1])
        res2 = gsearch_inline(
            'heathrow', -0.44155, 51.45865, '10', 'm')
        env.assertListEqual(sorted(res), sorted(res2))
        res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
        env.assertEqual(5, res[0])
        env.assertIn('hotel94', res)
        res2 = gsearch_inline(
            'heathrow', -0.44155, 51.45865, '10', 'km')
        env.assertEqual(5, res2[0])
        env.assertListEqual(sorted(res), sorted(res2))
        res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
        env.assertEqual(3, res[0])
        env.assertIn('hotel94', res)
        res2 = gsearch_inline(
            'heathrow', -0.44155, 51.45865, '5', 'km')
        env.assertListEqual(sorted(res), sorted(res2))
def testTagErrors(env):
    """TAG fields containing punctuation and spaces must still index cleanly."""
    env.expect("ft.create", "test", 'ON', 'HASH',
               "SCHEMA", "tags", "TAG").equal('OK')
    # a plain tag value and one with punctuation plus a space
    for doc_id, tag_value in (("1", "alberta"), ("2", "ontario. alberta")):
        env.expect("ft.add", "test", doc_id, "1",
                   "FIELDS", "tags", tag_value).equal('OK')
def testGeoDeletion(env):
    """Geo (numeric) index entries must be garbage-collected after a doc is
    deleted, or replaced without its geo fields."""
    if env.is_cluster():
        # Can't properly test if deleted on cluster
        raise unittest.SkipTest()
    # make the fork GC clean everything on every run
    env.expect('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0).ok()
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema',
            'g1', 'geo', 'g2', 'geo', 't1', 'text')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
            'g1', "-0.1757,51.5156",
            'g2', "-0.1757,51.5156",
            't1', "hello")
    env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
            'g1', "-0.1757,51.5156",
            'g2', "-0.1757,51.5156",
            't1', "hello")
    env.cmd('ft.add', 'idx', 'doc3', 1.0, 'fields',
            'g1', "-0.1757,51.5156",
            't1', "hello")
    # keys are: "geo:idx/g1" and "geo:idx/g2"
    # g1 is present in 3 docs, g2 only in 2
    env.assertEqual(3, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
    env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
    # Remove the first doc
    env.cmd('ft.del', 'idx', 'doc1')
    for _ in range(100):
        # gc invocation is not deterministic; repeat until it has run
        forceInvokeGC(env, 'idx')
    env.assertEqual(2, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
    env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
    # Replace the other one:
    env.cmd('ft.add', 'idx', 'doc2', 1.0,
            'replace', 'fields',
            't1', 'just text here')
    for _ in range(100):
        forceInvokeGC(env, 'idx')
    # doc2's geo entries must be gone; only doc3's g1 remains
    env.assertEqual(1, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g1')[0]))
    env.assertEqual(0, len(env.cmd('FT.DEBUG DUMP_NUMIDX idx g2')[0]))
def testInfields(env):
    """INFIELDS restricts matching to the listed fields only, for both
    free-text and exact-phrase queries."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH',
        'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
                                   'title', 'hello world',
                                   'body', 'lorem ipsum'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                                   'title', 'hello world lorem ipsum',
                                   'body', 'hello world'))
    # both titles contain 'hello world'; doc2 wins on score
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
    env.assertEqual(3, len(res))
    env.assertEqual(2, res[0])
    env.assertEqual("doc2", res[1])
    env.assertEqual("doc1", res[2])
    # only doc2's body contains 'hello world'
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc2", res[1])
    res = r.execute_command(
        'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc2", res[1])
    # exact phrase restricted to body
    res = r.execute_command(
        'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc2", res[1])
    res = r.execute_command(
        'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc1", res[1])
    # multiple INFIELDS entries widen the scope again
    res = r.execute_command(
        'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
    env.assertEqual(3, len(res))
    env.assertEqual(2, res[0])
    env.assertEqual("doc2", res[1])
    env.assertEqual("doc1", res[2])
def testScorerSelection(env):
    """SCORER argument: a known scorer is accepted, an unknown one errors."""
    env.assertOk(env.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'body', 'text'))
    # this is the default scorer
    reply = env.execute_command('ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
    env.assertEqual(reply, [0])
    # an unknown scorer name must be rejected
    with env.assertResponseError():
        env.execute_command('ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
def testFieldSelectors(env):
    """@field: selectors, including unicode field names, punctuated names
    (escaped in the query), and the @f1|f2: multi-field form."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc',
        'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text'))
    #todo: document as breaking change, ft.add fields name are not case insentive
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
                                   'TiTle', 'hello world', 'BoDy', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
    # doc2 swaps the TiTle/BoDy contents of doc1
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
                                   'BoDy', 'hello world', 'TiTle', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
    res = r.execute_command(
        'ft.search', 'idx', '@TiTle:hello world', 'nocontent')
    env.assertEqual(res, [1, 'doc1'])
    res = r.execute_command(
        'ft.search', 'idx', '@BoDy:hello world', 'nocontent')
    env.assertEqual(res, [1, 'doc2'])
    # terms split across different fields must not match
    res = r.execute_command(
        'ft.search', 'idx', '@BoDy:hello @TiTle:world', 'nocontent')
    env.assertEqual(res, [0])
    res = r.execute_command(
        'ft.search', 'idx', '@BoDy:hello world @TiTle:world', 'nocontent')
    env.assertEqual(res, [0])
    # unions inside field selectors match both docs
    res = r.execute_command(
        'ft.search', 'idx', '@BoDy:(hello|foo) @TiTle:(world|bar)', 'nocontent')
    env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
    res = r.execute_command(
        'ft.search', 'idx', '@BoDy:(hello|foo world|bar)', 'nocontent')
    env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
    # multi-field selector @f1|f2:
    res = r.execute_command(
        'ft.search', 'idx', '@BoDy|TiTle:(hello world)', 'nocontent')
    env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
    # unicode field name
    res = r.execute_command(
        'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent')
    env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
    # punctuation in the field name must be escaped in the query
    res = r.execute_command(
        'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
    env.assertEqual(sorted(res), sorted([2, 'doc1', 'doc2']))
def testStemming(env):
    """Stemming folds singular/plural forms together; VERBATIM disables it;
    an unknown LANGUAGE must be rejected."""
    env.assertOk(env.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
    for doc_id, score, title in (('doc1', 0.5, 'hello kitty'),
                                 ('doc2', 1.0, 'hello kitties')):
        env.assertOk(env.execute_command('ft.add', 'idx', doc_id, score,
                                         'fields', 'title', title))

    # stemming makes 'kitty' match 'kitties' too
    stemmed = env.execute_command('ft.search', 'idx', 'hello kitty', "nocontent")
    env.assertEqual(3, len(stemmed))
    env.assertEqual(2, stemmed[0])

    # VERBATIM: only the literal form matches
    verbatim = env.execute_command(
        'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
    env.assertEqual(2, len(verbatim))
    env.assertEqual(1, verbatim[0])

    # test for unknown language
    with env.assertResponseError():
        env.execute_command(
            'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
def testExpander(env):
    """Query expanders: the SBSTEM expander stems query terms, 'noexpander'
    disables expansion, VERBATIM disables it per query, and an escaped
    '\\+term' queries a raw stem directly."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
                                   'title', 'hello kitty'))
    # SBSTEM stems 'kitties' so it matches the indexed 'kitty'
    res = r.execute_command(
        'ft.search', 'idx', 'kitties',
        "nocontent",
        "expander", "SBSTEM"
    )
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    # with no expander, 'kitties' stays literal and misses
    res = r.execute_command(
        'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
    env.assertEqual(1, len(res))
    env.assertEqual(0, res[0])
    # 'kitti' is the stem of 'kitty', so the default expander finds it
    res = r.execute_command(
        'ft.search', 'idx', 'kitti', "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    res = r.execute_command(
        'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
    env.assertEqual(1, len(res))
    env.assertEqual(0, res[0])
    # Calling a stem directly works even with VERBATIM.
    # You need to use the + prefix escaped
    res = r.execute_command(
        'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
def testNumericRange(env):
    """Numeric FILTER argument validation, inclusive/exclusive ('(' prefix)
    bounds, -inf/+inf, multiple filters, and the equivalent inline
    @field:[min max] query syntax; verified across an RDB reload."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
    # malformed FILTER clauses must be rejected up front
    env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5).error().contains("FILTER requires 3 arguments")
    env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 5, 'inf').error().contains("Bad upper range: inf")
    env.expect('ft.search', 'idx', 'hello kitty', 'filter', 'score', 'inf', 5).error().contains("Bad lower range: inf")
    for i in xrange(100):
        # score = i (0..99), price = 100 + 10*i (100..1090)
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
                                       'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')
        res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
                                "filter", "score", 0, 100)
        env.assertEqual(11, len(res))  # default LIMIT: 10 ids + count
        env.assertEqual(100, res[0])
        res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
                                "filter", "score", 0, 50)
        env.assertEqual(51, res[0])
        # '(' prefix makes a bound exclusive
        res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
                                "filter", "score", "(0", "(50")
        env.assertEqual(49, res[0])
        res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
                                "filter", "score", "-inf", "+inf")
        env.assertEqual(100, res[0])
        # test multi filters
        scrange = (19, 90)
        prrange = (290, 385)
        res = r.execute_command('ft.search', 'idx', 'hello kitty',
                                "filter", "score", scrange[
                                    0], scrange[1],
                                "filter", "price", prrange[0], prrange[1])
        # print res
        for doc in res[2::2]:
            # every returned doc must satisfy both numeric filters
            sc = int(doc[doc.index('score') + 1])
            pr = int(doc[doc.index('price') + 1])
            env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
            env.assertGreaterEqual(pr, prrange[0])
            env.assertLessEqual(pr, prrange[1])
        env.assertEqual(10, res[0])
        # disjoint score/price constraints match nothing
        res = r.execute_command('ft.search', 'idx', 'hello kitty',
                                "filter", "score", "19", "90",
                                "filter", "price", "90", "185")
        env.assertEqual(0, res[0])
        # Test numeric ranges as part of query syntax
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
        env.assertEqual(11, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
        env.assertEqual(51, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
        env.assertEqual(49, res[0])
        res = r.execute_command(
            'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
        env.assertEqual(49, res[0])
        # negated range yields the complement
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
        env.assertEqual(51, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
        env.assertEqual(100, res[0])
def testNotIter(env):
    """NOT iterator over numeric ranges: excluding a middle, leading,
    trailing, or full range of 8 docs must yield the exact complement."""
    conn = getConnectionByEnv(env)
    env.assertOk(conn.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
    for i in xrange(8):
        # scores 0..7, one doc per score
        conn.execute_command('HSET', 'doc%d' % i, 'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i)
    # middle chunk: excluding scores 2..4 leaves 5 docs
    res = env.execute_command(
        'ft.search', 'idx', '-@score:[2 4]', 'verbatim', "nocontent")
    env.assertEqual(5, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
    res = env.execute_command(
        'ft.search', 'idx', 'hello kitty -@score:[2 4]', 'verbatim', "nocontent")
    env.assertEqual(5, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
    # start chunk
    res = env.execute_command(
        'ft.search', 'idx', '-@score:[0 2]', 'verbatim', "nocontent")
    env.assertEqual(5, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
    res = env.execute_command(
        'ft.search', 'idx', 'hello kitty -@score:[0 2]', 'verbatim', "nocontent")
    env.assertEqual(5, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
    # end chunk
    res = env.execute_command(
        'ft.search', 'idx', '-@score:[5 7]', 'verbatim', "nocontent")
    env.assertEqual(5, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
    res = env.execute_command(
        'ft.search', 'idx', 'hello kitty -@score:[5 7]', 'verbatim', "nocontent")
    env.assertEqual(5, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
    # whole chunk: excluding everything leaves nothing
    res = env.execute_command(
        'ft.search', 'idx', '-@score:[0 7]', 'verbatim', "nocontent")
    env.assertEqual(0, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
    res = env.execute_command(
        'ft.search', 'idx', 'hello kitty -@score:[0 7]', 'verbatim', "nocontent")
    env.assertEqual(0, res[0])
    env.debugPrint(', '.join(toSortedFlatList(res[1:])), force=True)
def testPayload(env):
    """Verify payloads survive indexing and RDB reload, and are returned
    (interleaved with the doc ids) only when WITHPAYLOADS is given."""
    r = env
    env.expect('ft.create', 'idx', 'ON', 'HASH', 'PAYLOAD_FIELD', '__payload', 'schema', 'f', 'text').ok()
    for i in range(10):
        r.expect('ft.add', 'idx', '%d' % i, 1.0,
                 'payload', 'payload %d' % i,
                 'fields', 'f', 'hello world').ok()

    for x in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')
        res = r.execute_command('ft.search', 'idx', 'hello world')
        # 1 count + 10 * (id, fields) = 21 reply entries without payloads
        r.assertEqual(21, len(res))
        res = r.execute_command('ft.search', 'idx', 'hello world', 'withpayloads')
        # 1 count + 10 * (id, payload, fields) = 31 entries with payloads
        r.assertEqual(31, len(res))
        r.assertEqual(10, res[0])
        for i in range(1, 30, 3):
            # res[i] is the doc id, res[i + 1] its payload
            r.assertEqual(res[i + 1], 'payload %s' % res[i])
def testGarbageCollector(env):
    """Verify the GC reclaims inverted-index space after all docs are deleted.

    Indexes N docs of random terms, deletes them all, forces GC repeatedly,
    then checks num_docs/num_records drop to 0, the index size shrinks below
    its initial value, and bytes_collected accounts for the freed space.
    """
    env.skipOnCluster()
    if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs:
        # this test is not relevant for fork gc because it does not clean the last block
        raise unittest.SkipTest()
    N = 100
    r = env
    r.expect('ft.create', 'idx', 'ON', 'HASH', 'schema', 'foo', 'text').ok()
    waitForIndex(r, 'idx')
    for i in range(N):
        # 10 random terms per doc; use '_' so the generator's loop variable
        # does not shadow the outer doc counter `i`
        r.expect('ft.add', 'idx', 'doc%d' % i, 1.0,
                 'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for _ in range(10)))).ok()

    def get_stats(r):
        # Flatten the [k1, v1, k2, v2, ...] FT.INFO reply into a dict and
        # do the same for the nested gc_stats section (values as floats).
        res = r.execute_command('ft.info', 'idx')
        d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
        gc_stats = {d['gc_stats'][x]: float(
            d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
        d['gc_stats'] = gc_stats
        return d

    stats = get_stats(r)
    if 'current_hz' in stats['gc_stats']:
        env.assertGreater(stats['gc_stats']['current_hz'], 8)
    # Nothing deleted yet, so nothing collected
    env.assertEqual(0, stats['gc_stats']['bytes_collected'])
    env.assertGreater(int(stats['num_records']), 0)

    initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
    for i in range(N):
        r.expect('ft.del', 'idx', 'doc%d' % i).equal(1)

    for _ in range(100):
        # gc is random so we need to invoke it enough times for it to work
        forceInvokeGC(env, 'idx')

    stats = get_stats(r)
    env.assertEqual(0, int(stats['num_docs']))
    env.assertEqual(0, int(stats['num_records']))
    if not env.is_cluster():
        # max_doc_id is monotonic; deletions do not reset it
        env.assertEqual(100, int(stats['max_doc_id']))
        if 'current_hz' in stats['gc_stats']:
            env.assertGreater(stats['gc_stats']['current_hz'], 30)
        currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
        env.assertGreater(initialIndexSize, currentIndexSize)
        env.assertGreater(stats['gc_stats'][
                          'bytes_collected'], currentIndexSize)

    for i in range(10):
        res = r.execute_command('ft.search', 'idx', 'term%d' % i)
        env.assertEqual([0], res)
def testReturning(env):
    """Exercise the RETURN clause: RETURN 0 (ids only), RETURN 1 per field,
    interaction with SORTBY, missing fields, and a bad field count."""
    env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', 'schema',
                    'f1', 'text',
                    'f2', 'text',
                    'n1', 'numeric', 'sortable',
                    'f3', 'text')
    for i in range(10):
        env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
                        'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
                        'n1', i)

    # RETURN 0. Simplest case: reply is count followed by bare doc ids
    for x in env.retry_with_reload():
        waitForIndex(env, 'idx')
        res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
        env.assertEqual(11, len(res))
        env.assertEqual(10, res[0])
        for r in res[1:]:
            env.assertTrue(r.startswith('DOC_'))

    # RETURN 1 <field>: each hit is (docname, [field, value])
    for field in ('f1', 'f2', 'f3', 'n1'):
        res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
        env.assertEqual(21, len(res))
        env.assertEqual(10, res[0])
        for pair in grouper(res[1:], 2):
            docname, fields = pair
            env.assertEqual(2, len(fields))
            env.assertEqual(field, fields[0])
            env.assertTrue(docname.startswith('DOC_'))

    # Test that we don't return SORTBY fields if they weren't specified
    # also in RETURN
    res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
                  'sortby', 'n1', 'ASC')
    row = res[2]
    # get the first result
    env.assertEqual(['f1', 'val1'], row)

    # Test when field is not found
    res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
    env.assertEqual(21, len(res))
    env.assertEqual(10, res[0])

    # # Test that we don't crash if we're given the wrong number of fields
    with env.assertResponseError():
        res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
def _test_create_options_real(env, *options):
    """Create an index with the given creation flags and verify behavior.

    `options` may contain 'NOOFFSETS', 'NOFIELDS', 'NOFREQS' and empty
    strings (which are dropped). The index is (re)created, a few documents
    are added, and search results are checked against the flags in effect.
    """
    options = [x for x in options if x]
    has_offsets = 'NOOFFSETS' not in options
    has_fields = 'NOFIELDS' not in options
    has_freqs = 'NOFREQS' not in options
    try:
        env.cmd('ft.drop', 'idx')
        # RS 2.0 ft.drop does not remove documents
        env.flush()
    except Exception:
        # The index may not exist yet on the first invocation; that's fine.
        pass
    options = ['idx'] + options + ['ON', 'HASH', 'schema', 'f1', 'text', 'f2', 'text']
    env.assertCmdOk('ft.create', *options)
    for i in range(10):
        env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
            i), 0.5, 'fields', 'f1', 'value for {}'.format(i))

    # Query
    # res = env.cmd('ft.search', 'idx', "value for 3")
    # if not has_offsets:
    #     env.assertIsNone(res)
    # else:
    #     env.assertIsNotNone(res)

    # Frequencies:
    env.assertCmdOk('ft.add', 'idx', 'doc100',
                    1.0, 'fields', 'f1', 'foo bar')
    env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
                    'fields', 'f1', ('foo ' * 10) + ' bar')
    res = env.cmd('ft.search', 'idx', 'foo')
    env.assertEqual(2, res[0])
    if has_offsets:
        docname = res[1]
        if has_freqs:
            # changed in minminheap PR. TODO: remove
            env.assertEqual('doc100', docname)
        else:
            env.assertEqual('doc100', docname)

    env.assertCmdOk('ft.add', 'idx', 'doc300',
                    1.0, 'fields', 'f1', 'Hello')
    res = env.cmd('ft.search', 'idx', '@f2:Hello')
    # With NOFIELDS the per-field filter cannot be applied, so all three
    # 'Hello'-bearing replies come back instead of just the one in f2.
    if has_fields:
        env.assertEqual(1, len(res))
    else:
        env.assertEqual(3, len(res))
def testCreationOptions(env):
    """Run _test_create_options_real over every combination of creation
    flags, then verify that FT.CREATE without a schema fails."""
    from itertools import combinations
    flags = ('NOOFFSETS', 'NOFREQS', 'NOFIELDS', '')
    for size in range(1, 5):
        for combo in combinations(flags, size):
            _test_create_options_real(env, *combo)
    env.expect('ft.create', 'idx').error()
def testInfoCommand(env):
    """Check FT.INFO output: basic stats after indexing N docs, and that
    index_options reflects exactly the creation flags used."""
    from itertools import combinations
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'ON', 'HASH', 'NOFIELDS', 'schema', 'title', 'text'))
    N = 50
    for i in xrange(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
                                       'title', 'hello term%d' % i))
    for _ in r.retry_with_rdb_reload():
        waitForIndex(env, 'idx')

        res = r.execute_command('ft.info', 'idx')
        # FT.INFO replies as a flat [k1, v1, k2, v2, ...] list
        d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}

        env.assertEqual(d['index_name'], 'idx')
        env.assertEqual(d['index_options'], ['NOFIELDS'])
        env.assertListEqual(
            d['attributes'], [['identifier', 'title', 'attribute', 'title', 'type', 'TEXT', 'WEIGHT', '1']])

        if not env.is_cluster():
            # 'hello' is shared by every doc, plus one unique term per doc
            env.assertEquals(int(d['num_docs']), N)
            env.assertEquals(int(d['num_terms']), N + 1)
            env.assertEquals(int(d['max_doc_id']), N)
            env.assertEquals(int(d['records_per_doc_avg']), 2)
            env.assertEquals(int(d['num_records']), N * 2)

            env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
            env.assertGreater(float(d['key_table_size_mb']), 0)
            env.assertGreater(float(d['inverted_sz_mb']), 0)
            env.assertGreater(float(d['bytes_per_record_avg']), 0)
            env.assertGreater(float(d['doc_table_size_mb']), 0)

    # Recreate the index under every combination of flags and verify the
    # reported index_options matches exactly what was passed
    for x in range(1, 5):
        for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
            combo = list(filter(None, combo))
            options = combo + ['schema', 'f1', 'text']
            try:
                env.cmd('ft.drop', 'idx')
            except:
                pass
            env.assertCmdOk('ft.create', 'idx', 'ON', 'HASH', *options)
            info = env.cmd('ft.info', 'idx')
            ix = info.index('index_options')
            env.assertFalse(ix == -1)
            opts = info[ix + 1]
            # make sure that an empty opts string returns no options in
            # info
            if not combo:
                env.assertListEqual([], opts)
            for option in filter(None, combo):
                env.assertTrue(option in opts)
def testNoStem(env):
    """A NOSTEM text field must only match exact terms: 'located' is found
    via stemming in the plain field but not in the NOSTEM field."""
    env.cmd('ft.create', 'idx', 'ON', 'HASH',
            'schema', 'body', 'text', 'name', 'text', 'nostem')
    if not env.isCluster():
        # todo: change it to be more generic to pass on is_cluster
        res = env.cmd('ft.info', 'idx')
        env.assertEqual(res[7][1][8], 'NOSTEM')
    for _ in env.retry_with_reload():
        waitForIndex(env, 'idx')
        try:
            env.cmd('ft.del', 'idx', 'doc')
        except redis.ResponseError:
            pass

        # Insert a document
        env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
                        'body', "located",
                        'name', "located")

        # Now search for the fields: 'location' stems to the same root as
        # 'located', so only the stemmed field matches
        res_body = env.cmd('ft.search', 'idx', '@body:location')
        res_name = env.cmd('ft.search', 'idx', '@name:location')
        env.assertEqual(0, res_name[0])
        env.assertEqual(1, res_body[0])
def testSortbyMissingField(env):
    """Regression for GH issue 131: SORTBY on a sortable field that the
    matching document does not have must not crash the server."""
    schema_args = ('ft.create', 'ix', 'ON', 'HASH', 'schema',
                   'txt', 'text', 'num', 'numeric', 'sortable')
    env.cmd(*schema_args)
    env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
    # doc1 has no 'num' value; the query must simply succeed
    env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
    """Regression for GH issue 207: 10 threads each adding 100 distinct docs
    concurrently must leave exactly 1000 docs in the index."""
    # GH Issue 207
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
    from threading import Thread
    env.getConnection()
    ndocs = 100

    def runner(tid):
        # Each thread uses its own connection; doc ids are unique per thread
        cli = env.getConnection()
        for num in range(ndocs):
            cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
                                'fields', 'txt', 'hello world' * 20)
    ths = []
    for tid in range(10):
        ths.append(Thread(target=runner, args=(tid,)))
    [th.start() for th in ths]
    [th.join() for th in ths]
    res = env.cmd('ft.info', 'idx')
    d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
    env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
    """Regression for issue 210: re-adding an existing doc without REPLACE
    must fail and leave the original content; with REPLACE it must swap the
    indexed content entirely."""
    # Tests issue #210
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
    with env.assertResponseError():
        env.cmd('ft.add', 'idx', 'doc1', 1.0,
                'fields', 'txt', 'goodbye world')

    # Original content is untouched after the failed add
    env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
    env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
    env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])

    # Now with replace: old terms are de-indexed, new terms indexed
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
            'fields', 'txt', 'goodbye world')
    env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
    env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
    env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
    """Hammer the index from many processes adding the same doc ids in
    random order. All but the first add of each id fails, which is expected;
    afterwards every unique doc id must be indexed exactly once."""
    from multiprocessing import Process
    import random
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
    docs_per_thread = 100
    num_threads = 50

    docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]

    def thrfn():
        # Shuffle a private copy so workers collide on ids in random order
        myIds = docIds[::]
        random.shuffle(myIds)
        cli = env.getConnection()
        with cli.pipeline(transaction=False) as pl:
            for x in myIds:
                pl.execute_command('ft.add', 'idx', x, 1.0,
                                   'fields', 'txt', ' hello world ' * 50)
            try:
                pl.execute()
            except Exception:
                # "document already exists" errors are the point of this
                # test; swallow them deliberately.
                pass

    thrs = [Process(target=thrfn) for x in range(num_threads)]
    [th.start() for th in thrs]
    [th.join() for th in thrs]
    res = env.cmd('ft.info', 'idx')
    d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
    env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
    """Document keys containing an embedded NUL byte must index and search
    correctly alongside plain keys."""
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text')
    # Insert a document
    env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
    env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
    for _ in env.reloading_iterator():
        waitForIndex(env, 'idx')
        exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
        res = env.cmd('ft.search', 'idx', 'match')
        # Order may vary, so check membership rather than exact position
        for r in res:
            env.assertIn(r, exp)
def testNonDefaultDb(env):
    """FT.CREATE must be rejected on any Redis database other than 0."""
    if env.is_cluster():
        raise unittest.SkipTest()

    # Should be ok
    env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'schema', 'txt', 'text')
    try:
        env.cmd('SELECT 1')
    except redis.ResponseError:
        # SELECT unsupported in this deployment; nothing more to verify
        return

    # Should fail
    with env.assertResponseError():
        env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
    """Duplicate non-schema field names in FT.ADD: one of the duplicate
    values wins, and field names remain case sensitive ('f1' vs 'F1')."""
    env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'schema', 'txt', 'text').ok()
    env.expect('FT.ADD', 'idx', 'doc', 1.0, 'fields',
               'txt', 'foo', 'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3').ok()

    res = env.cmd('ft.get', 'idx', 'doc')
    res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
    # Which duplicate wins is not pinned down, only that one of them does
    env.assertTrue(res['f1'] in ('f1val', 'f1val2'))
    env.assertEqual('f1Val3', res['F1'])
def testDuplicateFields(env):
    """Duplicate schema field names in FT.ADD: the last value is the one
    saved and indexed (allowed behavior as of RS 2.0)."""
    # As of RS 2.0 it is allowed. only latest field will be saved and indexed
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH',
            'SCHEMA', 'txt', 'TEXT', 'num', 'NUMERIC', 'SORTABLE')

    env.expect('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
               'txt', 'foo', 'txt', 'bar', 'txt', 'baz').ok()
    env.expect('FT.SEARCH idx *').equal([1L, 'doc', ['txt', 'baz']])
def testDuplicateSpec(env):
    """A schema that declares the same field name twice must be rejected."""
    duplicated_schema = ('SCHEMA', 'f1', 'text', 'n1', 'numeric', 'f1', 'text')
    with env.assertResponseError():
        env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', *duplicated_schema)
def testSortbyMissingFieldSparse(env):
    """SORTBY on a sortable field absent from the matching doc (which still
    has another sortable field present) must not crash."""
    # Note, the document needs to have one present sortable field in
    # order for the indexer to give it a sort vector
    env.cmd('ft.create', 'idx', 'ON', 'HASH',
            'SCHEMA', 'lastName', 'text', 'SORTABLE', 'firstName', 'text', 'SORTABLE')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
    res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
                  "firstName", "ASC", "limit", 0, 100)
    # commented because we don't filter out exclusive sortby fields
    # env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
    """Ensure FT.ADD / FT.ADDHASH work from Lua scripts and MULTI pipelines
    without crashing. Currently skipped: addhash isn't supported."""
    env.skip() # addhash isn't supported
    if env.is_cluster():
        raise unittest.SkipTest()
    # Ensure we can work in Lua and Multi environments without crashing
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
    env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
    env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)

    r = env.getConnection()

    r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
    r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)

    # Try in a pipeline:
    with r.pipeline(transaction=True) as pl:
        pl.execute_command('ft.add', 'idx', 'doc2',
                           1.0, 'fields', 'f1', 'v3')
        pl.execute_command('ft.add', 'idx', 'doc3',
                           1.0, 'fields', 'f1', 'v4')
        pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
        pl.execute()
def testLanguageField(env):
    """A schema field named 'language' must not collide with the LANGUAGE
    keyword of FT.ADD; the keyword itself is still parsed and validated."""
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'language', 'TEXT')
    env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
            'FIELDS', 'language', 'gibberish')
    res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
    env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
    # The only way I can verify that LANGUAGE is parsed twice is ensuring we
    # provide a wrong language. This is much easier to test than trying to
    # figure out how a given word is stemmed
    with env.assertResponseError():
        env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
                'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
    """Regression: repeated SAVE / DEBUG RELOAD cycles with many documents
    previously crashed the server (uninitialized sort vector)."""
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT')
    for doc_num in range(2000):
        env.cmd('FT.ADD', 'idx', 'doc{}'.format(doc_num), 1.0,
                'FIELDS', 'f1', 'HELLO')
    env.broadcast('SAVE')
    # Surviving the reload loop is the assertion here
    for _ in range(10):
        env.broadcast('DEBUG RELOAD')
def normalize_row(row):
    """Normalize a flat reply row into a dict for order-insensitive compares."""
    as_dict = to_dict(row)
    return as_dict
def assertAggrowsEqual(env, exp, got):
    """Assert two FT.AGGREGATE replies are equal, ignoring row order.

    The leading count and total length must match exactly; rows are then
    converted to dicts and compared sorted, so both row order and field
    order within a row are irrelevant.
    """
    env.assertEqual(exp[0], got[0])
    env.assertEqual(len(exp), len(got))
    # and now, it's just free form:
    exp = sorted(to_dict(x) for x in exp[1:])
    got = sorted(to_dict(x) for x in got[1:])
    env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
    """Assert two FT.SEARCH replies are equal.

    Compares the leading count and each (docid, fields) pair in order;
    field lists are compared as dicts so field order within a document
    does not matter. `inorder` is kept for API compatibility; document
    order is always checked.
    """
    env.assertEqual(exp[0], got[0])
    env.assertEqual(len(exp), len(got))
    exp = list(grouper(exp[1:], 2))
    got = list(grouper(got[1:], 2))
    for x in range(len(exp)):
        exp_did, exp_fields = exp[x]
        got_did, got_fields = got[x]
        env.assertEqual(exp_did, got_did, message="at position {}".format(x))
        got_fields = to_dict(got_fields)
        exp_fields = to_dict(exp_fields)
        env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
def testAlterIndex(env):
    """FT.ALTER SCHEMA ADD: new TEXT, SORTABLE and NUMERIC fields must be
    picked up (with RS 2.0 reindexing existing docs too), and malformed
    ALTER invocations must error."""
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT')
    env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
    env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
    waitForIndex(env, 'idx')
    env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')

    # RS 2.0 reindex and after reload both documents are found
    # for _ in env.retry_with_reload():
    res = env.cmd('FT.SEARCH', 'idx', 'world')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList([2L, 'doc2', ['f1', 'hello', 'f2', 'world'], 'doc1', ['f1', 'hello', 'f2', 'world']]))
    # env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)

    env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
    for x in range(10):
        env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
                'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))

    for _ in env.retry_with_reload():
        waitForIndex(env, 'idx')
        # Test that sortable works
        res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
        exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'],
               'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'],
               'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', ['f1', 'hello', 'f3', 'val4'],
               'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'],
               'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
        assertResultsEqual(env, exp, res)

    # Test that we can add a numeric field
    env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
    env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
    env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
    for _ in env.retry_with_reload():
        waitForIndex(env, 'idx')
        res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
        env.assertEqual([1, 'docN1', ['n1', '50']], res)

    # Malformed ALTER syntax must be rejected
    env.expect('FT.ALTER', 'idx', 'SCHEMA', 'NOT_ADD', 'f2', 'TEXT').error()
    env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD').error()
    env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2').error()
    env.expect('FT.ALTER', 'idx', 'ADD', 'SCHEMA', 'f2', 'TEXT').error()
    env.expect('FT.ALTER', 'idx', 'f2', 'TEXT').error()
def testAlterValidation(env):
    """Test the constraints of the ALTER command: the text-field limit
    (lifted by MAXTEXTFIELDS), garbage schema arguments, a non-existent
    index, and a missing field list."""
    env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT')
    for x in range(1, 32):
        env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
    # OK for now.

    # Should be too many indexes
    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
                     'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')

    # MAXTEXTFIELDS lifts the text-field cap
    env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'ON', 'HASH', 'SCHEMA', 'f0', 'TEXT')
    # print env.cmd('FT.INFO', 'idx2')
    for x in range(1, 50):
        env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')

    env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
    for _ in env.retry_with_reload():
        waitForIndex(env, 'idx2')
        ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
        env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)

    env.cmd('FT.CREATE', 'idx3', 'ON', 'HASH', 'SCHEMA', 'f0', 'text')
    # Try to alter the index with garbage; the valid f1 must not be added either
    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
                     'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
    ret = to_dict(env.cmd('ft.info', 'idx3'))
    env.assertEqual(1, len(ret['attributes']))

    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
                     'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')

    # test with no fields!
    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_2(env):
    """Regression for issue 366: REPLACE PARTIAL together with a PAYLOAD
    must survive a reload without crashing."""
    # FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
    # FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
    # FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
    # shutdown
    env.cmd('FT.CREATE', 'idx1', 'ON', 'HASH',
            'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
    env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
            'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
    env.cmd('ft.add', 'idx1', 'doc1', 1,
            'PAYLOAD', '{"hello":"world2"}',
            'REPLACE', 'PARTIAL',
            'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
    # Surviving the reload is the assertion
    for _ in env.retry_with_reload():
        pass  #
def testIssue654(env):
    """Regression for issue 654: FILTER on a field that is not in the schema
    must not crash the server. The result is intentionally unused —
    completing the call without a crash is the test."""
    # Crashes during FILTER
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'id', 'numeric')
    env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
    env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
    res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
    """A REPLACE PARTIAL followed by an RDB dump/reload must leave the index
    usable for further partial replaces, with the latest values winning."""
    env.cmd('FT.CREATE', 'idx2', 'ON', 'HASH',
            'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
    # Create a document and then replace it.
    env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
    env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
            'FIELDS', 'textfield', 's100', 'numfield', 990)
    env.dump_and_reload()
    # RDB Should still be fine
    env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
            'FIELDS', 'textfield', 's200', 'numfield', 1090)
    doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
    env.assertEqual('s200', doc['textfield'])
    env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
    """Regression for issue 417: an index with ~255 sortable numeric fields
    must reload cleanly and reject a duplicate FT.ADD gracefully."""
    command = ['ft.create', 'idx', 'ON', 'HASH', 'schema']
    for x in range(255):
        command += ['t{}'.format(x), 'numeric', 'sortable']
    # drop the trailing 'sortable' to stay within the field limit
    command = command[:-1]
    env.cmd(*command)
    for _ in env.reloading_iterator():
        waitForIndex(env, 'idx')
        try:
            env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
        except redis.ResponseError as e:
            # Duplicate add on later iterations; any other error is a failure
            env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
    """Regression for issue 422: searching a term containing '-' ("no-as")
    must return an empty result instead of crashing the server."""
    env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema',
            'title', 'TEXT', 'WEIGHT', '5.0',
            'body', 'TEXT',
            'url', 'TEXT')
    # NOTE(review): 'bod' (not 'body') matches the original repro recording;
    # confirm this is intentional before "fixing" it.
    env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'bod', 'lorem ipsum', 'url', 'www.google.com')
    rv = env.cmd('ft.search', 'myIdx', 'no-as')
    env.assertEqual([0], rv)
def testIssue446(env):
    """Regression for issue 446 (and 635): LIMIT 0 0 must return only the
    correct total count, even with odd field content."""
    env.cmd('ft.create', 'myIdx', 'ON', 'HASH', 'schema',
            'title', 'TEXT', 'SORTABLE')
    # the stray '"' in the url value is part of the original repro
    env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
    rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
    env.assertEqual([1], rv)

    # Related - issue 635
    env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
    rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
    env.assertEqual([2], rv)
def testTimeout(env):
    """Exercise query timeouts: the default 'return partial' policy, the
    'fail' policy, per-query TIMEOUT overrides, bad TIMEOUT arguments, and
    timeouts inside aggregation groupers/sorters and cursors.

    Restores the timeout and maxprefixexpansions config at the end.
    """
    env.skipOnCluster()
    if VALGRIND:
        # timings are unreliable under valgrind
        env.skip()

    num_range = 1000
    # a 1ms timeout makes the heavy prefix query below reliably time out
    env.cmd('ft.config', 'set', 'timeout', '1')
    env.cmd('ft.config', 'set', 'maxprefixexpansions', num_range)
    env.cmd('ft.create', 'myIdx', 'schema', 't', 'TEXT')
    for i in range(num_range):
        env.expect('HSET', 'doc%d'%i, 't', 'aa' + str(i))

    # default policy returns partial results on timeout
    env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'limit', '0', '0').noEqual([num_range])

    env.expect('ft.config', 'set', 'on_timeout', 'fail').ok()
    env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'limit', '0', '0') \
        .contains('Timeout limit was reached')

    # a generous per-query TIMEOUT lets the same query complete fully
    res = env.cmd('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout', 10000)
    env.assertEqual(res[0], num_range)

    # test erroneous params
    env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout').error()
    env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout', -1).error()
    env.expect('ft.search', 'myIdx', 'aa*|aa*|aa*|aa* aa*', 'timeout', 'STR').error()

    # check no time w/o sorter/grouper
    res = env.cmd('FT.AGGREGATE', 'myIdx', 'aa*|aa*',
                  'LOAD', 1, 't',
                  'APPLY', 'contains(@t, "a1")', 'AS', 'contain1',
                  'APPLY', 'contains(@t, "a1")', 'AS', 'contain2',
                  'APPLY', 'contains(@t, "a1")', 'AS', 'contain3')
    env.assertEqual(res[0], 1L)

    # test grouper
    env.expect('FT.AGGREGATE', 'myIdx', 'aa*|aa*',
               'LOAD', 1, 't',
               'GROUPBY', 1, '@t',
               'APPLY', 'contains(@t, "a1")', 'AS', 'contain1',
               'APPLY', 'contains(@t, "a1")', 'AS', 'contain2',
               'APPLY', 'contains(@t, "a1")', 'AS', 'contain3') \
       .contains('Timeout limit was reached')

    # test sorter
    env.expect('FT.AGGREGATE', 'myIdx', 'aa*|aa*',
               'LOAD', 1, 't',
               'SORTBY', 1, '@t',
               'APPLY', 'contains(@t, "a1")', 'AS', 'contain1',
               'APPLY', 'contains(@t, "a1")', 'AS', 'contain2',
               'APPLY', 'contains(@t, "a1")', 'AS', 'contain3') \
       .contains('Timeout limit was reached')

    # test cursor: drain all batches and check nothing is lost
    res = env.cmd('FT.AGGREGATE', 'myIdx', 'aa*', 'WITHCURSOR', 'count', 50, 'timeout', 500)
    l = len(res[0]) - 1  # do not count the number of results (the first element in the results)
    cursor = res[1]
    time.sleep(0.01)
    while cursor != 0:
        r, cursor = env.cmd('FT.CURSOR', 'READ', 'myIdx', str(cursor))
        l += (len(r) - 1)
    env.assertEqual(l, 1000)

    # restore old configuration
    env.cmd('ft.config', 'set', 'timeout', '500')
    env.cmd('ft.config', 'set', 'maxprefixexpansions', 200)
def testTimeoutOnSorter(env):
    """With a 1ms timeout, a full SORTBY over 64K docs must return fewer
    results than the total — proving the sorter honors the timeout."""
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    env.cmd('ft.config', 'set', 'timeout', '1')
    pl = conn.pipeline()

    env.cmd('ft.create', 'idx', 'SCHEMA', 'n', 'numeric', 'SORTABLE')

    elements = 1024 * 64
    # pipeline in batches to keep ingestion fast
    for i in range(elements):
        pl.execute_command('hset', i, 'n', i)
        if i % 10000 == 0:
            pl.execute()
    pl.execute()

    res = env.cmd('ft.search', 'idx', '*', 'SORTBY', 'n', 'DESC')
    env.assertGreater(elements, res[0])
def testAlias(env):
    """Full alias lifecycle: ALIASADD/ALIASUPDATE/ALIASDEL, re-pointing an
    alias after index drop, aliasing-an-alias rejection, survival across an
    RDB reload, and aliases working with ft.get/ft.aggregate/ft.del."""
    conn = getConnectionByEnv(env)
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'PREFIX', 1, 'doc1', 'schema', 't1', 'text')
    env.cmd('ft.create', 'idx2', 'ON', 'HASH', 'PREFIX', 1, 'doc2', 'schema', 't1', 'text')

    env.expect('ft.aliasAdd', 'myIndex').raiseError()
    env.expect('ft.aliasupdate', 'fake_alias', 'imaginary_alias', 'Too_many_args').raiseError()
    env.cmd('ft.aliasAdd', 'myIndex', 'idx')
    env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
    r = env.cmd('ft.search', 'idx', 'hello')
    env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
    r2 = env.cmd('ft.search', 'myIndex', 'hello')
    env.assertEqual(r, r2)

    # try to add the same alias again; should be an error
    env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
    env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
    # now delete the index
    env.cmd('ft.drop', 'myIndex')
    # RS2 does not delete doc on ft.drop
    conn.execute_command('DEL', 'doc1')

    # index list should be cleared now. This can be tested by trying to alias
    # the old alias to different index
    env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
    env.cmd('ft.aliasAdd', 'alias2', 'idx2')
    env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
    r = env.cmd('ft.search', 'alias2', 'hello')
    env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)

    # check that aliasing one alias to another returns an error. This will
    # end up being confusing
    env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()

    # check that deleting the alias works as expected
    env.expect('ft.aliasDel', 'myIndex').notRaiseError()
    env.expect('ft.search', 'myIndex', 'foo').raiseError()

    # create a new index and see if we can use the old name
    env.cmd('ft.create', 'idx3', 'ON', 'HASH', 'PREFIX', 1, 'doc3', 'schema', 't1', 'text')
    env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
    env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
    # also, check that this works in rdb save
    for _ in env.retry_with_rdb_reload():
        waitForIndex(env, 'myIndex')
        r = env.cmd('ft.search', 'myIndex', 'foo')
        env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)

    # Check that we can move an alias from one index to another
    env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
    r = env.cmd('ft.search', 'myIndex', "hello")
    env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)

    # Test that things like ft.get, ft.aggregate, etc. work
    r = env.cmd('ft.get', 'myIndex', 'doc2')
    env.assertEqual(['t1', 'hello'], r)
    r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
    env.assertEqual([1, ['t1', 'hello']], r)

    # Test update
    # NOTE(review): these two expect() calls have no chained assertion
    # (.ok()/.raiseError()), so they do not validate the reply — confirm
    # whether that is intentional.
    env.expect('ft.aliasAdd', 'updateIndex', 'idx3')
    env.expect('ft.aliasUpdate', 'updateIndex', 'fake_idx')

    r = env.cmd('ft.del', 'idx2', 'doc2')
    env.assertEqual(1, r)
    env.expect('ft.aliasdel').raiseError()
    env.expect('ft.aliasdel', 'myIndex', 'yourIndex').raiseError()
    env.expect('ft.aliasdel', 'non_existing_alias').raiseError()
def testNoCreate(env):
    """FT.ADD NOCREATE must fail while the doc does not exist, and succeed
    (with REPLACE) once the document has been created."""
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'f1', 'text')
    env.expect('ft.add', 'idx', 'schema', 'f1').raiseError()
    env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
    env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
    env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
    env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
def testSpellCheck(env):
    """FT.SPELLCHECK on a term with no suggestions returns an empty
    suggestion list; FULLSCOREINFO prefixes the reply with a count."""
    env.cmd('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
    env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
    rv = env.cmd('FT.SPELLCHECK', 'idx', '111111')
    env.assertEqual([['TERM', '111111', []]], rv)
    if not env.isCluster():
        rv = env.cmd('FT.SPELLCHECK', 'idx', '111111', 'FULLSCOREINFO')
        env.assertEqual([1L, ['TERM', '111111', []]], rv)
# Standalone functionality
def testIssue484(env):
    """Regression for issue 484: GROUPBY over a value produced by APPLY
    split() must aggregate correctly (the split used to break grouping)."""
    # Issue with split
    # 127.0.0.1:6379> ft.drop productSearch1
    # OK
    # 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
    # OK
    # 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
    env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'ON', 'HASH', 'schema', 'productid',
            'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
    env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
    res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
                  'load', '2', '@color', '@categoryid',
                  'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
                  'GROUPBY', '1', '@value',
                  'REDUCE', 'COUNT', '0', 'as', 'value_count',
                  'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
    expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
    assertAggrowsEqual(env, expected, res)
    for var in expected:
        env.assertIn(var, res)
def testIssue501(env):
    """Regression for issue 501: INCLUDE and EXCLUDE on the same dictionary
    cancel out (no suggestions), and an invalid TERMS keyword errors."""
    env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
    env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
    env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
    rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
                 'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
    env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
    env.assertEqual([], rv[0][2])

    env.expect('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
               'TERMS', 'FAKE_COMMAND', 'slang').error()
def testIssue589(env):
env.cmd('FT.CREATE', 'incidents', 'ON', 'HASH', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
env.expect('ft.create', 'test', 'ON', 'HASH', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
res = env.cmd('ft.search', 'test', '@uuid:{foo}')
env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'a', ['uuid', 'foo', 'title', 'bar']]))
# Server crash on doc names that conflict with index keys #666
# again this test is not relevant cause index is out of key space
# def testIssue666(env):
# # We cannot reliably determine that any error will occur in cluster mode
# # because of the key name
# env.skipOnCluster()
# env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
# env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')
# # crashes here
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
# # try with replace:
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
# 'FIELDS', 'bar', 'four five six')
# with env.assertResponseError():
# env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
# 'FIELDS', 'bar', 'four five six')
# env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
@unstable
def testPrefixDeletedExpansions(env):
env.skipOnCluster()
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'txt1', 'text', 'tag1', 'tag')
# get the number of maximum expansions
maxexpansions = int(env.cmd('ft.config', 'get', 'MAXEXPANSIONS')[0][1])
for x in range(maxexpansions):
env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields',
'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x))
for x in range(maxexpansions):
env.cmd('ft.del', 'idx', 'doc{}'.format(x))
env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ')
# r = env.cmd('ft.search', 'idx', 'term*')
# print(r)
# r = env.cmd('ft.search', 'idx', '@tag1:{tag*}')
# print(r)
tmax = time.time() + 0.5 # 250ms max
iters = 0
while time.time() < tmax:
iters += 1
forceInvokeGC(env, 'idx')
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
if r[0]:
break
# print 'did {} iterations'.format(iters)
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
env.assertEqual(toSortedFlatList([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']]), toSortedFlatList(r))
def testOptionalFilter(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
for x in range(100):
env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x))
env.cmd('ft.explain', 'idx', '(~@t1:word20)')
# print(r)
r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
def testIssue736(env):
#for new RS 2.0 ft.add does not return certian errors
env.skip()
# 1. create the schema, we need a tag field
env.cmd('ft.create', 'idx', 'ON', 'HASH',
'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag')
# 2. create a single document to initialize at least one RSAddDocumentCtx
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar')
# 3. create a second document with many filler fields to force a realloc:
extra_fields = []
for x in range(20):
extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)]
extra_fields += ['n2', 'not-a-number', 't2', 'random, junk']
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields)
def testCriteriaTesterDeactivated():
env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1')
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2')
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey')
env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey')
expected_res = sorted([2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']])
actual_res = sorted(env.cmd('ft.search', 'idx', '(hey hello1)|(hello2 hey)'))
env.assertEqual(expected_res, actual_res)
def testIssue828(env):
env.cmd('ft.create', 'beers', 'ON', 'HASH', 'SCHEMA',
'name', 'TEXT', 'PHONETIC', 'dm:en',
'style', 'TAG', 'SORTABLE',
'abv', 'NUMERIC', 'SORTABLE')
rv = env.cmd("FT.ADD", "beers", "802", "1.0",
"FIELDS", "index", "25", "abv", "0.049",
"name", "Hell or High Watermelon Wheat (2009)",
"style", "Fruit / Vegetable Beer")
env.assertEqual('OK', rv)
def testIssue862(env):
env.cmd('ft.create', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE')
rv = env.cmd("FT.ADD", "idx", "doc1", "1.0", "FIELDS", "test", "foo")
env.assertEqual('OK', rv)
env.cmd("FT.SEARCH", "idx", "foo", 'WITHSORTKEYS')
env.assertTrue(env.isUp())
def testIssue_884(env):
env.expect('FT.create', 'idx', 'ON', 'HASH', 'STOPWORDS', '0', 'SCHEMA', 'title', 'text', 'weight',
'50', 'subtitle', 'text', 'weight', '10', 'author', 'text', 'weight',
'10', 'description', 'text', 'weight', '20').equal('OK')
env.expect('FT.ADD', 'idx', 'doc4', '1.0', 'FIELDS', 'title', 'mohsin conversation the conversation tahir').equal('OK')
env.expect('FT.ADD', 'idx', 'doc3', '1.0', 'FIELDS', 'title', 'Fareham Civilization Church - Sermons and conversations mohsin conversation the').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'title', 'conversation the conversation - a drama about conversation, the science of conversation.').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'title', 'mohsin conversation with the mohsin').equal('OK')
expected = [2L, 'doc2', ['title', 'conversation the conversation - a drama about conversation, the science of conversation.'], 'doc4', ['title', 'mohsin conversation the conversation tahir']]
res = env.cmd('FT.SEARCH', 'idx', '@title:(conversation) (@title:(conversation the conversation))=>{$inorder: true;$slop: 0}')
env.assertEquals(len(expected), len(res))
for v in expected:
env.assertContains(v, res)
def testIssue_848(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test1', 'foo').equal('OK')
env.expect('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'test2', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc2', '1.0', 'FIELDS', 'test1', 'foo', 'test2', 'bar').equal('OK')
env.expect('FT.SEARCH', 'idx', 'foo', 'SORTBY', 'test2', 'ASC').equal([2L, 'doc1', ['test1', 'foo'], 'doc2', ['test2', 'bar', 'test1', 'foo']])
def testMod_309(env):
n = 10000 if VALGRIND else 100000
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
conn = getConnectionByEnv(env)
for i in range(n):
conn.execute_command('HSET', 'doc%d'%i, 'test', 'foo')
res = env.cmd('FT.AGGREGATE', 'idx', 'foo')
env.assertEqual(len(res), n + 1)
# test with cursor
env.skipOnCluster()
res = env.cmd('FT.AGGREGATE', 'idx', 'foo', 'WITHCURSOR')
l = len(res[0]) - 1 # do not count the number of results (the first element in the results)
cursor = res[1]
while cursor != 0:
r, cursor = env.cmd('FT.CURSOR', 'READ', 'idx', str(cursor))
l += len(r) - 1
env.assertEqual(l, n)
def testIssue_865(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', '1', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', '1', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', '1', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'ASC').equal([2, 'doc1', ['1', 'foo1'], 'doc2', ['1', 'foo2']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'DESC').equal([2, 'doc2', ['1', 'foo2'], 'doc1', ['1', 'foo1']])
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', '1', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY', 'bad').error()
env.expect('ft.search', 'idx', 'foo*', 'SORTBY').error()
def testIssue_779(env):
# FT.ADD should return NOADD and not change the doc if value < same_value, but it returns OK and makes the change.
# Note that "greater than" ">" does not have the same bug.
env.cmd('FT.CREATE idx2 ON HASH SCHEMA ot1 TAG')
env.cmd('FT.ADD idx2 doc2 1.0 FIELDS newf CAT ot1 4001')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"]))
# NOADD is expected since 4001 is not < 4000, and no updates to the doc2 is expected as a result
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4000 FIELDS newf DOG ot1 4000', 'NOADD')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "CAT", "ot1", "4001"]))
# OK is expected since 4001 < 4002 and the doc2 is updated
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf DOG ot1 4002').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"]))
# OK is NOT expected since 4002 is not < 4002
# We expect NOADD and doc2 update; however, we get OK and doc2 updated
# After fix, @ot1 implicitly converted to a number, thus we expect NOADD
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if to_number(@ot1)<4002 FIELDS newf FISH ot1 4002').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_str(4002) FIELDS newf FISH ot1 4002').equal('NOADD')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "DOG", "ot1", "4002"]))
# OK and doc2 update is expected since 4002 < 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4003 FIELDS newf HORSE ot1 4003').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "HORSE", "ot1", "4003"]))
# Expect NOADD since 4003 is not > 4003
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4003 FIELDS newf COW ot1 4003').equal('NOADD')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if 4003<@ot1 FIELDS newf COW ot1 4003').equal('NOADD')
# Expect OK and doc2 updated since 4003 > 4002
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1>4002 FIELDS newf PIG ot1 4002').equal('OK')
res = env.cmd('FT.GET idx2 doc2')
env.assertEqual(toSortedFlatList(res), toSortedFlatList(["newf", "PIG", "ot1", "4002"]))
# Syntax errors
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<4-002 FIELDS newf DOG ot1 4002').contains('Syntax error')
env.expect('FT.ADD idx2 doc2 1.0 REPLACE PARTIAL if @ot1<to_number(4-002) FIELDS newf DOG ot1 4002').contains('Syntax error')
def testUnknownSymbolErrorOnConditionalAdd(env):
env.expect('FT.CREATE idx ON HASH SCHEMA f1 TAG f2 NUMERIC NOINDEX f3 TAG NOINDEX').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').ok()
env.expect('ft.add idx doc1 1.0 REPLACE PARTIAL IF @f1<awfwaf FIELDS f1 foo f2 1 f3 boo').error()
def testWrongResultsReturnedBySkipOptimization(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'f1', 'TEXT', 'f2', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'f1', 'foo', 'f2', 'bar').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'f1', 'moo', 'f2', 'foo').equal('OK')
env.expect('ft.search', 'idx', 'foo @f2:moo').equal([0L])
def testErrorWithApply(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo bar').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'split()')[1]
env.assertEqual(str(err[0]), 'Invalid number of arguments for split')
def testSummerizeWithAggregateRaiseError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', '1', 'test',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0').error()
def testSummerizeHighlightParseError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', 'foo2', 'SUMMARIZE', 'FIELDS', 'WITHSCORES').error()
env.expect('ft.search', 'idx', 'foo2', 'HIGHLIGHT', 'FIELDS', 'WITHSCORES').error()
def testCursorBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0',
'WITHCURSOR', 'COUNT', 'BAD').error()
def testLimitBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'LIMIT', '1').error()
def testOnTimeoutBadArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'bad').error()
def testAggregateSortByWrongArgument(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', 'bad').error()
def testAggregateSortByMaxNumberOfFields(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA',
'test1', 'TEXT', 'SORTABLE',
'test2', 'TEXT', 'SORTABLE',
'test3', 'TEXT', 'SORTABLE',
'test4', 'TEXT', 'SORTABLE',
'test5', 'TEXT', 'SORTABLE',
'test6', 'TEXT', 'SORTABLE',
'test7', 'TEXT', 'SORTABLE',
'test8', 'TEXT', 'SORTABLE',
'test9', 'TEXT', 'SORTABLE'
).equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', 'foo2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *['@test%d' % (i + 1) for i in range(9)]).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX', 'bad']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
args = ['@test%d' % (i + 1) for i in range(8)] + ['ASC', 'MAX']
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '9', *args).error()
def testNumericFilterError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad', '2').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', 'bad').error()
env.expect('ft.search', 'idx', '*', 'FILTER', 'test', '0', '2', 'FILTER', 'test', '0', 'bla').error()
def testGeoFilterError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', 'bad' , '2', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , 'bad', '3', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', 'bad', 'km').error()
env.expect('ft.search', 'idx', '*', 'GEOFILTER', 'test', '1' , '2', '3', 'bad').error()
def testReducerError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as').error()
def testGroupbyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE').error()
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test1').error()
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'bad', '0').error()
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '*', 'GROUPBY', '1', '@test', 'REDUCE', 'SUM', '1', '@test1').error()
def testGroupbyWithSort(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc2', '1.0', 'FIELDS', 'test', '1').equal('OK')
env.expect('ft.add', 'idx', 'doc3', '1.0', 'FIELDS', 'test', '2').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'SORTBY', '2', '@test', 'ASC',
'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'as', 'count').equal([2L, ['test', '2', 'count', '1'], ['test', '1', 'count', '2']])
def testApplyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'APPLY', 'split(@test)', 'as').error()
def testLoadError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', 'bad', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', 'test').error()
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '2', '@test').error()
def testMissingArgsError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx').error()
def testUnexistsScorer(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT', 'SORTABLE').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'SCORER', 'bad').error()
def testHighlightWithUnknowsProperty(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'HIGHLIGHT', 'FIELDS', '1', 'test1').error()
def testBadFilterExpression(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', 'blabla').error()
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'FILTER', '@test1 > 1').error()
def testWithSortKeysOnNoneSortableValue(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHSORTKEYS', 'SORTBY', 'test').equal([1L, 'doc1', '$foo', ['test', 'foo']])
def testWithWithRawIds(env):
env.skipOnCluster() # todo: remove once fix on coordinator
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
waitForIndex(env, 'idx')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.search', 'idx', '*', 'WITHRAWIDS').equal([1L, 'doc1', 1L, ['test', 'foo']])
def testUnkownIndex(env):
env.skipOnCluster() # todo: remove once fix on coordinator
env.expect('ft.aggregate').error()
env.expect('ft.aggregate', 'idx', '*').error()
env.expect('ft.aggregate', 'idx', '*', 'WITHCURSOR').error()
def testExplainError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('FT.EXPLAIN', 'idx', '(').error()
def testBadCursor(env):
env.expect('FT.CURSOR', 'READ', 'idx').error()
env.expect('FT.CURSOR', 'READ', 'idx', '1111').error()
env.expect('FT.CURSOR', 'READ', 'idx', 'bad').error()
env.expect('FT.CURSOR', 'DROP', 'idx', '1111').error()
env.expect('FT.CURSOR', 'bad', 'idx', '1111').error()
def testGroupByWithApplyError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('FT.AGGREGATE', 'idx', '*', 'APPLY', 'split()', 'GROUPBY', '1', '@test', 'REDUCE', 'COUNT', '0', 'AS', 'count')[1]
assertEqualIgnoreCluster(env, str(err[0]), 'Invalid number of arguments for split')
def testSubStrErrors(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a', 'APPLY', 'substr(@a,0,4)')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,-2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",3,1000)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test",-1,2)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr(1)', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "test", "test")', 'as', 'a')
env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test2', 'APPLY', 'substr("test", "-1", "-1")', 'as', 'a')
env.assertTrue(env.isUp())
def testToUpperLower(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower("FOO")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'foo']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(@test)', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper("foo")', 'as', 'a').equal([1L, ['test', 'foo', 'a', 'FOO']])
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1)', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'upper(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'lower(1,2)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testMatchedTerms(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
env.expect('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', None]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms()', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms(-100)', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'matched_terms("test")', 'as', 'a').equal([1L, ['test', 'foo', 'a', ['foo']]])
def testStrFormatError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'TEXT').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', 'foo').equal('OK')
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%s")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format("%b", "test")', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'format(5)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', 'foo', 'LOAD', '1', '@test', 'APPLY', 'upper(1)', 'as', 'b', 'APPLY', 'format("%s", @b)', 'as', 'a').equal([1L, ['test', 'foo', 'b', None, 'a', '(null)']])
# working example
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%%s-test", "test")', 'as', 'a').equal([1L, ['a', '%s-test']])
env.expect('ft.aggregate', 'idx', 'foo', 'APPLY', 'format("%s-test", "test")', 'as', 'a').equal([1L, ['a', 'test-test']])
def testTimeFormatError(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
if not env.isCluster(): # todo: remove once fix on coordinator
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test1)', 'as', 'a').error()
env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test)', 'as', 'a')
env.assertTrue(env.isUp())
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, 4)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt("awfawf")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(235325153152356426246246246254)', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'timefmt(@test, "%s")' % ('d' * 2048), 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'hour("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'minute("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'day("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'month("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofweek("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofmonth("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'dayofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'year("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("not_number")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testMonthOfYear(env):
env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '4']])
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear(@test, 112)', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear()', 'as', 'a')[1]
assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'monthofyear("bad")', 'as', 'a').equal([1L, ['test', '12234556', 'a', None]])
def testParseTime(env):
    """parsetime() in APPLY: arity/type errors and a valid strptime-style conversion."""
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'test', 'TAG')
    conn.execute_command('HSET', 'doc1', 'test', '20210401')
    # check for errors
    err = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime()', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    err = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime(11)', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    err = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime(11,22)', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    # valid test: "20210401" parsed with %Y%m%d -> unix timestamp (UTC midnight)
    res = conn.execute_command('ft.aggregate', 'idx', '*', 'LOAD', '1', '@test', 'APPLY', 'parsetime(@test, "%Y%m%d")', 'as', 'a')
    assertEqualIgnoreCluster(env, res, [1L, ['test', '20210401', 'a', '1617235200']])
def testMathFunctions(env):
    """exp()/ceil() in APPLY, including float overflow producing 'inf'."""
    env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
    env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
    # exp(12234556) overflows a double -> 'inf'
    env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'exp(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', 'inf']])
    env.expect('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'ceil(@test)', 'as', 'a').equal([1L, ['test', '12234556', 'a', '12234556']])
# NOTE(review): name contains a typo ("Opperation"); kept as-is since test
# runners/CI may reference tests by name.
def testErrorOnOpperation(env):
    """APPLY expressions that mix multi-valued/invalid operands must raise errors."""
    env.expect('FT.CREATE', 'idx', 'ON', 'HASH', 'SCHEMA', 'test', 'NUMERIC').equal('OK')
    env.expect('ft.add', 'idx', 'doc1', '1.0', 'FIELDS', 'test', '12234556').equal('OK')
    err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '1 + split()', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split() + 1', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '"bad" + "bad"', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', 'split("bad" + "bad")', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'LOAD', '1', '@test', 'APPLY', '!(split("bad" + "bad"))', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
    # negating a field that was not loaded into the pipeline
    err = env.cmd('ft.aggregate', 'idx', '@test:[0..inf]', 'APPLY', '!@test', 'as', 'a')[1]
    assertEqualIgnoreCluster(env, type(err[0]), redis.exceptions.ResponseError)
def testSortkeyUnsortable(env):
    """WITHSORTKEYS on a non-SORTABLE field still yields a sort key ($foo)."""
    env.cmd('ft.create', 'idx', 'ON', 'HASH', 'schema', 'test', 'text')
    env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'test', 'foo')
    rv = env.cmd('ft.aggregate', 'idx', 'foo', 'withsortkeys',
                 'load', '1', '@test',
                 'sortby', '1', '@test')
    env.assertEqual([1, '$foo', ['test', 'foo']], rv)
def testIssue919(env):
    """SORTBY on a sortable field missing from the doc must not crash or drop the doc."""
    # This only works if the missing field has a lower sortable index
    # than the present field..
    env.cmd('ft.create', 'idx', 'ON', 'HASH',
            'schema', 't1', 'text', 'sortable', 'n1', 'numeric', 'sortable')
    env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 'n1', 42)
    rv = env.cmd('ft.search', 'idx', '*', 'sortby', 't1', 'desc')
    env.assertEqual([1L, 'doc1', ['n1', '42']], rv)
def testIssue1074(env):
    """Sortable numeric fields keep their original string form in results."""
    # Ensure that sortable fields are returned in their string form from the
    # document
    env.cmd('ft.create', 'idx', 'ON', 'HASH',
            'schema', 't1', 'text', 'n1', 'numeric', 'sortable')
    env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 'n1', 1581011976800)
    rv = env.cmd('ft.search', 'idx', '*', 'sortby', 'n1')
    env.assertEqual([1L, 'doc1', ['n1', '1581011976800', 't1', 'hello']], rv)
def testIssue1085(env):
    """Repeated REPLACE of the same doc followed by GC must keep numeric search correct."""
    env.skipOnCluster()
    env.cmd('FT.CREATE issue1085 ON HASH SCHEMA foo TEXT SORTABLE bar NUMERIC SORTABLE')
    for i in range(1, 10):
        env.cmd('FT.ADD issue1085 document_%d 1 REPLACE FIELDS foo foo%d bar %d' % (i, i, i))
    res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'document_8', ['foo', 'foo8', 'bar', '8']]))
    # re-add the same document repeatedly to create garbage entries, then GC
    for i in range(1, 10):
        env.cmd('FT.ADD issue1085 document_8 1 REPLACE FIELDS foo foo8 bar 8')
    forceInvokeGC(env, 'issue1085')
    res = env.cmd('FT.SEARCH', 'issue1085', '@bar:[8 8]')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList([1, 'document_8', ['foo', 'foo8', 'bar', '8']]))
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx

    Returns an iterator of n-tuples; the last tuple is padded with
    *fillvalue* if the input length is not a multiple of *n*.
    """
    # izip_longest exists only on Python 2; fall back to the Python 3 name
    # so this helper works under either interpreter.
    try:
        from itertools import izip_longest as _zip_longest
    except ImportError:  # Python 3
        from itertools import zip_longest as _zip_longest
    # n references to the SAME iterator: zip_longest pulls n items per tuple.
    args = [iter(iterable)] * n
    return _zip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
    """Convert a flat [key, value, key, value, ...] reply list into a dict."""
    result = {}
    for pos in range(0, len(r), 2):
        result[r[pos]] = r[pos + 1]
    return result
def testInfoError(env):
    """FT.INFO on a non-existent index returns an error."""
    env.expect('ft.info', 'no_idx').error()
def testIndexNotRemovedFromCursorListAfterRecreated(env):
    """A failed duplicate FT.CREATE must not break the cursor list of the live index."""
    env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').ok()
    env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
    # second CREATE fails (index exists) ...
    env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT').error()
    # ... but WITHCURSOR still works on the original index
    env.expect('FT.AGGREGATE idx * WITHCURSOR').equal([[0], 0])
def testHindiStemmer(env):
    """Hindi stemming: different inflections of the same word match each other."""
    env.cmd('FT.CREATE', 'idxTest', 'LANGUAGE_FIELD', '__language', 'SCHEMA', 'body', 'TEXT')
    env.cmd('FT.ADD', 'idxTest', 'doc1', 1.0, 'LANGUAGE', 'hindi', 'FIELDS', 'body', u'अँगरेजी अँगरेजों अँगरेज़')
    res = env.cmd('FT.SEARCH', 'idxTest', u'अँगरेज़')
    # fields come back as a flat [name, value, ...] list
    res1 = {res[2][i]:res[2][i + 1] for i in range(0, len(res[2]), 2)}
    env.assertEqual(u'अँगरेजी अँगरेजों अँगरेज़', unicode(res1['body'], 'utf-8'))
def testMOD507(env):
    """Searching with SUMMARIZE/HIGHLIGHT after all docs were deleted must not crash."""
    env.skipOnCluster()
    env.expect('ft.create idx ON HASH SCHEMA t1 TEXT').ok()
    for i in range(50):
        env.expect('ft.add idx doc-%d 1.0 FIELDS t1 foo' % i).ok()
    for i in range(50):
        env.expect('del doc-%d' % i).equal(1)
    res = env.cmd('FT.SEARCH', 'idx', '*', 'WITHSCORES', 'SUMMARIZE', 'FRAGS', '1', 'LEN', '25', 'HIGHLIGHT', 'TAGS', "<span style='background-color:yellow'>", "</span>")
    # from redisearch 2.0, docs are removed from index when `DEL` is called
    env.assertEqual(len(res), 1)
# NOTE(review): name contains a typo ("Unseported"); kept as-is since test
# runners/CI may reference tests by name.
def testUnseportedSortableTypeErrorOnTags(env):
    """REPLACE PARTIAL on NOINDEX sortable TAG/TEXT fields updates the hash and results."""
    env.skipOnCluster()
    env.expect('FT.CREATE idx ON HASH SCHEMA f1 TEXT SORTABLE f2 NUMERIC SORTABLE NOINDEX f3 TAG SORTABLE NOINDEX f4 TEXT SORTABLE NOINDEX').ok()
    env.expect('FT.ADD idx doc1 1.0 FIELDS f1 foo1 f2 1 f3 foo1 f4 foo1').ok()
    env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL FIELDS f2 2 f3 foo2 f4 foo2').ok()
    res = env.cmd('HGETALL doc1')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList(['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2', '__score', '1.0']))
    res = env.cmd('FT.SEARCH idx *')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['f1', 'foo1', 'f2', '2', 'f3', 'foo2', 'f4', 'foo2']]))
def testIssue1158(env):
    """Short-circuit evaluation of ||/&& in FT.ADD IF conditions."""
    env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT txt3 TEXT')
    env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 10 txt2 num1')
    res = env.cmd('FT.GET idx doc1')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '10', 'txt2', 'num1']))
    # only 1st checked (2nd returns an error)
    env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt1||to_number(@txt2)<5 FIELDS txt1 5').equal('OK')
    env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if @txt3&&to_number(@txt2)<5 FIELDS txt1 5').equal('NOADD')
    # both are checked
    env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
    env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11||to_number(@txt1)<42 FIELDS txt2 num2').equal('OK')
    env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)>42 FIELDS txt2 num2').equal('NOADD')
    env.expect('FT.ADD idx doc1 1.0 REPLACE PARTIAL if to_number(@txt1)>11&&to_number(@txt1)<42 FIELDS txt2 num2').equal('NOADD')
    res = env.cmd('FT.GET idx doc1')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList(['txt1', '5', 'txt2', 'num2']))
def testIssue1159(env):
    """Bulk-adding many docs with the same TAG value must not crash."""
    env.cmd('FT.CREATE idx ON HASH SCHEMA f1 TAG')
    for i in range(1000):
        env.cmd('FT.add idx doc%d 1.0 FIELDS f1 foo' % i)
def testIssue1169(env):
    """FIRST_VALUE reducer on a missing field returns None, not an error."""
    env.cmd('FT.CREATE idx ON HASH SCHEMA txt1 TEXT txt2 TEXT')
    env.cmd('FT.ADD idx doc1 1.0 FIELDS txt1 foo')
    env.expect('FT.AGGREGATE idx foo GROUPBY 1 @txt1 REDUCE FIRST_VALUE 1 @txt2 as test').equal([1L, ['txt1', 'foo', 'test', None]])
def testIssue1184(env):
    """inverted_sz_mb / num_records go back to 0 after deleting a doc and running GC.

    Repeated for each field type (TEXT, NUMERIC, TAG) with a fresh index.
    """
    env.skipOnCluster()
    field_types = ['TEXT', 'NUMERIC', 'TAG']
    # clean on every GC run so sizes drop immediately
    env.assertOk(env.execute_command('ft.config', 'set', 'FORK_GC_CLEAN_THRESHOLD', 0))
    for ft in field_types:
        env.assertOk(env.execute_command('FT.CREATE idx ON HASH SCHEMA field ' + ft))
        res = env.execute_command('ft.info', 'idx')
        d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
        env.assertEqual(d['inverted_sz_mb'], '0')
        env.assertEqual(d['num_records'], '0')
        value = '42'
        env.assertOk(env.execute_command('FT.ADD idx doc0 1 FIELD field ' + value))
        doc = env.cmd('FT.SEARCH idx *')
        env.assertEqual(doc, [1L, 'doc0', ['field', value]])
        res = env.execute_command('ft.info', 'idx')
        d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
        env.assertGreater(d['inverted_sz_mb'], '0')
        env.assertEqual(d['num_records'], '1')
        # delete + GC should reclaim everything
        env.assertEqual(env.execute_command('FT.DEL idx doc0'), 1)
        forceInvokeGC(env, 'idx')
        res = env.execute_command('ft.info', 'idx')
        d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
        env.assertEqual(d['inverted_sz_mb'], '0')
        env.assertEqual(d['num_records'], '0')
        env.cmd('FT.DROP idx')
        env.cmd('DEL doc0')
def testIndexListCommand(env):
    """FT._LIST tracks index creation and deletion."""
    env.expect('FT.CREATE idx1 ON HASH SCHEMA n NUMERIC').ok()
    env.expect('FT.CREATE idx2 ON HASH SCHEMA n NUMERIC').ok()
    res = env.cmd('FT._LIST')
    # compare as sets: FT._LIST ordering is unspecified
    env.assertEqual(set(res), set(['idx1', 'idx2']))
    env.expect('FT.DROP idx1').ok()
    env.expect('FT._LIST').equal(['idx2'])
    env.expect('FT.CREATE idx3 ON HASH SCHEMA n NUMERIC').ok()
    res = env.cmd('FT._LIST')
    env.assertEqual(set(res), set(['idx2', 'idx3']))
def testIssue1208(env):
    """Scientific-notation numbers in ranges, filters and FT.ADD IF conditions."""
    env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC')
    env.cmd('FT.ADD idx doc1 1 FIELDS n 1.0321e5')
    env.cmd('FT.ADD idx doc2 1 FIELDS n 101.11')
    env.cmd('FT.ADD idx doc3 1 FIELDS n 0.0011')
    env.expect('FT.SEARCH', 'idx', '@n:[1.1432E3 inf]').equal([1L, 'doc1', ['n', '1.0321e5']])
    env.expect('FT.SEARCH', 'idx', '@n:[-1.12E-3 1.12E-1]').equal([1L, 'doc3', ['n', '0.0011']])
    res = [3L, 'doc1', ['n', '1.0321e5'], 'doc2', ['n', '101.11'], 'doc3', ['n', '0.0011']]
    env.expect('FT.SEARCH', 'idx', '@n:[-inf inf]').equal(res)
    env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n>42e3 FIELDS n 100').equal('NOADD')
    env.expect('FT.ADD idx doc3 1 REPLACE PARTIAL IF @n<42e3 FIELDS n 100').ok()
    # print env.cmd('FT.SEARCH', 'idx', '@n:[-inf inf]')
# NOTE(review): name contains a typo ("Sensetive"); kept as-is since test
# runners/CI may reference tests by name.
def testFieldsCaseSensetive(env):
    """Field names are case sensitive across search, filters, RETURN, SORTBY and aggregations."""
    # this test will not pass on coordinator coorently as if one shard return empty results coordinator
    # will not reflect the errors
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC f TEXT t TAG g GEO')
    # make sure text fields are case sesitive
    conn.execute_command('hset', 'doc1', 'F', 'test')
    conn.execute_command('hset', 'doc2', 'f', 'test')
    env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']])
    env.expect('ft.search idx @F:test').equal([0])
    # make sure numeric fields are case sesitive
    conn.execute_command('hset', 'doc3', 'N', '1.0')
    conn.execute_command('hset', 'doc4', 'n', '1.0')
    env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']])
    env.expect('ft.search', 'idx', '@N:[0 2]').equal([0])
    # make sure tag fields are case sesitive
    conn.execute_command('hset', 'doc5', 'T', 'tag')
    conn.execute_command('hset', 'doc6', 't', 'tag')
    env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']])
    env.expect('ft.search', 'idx', '@T:{tag}').equal([0])
    # make sure geo fields are case sesitive
    conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244')
    conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244')
    env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']])
    env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0])
    # make sure search filter are case sensitive
    env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']])
    env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0])
    # make sure RETURN are case sensitive
    env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']])
    env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []])
    # make sure SORTBY are case sensitive
    conn.execute_command('hset', 'doc7', 'n', '1.1')
    env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']])
    env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema')
    # make sure aggregation load are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n').equal([1L, ['n', '1'], ['n', '1.1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@N').equal([1L, [], []])
    # make sure aggregation apply are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline')
    # make sure aggregation filter are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@n==1.0').equal([1L, ['n', '1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'filter', '@N==1.0').error().contains('not loaded in pipeline')
    # make sure aggregation groupby are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property')
    # make sure aggregation sortby are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'LOAD', '1', '@n', 'sortby', '1', '@N').error().contains('not loaded')
# NOTE(review): name contains a typo ("Sensetive"); kept as-is since test
# runners/CI may reference tests by name.
def testSortedFieldsCaseSensetive(env):
    """Same case-sensitivity checks as testFieldsCaseSensetive, on SORTABLE fields."""
    # this test will not pass on coordinator coorently as if one shard return empty results coordinator
    # will not reflect the errors
    env.skipOnCluster()
    conn = getConnectionByEnv(env)
    env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE f TEXT SORTABLE t TAG SORTABLE g GEO SORTABLE')
    # make sure text fields are case sesitive
    conn.execute_command('hset', 'doc1', 'F', 'test')
    conn.execute_command('hset', 'doc2', 'f', 'test')
    env.expect('ft.search idx @f:test').equal([1L, 'doc2', ['f', 'test']])
    env.expect('ft.search idx @F:test').equal([0])
    # make sure numeric fields are case sesitive
    conn.execute_command('hset', 'doc3', 'N', '1.0')
    conn.execute_command('hset', 'doc4', 'n', '1.0')
    env.expect('ft.search', 'idx', '@n:[0 2]').equal([1L, 'doc4', ['n', '1.0']])
    env.expect('ft.search', 'idx', '@N:[0 2]').equal([0])
    # make sure tag fields are case sesitive
    conn.execute_command('hset', 'doc5', 'T', 'tag')
    conn.execute_command('hset', 'doc6', 't', 'tag')
    env.expect('ft.search', 'idx', '@t:{tag}').equal([1L, 'doc6', ['t', 'tag']])
    env.expect('ft.search', 'idx', '@T:{tag}').equal([0])
    # make sure geo fields are case sesitive
    conn.execute_command('hset', 'doc8', 'G', '-113.524,53.5244')
    conn.execute_command('hset', 'doc9', 'g', '-113.524,53.5244')
    env.expect('ft.search', 'idx', '@g:[-113.52 53.52 20 mi]').equal([1L, 'doc9', ['g', '-113.524,53.5244']])
    env.expect('ft.search', 'idx', '@G:[-113.52 53.52 20 mi]').equal([0])
    # make sure search filter are case sensitive
    env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'n', 0, 2).equal([1L, 'doc4', ['n', '1.0']])
    env.expect('ft.search', 'idx', '@n:[0 2]', 'FILTER', 'N', 0, 2).equal([0])
    # make sure RETURN are case sensitive
    env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'n').equal([1L, 'doc4', ['n', '1']])
    env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '1', 'N').equal([1L, 'doc4', []])
    # make sure SORTBY are case sensitive
    conn.execute_command('hset', 'doc7', 'n', '1.1')
    env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'n').equal([2L, 'doc4', ['n', '1.0'], 'doc7', ['n', '1.1']])
    env.expect('ft.search', 'idx', '@n:[0 2]', 'SORTBY', 'N').error().contains('not loaded nor in schema')
    # make sure aggregation apply are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@n', 'as', 'r').equal([1L, ['n', '1', 'r', '1'], ['n', '1.1', 'r', '1.1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'apply', '@N', 'as', 'r').error().contains('not loaded in pipeline')
    # make sure aggregation filter are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@n==1.0').equal([1L, ['n', '1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'filter', '@N==1.0').error().contains('not loaded in pipeline')
    # make sure aggregation groupby are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@n', 'reduce', 'count', 0, 'as', 'count').equal([2L, ['n', '1', 'count', '1'], ['n', '1.1', 'count', '1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'groupby', '1', '@N', 'reduce', 'count', 0, 'as', 'count').error().contains('No such property')
    # make sure aggregation sortby are case sensitive
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@n').equal([2L, ['n', '1'], ['n', '1.1']])
    env.expect('ft.aggregate', 'idx', '@n:[0 2]', 'sortby', '1', '@N').error().contains('not loaded')
def testScoreLangPayloadAreReturnedIfCaseNotMatchToSpecialFields(env):
    """Hash fields that only case-differ from __language/__score/__payload are plain fields."""
    conn = getConnectionByEnv(env)
    env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE')
    conn.execute_command('hset', 'doc1', 'n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10')
    res = env.cmd('ft.search', 'idx', '@n:[0 2]')
    env.assertEqual(toSortedFlatList(res), toSortedFlatList([1L, 'doc1', ['n', '1.0', '__Language', 'eng', '__Score', '1', '__Payload', '10']]))
def testReturnSameFieldDifferentCase(env):
    """RETURN distinguishes 'n' and 'N' as two separate schema fields."""
    conn = getConnectionByEnv(env)
    env.cmd('FT.CREATE idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE')
    conn.execute_command('hset', 'doc1', 'n', '1.0', 'N', '2.0')
    env.expect('ft.search', 'idx', '@n:[0 2]', 'RETURN', '2', 'n', 'N').equal([1L, 'doc1', ['n', '1', 'N', '2']])
def testCreateIfNX(env):
    """FT._CREATEIFNX is idempotent: a second identical call succeeds."""
    env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok()
    env.expect('FT._CREATEIFNX idx ON HASH SCHEMA n NUMERIC SORTABLE N NUMERIC SORTABLE').ok()
def testDropIfX(env):
    """FT._DROPIFX succeeds even when the index does not exist."""
    env.expect('FT._DROPIFX idx').ok()
def testDeleteIfX(env):
    """FT._DROPINDEXIFX succeeds even when the index does not exist."""
    env.expect('FT._DROPINDEXIFX idx').ok()
def testAlterIfNX(env):
    """FT._ALTERIFNX adds a field once; a repeated call is a no-op."""
    env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok()
    env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok()
    env.expect('FT._ALTERIFNX idx SCHEMA ADD n1 NUMERIC').ok()
    res = env.cmd('ft.info idx')
    res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}['attributes']
    env.assertEqual(res, [['identifier', 'n', 'attribute', 'n', 'type', 'NUMERIC'],
                          ['identifier', 'n1', 'attribute', 'n1', 'type', 'NUMERIC']])
def testAliasAddIfNX(env):
    """FT._ALIASADDIFNX is idempotent for an existing alias."""
    env.expect('FT.CREATE idx ON HASH SCHEMA n NUMERIC').ok()
    env.expect('FT._ALIASADDIFNX a1 idx').ok()
    env.expect('FT._ALIASADDIFNX a1 idx').ok()
def testAliasDelIfX(env):
    """FT._ALIASDELIFX succeeds even when the alias does not exist."""
    env.expect('FT._ALIASDELIFX a1').ok()
def testEmptyDoc(env):
    """Deleting hashes removes the corresponding docs from search results."""
    conn = getConnectionByEnv(env)
    env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
    env.expect('FT.ADD idx doc1 1 FIELDS t foo').ok()
    env.expect('FT.ADD idx doc2 1 FIELDS t foo').ok()
    env.expect('FT.ADD idx doc3 1 FIELDS t foo').ok()
    env.expect('FT.ADD idx doc4 1 FIELDS t foo').ok()
    env.expect('FT.SEARCH idx * limit 0 0').equal([4])
    conn.execute_command('DEL', 'doc1')
    conn.execute_command('DEL', 'doc3')
    env.expect('FT.SEARCH idx *').equal([2L, 'doc2', ['t', 'foo'], 'doc4', ['t', 'foo']])
def testRED47209(env):
    """WITHSORTKEYS without SORTBY returns a None sort key (standalone only)."""
    conn = getConnectionByEnv(env)
    env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
    conn.execute_command('hset', 'doc1', 't', 'foo')
    if env.isCluster():
        # on cluster we have WITHSCORES set unconditionally for FT.SEARCH
        res = [1L, 'doc1', ['t', 'foo']]
    else:
        res = [1L, 'doc1', None, ['t', 'foo']]
    env.expect('FT.SEARCH idx foo WITHSORTKEYS LIMIT 0 1').equal(res)
def testInvertedIndexWasEntirelyDeletedDuringCursor():
    """Reading a cursor after GC removed its whole inverted index returns an empty page."""
    # aggressive GC so a single invocation fully cleans the term
    env = Env(moduleArgs='GC_POLICY FORK FORK_GC_CLEAN_THRESHOLD 1')
    env.skipOnCluster()
    env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
    env.expect('HSET doc1 t foo').equal(1)
    env.expect('HSET doc2 t foo').equal(1)
    res, cursor = env.cmd('FT.AGGREGATE idx foo WITHCURSOR COUNT 1')
    env.assertEqual(res, [1L, []])
    # delete both documents and run the GC to clean 'foo' inverted index
    env.expect('DEL doc1').equal(1)
    env.expect('DEL doc2').equal(1)
    forceInvokeGC(env, 'idx')
    # make sure the inverted index was cleaned
    env.expect('FT.DEBUG DUMP_INVIDX idx foo').error().contains('not find the inverted index')
    # read from the cursor
    res, cursor = env.cmd('FT.CURSOR READ idx %d' % cursor)
    env.assertEqual(res, [0L])
    env.assertEqual(cursor, 0)
def testNegativeOnly(env):
    """A purely negative query (-bar) matches docs that lack the term."""
    conn = getConnectionByEnv(env)
    env.expect('FT.CREATE idx SCHEMA t TEXT').ok()
    conn.execute_command('HSET', 'doc1', 'not', 'foo')
    env.expect('FT.SEARCH idx *').equal([1L, 'doc1', ['not', 'foo']])
    env.expect('FT.SEARCH', 'idx', '-bar').equal([1L, 'doc1', ['not', 'foo']])
def testNotOnly(env):
    """A purely NOT query (!world) excludes docs whose indexed field has the term."""
    conn = getConnectionByEnv(env)
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'txt1', 'TEXT')
    conn.execute_command('HSET', 'a', 'txt1', 'hello', 'txt2', 'world')
    conn.execute_command('HSET', 'b', 'txt1', 'world', 'txt2', 'hello')
    # only txt1 is indexed, so !world excludes 'a' (txt1=hello ... wait) -- see assertion
    env.assertEqual(toSortedFlatList(env.cmd('ft.search idx !world')), toSortedFlatList([1L, 'b', ['txt1', 'world', 'txt2', 'hello']]))
def testServerVersion(env):
    """Sanity check: the Redis server under test is at least 6.0.0."""
    env.assertTrue(server_version_at_least(env, "6.0.0"))
def testSchemaWithAs(env):
    """SCHEMA ... AS aliasing: queries use the alias, RETURN/LOAD use path or alias."""
    conn = getConnectionByEnv(env)
    # sanity
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'txt', 'AS', 'foo', 'TEXT')
    conn.execute_command('HSET', 'a', 'txt', 'hello')
    conn.execute_command('HSET', 'b', 'foo', 'world')
    # alias 'foo' is the query name; raw path 'txt' is not queryable
    for _ in env.retry_with_rdb_reload():
        env.expect('ft.search idx @txt:hello').equal([0L])
        env.expect('ft.search idx @txt:world').equal([0L])
        env.expect('ft.search idx @foo:hello').equal([1L, 'a', ['txt', 'hello']])
        env.expect('ft.search idx @foo:world').equal([0L])
    # RETURN from schema
    env.expect('ft.search idx hello RETURN 1 txt').equal([1L, 'a', ['txt', 'hello']])
    env.expect('ft.search idx hello RETURN 1 foo').equal([1L, 'a', ['foo', 'hello']])
    env.expect('ft.search idx hello RETURN 3 txt AS baz').equal([1L, 'a', ['baz', 'hello']])
    env.expect('ft.search idx hello RETURN 3 foo AS baz').equal([1L, 'a', ['baz', 'hello']])
    env.expect('ft.search idx hello RETURN 6 txt AS baz txt AS bar').equal([1L, 'a', ['baz', 'hello', 'bar', 'hello']])
    env.expect('ft.search idx hello RETURN 6 txt AS baz txt AS baz').equal([1L, 'a', ['baz', 'hello']])
    # RETURN outside of schema
    conn.execute_command('HSET', 'a', 'not_in_schema', '42')
    res = conn.execute_command('HGETALL', 'a')
    env.assertEqual(res, {'txt': 'hello', 'not_in_schema': '42'})
    env.expect('ft.search idx hello RETURN 3 not_in_schema AS txt2').equal([1L, 'a', ['txt2', '42']])
    env.expect('ft.search idx hello RETURN 1 not_in_schema').equal([1L, 'a', ['not_in_schema', '42']])
    env.expect('ft.search idx hello').equal([1L, 'a', ['txt', 'hello', 'not_in_schema', '42']])
    env.expect('ft.search idx hello RETURN 3 not_exist as txt2').equal([1L, 'a', []])
    env.expect('ft.search idx hello RETURN 1 not_exist').equal([1L, 'a', []])
    env.expect('ft.search idx hello RETURN 3 txt as as').error().contains('Alias for RETURN cannot be `AS`')
    # LOAD for FT.AGGREGATE
    # for path - can rename
    env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '1', '@txt').equal([1L, ['txt', 'hello']])
    env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '3', '@txt', 'AS', 'txt1').equal([1L, ['txt1', 'hello']])
    # for name - cannot rename
    env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '1', '@foo').equal([1L, ['foo', 'hello']])
    env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '3', '@foo', 'AS', 'foo1').equal([1L, ['foo1', 'hello']])
    # for for not in schema - can rename
    env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '1', '@not_in_schema').equal([1L, ['not_in_schema', '42']])
    env.expect('ft.aggregate', 'idx', 'hello', 'LOAD', '3', '@not_in_schema', 'AS', 'NIS').equal([1L, ['NIS', '42']])
    conn.execute_command('HDEL', 'a', 'not_in_schema')
def testSchemaWithAs_Alter(env):
    """FT.ALTER with AS: the new alias is queryable, the raw path is not."""
    conn = getConnectionByEnv(env)
    # sanity
    conn.execute_command('FT.CREATE', 'idx', 'SCHEMA', 'txt', 'AS', 'foo', 'TEXT')
    conn.execute_command('HSET', 'a', 'txt', 'hello')
    conn.execute_command('HSET', 'b', 'foo', 'world')
    # FT.ALTER
    conn.execute_command('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'foo', 'AS', 'bar', 'TEXT')
    waitForIndex(env, 'idx')
    env.expect('ft.search idx @bar:hello').equal([0L])
    env.expect('ft.search idx @bar:world').equal([1L, 'b', ['foo', 'world']])
    env.expect('ft.search idx @foo:world').equal([0L])
def testSchemaWithAs_Duplicates(env):
    """Duplicate alias names are rejected; duplicate paths with distinct aliases are allowed."""
    conn = getConnectionByEnv(env)
    conn.execute_command('HSET', 'a', 'txt', 'hello')
    # Error if field name is duplicated
    res = env.expect('FT.CREATE', 'conflict1', 'SCHEMA', 'txt1', 'AS', 'foo', 'TEXT', 'txt2', 'AS', 'foo', 'TAG') \
        .error().contains('Duplicate field in schema - foo')
    # Success if field path is duplicated
    res = env.expect('FT.CREATE', 'conflict2', 'SCHEMA', 'txt', 'AS', 'foo1', 'TEXT',
                     'txt', 'AS', 'foo2', 'TEXT').ok()
    waitForIndex(env, 'conflict2')
    env.expect('ft.search conflict2 @foo1:hello').equal([1L, 'a', ['txt', 'hello']])
    env.expect('ft.search conflict2 @foo2:hello').equal([1L, 'a', ['txt', 'hello']])
    env.expect('ft.search conflict2 @foo1:world').equal([0L])
    env.expect('ft.search conflict2 @foo2:world').equal([0L])
def testMod1407(env):
    """GROUPBY on properties without '@' prefix must not crash (MOD-1407)."""
    conn = getConnectionByEnv(env)
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 'limit', 'TEXT', 'LimitationTypeID', 'TAG', 'LimitationTypeDesc', 'TEXT').ok()
    conn.execute_command('HSET', 'doc1', 'limit', 'foo1', 'LimitationTypeID', 'boo1', 'LimitationTypeDesc', 'doo1')
    conn.execute_command('HSET', 'doc2', 'limit', 'foo2', 'LimitationTypeID', 'boo2', 'LimitationTypeDesc', 'doo2')
    env.expect('FT.AGGREGATE', 'idx', '*', 'SORTBY', '3', '@limit', '@LimitationTypeID', 'ASC').equal([2L, ['limit', 'foo1', 'LimitationTypeID', 'boo1'], ['limit', 'foo2', 'LimitationTypeID', 'boo2']])
    # make sure the crashed query is not crashing anymore
    env.expect('FT.AGGREGATE', 'idx', '*', 'GROUPBY', '2', 'LLimitationTypeID', 'LLimitationTypeDesc', 'REDUCE', 'COUNT', '0')
    # make sure correct query not crashing and return the right results
    env.expect('FT.AGGREGATE', 'idx', '*', 'GROUPBY', '2', '@LimitationTypeID', '@LimitationTypeDesc', 'REDUCE', 'COUNT', '0').equal([2L, ['LimitationTypeID', 'boo2', 'LimitationTypeDesc', 'doo2', '__generated_aliascount', '1'], ['LimitationTypeID', 'boo1', 'LimitationTypeDesc', 'doo1', '__generated_aliascount', '1']])
def testMod1452(env):
    """Cluster-only crash check: FIRST_VALUE BY on non-existent properties (MOD-1452)."""
    if not env.isCluster():
        # this test is only relevant on cluster
        env.skip()
    conn = getConnectionByEnv(env)
    env.expect('FT.CREATE', 'idx', 'SCHEMA', 't', 'TEXT').ok()
    conn.execute_command('HSET', 'doc1', 't', 'foo')
    # here we only check that its not crashing
    env.expect('FT.AGGREGATE', 'idx', '*', 'GROUPBY', '1', 'foo', 'REDUCE', 'FIRST_VALUE', 3, '@not_exists', 'BY', '@foo')
@no_msan
def test_mod1548(env):
    """JSON paths containing ':' work via bracket notation but not dot notation (MOD-1548)."""
    conn = getConnectionByEnv(env)
    env.expect('FT.CREATE', 'idx', 'ON', 'JSON', 'SCHEMA', '$["prod:id"]', 'AS', 'prod:id', 'TEXT', '$.prod:id', 'AS', 'prod:id_unsupported', 'TEXT', '$.name', 'AS', 'name', 'TEXT', '$.categories', 'AS', 'categories', 'TAG', 'SEPARATOR' ,',').ok()
    waitForIndex(env, 'idx')
    res = conn.execute_command('JSON.SET', 'prod:1', '$', '{"prod:id": "35114964", "SKU": "35114964", "name":"foo", "categories":"abcat0200000"}')
    env.assertOk(res)
    res = conn.execute_command('JSON.SET', 'prod:2', '$', '{"prod:id": "35114965", "SKU": "35114965", "name":"bar", "categories":"abcat0200000"}')
    env.assertOk(res)
    # Supported jsonpath
    res = conn.execute_command('FT.SEARCH', 'idx', '@categories:{abcat0200000}', 'RETURN', '1', 'name')
    env.assertEqual(res, [2L, 'prod:1', ['name', 'foo'], 'prod:2', ['name', 'bar']])
    # Supported jsonpath (actual path contains a colon using the bracket notation)
    res = conn.execute_command('FT.SEARCH', 'idx', '@categories:{abcat0200000}', 'RETURN', '1', 'prod:id')
    env.assertEqual(res, [2L, 'prod:1', ['prod:id', '35114964'], 'prod:2', ['prod:id', '35114965']])
    # Currently unsupported jsonpath (actual path contains a colon using the dot notation)
    res = conn.execute_command('FT.SEARCH', 'idx', '@categories:{abcat0200000}', 'RETURN', '1', 'prod:id_unsupported')
    env.assertEqual(res, [2L, 'prod:1', [], 'prod:2', []])
def test_empty_field_name(env):
    """An empty string is a legal field name for indexing and search."""
    conn = getConnectionByEnv(env)
    env.expect('FT.CREATE', 'idx', 'SCHEMA', '', 'TEXT').ok()
    conn.execute_command('hset', 'doc1', '', 'foo')
    env.expect('FT.SEARCH', 'idx', 'foo').equal([1L, 'doc1', ['', 'foo']])
|
parallel.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 00:23:36 2018
@author: yuluo
"""
import multiprocessing as mp
import os
import random
import subprocess
from deprecated.ProcessABC import ProcessABC
class ParallelProcess(ProcessABC):
def __init__(self, content):
self.content = content
def get_computer_list(self):
return (self.content["cloudmesh"])["cluster"]
def get_computer(self, info):
item = ""
username = ""
publickey = ""
cluster = self.get_computer_list()
if info:
for i in cluster:
if (cluster[i])["label"] == info or (cluster[i])["name"] == info:
# print("computer "+ (cluster[i])["label"]+"/"+ (cluster[i])["name"]+ " is selected")
username = ((cluster[i])["credentials"])["username"]
publickey = ((cluster[i])["credentials"])["publickey"]
item = i
return item, username, publickey
else:
index = random.randint(0, len(cluster) - 1)
key = list(cluster.keys())[index]
# print("computer "+ (cluster[key])["label"]+"/"+ (cluster[key])["name"]+ " is selected")
username = ((cluster[key])["credentials"])["username"]
publickey = ((cluster[key])["credentials"])["publickey"]
item = key
return item, username, publickey
def run_remote(self, username, publickey, script):
s = subprocess.check_output(["ssh", "-i", publickey, username, "sh", script]).decode("utf-8").split("\n")
return s
def scp(self, username, publickey, script):
subprocess.check_output(["scp", "-i", publickey, script, username + ":~/"])
return "~/" + script.split("/")[len(script.split("/")) - 1]
def delete(self, username, publickey, file):
subprocess.check_output(["ssh", "-i", publickey, username, "rm", file])
def run_local(self, username, publickey, script):
proc = os.popen("cat " + script + " | " + "ssh" + " -i " + publickey + " " + username + " sh").read()
return proc
def parall_list(self, scripts):
count = len(scripts)
process = []
c_list = self.get_computer_list()
max_c = len(c_list)
if max_c >= count:
while count != 0:
cp = self.get_computer("")
if cp not in process:
count = count - 1
process.append(cp)
else:
rest = count % max_c
repeat = int(count / max_c)
while rest != 0:
cp = self.get_computer("")
if cp not in process:
rest = rest - 1
process.append(cp)
while repeat != 0:
for i in c_list.keys():
process.append(
[i, ((c_list[i])["credentials"])["username"], ((c_list[i])["credentials"])["publickey"]])
repeat = repeat - 1
return process
def run_parall(self, scripts):
output = mp.Queue()
parall_list = self.parall_list(scripts)
def parall_process(cp, output, script):
result = self.run_local(cp[1], cp[2], script)
output.put([cp[0], result])
process = [mp.Process(target=parall_process, args=(parall_list[x], output, scripts[x])) for x in
range(len(scripts))]
for i in process:
i.start()
for i in process:
i.join()
result = [output.get() for i in process]
return result
def readable(self, result):
    """Pretty-print the [computer_key, output] pairs produced by
    run_parall, one block per computer."""
    for host, script_output in result:
        print(host)
        print("Running script and get the result:")
        print(script_output)
|
BeamProfile_window.py | """Beam Profile Display window
Author: Friedrich Schotte, Feb 26, 2016 - Sep 28, 2017
"""
import wx
from profile import xy_projections,FWHM,CFWHM,xvals,yvals
from logging import debug,info,warn,error
__version__ = "1.1.2" # optional arguments
class BeamProfile(wx.Panel):
    """Beam Profile Display window.

    Renders a camera image of the beam, overlaying the ROI box, beam
    projections, FWHM box and nominal-center crosshair. The values shown
    are polled from `object` in a background thread.
    """
    name = "BeamProfile"
    # Attribute names mirrored from `object` into self.values on update.
    attributes = [
        "image",
        "x_ROI_center",
        "y_ROI_center",
        "ROI_width",
        "saturation_level",
        "x_nominal",
        "y_nominal",
    ]
    # Class-level imports kept for backwards compatibility with callers
    # referencing BeamProfile.numimage / BeamProfile.uint16.
    from numimage import numimage; from numpy import uint16
    def __init__(self,parent,title="Beam Profile",object=None,
            refresh_period=1.0,size=(300,300),*args,**kwargs):
        """title: string
        object: has attributes "image","x_ROI_center",...
        refresh_period: polling interval in seconds
        """
        wx.Window.__init__(self,parent,size=size,*args,**kwargs)
        self.title = title
        self.object = object
        self.refresh_period = refresh_period
        self.Bind(wx.EVT_PAINT,self.OnPaint)
        self.Bind(wx.EVT_SIZE,self.OnResize)
        # Refresh: values start as NaN until the first poll completes.
        from numpy import nan,uint16
        from numimage import numimage
        self.values = dict([(n,nan) for n in self.attributes])
        self.values["image"] = numimage((0,0),dtype=uint16,pixelsize=0.080)
        self.old_values = {}
        from threading import Thread
        self.refresh_thread = Thread(target=self.refresh_background,
            name=self.name+".refresh")
        self.refreshing = False
        # Custom event used to marshal updates back into the GUI thread.
        from wx.lib.newevent import NewEvent
        self.EVT_THREAD = NewEvent()[1]
        self.Bind(self.EVT_THREAD,self.OnUpdate)
        self.thread = Thread(target=self.keep_updated,name=self.name)
        self.thread.start()
    def keep_updated(self):
        """Periodically refresh the displayed settings (runs in a
        background thread for the lifetime of the window)."""
        from time import time,sleep
        while True:
            try:
                t0 = time()
                # Sleep in 0.1 s slices so the loop stays responsive.
                while time() < t0+self.refresh_period: sleep(0.1)
                if self.Shown:
                    self.update_data()
                    if self.data_changed:
                        event = wx.PyCommandEvent(self.EVT_THREAD.typeId,self.Id)
                        # call OnUpdate in GUI thread
                        wx.PostEvent(self.EventHandler,event)
            # Window destroyed: terminate the polling thread.
            except wx.PyDeadObjectError: break
    def refresh(self):
        """Force an update; no-op while a refresh is already running
        or the window is hidden."""
        from threading import Thread
        if not self.refreshing and self.Shown:
            self.refresh_thread = Thread(target=self.refresh_background,
                name=self.name+".refresh")
            self.refreshing = True
            self.refresh_thread.start()
        else: debug("beam profile: already refreshing")
    def refresh_background(self):
        """Worker for refresh(): poll data and, if it changed, post an
        event so OnUpdate runs in the GUI thread."""
        self.update_data()
        if self.data_changed:
            event = wx.PyCommandEvent(self.EVT_THREAD.typeId,self.Id)
            wx.PostEvent(self.EventHandler,event) # call OnUpdate in GUI thread
            debug("beam profile: redraw triggered")
        self.refreshing = False
    def update_data(self):
        """Retrieve status information from self.object into
        self.values, keeping the previous snapshot in old_values."""
        self.old_values = dict(self.values) # make a copy
        for n in self.attributes: self.values[n] = getattr(self.object,n)
        ##from numpy import copy
        ##self.old_values = dict((n,copy(self.values[n])) for n in self.values)
        ##for n in self.attributes:
        ##    self.values[n] = copy(getattr(self.object,n))
        ##debug("beam profile: update completed")
    @property
    def data_changed(self):
        """Did the last 'update_data' change the data to be plotted?
        Uses nan_equal so NaN == NaN does not count as a change."""
        ##changed = (self.values != self.old_values)
        if sorted(self.values.keys()) != sorted(self.old_values.keys()):
            debug("beam profile: %r != %r" % (self.values.keys(),self.old_values.keys()))
            changed = True
        else:
            changed = False
            for a in self.values:
                item_changed = not nan_equal(self.values[a],self.old_values[a])
                debug("beam profile: %r: changed: %r" % (a,item_changed))
                changed = changed or item_changed
        debug("beam profile: data changed: %r" % changed)
        return changed
    def OnUpdate(self,event=None):
        """Periodically refresh the displayed settings (GUI thread)."""
        self.Refresh() # triggers "OnPaint" call
    def OnPaint(self,event):
        """Called by WX whenever the contents of the window
        needs re-rendering. E.g. when the window is brought to front,
        uncovered, restored from minimized state."""
        debug("OnPaint")
        from numpy import rint,array,minimum,uint8,ndarray,isnan,nan
        dc = wx.PaintDC(self)
        ##self.PrepareDC(dc)
        image = self.values["image"]
        debug("beam profile: image %r" % (image.shape,))
        pixelsize = image.pixelsize
        # Clip to ROI (region of interest); widen one side so the ROI
        # matches the window's aspect ratio.
        cx,cy = self.values["x_ROI_center"],self.values["y_ROI_center"]
        w = h = self.values["ROI_width"]
        W,H = self.ClientSize
        if W > H: w = h/H*W
        if W < H: h = w/W*H
        ROI = xmin,xmax,ymin,ymax = cx-w/2,cx+w/2,cy-h/2,cy+h/2
        # Snap the ROI to whole pixels.
        ixmin,ixmax,iymin,iymax = rint(array(ROI)/pixelsize).astype(int)
        ROI = xmin,xmax,ymin,ymax = array([ixmin,ixmax,iymin,iymax])*pixelsize
        w,h = xmax-xmin,ymax-ymin
        image_ROI = image[ixmin:ixmax,iymin:iymax]
        # Compress the dynamic range from 0...saturation_level to 0...256.
        scale = 255./max(self.values["saturation_level"],1)
        image = minimum(image_ROI*scale,255).astype(uint8)
        # Convert from gray scale to RGB format if needed.
        if image.ndim < 3:
            # NOTE(review): w,h are reassigned here to pixel counts,
            # overwriting the mm-sized w,h computed above — the later
            # xscale/yscale computation divides by these pixel counts;
            # confirm the unit mix is intended.
            w,h = image.shape[-2:]
            RGB = ndarray((3,w,h),uint8,order="F")
            RGB[0],RGB[1],RGB[2] = image,image,image
            image = RGB
        # Mark overloaded pixels.
        overload_level = 65535
        mask_color = (255,0,0)
        mask_opacity = 1.0
        mask = image_ROI >= overload_level
        R,G,B = image
        r,g,b = mask_color
        x = mask_opacity
        # Alpha-blend the mask color over saturated pixels.
        R[mask] = (1-x)*R[mask]+x*r
        G[mask] = (1-x)*G[mask]+x*g
        B[mask] = (1-x)*B[mask]+x*b
        ##image = array([R,G,B]) # needed?
        # Convert image from numpy to WX image format.
        w,h = image.shape[-2:]
        image = wx.ImageFromData(w,h,image)
        # Scale the image to fit into the window.
        W,H = self.ClientSize
        if len(self.values["image"]) > 0:
            ##scalefactor = min(float(W)/max(w,1),float(H)/max(h,1))
            ##W = rint(w*scalefactor); H = rint(h*scalefactor)
            image = image.Scale(W,H)
        dc.DrawBitmap (wx.BitmapFromImage(image),0,0)
        # Draw the FWHM with dimensions box around the beam center,
        # horizontal and vertical beam projections or sections on the left and
        # bottom edge of the image
        cx,cy = self.values["x_ROI_center"],self.values["y_ROI_center"]
        d = self.values["ROI_width"]
        ROI = cx-d/2,cx+d/2,cy-d/2,cy+d/2
        ROI = rint(array(ROI)/pixelsize)*pixelsize
        ROI_xmin,ROI_xmax,ROI_ymin,ROI_ymax = ROI
        xprofile,yprofile = xy_projections(self.values["image"],(cx,cy),d)
        # mm -> window-pixel transforms.
        xscale = float(W)/max(w,1)/pixelsize; xoffset = -xmin*xscale
        yscale = float(H)/max(h,1)/pixelsize; yoffset = (-ymin)*yscale
        # Draw a crosshair marking the nominal beam center.
        crosshair_color = wx.Colour(0,190,0)
        dc.SetPen(wx.Pen(crosshair_color,1))
        l = 0.2 # crosshair size in mm
        x = self.values["x_nominal"]*xscale+xoffset
        y = self.values["y_nominal"]*yscale+yoffset
        rx,ry = l/2*xscale,l/2*yscale
        dc.DrawLines([(x-rx,y),(x+rx,y)])
        dc.DrawLines([(x,y-ry),(x,y+ry)])
        # Draw horizontal profile at the bottom edge of the image.
        profile_color = wx.Colour(255,0,255)
        dc.SetPen (wx.Pen(profile_color,1))
        x = xvals(xprofile); I = yvals(xprofile)
        Imax = max(I) if len(I)>0 else nan
        if Imax == 0: Imax = 1
        # Negative scale: profile grows upward from the bottom edge.
        Iscale = -0.35*(ROI_ymax-ROI_ymin)*xscale/Imax
        Ioffset = ROI_ymax*yscale+yoffset
        lines = []
        for i in range(0,len(x)-1):
            # Skip segments with undefined intensity.
            if not isnan(I[i]) and not isnan(I[i+1]):
                p1 = x[i] *xscale+xoffset, I[i] *Iscale+Ioffset
                p2 = x[i+1]*xscale+xoffset, I[i+1]*Iscale+Ioffset
                lines += [(p1[0],p1[1],p2[0],p2[1])]
        dc.DrawLineList(lines)
        # Draw vertical profile at the left edge of the image.
        profile_color = wx.Colour(255,0,255)
        dc.SetPen(wx.Pen(profile_color,1))
        y = xvals(yprofile); I = yvals(yprofile)
        Imax = max(I) if len(I)>0 else nan
        if Imax == 0: Imax = 1
        Iscale = 0.35*(ROI_xmax-ROI_xmin)*xscale/Imax
        Ioffset = ROI_xmin*xscale+xoffset
        lines = []
        for i in range(0,len(y)-1):
            if not isnan(I[i]) and not isnan(I[i+1]):
                p1 = I[i] *Iscale+Ioffset, y[i] *yscale+yoffset
                p2 = I[i+1]*Iscale+Ioffset, y[i+1]*yscale+yoffset
                lines += [(p1[0],p1[1],p2[0],p2[1])]
        dc.DrawLineList(lines)
        # Draw a box around the ROI.
        # NOTE(review): center_color is assigned but the pen below uses
        # profile_color — possibly center_color was intended; confirm.
        center_color = wx.Colour(128,128,255)
        dc.SetPen(wx.Pen(profile_color,1))
        x1,y1 = ROI_xmin*xscale+xoffset,ROI_ymin*yscale+yoffset
        x2,y2 = ROI_xmax*xscale+xoffset-1,ROI_ymax*yscale+yoffset-1
        lines = [(x1,y1),(x2,y1),(x2,y2),(x1,y2),(x1,y1)]
        dc.DrawLines (lines)
        # Draw a box around center of the beam, with the size of the FWHM.
        FWHM_color = wx.Colour(255,0,0)
        dc.SetPen (wx.Pen(FWHM_color,1))
        width,height = FWHM(xprofile),FWHM(yprofile)
        cx,cy = CFWHM(xprofile),CFWHM(yprofile)
        x1,y1 = (cx-width/2)*xscale+xoffset,(cy-height/2)*yscale+yoffset
        x2,y2 = (cx+width/2)*xscale+xoffset,(cy+height/2)*yscale+yoffset
        lines = [(x1,y1),(x2,y1),(x2,y2),(x1,y2),(x1,y1)]
        dc.DrawLines(lines)
        # Draw a vertical and horizontal line through the center.
        center_color = wx.Colour(128,128,255)
        dc.SetPen (wx.Pen(center_color,1))
        dc.DrawLines ([(cx*xscale+xoffset,H),(cx*xscale+xoffset,0)])
        dc.DrawLines ([(0,cy*yscale+yoffset),(W,cy*yscale+yoffset)])
        # Annotate the lines with the offset from the nominal center,
        # in um below 1 mm, otherwise in mm.
        font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.SetPointSize(10)
        dc.SetFont(font)
        dc.SetTextForeground(center_color)
        dx = cx - self.values["x_nominal"]
        if abs(dx) < 1: label = "%+.0f um" % (dx*1000)
        else: label = "%+.3f mm" % dx
        x,y = cx*xscale+xoffset,0.875*H
        tw,th = dc.GetTextExtent(label)
        dc.DrawRotatedText (label,toint(x+2),toint(y-th/2),0)
        dy = cy - self.values["y_nominal"]
        if abs(dy) < 1: label = "%+.0f um" % (dy*1000)
        else: label = "%+.3f mm" % dy
        x,y = 0.175*W,cy*yscale+yoffset
        tw,th = dc.GetTextExtent(label)
        dc.DrawRotatedText (label,toint(x-th/2),toint(y+2),-90)
    def OnResize(self,event):
        """Repaint on resize so overlays are rescaled."""
        self.Refresh()
        event.Skip() # call default handler
def toint(x):
    """Convert x to the nearest integer without raising exceptions.

    Returns 0 when x cannot be converted (NaN, inf, None, non-numeric).
    Bug fix: the original called rint(x) OUTSIDE the try block, so a
    non-numeric input raised despite the docstring's promise.
    """
    from numpy import rint
    try:
        return int(rint(x))
    except (TypeError, ValueError, OverflowError):
        # NaN/inf -> ValueError/OverflowError; non-numeric -> TypeError.
        return 0
def nan_equal(a, b):
    """Return True if the two array-likes are element-wise equal,
    treating NaNs in matching positions as equal (unlike plain ==)."""
    from numpy.testing import assert_equal
    try:
        assert_equal(a, b)
    except AssertionError:
        return False
    return True
|
server.py | """ Flask server for CO2meter
(c) Vladimir Filimonov, 2018
E-mail: vladimir.a.filimonov@gmail.com
Redacted by Pol Smith, 2019
@trenerok
"""
import optparse
import logging
import threading
import time
import glob
import os
import socket
import datetime as dt
import telebot
from telebot import types
import signal
import json
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import flask
from flask import request, render_template, jsonify
import pandas as pd
import co2meter as co2
_DEFAULT_HOST = '127.0.0.1'
_DEFAULT_PORT = '1201'
_DEFAULT_INTERVAL = 30  # seconds
_DEFAULT_NAME = 'co2'  # default log name (logs/<name>.csv)
_INIT_TIME = 30  # time to initialize and calibrate device
_URL = 'https://github.com/vfilimonov/co2meter'
# GitHub-hosted status images shown on the index page (paths appended
# to _URL_IMAGES).
_URL_IMAGES = 'https://user-images.githubusercontent.com/'
_COLORS = {'r': '#E81F2E', 'y': '#FAAF4C', 'g': '#7FB03F'}
_COLORS_HUMAN = {'r': 'red', 'y': 'yellow', 'g': 'green'}
_IMG_G = '1324881/36358454-d707e2f4-150e-11e8-9bd1-b479e232f28f'
_IMG_Y = '1324881/36358456-d8b513ba-150e-11e8-91eb-ade37733b19e'
_IMG_R = '18612132/73597400-9e098600-453c-11ea-8d97-31c1ad7d5fe3'
# CO2 thresholds in ppm: below -> green, in between -> yellow, above -> red.
_RANGE_MID = [800, 1200]
_CO2_MAX_VALUE = 3200  # Cut our yaxis here
##
# Telegram alerting configuration.
_SEND_TELEGRAM = True
_TELEGRAM_BOT_KEY = "telegram bot token here"  # NOTE(review): placeholder secret — keep out of version control
_ALERT_TIME_GAP_SECOND = 360  # minimum seconds between alerts (see send_notify)
# Active log name; may be changed at runtime via the /rename route.
_name = _DEFAULT_NAME
###############################################################################
# CO2monitor device handle; created lazily by read_co2_data().
mon = None
###############################################################################
app = flask.Flask(__name__)
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.route('/')
def home():
    """Index page: latest CO2/temperature reading with a color-coded
    status image; falls back to the log file when the device is busy."""
    # Read CO2 and temp values
    if mon is None:
        status = '<h1 align="center" style="color:%s;">Device is not connected</h1>' % _COLORS['r']
    else:
        status = ''
    try:
        vals = list(mon._last_data)
        vals[-1] = '%.1f' % vals[-1]
    except Exception:
        # Bug fix: was a bare `except:`. Device (or its last reading)
        # unavailable — fall back to the last line of the CSV log.
        data = read_logs()
        vals = data.split('\n')[-2].split(',')
        if status == '':
            status = '<h1 align="center" style="color:%s;">Device is not ready</h1>' % _COLORS['r']
    # Select image and color
    if int(vals[1]) >= _RANGE_MID[1]:
        color = _COLORS['r']
        img = _IMG_R
    elif int(vals[1]) < _RANGE_MID[0]:
        color = _COLORS['g']
        img = _IMG_G
    else:
        color = _COLORS['y']
        img = _IMG_Y
    # (Removed dead local `co2 = '<font ...>'` — it was never used and
    # shadowed the co2meter module import.)
    # Return template
    return render_template('index.html', image=img, timestamp=vals[0],
                           co2=vals[1], color=color, temp=vals[2], url=_URL,
                           status=status)
#############################################################################
@app.route('/log', defaults={'logname': None})
@app.route('/log/<string:logname>')
def log(logname):
    """Render the full CSV log as an HTML table."""
    contents = read_logs(name=logname)
    return '<h1>Full log</h1>' + wrap_table(contents)
@app.route('/log.csv', defaults={'logname': None})
@app.route('/log/<string:logname>.csv')
def log_csv(logname):
    """Serve the raw log as a downloadable CSV file."""
    contents = read_logs(name=logname)
    return wrap_csv(contents, logname)
@app.route('/log.json', defaults={'logname': None})
@app.route('/log/<string:logname>.json')
def log_json(logname):
    """Serve the log converted to JSON."""
    return wrap_json(read_logs(name=logname))
#############################################################################
@app.route('/rename')
def get_shape_positions():
    """Change the active log name: /rename?name=<newname>."""
    args = request.args
    # Bug fix: logging.info('rename', args.to_dict()) passed the dict as
    # a %-format argument without a placeholder, which raises inside the
    # logging module. Use lazy %s formatting instead.
    logging.info('rename %s', args.to_dict())
    new_name = args.get('name', default=None, type=str)
    if new_name is None:
        return 'Error: new log name is not specified!'
    global _name
    _name = new_name
    return 'Log name has changed to "%s"' % _name
#############################################################################
@app.route('/kill')
def shutdown():
    """Stop the Werkzeug server and signal the monitoring loop to exit."""
    # Bug fix: the original called server_stop(), which is not defined
    # anywhere in this module (NameError at request time); the actual
    # helper is stop_server().
    stop_server()
    global _monitoring
    _monitoring = False
    return 'Server shutting down...'
#############################################################################
# Dashboard on plotly.js
#############################################################################
def prepare_data(name=None, span='24H'):
    """Read the CSV log and downsample it for charting.

    name: log name passed to read_logs (None = active log).
    span: pandas offset alias ('24H', '7D', '30D') limiting the window,
          or 'FULL' for the whole log.
    Returns a DataFrame indexed by timestamp with co2/temp columns.
    """
    data = read_logs(name)
    data = pd.read_csv(StringIO(data), parse_dates=[0]).set_index('timestamp')
    if span != 'FULL':
        data = data.last(span)
    # Resampling frequency grows with the window so charts stay small.
    if span == '24H':
        data = data.resample('60s').mean()
    elif span == '7D':
        data = data.resample('600s').mean()
    elif span == '30D':
        data = data.resample('1H').mean()
    elif span == 'FULL':
        if len(data) > 3000:  # Resample only long series
            data = data.resample('1H').mean()
    data = data.round({'co2': 0, 'temp': 1})
    return data
def rect(y0, y1, color):
    """Plotly shape dict: a horizontal band from y0 to y1 spanning the
    whole x-range of the plot."""
    shape = {
        'type': 'rect',
        'layer': 'below',
        'xref': 'paper',
        'x0': 0,
        'x1': 1,
        'yref': 'y',
        'y0': y0,
        'y1': y1,
        'fillcolor': color,
        'opacity': 0.2,
        'line': {'width': 0},
    }
    return shape
def caption(title, x, y):
    """Plotly annotation dict for a centred subplot title at paper
    coordinates (x, y)."""
    annotation = dict(xref='paper', yref='paper', x=x, y=y, text=title,
                      showarrow=False, font=dict(size=16),
                      xanchor='center', yanchor='bottom')
    return annotation
#############################################################################
@app.route("/chart/", strict_slashes=False)
@app.route("/chart/<name>", strict_slashes=False)
@app.route("/chart/<name>/<freq>", strict_slashes=False)
def chart_co2_temp(name=None, freq='24H'):
    """Return a Plotly figure (JSON) with CO2 and temperature subplots
    for the given log name and time window."""
    data = prepare_data(name, freq)
    # Axis ranges padded around the data; CO2 capped at _CO2_MAX_VALUE.
    co2_min = min(500, data['co2'].min() - 50)
    co2_max = min(max(2000, data['co2'].max() + 50), _CO2_MAX_VALUE)
    t_min = min(15, data['temp'].min())
    t_max = max(27, data['temp'].max())
    rect_green = rect(co2_min, _RANGE_MID[0], _COLORS['g'])
    rect_yellow = rect(_RANGE_MID[0], _RANGE_MID[1], _COLORS['y'])
    rect_red = rect(_RANGE_MID[1], co2_max, _COLORS['r'])
    # Check if mobile: serve a static (non-interactive) plot on phones.
    try:
        agent = request.headers.get('User-Agent')
        phones = ['iphone', 'android', 'blackberry', 'fennec', 'iemobile']
        staticPlot = any(phone in agent.lower() for phone in phones)
    except RuntimeError:
        staticPlot = False
    # Make figure. Bug fix: the deprecated `pd.np` alias (removed in
    # pandas 1.0) is replaced with explicit NaN -> None conversion so
    # missing values serialize as JSON null.
    index = data.index.format()
    co2 = [None if pd.isnull(v) else v for v in data['co2']]
    temp = [None if pd.isnull(v) else v for v in data['temp']]
    d_co2 = {'mode': 'lines+markers', 'type': 'scatter',
             'name': 'CO2 concentration',
             'xaxis': 'x1', 'yaxis': 'y1',
             'x': index, 'y': co2}
    d_temp = {'mode': 'lines+markers', 'type': 'scatter',
              'name': 'Temperature',
              'xaxis': 'x1', 'yaxis': 'y2',
              'x': index, 'y': temp}
    config = {'displayModeBar': False, 'staticPlot': staticPlot}
    layout = {'margin': {'l': 30, 'r': 10, 'b': 30, 't': 30},
              'showlegend': False,
              'shapes': [rect_green, rect_yellow, rect_red],
              'xaxis1': {'domain': [0, 1], 'anchor': 'y2'},
              'yaxis1': {'domain': [0.55, 1], 'anchor': 'free', 'position': 0,
                         'range': [co2_min, co2_max]},
              'yaxis2': {'domain': [0, 0.45], 'anchor': 'x1',
                         'range': [t_min, t_max]},
              'annotations': [caption('CO2 concentration', 0.5, 1),
                              caption('Temperature', 0.5, 0.45)]
              }
    fig = {'data': [d_co2, d_temp], 'layout': layout, 'config': config}
    return jsonify(fig)
#############################################################################
@app.route("/dashboard")
def dashboard_plotly():
    """Dashboard page listing every available log file, with the
    currently active log flagged for the template."""
    # Get list of files
    names = [os.path.splitext(os.path.basename(path))[0]
             for path in glob.glob('logs/*.csv')]
    # And find selected for jinja template
    files = [(n, n == _name) for n in names]
    return render_template('dashboard.html', files=files)
#############################################################################
# Monitoring routines
#############################################################################
def now():
    """Current local time truncated to whole seconds."""
    timestamp = dt.datetime.now()
    return timestamp.replace(microsecond=0)
def opendatetime():
    """Load the last-notification record from logs/lastdatetime.log.

    Returns the dict {"datetime": <utc-timestamp-str>, "last_event": <color>}
    written by savedatetime(), or None when the file is missing/unreadable.
    """
    try:
        # `with` guarantees the handle is closed; the original leaked
        # the file object when json.loads raised.
        with open('logs/' + 'lastdatetime.log', 'r') as f:
            return json.loads(f.readline())
    except Exception as err:
        logging.info('Something goes wrong [%s]' % str(err))
def savedatetime(last_ivent):
    """Persist the current UTC timestamp and last alert color to
    logs/lastdatetime.log.

    Returns the record written, or None on failure (logged).
    """
    try:
        record = {"datetime": str(dt.datetime.utcnow().timestamp()),
                  "last_event": last_ivent}
        # `with` ensures the handle is closed even if the write fails
        # (the original leaked it on any exception after open()).
        with open('logs/' + 'lastdatetime.log', 'w') as f:
            f.write(json.dumps(record) + "\n")
        return record
    except Exception as err:
        logging.info('Something goes wrong [%s]' % str(err))
def send_telegram(data):
    """Send a status photo + caption to Telegram and record the event
    time via savedatetime(). Failures are logged, not raised.

    data: dict with keys 'img', 'color' and 'data' (pre-built HTML text).
    """
    bot = telebot.TeleBot(_TELEGRAM_BOT_KEY)
    keyboard = types.InlineKeyboardMarkup()
    try:
        # Remember when/what we alerted, for rate limiting in send_notify.
        savedatetime(data.get('color'))
        # NOTE(review): chat id -280116721 is hard-coded — consider
        # making it configurable alongside _TELEGRAM_BOT_KEY.
        bot.send_photo(-280116721, photo=data.get('img'),
                       parse_mode='HTML',
                       caption="<b>CO2 Monitor</b>" + '\n' + str(data.get('data')) + '\n' + '\n' \
                       "<b>timestamp: </b>" + str(dt.datetime.now().__format__('%Y-%m-%dT%H:%M:%S')) + '\n' + '\n', reply_markup=keyboard)
        logging.info('telegram send DONE')
    except Exception as err:
        logging.info('- Error send events to Telegram, error - [%s]' % str(err))
def read_logs(name=None):
    """Return the contents of logs/<name>.csv (None = active log),
    creating an empty log (header only) first when it does not exist.

    Bug fixes: the original built the existence-check path from the
    global _name even when an explicit name was passed, and returned an
    unbound `data` (UnboundLocalError) when the file had just been
    created.
    """
    if name is None:
        name = _name
    fname = os.path.join('logs', name + '.csv')
    if not os.path.exists('logs'):
        os.makedirs('logs')
    if not os.path.isfile(fname):
        with open(fname, 'a') as f:
            f.write('timestamp,co2,temp\n')
    with open(fname, 'r') as f:
        data = f.read()
    return data
#############################################################################
def write_to_log(vals):
    """Append a (timestamp, co2, temp) tuple to the active log file,
    writing the CSV header first when the file is new."""
    fname = os.path.join('logs', _name + '.csv')
    # Create directory and file on first use.
    if not os.path.exists('logs'):
        os.makedirs('logs')
    needs_header = not os.path.isfile(fname)
    with open(fname, 'a') as f:
        if needs_header:
            f.write('timestamp,co2,temp\n')
        f.write('%s,%d,%.1f\n' % vals)
def read_co2_data():
    """ A small hack to read co2 data from monitor in order to account for case
    when monitor is not initialized yet

    Returns the raw reading from the device, or None when the device is
    absent or the read failed (the handle is then reset so the next call
    re-initializes it).
    """
    global mon
    if mon is None:
        # Try to initialize
        try:
            mon = co2.CO2monitor()
            # Sleep. If we read from device before it is calibrated, we'll
            # get wrong values
            time.sleep(_INIT_TIME)
        except OSError:
            return None
    try:
        return mon.read_data_raw(max_requests=1000)
    except OSError:
        # We kill the link and will require to initialize monitor again next time
        mon = None
        return None
def monitoring_CO2(interval):
    """ Tread for monitoring / logging

    Polls the device every `interval` seconds until the module-level
    _monitoring flag is cleared (see /kill), appending readings to the
    log and sending rate-limited Telegram alerts on status changes.
    """
    def send_notify(result):
        # Rate-limit alerts: skip when the last alert was less than
        # _ALERT_TIME_GAP_SECOND seconds ago.
        possible = True
        if os.path.isfile('logs/' + 'lastdatetime.log'):
            current_datetime = dt.datetime.utcnow().timestamp()
            last_datetime = opendatetime().get('datetime')  ####need check
            timedelta = float(current_datetime) - float(last_datetime)
            #print('Timedalta in seconds: ' + str(timedelta))
            if timedelta < float(_ALERT_TIME_GAP_SECOND):
                #print("It's not time yet.. Spend less than %s seconds.." % (
                #str(_ALERT_TIME_GAP_SECOND)))
                possible = False
        result['data'] = "Current CO2 level: <b>" + str(result.get('co2')) + "</b> PPM" + "\n" \
                         "Temperature: <b>" + str(result.get('temp')) + "</b> degree"
        return send_telegram(result) if possible is True else None
    while _monitoring:
        # Request concentration and temperature
        vals = read_co2_data()
        if vals is None:
            # NOTE(review): co2.now() calls now() on the co2meter module;
            # possibly the local now() helper was intended — confirm.
            logging.info('[%s] monitor is not connected' % co2.now())
        else:
            # Write to log and sleep
            logging.info('[%s] %d ppm, %.1f deg C' % tuple(vals))
            write_to_log(vals)
            #####telegram handler
            if _SEND_TELEGRAM:
                result = {'co2': vals[1], 'temp': format(vals[2], '.1f'), 'timestamp': now()}
                ## Select image and color for telegram
                if int(vals[1]) >= _RANGE_MID[1]:
                    # Red: always try to notify (rate limit applies).
                    color = _COLORS_HUMAN['r']
                    img = _IMG_R
                    result['img'] = _URL_IMAGES + img + '.jpg'
                    result['color'] = color
                    send_notify(result)
                elif int(vals[1]) < _RANGE_MID[0]:
                    color = _COLORS_HUMAN['g']
                    img = _IMG_G
                    result['img'] = _URL_IMAGES + img + '.jpg'
                    result['color'] = color
                    ##send notify only if last event was red
                    if os.path.isfile('logs/' + 'lastdatetime.log'):
                        last_event = opendatetime().get('last_event')
                        if last_event == 'red':
                            send_notify(result)
                else:
                    # Yellow: no notification is sent.
                    color = _COLORS_HUMAN['y']
                    img = _IMG_Y
                    result['img'] = _URL_IMAGES + img + '.jpg'
                    result['color'] = color
                    ##test for yellow delete this
                    #send_notify(result)
        # Sleep for the next call
        time.sleep(interval)
#############################################################################
def start_monitor(interval=_DEFAULT_INTERVAL):
    """Launch the CO2 monitoring loop in a background thread and return
    the Thread object."""
    logging.basicConfig(level=logging.INFO)
    global _monitoring
    _monitoring = True
    worker = threading.Thread(target=monitoring_CO2, args=(interval,))
    worker.start()
    return worker
#############################################################################
def init_homekit_target(port, host):
    """Wait until the CO2 device handle exists, then start the homekit
    accessory server (blocking; intended to run in a thread)."""
    try:
        from .homekit import start_homekit
    except ImportError:
        # Bug fix: was a bare `except:`, which would also hide errors
        # raised while importing homekit itself. Plain import is the
        # fallback when not running as an installed package.
        from homekit import start_homekit
    global mon
    while mon is None:
        time.sleep(5)
    logging.info('Starting homekit server')
    start_homekit(mon, host=host, port=port, monitoring=False, handle_sigint=False)
def init_homekit(port, host):
    """Kick off homekit initialisation in the background; the worker
    waits for the device to come online before serving."""
    # We'll start homekit once the device is connected
    threading.Thread(target=init_homekit_target, args=(port, host)).start()
#############################################################################
# Server routines
#############################################################################
def my_ip():
    """ Get my local IP address """
    # "Connecting" a UDP socket sends no packets; it only makes the OS
    # choose the outgoing interface, whose address we then read back.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
        s.connect(("8.8.8.8", 80))  # Google Public DNS
        return s.getsockname()[0]
def start_server_homekit():
    """ Start monitoring, flask/dash server and homekit accessory """
    # Based on http://flask.pocoo.org/snippets/133/
    try:
        from .homekit import PORT
    except:
        # the case of running not from the installed module
        from homekit import PORT
    # Bind the Flask app to this machine's LAN address so the homekit
    # accessory is reachable from other devices.
    host = my_ip()
    parser = optparse.OptionParser()
    parser.add_option("-H", "--host",
                      help="Hostname of the Flask app [default %s]" % host,
                      default=host)
    parser.add_option("-P", "--port-flask",
                      help="Port for the Flask app [default %s]" % _DEFAULT_PORT,
                      default=_DEFAULT_PORT)
    parser.add_option("-K", "--port-homekit",
                      help="Port for the Homekit accessory [default %s]" % PORT,
                      default=PORT)
    parser.add_option("-N", "--name",
                      help="Name for the log file [default %s]" % _DEFAULT_NAME,
                      default=_DEFAULT_NAME)
    options, _ = parser.parse_args()
    global _name
    _name = options.name
    # Start monitoring
    t_monitor = start_monitor()
    # Start a thread that will initialize homekit once device is connected
    init_homekit(host=options.host, port=int(options.port_homekit))
    # Start server (blocks until shutdown)
    app.run(host=options.host, port=int(options.port_flask))
#############################################################################
def start_server():
    """ Runs Flask instance using command line arguments

    Parses host/port/interval/name plus flags to disable monitoring or
    the HTTP server, then starts whichever parts are enabled.
    """
    # Based on http://flask.pocoo.org/snippets/133/
    parser = optparse.OptionParser()
    parser.add_option("-H", "--host",
                      help="Hostname of the Flask app [default %s]" % _DEFAULT_HOST,
                      default=_DEFAULT_HOST)
    parser.add_option("-P", "--port",
                      help="Port for the Flask app [default %s]" % _DEFAULT_PORT,
                      default=_DEFAULT_PORT)
    parser.add_option("-I", "--interval",
                      help="Interval in seconds for CO2meter requests [default %d]" % _DEFAULT_INTERVAL,
                      default=_DEFAULT_INTERVAL)
    parser.add_option("-N", "--name",
                      help="Name for the log file [default %s]" % _DEFAULT_NAME,
                      default=_DEFAULT_NAME)
    parser.add_option("-m", "--nomonitoring",
                      help="No live monitoring (only flask server)",
                      action="store_true", dest="no_monitoring")
    parser.add_option("-s", "--noserver",
                      help="No server (only monitoring to file)",
                      action="store_true", dest="no_server")
    parser.add_option("-d", "--debug",
                      action="store_true", dest="debug",
                      help=optparse.SUPPRESS_HELP)
    options, _ = parser.parse_args()
    if options.debug and not options.no_monitoring:
        # Bug fix: the message referred to a nonexistent flag
        # "--no_monitoring"; the actual option string is "--nomonitoring".
        parser.error("--debug option could be used only with --nomonitoring")
    global _name
    _name = options.name
    # Start monitoring
    if not options.no_monitoring:
        start_monitor(interval=int(options.interval))
    # Start server (blocks until shutdown)
    if not options.no_server:
        app.run(debug=options.debug, host=options.host, port=int(options.port))
def stop_server():
    """Ask the Werkzeug development server to shut down.

    Must be called from inside a request context: it relies on the
    'werkzeug.server.shutdown' callable the dev server injects into the
    WSGI environ. Raises RuntimeError under any other server.
    """
    func = request.environ.get('werkzeug.server.shutdown')
    if func is None:
        raise RuntimeError('Not running with the Werkzeug Server')
    func()
###############################################################################
def wrap_csv(data, fname='output'):
    """ Make CSV response downloadable """
    if fname is None:
        fname = 'log'
    response = flask.make_response(StringIO(data).getvalue())
    response.headers["Content-Disposition"] = "attachment; filename=%s.csv" % fname
    response.headers["Content-type"] = "text/csv"
    return response
def wrap_json(data):
    """ Convert CSV to JSON and make it downloadable """
    # Drop blank lines, skip the header row, map columns to field names.
    lines = [line for line in data.split('\n') if line != '']
    records = [dict(zip(['timestamp', 'co2', 'temp'], line.split(',')))
               for line in lines[1:]]
    return jsonify(records)
def wrap_table(data):
    """ Return HTML for table """
    header = ('<table><thead><tr><th>Timestamp</th><th>CO2 concentration</th>'
              '<th>Temperature</th></tr></thead><tbody>')
    rows = []
    for line in data.split('\n')[1:]:
        cells = ''.join('<td>%s</td>' % cell for cell in line.split(','))
        rows.append('<tr>' + cells + '</tr>')
    return header + ''.join(rows) + '</tbody></table>'
###############################################################################
if __name__ == '__main__':
    # start_server() will take care of start_monitor()
    start_server()
    # start_server_homekit()  # alternative entry point with HomeKit support
|
trainer_utils.py | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, NamedTuple, Optional, Tuple, Union
import numpy as np
from .file_utils import (
ExplicitEnum,
is_psutil_available,
is_sagemaker_dp_enabled,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
    installed).

    Args:
        seed (:obj:`int`): The seed to set.
    """
    # Seed every RNG source that is present with the same value.
    random.seed(seed)
    np.random.seed(seed)
    if is_torch_available():
        torch.manual_seed(seed)
        # Safe to call even without CUDA: it is a no-op in that case.
        torch.cuda.manual_seed_all(seed)
    if is_tf_available():
        tf.random.set_seed(seed)
class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (:obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray`): Targets to be matched.
    """

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Union[np.ndarray, Tuple[np.ndarray]]
class EvalLoopOutput(NamedTuple):
    """Raw output of an evaluation loop: predictions, optional labels,
    optional computed metrics and the number of samples evaluated."""

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
    num_samples: Optional[int]
class PredictionOutput(NamedTuple):
    """Output of a prediction run: predictions plus optional labels and
    metrics (labels/metrics may be absent for unlabeled data)."""

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
    """Result of a training run: the final global step, the training
    loss, and the dictionary of training metrics."""

    global_step: int
    training_loss: float
    metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")


def get_last_checkpoint(folder):
    """Return the path of the `checkpoint-<N>` subfolder of `folder`
    with the highest step number N, or None when none exists."""
    def _step_number(entry):
        # Safe: only called on entries the regex already matched.
        return int(_re_checkpoint.search(entry).groups()[0])

    candidates = [
        entry
        for entry in os.listdir(folder)
        if _re_checkpoint.search(entry) is not None
        and os.path.isdir(os.path.join(folder, entry))
    ]
    if not candidates:
        return None
    return os.path.join(folder, max(candidates, key=_step_number))
class IntervalStrategy(ExplicitEnum):
    """Interval choices for periodic actions: never, every N steps, or
    once per epoch."""

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
    """Same value set as IntervalStrategy.

    NOTE(review): duplicates IntervalStrategy — presumably kept for
    backward compatibility; confirm before removing.
    """

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
    """Hub-related strategy choices: at the end only, on every save, per
    checkpoint, or for all checkpoints."""

    END = "end"
    EVERY_SAVE = "every_save"
    CHECKPOINT = "checkpoint"
    ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
    """
    The best run found by an hyperparameter search (see :class:`~transformers.Trainer.hyperparameter_search`).

    Parameters:
        run_id (:obj:`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (:obj:`float`):
            The objective that was obtained for this run.
        hyperparameters (:obj:`Dict[str, Any]`):
            The hyperparameters picked to get this run.
    """

    run_id: str
    objective: float
    hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
    """
    The default objective to maximize/minimize when doing an hyperparameter search. It is the evaluation loss if no
    metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.

    Args:
        metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.

    Return:
        :obj:`float`: The objective to minimize or maximize
    """
    remaining = copy.deepcopy(metrics)
    loss = remaining.pop("eval_loss", None)
    remaining.pop("epoch", None)
    # Speed metrics are measurements, not objectives: drop them.
    for key in list(remaining):
        if key.endswith("_runtime") or key.endswith("_per_second"):
            remaining.pop(key, None)
    return loss if not remaining else sum(remaining.values())
def default_hp_space_optuna(trial) -> Dict[str, float]:
    """Default Optuna search space for Trainer hyperparameter search."""
    from .integrations import is_optuna_available

    assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
    space = {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "seed": trial.suggest_int("seed", 1, 40),
        "per_device_train_batch_size": trial.suggest_categorical(
            "per_device_train_batch_size", [4, 8, 16, 32, 64]
        ),
    }
    return space
def default_hp_space_ray(trial) -> Dict[str, float]:
    """Default Ray Tune search space for Trainer hyperparameter search."""
    from .integrations import is_ray_tune_available

    assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
    from ray import tune

    space = {
        "learning_rate": tune.loguniform(1e-6, 1e-4),
        "num_train_epochs": tune.choice(list(range(1, 6))),
        "seed": tune.uniform(1, 40),
        "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
    }
    return space
def default_hp_space_sigopt(trial):
    """Default SigOpt experiment parameter definitions.

    `trial` is unused; it is kept for signature parity with the other
    hp-space factories dispatched via `default_hp_space`.
    """
    return [
        # Bug fix: the key was misspelled "transformamtion"; SigOpt's
        # parameter schema expects "transformation", so the log transform
        # of the learning rate was silently ignored.
        {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformation": "log"},
        {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
        {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
        {
            "categorical_values": ["4", "8", "16", "32", "64"],
            "name": "per_device_train_batch_size",
            "type": "categorical",
        },
    ]
class HPSearchBackend(ExplicitEnum):
    """Supported hyperparameter-search backends."""

    OPTUNA = "optuna"
    RAY = "ray"
    SIGOPT = "sigopt"
# Maps each hyperparameter-search backend to its default search-space factory.
default_hp_space = {
    HPSearchBackend.OPTUNA: default_hp_space_optuna,
    HPSearchBackend.RAY: default_hp_space_ray,
    HPSearchBackend.SIGOPT: default_hp_space_sigopt,
}
def is_main_process(local_rank):
    """Return True when the current process is the main local process.

    On TPU the ordinal reported by ``xm.get_ordinal()`` is authoritative;
    otherwise a ``local_rank`` of -1 (non-distributed) or 0 marks the main
    process.
    """
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        return xm.get_ordinal() == 0
    return local_rank in (-1, 0)
def total_processes_number(local_rank):
    """Return the number of processes launched in parallel.

    Checks TPU first, then SageMaker data parallelism, then plain
    ``torch.distributed``; falls back to 1 for single-process runs.
    """
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        return xm.xrt_world_size()
    if is_sagemaker_dp_enabled():
        import smdistributed.dataparallel.torch.distributed as dist

        return dist.get_world_size()
    if local_rank != -1 and is_torch_available():
        import torch

        return torch.distributed.get_world_size()
    return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    """Compute throughput metrics for a completed operation.

    Take a ``time.time()`` snapshot just before the operation starts and call
    this immediately after it completes.

    Args:
        split: name used to prefix each metric (like train, eval, test...)
        start_time: operation start time
        num_samples: number of samples processed during the operation
        num_steps: number of steps executed during the operation

    Returns:
        dict mapping ``{split}_runtime`` (plus the optional per-second rates)
        to rounded float values.
    """
    elapsed = time.time() - start_time
    metrics = {f"{split}_runtime": round(elapsed, 4)}
    if num_samples is not None:
        metrics[f"{split}_samples_per_second"] = round(num_samples / elapsed, 3)
    if num_steps is not None:
        metrics[f"{split}_steps_per_second"] = round(num_steps / elapsed, 3)
    return metrics
class SchedulerType(ExplicitEnum):
    """Names of the supported learning-rate scheduler types."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
    """
    A helper class that tracks cpu and gpu memory.

    This class will silently skip unless ``psutil`` is available. Install with ``pip install psutil``.

    When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.

    Example ::

        self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
        self._memory_tracker.start()
        code ...
        metrics = {"train_runtime": 10.5}
        self._memory_tracker.stop_and_update_metrics(metrics)

    At the moment GPU tracking is only for ``pytorch``, but can be extended to support ``tensorflow``.

    To understand this class' intricacies please read the documentation of :meth:`~transformers.Trainer.log_metrics`.
    """

    # map trainer methods to metrics prefix
    stages = {
        "__init__": "init",
        "train": "train",
        "evaluate": "eval",
        "predict": "test",
    }

    def __init__(self, skip_memory_metrics=False):
        """Create the tracker.

        Args:
            skip_memory_metrics: when True (or when psutil is missing) every
                method of this class becomes a no-op.
        """
        self.skip_memory_metrics = skip_memory_metrics
        if not is_psutil_available():
            # soft dependency on psutil
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            # No further state is initialized; all public methods check this
            # flag (or cur_stage) before touching the attributes below.
            return
        import psutil  # noqa

        if is_torch_cuda_available():
            import torch

            self.torch = torch
            self.gpu = {}
        else:
            self.torch = None
        self.process = psutil.Process()
        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """derives the stage/caller name automatically"""
        # f_back.f_back: skip this frame and the tracker method that called us,
        # landing on the Trainer method (e.g. train/evaluate) that owns the stage.
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller in self.stages:
            return self.stages[caller]
        else:
            raise ValueError(
                f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
            )

    def cpu_mem_used(self):
        """get resident set size memory for the current process"""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        """Busy-loop sampling RSS to capture the CPU memory peak.

        Runs on the daemon thread launched by start(); exits once
        ``self.peak_monitoring`` is cleared by stop().
        """
        self.cpu_mem_used_peak = -1
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            # time.sleep(0.001) # 1msec
            if not self.peak_monitoring:
                break

    def start(self):
        """start tracking for the caller's stage"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.cur_stage = stage
        # Collect garbage first so the baseline reflects live objects only.
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
        # cpu
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        """stop tracking for the passed stage"""
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # NOTE(review): calling stop() before start() would raise AttributeError
        # (peak_monitoring is only created in start()) -- confirm callers always
        # pair the two.
        # this sends a signal to peak_monitor_func to complete its loop
        self.peak_monitoring = False
        # first ensure all objects get collected and their memory is freed
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.empty_cache()
        # concepts:
        # - alloc_delta: the difference of allocated memory between the end and the start
        # - peaked_delta: the difference between the peak memory and the current memory
        # in order to know how much memory the measured code consumed one needs to sum these two
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = dict(
                begin=self.gpu_mem_used_at_start,
                end=self.gpu_mem_used_now,
                alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
                peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
            )
        # cpu
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = dict(
            begin=self.cpu_mem_used_at_start,
            end=self.cpu_mem_used_now,
            alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
            peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
        )
        # reset - cycle finished
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """updates the metrics"""
        if self.skip_memory_metrics:
            return
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # since we don't have a way to return init metrics, we push them into the first of train/val/predict
        stages = [stage]
        if not self.init_reported:
            stages.insert(0, "init")
            self.init_reported = True
        for stage in stages:
            for t in ["alloc", "peaked"]:
                if stage in self.cpu and t in self.cpu[stage]:
                    metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
                if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
                    metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
        # if we need additional debug info, enable the following
        # for t in ["begin", "end"]:
        #     if stage in self.cpu and t in self.cpu[stage]:
        #         metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
        #     if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
        #         metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]
        # since memory can be allocated before init, and it might be difficult to track overall
        # memory usage, in particular for GPU, let's report memory usage at the point init was called
        if stages[0] == "init":
            metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
            if self.torch is not None:
                metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
            # if we also wanted to report any additional memory allocations in between init and
            # whatever the next stage was we could also report this:
            # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
            #     metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
            # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
            #     metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]

    def stop_and_update_metrics(self, metrics=None):
        """combine stop and metrics update in one call for simpler code"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        # init doesn't have metrics to update so we just save that data for later stages to retrieve
        if metrics is not None:
            self.update_metrics(stage, metrics)
def denumpify_detensorize(metrics):
    """Recursively convert numpy scalars and 1-element torch tensors to plain Python numbers.

    Containers (lists, tuples, dicts) are rebuilt with their original type;
    any value that is neither a container, a numpy scalar, nor a 1-element
    tensor is returned unchanged.
    """
    if isinstance(metrics, (list, tuple)):
        converted = (denumpify_detensorize(item) for item in metrics)
        return type(metrics)(converted)
    if isinstance(metrics, dict):
        return type(metrics)({key: denumpify_detensorize(value) for key, value in metrics.items()})
    if isinstance(metrics, np.generic):
        return metrics.item()
    if is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
        return metrics.item()
    return metrics
def number_of_arguments(func):
    """Return how many parameters ``func`` still accepts.

    ``functools.partial`` objects are unwrapped: the count excludes the
    positional and keyword arguments already bound by the partial.
    """
    if not isinstance(func, functools.partial):
        return len(inspect.signature(func).parameters)
    already_bound = len(func.args) + len(func.keywords)
    return len(inspect.signature(func.func).parameters) - already_bound
class ShardedDDPOption(ExplicitEnum):
    """Named options selecting how sharded data-parallel training is applied."""

    SIMPLE = "simple"
    ZERO_DP_2 = "zero_dp_2"
    ZERO_DP_3 = "zero_dp_3"
    OFFLOAD = "offload"
    AUTO_WRAP = "auto_wrap"
|
main.py | import json
import os
from argparse import ArgumentParser
from data import get_data
from google.cloud import storage as st
from inference import load_registered_model
# Fields every inbound PubSub message must carry.
KEYS = ['pid', 'study', 'mlflow_id', 'last_sync', 'last_sync_update']


def validate_json(s):
    """Return True when ``s`` is valid JSON containing exactly the expected KEYS.

    Fix: a syntactically invalid payload previously escaped as
    ``json.JSONDecodeError`` instead of returning False, so the caller's
    "malformed JSON" error path was unreachable; decode failures now
    report False.
    """
    try:
        data = json.loads(s)
    except ValueError:  # json.JSONDecodeError is a ValueError subclass
        return False
    # Same lengths.
    if len(data) != len(KEYS):
        return False
    # Same keys.
    return all(key in data for key in KEYS)
def check_message(message):
    """Validate an incoming message; return an error string, or None when it is acceptable."""
    if not message:
        return 'Message must include data.'
    if validate_json(message):
        return None
    return 'Message "{}" contains malformed JSON.'.format(message)
def update_last_sync_date(last_sync_bucket, filename, last_sync_date_str, is_trainfer_str, args_str):
    """Upload the sync-state blob (date, trainfer flag, args -- newline separated) to Cloud Storage."""
    client = st.Client()
    target_bucket = client.get_bucket(last_sync_bucket)
    blob = target_bucket.blob(filename)
    payload = '%s\n%s\n%s' % (last_sync_date_str, is_trainfer_str, args_str)
    blob.upload_from_string(payload)
def process_message(message):
    """Handle one PubSub message: validate, fetch data, run inference, record the sync state.

    Fix: the original rebound ``data = get_data(...)``, clobbering the
    validated message payload and then reading ``last_sync_update`` / ``pid``
    off the *fetched* object. The message payload and the fetched data are now
    kept in separate variables so the keys guaranteed by validate_json are the
    ones actually read.
    """
    error = check_message(message)
    if error:
        print(error)
        return
    payload = json.loads(message)
    # Run inference.
    inference_data = get_data(payload['pid'], payload['last_sync'])
    load_registered_model(payload['mlflow_id'])
    update_last_sync_date(payload['last_sync_update']['last_sync_bucket'],
                          payload['last_sync_update']['filename'],
                          payload['last_sync_update']['last_sync_date_str'],
                          payload['last_sync_update']['is_trainfer_str'],
                          payload['last_sync_update']['args_str'])
    print('Now performing inference with model "%s".' % payload['pid'])
def parse_args():
    """Parse command-line options for the preprocessing server and return the namespace."""
    parser = ArgumentParser(description='Server for preprocessing data.')
    parser.add_argument('-d', '--debug', action='store_true',
                        help='enable debugging mode (not intended for production)')
    parser.add_argument('-m', '--message', dest='message', action='store', type=str,
                        help='PubSub message - json encoded str')
    return parser.parse_args()
if __name__ == '__main__':
    # Entry point: parse CLI args and process the provided PubSub message.
    args = parse_args()
    process_message(args.message)
|
PythonCommandBase.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from abc import ABCMeta, abstractclassmethod
from time import sleep
import threading
import cv2
from .Keys import KeyPress, Button, Hat, Direction, Stick
from . import CommandBase
# Exception used to signal that a stop request was sent from the main window;
# raising it unwinds the command's worker thread cleanly.
class StopThread(Exception):
    """Raised inside a command thread to abort execution after a stop request."""
    pass
# Python command
class PythonCommand(CommandBase.Command):
    """Base class for user-written commands that run on a background thread.

    Subclasses implement ``do()``; ``start()`` launches it via ``do_safe()``,
    and ``sendStopRequest()`` / ``checkIfAlive()`` cooperate to stop it by
    raising StopThread inside the worker thread.
    """

    def __init__(self):
        super(PythonCommand, self).__init__()
        self.keys = None          # KeyPress helper bound to the serial port (created lazily)
        self.thread = None        # worker thread running do_safe()
        self.alive = True         # cleared to request the command to stop
        self.postProcess = None   # optional callback invoked during shutdown

    # NOTE(review): `abstractclassmethod` has been deprecated since Python 3.3
    # in favor of `abstractmethod`; confirm before modernizing.
    @abstractclassmethod
    def do(self):
        """Command body implemented by subclasses; executed on the worker thread."""
        pass

    def do_safe(self, ser):
        """Run do() with StopThread/exception guards; always releases the keys on failure."""
        if self.keys is None:
            self.keys = KeyPress(ser)
        try:
            if self.alive:
                self.do()
                self.finish()
        except StopThread:
            print('-- finished successfully. --')
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit.
            if self.keys is None:
                self.keys = KeyPress(ser)
            print('interruppt')
            import traceback
            traceback.print_exc()
            self.keys.end()
            self.alive = False

    def start(self, ser, postProcess=None):
        """Start the command on a new daemon-less worker thread (no-op if one exists)."""
        self.alive = True
        self.postProcess = postProcess
        if not self.thread:
            self.thread = threading.Thread(target=self.do_safe, args=(ser,))
            self.thread.start()

    def end(self, ser):
        """Request the running command to stop."""
        self.sendStopRequest()

    def sendStopRequest(self):
        # Clearing `alive` makes the next checkIfAlive() call raise StopThread.
        if self.checkIfAlive():  # try if we can stop now
            self.alive = False
            print('-- sent a stop request. --')

    # NOTE: Use this function if you want to get out from a command loop by yourself
    def finish(self):
        self.alive = False
        self.end(self.keys.ser)

    # press button at duration times(s)
    def press(self, buttons, duration=0.1, wait=0.1):
        self.keys.input(buttons)
        self.wait(duration)
        self.keys.inputEnd(buttons)
        self.wait(wait)
        self.checkIfAlive()

    # press button at duration times(s) repeatedly
    def pressRep(self, buttons, repeat, duration=0.1, interval=0.1, wait=0.1):
        for i in range(0, repeat):
            # skip the inter-press interval after the final repetition
            self.press(buttons, duration, 0 if i == repeat - 1 else interval)
        self.wait(wait)

    # add hold buttons
    def hold(self, buttons, wait=0.1):
        self.keys.hold(buttons)
        self.wait(wait)

    # release holding buttons
    def holdEnd(self, buttons):
        self.keys.holdEnd(buttons)
        self.checkIfAlive()

    # do nothing at wait time(s)
    def wait(self, wait):
        sleep(wait)
        self.checkIfAlive()

    def checkIfAlive(self):
        """Return True while running; on a pending stop request, clean up and raise StopThread."""
        if not self.alive:
            self.keys.end()
            self.keys = None
            self.thread = None
            if not self.postProcess is None:
                self.postProcess()
                self.postProcess = None
            # raise exception for exit working thread
            raise StopThread('exit successfully')
        else:
            return True

    # Use time glitch
    # Controls the system time and get every-other-day bonus without any punishments
    def timeLeap(self, is_go_back=True):
        """Navigate the console settings UI to shift the system date and return HOME.

        The press sequence below mirrors the settings menu layout; timings
        (duration/wait) were presumably tuned empirically -- verify on the
        target device before changing them.
        """
        self.press(Button.HOME, wait=1)
        self.press(Direction.DOWN)
        self.press(Direction.RIGHT)
        self.press(Direction.RIGHT)
        self.press(Direction.RIGHT)
        self.press(Direction.RIGHT)
        self.press(Button.A, wait=1.5)  # System Settings
        self.press(Direction.DOWN, duration=2, wait=0.5)
        self.press(Button.A, wait=0.3)  # System Settings > System
        self.press(Direction.DOWN)
        self.press(Direction.DOWN)
        self.press(Direction.DOWN)
        self.press(Direction.DOWN, wait=0.3)
        self.press(Button.A, wait=0.2)  # Date and Time
        self.press(Direction.DOWN, duration=0.7, wait=0.2)
        # increment and decrement
        if is_go_back:
            self.press(Button.A, wait=0.2)
            self.press(Direction.UP, wait=0.2)  # Increment a year
            self.press(Direction.RIGHT, duration=1.5)
            self.press(Button.A, wait=0.5)
            self.press(Button.A, wait=0.2)
            self.press(Direction.LEFT, duration=1.5)
            self.press(Direction.DOWN, wait=0.2)  # Decrement a year
            self.press(Direction.RIGHT, duration=1.5)
            self.press(Button.A, wait=0.5)
        # use only increment
        # for use of faster time leap
        else:
            self.press(Button.A, wait=0.2)
            self.press(Direction.RIGHT)
            self.press(Direction.RIGHT)
            self.press(Direction.UP, wait=0.2)  # increment a day
            self.press(Direction.RIGHT, duration=1)
            self.press(Button.A, wait=0.5)
        self.press(Button.HOME, wait=1)
        self.press(Button.HOME, wait=1)
# Directory that template images for matching are loaded from (relative to CWD).
TEMPLATE_PATH = "./Template/"
class ImageProcPythonCommand(PythonCommand):
    """PythonCommand variant with OpenCV-based screenshot recognition helpers."""

    def __init__(self, cam):
        super(ImageProcPythonCommand, self).__init__()
        self.camera = cam  # capture device providing readFrame()

    # Judge if current screenshot contains an image using template matching.
    # Unless the template's color matters, use_gray=True is recommended for
    # performance (matches the original Japanese note: grayscale matching is
    # faster when color differences can be ignored).
    def isContainTemplate(self, template_path, threshold=0.7, use_gray=True, show_value=False):
        """Return True when the template's best ZNCC match score exceeds threshold."""
        src = self.camera.readFrame()
        src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) if use_gray else src
        template = cv2.imread(TEMPLATE_PATH+template_path, cv2.IMREAD_GRAYSCALE if use_gray else cv2.IMREAD_COLOR)
        w, h = template.shape[1], template.shape[0]
        method = cv2.TM_CCOEFF_NORMED
        res = cv2.matchTemplate(src, template, method)
        _, max_val, _, max_loc = cv2.minMaxLoc(res)
        if show_value:
            print(template_path + ' ZNCC value: ' + str(max_val))
        if max_val > threshold:
            # Draw the matched region on a local copy (presumably for debugging;
            # the annotated frame is not returned or displayed here).
            if use_gray:
                src = cv2.cvtColor(src, cv2.COLOR_GRAY2BGR)
            top_left = max_loc
            bottom_right = (top_left[0] + w, top_left[1] + h)
            cv2.rectangle(src, top_left, bottom_right, (255, 0, 255), 2)
            return True
        else:
            return False

    def mostSimilarTemplate(self, template_path_list, use_gray=True, show_value=True):
        """Return the index of the template with the highest match score against the current frame."""
        src = self.camera.readFrame()
        src = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY) if use_gray else src
        max_vals = []
        for template_path in template_path_list:
            template = cv2.imread(TEMPLATE_PATH + template_path, cv2.IMREAD_GRAYSCALE if use_gray else cv2.IMREAD_COLOR)
            w, h = template.shape[1], template.shape[0]
            method = cv2.TM_CCOEFF_NORMED
            res = cv2.matchTemplate(src, template, method)
            _, max_val, _, max_loc = cv2.minMaxLoc(res)
            max_vals.append(max_val)
        if show_value:
            print(list(zip(template_path_list, max_vals)))
        return max_vals.index(max(max_vals))

    # Get interframe difference binarized image
    # (binarized via frame-to-frame differencing over three consecutive frames)
    def getInterframeDiff(self, frame1, frame2, frame3, threshold):
        diff1 = cv2.absdiff(frame1, frame2)
        diff2 = cv2.absdiff(frame2, frame3)
        diff = cv2.bitwise_and(diff1, diff2)
        # binarize
        img_th = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)[1]
        # remove noise
        mask = cv2.medianBlur(img_th, 3)
        return mask
|
socket.py | #!/usr/bin/env python3
# Part of PieCoin educational project by SweetPalma, 2017. All rights reserved.
from threading import Thread
import socket
import struct
# Local modules:
from nanocoin.config import (NANOCOIN_ENCODING,
NANOCOIN_RANGE)
# Nanocoin message structure:
# NANOCOINXXXXYYYYMMMMMMMMMMMMMMM...
#
# NANOCOIN - Nanocoin header.
# XXXX - 4 bytes of sender port.
# YYYY - 4 bytes of message length.
# MMMM... - Message body, length is defined by YYYY.
# Nanocoin message response structure:
# XXXXMMMMMMMMMMMMMMMMMMMMMMMMMM...
#
# XXXX - 4 bytes of message length.
# MMMM... - Message body.
# General class:
class Socket(object):
    ''' Object that performs socket manipulations. '''

    def __init__(self, port: int = None):
        ''' Returns new NanoCoin Socket object.

        When no port is given, the first free port in NANOCOIN_RANGE is used;
        an explicitly requested port must be free or ValueError is raised.
        '''
        # Looking for a free port:
        if not port:
            for port in range(*NANOCOIN_RANGE):
                if self.isfree(port):
                    break
        # If port is already taken:
        elif not self.isfree(port):
            raise ValueError('Port %d is already taken!' % port)
        # Basic values:
        self.handler = None
        self.encoding = NANOCOIN_ENCODING
        # NOTE(review): duplicate assignment -- handler was already set to None above.
        self.handler = None
        self.alive = False
        self.port = port

    @staticmethod
    def isfree(port: int) -> bool:
        ''' Tests some port for being free to bind. '''
        # NOTE(review): if socket.socket() itself raised, `tester` would be
        # unbound in the finally clause -- confirm this cannot happen in practice.
        try:
            tester = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            tester.bind(('localhost', port))
        except OSError:
            result = False
        else:
            result = True
        finally:
            tester.close()
        return result

    def start(self, daemon: bool = True):
        ''' Starts current socket handler in a separate thread. '''
        # Preparing socket:
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind(('localhost', self.port))
        listener.listen(5)
        # Loop body:
        def loop_body():
            # Main loop:
            while self.alive:
                # Waiting for new connection:
                # NOTE(review): if accept() raises, `connection` is unbound in
                # the finally clause below -- verify before relying on errors here.
                try:
                    # Accepting it and reading NanoCoin header:
                    connection, address = listener.accept()
                    if not connection.recv(8) == b'NANOCOIN':
                        raise socket.error('Message with invalid header.')
                    # Reading rest of PieCoin message header (sender and len):
                    try:
                        sender, message_length = struct.unpack('!II',
                            connection.recv(4 * 2))
                    except struct.error:
                        raise socket.error('Message with invalid header size.')
                    # Reading rest of message:
                    message = connection.recv(message_length).decode(self.encoding)
                    # Handling and encoding (handler must be assigned by the user):
                    answer = self.handler(sender, message) or 'ok'
                    answer = bytes(answer, self.encoding)
                    # Sending answer:
                    result_length = struct.pack('!I', len(answer))
                    connection.send(result_length + answer)
                # Catching errors:
                except socket.error as e:
                    print('Shit: %s' % e)
                # Closing connection:
                finally:
                    connection.close()
            # Closing:
            listener.close()
        # Starting thread:
        self.alive = True
        self.thread = Thread(target=loop_body)
        self.thread.daemon = daemon
        self.thread.start()

    def stop(self):
        ''' Stops current socket handler. '''
        # Clearing `alive` lets loop_body exit after its current iteration.
        self.alive = False
        self.thread.join()

    def send(self, port: int, message: str) -> str:
        ''' Send message to certain port or list of ports.

        Returns the decoded answer string, or False when the connection or
        the answer could not be completed (note: False, not an exception).
        '''
        # In case if input is list of ports:
        if isinstance(port, list):
            result = []
            for p in port:
                result.append(self.send(p, message))
            return result
        # Encoding message body:
        message = bytes(message, self.encoding)
        # Building message header (piecoin label, sender port and length):
        message_header = struct.pack('!II', self.port, len(message))
        message_header = b'NANOCOIN' + message_header
        # Operning socket and sending message:
        try:
            connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            connection.connect(('localhost', port))
            connection.send(message_header + message)
        except (socket.error, OSError, ConnectionError):
            return False
        # Receiving answer:
        try:
            size = struct.unpack('!I', connection.recv(4))[0]
            result = connection.recv(size).decode(self.encoding)
        except struct.error:
            result = False
        # Closing and returning:
        connection.close()
        return result

    def locate(self, port_range:tuple = NANOCOIN_RANGE) -> list:
        ''' Returns list of other available nodes.

        Probes every port in the range (except our own) with a "hi" message
        and collects the ones that answer.
        '''
        result = []
        for port in range(*port_range):
            if port != self.port:
                answer = self.send(port, 'hi')
                if answer:
                    result.append(port)
        return result
|
test_run_tracker.py | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import http.server
import json
import threading
from urllib.parse import parse_qs
from pants.auth.cookies import Cookies
from pants.goal.run_tracker import RunTracker
from pants.testutil.test_base import TestBase
from pants.util.contextutil import temporary_file_path
from pants.version import VERSION
class RunTrackerTest(TestBase):
    """Tests for RunTracker stats uploading and its nested-dict merge helpers."""

    def assert_upload_stats(self, *, response_code) -> None:
        """Run a local HTTP server and verify post_stats round-trips for both stats versions."""
        stats = {"stats": {"foo": "bar", "baz": 42}}

        class Handler(http.server.BaseHTTPRequestHandler):
            # The parameter is named `handler` (not `self`) so the enclosing
            # test instance `self` stays reachable inside the method body.
            def do_POST(handler):
                try:
                    if handler.path.startswith("/redirect"):
                        # Redirect endpoints encode the HTTP status in the path suffix.
                        code = int(handler.path[-3:])
                        handler.send_response(code)
                        handler.send_header("location", mk_url("/upload"))
                        handler.end_headers()
                    else:
                        self.assertEqual("/upload", handler.path)
                        stats_version = handler.headers["X-Pants-Stats-Version"]
                        self.assertIn(stats_version, {"1", "2"})
                        self.assertEqual(handler.headers["User-Agent"], f"pants/v{VERSION}")
                        length = int(handler.headers["Content-Length"])
                        content = handler.rfile.read(length).decode()
                        if stats_version == "2":
                            # v2 posts a JSON body with a single "builds" list.
                            self.assertEqual("application/json", handler.headers["Content-type"])
                            decoded_post_data = json.loads(content)
                            self.assertEqual(len(decoded_post_data), 1)
                            builds = decoded_post_data["builds"]
                            self.assertEqual(len(builds), 1)
                            received_stats = builds[0]
                        else:
                            # v1 posts form-encoded data with JSON-encoded values.
                            self.assertEqual(
                                "application/x-www-form-urlencoded", handler.headers["Content-type"]
                            )
                            received_stats = {
                                k: json.loads(v[0]) for k, v in parse_qs(content).items()
                            }
                        self.assertEqual(stats, received_stats)
                        handler.send_response(response_code)
                        handler.end_headers()
                except Exception:
                    handler.send_response(400)  # Ensure the main thread knows the test failed.
                    raise

        # Port 0 lets the OS pick a free port.
        server_address = ("", 0)
        server = http.server.HTTPServer(server_address, Handler)
        host, port = server.server_address

        def mk_url(path):
            return f"http://{host}:{port}{path}"

        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        self.context(for_subsystems=[Cookies])
        self.assertTrue(RunTracker.post_stats(mk_url("/upload"), stats, stats_version=1))
        self.assertTrue(RunTracker.post_stats(mk_url("/upload"), stats, stats_version=2))
        # 307 preserves the POST through the redirect; 302 is expected to fail.
        self.assertTrue(RunTracker.post_stats(mk_url("/redirect307"), stats, stats_version=1))
        self.assertFalse(RunTracker.post_stats(mk_url("/redirect302"), stats, stats_version=2))

        server.shutdown()
        server.server_close()

    def test_upload_stats(self):
        """post_stats should treat any 2xx success code as success."""
        self.assert_upload_stats(response_code=200)
        self.assert_upload_stats(response_code=201)
        self.assert_upload_stats(response_code=204)

    def test_invalid_stats_version(self):
        """Unsupported stats_version values must raise ValueError."""
        stats = {"stats": {"foo": "bar", "baz": 42}}
        url = "http://example.com/upload/"
        with self.assertRaises(ValueError):
            RunTracker.post_stats(url, stats, stats_version=0)
        with self.assertRaises(ValueError):
            RunTracker.post_stats(url, stats, stats_version=None)
        with self.assertRaises(ValueError):
            RunTracker.post_stats(url, stats, stats_version=9)
        with self.assertRaises(ValueError):
            RunTracker.post_stats(url, stats, stats_version="not a number")

    def test_write_stats_to_json_file(self):
        """Stats written to a JSON file must read back unchanged."""
        # Set up
        stats = {"stats": {"foo": "bar", "baz": 42}}
        # Execute & verify
        with temporary_file_path() as file_name:
            RunTracker.write_stats_to_json(file_name, stats)
            with open(file_name, "r") as f:
                result = json.load(f)
                self.assertEqual(stats, result)

    def test_create_dict_with_nested_keys_and_val(self):
        """Each appended key adds one nesting level; an empty key list is an error."""
        keys = []
        with self.assertRaises(ValueError):
            RunTracker._create_dict_with_nested_keys_and_val(keys, "something")
        keys += ["one"]
        self.assertEqual(
            RunTracker._create_dict_with_nested_keys_and_val(keys, "something"),
            {"one": "something"},
        )
        keys += ["two"]
        self.assertEqual(
            RunTracker._create_dict_with_nested_keys_and_val(keys, "something"),
            {"one": {"two": "something"}},
        )
        keys += ["three"]
        self.assertEqual(
            RunTracker._create_dict_with_nested_keys_and_val(keys, "something"),
            {"one": {"two": {"three": "something"}}},
        )
        keys += ["four"]
        self.assertEqual(
            RunTracker._create_dict_with_nested_keys_and_val(keys, "something"),
            {"one": {"two": {"three": {"four": "something"}}}},
        )

    def test_merge_list_of_keys_into_dict(self):
        """Merging key paths builds/extends nested dicts; leaf/branch conflicts raise."""
        data = {}
        keys = []
        with self.assertRaises(ValueError):
            RunTracker._merge_list_of_keys_into_dict(data, keys, "something")
        with self.assertRaises(ValueError):
            RunTracker._merge_list_of_keys_into_dict(data, keys, "something", -1)
        keys = ["key"]
        with self.assertRaises(ValueError):
            RunTracker._merge_list_of_keys_into_dict(data, keys, "something", 1)
        keys = ["a"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "O-N-E")
        self.assertEqual(data, {"a": "O-N-E"})
        keys = ["one", "two", "three"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "T-H-R-E-E")
        self.assertEqual(data, {"one": {"two": {"three": "T-H-R-E-E"}}, "a": "O-N-E"})
        keys = ["one", "two", "a"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "L-A")
        self.assertEqual(data, {"one": {"two": {"a": "L-A", "three": "T-H-R-E-E"}}, "a": "O-N-E"})
        keys = ["c", "d", "e", "f"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "F-O-U-R")
        self.assertEqual(
            data,
            {
                "one": {"two": {"a": "L-A", "three": "T-H-R-E-E"}},
                "a": "O-N-E",
                "c": {"d": {"e": {"f": "F-O-U-R"}}},
            },
        )
        keys = ["one", "two", "x", "y"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "W-H-Y")
        self.assertEqual(
            data,
            {
                "one": {"two": {"a": "L-A", "three": "T-H-R-E-E", "x": {"y": "W-H-Y"}}},
                "a": "O-N-E",
                "c": {"d": {"e": {"f": "F-O-U-R"}}},
            },
        )
        keys = ["c", "d", "e", "g", "h"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "H-E-L-L-O")
        self.assertEqual(
            data,
            {
                "one": {"two": {"a": "L-A", "three": "T-H-R-E-E", "x": {"y": "W-H-Y"}}},
                "a": "O-N-E",
                "c": {"d": {"e": {"f": "F-O-U-R", "g": {"h": "H-E-L-L-O"}}}},
            },
        )
        keys = ["one", "two", "x", "z"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "Z-E-D")
        self.assertEqual(
            data,
            {
                "one": {
                    "two": {"a": "L-A", "three": "T-H-R-E-E", "x": {"y": "W-H-Y", "z": "Z-E-D"}}
                },
                "a": "O-N-E",
                "c": {"d": {"e": {"f": "F-O-U-R", "g": {"h": "H-E-L-L-O"}}}},
            },
        )
        keys = ["c", "d", "e", "g", "i"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "E-Y-E")
        self.assertEqual(
            data,
            {
                "one": {
                    "two": {"a": "L-A", "three": "T-H-R-E-E", "x": {"y": "W-H-Y", "z": "Z-E-D"}}
                },
                "a": "O-N-E",
                "c": {"d": {"e": {"f": "F-O-U-R", "g": {"h": "H-E-L-L-O", "i": "E-Y-E"}}}},
            },
        )
        keys = ["a"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "new O-N-E")
        self.assertEqual(
            data,
            {
                "one": {
                    "two": {"a": "L-A", "three": "T-H-R-E-E", "x": {"y": "W-H-Y", "z": "Z-E-D"}}
                },
                "a": "new O-N-E",
                "c": {"d": {"e": {"f": "F-O-U-R", "g": {"h": "H-E-L-L-O", "i": "E-Y-E"}}}},
            },
        )
        keys = ["one", "two", "a"]
        RunTracker._merge_list_of_keys_into_dict(data, keys, "L-A-L-A")
        self.assertEqual(
            data,
            {
                "one": {
                    "two": {"a": "L-A-L-A", "three": "T-H-R-E-E", "x": {"y": "W-H-Y", "z": "Z-E-D"}}
                },
                "a": "new O-N-E",
                "c": {"d": {"e": {"f": "F-O-U-R", "g": {"h": "H-E-L-L-O", "i": "E-Y-E"}}}},
            },
        )
        keys = ["one", "two", "a", "b", "c"]
        with self.assertRaises(ValueError):
            RunTracker._merge_list_of_keys_into_dict(data, keys, "new A")
|
opc_server.py | import socket
from time import sleep
import threading
import selectors
import numpy as np
class OPCMessage:
    """
    Class for handling OPC messages (http://openpixelcontrol.org/).
    Supports multiple connections by using selectors.
    Based on https://realpython.com/python-sockets/
    """

    def __init__(self, selector, sock, addr, callback, verbose=False):
        """Constructor

        Arguments:
            selector {[type]} -- Selector to use
            sock {[type]} -- A TCP Socket (from socket.accept())
            addr {[type]} -- Address of the client (from socket.accept())
            callback {function} -- Callback to be called once a message is fully read
            verbose {bool} -- When True, debug messages are printed
        """
        self.selector = selector
        self.sock = sock
        self.addr = addr
        self.callback = callback  # this callback is called once a message is fully read
        self._verbose = verbose
        self._resetData()

    def _debug(self, message):
        """Print *message* only in verbose mode."""
        if self._verbose:
            print(message)

    def _set_selector_events_mask(self, mode):
        """Set selector to listen for events: mode is 'r', 'w', or 'rw'."""
        if mode == "r":
            events = selectors.EVENT_READ
        elif mode == "w":
            events = selectors.EVENT_WRITE
        elif mode == "rw":
            events = selectors.EVENT_READ | selectors.EVENT_WRITE
        else:
            raise ValueError("Invalid events mask mode {}".format(mode))
        self.selector.modify(self.sock, events, data=self)

    def _resetData(self):
        """Reset all state information in order to re-use the message instance"""
        self._recv_buffer = b""
        self._send_buffer = b""
        self.opc_header = None
        self._opc_header_len = None
        self.channel = None
        self.message = None
        self.payload_expected = None
        self.messageData = None

    def process_events(self, mask):
        """Main function to handle new events on the selector"""
        if mask & selectors.EVENT_READ:
            self.read()
        if mask & selectors.EVENT_WRITE:
            # don't think we need a write event...
            # self.write()
            # Support long connections -> enter read mode again
            self._resetData()
            self._set_selector_events_mask('r')

    def _read(self):
        """Read data from socket and store in self._recv_buffer"""
        try:
            # Should be ready to read
            data = self.sock.recv(4096)
        except BlockingIOError:
            # Resource temporarily unavailable (errno EWOULDBLOCK)
            pass
        else:
            if data:
                self._recv_buffer += data
            else:
                raise RuntimeError("Peer closed.")

    def processOpcHeader(self):
        """Process OPC Header information and strip from self._recv_buffer if successful"""
        hdrlen = 4
        if len(self._recv_buffer) >= hdrlen:
            header = self._recv_buffer[:hdrlen]
            # Store state information: channel, command byte, then
            # big-endian 16-bit payload length.
            self.opc_header = header
            self.channel = header[0]
            self.message = header[1]
            self.payload_expected = (header[2] << 8) | header[3]
            # Strip header from buffer
            self._recv_buffer = self._recv_buffer[hdrlen:]

    def processMessageData(self):
        """Process OPC Data part of the message"""
        content_len = self.payload_expected
        if not len(self._recv_buffer) >= content_len:
            return
        self._debug("Message fully read")
        data = self._recv_buffer[:content_len]
        self._recv_buffer = self._recv_buffer[content_len:]
        # Store state information
        self.messageData = data
        # Call the callback
        if self.callback is not None:
            self.callback(data)
        # Set selector to listen for write events, we're done reading.
        self._set_selector_events_mask("w")

    def read(self):
        """Method to handle the read event"""
        self._read()
        if self.opc_header is None:
            self.processOpcHeader()
        if self.opc_header:
            if self.messageData is None:
                self.processMessageData()

    def close(self):
        """Unregister from the selector and close the socket, reporting (not raising) teardown errors."""
        # Fix: _debug() takes a single message argument; the previous call
        # passed two positional args ("closing connection to", self.addr),
        # which raised TypeError on every close.
        self._debug("closing connection to {}".format(self.addr))
        try:
            self.selector.unregister(self.sock)
        except Exception as e:
            print(
                "error: selector.unregister() exception for {}: {}".format(self.addr, repr(e))
            )
        try:
            self.sock.close()
        except OSError as e:
            print(
                "error: socket.close() exception for {}: {}".format(self.addr, repr(e))
            )
        finally:
            # Delete reference to socket object for garbage collection
            self.sock = None
class ServerThread(object):
    """Thread object to continuously read messages from socket.

    Owns a selector loop on a background daemon thread: accepts incoming
    connections on the listening socket and feeds each fully read OPC
    message to the user-supplied callback.
    """

    def __init__(self, socket, callback, verbose=False):
        """Constructor for thread object.

        Arguments:
            socket {[type]} -- Socket to connect (must be in listening state)
            callback {function} -- Callback to call when OPC messages have been fully read
        """
        self._socket = socket
        self._callback = callback
        self._thread = None        # the threading.Thread, once start() is called
        self._stopSignal = None    # set True to ask the selector loop to exit
        self._verbose = verbose
        self.sel = selectors.DefaultSelector()

    def _debug(self, message):
        # Only chatter when verbose mode was requested.
        if self._verbose:
            print(message)

    def start(self):
        """Start the server thread (no-op if it is already running)."""
        if self._thread is not None:
            return
        self._stopSignal = False
        self._socket.listen()
        print("FadeCandy Server thread listening.")
        self._thread = threading.Thread(target=self._process_thread, args=[self._socket, self._callback])
        self._thread.daemon = True
        self._thread.start()

    def stop(self, timeout=1):
        """Stop the server thread.

        Raises:
            TimeoutError: if the thread does not exit within `timeout` seconds.
        """
        self._stopSignal = True
        self._thread.join(timeout=timeout)
        # Bug fix: Thread.isAlive() was removed in Python 3.9; is_alive()
        # is the supported spelling on all Python 3 versions.
        if self._thread.is_alive():
            raise TimeoutError("thread.join timed out")

    def isAlive(self):
        """Check whether the thread is alive.

        Alive means that the background thread is running.
        If the socket is closed, the background thread will exit.
        """
        if self._thread is None:
            return False
        if not self._thread.is_alive():  # is_alive(): see note in stop()
            return False
        return True

    def _accept_wrapper(self, sock, callback):
        # Accept a pending connection and register it for read events.
        conn, addr = sock.accept()  # Should be ready to read
        self._debug("accepted connection from {}".format(addr))
        conn.setblocking(False)
        message = OPCMessage(self.sel, conn, addr, callback)
        self.sel.register(conn, selectors.EVENT_READ, data=message)

    def _process_thread(self, lsock, callback):
        # Method to run in background thread.
        lsock.setblocking(False)
        self.sel.register(lsock, selectors.EVENT_READ, data=None)
        self._debug("FadeCandy Server: Background thread started")
        try:
            while not self._stopSignal:
                # For error handling: unregister and register again so we can detect closed sockets
                # Don't know if this can be done better...
                self.sel.unregister(lsock)
                self.sel.register(lsock, selectors.EVENT_READ, data=None)
                # Use timeout in order to periodically check stop signal
                events = self.sel.select(timeout=0.1)
                for key, mask in events:
                    if key.data is None:
                        # No attached message object: this is the listening socket.
                        self._accept_wrapper(key.fileobj, callback)
                    else:
                        message = key.data
                        try:
                            message.process_events(mask)
                        except Exception:
                            message.close()
                            self._debug("FadeCandy Server: Background thread exiting due to message exception")
                            self._stopSignal = True
        except Exception as e:
            self._debug("FadeCandy Server: Background thread exiting due to exception: {}".format(e))
        finally:
            self._debug("FaceCandy Server: Background thread closing socket")
            self.sel.close()
            self._socket.close()
class Server(object):
    """Fake FadeCandy/OPC server that records the most recent pixel frame."""

    # Using static methods here since sockets can be used only once
    sockets = []
    all_threads = []

    def __init__(self, host, port, verbose=False):
        self._host = host
        self._port = port
        self._socket = None
        self._thread = None
        self._lastMessage = None   # most recently received frame, shape (3, n)
        self._verbose = verbose

    def __del__(self):
        # Destructors in python... I'm never complaining about C++ again...
        # Basically this thing is (maybe) called at some point,
        # except if anyone manages to build cyclic references.
        if self._thread is not None:
            self._stopThread()

    def _stopThread(self):
        # Stopping gracefully...
        # ToDo: Error handling
        self._thread.stop()
        if self._thread in self.all_threads:
            self.all_threads.remove(self._thread)

    def stop(self):
        self._stopThread()

    def _clean_threads(self):
        """Stop and forget threads whose listening socket is no longer usable."""
        stale = []
        for thread in self.all_threads:
            try:
                # Bug fix: getpeername() raises on *any* listening socket
                # (it is never connected), which wrongly marked every thread
                # as stale. getsockname() only raises once the socket is
                # closed, which is the condition we actually want.
                thread._socket.getsockname()
            except Exception:
                stale.append(thread)
        for thread in stale:
            # ToDo: Error handling
            print("Cleaning up stale thread")
            thread.stop()
            self.all_threads.remove(thread)

    def _get_threads(self, host, port):
        """Return the threads whose socket is bound to (host, port).

        Bug fix: the loop previously unpacked into names `host`/`port`,
        shadowing the parameters, so the comparison `host == host and
        port == port` was always true. It also used getpeername(), which
        fails on a listening socket (see _clean_threads).
        """
        self._clean_threads()
        matching = []
        for thread in self.all_threads:
            try:
                bound_host, bound_port = thread._socket.getsockname()
            except Exception:
                continue
            if bound_host == host and bound_port == port:
                matching.append(thread)
        return matching

    def _ensure_listening(self):
        # We want to ensure that anyone who expects pixel information gets the data.
        # Due to garbage collection and timing issues a corresponding thread for the
        # same host and port may still be active and the socket may still be bound.
        if self._thread and self._thread.isAlive():
            return True
        try:
            for thread in self._get_threads(self._host, self._port):
                thread.stop()
                self.all_threads.remove(thread)
            _socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Allow immediate rebinding of a recently closed port.
            _socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _socket.bind((self._host, self._port))
            print("FadeCandy Server begin listening on {}:{}".format(self._host, self._port))
            self._thread = ServerThread(_socket, self._pixelCallback, self._verbose)
            self.all_threads.append(self._thread)
            self._thread.start()
            return True
        except socket.error as e:
            print("FadeCandy Server error listening on {}:{}".format(self._host, self._port))
            print(e)
            self._socket = None
            return False

    def _pixelCallback(self, data):
        # Transform the flat RGB byte stream into a (3, num_pixels) array.
        pixels = np.frombuffer(data, dtype=np.uint8).reshape((-1, 3)).T
        self._lastMessage = pixels

    def get_pixels(self, block=False):
        """Return the last received frame; optionally busy-wait until one arrives."""
        isListening = self._ensure_listening()
        if not isListening:
            raise Exception("Server cannot listen")
        if block:
            while self._lastMessage is None:
                print("Waiting for message...")
                sleep(0.01)
        return self._lastMessage
|
server.py | import subprocess
import threading
from time import sleep
from os import system
# Launch the Bedrock dedicated server as a child process, with its stdin and
# stdout piped so this wrapper can forward console commands and read its log.
process = subprocess.Popen('bedrock_server.exe', stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# Forwards operator input typed on this console to the server process.
def InputLoop():
    """Forward each line typed on this console to the server's stdin.

    Runs forever on its own thread; the flush after every line makes the
    command reach the server immediately.
    """
    while True:
        inp = input() + "\n"
        process.stdin.write(inp.encode())
        process.stdin.flush()
# Echoes the server's stdout to this console.
def OutputLoop():
    """Print each line of the server's stdout to this console.

    Bug fix: the previous implementation stringified the raw bytes object
    (`str(line)`) and then stripped "b'" plus a fixed 5-character suffix,
    which mangled any line actually containing "b'" and blindly chopped
    real characters. Decoding the bytes is the correct conversion.
    """
    while True:
        for line in process.stdout:
            print(line.decode(errors="replace").rstrip("\r\n"))
# Hourly backup cycle: hold world saves, run backup.bat, resume saves.
def BackupLoop():
    """Every hour, pause world saves, run backup.bat, then resume saves.

    NOTE(review): the fixed sleeps assume `save hold` finishes flushing
    within 7.5 s and that backup.bat completes before saves resume --
    confirm these timings against the target server before relying on them.
    """
    while True:
        toType = "save hold" + "\n"
        process.stdin.write(toType.encode())
        process.stdin.flush()
        # Give the server time to flush its save files to disk.
        sleep(7.5)
        system("backup.bat")
        sleep(.75)
        toType = "save resume" + "\n"
        process.stdin.write(toType.encode())
        process.stdin.flush()
        # One backup per hour.
        sleep(3600)
# Start the I/O threads.
# Bug fix: the names were swapped -- `outputt` held the thread running
# InputLoop and vice versa. Behavior was unaffected (both were started),
# but the naming was actively misleading.
inputt = threading.Thread(target=InputLoop)
outputt = threading.Thread(target=OutputLoop)
outputt.start()
inputt.start()
# Give the server time to finish booting before the first backup cycle.
sleep(15)
BackupLoop()
|
ex2.py | # Python 3.3.3
# python3 helloworld_python.py
from threading import Thread, Lock
# Potentially useful thing:
# In Python you "import" a global variable, instead of "export"ing it when you declare it
# (This is probably an effort to make you feel bad about typing the word "global")
num = 0  # shared counter mutated by both worker threads


def threadIncNum(lock, iterations=1000000):
    """Increment the shared global ``num`` once per iteration, under ``lock``.

    Args:
        lock: a threading.Lock guarding ``num``.
        iterations: number of increments to perform; the default preserves
            the original hard-coded count.

    Returns:
        The value of ``num`` after the final increment (previously ``None``;
        callers that ignored the return value are unaffected).
    """
    global num
    for _ in range(iterations):
        # `with lock:` is equivalent to acquire(True)/release but cannot
        # leak the lock if an exception occurs.
        with lock:
            num += 1
    return num
def threadDecNum(lock, iterations=1000001):
    """Decrement the shared global ``num`` once per iteration, under ``lock``.

    NOTE(review): the original count is 1000001 -- one more iteration than
    threadIncNum's 1000000 -- so the demo deterministically ends at
    num == -1. Confirm the asymmetry is intended before "fixing" it; the
    default preserves it.

    Args:
        lock: a threading.Lock guarding ``num``.
        iterations: number of decrements to perform.

    Returns:
        The value of ``num`` after the final decrement.
    """
    global num
    for _ in range(iterations):
        with lock:
            num -= 1
    return num
def main():
    """Run one incrementing and one decrementing thread over shared `num`.

    Both workers serialize on a single Lock, so the result is deterministic:
    1,000,000 increments minus 1,000,001 decrements leaves num == -1.
    """
    lock = Lock()
    thread1 = Thread(target = threadIncNum, args = (lock,))
    thread1.start()
    thread2 = Thread(target = threadDecNum, args = (lock,))
    thread2.start()
    # Wait for both workers before reading the shared counter.
    thread1.join()
    thread2.join()
    print("Num: " + str(num))
main() |
hostperf.py | # -*- coding: UTF-8 -*-
'''
Copyright (c) 2015 Scouter Project.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from scouter.lang.utility import *
from scouter.lang.request import *
from scouter.lang.pack import *
from scouter.lang.inout import *
import scouter.host.top
import scouter.host.process_detail
import scouter.host.disk_usage
import scouter.host.netstat
import scouter.host.who
import scouter.host.meminfo
import datetime, time
import sys, getopt
import platform, socket
import datetime, time
import threading
import psutil
# Per-key timestamp of the previous getInterval() call.
lastTimeDict = dict()


def getInterval(key, interval):
    """Return the seconds elapsed since this key was last requested.

    On the first call for a key there is no previous timestamp, so the
    caller-supplied default ``interval`` is returned instead. The current
    time is always recorded for the next call.
    """
    etime = datetime.datetime.now()
    # Bug-prone idiom fixed: dict.has_key() is Python-2-only; the `in`
    # operator behaves identically and also works under Python 3.
    if key in lastTimeDict:
        stime = lastTimeDict.get(key)
        lastTimeDict[key] = etime
        return diffSeconds(stime, etime)
    else:
        lastTimeDict[key] = etime
        return interval
def process(pack, default_interval=2):
    """Fill `pack` with host CPU / memory / swap / disk / network counters.

    Cumulative psutil counters (disk, network) are converted to per-sample
    rates by dividing their delta() by the elapsed time from getInterval().
    """
    # Instantaneous system-wide CPU utilisation (non-blocking sample).
    pack.putFloat("Cpu" , psutil.cpu_percent(0,False))
    mem = psutil.virtual_memory()
    used = mem.total - mem.available
    pack.putFloat("Mem", mem.percent)
    pack.putFloat("MemA", mem.available / 1024.0 / 1024.0)  # available, MB
    pack.putInt("MemU", int(used / 1024 / 1024))            # used, MB
    pack.putInt("MemT", int(mem.total / 1024 / 1024))       # total, MB
    swap = psutil.swap_memory()
    pack.putFloat("Swap", swap.percent)
    pack.putInt("SwapU", int(swap.used / 1024 / 1024))
    pack.putInt("SwapT", int(swap.total / 1024 / 1024))
    # Disk I/O rates since the previous "disk" sample.
    interval = getInterval("disk", default_interval)
    if interval > 0:
        disk = psutil.disk_io_counters(perdisk=False)
        calc(pack, "ReadCount", delta("ReadCount", disk.read_count) / interval)
        calc(pack, "WriteCount", delta("WriteCount", disk.write_count) / interval)
        calc(pack, "ReadBytes", delta("ReadBytes", disk.read_bytes) / interval)
        calc(pack, "WriteBytes", delta("WriteBytes", disk.write_bytes) / interval)
        calc(pack, "ReadTime", delta("ReadTime", disk.read_time) / interval)
        calc(pack, "WriteTime", delta("WriteTime", disk.write_time) / interval)
    # Network I/O rates since the previous "net" sample.
    interval = getInterval("net", default_interval)
    if interval > 0:
        net = psutil.net_io_counters(pernic=False)
        calc(pack, "PacketsSent", delta("PacketsSent", net.packets_sent) / interval)
        calc(pack, "PacketsRecv", delta("PacketsRecv", net.packets_recv) / interval)
        calc(pack, "BytesSent", delta("BytesSent", net.bytes_sent) / interval)
        calc(pack, "BytesRecv", delta("BytesRecv", net.bytes_recv) / interval)
        calc(pack, "ErrIn", delta("ErrIn", net.errin) / interval)
        calc(pack, "ErrOut", delta("ErrOut", net.errout) / interval)
        calc(pack, "DropIn", delta("DropIn", net.dropin) / interval)
        calc(pack, "DropOut", delta("DropOut", net.dropout) / interval)
def help():
    """Print command-line usage for hostperf.py and exit the process."""
    helpText = """hostperf.py [--scouter_name] [--host host] [--port port] [--debug] [--help]
--scouter_name : set the custom object name
--host host : hostname or ip (127.0.0.1)
--port port : port (6100)
--debug : debug
--help : help
"""
    # Parenthesized print behaves identically to the `print x` statement
    # under Python 2 for a single argument, and is also valid Python 3.
    print(helpText)
    sys.exit()
# Collector connection defaults; overridden by command-line flags in init().
server_ip = "127.0.0.1"
server_port = 6100
debug = False
so_timeout=60000  # NOTE(review): defined but never referenced in this file -- confirm before removing
def init(args):
    """Parse command-line flags and set the module-level connection globals."""
    global debug
    global server_ip
    global server_port
    logo()
    opts, args = getopt.getopt(args, "h", ["scouter_name=","host=","port=","debug","help"])
    # Each entry is a (flag-name, value) pair; value is "" for bare flags.
    for name, value in opts:
        if name in ("--help", "-h"):
            help()
        elif name == "--host":
            server_ip = value
        elif name == "--port":
            server_port = int(value)
        elif name == "--debug":
            debug = True
        elif name == "--scouter_name":
            setObjname(value)
def h_ignore(param):
    """Handler for collector commands this agent deliberately ignores."""
    reply = MapPack()
    reply.putValue('msg', TextValue('ignored command'))
    return reply
def h_env(param):
    """Return this process's environment variables as a MapPack."""
    import os
    env = os.environ
    m = MapPack()
    # dict.iteritems() is Python-2-only; items() behaves equivalently here
    # and also works under Python 3.
    for key, value in env.items():
        m.putStr(key, value)
    return m
def openReqServer():
    """Register collector request handlers and start the request server."""
    # Map of collector command name -> handler callable.
    handlers = {
        "OBJECT_ENV": h_env,
        "OBJECT_RESET_CACHE": h_ignore,
        "HOST_TOP": scouter.host.top.process,
        "HOST_PROCESS_DETAIL": scouter.host.process_detail.process,
        "HOST_DISK_USAGE": scouter.host.disk_usage.process,
        "HOST_NET_STAT": scouter.host.netstat.process,
        "HOST_WHO": scouter.host.who.process,
        "HOST_MEMINFO": scouter.host.meminfo.process,
        "KEEP_ALIVE": h_ignore,
    }
    startReqHandler(server_ip, server_port, handlers)
def sendObjectPack(sock):
    """Announce this agent to the collector as one ObjectPack UDP datagram.

    NOTE(review): binascii.crc32 on a str and the "CAFE" + bytes
    concatenation rely on Python 2 string semantics -- consistent with the
    rest of this module (print statements elsewhere), but not Python 3 safe.
    """
    objPack = ObjectPack()
    objPack.objName = objname()
    # The CRC of the name serves as the object's identity hash.
    objPack.objHash = binascii.crc32(objPack.objName)
    objPack.objType = objtype()
    objPack.address = getLocalAddr()
    objPack.version = "0.2.0"
    out = DataOutputX()
    out.writePack(objPack)
    # "CAFE" prefixes every datagram sent here -- presumably the protocol
    # magic expected by the collector.
    sock.sendto("CAFE" + out.toByteArray(), (server_ip, server_port))
# Time-type codes attached to perf packs; only REALTIME is sent from here.
REALTIME=1
FIVE_MIN=3


def sendPerfCounterPack(sock, interval):
    """Collect one round of perf counters and send them as one UDP datagram."""
    pack = PerfCounterPack()
    pack.objName = objname()
    pack.timeType = REALTIME
    # process() fills the pack with the actual counter values.
    process(pack, interval)
    if debug == True:
        print pack
    out = DataOutputX()
    out.writePack(pack)
    # Same "CAFE" datagram prefix as sendObjectPack().
    sock.sendto("CAFE" + out.toByteArray(), (server_ip, server_port))
def main():
    """Start the request-handler thread, then send perf packets forever."""
    init(sys.argv[1:])
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # The request server blocks, so it runs on a daemon thread; the main
    # thread does the periodic UDP sends.
    thread1 = threading.Thread(target=openReqServer)
    thread1.setDaemon(True)
    thread1.start()
    interval = 2
    # Cleanup: removed the unused locals `skip` and `objPackSend` and the
    # commented-out duplicate request-server thread; `while 1` -> `while True`.
    while True:
        sendObjectPack(sock)
        sendPerfCounterPack(sock, interval)
        time.sleep(interval)


if __name__ == '__main__':
    main()
|
sac_v2_multithread.py | '''
Soft Actor-Critic version 2
using target Q instead of V net: 2 Q net, 2 target Q net, 1 policy net
add alpha loss compared with version 1
paper: https://arxiv.org/pdf/1812.05905.pdf
'''
import math
import random
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.distributions import Normal
from IPython.display import clear_output
import matplotlib.pyplot as plt
from matplotlib import animation
from IPython.display import display
from reacher import Reacher
import argparse
import time
# import multiprocessing as mp
# from multiprocessing import Process
# import torch.multiprocessing as mp
# from torch.multiprocessing import Process
import threading as td
# Select the compute device once at import time; everything below uses it.
GPU = True
device_idx = 0
if GPU:
    device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
print(device)

# --train runs the multi-threaded training loop; --test currently has no
# effect (the evaluation branch at the bottom of the file is commented out).
parser = argparse.ArgumentParser(description='Train or test neural net motor controller.')
parser.add_argument('--train', dest='train', action='store_true', default=False)
parser.add_argument('--test', dest='test', action='store_true', default=False)
args = parser.parse_args()
class ReplayBuffer:
    """Fixed-capacity ring buffer of (state, action, reward, next_state, done)."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.buffer = []
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        """Store one transition, overwriting the oldest once capacity is hit."""
        if len(self.buffer) < self.capacity:
            # Grow until full; afterwards `position` wraps around instead.
            self.buffer.append(None)
        self.buffer[self.position] = (state, action, reward, next_state, done)
        self.position = int((self.position + 1) % self.capacity)

    def sample(self, batch_size):
        """Return a uniform random batch as per-field stacked numpy arrays."""
        picked = random.sample(self.buffer, batch_size)
        # zip(*picked) transposes the list of tuples into per-field tuples;
        # np.stack then turns each field into one batched array.
        states, actions, rewards, next_states, dones = map(np.stack, zip(*picked))
        return states, actions, rewards, next_states, dones

    def __len__(self):
        return len(self.buffer)
class NormalizedActions(gym.ActionWrapper):
    """Rescale actions between [-1, 1] and the wrapped env's action bounds."""

    def _action(self, action):
        # Map [-1, 1] -> [low, high], then clip to stay inside the bounds.
        low = self.action_space.low
        high = self.action_space.high
        rescaled = low + (action + 1.0) * 0.5 * (high - low)
        rescaled = np.clip(rescaled, low, high)
        return rescaled

    def _reverse_action(self, action):
        # Inverse mapping: [low, high] -> [-1, 1], clipped.
        low = self.action_space.low
        high = self.action_space.high
        normalized = 2 * (action - low) / (high - low) - 1
        normalized = np.clip(normalized, low, high)
        return normalized
class ValueNetwork(nn.Module):
    """MLP state-value function V(s): three hidden ReLU layers -> scalar."""

    def __init__(self, state_dim, hidden_dim, init_w=3e-3):
        super(ValueNetwork, self).__init__()
        self.linear1 = nn.Linear(state_dim, hidden_dim)
        self.linear2 = nn.Linear(hidden_dim, hidden_dim)
        self.linear3 = nn.Linear(hidden_dim, hidden_dim)
        self.linear4 = nn.Linear(hidden_dim, 1)
        # Small uniform init on the output layer keeps initial values near 0.
        self.linear4.weight.data.uniform_(-init_w, init_w)
        self.linear4.bias.data.uniform_(-init_w, init_w)

    def forward(self, state):
        hidden = F.relu(self.linear1(state))
        hidden = F.relu(self.linear2(hidden))
        hidden = F.relu(self.linear3(hidden))
        return self.linear4(hidden)
class SoftQNetwork(nn.Module):
    """MLP soft Q-function Q(s, a): concatenated input, three hidden ReLU layers."""

    def __init__(self, num_inputs, num_actions, hidden_size, init_w=3e-3):
        super(SoftQNetwork, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, hidden_size)
        self.linear4 = nn.Linear(hidden_size, 1)
        # Small uniform init on the output layer keeps initial Q-values near 0.
        self.linear4.weight.data.uniform_(-init_w, init_w)
        self.linear4.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        # Dim 0 is the batch; state and action are joined feature-wise.
        joined = torch.cat([state, action], 1)
        hidden = F.relu(self.linear1(joined))
        hidden = F.relu(self.linear2(hidden))
        hidden = F.relu(self.linear3(hidden))
        return self.linear4(hidden)
class PolicyNetwork(nn.Module):
    """Tanh-squashed Gaussian policy producing a mean and a clamped log-std."""

    def __init__(self, num_inputs, num_actions, hidden_size, action_range=1., init_w=3e-3, log_std_min=-20, log_std_max=2):
        super(PolicyNetwork, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max

        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.linear3 = nn.Linear(hidden_size, hidden_size)
        self.linear4 = nn.Linear(hidden_size, hidden_size)

        self.mean_linear = nn.Linear(hidden_size, num_actions)
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)

        self.log_std_linear = nn.Linear(hidden_size, num_actions)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

        self.action_range = action_range
        self.num_actions = num_actions

    def forward(self, state):
        """Return (mean, log_std) of the pre-squash Gaussian for `state`."""
        hidden = F.relu(self.linear1(state))
        hidden = F.relu(self.linear2(hidden))
        hidden = F.relu(self.linear3(hidden))
        hidden = F.relu(self.linear4(hidden))
        mean = self.mean_linear(hidden)
        # Clamping keeps the standard deviation in a numerically sane range.
        log_std = torch.clamp(self.log_std_linear(hidden), self.log_std_min, self.log_std_max)
        return mean, log_std

    def evaluate(self, state, epsilon=1e-6):
        """Sample a squashed action via the reparameterization trick.

        Returns (action, log_prob, z, mean, log_std); log_prob includes the
        tanh change-of-variables correction and the action-range scaling,
        summed over the action dimensions to one value per sample.
        """
        mean, log_std = self.forward(state)
        std = log_std.exp()  # no clip in evaluation, clip affects gradients flow

        unit_normal = Normal(0, 1)
        z = unit_normal.sample()
        squashed = torch.tanh(mean + std * z.to(device))
        action = self.action_range * squashed
        # Gaussian log-density minus the tanh Jacobian term minus the
        # constant for scaling by action_range.
        log_prob = Normal(mean, std).log_prob(mean + std * z.to(device)) \
            - torch.log(1. - squashed.pow(2) + epsilon) - np.log(self.action_range)
        # Normal.log_prob is per-feature; sum across features for one log-prob.
        log_prob = log_prob.sum(dim=1, keepdim=True)
        return action, log_prob, z, mean, log_std

    def get_action(self, state, deterministic):
        """Return one numpy action for a single (un-batched) state."""
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        mean, log_std = self.forward(state)
        std = log_std.exp()

        z = Normal(0, 1).sample().to(device)
        stochastic = self.action_range * torch.tanh(mean + std * z)
        if deterministic:
            return self.action_range * torch.tanh(mean).detach().cpu().numpy()[0]
        return stochastic.detach().cpu().numpy()[0]

    def sample_action(self, ):
        """Uniform random action in [-action_range, action_range]."""
        raw = torch.FloatTensor(self.num_actions).uniform_(-1, 1)
        return self.action_range * raw.numpy()
class SAC_Trainer():
    """Soft Actor-Critic (v2) trainer: twin soft-Q nets with target copies,
    a tanh-Gaussian policy, and a learned entropy temperature alpha.

    NOTE(review): reads the module-level globals `state_dim`, `action_dim`
    and `device`, which must be defined before construction -- confirm the
    call order when reusing this class.
    """
    def __init__(self, replay_buffer, hidden_dim, action_range):
        self.replay_buffer = replay_buffer

        self.soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(device)
        self.soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(device)
        self.target_soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(device)
        self.target_soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(device)
        self.policy_net = PolicyNetwork(state_dim, action_dim, hidden_dim, action_range).to(device)
        # log(alpha) is the trainable parameter so that alpha stays positive.
        self.log_alpha = torch.zeros(1, dtype=torch.float32, requires_grad=True, device=device)
        print('Soft Q Network (1,2): ', self.soft_q_net1)
        print('Policy Network: ', self.policy_net)

        # Target networks start as exact copies of the online Q networks.
        for target_param, param in zip(self.target_soft_q_net1.parameters(), self.soft_q_net1.parameters()):
            target_param.data.copy_(param.data)
        for target_param, param in zip(self.target_soft_q_net2.parameters(), self.soft_q_net2.parameters()):
            target_param.data.copy_(param.data)

        self.soft_q_criterion1 = nn.MSELoss()
        self.soft_q_criterion2 = nn.MSELoss()

        soft_q_lr = 3e-4
        policy_lr = 3e-4
        alpha_lr = 3e-4

        self.soft_q_optimizer1 = optim.Adam(self.soft_q_net1.parameters(), lr=soft_q_lr)
        self.soft_q_optimizer2 = optim.Adam(self.soft_q_net2.parameters(), lr=soft_q_lr)
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=policy_lr)
        self.alpha_optimizer = optim.Adam([self.log_alpha], lr=alpha_lr)

    def update(self, batch_size, reward_scale=10., auto_entropy=True, target_entropy=-2, gamma=0.99,soft_tau=1e-2):
        """Perform one gradient step for alpha, both Q nets, and the policy.

        Order matters: alpha is updated first so the Q target below uses
        the fresh temperature self.alpha.
        """
        state, action, reward, next_state, done = self.replay_buffer.sample(batch_size)
        # print('sample:', state, action, reward, done)

        state = torch.FloatTensor(state).to(device)
        next_state = torch.FloatTensor(next_state).to(device)
        action = torch.FloatTensor(action).to(device)
        reward = torch.FloatTensor(reward).unsqueeze(1).to(device)  # reward is single value, unsqueeze() to add one dim to be [reward] at the sample dim;
        done = torch.FloatTensor(np.float32(done)).unsqueeze(1).to(device)

        predicted_q_value1 = self.soft_q_net1(state, action)
        predicted_q_value2 = self.soft_q_net2(state, action)
        new_action, log_prob, z, mean, log_std = self.policy_net.evaluate(state)
        new_next_action, next_log_prob, _, _, _ = self.policy_net.evaluate(next_state)
        # normalize with batch mean and std; plus a small number to prevent numerical problem
        reward = reward_scale * (reward - reward.mean(dim=0)) / (reward.std(dim=0) + 1e-6)

        # Updating alpha wrt entropy
        # alpha = 0.0  # trade-off between exploration (max entropy) and exploitation (max Q)
        if auto_entropy is True:
            alpha_loss = -(self.log_alpha * (log_prob + target_entropy).detach()).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            self.alpha = self.log_alpha.exp()
        else:
            self.alpha = 1.
            alpha_loss = 0

        # Training Q Function: clipped double-Q target plus entropy bonus.
        target_q_min = torch.min(self.target_soft_q_net1(next_state, new_next_action),self.target_soft_q_net2(next_state, new_next_action)) - self.alpha * next_log_prob
        target_q_value = reward + (1 - done) * gamma * target_q_min  # if done==1, only reward
        q_value_loss1 = self.soft_q_criterion1(predicted_q_value1, target_q_value.detach())  # detach: no gradients for the variable
        q_value_loss2 = self.soft_q_criterion2(predicted_q_value2, target_q_value.detach())

        self.soft_q_optimizer1.zero_grad()
        q_value_loss1.backward()
        self.soft_q_optimizer1.step()
        self.soft_q_optimizer2.zero_grad()
        q_value_loss2.backward()
        self.soft_q_optimizer2.step()

        # Training Policy Function
        predicted_new_q_value = torch.min(self.soft_q_net1(state, new_action),self.soft_q_net2(state, new_action))
        policy_loss = (self.alpha * log_prob - predicted_new_q_value).mean()

        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()

        # Soft update the target value net
        for target_param, param in zip(self.target_soft_q_net1.parameters(), self.soft_q_net1.parameters()):
            target_param.data.copy_(  # copy data value into target parameters
                target_param.data * (1.0 - soft_tau) + param.data * soft_tau
            )
        for target_param, param in zip(self.target_soft_q_net2.parameters(), self.soft_q_net2.parameters()):
            target_param.data.copy_(  # copy data value into target parameters
                target_param.data * (1.0 - soft_tau) + param.data * soft_tau
            )
        return predicted_new_q_value.mean()

    def save_model(self, path):
        """Save both Q networks and the policy under the given path prefix."""
        torch.save(self.soft_q_net1.state_dict(), path+'_q1')
        torch.save(self.soft_q_net2.state_dict(), path+'_q2')
        torch.save(self.policy_net.state_dict(), path+'_policy')

    def load_model(self, path):
        """Load Q and policy weights and switch all three nets to eval mode."""
        self.soft_q_net1.load_state_dict(torch.load(path+'_q1'))
        self.soft_q_net2.load_state_dict(torch.load(path+'_q2'))
        self.policy_net.load_state_dict(torch.load(path+'_policy'))
        self.soft_q_net1.eval()
        self.soft_q_net2.eval()
        self.policy_net.eval()
def plot(rewards, id):
    """Save the reward curve for worker `id` to sac_v2_multi<id>.png."""
    clear_output(True)
    plt.figure(figsize=(20,5))
    plt.plot(rewards)
    target = 'sac_v2_multi'+str(id)+'.png'
    plt.savefig(target)
    # plt.show()
    # Reset the figure so the next call starts from a clean canvas.
    plt.clf()
def worker(id, ):  # thread could read global variables
    '''
    the function for sampling with multi-threading

    Each worker thread builds its own environment instance, pushes
    transitions into the shared replay_buffer, and runs SAC updates on the
    shared sac_trainer (all read from module-level globals).
    '''
    print(sac_trainer, replay_buffer)
    if ENV == 'Reacher':
        env=Reacher(screen_size=SCREEN_SIZE, num_joints=NUM_JOINTS, link_lengths = LINK_LENGTH, \
        ini_joint_angles=INI_JOING_ANGLES, target_pos = [369,430], render=True, change_goal=False)
    elif ENV == 'Pendulum':
        env = NormalizedActions(gym.make("Pendulum-v0"))
    print(env)
    frame_idx=0
    rewards=[]
    # training loop
    for eps in range(max_episodes):
        episode_reward = 0
        if ENV == 'Reacher':
            state = env.reset(SCREEN_SHOT)
        elif ENV == 'Pendulum':
            state = env.reset()

        for step in range(max_steps):
            # Random exploration until enough frames, then the learned policy.
            if frame_idx > explore_steps:
                action = sac_trainer.policy_net.get_action(state, deterministic = DETERMINISTIC)
            else:
                action = sac_trainer.policy_net.sample_action()

            try:
                if ENV == 'Reacher':
                    next_state, reward, done, _ = env.step(action, SPARSE_REWARD, SCREEN_SHOT)
                elif ENV == 'Pendulum':
                    next_state, reward, done, _ = env.step(action)
                    env.render()
            except KeyboardInterrupt:
                print('Finished')
                sac_trainer.save_model(model_path)

            replay_buffer.push(state, action, reward, next_state, done)
            state = next_state
            episode_reward += reward
            frame_idx += 1

            if len(replay_buffer) > batch_size:
                for i in range(update_itr):
                    _=sac_trainer.update(batch_size, reward_scale=10., auto_entropy=AUTO_ENTROPY, target_entropy=-1.*action_dim)

            if eps % 10 == 0 and eps>0:
                plot(rewards, id)
                sac_trainer.save_model(model_path)
            if done:
                break
        print('Episode: ', eps, '| Episode Reward: ', episode_reward)
        # NOTE(review): `rewards` is never appended to, so plot() always
        # draws an empty curve; presumably one of the commented lines below
        # was meant to run -- confirm intended behavior.
        # if len(rewards) == 0: rewards.append(episode_reward)
        # else: rewards.append(rewards[-1]*0.9+episode_reward*0.1)
    sac_trainer.save_model(model_path)
if __name__ == '__main__':
    # ---- shared objects read by every worker thread ----
    # NOTE(review): capacity is the float 1e6; ReplayBuffer.push compares
    # len(buffer) < capacity and casts the ring index with int(), so this
    # works, but int(1e6) would be cleaner -- confirm before changing.
    replay_buffer_size = 1e6
    replay_buffer = ReplayBuffer(replay_buffer_size)

    # choose env
    ENV = ['Pendulum', 'Reacher'][0]
    if ENV == 'Reacher':
        NUM_JOINTS=2
        LINK_LENGTH=[200, 140]
        INI_JOING_ANGLES=[0.1, 0.1]
        # NUM_JOINTS=4
        # LINK_LENGTH=[200, 140, 80, 50]
        # INI_JOING_ANGLES=[0.1, 0.1, 0.1, 0.1]
        SCREEN_SIZE=1000
        SPARSE_REWARD=False
        SCREEN_SHOT=False
        action_range = 10.0
        # This instance only supplies the action/observation sizes; each
        # worker constructs its own environment.
        env=Reacher(screen_size=SCREEN_SIZE, num_joints=NUM_JOINTS, link_lengths = LINK_LENGTH, \
        ini_joint_angles=INI_JOING_ANGLES, target_pos = [369,430], render=False, change_goal=False)
        action_dim = env.num_actions
        state_dim = env.num_observations
    elif ENV == 'Pendulum':
        env = NormalizedActions(gym.make("Pendulum-v0"))
        action_dim = env.action_space.shape[0]
        state_dim = env.observation_space.shape[0]
        action_range=1.

    # hyper-parameters for RL training
    max_episodes = 1000
    max_steps = 20 if ENV == 'Reacher' else 150  # Pendulum needs 150 steps per episode to learn well, cannot handle 20
    batch_size = 256
    explore_steps = 200  # for random action sampling in the beginning of training
    update_itr = 1
    AUTO_ENTROPY=True
    DETERMINISTIC=False
    hidden_dim = 512
    model_path = './model/sac_v2'

    # One shared trainer; the daemon worker threads update it concurrently.
    sac_trainer=SAC_Trainer(replay_buffer, hidden_dim=hidden_dim, action_range=action_range )

    if args.train:
        num_workers=2
        threads=[]
        for i in range(num_workers):
            thread = td.Thread(target=worker, args=(i,))
            thread.daemon=True
            threads.append(thread)
        try:
            [t.start() for t in threads]
            [t.join() for t in threads]
        except KeyboardInterrupt:
            print('Finished')
            sac_trainer.save_model(model_path)
        sac_trainer.save_model(model_path)

    # Evaluation branch kept for reference; args.test currently has no effect.
    # if args.test:
    #     sac_trainer.load_model(model_path)
    #     for eps in range(10):
    #         if ENV == 'Reacher':
    #             state = env.reset(SCREEN_SHOT)
    #         elif ENV == 'Pendulum':
    #             state = env.reset()
    #         episode_reward = 0
    #         for step in range(max_steps):
    #             action = sac_trainer.policy_net.get_action(state, deterministic = DETERMINISTIC)
    #             if ENV == 'Reacher':
    #                 next_state, reward, done, _ = env.step(action, SPARSE_REWARD, SCREEN_SHOT)
    #             elif ENV == 'Pendulum':
    #                 next_state, reward, done, _ = env.step(action)
    #                 env.render()
    #             episode_reward += reward
    #             state=next_state
    #         print('Episode: ', eps, '| Episode Reward: ', episode_reward)
|
webserver.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import os
import ssl
import threading
class Responder(object):
    """Sends a HTTP response. Used with TestWebServer."""

    def __init__(self, handler):
        self._handler = handler

    def SendResponse(self, body):
        """Send a complete 200 response: headers sized to `body`, then body."""
        length = len(body)
        self.SendHeaders(length)
        self.SendBody(body)

    def SendResponseFromFile(self, path):
        """Send a 200 response whose body is the contents of `path`."""
        with open(path, 'r') as source:
            contents = source.read()
        self.SendResponse(contents)

    def SendHeaders(self, content_length=None):
        """Send the 200 status line and headers (Content-Length if truthy)."""
        handler = self._handler
        handler.send_response(200)
        if content_length:
            handler.send_header('Content-Length', content_length)
        handler.end_headers()

    def SendError(self, code):
        """Send an error response for the given HTTP status code."""
        self._handler.send_error(code)

    def SendBody(self, body):
        """Write the raw body; assumes headers were already sent."""
        self._handler.wfile.write(body)
class Request(object):
    """An HTTP request."""

    def __init__(self, handler):
        self._handler = handler

    def GetPath(self):
        """Return the raw request path (query string included, if any)."""
        return self._handler.path
class _BaseServer(BaseHTTPServer.HTTPServer):
  """Internal server that throws if timed out waiting for a request."""

  def __init__(self, on_request, server_cert_and_key_path=None):
    """Starts the server.

    It is an HTTP server if parameter server_cert_and_key_path is not provided.
    Otherwise, it is an HTTPS server.

    Args:
      on_request: called as on_request(Request, Responder) for each GET.
      server_cert_and_key_path: path to a PEM file containing the cert and key.
          if it is None, start the server as an HTTP one.
    """

    class _Handler(BaseHTTPServer.BaseHTTPRequestHandler):
      """Internal handler that just asks the server to handle the request."""

      def do_GET(self):
        if self.path.endswith('favicon.ico'):
          self.send_error(404)
          return
        on_request(Request(self), Responder(self))

      def log_message(self, *args, **kwargs):
        """Overrides base class method to disable logging."""
        pass

    BaseHTTPServer.HTTPServer.__init__(self, ('127.0.0.1', 0), _Handler)
    if server_cert_and_key_path is not None:
      self._is_https_enabled = True
      # Bug fix: this previously wrapped `self._server.socket`, but there is
      # no `_server` attribute here -- this class *is* the server, so wrap
      # our own listening socket. Also requires `import ssl` at file scope,
      # which was missing.
      self.socket = ssl.wrap_socket(
          self.socket, certfile=server_cert_and_key_path,
          server_side=True)
    else:
      self._is_https_enabled = False

  def handle_timeout(self):
    """Overridden from SocketServer."""
    raise RuntimeError('Timed out waiting for http request')

  def GetUrl(self):
    """Returns the base URL of the server."""
    postfix = '://127.0.0.1:%s' % self.server_port
    if self._is_https_enabled:
      return 'https' + postfix
    return 'http' + postfix
class WebServer(object):
  """An HTTP or HTTPS server that serves on its own thread.

  Serves files from given directory but may use custom data for specific paths.
  """

  def __init__(self, root_dir, server_cert_and_key_path=None):
    """Starts the server.

    It is an HTTP server if parameter server_cert_and_key_path is not provided.
    Otherwise, it is an HTTPS server.

    Args:
      root_dir: root path to serve files from. This parameter is required.
      server_cert_and_key_path: path to a PEM file containing the cert and key.
          if it is None, start the server as an HTTP one.
    """
    self._root_dir = os.path.abspath(root_dir)
    self._server = _BaseServer(self._OnRequest, server_cert_and_key_path)
    self._thread = threading.Thread(target=self._server.serve_forever)
    self._thread.start()
    self._path_data_map = {}
    self._path_data_lock = threading.Lock()

  def _OnRequest(self, request, responder):
    # Only the path (without any query string) selects the content.
    path = request.GetPath().split('?')[0]
    # Custom data registered for this exact path wins over the filesystem.
    with self._path_data_lock:
      if path in self._path_data_map:
        responder.SendResponse(self._path_data_map[path])
        return
    # Otherwise serve a file from under root_dir.
    local_path = os.path.normpath(
        os.path.join(self._root_dir, *path.split('/')))
    if not local_path.startswith(self._root_dir):
      # Reject attempts to escape the served directory.
      responder.SendError(403)
    elif not os.path.exists(local_path):
      responder.SendError(404)
    else:
      responder.SendResponseFromFile(local_path)

  def SetDataForPath(self, path, data):
    """Register `data` to be served verbatim for requests to `path`."""
    with self._path_data_lock:
      self._path_data_map[path] = data

  def GetUrl(self):
    """Returns the base URL of the server."""
    return self._server.GetUrl()

  def Shutdown(self):
    """Shuts down the server synchronously."""
    self._server.shutdown()
    self._thread.join()
class SyncWebServer(object):
  """WebServer for testing.

  Incoming requests are blocked until explicitly handled.
  This was designed for single thread use. All requests should be handled on
  the same thread.
  """

  def __init__(self):
    self._server = _BaseServer(self._OnRequest)
    # Recognized by SocketServer.
    self._server.timeout = 10
    self._on_request = None

  def _OnRequest(self, request, responder):
    # Hand the responder to the pending handler, then clear it so the
    # Respond() loop below terminates.
    self._on_request(responder)
    self._on_request = None

  def Respond(self, on_request):
    """Blocks until request comes in, then calls given handler function.

    Args:
      on_request: Function that handles the request. Invoked with single
          parameter, an instance of Responder.
    """
    if self._on_request:
      raise RuntimeError('Must handle 1 request at a time.')

    self._on_request = on_request
    while self._on_request:
      # Don't use handle_one_request, because it won't work with the timeout.
      self._server.handle_request()

  def RespondWithContent(self, content):
    """Blocks until request comes in, then handles it with the given content."""
    self.Respond(lambda responder: responder.SendResponse(content))

  def GetUrl(self):
    """Returns the base URL of the underlying server."""
    return self._server.GetUrl()
|
test_threading.py | """
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
class Counter(object):
    """A trivial mutable counter."""

    def __init__(self):
        self.value = 0

    def inc(self):
        """Increment the count by one."""
        self.value = self.value + 1

    def dec(self):
        """Decrement the count by one."""
        self.value = self.value - 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
    """Worker thread used by ThreadTests.test_various_ops.

    Each worker acquires a shared semaphore (capacity 3), bumps a shared
    running-count under a mutex, sleeps for a tiny random delay, then
    decrements the count, asserting the concurrency invariants as it goes.
    """

    def __init__(self, name, testcase, sema, mutex, nrunning):
        threading.Thread.__init__(self, name=name)
        # TestCase whose assert* methods are called from inside the worker.
        self.testcase = testcase
        # BoundedSemaphore capping how many workers run at once.
        self.sema = sema
        # Lock guarding updates to the shared counter.
        self.mutex = mutex
        # Shared Counter of workers currently inside the semaphore.
        self.nrunning = nrunning

    def run(self):
        delay = random.random() / 10000.0
        if verbose:
            print('task %s will run for %.1f usec' %
                  (self.name, delay * 1e6))

        with self.sema:
            with self.mutex:
                self.nrunning.inc()
                if verbose:
                    print(self.nrunning.get(), 'tasks are running')
                # The semaphore limits concurrency to 3 workers.
                self.testcase.assertLessEqual(self.nrunning.get(), 3)

            time.sleep(delay)
            if verbose:
                print('task', self.name, 'done')

            with self.mutex:
                self.nrunning.dec()
                # The count must never go negative.
                self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
                if verbose:
                    print('%s is finished. %d tasks are running' %
                          (self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
    """Common setup/teardown: detect leaked threads and child processes."""

    def setUp(self):
        # Snapshot the thread state so tearDown can verify nothing leaked.
        self._threads = test.support.threading_setup()

    def tearDown(self):
        test.support.threading_cleanup(*self._threads)
        # Reap children left behind by the fork()-based tests.
        test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
#Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
with self.assertWarnsRegex(DeprecationWarning, 'use is_alive()'):
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@requires_type_collecting
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import threading
import time
class Sleeper:
def __del__(self):
time.sleep(0.05)
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
time.sleep(0.05)
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class TimerTests(BaseTestCase):
    """Tests for threading.Timer."""

    def setUp(self):
        BaseTestCase.setUp(self)
        # (args, kwargs) snapshots recorded by _callback_spy, one per firing.
        self.callback_args = []
        # Signalled each time the timer callback runs.
        self.callback_event = threading.Event()

    def test_init_immutable_default_args(self):
        # Issue 17435: constructor defaults were mutable objects, they could be
        # mutated via the object attributes and affect other Timer objects.
        timer1 = threading.Timer(0.01, self._callback_spy)
        timer1.start()
        self.callback_event.wait()
        # Mutate timer1's args/kwargs; this must not leak into timer2 below.
        timer1.args.append("blah")
        timer1.kwargs["foo"] = "bar"
        self.callback_event.clear()
        timer2 = threading.Timer(0.01, self._callback_spy)
        timer2.start()
        self.callback_event.wait()
        self.assertEqual(len(self.callback_args), 2)
        # Both firings must have seen pristine (empty) args/kwargs.
        self.assertEqual(self.callback_args, [((), {}), ((), {})])
        timer1.join()
        timer2.join()

    def _callback_spy(self, *args, **kwargs):
        # Record copies of the invocation arguments, then signal the waiter.
        self.callback_args.append((args[:], kwargs.copy()))
        self.callback_event.set()
# Instantiate the generic test suites from test.lock_tests for each
# synchronization primitive exported by the threading module.

class LockTests(lock_tests.LockTests):
    locktype = staticmethod(threading.Lock)

class PyRLockTests(lock_tests.RLockTests):
    # Pure-Python RLock implementation.
    locktype = staticmethod(threading._PyRLock)

@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
    # C-accelerated RLock implementation.
    locktype = staticmethod(threading._CRLock)

class EventTests(lock_tests.EventTests):
    eventtype = staticmethod(threading.Event)

class ConditionAsRLockTests(lock_tests.RLockTests):
    # Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)

class ConditionTests(lock_tests.ConditionTests):
    condtype = staticmethod(threading.Condition)

class SemaphoreTests(lock_tests.SemaphoreTests):
    semtype = staticmethod(threading.Semaphore)

class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    semtype = staticmethod(threading.BoundedSemaphore)

class BarrierTests(lock_tests.BarrierTests):
    barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
    """Miscellaneous checks on the threading module's public surface."""

    def test__all__(self):
        # Names that are public but not auto-detected by check__all__.
        extra = {"ThreadError"}
        # Deprecated camelCase aliases deliberately left out of __all__.
        blacklist = {'currentThread', 'activeCount'}
        support.check__all__(self, threading, ('threading', '_thread'),
                             extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
    """Tests for _thread.interrupt_main()."""

    def test_interrupt_main_subthread(self):
        # Calling start_new_thread with a function that executes interrupt_main
        # should raise KeyboardInterrupt upon completion.
        def call_interrupt():
            _thread.interrupt_main()
        t = threading.Thread(target=call_interrupt)
        with self.assertRaises(KeyboardInterrupt):
            t.start()
            t.join()
        # Join again outside assertRaises: the first join() may have been
        # the call interrupted by KeyboardInterrupt, leaving the thread
        # not yet fully joined.
        t.join()

    def test_interrupt_main_mainthread(self):
        # Make sure that if interrupt_main is called in main thread that
        # KeyboardInterrupt is raised instantly.
        with self.assertRaises(KeyboardInterrupt):
            _thread.interrupt_main()

    def test_interrupt_main_noerror(self):
        handler = signal.getsignal(signal.SIGINT)
        try:
            # No exception should arise: with SIGINT ignored or at its
            # default disposition, interrupt_main() must be a no-op here.
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            _thread.interrupt_main()
            signal.signal(signal.SIGINT, signal.SIG_DFL)
            _thread.interrupt_main()
        finally:
            # Restore original handler
            signal.signal(signal.SIGINT, handler)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
test_capi.py | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
import importlib.machinery
import importlib.util
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support import import_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = import_helper.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
    """some doc"""
    # Target for _testcapi.instancemethod() below; simply returns the
    # instance it was called on.  The docstring text is compared verbatim
    # by CAPITest.test_instancemethod.
    return self
class InstanceMethod:
    # Expose a builtin and a Python function wrapped as C-level
    # instancemethod objects (PyInstanceMethod_Type).
    id = _testcapi.instancemethod(id)
    testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
    """Exercise assorted Python/C API entry points exported by _testcapi."""

    def test_instancemethod(self):
        inst = InstanceMethod()
        self.assertEqual(id(inst), inst.id())
        self.assertTrue(inst.testfunction() is inst)
        self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
        self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)

        # Attribute writes go through to the wrapped function; the bound
        # form itself must be read-only.
        InstanceMethod.testfunction.attribute = "test"
        self.assertEqual(testfunction.attribute, "test")
        self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")

    def test_no_FatalError_infinite_loop(self):
        with support.SuppressCrashReport():
            p = subprocess.Popen([sys.executable, "-c",
                                  'import _testcapi;'
                                  '_testcapi.crash_no_current_thread()'],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            (out, err) = p.communicate()
        self.assertEqual(out, b'')
        # This used to cause an infinite loop.
        self.assertTrue(err.rstrip().startswith(
                         b'Fatal Python error: '
                         b'PyThreadState_Get: '
                         b'the function must be called with the GIL held, '
                         b'but the GIL is released '
                         b'(the current Python thread state is NULL)'),
                        err)

    def test_memoryview_from_NULL_pointer(self):
        self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)

    def test_exc_info(self):
        # Check PyErr_SetExcInfo() / PyErr_GetExcInfo() round-tripping via
        # _testcapi.set_exc_info while a ValueError is being handled.
        raised_exception = ValueError("5")
        new_exc = TypeError("TEST")
        try:
            raise raised_exception
        except ValueError as e:
            tb = e.__traceback__
            orig_sys_exc_info = sys.exc_info()
            orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
            new_sys_exc_info = sys.exc_info()
            new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
            reset_sys_exc_info = sys.exc_info()

            self.assertEqual(orig_exc_info[1], e)

            self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
            self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
            self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
            self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
        else:
            # The try block above must have raised.
            self.assertTrue(False)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_seq_bytes_to_charp_array(self):
        # Issue #15732: crash in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return 1
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
        # Issue #15736: overflow in _PySequence_BytesToCharpArray()
        class Z(object):
            def __len__(self):
                return sys.maxsize
            def __getitem__(self, i):
                return b'x'
        self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
                          1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)

    @unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
    def test_subprocess_fork_exec(self):
        class Z(object):
            def __len__(self):
                return 1

        # Issue #15738: crash in subprocess_fork_exec()
        self.assertRaises(TypeError, _posixsubprocess.fork_exec,
                          Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)

    @unittest.skipIf(MISSING_C_DOCSTRINGS,
                     "Signature information for builtins requires docstrings")
    def test_docstring_signature_parsing(self):
        # Check how the "$module, /" signature convention embedded in C
        # docstrings is split into __doc__ and __text_signature__.
        self.assertEqual(_testcapi.no_docstring.__doc__, None)
        self.assertEqual(_testcapi.no_docstring.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_empty.__doc__, None)
        self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_no_signature.__doc__,
            "This docstring has no signature.")
        self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
            "docstring_with_invalid_signature($module, /, boo)\n"
            "\n"
            "This docstring has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
            "docstring_with_invalid_signature2($module, /, boo)\n"
            "\n"
            "--\n"
            "\n"
            "This docstring also has an invalid signature."
            )
        self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)

        self.assertEqual(_testcapi.docstring_with_signature.__doc__,
            "This docstring has a valid signature.")
        self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
        self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
            "($module, /, sig)")

        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
            "\nThis docstring has a valid signature and some extra newlines.")
        self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
            "($module, /, parameter)")

    def test_c_type_with_matrix_multiplication(self):
        # A C type implementing nb_matrix_multiply must support @ and @=.
        M = _testcapi.matmulType
        m1 = M()
        m2 = M()
        self.assertEqual(m1 @ m2, ("matmul", m1, m2))
        self.assertEqual(m1 @ 42, ("matmul", m1, 42))
        self.assertEqual(42 @ m1, ("matmul", 42, m1))
        o = m1
        o @= m2
        self.assertEqual(o, ("imatmul", m1, m2))
        o = m1
        o @= 42
        self.assertEqual(o, ("imatmul", m1, 42))
        o = 42
        o @= m1
        self.assertEqual(o, ("matmul", 42, m1))

    def test_c_type_with_ipow(self):
        # When the __ipow__ method of a type was implemented in C, using the
        # modulo param would cause segfaults.
        o = _testcapi.ipowType()
        self.assertEqual(o.__ipow__(1), (1, None))
        self.assertEqual(o.__ipow__(2, 2), (2, 2))

    def test_return_null_without_error(self):
        # Issue #23571: A function must not return NULL without setting an
        # error
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_null_without_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: _Py_CheckFunctionResult: '
                                 br'a function returned NULL '
                                 br'without setting an error\n'
                             br'Python runtime state: initialized\n'
                             br'SystemError: <built-in function '
                                 br'return_null_without_error> returned NULL '
                                 br'without setting an error\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*", line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_null_without_error()
            self.assertRegex(str(cm.exception),
                             'return_null_without_error.* '
                             'returned NULL without setting an error')

    def test_return_result_with_error(self):
        # Issue #23571: A function must not return a result with an error set
        if Py_DEBUG:
            code = textwrap.dedent("""
                import _testcapi
                from test import support

                with support.SuppressCrashReport():
                    _testcapi.return_result_with_error()
            """)
            rc, out, err = assert_python_failure('-c', code)
            self.assertRegex(err.replace(b'\r', b''),
                             br'Fatal Python error: _Py_CheckFunctionResult: '
                                 br'a function returned a result '
                                 br'with an error set\n'
                             br'Python runtime state: initialized\n'
                             br'ValueError\n'
                             br'\n'
                             br'The above exception was the direct cause '
                                 br'of the following exception:\n'
                             br'\n'
                             br'SystemError: <built-in '
                                 br'function return_result_with_error> '
                                 br'returned a result with an error set\n'
                             br'\n'
                             br'Current thread.*:\n'
                             br'  File .*, line 6 in <module>')
        else:
            with self.assertRaises(SystemError) as cm:
                _testcapi.return_result_with_error()
            self.assertRegex(str(cm.exception),
                             'return_result_with_error.* '
                             'returned a result with an error set')

    def test_buildvalue_N(self):
        # Py_BuildValue("N", ...) reference handling is checked in C.
        _testcapi.test_buildvalue_N()

    def test_set_nomemory(self):
        code = """if 1:
            import _testcapi

            class C(): pass

            # The first loop tests both functions and that remove_mem_hooks()
            # can be called twice in a row. The second loop checks a call to
            # set_nomemory() after a call to remove_mem_hooks(). The third
            # loop checks the start and stop arguments of set_nomemory().
            for outer_cnt in range(1, 4):
                start = 10 * outer_cnt
                for j in range(100):
                    if j == 0:
                        if outer_cnt != 3:
                            _testcapi.set_nomemory(start)
                        else:
                            _testcapi.set_nomemory(start, start + 1)
                    try:
                        C()
                    except MemoryError as e:
                        if outer_cnt != 3:
                            _testcapi.remove_mem_hooks()
                        print('MemoryError', outer_cnt, j)
                        _testcapi.remove_mem_hooks()
                        break
        """
        rc, out, err = assert_python_ok('-c', code)
        self.assertIn(b'MemoryError 1 10', out)
        self.assertIn(b'MemoryError 2 20', out)
        self.assertIn(b'MemoryError 3 30', out)

    def test_mapping_keys_values_items(self):
        # PyMapping_Keys/Values/Items must honor overridden methods and
        # always return lists.
        class Mapping1(dict):
            def keys(self):
                return list(super().keys())
            def values(self):
                return list(super().values())
            def items(self):
                return list(super().items())
        class Mapping2(dict):
            def keys(self):
                return tuple(super().keys())
            def values(self):
                return tuple(super().values())
            def items(self):
                return tuple(super().items())
        dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}

        for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
                        dict_obj, OrderedDict(dict_obj),
                        Mapping1(dict_obj), Mapping2(dict_obj)]:
            self.assertListEqual(_testcapi.get_mapping_keys(mapping),
                                 list(mapping.keys()))
            self.assertListEqual(_testcapi.get_mapping_values(mapping),
                                 list(mapping.values()))
            self.assertListEqual(_testcapi.get_mapping_items(mapping),
                                 list(mapping.items()))

    def test_mapping_keys_values_items_bad_arg(self):
        self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
        self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)

        # keys()/values()/items() returning a non-sequence must raise
        # TypeError, not crash.
        class BadMapping:
            def keys(self):
                return None
            def values(self):
                return None
            def items(self):
                return None
        bad_mapping = BadMapping()
        self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
        self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)

    @unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
                         'need _testcapi.negative_refcount')
    def test_negative_refcount(self):
        # bpo-35059: Check that Py_DECREF() reports the correct filename
        # when calling _Py_NegativeRefcount() to abort Python.
        code = textwrap.dedent("""
            import _testcapi
            from test import support

            with support.SuppressCrashReport():
                _testcapi.negative_refcount()
        """)
        rc, out, err = assert_python_failure('-c', code)
        self.assertRegex(err,
                         br'_testcapimodule\.c:[0-9]+: '
                         br'_Py_NegativeRefcount: Assertion failed: '
                         br'object has negative ref count')

    def test_trashcan_subclass(self):
        # bpo-35983: Check that the trashcan mechanism for "list" is NOT
        # activated when its tp_dealloc is being called by a subclass
        from _testcapi import MyList
        L = None
        for i in range(1000):
            L = MyList((L,))

    @support.requires_resource('cpu')
    def test_trashcan_python_class1(self):
        self.do_test_trashcan_python_class(list)

    @support.requires_resource('cpu')
    def test_trashcan_python_class2(self):
        from _testcapi import MyList
        self.do_test_trashcan_python_class(MyList)

    def do_test_trashcan_python_class(self, base):
        # Check that the trashcan mechanism works properly for a Python
        # subclass of a class using the trashcan (this specific test assumes
        # that the base class "base" behaves like list)
        class PyList(base):
            # Count the number of PyList instances to verify that there is
            # no memory leak
            num = 0
            def __init__(self, *args):
                __class__.num += 1
                super().__init__(*args)
            def __del__(self):
                __class__.num -= 1

        for parity in (0, 1):
            L = None
            # We need in the order of 2**20 iterations here such that a
            # typical 8MB stack would overflow without the trashcan.
            for i in range(2**20):
                L = PyList((L,))
                L.attr = i
            if parity:
                # Add one additional nesting layer
                L = (L,)
            self.assertGreater(PyList.num, 0)
            del L
            self.assertEqual(PyList.num, 0)

    def test_heap_ctype_doc_and_text_signature(self):
        self.assertEqual(_testcapi.HeapDocCType.__doc__, "somedoc")
        self.assertEqual(_testcapi.HeapDocCType.__text_signature__, "(arg1, arg2)")

    def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
        class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
            def __init__(self):
                self.value2 = 20
                super().__init__()

        subclass_instance = HeapGcCTypeSubclass()
        type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)

        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)

        # Test that the type reference count is only decremented once
        del subclass_instance
        self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))

    def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
        class A(_testcapi.HeapGcCType):
            def __init__(self):
                self.value2 = 20
                super().__init__()

        class B(A):
            def __init__(self):
                super().__init__()

            def __del__(self):
                self.__class__ = A
                A.refcnt_in_del = sys.getrefcount(A)
                B.refcnt_in_del = sys.getrefcount(B)

        subclass_instance = B()
        type_refcnt = sys.getrefcount(B)
        new_type_refcnt = sys.getrefcount(A)

        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)

        del subclass_instance

        # Test that setting __class__ modified the reference counts of the types
        self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
        self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)

        # Test that the original type already has decreased its refcnt
        self.assertEqual(type_refcnt - 1, sys.getrefcount(B))

        # Test that subtype_dealloc decref the newly assigned __class__ only once
        self.assertEqual(new_type_refcnt, sys.getrefcount(A))

    def test_heaptype_with_dict(self):
        inst = _testcapi.HeapCTypeWithDict()
        inst.foo = 42
        self.assertEqual(inst.foo, 42)
        self.assertEqual(inst.dictobj, inst.__dict__)
        self.assertEqual(inst.dictobj, {"foo": 42})

        inst = _testcapi.HeapCTypeWithDict()
        self.assertEqual({}, inst.__dict__)

    def test_heaptype_with_negative_dict(self):
        # Same checks for a type whose tp_dictoffset is negative.
        inst = _testcapi.HeapCTypeWithNegativeDict()
        inst.foo = 42
        self.assertEqual(inst.foo, 42)
        self.assertEqual(inst.dictobj, inst.__dict__)
        self.assertEqual(inst.dictobj, {"foo": 42})

        inst = _testcapi.HeapCTypeWithNegativeDict()
        self.assertEqual({}, inst.__dict__)

    def test_heaptype_with_weakref(self):
        inst = _testcapi.HeapCTypeWithWeakref()
        ref = weakref.ref(inst)
        self.assertEqual(ref(), inst)
        self.assertEqual(inst.weakreflist, ref)

    def test_heaptype_with_buffer(self):
        inst = _testcapi.HeapCTypeWithBuffer()
        b = bytes(inst)
        self.assertEqual(b, b"1234")

    def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
        subclass_instance = _testcapi.HeapCTypeSubclass()
        type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)

        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)

        # Test that the type reference count is only decremented once
        del subclass_instance
        self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))

    def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
        subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
        type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
        new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)

        # Test that subclass instance was fully created
        self.assertEqual(subclass_instance.value, 10)
        self.assertEqual(subclass_instance.value2, 20)

        # The tp_finalize slot will set __class__ to HeapCTypeSubclass
        del subclass_instance

        # Test that setting __class__ modified the reference counts of the types
        self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
        self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)

        # Test that the original type already has decreased its refcnt
        self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))

        # Test that subtype_dealloc decref the newly assigned __class__ only once
        self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))

    def test_heaptype_with_setattro(self):
        # Exercise a heap type with a custom tp_setattro slot.
        obj = _testcapi.HeapCTypeSetattr()
        self.assertEqual(obj.pvalue, 10)
        obj.value = 12
        self.assertEqual(obj.pvalue, 12)
        del obj.value
        self.assertEqual(obj.pvalue, 0)

    def test_pynumber_tobase(self):
        from _testcapi import pynumber_tobase
        self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
        self.assertEqual(pynumber_tobase(123, 8), '0o173')
        self.assertEqual(pynumber_tobase(123, 10), '123')
        self.assertEqual(pynumber_tobase(123, 16), '0x7b')
        self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
        self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
        self.assertEqual(pynumber_tobase(-123, 10), '-123')
        self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
        self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
        self.assertRaises(TypeError, pynumber_tobase, '123', 10)
        self.assertRaises(SystemError, pynumber_tobase, 123, 0)
class TestPendingCalls(unittest.TestCase):
    """Tests for Py_AddPendingCall() via _testcapi._pending_threadfunc."""

    def pendingcalls_submit(self, l, n):
        # Submit n pending calls, each appending one item to l when the
        # main thread services the pending-call queue.
        def callback():
            #this function can be interrupted by thread switching so let's
            #use an atomic operation
            l.append(None)

        for i in range(n):
            time.sleep(random.random()*0.02) #0.01 secs on average
            #try submitting callback until successful.
            #rely on regular interrupt to flush queue if we are
            #unsuccessful.
            while True:
                if _testcapi._pending_threadfunc(callback):
                    break;

    def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l[0] has grown to 10
        count = 0;
        while len(l) != n:
            #this busy loop is where we expect to be interrupted to
            #run our callbacks.  Note that callbacks are only run on the
            #main thread
            if False and support.verbose:
                print("(%i)"%(len(l),),)
            for i in range(1000):
                a = i*i
            if context and not context.event.is_set():
                continue
            count += 1
            self.assertTrue(count < 10000,
                "timeout waiting for %i callbacks, got %i"%(n, len(l)))
        if False and support.verbose:
            print("(%i)"%(len(l),))

    def test_pendingcalls_threaded(self):
        #do every callback on a separate thread
        n = 32 #total callbacks
        threads = []
        class foo(object):pass
        context = foo()
        context.l = []
        context.n = 2 #submits per thread
        context.nThreads = n // context.n
        context.nFinished = 0
        context.lock = threading.Lock()
        context.event = threading.Event()

        threads = [threading.Thread(target=self.pendingcalls_thread,
                                    args=(context,))
                   for i in range(context.nThreads)]
        with threading_helper.start_threads(threads):
            self.pendingcalls_wait(context.l, n, context)

    def pendingcalls_thread(self, context):
        # Worker: submit this thread's share of callbacks, then signal the
        # waiter once the last worker finishes.
        try:
            self.pendingcalls_submit(context.l, context.n)
        finally:
            with context.lock:
                context.nFinished += 1
                nFinished = context.nFinished
                if False and support.verbose:
                    print("finished threads: ", nFinished)
            if nFinished == context.nThreads:
                context.event.set()

    def test_pendingcalls_non_threaded(self):
        #again, just using the main thread, likely they will all be dispatched at
        #once.  It is ok to ask for too many, because we loop until we find a slot.
        #the loop can be interrupted to dispatch.
        #there are only 32 dispatch slots, so we go for twice that!
        l = []
        n = 64
        self.pendingcalls_submit(l, n)
        self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
    """Tests for code run in a subinterpreter (support.run_in_subinterp)."""

    def test_subinterps(self):
        import builtins
        r, w = os.pipe()
        # The subinterpreter must get its own sys.modules and builtins
        # objects, so the ids it reports differ from ours.
        code = """if 1:
            import sys, builtins, pickle
            with open({:d}, "wb") as f:
                pickle.dump(id(sys.modules), f)
                pickle.dump(id(builtins), f)
            """.format(w)
        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertNotEqual(pickle.load(f), id(sys.modules))
            self.assertNotEqual(pickle.load(f), id(builtins))

    def test_subinterps_recent_language_features(self):
        r, w = os.pipe()
        code = """if 1:
            import pickle
            with open({:d}, "wb") as f:

                @(lambda x:x)  # Py 3.9
                def noop(x): return x

                a = (b := f'1{{2}}3') + noop('x')  # Py 3.8 (:=) / 3.6 (f'')

                async def foo(arg): return await arg  # Py 3.5

                pickle.dump(dict(a=a, b=b), f)
            """.format(w)

        with open(r, "rb") as f:
            ret = support.run_in_subinterp(code)
            self.assertEqual(ret, 0)
            self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})

    def test_mutate_exception(self):
        """
        Exceptions saved in global module state get shared between
        individual module instances. This test checks whether or not
        a change in one interpreter's module gets reflected into the
        other ones.
        """
        import binascii

        support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")

        self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
    """Tests for per-thread state handling in the C API."""

    @threading_helper.reap_threads
    def test_thread_state(self):
        # some extra thread-state tests driven via _testcapi
        def target():
            idents = []

            def callback():
                idents.append(threading.get_ident())

            _testcapi._test_thread_state(callback)
            # NOTE(review): extra references to the callback — presumably to
            # keep its refcount above 1 during the C call; confirm against
            # _testcapi._test_thread_state.
            a = b = callback
            time.sleep(1)
            # Check our main thread is in the list exactly 3 times.
            self.assertEqual(idents.count(threading.get_ident()), 3,
                             "Couldn't find main thread correctly in the list")

        target()
        t = threading.Thread(target=target)
        t.start()
        t.join()
class Test_testcapi(unittest.TestCase):
    # Dynamically adopt every test_* function exported by _testcapi as a
    # test method, except the *_code variants which need special handling.
    locals().update((name, getattr(_testcapi, name))
                    for name in dir(_testcapi)
                    if name.startswith('test_') and not name.endswith('_code'))

    # Suppress warning from PyUnicode_FromUnicode().
    @warnings_helper.ignore_warnings(category=DeprecationWarning)
    def test_widechar(self):
        _testcapi.test_widechar()
class Test_testinternalcapi(unittest.TestCase):
    # Dynamically adopt every test_* function exported by _testinternalcapi
    # as a test method.
    locals().update((name, getattr(_testinternalcapi, name))
                    for name in dir(_testinternalcapi)
                    if name.startswith('test_'))
class PyMemDebugTests(unittest.TestCase):
    """Tests for the debug hooks on Python memory allocators."""

    # Value of the PYTHONMALLOC environment variable under test; subclasses
    # override this to exercise the other allocator configurations.
    PYTHONMALLOC = 'debug'
    # '0x04c06e0' or '04C06E0'
    PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'

    def check(self, code):
        # Run code in a subprocess expected to die with a fatal memory
        # error; return its decoded stderr for regex matching.
        with support.SuppressCrashReport():
            out = assert_python_failure('-c', code,
                                        PYTHONMALLOC=self.PYTHONMALLOC)
            stderr = out.err
        return stderr.decode('ascii', 'replace')

    def test_buffer_overflow(self):
        out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
                 r"        at tail\+0: 0x78 \*\*\* OUCH\n"
                 r"        at tail\+1: 0xfd\n"
                 r"        at tail\+2: 0xfd\n"
                 r"        .*\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
        regex = regex.format(ptr=self.PTR_REGEX)
        regex = re.compile(regex, flags=re.DOTALL)
        self.assertRegex(out, regex)

    def test_api_misuse(self):
        out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
        regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
                 r"    16 bytes originally requested\n"
                 r"    The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
                 r"    The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
                 r"(    The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
                 r"    Data at p: cd cd cd .*\n"
                 r"\n"
                 r"Enable tracemalloc to get the memory block allocation traceback\n"
                 r"\n"
                 r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
        regex = regex.format(ptr=self.PTR_REGEX)
        self.assertRegex(out, regex)

    def check_malloc_without_gil(self, code):
        out = self.check(code)
        expected = ('Fatal Python error: _PyMem_DebugMalloc: '
                    'Python memory allocator called without holding the GIL')
        self.assertIn(expected, out)

    def test_pymem_malloc_without_gil(self):
        # Debug hooks must raise an error if PyMem_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def test_pyobject_malloc_without_gil(self):
        # Debug hooks must raise an error if PyObject_Malloc() is called
        # without holding the GIL
        code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
        self.check_malloc_without_gil(code)

    def check_pyobject_is_freed(self, func_name):
        code = textwrap.dedent(f'''
            import gc, os, sys, _testcapi
            # Disable the GC to avoid crash on GC collection
            gc.disable()
            try:
                _testcapi.{func_name}()
                # Exit immediately to avoid a crash while deallocating
                # the invalid object
                os._exit(0)
            except _testcapi.error:
                os._exit(1)
        ''')
        assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)

    def test_pyobject_null_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_null_is_freed')

    def test_pyobject_uninitialized_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')

    def test_pyobject_forbidden_bytes_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')

    def test_pyobject_freed_is_freed(self):
        self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
    # Re-run the debug-hook tests on top of the raw malloc allocator.
    PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
    # Re-run the debug-hook tests on top of the pymalloc allocator.
    PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
    # test default allocator of Python compiled in debug mode
    PYTHONMALLOC = ''
class Test_ModuleStateAccess(unittest.TestCase):
    """Test access to module start (PEP 573)"""

    # The C part of the tests lives in _testmultiphase, in a module called
    # _testmultiphase_meth_state_access.
    # This module has multi-phase initialization, unlike _testcapi.

    def setUp(self):
        # Load the multi-phase extension under its test-specific name.
        fullname = '_testmultiphase_meth_state_access'  # XXX
        origin = importlib.util.find_spec('_testmultiphase').origin
        loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
        spec = importlib.util.spec_from_loader(fullname, loader)
        module = importlib.util.module_from_spec(spec)
        loader.exec_module(module)
        self.module = module

    def test_subclass_get_module(self):
        """PyType_GetModule for defining_class"""
        class StateAccessType_Subclass(self.module.StateAccessType):
            pass

        instance = StateAccessType_Subclass()
        self.assertIs(instance.get_defining_module(), self.module)

    def test_subclass_get_module_with_super(self):
        class StateAccessType_Subclass(self.module.StateAccessType):
            def get_defining_module(self):
                return super().get_defining_module()

        instance = StateAccessType_Subclass()
        self.assertIs(instance.get_defining_module(), self.module)

    def test_state_access(self):
        """Checks methods defined with and without argument clinic

        This tests a no-arg method (get_count) and a method with
        both a positional and keyword argument.
        """

        a = self.module.StateAccessType()
        b = self.module.StateAccessType()

        methods = {
            'clinic': a.increment_count_clinic,
            'noclinic': a.increment_count_noclinic,
        }

        for name, increment_count in methods.items():
            with self.subTest(name):
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 0)

                increment_count()
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 1)

                increment_count(3)
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 4)

                increment_count(-2, twice=True)
                self.assertEqual(a.get_count(), b.get_count())
                self.assertEqual(a.get_count(), 0)

                with self.assertRaises(TypeError):
                    increment_count(thrice=3)

                with self.assertRaises(TypeError):
                    increment_count(1, 2, 3)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
simple_test_server.py | """Local Server to return RSS feed data for unit testing"""
import http.server
import socketserver
import threading
# TCP port the local test server listens on.
PORT = 8081

# Canned RSS 2.0 document (two items) served verbatim for every request.
# NOTE: these bytes are the payload under test — do not reformat.
TEST_RSS = b"""<?xml version="1.0" encoding="utf-8"?>
<rss xmlns:atom="http://www.w3.org/2005/Atom" version="2.0">
<channel>
<title>test feed</title>
<link>http://example.com/test/</link>
<description>Test Feed.</description>
<atom:link href="http://example.com/test/feed/" rel="self"></atom:link>
<lastBuildDate>Sun, 13 Apr 2014 09:33:47 +0000</lastBuildDate>
<item>
<title>Test Entry</title>
<link>http://example.com/test1/</link>
<description><h1 id="TEST1">Test One</h1></description>
<pubDate>Sun, 13 Apr 2014 09:33:47 +0000</pubDate>
<guid>http://example.com/test1/</guid>
</item>
<item>
<title>Test Entry 2</title>
<link>http://example.com/test2/</link>
<description><h1 id="TEST2">Test Two</h1></description>
<pubDate>Sun, 13 Apr 2014 09:33:47 +0000</pubDate>
<guid>http://example.com/test2/</guid>
</item>
</channel>
</rss>"""
class Handler(http.server.SimpleHTTPRequestHandler):
    """Request handler that answers every GET with the canned RSS feed."""

    def do_GET(self):
        """Send a 200 response carrying the test RSS payload."""
        self.send_response(200)
        self.send_header('Content-type', 'application/rss+xml')
        self.end_headers()
        # Can use self.path to choose data to return
        self.wfile.write(TEST_RSS)

    def log_request(self, code):
        """Suppress display of status code messages on terminal"""
class TestServer(socketserver.TCPServer):
    # Allow quick re-binding of the test port between runs without
    # "address already in use" errors.
    allow_reuse_address = True
# Module-level server instance shared by setUpModule/tearDownModule.
# NOTE(review): this binds the port at import time — importing this module
# fails if PORT is already in use.
test_server = TestServer(('', PORT), Handler)
def setUpModule():
    """Start server to return test rss data"""
    # Daemon thread so a hung server cannot keep the test process alive.
    server_thread = threading.Thread(target=test_server.serve_forever,
                                     daemon=True)
    server_thread.start()
def tearDownModule():
    """Stop server which returned test rss data"""
    # shutdown() makes serve_forever() return, ending the daemon thread's loop.
    test_server.shutdown()
# When run as a script, serve the test feed until interrupted.
if __name__ == '__main__':  # pragma: no cover
    test_server.serve_forever()
|
client.py | #
# A Python client API for Edbot Studio.
#
# Copyright (c) Robots in Schools Ltd. All rights reserved.
#
import json
import time
import threading
from pydash.objects import merge, unset
from ws4py.client.threadedclient import WebSocketClient
class EdbotStudioClient(WebSocketClient):
    """Websocket client for the Edbot Studio API.

    Requests carry an incrementing sequence number; responses are matched
    back to their request via ``self.pending``, which maps a sequence number
    to either a user callback (asynchronous use) or a ``threading.Event``
    plus the captured response (synchronous use).
    """

    # Wire-protocol message categories.
    Category = {
        "REQUEST": 1,
        "RESPONSE": 2,
        "UPDATE": 3,
        "DELETE": 4,
        "CLOSE": 5
    }

    # Wire-protocol request/response types.
    Type = {
        "INIT": 1,
        "GET_CLIENTS": 2,
        "GET_SERVERS": 3,
        "GET_SENSORS": 4,
        "GET_SERVOS": 5,
        "RUN_MOTION": 6,
        "SET_SERVOS": 7,
        "SET_SPEAKER": 8,
        "SET_DISPLAY": 9,
        "SET_OPTIONS": 10,
        "SET_CUSTOM": 11,
        "SAY": 12,
        "RESET": 13
    }

    def __init__(self, server="localhost", port=54255, listener=None, name=None,
                 reporters=True, device_alias=None):
        """Record connection settings; no network I/O happens until connect().

        :param server: host name of the Edbot Studio server
        :param port: websocket port
        :param listener: optional callable invoked with every pushed message
        :param name: client name reported in the INIT request
        :param reporters: whether to enable server-side reporters
        :param device_alias: optional device alias for the INIT request
        """
        self.server = server
        self.port = port
        self.listener = listener
        self.name = name
        self.reporters = reporters
        self.device_alias = device_alias
        self.connected = False
        self.sequence = 1
        self.opened_event = threading.Event()
        self.pending = {}
        # BUG FIX: the sequence counter must be guarded by one shared lock.
        # The previous code did "with threading.Lock():" inside _send(),
        # creating a brand-new lock per call - which provides no mutual
        # exclusion at all between concurrent senders.
        self.send_lock = threading.Lock()

    def connect(self, callback=None):
        """Open the websocket, start the reader thread and send INIT.

        Blocks until the INIT response arrives unless *callback* is given.
        Calling connect() while already connected is a no-op.
        """
        if self.connected:
            return  # silently return
        url = "ws://{}:{}/api".format(self.server, self.port)
        WebSocketClient.__init__(self, url)
        WebSocketClient.connect(self)
        t = threading.Thread(target=self.run_forever)
        # Thread.setDaemon() is deprecated; assign the attribute instead.
        t.daemon = True
        t.start()
        #
        # Wait for the opened() callback. If things went wrong the previous
        # call to WebSocketClient.connect() should have raised an exception.
        #
        self._wait(self.opened_event)

        # Initialise the data dictionary.
        self.data = {}

        # Send the INIT request.
        params = {
            "name": self.name,
            "reporters": self.reporters,
            "deviceAlias": self.device_alias
        }
        self._send(EdbotStudioClient.Type["INIT"], params, callback)

    ###########################################################################

    def opened(self):
        """ws4py hook: unblock connect() once the socket is open."""
        self.opened_event.set()

    def received_message(self, m):
        """ws4py hook: route RESPONSE/UPDATE/DELETE messages."""
        message = json.loads(m.data.decode("UTF-8"))
        if message["category"] == EdbotStudioClient.Category["RESPONSE"]:
            #
            # Run code specific to the response message type.
            #
            if message["type"] == EdbotStudioClient.Type["INIT"]:
                merge(self.data, message["data"])
                self.connected = True
                if self.listener is not None:
                    self.listener(message)
            #
            # Use the sequence as a key in the pending dictionary. There
            # will either be a callback to trigger or an event to resolve.
            #
            sequence = message["sequence"]
            pending = self.pending[sequence]
            callback = pending["callback"]
            if callback is not None:
                if message["status"]["success"]:
                    callback(True, {"data": message["data"]})
                else:
                    callback(False, {"data": message["status"]["text"]})
                del self.pending[sequence]
            else:
                # Synchronous caller: stash the response, then wake it.
                self.pending[sequence]["response"] = message
                event = pending["event"]
                event.set()
        elif message["category"] == EdbotStudioClient.Category["UPDATE"]:
            if self.connected:
                merge(self.data, message["data"])
                if self.listener is not None:
                    self.listener(message)
        elif message["category"] == EdbotStudioClient.Category["DELETE"]:
            if self.connected:
                unset(self.data, message["data"]["path"])
                if self.listener is not None:
                    self.listener(message)

    def closed(self, code, reason=None):
        """ws4py hook: reset state and notify the listener of the close."""
        self.connected = False
        self.data.clear()
        if self.listener is not None:
            self.listener({
                "category": EdbotStudioClient.Category["CLOSE"],
                "data": {
                    "code": code,
                    "reason": reason
                }
            })

    ###########################################################################

    def get_connected(self):
        """Return True once the INIT handshake has completed."""
        return self.connected

    #
    # If connected, this will close the connection and call the closed
    # handler. If the connection is already closed, it does nothing.
    #
    def disconnect(self):
        WebSocketClient.close(self, code=1000, reason="Closed by client")

    def get_data(self):
        """Return the live data dictionary maintained by server pushes."""
        if not self.connected:
            raise Exception("Not connected")
        return self.data

    def get_robot_names(self, model=None):
        """Return configured robot names, optionally filtered by model type."""
        if not self.connected:
            raise Exception("Not connected")
        if model is None:
            return list(self.data["robots"].keys())
        else:
            result = []
            for name in self.data["robots"].keys():
                if self.data["robots"][name]["model"]["type"] == model:
                    result.append(name)
            return result

    def get_robot(self, name):
        """Return the data record for robot *name*, or raise if unknown."""
        if not self.connected:
            raise Exception("Not connected")
        if name not in self.data["robots"]:
            raise Exception(name + " is not configured")
        else:
            return self.data["robots"][name]

    def have_control(self, name):
        """Return True if this client's device currently controls *name*."""
        robot = self.get_robot(name)
        return robot["control"] == self.data["session"]["device"]["id"]

    def await_control(self, name):
        """Poll until this client gains control of robot *name*."""
        robot = self.get_robot(name)
        while not self.have_control(name):
            time.sleep(0.1)

    def get_clients(self, callback=None):
        return self._request(EdbotStudioClient.Type["GET_CLIENTS"], None, callback)

    def get_servers(self, callback=None):
        return self._request(EdbotStudioClient.Type["GET_SERVERS"], None, callback)

    def get_sensors(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["GET_SENSORS"], params, callback)

    def get_servos(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["GET_SERVOS"], params, callback)

    def run_motion(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["RUN_MOTION"], params, callback)

    def set_servos(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["SET_SERVOS"], params, callback)

    def set_speaker(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["SET_SPEAKER"], params, callback)

    def set_display(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["SET_DISPLAY"], params, callback)

    def set_options(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["SET_OPTIONS"], params, callback)

    def set_custom(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["SET_CUSTOM"], params, callback)

    def say(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["SAY"], params, callback)

    def reset(self, params, callback=None):
        return self._request(EdbotStudioClient.Type["RESET"], params, callback)

    ###########################################################################

    def _request(self, type, params, callback):
        """Guarded entry point for all API calls made after connect()."""
        if not self.connected:
            raise Exception("Not connected")
        return self._send(type, params, callback)

    def _send(self, type, params, callback):
        """Allocate a sequence number, register it as pending and send.

        With a *callback* the call returns immediately and the callback is
        fired from received_message(). Without one, the call blocks until
        the response arrives and returns its data (or raises on failure).
        """
        # Allocate the sequence number and register the pending entry under
        # the shared lock so concurrent senders cannot collide.
        with self.send_lock:
            sequence = self.sequence
            self.sequence += 1
            if callback is not None:
                self.pending[sequence] = {"callback": callback, "event": None}
            else:
                self.pending[sequence] = {"callback": None, "event": threading.Event()}
        # Perform the network write outside the lock.
        self.send(
            json.dumps({
                "category": EdbotStudioClient.Category["REQUEST"],
                "type": type,
                "sequence": sequence,
                "params": params
            })
        )
        if callback is None:
            # Synchronous call: wait for received_message() to resolve us.
            pending = self.pending[sequence]
            self._wait(pending["event"])
            del self.pending[sequence]
            message = pending["response"]
            if message["status"]["success"]:
                return message["data"]
            else:
                raise Exception(message["status"]["text"])

    def _wait(self, event):
        #
        # The function wait() without a timeout isn't interruptible on Windows.
        # Use this workaround until the Python team sort it.
        #
        while not event.wait(0.1):
            pass
synchronized_lights.py | #!/usr/bin/env python
#
# Licensed under the BSD license. See full license in LICENSE file.
# http://www.lightshowpi.com/
#
# Author: Todd Giles (todd@lightshowpi.com)
# Author: Chris Usey (chris.usey@gmail.com)
# Author: Ryan Jennings
# Author: Paul Dunn (dunnsept@gmail.com)
# Author: Tom Enos (tomslick.ca@gmail.com)
"""Play any audio file and synchronize lights to the music
When executed, this script will play an audio file, as well as turn on
and off N channels of lights to the music (by default the first 8 GPIO
channels on the Raspberry Pi), based upon music it is playing. Many
types of audio files are supported (see decoder.py below), but it has
only been tested with wav and mp3 at the time of this writing.
The timing of the lights turning on and off is based upon the frequency
response of the music being played. A short segment of the music is
analyzed via FFT to get the frequency response across each defined
channel in the audio range. Each light channel is then faded in and
out based upon the amplitude of the frequency response in the
corresponding audio channel. Fading is accomplished with a software
PWM output. Each channel can also be configured to simply turn on and
off as the frequency response in the corresponding channel crosses a
threshold.
FFT calculation can be CPU intensive and in some cases can adversely
affect playback of songs (especially if attempting to decode the song
as well, as is the case for an mp3). For this reason, the FFT
calculations are cached after the first time a new song is played.
The values are cached in a gzip'd text file in the same location as the
song itself. Subsequent requests to play the same song will use the
cached information and not recompute the FFT, thus reducing CPU
utilization dramatically and allowing for clear music playback of all
audio file types.
Recent optimizations have improved this dramatically and most users are
no longer reporting adverse playback of songs even on the first
playback.
Sample usage:
To play an entire list -
sudo python synchronized_lights.py --playlist=/home/pi/music/.playlist
To play a specific song -
sudo python synchronized_lights.py --file=/home/pi/music/jingle_bells.mp3
Third party dependencies:
alsaaudio: for audio input/output
http://pyalsaaudio.sourceforge.net/
decoder.py: decoding mp3, ogg, wma, ...
https://pypi.python.org/pypi/decoder.py/1.5XB
numpy: for FFT calculation
http://www.numpy.org/
"""
import ConfigParser
import argparse
import atexit
import audioop
import csv
import fcntl
import logging as log
import os
import random
import subprocess
import sys
import wave
import alsaaudio as aa
import json
import signal
import decoder
import numpy as np
import cPickle
import time
import errno
import stat
import curses
import bright_curses
import mutagen
from collections import deque
import Platform
import fft
from prepostshow import PrePostShow
import RunningStats
from Queue import Queue, Empty
from threading import Thread
# Make sure SYNCHRONIZED_LIGHTS_HOME environment variable is set
HOME_DIR = os.getenv("SYNCHRONIZED_LIGHTS_HOME")
if not HOME_DIR:
    print("Need to setup SYNCHRONIZED_LIGHTS_HOME environment variable, see readme")
    sys.exit()
LOG_DIR = HOME_DIR + '/logs'

# logging levels
levels = {'DEBUG': log.DEBUG,
          'INFO': log.INFO,
          'WARNING': log.WARNING,
          'ERROR': log.ERROR,
          'CRITICAL': log.CRITICAL}

# Handles to helper processes/streams; populated later by
# set_audio_device() / audio_in().
stream = None
fm_process = None
streaming = None

# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('--log', default='INFO',
                    help='Set the logging level. levels:INFO, DEBUG, WARNING, ERROR, CRITICAL')
filegroup = parser.add_mutually_exclusive_group()
filegroup.add_argument('--playlist', default="playlist_path",
                       help='Playlist to choose song from.')
filegroup.add_argument('--file', help='path to the song to play (required if no '
                                      'playlist is designated)')
parser.add_argument('--readcache', type=int, default=1,
                    help='read light timing from cache if available. Default: true')

log.basicConfig(filename=LOG_DIR + '/music_and_lights.play.dbg',
                format='[%(asctime)s] %(levelname)s {%(pathname)s:%(lineno)d} - %(message)s',
                level=log.INFO)
level = levels.get(parser.parse_args().log.upper())
log.getLogger().setLevel(level)

# Imported here (after logging is configured) rather than at the top of
# the file, because importing hardware_controller has side effects.
import hardware_controller as hc

# get copy of configuration manager
cm = hc.cm

parser.set_defaults(playlist=cm.lightshow.playlist_path)
args = parser.parse_args()

decay_factor = cm.lightshow.decay_factor
decay = np.zeros(cm.hardware.gpio_len, dtype='float32')

network = hc.network
server = network.networking == 'server'
client = network.networking == "client"
# Set to a BrightCurses instance in the __main__ block when the curses
# terminal UI is enabled; False disables terminal rendering.
terminal = False

if cm.lightshow.use_fifo:
    # Recreate the FIFO used by stream-in mode from a clean state.
    # NOTE(review): 0777 is Python 2 octal syntax (0o777 in Python 3).
    if os.path.exists(cm.lightshow.fifo):
        os.remove(cm.lightshow.fifo)
    os.mkfifo(cm.lightshow.fifo, 0777)

CHUNK_SIZE = 2048  # Use a multiple of 8 (move this to config)
def end_early():
    """atexit function

    Best-effort cleanup on interpreter exit: blank the clients' lights,
    release the GPIO pins and tear down helper processes/streams.
    """
    if server:
        # Tell clients we're playing, push all-zero brightness, then stop.
        network.set_playing()
        network.broadcast([0. for _ in range(hc.GPIOLEN)])
        time.sleep(1)
        network.unset_playing()

    hc.clean_up()

    if cm.audio_processing.fm:
        fm_process.kill()

    if network.network_stream:
        network.close_connection()

    if cm.lightshow.mode == 'stream-in':
        try:
            # Ask the streaming subprocess to quit via its stdin.
            streaming.stdin.write("q")
        except NameError:
            pass
        # NOTE(review): this runs even when the write above failed; if
        # streaming is still None it raises - confirm intended.
        os.kill(streaming.pid, signal.SIGINT)
        if cm.lightshow.use_fifo:
            os.unlink(cm.lightshow.fifo)
# Ensure cleanup runs on normal interpreter exit.
atexit.register(end_early)

# Remove traceback on Ctrl-C; sys.exit() still triggers the atexit hook.
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
signal.signal(signal.SIGTERM, lambda x, y: sys.exit(0))
def update_lights(matrix, mean, std):
    """Update the state of all the lights

    Update the state of all the lights based upon the current
    frequency response matrix

    :param matrix: row of data from cache matrix
    :type matrix: list
    :param mean: standard mean of fft values
    :type mean: list
    :param std: standard deviation of fft values
    :type std: list
    """
    global decay

    # Map each channel's FFT level to 0..1 by where it falls between
    # (mean - SD_low*std) and (mean + SD_high*std), then attenuate.
    brightness = matrix - mean + (std * cm.lightshow.SD_low)
    brightness = (brightness / (std * (cm.lightshow.SD_low + cm.lightshow.SD_high))) * \
                 (1.0 - (cm.lightshow.attenuate_pct / 100.0))

    # insure that the brightness levels are in the correct range
    brightness = np.clip(brightness, 0.0, 1.0)
    brightness = np.round(brightness, decimals=3)

    # calculate light decay rate if used
    # NOTE: the order of these three np.where calls matters - `decay` is
    # raised to the new peak first, then both brightness and decay are
    # faded by decay_factor.
    if decay_factor > 0:
        decay = np.where(decay <= brightness, brightness, decay)
        brightness = np.where(decay - decay_factor > 0, decay - decay_factor, brightness)
        decay = np.where(decay - decay_factor > 0, decay - decay_factor, decay)

    # broadcast to clients if in server mode
    if server:
        network.broadcast(brightness)

    if terminal:
        # Curses UI enabled: render levels on screen instead of GPIO.
        terminal.curses_render(brightness)
    else:
        for blevel, pin in zip(brightness, range(hc.GPIOLEN)):
            hc.set_light(pin, True, blevel)
def set_audio_device(sample_rate, num_channels):
    """Setup the audio devices for output

    Returns a writer ``lambda raw_data: ...`` that sends raw PCM data to
    the FM transmitter process, to the configured ALSA card, or nowhere
    when no output is configured.

    :param sample_rate: audio sample rate
    :type sample_rate: int
    :param num_channels: number of audio channels
    :type num_channels: int
    """
    global fm_process
    pi_version = Platform.pi_version()

    if cm.audio_processing.fm:
        # Mono sources are transmitted at half rate.
        srate = str(int(sample_rate / (1 if num_channels > 1 else 2)))

        fm_command = ["sudo",
                      cm.home_dir + "/bin/pifm",
                      "-",
                      cm.audio_processing.frequency,
                      srate,
                      "stereo" if num_channels > 1 else "mono"]

        if pi_version == 2:
            # Pi 2 uses the pi_fm_rds binary with a different CLI.
            fm_command = ["sudo",
                          cm.home_dir + "/bin/pi_fm_rds",
                          "-audio", "-", "-freq",
                          cm.audio_processing.frequency,
                          "-srate",
                          srate,
                          "-nochan",
                          "2" if num_channels > 1 else "1"]

        log.info("Sending output as fm transmission")

        with open(os.devnull, "w") as dev_null:
            fm_process = subprocess.Popen(fm_command, stdin=subprocess.PIPE, stdout=dev_null)

        return lambda raw_data: fm_process.stdin.write(raw_data)
    elif cm.lightshow.audio_out_card != '':
        # BUG FIX: was "is not ''" - an identity comparison with a string
        # literal, which is unreliable (and a SyntaxWarning on modern
        # CPython). Equality is the intended check.
        if cm.lightshow.mode == 'stream-in':
            num_channels = 2

        output_device = aa.PCM(aa.PCM_PLAYBACK, aa.PCM_NORMAL, cm.lightshow.audio_out_card)
        output_device.setchannels(num_channels)
        output_device.setrate(sample_rate)
        output_device.setformat(aa.PCM_FORMAT_S16_LE)
        output_device.setperiodsize(CHUNK_SIZE)

        return lambda raw_data: output_device.write(raw_data)
    else:
        # No audio output configured: discard the data.
        return lambda raw_data: None
def enqueue_output(out, queue):
    """Drain *out* line by line into *queue*, then close the stream.

    Runs on a daemon thread so reads from the streaming subprocess never
    block the main loop; stops at EOF (an empty bytes read).
    """
    while True:
        raw_line = out.readline()
        if raw_line == b'':
            break
        queue.put(raw_line)
    out.close()
def audio_in():
    """Control the lightshow from audio coming in from a real time audio

    Runs forever (until Ctrl+C, or until the configured number of streamed
    songs has played), reading PCM chunks from either the ALSA capture
    device ('audio-in' mode) or a streaming subprocess ('stream-in' mode),
    and driving the lights from a rolling-statistics FFT of each chunk.
    """
    global streaming
    stream_reader = None
    streaming = None
    songcount = 0

    sample_rate = cm.lightshow.input_sample_rate
    num_channels = cm.lightshow.input_channels

    if cm.lightshow.mode == 'audio-in':
        # Open the input stream from default input device
        streaming = aa.PCM(aa.PCM_CAPTURE, aa.PCM_NORMAL, cm.lightshow.audio_in_card)
        streaming.setchannels(num_channels)
        streaming.setformat(aa.PCM_FORMAT_S16_LE)  # Expose in config if needed
        streaming.setrate(sample_rate)
        streaming.setperiodsize(CHUNK_SIZE)
        stream_reader = lambda: streaming.read()[-1]
    elif cm.lightshow.mode == 'stream-in':
        # Queue receiving the subprocess's text output (song titles etc.).
        outq = Queue()
        if cm.lightshow.use_fifo:
            streaming = subprocess.Popen(cm.lightshow.stream_command_string,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         preexec_fn=os.setsid)
            # Non-blocking read side of the FIFO the streamer writes into.
            io = os.open(cm.lightshow.fifo, os.O_RDONLY | os.O_NONBLOCK)
            stream_reader = lambda: os.read(io, CHUNK_SIZE)
            outthr = Thread(target=enqueue_output, args=(streaming.stdout, outq))
        else:
            # Open the input stream from command string
            streaming = subprocess.Popen(cm.lightshow.stream_command_string,
                                         stdin=subprocess.PIPE,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
            stream_reader = lambda: streaming.stdout.read(CHUNK_SIZE)
            outthr = Thread(target=enqueue_output, args=(streaming.stderr, outq))
        outthr.daemon = True
        outthr.start()

    log.debug("Running in %s mode - will run until Ctrl+C is pressed" % cm.lightshow.mode)
    print "Running in %s mode, use Ctrl+C to stop" % cm.lightshow.mode

    # setup light_delay.
    # 16 bits per sample -> bytes/sec / CHUNK_SIZE = chunks per second.
    chunks_per_sec = ((16 * num_channels * sample_rate) / 8) / CHUNK_SIZE
    light_delay = int(cm.audio_processing.light_delay * chunks_per_sec)
    matrix_buffer = deque([], 1000)

    output = set_audio_device(sample_rate, num_channels)

    # Start with these as our initial guesses - will calculate a rolling mean / std
    # as we get input data.
    mean = np.array([12.0 for _ in range(hc.GPIOLEN)], dtype='float32')
    std = np.array([1.5 for _ in range(hc.GPIOLEN)], dtype='float32')
    count = 2

    running_stats = RunningStats.Stats(hc.GPIOLEN)

    # preload running_stats to avoid errors, and give us a show that looks
    # good right from the start
    running_stats.preload(mean, std, count)

    hc.initialize()
    fft_calc = fft.FFT(CHUNK_SIZE,
                       sample_rate,
                       hc.GPIOLEN,
                       cm.audio_processing.min_frequency,
                       cm.audio_processing.max_frequency,
                       cm.audio_processing.custom_channel_mapping,
                       cm.audio_processing.custom_channel_frequencies,
                       1)

    if server:
        network.set_playing()

    # Listen on the audio input device until CTRL-C is pressed
    while True:
        try:
            # NOTE(review): outq only exists in 'stream-in' mode; in
            # 'audio-in' mode this raises NameError - confirm intended.
            streamout = outq.get_nowait().strip('\n\r')
        except Empty:
            pass
        else:
            print streamout
            if cm.lightshow.stream_song_delim in streamout:
                songcount += 1
                if cm.lightshow.songname_command:
                    # Strip terminal control codes and delimiters before
                    # handing the title to the notification command.
                    streamout = streamout.replace('\033[2K', '')
                    streamout = streamout.replace(cm.lightshow.stream_song_delim, '')
                    streamout = streamout.replace('"', '')
                    os.system(cm.lightshow.songname_command + ' "Now Playing ' + streamout + '"')
            if cm.lightshow.stream_song_exit_count > 0 and songcount > cm.lightshow.stream_song_exit_count:
                break

        try:
            data = stream_reader()
        except OSError as err:
            # Non-blocking FIFO read with nothing available yet.
            if err.errno == errno.EAGAIN or err.errno == errno.EWOULDBLOCK:
                continue

        try:
            output(data)
        except aa.ALSAAudioError:
            continue

        if len(data):
            # if the maximum of the absolute value of all samples in
            # data is below a threshold we will disregard it
            audio_max = audioop.max(data, 2)
            if audio_max < 250:
                # we will fill the matrix with zeros and turn the lights off
                matrix = np.zeros(hc.GPIOLEN, dtype="float32")
                log.debug("below threshold: '" + str(audio_max) + "', turning the lights off")
            else:
                matrix = fft_calc.calculate_levels(data)
                running_stats.push(matrix)
                mean = running_stats.mean()
                std = running_stats.std()

            matrix_buffer.appendleft(matrix)

            if len(matrix_buffer) > light_delay:
                matrix = matrix_buffer[light_delay]
                update_lights(matrix, mean, std)
def load_custom_config(config_filename):
    """
    Load custom configuration settings for file config_filename

    Applies per-song overrides onto the hardware controller (hc) and the
    configuration manager (cm) in place; does nothing if the file does
    not exist.

    :param config_filename: string containing path / filename of config
    :type config_filename: str
    """
    """
    example usage
    your song
    carol-of-the-bells.mp3
    First run your playlist (or single files) to create your sync files.  This will
    create a file in the same directory as your music file.
    .carol-of-the-bells.mp3.cfg
    DO NOT EDIT THE existing section [fft], it will cause your sync files to be ignored.
    If you want to use an override you need to add the appropriate section
    The add the options you wish to use, but do not add an option you do not
    want to use, as this will set that option to None and could crash your lightshow.
    Look at defaults.cfg for exact usages of each option
    [custom_lightshow]
    always_on_channels =
    always_off_channels =
    invert_channels =
    preshow_configuration =
    preshow_script =
    postshow_configuration =
    postshow_script =
    SD_low =
    SD_high =
    [custom_audio_processing]
    min_frequency =
    max_frequency =
    custom_channel_mapping =
    custom_channel_frequencies =
    Note: DO NOT EDIT THE existing section [fft]
    Note: If you use any of the options in "custom_audio_processing" your sync files will be
          automatically regenerated after every change.  This is normal as your sync file needs
          to match these new settings.  After they have been regenerated you will see that they
          now match the settings [fft], and you will not have to regenerate then again.  Unless
          you make more changes again.
    Note: Changes made in "custom_lightshow" do not affect the sync files, so you will not need
          to regenerate them after making changes.
    """
    if os.path.isfile(config_filename):
        config = ConfigParser.RawConfigParser(allow_no_value=True)
        with open(config_filename) as f:
            config.readfp(f)

        if config.has_section('custom_lightshow'):
            lsc = "custom_lightshow"

            # Channel override lists are comma-separated ints.
            always_on = "always_on_channels"
            if config.has_option(lsc, always_on):
                hc.always_on_channels = map(int, config.get(lsc, always_on).split(","))

            always_off = "always_off_channels"
            if config.has_option(lsc, always_off):
                hc.always_off_channels = map(int, config.get(lsc, always_off).split(","))

            inverted = "invert_channels"
            if config.has_option(lsc, inverted):
                hc.inverted_channels = map(int, config.get(lsc, inverted).split(","))

            if config.has_option(lsc, "SD_low"):
                cm.lightshow.SD_low = config.getfloat(lsc, "SD_low")

            if config.has_option(lsc, "SD_high"):
                cm.lightshow.SD_high = config.getfloat(lsc, "SD_high")

            # setup up custom preshow
            has_preshow_configuration = config.has_option(lsc, 'preshow_configuration')
            has_preshow_script = config.has_option(lsc, 'preshow_script')

            if has_preshow_configuration or has_preshow_script:
                preshow = None
                try:
                    preshow_configuration = config.get(lsc, 'preshow_configuration')
                except ConfigParser.NoOptionError:
                    preshow_configuration = None
                try:
                    preshow_script = config.get(lsc, 'preshow_script')
                except ConfigParser.NoOptionError:
                    preshow_script = None

                # An inline JSON configuration wins only when no script path
                # is given; otherwise the script path is used.
                if preshow_configuration and not preshow_script:
                    try:
                        preshow = json.loads(preshow_configuration)
                    except (ValueError, TypeError) as error:
                        msg = "Preshow_configuration not defined or not in JSON format."
                        log.error(msg + str(error))
                else:
                    if os.path.isfile(preshow_script):
                        preshow = preshow_script

                cm.lightshow.preshow = preshow

            # setup postshow
            has_postshow_configuration = config.has_option(lsc, 'postshow_configuration')
            has_postshow_script = config.has_option(lsc, 'postshow_script')

            if has_postshow_configuration or has_postshow_script:
                postshow = None
                # NOTE(review): unlike the preshow branch these get() calls
                # are not wrapped for NoOptionError - confirm intended.
                postshow_configuration = config.get(lsc, 'postshow_configuration')
                postshow_script = config.get(lsc, 'postshow_script')

                if postshow_configuration and not postshow_script:
                    try:
                        postshow = json.loads(postshow_configuration)
                    except (ValueError, TypeError) as error:
                        msg = "Postshow_configuration not defined or not in JSON format."
                        log.error(msg + str(error))
                else:
                    if os.path.isfile(postshow_script):
                        postshow = postshow_script

                cm.lightshow.postshow = postshow

        if config.has_section('custom_audio_processing'):
            if config.has_option('custom_audio_processing', 'min_frequency'):
                cm.audio_processing.min_frequency = config.getfloat('custom_audio_processing',
                                                                    'min_frequency')

            if config.has_option('custom_audio_processing', 'max_frequency'):
                cm.audio_processing.max_frequency = config.getfloat('custom_audio_processing',
                                                                    'max_frequency')

            if config.has_option('custom_audio_processing', 'custom_channel_mapping'):
                temp = config.get('custom_audio_processing', 'custom_channel_mapping')
                cm.audio_processing.custom_channel_mapping = map(int,
                                                                 temp.split(',')) if temp else 0

            if config.has_option('custom_audio_processing', 'custom_channel_frequencies'):
                temp = config.get('custom_audio_processing', 'custom_channel_frequencies')
                cm.audio_processing.custom_channel_frequencies = map(int,
                                                                     temp.split(
                                                                         ',')) if temp else 0
def setup_audio(song_filename):
    """Open the music file and prepare the output device and FFT helper.

    The returned output is a lambda that sends raw data to the fm process
    or to the specified ALSA sound card.

    :param song_filename: path / filename to music file
    :type song_filename: str
    :return: output, fft_calc, music_file, light_delay
    :rtype tuple: lambda, subprocess, fft.FFT, decoder
    """
    # mp4-family containers need the decoder to force a header.
    force_header = any(ext in song_filename for ext in [".mp4", ".m4a", ".m4b"])

    music_file = decoder.open(song_filename, force_header)
    sample_rate = music_file.getframerate()
    num_channels = music_file.getnchannels()

    fft_calc = fft.FFT(CHUNK_SIZE,
                       sample_rate,
                       hc.GPIOLEN,
                       cm.audio_processing.min_frequency,
                       cm.audio_processing.max_frequency,
                       cm.audio_processing.custom_channel_mapping,
                       cm.audio_processing.custom_channel_frequencies)

    # setup output device
    output = set_audio_device(sample_rate, num_channels)

    chunks_per_sec = ((16 * num_channels * sample_rate) / 8) / CHUNK_SIZE
    light_delay = int(cm.audio_processing.light_delay * chunks_per_sec)

    # Output a bit about what we're about to play to the logs
    song_duration = str(music_file.getnframes() / sample_rate)
    log.info("Playing: " + song_filename + " (" + song_duration + " sec)")

    return output, fft_calc, music_file, light_delay
def setup_cache(cache_filename, fft_calc):
    """Setup the cache_matrix, std and mean

    loading them from a file if it exists, otherwise create empty arrays to be filled

    :param cache_filename: path / filename to cache file
    :type cache_filename: str
    :param fft_calc: instance of FFT class
    :type fft_calc: fft.FFT
    :return: tuple of cache_found, cache_matrix, std, mean
    :type tuple: (bool, numpy.array, numpy.array, numpy.array)
    :raise IOError:
    """
    # create empty array for the cache_matrix
    cache_matrix = np.empty(shape=[0, hc.GPIOLEN])
    cache_found = False

    # The values 12 and 1.5 are good estimates for first time playing back
    # (i.e. before we have the actual mean and standard deviations
    # calculated for each channel).
    mean = np.array([12.0 for _ in range(hc.GPIOLEN)], dtype='float32')
    std = np.array([1.5 for _ in range(hc.GPIOLEN)], dtype='float32')

    if args.readcache:
        # Read in cached fft
        try:
            # load cache from file using numpy loadtxt
            cache_matrix = np.loadtxt(cache_filename)

            # compare configuration of cache file to current configuration
            cache_found = fft_calc.compare_config(cache_filename)
            if not cache_found:
                # Config mismatch: discard the loaded cache and jump to
                # the IOError handler below so it gets regenerated.
                # create empty array for the cache_matrix
                cache_matrix = np.empty(shape=[0, hc.GPIOLEN])
                raise IOError()

            # get std from matrix / located at index 0
            std = np.array(cache_matrix[0])

            # get mean from matrix / located at index 1
            mean = np.array(cache_matrix[1])

            # delete mean and std from the array
            cache_matrix = np.delete(cache_matrix, 0, axis=0)
            cache_matrix = np.delete(cache_matrix, 0, axis=0)

            log.debug("std: " + str(std) + ", mean: " + str(mean))
        except IOError:
            cache_found = fft_calc.compare_config(cache_filename)
            msg = "Cached sync data song_filename not found: '"
            log.warn(msg + cache_filename + "'.  One will be generated.")

    return cache_found, cache_matrix, std, mean
def save_cache(cache_matrix, cache_filename, fft_calc):
    """
    Save matrix, std, and mean to cache_filename for use during future playback

    :param cache_matrix: numpy array containing the matrix
    :type cache_matrix: numpy.array
    :param cache_filename: name of the cache file to look for
    :type cache_filename: str
    :param fft_calc: instance of fft.FFT
    :type fft_calc: fft.FFT
    """
    # Compute the standard deviation and mean values for the cache,
    # considering only the non-zero samples in each channel.
    mean = np.empty(hc.GPIOLEN, dtype='float32')
    std = np.empty(hc.GPIOLEN, dtype='float32')

    for i in range(0, hc.GPIOLEN):
        std[i] = np.std([item for item in cache_matrix[:, i] if item > 0])
        mean[i] = np.mean([item for item in cache_matrix[:, i] if item > 0])

    # Add mean and std to the top of the cache: std ends up at row 0 and
    # mean at row 1, matching the layout setup_cache() reads back.
    cache_matrix = np.vstack([mean, cache_matrix])
    cache_matrix = np.vstack([std, cache_matrix])

    # Save the cache using numpy savetxt
    np.savetxt(cache_filename, cache_matrix)

    # Save fft config
    fft_calc.save_config()

    cm_len = str(len(cache_matrix))
    # BUG FIX: the original messages embedded a stray '.' inside the quoted
    # filename ("written to '." + name) and the second message was missing
    # its closing quote entirely.
    log.info("Cached sync data written to '" + cache_filename + "' [" + cm_len + " rows]")
    log.info("Cached config data written to '" + fft_calc.config_filename + "'")
def get_song():
    """
    Determine the next file to play

    Resolution order: --file argument, the most-voted playlist entry, a
    "play now" request, a random pick, or the next song in the lineup.

    :return: tuple containing 3 strings: song_filename, config_filename, cache_filename
    :rtype: tuple
    """
    play_now = int(cm.get_state('play_now', "0"))
    song_to_play = int(cm.get_state('song_to_play', "0"))
    song_filename = args.file

    if args.playlist is not None and args.file is None:
        most_votes = [None, None, []]

        # Read the playlist under a shared lock; each row is
        # <name>\t<path>[\t<votes>[\t<status>]].
        with open(args.playlist, 'rb') as playlist_fp:
            fcntl.lockf(playlist_fp, fcntl.LOCK_SH)
            playlist = csv.reader(playlist_fp, delimiter='\t')
            songs = []

            for song in playlist:
                if len(song) < 2 or len(song) > 4:
                    log.error('Invalid playlist.  Each line should be in the form: '
                              '<song name><tab><path to song>')
                    log.warning('Removing invalid entry')
                    print "Error found in playlist"
                    print "Deleting entry:", song
                    continue
                elif len(song) == 2:
                    song.append(set())
                else:
                    song[2] = set(song[2].split(','))
                    if len(song) == 3 and len(song[2]) >= len(most_votes[2]):
                        most_votes = song
                songs.append(song)

            fcntl.lockf(playlist_fp, fcntl.LOCK_UN)

        if most_votes[0] is not None:
            log.info("Most Votes: " + str(most_votes))
            current_song = most_votes

            # Update playlist with latest votes
            with open(args.playlist, 'wb') as playlist_fp:
                fcntl.lockf(playlist_fp, fcntl.LOCK_EX)
                writer = csv.writer(playlist_fp, delimiter='\t')

                for song in songs:
                    if current_song == song and len(song) == 3:
                        song.append("playing!")
                    # Serialize the vote set back to a comma-joined string.
                    if len(song[2]) > 0:
                        song[2] = ",".join(song[2])
                    else:
                        del song[2]

                writer.writerows(songs)
                fcntl.lockf(playlist_fp, fcntl.LOCK_UN)
        else:
            # Get a "play now" requested song
            if 0 < play_now <= len(songs):
                current_song = songs[play_now - 1]
            # Get random song
            elif cm.lightshow.randomize_playlist:
                current_song = songs[random.randrange(0, len(songs))]
            # Play next song in the lineup
            else:
                if not (song_to_play <= len(songs) - 1):
                    song_to_play = 0

                current_song = songs[song_to_play]

                if (song_to_play + 1) <= len(songs) - 1:
                    next_song = (song_to_play + 1)
                else:
                    next_song = 0

                cm.update_state('song_to_play', str(next_song))

        # Get filename to play and store the current song playing in state cfg
        song_filename = current_song[1]
        cm.update_state('current_song', str(songs.index(current_song)))

    song_filename = song_filename.replace("$SYNCHRONIZED_LIGHTS_HOME", cm.home_dir)

    if cm.lightshow.songname_command:
        # Announce the song title/artist via the configured shell command.
        metadata = mutagen.File(song_filename, easy=True)
        if not metadata is None:
            if "title" in metadata:
                now_playing = "Now Playing " + metadata["title"][0] + " by " + metadata["artist"][0]
                os.system(cm.lightshow.songname_command + " \"" + now_playing + "\"")

    # Per-song config and FFT sync cache live next to the song, dot-prefixed.
    filename = os.path.abspath(song_filename)
    config_filename = os.path.dirname(filename) + "/." + os.path.basename(song_filename) + ".cfg"
    cache_filename = os.path.dirname(filename) + "/." + os.path.basename(song_filename) + ".sync"

    return song_filename, config_filename, cache_filename
def play_song():
    """Play the next song from the play list (or --file argument).

    Runs the optional pre-show, streams the song chunk by chunk to the
    audio output while driving the lights (from cache when available),
    saves a new FFT cache if needed, then runs the optional post-show.
    """
    # get the next song to play
    song_filename, config_filename, cache_filename = get_song()

    # load custom configuration from file
    load_custom_config(config_filename)

    # Initialize Lights
    network.set_playing()
    hc.initialize()

    # Handle the pre/post show
    play_now = int(cm.get_state('play_now', "0"))

    network.unset_playing()

    if not play_now:
        result = PrePostShow('preshow', hc).execute()

        if result == PrePostShow.play_now_interrupt:
            play_now = int(cm.get_state('play_now', "0"))

    network.set_playing()

    # Ensure play_now is reset before beginning playback
    if play_now:
        cm.update_state('play_now', "0")
        play_now = 0

    # setup audio file and output device
    output, fft_calc, music_file, light_delay = setup_audio(song_filename)

    # setup our cache_matrix, std, mean
    cache_found, cache_matrix, std, mean = setup_cache(cache_filename, fft_calc)

    matrix_buffer = deque([], 1000)

    # Process audio song_filename
    row = 0
    data = music_file.readframes(CHUNK_SIZE)

    while data != '' and not play_now:
        # output data to sound device
        output(data)

        # Control lights with cached timing values if they exist
        matrix = None
        if cache_found and args.readcache:
            if row < len(cache_matrix):
                matrix = cache_matrix[row]
            else:
                log.warning("Ran out of cached FFT values, will update the cache.")
                cache_found = False

        if matrix is None:
            # No cache - Compute FFT in this chunk, and cache results
            matrix = fft_calc.calculate_levels(data)

            # Add the matrix to the end of the cache
            cache_matrix = np.vstack([cache_matrix, matrix])

        # Delay the rendered lights behind the audio by light_delay chunks.
        matrix_buffer.appendleft(matrix)

        if len(matrix_buffer) > light_delay:
            matrix = matrix_buffer[light_delay]
            update_lights(matrix, mean, std)

        # Read next chunk of data from music song_filename
        data = music_file.readframes(CHUNK_SIZE)
        row += 1

        # Load new application state in case we've been interrupted
        cm.load_state()
        play_now = int(cm.get_state('play_now', "0"))

    if not cache_found and not play_now:
        save_cache(cache_matrix, cache_filename, fft_calc)

    # Cleanup the pifm process
    if cm.audio_processing.fm:
        fm_process.kill()

    # check for postshow
    network.unset_playing()

    if not play_now:
        PrePostShow('postshow', hc).execute()

    # We're done, turn it all off and clean up things ;)
    hc.clean_up()
def network_client():
"""Network client support
If in client mode, ignore everything else and just
read data from the network and blink the lights
"""
log.info("Network client mode starting")
print "Network client mode starting..."
print "press CTRL<C> to end"
hc.initialize()
print
try:
channels = network.channels
channel_keys = channels.keys()
while True:
data = network.receive()
if isinstance(data[0], int):
pin = data[0]
if pin in channel_keys:
hc.set_light(channels[pin], True, float(data[1]))
continue
elif isinstance(data[0], np.ndarray):
blevels = data[0]
else:
continue
for pin in channel_keys:
hc.set_light(channels[pin], True, blevels[pin])
except KeyboardInterrupt:
log.info("CTRL<C> pressed, stopping")
print "stopping"
network.close_connection()
hc.clean_up()
def launch_curses(screen):
    """Initiate the curses window and enter the main program loop.

    :param screen: window object representing the entire screen
        (supplied by curses.wrapper)
    """
    # `terminal` is the module-level BrightCurses instance created in __main__
    terminal.init(screen)
    main()
def main():
    """Dispatch to the configured light show mode.

    Audio-in modes stream from the sound card, client mode follows a remote
    master over the network, otherwise a local song file is played.
    """
    if "-in" in cm.lightshow.mode:
        audio_in()
    elif client:
        network_client()
    else:
        play_song()
if __name__ == "__main__":
    # Make sure one of --playlist or --file was specified
    if args.file is None and args.playlist is None:
        print "One of --playlist or --file must be specified"
        sys.exit()

    if cm.terminal.enabled:
        try:
            # module-level `terminal` is read by launch_curses(); curses.wrapper
            # restores the terminal state on exit or exception
            terminal = bright_curses.BrightCurses(cm.terminal)
            curses.wrapper(launch_curses)
        except KeyboardInterrupt:
            print "Got KeyboardInterrupt exception. Exiting..."
            exit()
    else:
        main()
|
threaded.py | # vim:fileencoding=utf-8:noet
from __future__ import absolute_import
from powerline.lib.monotonic import monotonic
from threading import Thread, Lock, Event
class MultiRunnedThread(object):
    """Thread wrapper that can be restarted.

    A threading.Thread may only be started once; each call to start() here
    builds a fresh Thread running self.run().
    """

    # Copied onto each new worker thread; subclasses may override.
    daemon = True

    def __init__(self):
        self.thread = None

    def is_alive(self):
        """Truthy iff a worker thread exists and is currently running."""
        return self.thread and self.thread.is_alive()

    def start(self):
        """Spawn a brand-new worker thread running self.run().

        NOTE(review): relies on self.shutdown_event being assigned by a
        subclass (e.g. via set_state()) before start() is called.
        """
        self.shutdown_event.clear()
        worker = Thread(target=self.run)
        worker.daemon = self.daemon
        self.thread = worker
        worker.start()

    def join(self, *args, **kwargs):
        """Join the worker thread if one was ever created."""
        if not self.thread:
            return None
        return self.thread.join(*args, **kwargs)
class ThreadedSegment(MultiRunnedThread):
    """Segment whose value is computed on a background thread.

    Rendering (__call__) returns the most recent value produced by .update();
    the worker thread re-runs .update() every .interval seconds.
    """

    # Minimum sleep between updates, even when .interval is smaller.
    min_sleep_time = 0.1
    # Whether the first render should compute a value synchronously.
    update_first = True
    # Seconds between background updates (overridable via set_interval()).
    interval = 1
    daemon = False

    def __init__(self):
        super(ThreadedSegment, self).__init__()
        # True until startup() is called: run synchronously, without a thread.
        self.run_once = True
        # Set when .update() crashed; makes __call__ return crashed_value.
        self.skip = False
        self.crashed_value = None
        # Last value returned by .update(); fed back into the next .update().
        self.update_value = None
        # Whether a synchronous first update has already happened.
        self.updated = False

    def __call__(self, pl, update_first=True, **kwargs):
        """Render the segment, updating synchronously when necessary."""
        if self.run_once:
            self.pl = pl
            self.set_state(**kwargs)
            update_value = self.get_update_value(True)
        elif not self.is_alive():
            # Without this we will not have to wait long until receiving bug “I
            # opened vim, but branch information is only shown after I move
            # cursor”.
            #
            # If running once .update() is called in __call__.
            update_value = self.get_update_value(update_first and self.update_first)
            self.start()
        elif not self.updated:
            update_value = self.get_update_value(True)
            self.updated = True
        else:
            update_value = self.update_value

        if self.skip:
            return self.crashed_value

        return self.render(update_value, update_first=update_first, pl=pl, **kwargs)

    def get_update_value(self, update=False):
        """Return the current value, recomputing it first when *update* is true."""
        if update:
            self.update_value = self.update(self.update_value)
        return self.update_value

    def run(self):
        """Worker-thread loop: call .update() periodically until shutdown."""
        while not self.shutdown_event.is_set():
            start_time = monotonic()
            try:
                self.update_value = self.update(self.update_value)
            except Exception as e:
                self.exception('Exception while updating: {0}', str(e))
                self.skip = True
            except KeyboardInterrupt:
                self.warn('Caught keyboard interrupt while updating')
                self.skip = True
            else:
                self.skip = False
            # Sleep out the remainder of the interval; shutdown interrupts the wait.
            self.shutdown_event.wait(max(self.interval - (monotonic() - start_time), self.min_sleep_time))

    def shutdown(self):
        """Ask the worker thread to stop."""
        self.shutdown_event.set()
        if self.daemon and self.is_alive():
            # Give the worker thread a chance to shutdown, but don't block for
            # too long
            self.join(0.01)

    def set_interval(self, interval=None):
        # Allowing “interval” keyword in configuration.
        # Note: Here **kwargs is needed to support foreign data, in subclasses
        # it can be seen in a number of places in order to support
        # .set_interval().
        interval = interval or getattr(self, 'interval')
        self.interval = interval

    def set_state(self, interval=None, update_first=True, shutdown_event=None, **kwargs):
        """(Re)configure the segment; called at startup and on first render."""
        self.set_interval(interval)
        self.shutdown_event = shutdown_event or Event()
        # Skip the synchronous first update if the caller disabled it.
        self.updated = self.updated or (not (update_first and self.update_first))

    def startup(self, pl, **kwargs):
        """Switch to threaded operation and start the worker if needed."""
        self.run_once = False
        self.pl = pl
        self.daemon = pl.use_daemon_threads
        self.set_state(**kwargs)
        if not self.is_alive():
            self.start()

    # Logging helpers: forward to the powerline logger, prefixed with the
    # concrete segment class name.
    def critical(self, *args, **kwargs):
        self.pl.critical(prefix=self.__class__.__name__, *args, **kwargs)

    def exception(self, *args, **kwargs):
        self.pl.exception(prefix=self.__class__.__name__, *args, **kwargs)

    def info(self, *args, **kwargs):
        self.pl.info(prefix=self.__class__.__name__, *args, **kwargs)

    def error(self, *args, **kwargs):
        self.pl.error(prefix=self.__class__.__name__, *args, **kwargs)

    def warn(self, *args, **kwargs):
        self.pl.warn(prefix=self.__class__.__name__, *args, **kwargs)

    def debug(self, *args, **kwargs):
        self.pl.debug(prefix=self.__class__.__name__, *args, **kwargs)
class KwThreadedSegment(ThreadedSegment):
    """Threaded segment that keeps one computed state per distinct kwargs set.

    Each render registers its kwargs key; the worker recomputes the state of
    every recently queried key and drops keys unused for drop_interval.
    """

    # Seconds after which a key that was not queried again is dropped.
    drop_interval = 10 * 60
    update_first = True

    def __init__(self):
        super(KwThreadedSegment, self).__init__()
        self.updated = True
        # (queries, crashed): key -> (last_query_time, state), plus the set of
        # keys whose state computation raised.
        self.update_value = ({}, set())
        # Guards new_queries, which is written from render() (UI thread) and
        # drained from update() (worker thread).
        self.write_lock = Lock()
        self.new_queries = {}

    @staticmethod
    def key(**kwargs):
        """Map render kwargs to a hashable cache key."""
        return frozenset(kwargs.items())

    def render(self, update_value, update_first, **kwargs):
        queries, crashed = update_value
        key = self.key(**kwargs)
        if key in crashed:
            return self.crashed_value

        try:
            update_state = queries[key][1]
        except KeyError:
            # Allow only to forbid to compute missing values: in either user
            # configuration or in subclasses.
            update_state = self.compute_state(key) if ((update_first and self.update_first) or self.run_once) else None

        with self.write_lock:
            # Record the query time so the worker keeps this key alive.
            self.new_queries[key] = (monotonic(), update_state)

        return self.render_one(update_state, **kwargs)

    def update(self, old_update_value):
        """Recompute the state of every live key; runs on the worker thread."""
        updates = {}
        crashed = set()
        update_value = (updates, crashed)
        queries = old_update_value[0]
        with self.write_lock:
            if self.new_queries:
                queries.update(self.new_queries)
                self.new_queries.clear()

        for key, (last_query_time, state) in queries.items():
            # Keep only keys queried within the last drop_interval seconds.
            if last_query_time < monotonic() < last_query_time + self.drop_interval:
                try:
                    updates[key] = (last_query_time, self.compute_state(key))
                except Exception as e:
                    self.exception('Exception while computing state for {0!r}: {1}', key, str(e))
                    crashed.add(key)
                except KeyboardInterrupt:
                    self.warn('Interrupt while computing state for {0!r}', key)
                    crashed.add(key)

        return update_value

    def set_state(self, interval=None, shutdown_event=None, **kwargs):
        self.set_interval(interval)
        self.shutdown_event = shutdown_event or Event()

    @staticmethod
    def render_one(update_state, **kwargs):
        """Hook: turn a computed state into segment output (identity by default)."""
        return update_state
def with_docstring(instance, doc):
    """Attach *doc* as the __doc__ of *instance* and return the instance.

    Convenience helper for documenting segment objects created at import time.
    """
    setattr(instance, '__doc__', doc)
    return instance
|
streaming.py | #21datalabplugin
from abc import ABC, abstractmethod
import utils
from utils import Profiling
from system import __functioncontrolfolder
import dates
import numpy
import numpy as np
import copy
import time
from model import getRandomId
import threading
class Interface(ABC):
    """Abstract base class for all streaming processors.

    Concrete processors must implement feed(), flush() and reset().
    """

    @abstractmethod
    def flush(self, data=None):
        """Flush any buffered state, optionally processing *data* first."""

    @abstractmethod
    def feed(self, data=None):
        """Consume one blob of streaming data."""

    @abstractmethod
    def reset(self, data=None):
        """Re-initialise the processor from its configuration."""
# Blueprint dicts: model-tree templates for the streaming objects defined in
# this module. The framework instantiates the "class" entry and wires up the
# listed children / referencers.

PipeLineHead = {
    "name": "PipelineHead",
    "type": "object",
    "class": "streaming.PipelineHead",
    "children": [
        {"name": "enabled", "type": "const", "value": True},
        {"name": "processors", "type": "referencer"},
        {"name": "variables", "type": "referencer"},  # ref to the variables and eventseries (only one) for the renaming of incoming descriptors
        {"name": "autoCreate", "type": "const", "value": False},
        {"name": "autoCreateFolder", "type": "referencer"},
        __functioncontrolfolder
    ]
}

Splitter = {
    "name": "Splitter",
    "type": "object",
    "class": "streaming.SplitterClass",
    "children": [
        {"name": "enabled", "type": "const", "value": True},
        {"name": "threaded", "type": "const", "value": True},  # set this true to execute the pipes in the threading pipeline, false to execute them sync from here
        {"name": "pipelines", "type": "referencer"},
        __functioncontrolfolder
    ]
}

FlowMonitor = {
    "name": "FlowMonitor",
    "type": "object",
    "class": "streaming.FlowMonitorClass",
    "children": [
        {"name": "enabled", "type": "const", "value": False},
        {"name": "windowSize", "type": "const", "value": 30},  # the monitoring window in seconds
        {"name": "expectedNoOfMsgs", "type": "const", "value": 6},  # if >= this number
        {"name": "alarm", "type": "variable", "value": False},  # values: alarm, ok, idle
        {"name": "alarmMessagesFolder", "type": "referencer"},
        __functioncontrolfolder
    ]
}
class PipelineHead():
    """Entry point of a streaming pipeline.

    Receives raw blobs from the REST interface, translates descriptor
    names/browse-paths to node ids (optionally auto-creating unknown
    timeseries nodes) and forwards the blobs into the processor pipeline.
    """

    def __init__(self, functionNode):
        self.logger = functionNode.get_logger()
        # fixed: log message previously said "init ThresholdScorer()" (copy-paste)
        self.logger.debug("init PipelineHead()")
        self.functionNode = functionNode
        self.model = functionNode.get_model()
        self.enabledNode = functionNode.get_child("enabled")
        # self.reset() #this is executed at startup

    def feed(self, data):
        """
        this is the interface function to the REST call to insert into the stream, so we convert the data as needed
        and send it on
        """
        p = Profiling("feed")
        if not self.enabledNode.get_value():
            return True
        for blob in data:
            # first, we convert all names to ids
            if blob["type"] in ["timeseries", "eventseries"]:
                if blob["type"] == "eventseries":
                    # print(f"eventseries coming {blob}")
                    pass
                blob["data"] = self.__convert_to_ids__(blob["data"])
                p.lap("#")
            blob = self.pipeline.feed(blob)
        # print(p)
        return True

    def __convert_to_ids__(self, blob):
        """
        convert incoming descriptors to ids
        convert times to epoch
        support the __events name for a default eventnode
        """
        newBlob = {}
        for k, v in blob.items():
            if k == "__time":
                if type(v) is not list:
                    v = [v]
                if type(v[0]) is str:
                    v = [dates.date2secs(t) for t in v]  # convert to epoch
                newBlob[k] = numpy.asarray(v)
            else:
                # try to convert a data descriptor
                if k in self.varNameLookup:
                    node_id = self.varNameLookup[k].get_id()  # renamed from `id` (shadowed builtin)
                    if type(v) is not list:
                        v = [v]
                    newBlob[node_id] = numpy.asarray(v)
                else:
                    if self.autoCreate:
                        self.logger.warning(f"__convert_to_ids__: cant find {k}.. autocreate it")
                        startPath = self.functionNode.get_child("autoCreateFolder").get_target().get_browse_path()
                        path = startPath + "." + k
                        node_id = self.model.create_node_from_path(path, properties={"type": "timeseries"})
                        newNode = self.model.get_node(node_id)
                        self.varNameLookup.update({node_id: newNode, newNode.get_browse_path(): newNode, k: newNode})
                        # fixed: wrap scalars into a list like the known-variable
                        # branch does, so asarray builds a 1-d array
                        if type(v) is not list:
                            v = [v]
                        newBlob[node_id] = numpy.asarray(v)
                    else:
                        self.logger.warning(f"__convert_to_ids__: cant find {k}, ignore!")
        return newBlob

    def reset(self, data=None):
        """Rebuild the descriptor lookup table and the processor pipeline."""
        # create look up table for variables: name, browse path and id all map
        # to the node object
        leaves = self.functionNode.get_child("variables").get_leaves()
        self.varNameLookup = {}
        for node in leaves:
            typ = node.get_type()
            if typ == "timeseries":
                self.varNameLookup[node.get_name()] = node
            elif typ == "eventseries":
                self.varNameLookup[node.get_name()] = node
                self.varNameLookup["__events"] = node  # this is the default entry for incoming events
        varBrowsePathLookup = {node.get_browse_path(): node for node in leaves if node.get_type() in ["timeseries", "eventseries"]}
        self.varNameLookup.update(varBrowsePathLookup)
        varIdLookup = {node.get_id(): node for node in leaves if node.get_type() in ["timeseries", "eventseries"]}
        self.varNameLookup.update(varIdLookup)

        # build the pipeline
        self.pipeline = Pipeline(self.functionNode.get_child("processors").get_targets())
        self.pipeline.reset()  # reset all processors

        # the autoCreate switch is optional in older models
        if self.functionNode.get_child("autoCreate"):
            self.autoCreate = self.functionNode.get_child("autoCreate").get_value()
        else:
            self.autoCreate = False
        return True
class SplitterClass():
    """Fan-out node: forwards each incoming blob to several pipelines."""

    def __init__(self, functionNode):
        self.logger = functionNode.get_logger()
        self.logger.debug("init Splitter")
        self.functionNode = functionNode
        self.model = functionNode.get_model()
        self.enabledNode = functionNode.get_child("enabled")

    def reset(self, data=None):
        """Re-read configuration and reset every referenced pipeline."""
        self.threaded = self.functionNode.get_child("threaded").get_value()
        self.pipelineNodes = self.functionNode.get_child("pipelines").get_targets()
        for node in self.pipelineNodes:
            node.get_object().reset()

    def feed(self, data=None):
        """Forward *data* to all pipelines, threaded or synchronously."""
        if not self.enabledNode.get_value():
            return None
        for node in self.pipelineNodes:
            if self.threaded:
                # hand over to the model's threaded execution; deep copy so the
                # pipelines cannot mutate each other's data
                self.model.execute_object_function(node.get_id(), "feed", copy.deepcopy(data))
            else:
                node.get_object().feed(data)
class Pipeline():
    """A chain of processor nodes; each blob is fed through all processors in order."""

    def __init__(self, processors=None):
        """
        Args:
            processors: list of Node() objects wrapping processor instances.
        """
        # fixed: mutable default argument ([]) was shared across instances
        self.processors = processors if processors is not None else []

    def reset(self, data=None):
        for p in self.processors:
            p.get_object().reset(data)

    def feed(self, data):
        """Push *data* (a blob or list of blobs) through all processors.

        A processor may return a single blob or a list of blobs; lists are
        flattened before the next stage.
        """
        pro = utils.Profiling("pipee")
        for p in self.processors:
            if not isinstance(data, list):
                data = [data]
            returnData = []
            for blob in data:
                result = p.get_object().feed(blob)
                if isinstance(result, list):
                    returnData.extend(result)
                else:
                    returnData.append(result)
            pro.lap(p.get_name())
            data = returnData
        print(pro)
        return data

    def flush(self, data):
        # NOTE(review): each processor's flush() is called without arguments
        # and the incoming *data* is never passed on — confirm this chaining
        # is intended.
        for p in self.processors:
            data = p.get_object().flush()
        return data
class Windowing:
    """Incremental resampler + sliding-window generator for time series.

    Raw (time, value) data is appended via insert()/iterate(); iterate()
    resamples the cached data onto an equidistant grid and yields hole-free
    windows of samplePointsPerWindow points, advancing stepSize points per
    window.
    """

    def __init__(self, samplePeriod, stepSize, maxHoleSize, samplePointsPerWindow, debug=False):
        """
        Args:
            samplePeriod [float]: resampling period in seconds
            stepSize [int]: advance to the next window, in resampled points
            maxHoleSize [float]: largest tolerated data gap, in seconds
            samplePointsPerWindow [int]: window length in resampled points
            debug [bool]: enable diagnostic printing via log()
        """
        self.samplePeriod = samplePeriod
        self.stepSize = stepSize
        self.maxHoleSizeSeconds = maxHoleSize
        self.samplePointsPerWindow = samplePointsPerWindow
        self._update_max_hole_size()
        self.debug = debug
        # total time span covered by one window
        self.windowTime = (samplePointsPerWindow - 1) * samplePeriod
        # raw (not yet resampled) cache, assumed time-ordered
        self.times = np.asarray([], dtype=np.float64)
        self.values = np.asarray([], dtype=np.float64)
        self.currentStartTime = 0  # origin of the resampling grid

    def _update_max_hole_size(self):
        # convert the hole size from seconds to resampled points; clamp below
        # the window length so a hole can never span a whole window
        self.maxHoleSamples = int(float(self.maxHoleSizeSeconds) / float(self.samplePeriod))
        if (self.maxHoleSamples > self.samplePointsPerWindow): self.maxHoleSamples = self.samplePointsPerWindow - 1

    def __append(self, times, values):
        # the very first data point defines the resampling grid origin
        if self.times.size == 0:
            self.currentStartTime = times[0]
        self.times = np.append(self.times, times)
        self.values = np.append(self.values, values)

    def insert(self, times, values):
        """
        insert data in the internal cache
        """
        self.__append(times, values)

    def get_all(self, times, values):
        """Feed *times*/*values* and return all resulting windows as a list."""
        return list(self.iterate(times, values))

    def log(self, msg):
        if self.debug:
            print(msg)

    def __sample_times(self, start, stop, step):
        """
        creating an array of equidistant points including the stop value if possible
        Args:
            start: first value in the array
            stop: last value in the array (including)
            step: step size
        """
        c = np.arange(start, stop, step)
        if c.size == 0:
            c = np.asarray([start])
        if c[-1] + step <= stop:  # can we still include the last point?
            c = np.append(c, c[-1] + step)  # unfortunately, this requires realloc of the array memory
        return c

    def iterate(self, times=None, values=None):
        """Generator yielding [windowTimes, windowValues] pairs.

        we assume :
         - time-ordered incoming data, so we can simply append
        """
        # first, update the internal cache memory
        if type(times) is not type(None):
            self.__append(times, values)

        # first downsample the current data, so create the target time points
        samplingTimes = self.__sample_times(self.currentStartTime, self.times[-1], self.samplePeriod)
        samplingIndices = np.searchsorted(self.times, samplingTimes)
        # print(f"no sampingIndices {samplingIndices}, sample times {samplingTimes}")
        if samplingIndices[-1] >= len(self.times):
            print("must reduce resampling times")
            samplingIndices = samplingIndices[0:-1]

        # now downsample
        values = self.values[samplingIndices]
        times = self.times[samplingIndices]

        start = 0  # the startindex of the window
        while True:
            if values.size - start >= self.samplePointsPerWindow:
                timesW = times[start:start + self.samplePointsPerWindow]
                if self.maxHoleSamples:
                    # find holes by subtracting times and shifted times, if the diff is zero they are the same
                    # (instead, we could have taken the samplingIndices and look into them)
                    # so the original data did not have any "new" points there, the timeseries class just filled them
                    # with the forward fill method
                    # attention: this does not easily work for UPSAMPLING, as we will have many samples in a row from the same original time
                    # as we forward fill the upsampling with the one original value
                    holes = (timesW[self.maxHoleSamples:] - timesW[0:-self.maxHoleSamples]) == 0
                    if np.any(holes):
                        # this window has a hole, step forward
                        # find the last index and advance
                        # print("hole")
                        lastZero = np.max(np.argwhere(holes)) + self.maxHoleSamples
                        start = start + lastZero
                        continue
                yield([samplingTimes[start:start + self.samplePointsPerWindow], values[start:start + self.samplePointsPerWindow]])
                start = start + self.stepSize
            else:
                break

        # complete all yields, now shift the data
        # the current start position is the position which did not work, so we take that for the next
        if start != 0:  # if start is zero we did not create any window
            originalIndex = samplingIndices[start]
            self.times = self.times[originalIndex:]
            self.values = self.values[originalIndex:]
            # NOTE(review): the grid origin advances by exactly one sample
            # period here regardless of how far `start` moved — confirm this
            # is intended (vs. samplingTimes[start]).
            self.currentStartTime = samplingTimes[1]
            # print(f"now shift by {start}, the original was {originalIndex}")
class FlowMonitorClass():
    """Watches message flow and raises an alarm node + alarm message when
    fewer data messages than expected arrive inside a sliding time window."""

    def __init__(self, functionNode):
        self.logger = functionNode.get_logger()
        self.logger.debug("init FlowMonitorClass")
        self.functionNode = functionNode
        self.model = functionNode.get_model()
        # guards msgTimes/enoughMsgs, shared between feed() and the timer thread
        self.lock = threading.RLock()
        self.thread = threading.Thread(target=self._threadfunc)
        self.started = False

    def reset(self, data=None):
        """Re-read configuration and (only once) start the periodic check thread."""
        self.enabledNode = self.functionNode.get_child("enabled")
        self.alarmNode = self.functionNode.get_child("alarm")
        self.alarmNode.set_value(False)
        self.windowNode = self.functionNode.get_child("windowSize")
        self.expectedNode = self.functionNode.get_child("expectedNoOfMsgs")
        self.alarmFolder = self.functionNode.get_child("alarmMessagesFolder").get_target()
        self.msgTimes = []       # entries: {"time": epoch seconds, "withData": bool}
        self.enoughMsgs = False  # True once the observed window is long enough to judge
        if not self.started:
            self.thread.start()
            self.started = True

    def _threadfunc(self):
        # periodic watchdog: ensures check() also runs when no data arrives
        while True:
            # print(".")
            time.sleep(2)
            self.check(False)

    def check(self, withData=False):
        # this is called periodically from the thread and feed function
        if not self.enabledNode.get_value():
            return
        with self.lock:
            now = time.time()
            self.msgTimes.append({"time": now, "withData": withData})  # append this entry
            windowStart = now - self.windowNode.get_value()
            # now remove all "too old" msgs
            while 1:
                if self.msgTimes:
                    if self.msgTimes[0]["time"] < windowStart:
                        self.msgTimes.pop(0)
                        self.enoughMsgs = True  # if we have popped one msg, the overall window processed is long enough
                    else:
                        break
                else:
                    break
            # now see if the overall number of msgs is enough
            # now see if we have a state transition
            # count the number of msgs that actually carried data
            noDataMsgs = len([m for m in self.msgTimes if m["withData"] == True])
            # self.logger.debug(f"FlowMonitorClass enough={self.enoughMsgs} {noDataMsgs} / {len(self.msgTimes)}")#{self.msgTimes}")
            if self.enoughMsgs:
                if noDataMsgs < self.expectedNode.get_value():
                    if self.alarmNode.get_value() == False:
                        # transition into alarm mode
                        self.alarmNode.set_value(True)
                        self.__generate_alarm(self.msgTimes[0]["time"])
                        self.msgTimes = []  # flush the list
                        self.enoughMsgs = False  # start over
                else:
                    if self.alarmNode.get_value() == True:
                        self.alarmNode.set_value(False)  # not in alarm
            # self.logger.debug(f"FlowMonitorClass enough={self.enoughMsgs} alarm = {self.alarmNode.get_value()} msgs: {noDataMsgs} / {len(self.msgTimes)}") # {self.msgTimes}")

    def feed(self, data=None):
        """Pipeline hook: every incoming blob counts as one message with data."""
        if self.enabledNode.get_value():
            self.check(True)
        return data

    def flush(self, data):
        return data

    def __generate_alarm(self, lastTime):
        # create an alarm message node under the configured alarm folder
        try:
            alarmTime = dates.epochToIsoString(time.time(), zone='Europe/Berlin')
            lastTime = dates.epochToIsoString(lastTime, zone='Europe/Berlin')
            messagetemplate = {
                "name": None, "type": "alarm", "children": [
                    {"name": "text", "type": "const", "value": f"Stream data missing since {lastTime}"},
                    {"name": "level", "type": "const", "value": "automatic"},
                    {"name": "confirmed", "type": "const", "value": "unconfirmed", "enumValues": ["unconfirmed", "critical", "continue", "accepted"]},
                    {"name": "startTime", "type": "const", "value": alarmTime},
                    {"name": "endTime", "type": "const", "value": None},
                    {"name": "confirmTime", "type": "const", "value": None},
                    {"name": "mustEscalate", "type": "const", "value": True},
                    {"name": "summary", "type": "const", "value": f"21data alarm: Stream data missing since {lastTime}"}
                ]
            }
            path = self.alarmFolder.get_browse_path() + ".StreamDataAlarm_" + getRandomId()
            self.model.create_template_from_path(path, messagetemplate)
        except:
            # NOTE(review): bare except deliberately keeps the monitor thread
            # alive on any error; the failure is only logged to the model
            self.model.log_error()
            return
##############################################################################################################################################################################################################
# TEST AREA
##############################################################################################################################################################################################################
if __name__ == "__main__":
    # Ad-hoc manual tests for the Windowing class; run this module directly
    # and toggle the calls at the bottom.

    def test1():
        # basic batch insert + iterate
        # (self, samplePeriod, stepSize, maxHoleSize, samplePointsPerWindow, debug = False):
        p = Windowing(samplePeriod=2, stepSize=1, maxHoleSize=5, samplePointsPerWindow=10)
        v = np.arange(100)
        p.insert(v, v)
        for w in p.iterate():
            print(w)

    def test2():
        # a longer window with a hole
        p = Windowing(samplePeriod=0.5, stepSize=1, maxHoleSize=2, samplePointsPerWindow=5)
        v = np.append(np.arange(0, 100), np.arange(130, 200))
        for w in p.iterate(v, v):
            print(w)

    def test3():
        # check the precise shift
        p = Windowing(samplePeriod=2, stepSize=1, maxHoleSize=1, samplePointsPerWindow=6)
        v = np.arange(51)
        for w in p.iterate(v, v):
            print(w)
        v = np.arange(55, 100)
        for w in p.iterate(v, v):
            print(w)

    def test4():
        # streaming each point individually
        p = Windowing(samplePeriod=2, stepSize=1, maxHoleSize=1, samplePointsPerWindow=6)
        v = np.arange(50)
        for i in v:
            print(i)
            for w in p.iterate([i], [i]):
                print(w)

    def test5():
        # test resampling alignment: batch vs. point-by-point must match
        p = Windowing(samplePeriod=2, stepSize=1, maxHoleSize=1, samplePointsPerWindow=10)
        v = np.arange(10, 40, 1.42)
        for w in p.iterate(v, v):
            print(list(w[0]), list(w[1]))
        # same in single streaming
        print("same in single streaming")
        p = Windowing(samplePeriod=2, stepSize=1, maxHoleSize=1, samplePointsPerWindow=10)
        v = np.arange(10, 40, 1.42)
        for i in v:
            for w in p.iterate([i], [i]):
                print(list(w[0]), list(w[1]))

    def test_up1():
        # upsampling with holes
        p = Windowing(samplePeriod=0.1, stepSize=1, maxHoleSize=2, samplePointsPerWindow=10)
        v = np.append(np.arange(0, 10), np.arange(13, 20))
        for w in p.iterate(v, v):
            print(w)

    def resampling_test():
        # sanity check for searchsorted-based resampling
        t = np.arange(101)
        re1 = np.linspace(0, t[-1], 11)
        re2 = np.linspace(0, t[-1], 200)
        i1 = np.searchsorted(t, re1)
        i2 = np.searchsorted(t, re2)
        print(t[i1], t[i2])

    def timing_test():
        # typical application
        # 10sek sampling over one year = 3Mio Points
        # window i 1 hour = 360
        # we step by 5 min = 300 sec = 3 step
        # downsamping faktor 10 = 100 sec
        stepSize = 5
        samplePeriod = 100
        p = Windowing(samplePeriod=samplePeriod, stepSize=stepSize, maxHoleSize=3, samplePointsPerWindow=20)
        v = np.arange(0, 30000 * 1000, 10)
        print(f"expected no window {v.size/(stepSize*samplePeriod)}")
        start = time.time()
        count = 0
        for i in p.iterate(v, v):
            count = count + 1
            if count % 1000 == 0:
                print("count:", count, "current window len", i[0].size, i[0][0], "...", i[1][-1])
            pass
        total = time.time() - start
        # res = list(p.iterate(v,v))
        print(f"took {total} no windows:{count}, per yield = {total/count*1000}ms")

    def check_timings():
        # raw numpy timings for diff/where vs. searchsorted resampling
        total = 3 * 1000 * 1000
        a = np.arange(0, total, 0.98)
        start = time.time()
        diff = np.diff(a)
        where = a > 10
        locs = np.argwhere(where)
        print(f"diff and where on {a.size} = {time.time()-start}")
        times = np.linspace(0, total, int(total / 13))
        times = times[:-1]
        start = time.time()
        indices = np.searchsorted(a, times)
        re = a[indices]
        print(f"timing resample {a.size} -> {re.size} = {time.time()-start}")

    # main: enable the test(s) to run
    # timing_test()
    # check_timings()
    # resampling_test()
    # test1()
    test2()
    # test3()
    # test4()
    # test5()
    # lin_space_test()
    # test_up1()
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15        # seconds to back off after an RPC error
MAX_NONCE = 1000000L  # default nonce search space per getwork round (Python 2 long)

settings = {}         # populated from the CONFIG-FILE given on the command line
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for bitcoind over HTTP (Python 2, httplib)."""

    # request id counter
    # NOTE(review): `self.OBJID += 1` creates a per-instance shadow of this
    # class attribute, so ids are per-connection, not global
    OBJID = 1

    def __init__(self, host, port, username, password):
        # HTTP Basic auth header built from username:password
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # args: host, port, strict=False, timeout=30 seconds
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Perform one JSON-RPC call.

        Returns the 'result' member on success, the 'error' object if the
        server reported one, or None on transport/decode problems.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        # with data: submit a solved share; without: request new work
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate *x* to an unsigned 32-bit value."""
    # Plain 0xffffffff (no 'L' suffix) behaves identically under Python 2,
    # which auto-promotes to long, and keeps the helper valid under Python 3.
    return x & 0xffffffff

def bytereverse(x):
    """Return the 32-bit integer *x* with its four bytes in reversed order."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))

def bufreverse(in_buf):
    """Byte-swap every 32-bit word of *in_buf* (length must be a multiple of 4)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    # b''.join is byte-identical to ''.join under Python 2 (bytes is str)
    # and additionally works on bytes under Python 3.
    return b''.join(out_words)

def wordreverse(in_buf):
    """Reverse the order of the 32-bit words of *in_buf* (bytes kept intact)."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return b''.join(out_words)
class Miner:
    """One getwork mining worker.

    Python 2 only: relies on str.decode('hex')/encode('hex'), long and xrange.
    """

    def __init__(self, id):
        self.id = id
        # rescaled every round so one work() call takes ~settings['scantime'] seconds
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces for one getwork job.

        Returns (hashes_done, nonce_bin): nonce_bin is the packed winning
        nonce, or None if the scan range was exhausted without a solution.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header (shared by every nonce)
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce back into the work data and submit it."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # nonce occupies hex chars 152..159 of the 256-char data field
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """Fetch one work unit, scan it, and adapt max_nonce to scantime."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # scale the next scan range so it takes about settings['scantime'] secs
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Run iterate() forever against the configured RPC endpoint."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Entry point for one mining process: build a Miner and loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # parse the simple key=value config file
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # defaults for optional settings
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 9886
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # config values arrive as strings; normalize numeric settings
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # one worker process per configured thread
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
BasicHttpAgent.py | # pip3 install requests Flask
from IOAgent import IOAgent
from abc import ABC
from http.server import HTTPServer, BaseHTTPRequestHandler
from http.client import HTTPSConnection, HTTPConnection
from base64 import b64encode
import sseclient
import requests
import threading
import platform
import subprocess
import pprint
import cgi
import json
import logging
# NOTE(review): presumably re-exported so importers of this module also get
# the IOAgent base class — the assignment itself is otherwise a no-op.
IOAgent = IOAgent
module_logger = logging.getLogger('BasicHttpAgent')
#https://stackoverflow.com/questions/2953462/pinging-servers-in-python
def ping(host):
    """
    Returns True if host (str) responds to a ping request.
    Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
    """
    # Option for the number of packets as a function of the platform
    param = '-n' if platform.system().lower() == 'windows' else '-c'

    if host[0] == '[':  # bracketed ipv6 address, e.g. "[::1]"
        # fixed: the old fixed-width slice host[1:40] left the trailing ']'
        # on any address shorter than 39 characters, breaking the command
        close = host.find(']')
        address = host[1:close] if close != -1 else host[1:]
        command = ['ping', param, '1', '-6', address]
    else:
        # Building the command. Ex: "ping -c 1 google.com"
        command = ['ping', param, '1', host]

    return subprocess.call(command) == 0
"""
app = Flask(__name__)
@app.route("/<url>", methods = ['POST'])
def http_endpoint(url):
return {
"message": "request OK"
}, 200 """
#todo: make child classes for specific adapters for data validation/transformation depending on use-case
class BasicHttpAgent(IOAgent, ABC):
def __init__(self, *args):
    """Initialise the agent: base-class setup, logging, empty connection state."""
    super().__init__(*args)
    self.logger = logging.getLogger('BasicHttpAgent')
    self.headers = {}
    self.connection = None
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
    """Request handler that accepts JSON POSTs and forwards them to *adapter*."""

    def __init__(self, adapter, *args):
        # keep a reference to the owning agent BEFORE calling the base
        # __init__, which handles the request synchronously
        self.adapter = adapter
        super().__init__(*args)

    def set_headers(self, statuscode):
        # send status line plus a JSON content-type header
        self.send_response(statuscode)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    # https://gist.github.com/nitaku/10d0662536f37a087e1b
    def do_POST(self):
        ctype, pdict = cgi.parse_header(self.headers.get('content-type'))

        # refuse to receive non-json content
        if ctype != 'application/json':
            self.set_headers(400)
            return

        # read the message and convert it into a python dictionary
        length = int(self.headers.get('content-length'))
        message = json.loads(self.rfile.read(length))

        # acknowledge first, then hand the payload to the agent
        self.set_headers(200)
        self.wfile.write(str.encode(json.dumps({'received': 'ok'})))
        self.adapter.receive_from_downlink(message)
def initiate_agent(self, config, callback):
self.connection_host = f"{self.connection_ip}:{self.port}"
self.connection_url = f"http://{self.connection_host}" # + config url
self.host_url = "/" # + config
self.message_received_callback = callback
# https://stackoverflow.com/questions/18444395/basehttprequesthandler-with-custom-instance
# def handler(*args):
# return BasicHttpAdapter.SimpleHTTPRequestHandler(self, *args)
# self.httpd = HTTPServer(('localhost', 5000), handler)
# threading.Thread(target=self.httpd.serve_forever).start()
def send_downlink(self, message, *args):
requests.post(self.connection_url, message)
def receive_from_downlink(self, message):
#if url == self.host_url:
# parse incomming message
self.from_downlink_to_central(message)
def disconnect(self, *args):
self.on_disconnect()
def connect(self, *args):
# if ping(self.connection_ip):
self.on_connect()
def set_headers(self, headers):
self.headers = headers
def basic_auth(self, uri):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
# authenticate with client_id and client_secret
auth_string = b64encode(bytes(self.user + ':' + self.password, "utf-8")).decode("ascii")
headers = {
'Content-type': "application/x-www-form-urlencoded",
'Authorization': 'Basic %s' % auth_string
}
body = f"grant_type=client_credentials"
self.connection.request('POST', f'/{uri}', headers=headers, body=bytes(body, encoding="utf-8"))
res = self.connection.getresponse()
data = res.read()
self.logger.debug("successfully authenticated")
return data
def rpt_auth(self, uri, access_token):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
# authenticate with access token
headers = {
'Content-type': "application/x-www-form-urlencoded",
'Authorization': 'Bearer %s' % access_token
}
body = f"grant_type=urn:ietf:params:oauth:grant-type:uma-ticket&audience=policy-enforcer"
self.connection.request('POST', f'/{uri}', headers=headers, body=bytes(body, encoding="utf-8"))
res = self.connection.getresponse()
data = res.read()
self.logger.debug("successfully got RTP token")
return data
def refresh_rpt(self, uri, refresh_token):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
# authenticate with access token
headers = {
'Content-type': "application/x-www-form-urlencoded"
}
body = f"grant_type=refresh_token&refresh_token={refresh_token}&client_id={self.user}&client_secret={self.password}"
self.connection.request('POST', f'/{uri}', headers=headers, body=bytes(body, encoding="utf-8"))
res = self.connection.getresponse()
data = res.read()
self.logger.debug("successfully refreshed token")
return data
def get_stream(self, uri):
try:
headers = {
'Accept': "text/event-stream"
}
response = requests.get(uri, stream=True, headers=headers)
client = sseclient.SSEClient(response)
for event in client.events():
# pprint.pprint(json.loads(event.data))
self.message_received_callback(event.data)
except Exception as e:
self.logger.error("failed to parse message: " + str(e))
def on_message_received(self, client, config, msg):
self.logger.debug("received message")
data = msg.payload
try:
if isinstance(data, str):
payload = json.loads(json.dumps(data))
else:
payload = json.loads(data)
self.message_received_callback(payload)
except Exception as e:
self.logger.error("failed to parse message: " + str(e))
def send_message(self, uri, msg, method):
self.connection = HTTPConnection(host=self.connection_ip, port=self.port, timeout=128)
try:
self.connection.request(method, f'/{uri}', body=msg, headers=self.headers)
res = self.connection.getresponse()
data = res.read()
self.logger.debug(method + " " + uri + " returned " + str(res.status))
self.connection.close()
return res.status, data
except Exception as e:
self.logger.error("failed to parse or send message: " + str(e))
def send_secure_message(self, uri, msg, token):
self.connection = HTTPSConnection(f"{self.connection_ip}:{self.port}")
try:
headers = {
'Authorization': 'Bearer %s' % token
}
self.logger.debug(msg)
self.connection.request('POST', f'/{uri}', headers=headers, body=msg)
res = self.connection.getresponse()
data = res.read()
self.logger.debug(data)
self.connection.close()
return res.status
except Exception as e:
self.logger.error("failed to parse or send message: " + str(e))
|
run_system.py | from multiprocessing import Pool, Process
from settings import *
import os
from byte_code_feature_extraction import byte_extraction
from asm_code_feature_extraction import asm_extraction
from classification_system import classification
def byte_code_worker(datasets):
    """Run byte-code feature extraction over every dataset name in *datasets*
    using a two-process pool.

    Fix: the pool is now closed via a context manager -- the original leaked
    the worker processes (Pool was never closed/joined/terminated).
    """
    with Pool(2) as byte_pool:
        byte_pool.map(byte_extraction, datasets)
def asm_code_worker(datasets):
    """Run asm-code feature extraction over every dataset name in *datasets*
    using a two-process pool.

    Fixes: the pool is closed via a context manager (the original leaked the
    worker processes), and the copy-pasted ``byte_pool`` name is corrected.
    """
    with Pool(2) as asm_pool:
        asm_pool.map(asm_extraction, datasets)
def main():
    """Drive the malware-classification pipeline.

    ``step`` is hard-coded to the first entry, so only the feature-extraction
    stage runs: byte-code and asm-code extraction happen in two parallel
    processes. The classification branch (cross-validation or held-out test)
    is kept for manual switching.
    """
    steps = ['feature extraction', 'classification']
    step = steps[0]
    datasets = ['train', 'test']
    if step == 'feature extraction':
        print('Feature Extraction Step')
        print('=======================')
        # Run both extraction workers concurrently, then wait for both.
        workers = [
            Process(target=byte_code_worker, args=(datasets,)),
            Process(target=asm_code_worker, args=(datasets,)),
        ]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()
        print('Done!')
    if step == 'classification':
        print('Classification Step')
        print('===================')
        eval_methods = ['cv', 'test']
        method = 1
        if eval_methods[method] == 'cv':
            print('Testing with Cross validation')
            classification(TRAIN_FILE, select = False, bagging = False, test = 'cv')
        else:
            print('Testing on the test dataset')
            classification(TRAIN_FILE, bagging = True, test = TEST_FILE)


if __name__ == "__main__":
    main()
fun_multiprocessing.py | # Topic: Multiprocessing with module 'multiprocessing' and 'concurrent.futures'
#
# Author: Xuhua Huang
# Last updated: Jun 29, 2021
# Created on: Jun 29, 2021
import time
import multiprocessing
import concurrent.futures
""" Function that does not ask for arguments """
def fn_no_args(void):
    """Sleep for one second, logging start and finish.

    :param void: placeholder argument; its value is ignored.
    """
    print('[fn_no_args(void)]Sleeping for 1 seconds...')
    time.sleep(1)
    print('Done sleeping.')
""" Function that asks for an integer as argument """
def fn_with_args(seconds: int):
    """Sleep for *seconds* seconds, logging start and finish."""
    print(f'[fn_with_args(seconds: int)]Sleeping for {seconds} second(s)...')
    time.sleep(seconds)
    print('Done sleeping.')
def main():
    """Demonstrate multiprocessing.Process and concurrent.futures, timing the run.

    Bug fix: the original passed ``target=fn_no_args(void=True)`` and
    ``executor.submit(fn_with_args(2))`` -- both CALL the function in the
    parent process and hand the framework its return value (None), so nothing
    ever ran in the workers. The callable and its arguments must be passed
    separately.
    """
    # Record the start time of the program
    start_time = time.perf_counter()

    # --- multiprocessing.Process ---------------------------------------
    processes = []  # initializing an empty list
    for _ in range(10):
        # Pass the callable plus kwargs; do NOT call it here.
        process = multiprocessing.Process(target=fn_no_args, kwargs={'void': True})
        process.start()                # start the process
        processes.append(process)      # add process to existing list
    # Wait for all processes to finish and join the main process
    for process in processes:
        process.join()

    # --- concurrent.futures with a context manager ---------------------
    with concurrent.futures.ProcessPoolExecutor() as executor:
        exec_1 = executor.submit(fn_no_args, True)   # schedule, don't call
        exec_2 = executor.submit(fn_with_args, 2)    # positional arg forwarded
        # if the function has a return type, use the .result() method to
        # retrieve it, e.g. print(exec_1.result())
        # Using list comprehension
        results = [executor.submit(fn_with_args, 2) for _ in range(10)]

    # Print the execution time of the program
    finish_time = time.perf_counter()
    print(f'Program finished in {round(finish_time - start_time, 2)} second(s)')


if __name__ == '__main__':
    main()
|
client-socket-ver2.py | #Nicholas Tahan, November 11, 2020
#Client Version Two:
#This program will connect to a server already running. It will create a new thread to listen for feeback from the server.
#The client will also accept input while receiving feedback from the server
#Multiple instances of this client can connect to the server
#The user can select from the following commands:
# 1. who - server returns a list of logged in users
# 2. send all - Server sends a message to all connected users
# 3. send userID - Server sends a message only to the user with userID specified
# 4. login user_name password - server checks to see if account exists, if it does let the user login
# 5. logout - If the user is logged in, let them log out. Disconnect from the server
import socket
import sys
import select
import traceback
import time
from threading import Thread
#Port Number: 12483
#Threads are used to allow the client to search for receiving data and to wait for user input at the same time
PORT = 12483 #define port constant
IP_ADDRESS = '127.0.0.1' #Define IP address
# NOTE(review): 'global' at module scope is a no-op; these statements only
# document that the names below are shared with the functions in this module.
global logged_In
logged_In = False
global User_ID
User_ID = ''
# Last message received from the server (written by the listener thread).
global output
output = ""
global keepThread
keepThread=True#Kills the thread when user logs out
try:
    sobj = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #Create port object
except:
    print("Socket failed to create")
    sys.exit()
def main():
    """Connect to the chat server, start the listener thread, and loop over
    user commands until logout or connection loss.

    Bug fix: OSError is a subclass of Exception, so the original
    ``except OSError`` clause placed AFTER ``except Exception`` was
    unreachable -- connection loss was never detected here. The handlers are
    now ordered narrowest-first: expected socket timeouts are ignored, real
    OS-level failures end the session, anything else is reported.
    """
    global logged_In
    global keepThread
    logged_In = False
    print("My chat Room Client. Version Two.")
    print("Commands: login, send all, send UserID, logout, newuser, who")
    # Ask the user what they want to do
    try:
        sobj.connect((IP_ADDRESS, PORT))
        sobj.settimeout(2)  # Sets timeout for listening
    except:
        print("Unable to connect to server")
        exit()
    Thread(target=listen_for_data, args=()).start()  # listener thread for server data
    while True:
        try:
            if keepThread == True:
                usr_input = input("--> ")
                logged_In = menu(usr_input, logged_In)
            else:
                break
        except socket.timeout:
            pass  # expected: the 2s socket timeout; keep prompting
        except OSError:
            print("Lost connection to server")
            keepThread = False
            break
        except Exception as error:
            if error.args and error.args[0] != "timed out":
                print("Error: ", error)
    sobj.close()
#Thread actively listens for data from the server
def listen_for_data():
    """Listener-thread body: print every message the server sends until
    ``keepThread`` is cleared.

    Bug fix: the original ``except OSError`` clause after
    ``except Exception`` was unreachable (OSError subclasses Exception), so
    a lost connection never stopped the thread. Timeouts (every 2s, from the
    socket timeout set in main) are expected and simply continue the loop.
    """
    global output
    global keepThread
    while keepThread:
        try:
            msg = sobj.recv(2048).decode("utf8")
            output = msg   # stash for login()/logout() to inspect
            print(msg)     # If server sends data, prints the data to the screen
        except socket.timeout:
            continue  # expected: recv timed out; keep listening
        except OSError:
            print("lost connection to server")
            keepThread = False
        except Exception as error:
            print("Error: ", error)
#sends client input to server
def send_msg(usr_input):
    """Encode the user's command as UTF-8 and transmit it to the server."""
    payload = usr_input.encode("utf8")
    sobj.sendall(payload)
#Sends logout to server and logs out the user
def logout(user_name):
    """Send 'logout' to the server, wait briefly for its echo, and clear the
    login state when the expected confirmation arrives."""
    global logged_In
    global keepThread
    keepThread = False  # stop the listener thread
    sobj.sendall("logout".encode("utf8"))
    time.sleep(2)  # give the listener thread time to capture the reply
    expected = "Logging out user " + user_name
    if output == expected:
        logged_In = False
        print("Successfully logged out")
#Login to the server by passing userID and password, will return error if unable to login
def login(userID, password, usr_input):
    """Send the raw login command and report success.

    The listener thread stores the server's reply in the module-level
    ``output``; a reply containing "Error" means the login failed.
    """
    global output
    global User_ID
    User_ID = userID
    sobj.send(usr_input.encode("utf8"))
    time.sleep(2)  # Waits for the server to respond
    return "Error" not in output
#Checks the user input to see if new account is valid. From there if all the information is valid, it will send the data to the server.
#The server will check to see if the account already exists. If it does it will not allow a new account to be made. If it does not a new account will be made.
def new_User(userID, password, usr_input):
    """Validate a prospective account locally, then forward it to the server.

    Returns -1 (without touching the socket) when the user id or password is
    malformed; the server performs the duplicate-account check itself.
    """
    if len(userID) > 32:
        print("Error UserID is too long. UserID must be less than 32 characters")
        return -1
    if len(password) < 4 or len(password) > 8:
        print("Error: Password must be between 4 and 8 characters in length")
        return -1
    sobj.sendall(usr_input.encode("utf8"))
#Checks the input of the user, it checks the first word of the input. The first word dictates a command
def menu(usr_input, logged_In):
    """Dispatch one user command by its first word and return the (possibly
    updated) login state.

    Bug fixes: the original indexed ``split()[1]``/``split()[2]`` for the
    login command without checking the word count -- "login" with missing
    arguments crashed the client with IndexError -- and an empty input line
    crashed on ``split()[0]``. Both now print an error instead.

    :param usr_input: raw command line typed by the user.
    :param logged_In: current login state.
    :return: the new login state.
    """
    global User_ID
    words = usr_input.split()
    if not words:
        # Robustness fix: an empty line used to raise IndexError.
        print("Error unknown command!")
        return logged_In
    parse_string = words[0]
    wordCount = len(words)  # Ensures the user enters in the correct amount of arguments
    # User must not already be logged in to login
    if parse_string == "login":
        if logged_In == False:
            if wordCount == 3:
                logged_In = login(words[1], words[2], usr_input)
            else:
                print("Incorrect number of arguments!")
        else:
            print("ERROR: User is already logged in")
    elif parse_string == "newuser":
        if logged_In == False:
            if wordCount == 3:
                new_User(words[1], words[2], usr_input)
            else:
                print("Incorrect number of arguments!")
        else:
            print("Error: Must be logged out to create new account")
    # "send all" and "send userID" both go through here; the server
    # differentiates between them.
    elif parse_string == "send":
        if logged_In == False:
            print("Error: You must login first!")
        else:
            send_msg(usr_input)
    elif parse_string == "logout":
        if logged_In == True:
            logout(User_ID)
        else:
            print("You are already logged out!")
    # The client just forwards "who"; the server builds the response.
    elif parse_string == "who":
        send_msg(usr_input)
    else:
        print("Error unknown command!")
    return logged_In
#Call main
main() |
android_hooks.py | # -*- coding: utf-8 -*-
# An android event hook via getevent.
# Only ABS_MT_POSITION_X(Y) events are handled.
#
# Basic input: TouchDown(D), TouchUp(U), TouchMove(M)
# Basic timeouts: TouchPressTimeout(P), TouchFollowTimeout(F), TouchMoveStopTimeout(S)
# guestures are defined as follows:
# Tap/Touch/Click: DM?UF
# TapFollow: (DM?U)+DM?UF
# LongPress: DP, may be followed by Drag or Swipe
# Drag: D?M+S, may be followed by Drag or Swipe
# Swipe/Fling: D?M+U, difference with `Drag` is that `TouchMoveStopTimeout` cannot be fired.
# 2-Finger-Pinch: distance changing
# 2-Finger-Drag: distance hold while moving
# where '?' after M means a little movement and '+' means a large one.
# other guestures are ignored.
import re
import math
import time
import numpy as np
import subprocess
import threading
import Queue
import traceback
# Public API of this module.
__all__ = ['AndroidInputHookManager', 'HookManager', 'HookConstants']
# global, max MultiTap count. Set to 1 to disable MultiTap, 0 for infinite.
_MULTI_TAP_NUM = 3
def set_multitap(count):
    """Set the global multi-tap limit (0 = unlimited, 1 = disable MultiTap)."""
    if count < 0:
        # NOTE: Python 2 print statement -- this module is Python 2 only.
        print 'Cannot set to negative count.'
        return
    global _MULTI_TAP_NUM
    _MULTI_TAP_NUM = int(count)
class HookConstants:
    """Integer codes for raw events, synthetic timeouts, and gestures.

    Encoding note: '<<' binds tighter than '^', so e.g. ``1 << 3 ^ 1`` is
    ``(1 << 3) ^ 1 == 9``; each family shares a distinct high bit, which is
    what makes tests like ``msg & KEY_ANY`` work.
    """
    # basic events
    TOUCH_ANY = 1 << 3
    TOUCH_DOWN = 1 << 3 ^ 1
    TOUCH_UP = 1 << 3 ^ 2
    TOUCH_MOVE = 1 << 3 ^ 3
    # only used for gesture analyze
    TOUCH_PRESS_TIMEOUT = 1 << 3 ^ 4
    TOUCH_FOLLOW_TIMEOUT = 1 << 3 ^ 5
    TOUCH_MOVESTOP_TIMEOUT = 1 << 3 ^ 6
    # DOWN is odd, UP is even & DONW + 1 == UP
    KEY_ANY = 1 << 4
    KEY_HOME_DOWN = 1 << 4 ^ 1
    KEY_HOME_UP = 1 << 4 ^ 2
    KEY_BACK_DOWN = 1 << 4 ^ 3
    KEY_BACK_UP = 1 << 4 ^ 4
    KEY_MENU_DOWN = 1 << 4 ^ 5
    KEY_MENU_UP = 1 << 4 ^ 6
    KEY_POWER_DOWN = 1 << 4 ^ 7
    KEY_POWER_UP = 1 << 4 ^ 8
    KEY_VOLUMEDOWN_DOWN = 1 << 4 ^ 9
    KEY_VOLUMEDOWN_UP = 1 << 4 ^ 10
    KEY_VOLUMEUP_DOWN = 1 << 4 ^ 11
    KEY_VOLUMEUP_UP = 1 << 4 ^ 12
    # gestures
    GST_TAP = 1 << 5 ^ 1
    GST_MULTI_TAP = 1 << 5 ^ 2
    GST_LONG_PRESS = 1 << 5 ^ 3
    GST_LONG_PRESS_RELEASE = 1 << 5 ^ 4
    GST_DRAG = 1 << 5 ^ 5
    GST_SWIPE = 1 << 5 ^ 6
    GST_PINCH_IN = 1 << 5 ^ 7
    GST_PINCH_OUT = 1 << 5 ^ 8
# Short alias for the constants class, used throughout this module.
HC = HookConstants
# Debug-print names: single letters for raw/timeout events (these letters are
# also the alphabet of the regex-based recognizer), words for gestures.
HCREPR = {
    HC.TOUCH_DOWN : 'D',
    HC.TOUCH_UP : 'U',
    HC.TOUCH_MOVE : 'M',
    HC.TOUCH_PRESS_TIMEOUT : 'P',
    HC.TOUCH_FOLLOW_TIMEOUT : 'F',
    HC.TOUCH_MOVESTOP_TIMEOUT : 'S',
    HC.GST_TAP: 'Tap',
    HC.GST_MULTI_TAP: 'MultiTap',
    HC.GST_LONG_PRESS: 'LongPress',
    HC.GST_LONG_PRESS_RELEASE: 'PressRelease',
    HC.GST_DRAG: 'Drag',
    HC.GST_SWIPE: 'Swipe',
    HC.GST_PINCH_IN: 'PinchIn',
    HC.GST_PINCH_OUT: 'PinchOut',
}
class Event(object):
    """Base class for all hook events: a timestamp plus a message code."""
    def __init__(self, time, msg):
        self.time = time
        self.msg = msg
    def __str__(self):
        return '%s_%s' % (self.__class__.__name__, HCREPR.get(self.msg, self.msg))
    def __repr__(self):
        # NOTE: dict.iteritems() is Python 2 only.
        return '%s(%s)' % (self.__class__.__name__,
            ', '.join(['%s=%s' % (k, v) for k, v in self.__dict__.iteritems()]))
class KeyEvent(Event):
    """A key press/release event; normalizes key names to KEYCODE_* form."""

    def __init__(self, time, msg, key):
        super(KeyEvent, self).__init__(time, msg)
        # convert to KEYCODE_xxx for 'adb input keyevent xxx'
        if key.startswith('KEY_'):
            key = 'KEYCODE_' + key[len('KEY_'):]
        self.key = key
class TouchEvent(Event):
    """A touch event on one slot, carrying position, pressure, and size."""

    def __init__(self, time, msg, slotid, x, y, pressure, touch_major, **extra):
        super(TouchEvent, self).__init__(time, msg)
        self.slotid = slotid
        self.x, self.y = x, y
        self.pressure = pressure
        self.touch_major = touch_major
        # Stash any extra channels (e.g. angle/velocity for moves) as
        # attributes, mirroring a __dict__.update().
        for name, value in extra.items():
            setattr(self, name, value)
class TouchTimeoutEvent(Event):
    """A synthetic timeout event (press/follow/move-stop) for one touch slot."""
    def __init__(self, time, msg, slotid):
        super(TouchTimeoutEvent, self).__init__(time, msg)
        self.slotid = slotid
class GestureEvent(Event):
def __init__(self, msg, track):
# suffixes: s for start, e for end.
# two-finger guestures need two tracks
if msg in (HC.GST_PINCH_IN, HC.GST_PINCH_OUT):
t1, t2 = track[0], track[1]
ts = min(t1[0].time, t2[0].time)
te = max(t1[-1].time, t2[-1].time)
else:
es, ee = track[0], track[-1]
ts, te = track[0].time, track[-1].time
print 'Gesture', HCREPR.get(msg, msg), ''.join([HCREPR.get(e.msg, e.msg) for e in track]), (es.x, es.y), (ee.x, ee.y)
if msg in (HC.GST_SWIPE, HC.GST_DRAG):
# TODO: check for corners for complicated trace
self.points = [(es.x, es.y), (ee.x, ee.y)]
else:
self.points = [(es.x, es.y), (ee.x, ee.y)]
super(GestureEvent, self).__init__(ts, msg)
self.duration = te - ts
# Maximum number of simultaneously tracked touch slots (fingers).
SLOT_NUM = 5
# Column indices into a per-slot status row: x, y, velocity radius/angle,
# touch-major, pressure; FIELD_NUM is the row width.
_X, _Y, _VR, _VA, _MJ, _PR, FIELD_NUM = range(7)
# Sentinel "far away" value used to mark empty slots in the status matrices.
INF = 9999
class InputParser(object):
    """Parses ``adb shell getevent -lt`` output lines into Key/Touch events.

    Keeps two SLOT_NUM x FIELD_NUM status matrices: a committed one
    (``_status``) and a live one (``_temp_status``). When a sync batch moves
    a slot far enough, the corresponding TouchEvent is emitted on the queue.
    """
    # One getevent line: "[ time] /dev/...: TYPE CODE VALUE"
    _pat = re.compile('\[\s*(?P<time>[0-9.]+)\] (?P<device>/dev/.*): +(?P<type>\w+) +(?P<code>\w+) +(?P<value>\w+)')
    # Minimum movement (in device units) before a TOUCH_MOVE is emitted.
    _move_radius = 10

    def __init__(self, queue):
        # Offset from getevent timestamps to wall-clock time, set on first line.
        self.timediff = None
        self.queue = queue
        # the 'standard' status temp_status is compared to.
        # if changes are great enough, new event are emitted.
        # velocity will be calculated for touch-move events.
        self._status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
        self._status_time = 0
        # realtime status, minor changes are cumulated
        self._temp_status = np.ones((SLOT_NUM, FIELD_NUM), dtype=int) * (-INF)
        self._temp_status_time = 0
        self._touch_batch = []
        self._curr_slot = 0

    def feed(self, line):
        """Consume one raw getevent line and dispatch by event type."""
        # print line
        m = self._pat.search(line)
        if not m:
            return
        _time, _device, _type, _code, _value = m.groups()
        _time = float(_time)
        if self.timediff is None:
            self.timediff = time.time() - _time
        _time = self.timediff + _time
        try:
            # Values are hex; non-numeric ones (e.g. key names) stay strings.
            _value = int(_value, 16)
        except:
            pass
        if _type == 'EV_SYN':
            if _code in ('SYN_REPORT', 'SYN_MT_REPORT'):
                try:
                    self._process_touch_batch()
                except IndexError: # there might be a 6th finger, ignore that.
                    self._touch_batch = []
            elif _code == 'SYN_DROPPED':
                self._touch_batch = []
            else:
                # print 'unknown syn code', _code
                pass
        elif _type == 'EV_KEY':
            self.emit_key_event(_time, _code, _value)
        elif _type == 'EV_ABS':
            self._touch_batch.append((_time, _device, _type, _code, _value))
        else:
            # print 'unknown input event type', _type
            pass

    def emit_key_event(self, _time, _code, _value):
        """Emit a KeyEvent when CODE_VALUE maps to a known HookConstant."""
        name = '%s_%s' % (_code, _value)
        msg = getattr(HC, name, None)
        if msg is None:
            return
        event = KeyEvent(_time, msg, _code)
        self.queue.put(event)

    def emit_touch_event(self, event):
        # Single funnel so subclasses could intercept touch events.
        self.queue.put(event)

    def _process_touch_batch(self):
        '''a batch syncs in about 0.001 seconds.'''
        if not self._touch_batch:
            return
        _time = self._temp_status_time
        changed = False
        for (_time, _device, _type, _code, _value) in self._touch_batch:
            if _code == 'ABS_MT_TRACKING_ID':
                if _value == 0xffffffff:
                    # Tracking id -1: the finger lifted, clear the slot.
                    self._temp_status[self._curr_slot] = -INF
                    changed = True
                else:
                    pass
            elif _code == 'ABS_MT_SLOT':
                self._curr_slot = _value
            else:
                if _code == 'ABS_MT_POSITION_X':
                    self._temp_status[self._curr_slot,_X] = _value
                    changed = True
                elif _code == 'ABS_MT_POSITION_Y':
                    self._temp_status[self._curr_slot,_Y] = _value
                    changed = True
                elif _code == 'ABS_MT_PRESSURE':
                    self._temp_status[self._curr_slot,_PR] = _value
                elif _code == 'ABS_MT_TOUCH_MAJOR':
                    self._temp_status[self._curr_slot,_MJ] = _value
                else:
                    print 'Unknown code', _code
        self._temp_status_time = _time
        self._touch_batch = []
        if not changed:
            return
        # check differences, if position changes are big enough then emit events
        diff = self._temp_status - self._status
        dt = self._temp_status_time - self._status_time
        emitted = False
        for i in range(SLOT_NUM):
            arr = self._temp_status[i]
            oldarr = self._status[i]
            dx, dy = diff[i,_X], diff[i,_Y]
            if dx > INF or dy > INF:
                # touch begin (slot jumped from -INF to a real coordinate)
                event = TouchEvent(_time, HC.TOUCH_DOWN, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ])
                self.emit_touch_event(event)
                emitted = True
            elif dx < -INF or dy < -INF:
                # touch end (slot fell back to -INF); report the last position
                event = TouchEvent(_time, HC.TOUCH_UP, i, oldarr[_X], oldarr[_Y], oldarr[_PR], oldarr[_MJ])
                self.emit_touch_event(event)
                emitted = True
            else:
                r, a = radang(float(dx), float(dy))
                if r > self._move_radius:
                    v = r / dt
                    event = TouchEvent(_time, HC.TOUCH_MOVE, i, arr[_X], arr[_Y], arr[_PR], arr[_MJ], angle=a, velocity=v)
                    self.emit_touch_event(event)
                    emitted = True
        if not emitted:
            return
        # Commit the live status only when something was emitted, so small
        # movements keep accumulating against the last committed state.
        self._status = self._temp_status.copy()
        self._status_time = self._temp_status_time
def radang(x, y):
    '''return (radius, angle) of a vector(x, y)'''
    # Axis-aligned cases first: angle is 0/90/180/270 exactly.
    if x == 0:
        return (0, 0) if y == 0 else (abs(y), 90 + 180 * (y < 0))
    if y == 0:
        return abs(x), 180 * (x < 0)
    # General case: atan gives (-90, 90); shift into [0, 360) by quadrant.
    radius = math.hypot(x, y)
    angle = math.degrees(math.atan(y / x))
    if x < 0:
        angle += 180       # quadrants II and III
    elif y < 0:
        angle += 360       # quadrant IV
    return radius, angle
class GestureRecognizer(object):
    """Base recognizer: drains the event queue on a daemon thread, fires
    synthetic timeout events, and dispatches registered callbacks.

    Subclasses implement ``analyze_tracks`` to turn event streams into
    gestures.
    """
    double_tap_delay = 0.5   # max gap (s) between taps of a multi-tap
    long_press_delay = 1     # hold time (s) before a long press fires
    move_stop_delay = 0.2    # idle time (s) after a move => "move stop"
    pinch_difference_square = 3000

    def __init__(self, queue):
        self.queue = queue
        self.dispatch_map = {}            # msg code -> callback
        self.running = False
        self.touches = [None] * SLOT_NUM  # last event per slot, for timeouts
        # used for recognition
        self.tracks = [None for i in range(SLOT_NUM)]
        self.track_slots = set()

    def register(self, keycode, func):
        # Register *func* to be called for events carrying *keycode*.
        self.dispatch_map[keycode] = func

    def start(self):
        """Start the processing thread (idempotent)."""
        if self.running:
            return
        self.running = True
        t = threading.Thread(target=self.process)
        t.setDaemon(True)
        t.start()

    def stop(self):
        self.running = False

    def process(self):
        '''handle events and trigger time-related events'''
        timediff = 0  # queue-event time -> wall-clock offset
        while True:
            try:
                time.sleep(0.001)
                event = self.queue.get_nowait()
                self.handle_event(event)
                if event.msg & HC.KEY_ANY:
                    continue
                if timediff == 0:
                    timediff = time.time() - event.time
                self.touches[event.slotid] = event
            except Queue.Empty:
                if not self.running:
                    break
                # No fresh events: synthesize per-slot timeout events.
                now = time.time() - timediff
                for i in range(SLOT_NUM):
                    e = self.touches[i]
                    if e is None:
                        continue
                    if e.msg == HC.TOUCH_DOWN and now - e.time > self.long_press_delay:
                        self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_PRESS_TIMEOUT, i))
                        self.touches[i] = None
                    elif e.msg == HC.TOUCH_UP and now - e.time > self.double_tap_delay:
                        self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_FOLLOW_TIMEOUT, i))
                        self.touches[i] = None
                    elif e.msg == HC.TOUCH_MOVE and now - e.time > self.move_stop_delay:
                        self.analyze_tracks(TouchTimeoutEvent(now, HC.TOUCH_MOVESTOP_TIMEOUT, i))
                        self.touches[i] = None
            except:
                traceback.print_exc()
        print 'process done.'

    def handle_event(self, event):
        """Dispatch the specific, the family-wide, and the analysis hooks."""
        self.dispatch_event(event.msg, event)
        if event.msg & HC.KEY_ANY:
            self.dispatch_event(HC.KEY_ANY, event)
        else:
            self.dispatch_event(HC.TOUCH_ANY, event)
            self.analyze_tracks(event)

    def dispatch_event(self, msg, event):
        func = self.dispatch_map.get(msg)
        if func is None:
            return
        try:
            func(event)
        except:
            traceback.print_exc()

    def analyze_tracks(self, event):
        # Overridden by subclasses to perform gesture recognition.
        pass

    def handle_gesture(self, msg, tracks):
        """Build a GestureEvent from *tracks* and invoke its callback."""
        event = GestureEvent(msg, tracks)
        func = self.dispatch_map.get(msg)
        if func is None:
            return
        try:
            func(event)
        except:
            traceback.print_exc()
## NOT COMPLEMENTED ##
class SimpleGestureRecognizer(GestureRecognizer):
    """Experimental two-finger recognizer (marked NOT COMPLEMENTED above):
    mostly prints track strings and distance deltas for debugging."""
    N_FINGER = 2

    def analyze_tracks(self, event):
        # handle one-finger and two-finger gestures only
        # means a third finger will be ignored even if one of the
        # first two fingers leaves the screen.
        i = event.slotid
        # begin guesture when touch down
        if event.msg == HC.TOUCH_DOWN:
            if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
                return
            if self.tracks[i] is None:
                self.tracks[i] = []
                self.track_slots.add(i)
            self.tracks[i].append(event)
            return
        if self.tracks[i] is None:
            return
        if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
            self.tracks[i] = []
        elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
            # print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'long press'
            self.tracks[i] = []
        elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
            # print ''.join([HCREPR.get(e.msg) for e in self.tracks[i]]), 'drag'
            self.tracks[i] = []
            if len(self.track_slots) == 2:
                for s in self.track_slots:
                    print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
                print
        elif event.msg == HC.TOUCH_UP:
            self.tracks[i].append(event)
            if len(self.track_slots) == 2:
                for s in self.track_slots:
                    print s, ''.join([HCREPR.get(e.msg) for e in self.tracks[s]])
                print
            self.tracks[i] = None
            self.track_slots.discard(i)
        else: # TOUCH_MOVE
            self.tracks[i].append(event)
            return
        # check for pinch/pan
        if len(self.track_slots) == 2:
            t1, t2 = [self.tracks[s] for s in self.track_slots]
            if len(t1) == 0 or len(t2) == 0 or len(t1) + len(t2) < 6:
                return
            # make copy and check distance changing
            t1, t2 = t1[:], t2[:]
            dists = []
            # Walk both tracks backwards in time order, collecting up to five
            # squared inter-finger distances.
            while len(dists) < 5:
                e1, e2 = t1[-1], t2[-1]
                dx, dy = e1.x-e2.x, e1.y-e2.y
                dists.append(dx*dx+dy*dy)
                if e1.time < e2.time:
                    if len(t2) == 1:
                        break
                    else:
                        t2.pop()
                else:
                    if len(t1) == 1:
                        break
                    else:
                        t1.pop()
            print [dists[j+1]-dists[j] for j in range(len(dists)-1)]
            # just keep latest position
            for s in self.track_slots:
                self.tracks[s] = self.tracks[s][-1:]
class RegexpGestureRecognizer(GestureRecognizer):
    """One-finger recognizer: encodes each track as a string of D/M/U letters
    (see HCREPR) and matches regular expressions against it on each timeout
    or touch-up to detect taps, drags, swipes, and long presses."""
    N_FINGER = 1

    def analyze_tracks(self, event):
        # handle one-finger gestures only
        i = event.slotid
        # begin guesture when touch down
        if event.msg == HC.TOUCH_DOWN:
            if len(self.track_slots) == self.N_FINGER and i not in self.track_slots:
                return
            if not self.tracks[i]:
                self.tracks[i] = []
                self.track_slots.add(i)
            self.tracks[i].append(event)
            return
        if self.tracks[i] is None:
            return
        # s: the current track encoded one letter per event, e.g. "DMU".
        s = ''.join([HCREPR.get(e.msg) for e in self.tracks[i]])
        if event.msg == HC.TOUCH_FOLLOW_TIMEOUT:
            # No follow-up tap arrived: the tap sequence is finished.
            if re.match('^DM?U$', s):
                self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
            elif re.match('^(DM?U)+DM?U$', s):
                self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
            self.tracks[i] = None
            self.track_slots.discard(i)
        elif event.msg == HC.TOUCH_MOVESTOP_TIMEOUT:
            # Finger stopped while still down: a drag segment.
            if re.match('^D?MM+$', s):
                self.handle_gesture(HC.GST_DRAG, self.tracks[i][:])
                self.tracks[i] = []
        elif event.msg == HC.TOUCH_PRESS_TIMEOUT:
            if s == 'D':
                self.handle_gesture(HC.GST_LONG_PRESS, self.tracks[i][:])
                self.tracks[i] = []
        elif event.msg == HC.TOUCH_UP:
            self.tracks[i].append(event) # note: it's not the same with s after add
            if s == '':
                # Track was reset by a long press: this is the release.
                self.handle_gesture(HC.GST_LONG_PRESS_RELEASE, [event])
            elif re.match('^D?MM+$', s):
                self.handle_gesture(HC.GST_SWIPE, self.tracks[i][:])
                self.tracks[i] = []
            elif _MULTI_TAP_NUM == 1 and re.match('^DM?$', s):
                self.handle_gesture(HC.GST_TAP, self.tracks[i][:])
                self.tracks[i] = []
            elif _MULTI_TAP_NUM > 1 and re.match('^(DM?U){%d}DM?$' % (_MULTI_TAP_NUM-1,), s):
                self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:])
                self.tracks[i] = []
        elif event.msg == HC.TOUCH_MOVE:
            if re.match('^(DU)+D$', s):
                # Earlier tap(s) followed by a press that is now moving:
                # flush the finished tap(s) first, keep only the new press.
                if s == 'DUD':
                    self.handle_gesture(HC.GST_TAP, self.tracks[i][:-1])
                else:
                    self.handle_gesture(HC.GST_MULTI_TAP, self.tracks[i][:-1])
                self.tracks[i] = self.tracks[i][-1:]
            self.tracks[i].append(event)
# States for the state-machine recognizer below.
NOTACTIVE, ACTIVE, STAGE_1, STAGE_2, TRIGGERED = range(5)
## NOT COMPLEMENTED ##
class StateMachineGestureRecognizer(GestureRecognizer):
    """Experimental recognizer (marked NOT COMPLEMENTED above): drives one
    state machine per gesture and prints when any machine triggers."""
    # state_map[gesture][current_state][event msg] -> next state
    state_map = {
        HC.GST_TAP: {
            NOTACTIVE: { HC.TOUCH_DOWN : ACTIVE },
            ACTIVE: {
                HC.TOUCH_MOVE: STAGE_1,
                HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
                HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
            },
            STAGE_1: {
                HC.TOUCH_MOVE: NOTACTIVE,
                HC.TOUCH_PRESS_TIMEOUT : NOTACTIVE,
                HC.TOUCH_FOLLOW_TIMEOUT : TRIGGERED,
            }
        },
        HC.GST_SWIPE: {
            NOTACTIVE: { HC.TOUCH_DOWN: ACTIVE },
            ACTIVE: { HC.TOUCH_UP: NOTACTIVE, HC.TOUCH_MOVE: STAGE_1},
            STAGE_1: { HC.TOUCH_UP: NOTACTIVE, HC.TOUCH_MOVE: STAGE_2 },
            STAGE_2: { HC.TOUCH_UP: TRIGGERED, HC.TOUCH_MOVESTOP_TIMEOUT: TRIGGERED},
        },
    }

    def __init__(self, queue):
        super(self.__class__, self).__init__(queue)
        # Current state per gesture; all machines start NOTACTIVE.
        self.state = {}
        for k in self.state_map:
            self.state[k] = NOTACTIVE
        print self.state_map

    def analyze_tracks(self, event):
        # Advance every gesture's machine by this event (missing transitions
        # leave the state unchanged). NOTE: iteritems() is Python 2 only.
        for k, v in self.state.iteritems():
            s = self.state_map.get(k, {}).get(v, {}).get(event.msg)
            if s is not None:
                self.state[k] = s
        triggered = False
        for k, v in self.state.iteritems():
            if v == TRIGGERED:
                print 'trigger event', k
                triggered = True
        if triggered:
            # Reset all machines once any gesture fires.
            for k in self.state:
                self.state[k] = NOTACTIVE
class AndroidInputHookManager(object):
    """Spawns ``adb shell getevent -lt``, parses its output into events, and
    feeds them to a gesture recognizer.

    Reconnects automatically while the device still reports state 'device'.
    """
    def __init__(self, serial=None, processor_class=RegexpGestureRecognizer):
        self._serial = serial           # adb serial; None = default device
        self.running = False
        self._queue = Queue.Queue()     # parser -> recognizer channel
        self._listener = None           # the adb getevent subprocess
        self._parser = InputParser(self._queue)
        self._processor = processor_class(self._queue)

    def set_serial(self, serial):
        self._serial = serial

    def register(self, keycode, func):
        '''register hook function'''
        self._processor.register(keycode, func)

    def hook(self):
        """Start the recognizer thread and the adb listener thread."""
        self._processor.start()
        self.running = True
        t = threading.Thread(target=self._run_hook)
        t.setDaemon(True)
        t.start()

    def _run_hook(self):
        """Listener-thread body: stream getevent lines into the parser."""
        cmd = ['adb']
        if self._serial:
            cmd.extend(['-s', self._serial])
        cmd.extend(['shell', 'getevent', '-lt'])
        while True:
            # start listener
            self._listener = p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            while True:
                try:
                    line = p.stdout.readline().strip()
                    if not line:
                        if p.poll() is not None:
                            break
                        continue
                    self._parser.feed(line)
                except KeyboardInterrupt:
                    p.kill()
                except:
                    p.kill()
                    traceback.print_exc()
            if not self.running:
                break
            # adb process died: check whether the device is still attached.
            state = subprocess.check_output(['adb', '-s', self._serial, 'get-state']).strip()
            if state != 'device':
                print 'adb status(%s) wrong! stop hook.' % (state,)
                break
            print 'adb getevent died, reconnecting...'
            time.sleep(1)

    def unhook(self):
        """Stop the recognizer and kill the adb subprocess."""
        self.running = False
        self._processor.stop()
        if self._listener:
            self._listener.kill()
# Public alias (see __all__).
HookManager = AndroidInputHookManager
if __name__ == '__main__':
    # Smoke test: hook the default device and run until Ctrl-C.
    hm = AndroidInputHookManager(processor_class=RegexpGestureRecognizer)
    hm.hook()
    while True:
        try:
            time.sleep(0.1)
        except KeyboardInterrupt:
            break
    hm.unhook()
|
timeline_window.py | #!/usr/bin/env python
# System modules
import sys,os
import random
import unittest
import threading, time
import colorsys
import math
import bisect
import cairo, pangocairo
import gtk, gtk.glade
# TREX modules
from TREX.core.db_core import DbCore,Timeline
from TREX.core.assembly import Assembly,Entity,Rule,Token,Slot,Variable
from TREX.widgets_gtk import trex_glade_path
##############################################################################
# Timeline
# This class represents a TREX timeline and is used for keeping track of
# tokens and drawing state.
##############################################################################
class TimelineSprite():
    """Bookkeeping and drawing state for one TREX timeline: tracks active
    tokens, hit-test regions, and drawing extents."""
    # Timeline constants
    INTERNAL, EXTERNAL = range(2)

    def __init__(self, timeline):
        # The wrapped TREX timeline object
        self.tl = timeline
        # Reactor panel that owns this timeline (None until assigned)
        self.reactor_panel = None
        # Active-token bookkeeping
        self.active_tokens = []
        self.new_active_tokens = []
        # Hit-testing data: region boundaries and the tokens they map to
        self.hit_boundaries = []
        self.hit_tokens = []
        # Drawing extents
        self.earliest_tick = float("Inf")
        self.earliest_start = 0
        self.latest_end = 0
        self.latest_tick = float("Inf")
        self.row = 0
        # Seed with the timeline's current tokens
        self.add_tokens(timeline.tokens)

    def add_tokens(self, tokens):
        """Reset new_active_tokens and repopulate it with the active tokens
        (those at slot_index 0) from *tokens*."""
        self.new_active_tokens = [tok for tok in tokens if tok.slot_index == 0]

    def get_color(self):
        """Return this timeline's RGB color: the owning reactor panel's, or
        neutral gray when unowned."""
        panel = self.reactor_panel
        return panel.color if panel else (0.3, 0.3, 0.3)

    def get_hit_token(self, x):
        """Return (token, boundary) for the region containing *x*, or
        (None, 0) when *x* lies past the last boundary."""
        idx = bisect.bisect_left(self.hit_boundaries, x)
        if idx >= len(self.hit_tokens):
            return (None, 0)
        return (self.hit_tokens[idx], self.hit_boundaries[idx])
##############################################################################
# ReactorPanel
# This is a GTK widget container class that draws all of the timelines for
# a given reactor. It has two cairo drawing contexts which are separated by
# a slider.
#
# ReactorPanel currently has two drawing modes for timelines:
# Compact: All tokens are compressed to the smallest area necessary to
# print their names. This shows only temporal ordering.
# Metric: All tokens are expanded (or compressed) to maintiain relative
# scale over time. This shows both temporal ordering and
# temporal scale, which makes it harder to observe terse tokens
# in the same context as long-running ones.
##############################################################################
class ReactorPanel():
    """GTK widget container that draws all of the timelines for one reactor.

    Two cairo drawing areas (timeline name labels and timeline rows) are
    kept synchronized through shared GTK adjustments.  Most settings below
    are class-level so every reactor tab shares the same view configuration.
    """
    # Constants for drawing
    ROW_HEIGHT = 16
    ROW_SPACE = 12
    ROW_STEP = ROW_HEIGHT+ROW_SPACE
    # Constants for labeling
    LABEL_MARGIN = 8
    # Interaction
    Ruler = 0                      # x position of the ruler line (0 = no ruler)
    H_adj = gtk.Adjustment()       # horizontal scroll shared by all panels
    # Static variables
    LabelWidth = 200               # pixel width of the label column (grows to fit)
    TokenCache = 50                # ticks after which stale tokens are dropped
    TokenHistory = 1               # ticks of token history to keep drawing
    Center = 0
    PastWidth = 500                # pixel width of the "past" region
    FutureWidth = 500              # pixel width of the "future" region
    MetricTime = False             # True: scale token width by duration; False: compact
    TimeScale = 1.0                # pixels per tick when MetricTime is on
    ViewTokenKeys = False          # True: append token keys to token labels
    TokenSpace = 2                 # pixel gap between adjacent tokens
    # Callback containers
    # ContextCallbacks is a dictionary used for registering extensions to
    # the TREX gui. This dict stores callbacks with string keys. At runtime
    # when a user right-clicks a token, he or she is presented with the
    # string keys from this dict. When one of the labels is activated,
    # the function in the value part of the dict is called with the arguments:
    #   assembly
    #   token
    ContextCallbacks = {}
    DoubleClickCallbacks = []
def __init__(self):
    """Build the reactor panel widgets and wire up the drawing callbacks."""
    # Initialize data structures
    self.db_core = DbCore()
    self.int_timelines = []        # timelines owned by this reactor (INTERNAL mode)
    self.ext_timelines = []        # timelines observed from other reactors
    self.timelines = {}            # timeline name -> TimelineSprite
    self.n_timelines = 0
    # Token structures (all keyed by token key)
    self.all_tokens = {}
    self.token_ticks = {}          # last tick at which each token was updated
    self.token_timelines = {}      # token key -> owning timeline sprite
    self.tokens_to_remove = []
    # Sorted token structures: parallel key/time lists kept in sorted order
    self.started_token_keys = []
    self.planned_token_keys = []
    self.started_token_times = []
    self.planned_token_times = []
    # Drawing variables
    self.needs_redraw = True
    self.timelines_group = None
    self.color = (0,0,0)
    self.hilight_keys = []         # keys of tokens currently drawn highlighted
    # Initialize tab index (used for keeping track of where this reactor is in the notebook)
    self.tab_index = 0
    # Initialize icon
    self.icon = None
    # Create glade window
    tree = gtk.glade.XML(trex_glade_path("timeline_window.glade"),root="timeline_panel")
    self.w = tree.get_widget("timeline_panel")
    self.w.show_all()
    # Bind the scrolledwindow widgets
    self.timeline_sw = tree.get_widget("timeline_sw")
    self.timeline_label_vp = tree.get_widget("timeline_label_vp")
    self.timeline_label_sw = tree.get_widget("timeline_label_sw")
    # Make scrolled windows share vertical adjustment object for synchronization
    self.v_adj = gtk.Adjustment()
    self.timeline_sw.set_vadjustment(self.v_adj)
    self.timeline_label_sw.set_vadjustment(self.v_adj)
    # Make all windows share the same timeline position (class-level H_adj)
    self.timeline_sw.set_hadjustment(ReactorPanel.H_adj)
    # Bind the drawingarea widgets
    self.timeline_da = tree.get_widget("timeline_da")
    self.timeline_label_da = tree.get_widget("timeline_label_da")
    # Register callbacks for updating the timeline labels and timeline
    self.timeline_label_da.connect("expose-event",self.expose_timeline_label_da)
    self.timeline_da.connect("expose-event",self.expose_timeline_da)
    self.timeline_da.connect("button-press-event", self.on_timeline_click,None)
def on_change_color(self, widget):
    """Color-chooser callback: apply the currently selected color."""
    picked = self.color_chooser_but.get_color()
    self.set_color(picked.red, picked.green, picked.blue)
def set_color(self,r,g,b):
    """Set the reactor color from 16-bit GDK channel values and redraw.

    r, g, b are 0-65535 channels (as delivered by gtk.gdk.Color); they are
    normalized to 0.0-1.0 floats for cairo drawing.
    """
    # Set the stored color
    self.color = (r/65535.0,g/65535.0,b/65535.0)
    # Set the chooser color
    vis = gtk.gdk.visual_get_system()
    cmap = gtk.gdk.Colormap(vis,True)
    self.color_chooser_but.set_color(gtk.gdk.Colormap.alloc_color(cmap,r,g,b))
    self.draw()
# Create timeline structures for all of the timelines in an assembly
def process_timelines(self, db_core):
    """Ingest a db_core snapshot: update timeline sprites and sorted token lists.

    Tokens previously tagged for removal are purged, new timelines are
    classified as internal or external, and each timeline's newly active
    tokens are (re)inserted into the started/planned sorted key/time lists
    used by draw_timeline_da.
    """
    # Save db_core
    self.db_core = db_core
    # Clear timeline vars
    self.n_timelines = 0
    # Remove tokens that were tagged for removal
    for token in self.tokens_to_remove:
        # Get token key
        key = token.key
        if self.all_tokens.has_key(key):
            # Remove from all token map
            del self.all_tokens[key]
            # Remove from sorted lists (pick whichever pair holds this key)
            if token.key in self.planned_token_keys:
                times = self.planned_token_times
                keys = self.planned_token_keys
            else:
                times = self.started_token_times
                keys = self.started_token_keys
            # Get the index in the sorted lists of this token
            sorted_index = keys.index(key)
            # Remove from the sorted lists (parallel lists stay aligned)
            times.pop(sorted_index)
            keys.pop(sorted_index)
    # Clear tokens to remove list
    self.tokens_to_remove = []
    # TODO: Clear timelines that are not represented in the assembly
    # Create timeline objects for all timelines
    # This also classifies all timelines as internal or external
    for tl in db_core.int_timelines.values() + db_core.ext_timelines.values():
        # Check if this is a new timeline
        if not self.timelines.has_key(tl.name):
            # Create a new timeline object
            timeline = TimelineSprite(tl)
            # Add the timeline
            self.timelines[timeline.tl.name] = timeline
            if timeline.tl.mode == Timeline.INTERNAL:
                # Internal timelines are owned by this panel
                timeline.reactor_panel = self
                self.int_timelines.append(timeline)
            else:
                # External: the owning reactor panel is resolved later
                # (see TimelineWindow.set_db_cores)
                self.ext_timelines.append(timeline)
        else:
            # Retrieve the timeline object
            timeline = self.timelines[tl.name]
            # Update the object
            timeline.add_tokens(tl.tokens)
        # Add all tokens to this reactor
        ##############################################################
        for new_token in timeline.new_active_tokens:
            # Check if this token existed in a previously viewed tick
            if self.all_tokens.has_key(new_token.key):
                # Check if this token is planned from the previous tick
                # This means that it might have started on this one, also its start
                # time is not yet closed, so we need to remove it before sorting
                if new_token.key in self.planned_token_keys:
                    # Get the index in the sorted lists of this token
                    sorted_index = self.planned_token_keys.index(new_token.key)
                    # Remove from the sorted lists
                    self.planned_token_times.pop(sorted_index)
                    self.planned_token_keys.pop(sorted_index)
                else:
                    # We're travelling backwards
                    # Get the index in the sorted lists of this token
                    sorted_index = self.started_token_keys.index(new_token.key)
                    # Remove from the sorted lists
                    self.started_token_times.pop(sorted_index)
                    self.started_token_keys.pop(sorted_index)
                    pass
            # Store / update this token
            self.all_tokens[new_token.key] = new_token
            # Update last updated tick for this token
            self.token_ticks[new_token.key] = db_core.tick#[0]
            # Store this token's timeline
            self.token_timelines[new_token.key] = timeline
            # Insert this token to the appropriate sorted token list:
            # started when the start bound is closed (both ends equal),
            # planned otherwise
            if new_token.start[0] == new_token.start[1]:
                keys = self.started_token_keys
                times = self.started_token_times
            else:
                keys = self.planned_token_keys
                times = self.planned_token_times
            # Get insertion indices (range of entries with an equal start time)
            insert_index_start = bisect.bisect_left(times, new_token.start[0])
            insert_index_end = bisect.bisect_right(times, new_token.start[0])
            # Get list of times
            if insert_index_start != insert_index_end and insert_index_start < len(keys) and insert_index_end < len(keys):
                local_token_keys = keys[insert_index_start:insert_index_end]
                # Within a run of equal start times, order secondarily by end time
                for index in range(insert_index_start,insert_index_end):
                    if self.all_tokens[keys[index]].end[0] > new_token.end[0]:
                        insert_index_start = insert_index_start + 1
            # Insert token (skip if an identical key already sits in the run)
            if insert_index_start >= len(keys) or new_token.key not in keys[insert_index_start:insert_index_end]:
                keys.insert(insert_index_start,new_token.key)
                times.insert(insert_index_start,new_token.start[0])
        ##############################################################
    # Set the row in each timeline (internal timelines first)
    row = 0
    for timeline in self.int_timelines + self.ext_timelines:
        timeline.row = row
        row = row + 1
    # Set the number of timelines
    self.n_timelines = row
# Callback to process click events on the timeline view
def on_timeline_click(self, widget, event, data):
    """Handle button presses on the timeline drawing area.

    Double-left-click highlights the hit token and fires the registered
    double-click callbacks; middle-click toggles the ruler at the hit edge;
    right-click highlights the token and pops up the extension context menu.
    Always returns False so GTK continues event propagation.
    """
    # Calculate row from the click's y coordinate
    row = int(event.y/ReactorPanel.ROW_STEP)
    if row > self.n_timelines-1:
        return False
    # Get timeline (internal timelines come first, matching draw order)
    timeline = (self.int_timelines+self.ext_timelines)[row]
    # Do hit test
    token,hit_edge = timeline.get_hit_token(event.x)
    # Process click type
    if event.type == gtk.gdk._2BUTTON_PRESS:
        if event.button == 1:
            if token:
                # Hilight the token (single selection only)
                self.hilight_keys = [token.key]
                """ MULTIPLE SELECTION
                if token.key not in self.hilight_keys:
                    self.hilight_keys.append(token.key)
                else:
                    self.hilight_keys.remove(token.key)
                """
                self.draw()
                # Call the double click callbacks with the db core and the token
                for cb in ReactorPanel.DoubleClickCallbacks:
                    cb(self.db_core,token)
    elif event.type == gtk.gdk.BUTTON_PRESS:
        if event.button == 1:
            pass
        elif event.button == 2:
            # Set the ruler (clicking the same edge again hides it)
            if ReactorPanel.Ruler == hit_edge:
                ReactorPanel.Ruler = 0
            else:
                ReactorPanel.Ruler = hit_edge
            self.draw()
        elif event.button == 3:
            if token:
                # Hilight the token
                self.hilight_keys = [token.key]
                self.draw()
                # Create context menu
                m = gtk.Menu()
                # Create info label menu item (disabled: acts as a header)
                info = gtk.MenuItem("Timeline: %s\nToken: %s\nKey: %s" % (timeline.tl.name, token.name, str(token.key)),False)
                info.set_sensitive(False)
                m.append(info)
                m.append(gtk.SeparatorMenuItem())
                # Iterate over registered gui extensions
                for label_str,cb in ReactorPanel.ContextCallbacks.items():
                    menu_ext = gtk.MenuItem(label_str)
                    menu_ext.connect("activate",self.callback_wrapper,cb,self.db_core,token)
                    m.append(menu_ext)
                # Show the menu, and pop it up
                m.show_all()
                m.popup(None, None, None, event.button, event.time, None)
    return False
# Wrap the callback to keep it from receiving the menuitem
def callback_wrapper(self,menuitem,cb,db_core,token):
    """Adapter that drops the GTK menuitem argument and calls cb(db_core, token)."""
    cb(db_core, token)
#############################################################################
# Drawing timelines
#############################################################################
# Callback to re-draw a timeline cr when necessary
def expose_timeline_da(self, widget, event):
    """GTK expose handler: repaint the timeline drawing area."""
    # Create the cairo context
    cr = widget.window.cairo_create()
    # Set a clip region for the expose event
    cr.rectangle(event.area.x, event.area.y,event.area.width, event.area.height)
    cr.clip()
    # Determine if a redraw is necessary
    # (group caching below is disabled; kept for reference)
    """
    if self.needs_redraw:
        cr.push_group()
        self.draw_timeline_da(cr)
        self.timelines_group = cr.pop_group()
        self.needs_redraw = False
    # Draw the stored timelines group
    cr.set_source(self.timelines_group)
    cr.rectangle(event.area.x, event.area.y,event.area.width, event.area.height)
    cr.fill()
    """
    # Draw timelines from scratch on every expose
    self.draw_timeline_da(cr)
# Draws the group from scratch
def draw_timeline_da(self, cr):
    """Render all visible tokens onto the timeline drawing area.

    Started tokens are laid out right-to-left from the execution frontier
    (most recently started first), planned tokens left-to-right into the
    future region.  Per-timeline hit regions for mouse picking are rebuilt
    as a side effect.  Tokens older than TokenCache ticks are tagged for
    removal on the next process_timelines call.
    """
    # Get visible width of timeline drawing area
    timeline_da_width = self.timeline_sw.get_allocation().width
    # Clear the image
    cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
    cr.paint()
    # Initialize row counter
    row = 0
    # Draw row backgrounds
    row_width = ReactorPanel.PastWidth + ReactorPanel.FutureWidth + timeline_da_width
    for timeline in self.int_timelines + self.ext_timelines:
        row_y = row*ReactorPanel.ROW_STEP + ReactorPanel.ROW_SPACE
        # Draw timeline background
        cr.set_source_rgba(0.8, 0.8, 0.8, 0.7)
        cr.rectangle(0,row_y,row_width,ReactorPanel.ROW_HEIGHT)
        cr.fill()
        row = row+1
    # Draw the execution frontier (shaded region to the right of "now")
    cr.set_source_rgba(0, 0, 0, 0.2)
    cr.rectangle(ReactorPanel.PastWidth + ReactorPanel.Center,0,row_width,(self.n_timelines)*ReactorPanel.ROW_STEP)
    cr.fill()
    # Set global earliest start and latest end
    earliest_tick = float("Inf")
    earliest_start = 0
    latest_end = 0
    latest_tick = 0
    if ReactorPanel.MetricTime:
        token_space = min(2,ReactorPanel.TimeScale/2)
    else:
        token_space = ReactorPanel.TokenSpace
    # Reset earliest start and latest end for each timeline, and the
    # hit-test structures (index 0 is the sentinel left edge)
    for tl in self.int_timelines + self.ext_timelines:
        tl.hit_boundaries = [0]
        tl.hit_tokens = [None]
        tl.earliest_tick = float("Inf")
        tl.earliest_start = 0
        tl.latest_end = 0
        tl.latest_tick = 0
    # Iterate over all token keys: started tokens newest-first, then planned
    for key in self.started_token_keys[::-1] + self.planned_token_keys:
        # Get token
        token = self.all_tokens[key]
        timeline = self.token_timelines[key]
        tick = self.token_ticks[key]
        # Get timeline color
        (r,g,b) = timeline.get_color()
        # Skip tokens that are older than the tickbehind and newer than the current tick
        if tick[0] < self.db_core.tick[0]-ReactorPanel.TokenHistory:
            # Check if this is older than the cache
            if tick[0] < self.db_core.tick[0]-ReactorPanel.TokenCache:
                # Add this token to the removal list
                self.tokens_to_remove.append(token)
            continue
        elif tick > self.db_core.tick:
            continue
        # Do not draw BaseState (HACK)
        if token.name == "BaseState.Holds":
            pass#continue
        # Create the label string, and get the length of the label
        self.set_label_font(cr)
        # Switch on whether we want to view the keys in the label
        if ReactorPanel.ViewTokenKeys:
            label_str = "%s (%s)" % (token.name.split('.')[1], str(token.key))
        else:
            label_str = "%s" % (token.name.split('.')[1])
        _xb,_yb,w_label,_th,_xa,_ya = cr.text_extents(label_str)
        # Create the time bound string and get its length
        self.set_times_font(cr)
        end_str = "[%d, %d]" % (token.start[0], token.end[0])
        _xb,_yb,w_end_str,_th,_xa,_ya = cr.text_extents(end_str)
        # Get the max width of the label
        tok_width_label = max(w_label,w_end_str)
        tok_width_label = tok_width_label + 2*ReactorPanel.LABEL_MARGIN
        tok_x0 = 0
        tok_y0 = 0
        # Switch draw ordering behavior if this token has started or is planned
        if token.start[0] == token.start[1]:
            # Has started: lay out leftwards from the frontier
            # Initialize token synchronization width
            tok_width_sync = 0
            if token.start[0] < earliest_tick:
                # Increase spacing if this tick happened before the earliest tick in this view
                tok_width_sync = ReactorPanel.ROW_HEIGHT/2
                if timeline.earliest_tick > earliest_tick and token.end[0] < earliest_tick:
                    # A token is missing, re-sync to create a space for it
                    timeline.earliest_start = earliest_start
            # Calculate the token pixel width: the distance between the latest
            # point on this timeline and the earliest point over all timelines
            tok_width_sync = tok_width_sync + abs(timeline.earliest_start - earliest_start)
            # Get the larger of the two widths
            tok_width = max(tok_width_label, tok_width_sync)
            if ReactorPanel.MetricTime:
                # Metric mode: width is proportional to duration instead
                tok_width = ReactorPanel.TimeScale*(token.end[0]-token.start[0])
            # Calculate the token end point
            # This is the start of the earliest token on the timeline that this token is being drawn onto
            tok_end = timeline.earliest_start
            # Do not draw token if it ends before the visible window
            #if tok_end < -ReactorPanel.PastWidth:
            #    continue
            # Set the earliest tick for this timeline
            earliest_tick = token.start[0]
            # Set the earliest tick for this timeline
            timeline.earliest_tick = earliest_tick
            # Increment earliest start for this timeline
            timeline.earliest_start = timeline.earliest_start - tok_width
            # Set the new earliest start for all timelines
            earliest_start = min(earliest_start, timeline.earliest_start)
            # Calculate the position top-right corner of the token
            tok_x0 = math.ceil(tok_end)
        else:
            # Is planned: lay out rightwards into the future region
            # Initialize token synchronization width
            tok_width_sync = 0
            if token.end[0] > latest_tick:
                # Increase spacing if this tick happened before the earliest tick in this view
                tok_width_sync = ReactorPanel.ROW_HEIGHT/2
            # Calculate the token pixel width: distance between the global
            # latest end and this timeline's latest end
            tok_width_sync = tok_width_sync + abs(latest_end - timeline.latest_end)
            # Get the larger of the two widths
            tok_width = max(tok_width_label, tok_width_sync)
            # Calculate the token start point
            tok_start = timeline.latest_end
            # Do not draw token if it starts outside the visible window
            if tok_start < -ReactorPanel.FutureWidth:
                continue
            # Update latest tick
            latest_tick = token.end[0]
            # Set timeline latest tick
            timeline.latest_tick = latest_tick
            # Increment latest end for this timeline
            timeline.latest_end = timeline.latest_end + tok_width
            # Set the new latest end for all timelines
            latest_end = max(latest_end, timeline.latest_end)
            # Calculate the position top-right corner of the token
            tok_x0 = math.ceil(tok_start+tok_width)
        # Translate from layout coordinates into drawing-area pixels
        tok_x0 = tok_x0 +ReactorPanel.PastWidth + ReactorPanel.Center
        tok_y0 = ReactorPanel.ROW_STEP*timeline.row + ReactorPanel.ROW_SPACE
        # Store the token hit region (started tokens go after the sentinel,
        # planned tokens append on the right)
        if token.start[0] == token.start[1]:
            timeline.hit_boundaries.insert(1,tok_x0)
            timeline.hit_tokens.insert(1,token)
        else:
            timeline.hit_boundaries.append(tok_x0)
            timeline.hit_tokens.append(token)
        # Update the edge of the hit region
        timeline.hit_boundaries[0] = ReactorPanel.PastWidth + ReactorPanel.Center + timeline.earliest_start
        # Draw token
        # Set the color for the appropriate reactors
        if key in self.hilight_keys:
            # Highlighted tokens draw in orange
            cr.set_source_rgba(1.0, 0.7, 0.07, 1.0)
        elif self.token_ticks[key] < self.db_core.tick:#[0]:
            # Stale tokens draw faded
            cr.set_source_rgba(r,g,b, 0.3)
        else:
            cr.set_source_rgba(r, g, b, 0.7)
        # Draw the token rectangle (negative width: extends left of tok_x0)
        cr.rectangle(tok_x0, tok_y0, -tok_width+token_space, ReactorPanel.ROW_HEIGHT)
        cr.fill()
        # Only draw the labels if there is space for them
        if tok_width-2-ReactorPanel.LABEL_MARGIN > w_label:
            # Draw the token label
            self.set_label_font(cr)
            tx = tok_x0 - w_label - ReactorPanel.LABEL_MARGIN
            ty = tok_y0 + ReactorPanel.ROW_HEIGHT - 4
            cr.move_to(tx,ty)
            cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
            cr.show_text(label_str)
        if tok_width-2-ReactorPanel.LABEL_MARGIN > w_end_str:
            # Draw the time bounds
            self.set_times_font(cr)
            cr.set_source_rgba(0, 0, 0, 0.5)
            tx = tok_x0 - w_end_str - ReactorPanel.LABEL_MARGIN
            ty = tok_y0 + ReactorPanel.ROW_STEP - 3
            cr.move_to(tx,ty)
            cr.show_text(end_str)
    # Draw ruler (2px vertical bar at the last middle-clicked edge)
    cr.set_source_rgba(0, 0, 0, 0.5)
    cr.rectangle(ReactorPanel.Ruler,0,2,(self.n_timelines)*ReactorPanel.ROW_STEP)
    cr.fill()
    return False
def set_label_font(self, cr):
    """Select the sans-serif face and size used for token/timeline labels."""
    cr.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    cr.set_font_size(10)
def set_times_font(self, cr):
    """Select the monospace face and size used for token time bounds."""
    cr.select_font_face("Monospace", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    cr.set_font_size(8)
#############################################################################
# Drawing timeline labels
#############################################################################
def expose_timeline_label_da(self, widget, event):
    """GTK expose handler: draw timeline name labels and resize the drawing areas."""
    # Create the cairo context
    cr = widget.window.cairo_create()
    # set a clip region for the expose event
    cr.rectangle(event.area.x, event.area.y,event.area.width, event.area.height)
    cr.clip()
    # Clear the image
    cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
    cr.paint()
    # Determine width needed to show all labels
    max_width = 0
    self.set_label_font(cr)
    for timeline in self.int_timelines + self.ext_timelines:
        # Get extents
        xb,yb,w,h,xa,ya = cr.text_extents(timeline.tl.name)
        max_width = max(w,max_width)
    # Set the width (class-level, only ever grows so labels never clip)
    ReactorPanel.LabelWidth = max(ReactorPanel.LabelWidth, max_width + (2*ReactorPanel.LABEL_MARGIN))
    # Draw rows
    row = 0
    for timeline in self.int_timelines + self.ext_timelines:
        y = row*ReactorPanel.ROW_STEP + ReactorPanel.ROW_SPACE
        # Get color based on parent
        (r,g,b) = timeline.get_color()
        cr.set_source_rgba(r, g, b, 1.0)
        cr.rectangle(0,y,1000,ReactorPanel.ROW_HEIGHT)
        cr.fill()
        # Determine extents of text string (right-align the name)
        xb,yb,w,h,xa,ya = cr.text_extents(timeline.tl.name)
        tx = ReactorPanel.LabelWidth - w - ReactorPanel.LABEL_MARGIN
        ty = y+ReactorPanel.ROW_HEIGHT-4
        cr.move_to(tx,ty)
        cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
        cr.show_text(timeline.tl.name)
        row = row+1
    # Resize drawing area for the current number of timelines
    win_height_px = ReactorPanel.ROW_SPACE + ReactorPanel.ROW_STEP*self.n_timelines
    self.timeline_da.set_size_request(int(ReactorPanel.PastWidth + ReactorPanel.FutureWidth),win_height_px)
    # Resize drawing area for label width
    self.timeline_label_da.set_size_request(int(ReactorPanel.LabelWidth),win_height_px)
    self.timeline_label_sw.set_size_request(int(ReactorPanel.LabelWidth+20),-1)
    return False
def draw(self, redraw = True):
    """Invalidate both drawing areas so GTK delivers expose events immediately.

    redraw: when True, also mark the cached timeline group as stale.
    """
    # Set the needs_redraw flag
    self.needs_redraw = redraw
    # Redraw the timelines drawingarea (only if it has been realized)
    if self.timeline_da.window:
        # Refresh view
        alloc = self.timeline_da.get_allocation()
        rect = gtk.gdk.Rectangle(0, 0, alloc.width, alloc.height)
        self.timeline_da.window.invalidate_rect(rect, True)
        self.timeline_da.window.process_updates(True)
    # Redraw the labels drawingarea
    if self.timeline_label_da.window:
        # Refresh view
        alloc = self.timeline_label_da.get_allocation()
        rect = gtk.gdk.Rectangle(0, 0, alloc.width, alloc.height)
        self.timeline_label_da.window.invalidate_rect(rect, True)
        self.timeline_label_da.window.process_updates(True)
##############################################################################
# TimelineWindow
# This is a GTK widget container class that shows a user a set of partial
# plans from the reactors in a given TREX agent.
##############################################################################
class TimelineWindow():
    """Top-level window showing the partial plans of all reactors in a TREX agent.

    Each reactor gets one ReactorPanel tab in a notebook; the view controls
    update the class-level ReactorPanel settings shared by every tab.
    """
    def __init__(self):
        # Initialize structures
        self.reactor_panels = {}       # reactor name -> ReactorPanel
        self.reactor_indices = {}      # notebook tab index -> ReactorPanel
        self.colors = [(0,0,0)]
        # Create glade window
        self.tree = gtk.glade.XML(trex_glade_path("timeline_window.glade"))
        self.w = self.tree.get_widget("timeline_window")
        self.w.set_title("TREX Timelines")
        # Add references to all underscored widgets as attributes of self
        for w in self.tree.get_widget_prefix('_'):
            name = w.get_name()[1:]
            # Make sure we don't clobber existing attributes
            assert not hasattr(self, name)
            setattr(self, name, w)
        # Remove template page from notebook
        self.reactor_nb.remove_page(0)
        # add tabs
        random.seed(4)
        #self.time_scale_slider.connect("change-value",self.redraw_viewport)
        #self.viewport_da.connect("expose-event",self.expose_viewport)
        # Connect menu view check
        self.show_view_options_menu_item.connect("toggled",self.on_toggle_view_controls)
        # connect view controls
        self.past_width_spin.connect("value-changed",self.on_change_view_controls)
        self.center_spin.connect("value-changed",self.on_change_view_controls)
        self.future_width_spin.connect("value-changed",self.on_change_view_controls)
        self.metric_time_check.connect("toggled",self.on_change_view_controls)
        self.view_token_keys_check.connect("toggled",self.on_change_view_controls)
        self.time_scale_spin.connect("value-changed",self.on_change_view_controls)
        self.token_cache_spin.connect("value-changed",self.on_change_view_controls)
        self.token_history_spin.connect("value-changed",self.on_change_view_controls)
        self.w.show()
#############################################################################
# UI Event handlers
#############################################################################
# Callback to hide and show the view controls
def on_toggle_view_controls(self, widget):
    """Show or hide the view-options box to match the menu check item."""
    box = self.view_options_box
    if not self.show_view_options_menu_item.get_active():
        box.hide()
    else:
        box.show()
# Callback to propagate the view control settings into the reactor panel and redraw
def on_change_view_controls(self, widget):
    """Push every view-control widget value onto ReactorPanel's shared settings, then redraw."""
    panel_cls = ReactorPanel
    panel_cls.PastWidth = self.past_width_spin.get_value()
    panel_cls.FutureWidth = self.future_width_spin.get_value()
    panel_cls.Center = self.center_spin.get_value()
    panel_cls.MetricTime = self.metric_time_check.get_active()
    panel_cls.ViewTokenKeys = self.view_token_keys_check.get_active()
    panel_cls.TimeScale = self.time_scale_spin.get_value()
    panel_cls.TokenCache = self.token_cache_spin.get_value()
    panel_cls.TokenHistory = self.token_history_spin.get_value()
    self.draw_active_reactor()
# Set the status text
def set_status(self, text):
    """Replace the statusbar message (context 0) with *text*."""
    self.status_text = text
    bar = self.statusbar
    bar.pop(0)
    bar.push(0, self.status_text)
#############################################################################
# Data manipulation
#############################################################################
# Load new assemblies
def set_db_cores(self,db_cores,selected_reactor_name):
    """Sync the window with a fresh reactor-name -> db_core snapshot.

    Adds/removes reactor tabs to match db_cores, feeds each panel its
    db_core, resolves which reactor panel owns each external timeline
    (grouping external timelines by owner), and updates the statusbar
    with the selected reactor's tick (and conflict, if any).
    """
    # Add and remove reactors as necessary
    for reactor_name,db_core in db_cores.items():
        # Check if this reactor exists
        if not self.reactor_panels.has_key(reactor_name):
            self.add_reactor(reactor_name)
        # Set this reactor's assembly
        self.reactor_panels[reactor_name].process_timelines(db_core)
    # Remove reactors that were not updated
    removed_reactors = [rname for rname in self.reactor_panels.keys() if rname not in db_cores.keys()]
    for reactor_name in removed_reactors:
        self.rem_reactor(reactor_name)
    # Determine external timeline parents
    for reactor_panel in self.reactor_panels.values():
        for timeline in reactor_panel.ext_timelines:
            if not timeline.reactor_panel:
                for tl_parent in self.reactor_panels.values():
                    # Only compare to reactors that aren't the owner
                    if tl_parent != reactor_panel:
                        # Generate a list of the internal timeline names for this candidate parent
                        tl_names = [tl.tl.name for tl in tl_parent.int_timelines]
                        if timeline.tl.name in tl_names:
                            # Set this timeline's reactor panel to its parent
                            timeline.reactor_panel = tl_parent
                            # Re-order this timeline to group it with the first previous timeline with the same parent
                            tl_index = reactor_panel.ext_timelines.index(timeline)
                            sorted_timelines = [tl for tl in reactor_panel.ext_timelines[0:tl_index] if tl.reactor_panel]
                            for sib in sorted_timelines:
                                if sib.reactor_panel == tl_parent:
                                    sib_index = reactor_panel.ext_timelines.index(sib)
                                    tl_move = reactor_panel.ext_timelines.pop(tl_index)
                                    reactor_panel.ext_timelines.insert(sib_index,tl_move)
                                    break
        # Re-calculate the row for each timeline of this panel
        row = 0
        for timeline in reactor_panel.int_timelines + reactor_panel.ext_timelines:
            timeline.row = row
            row = row + 1
    # Set the statusbar
    if selected_reactor_name:
        tick = db_cores[selected_reactor_name].tick
        if db_cores[selected_reactor_name].conflict:
            self.statusbar.push(0,"TICK: %s CONFLICT: %s" % (str(tick[0]),str(tick[1])))
        else:
            self.statusbar.push(0,"TICK: %s" % (str(tick[0])))
    # Redraw the active reactor
    self.draw_active_reactor()
#############################################################################
# Methods for manipulating reactor views
#############################################################################
# Add a reactor (and create a new tab)
def add_reactor(self,reactor_name):
    """Create a ReactorPanel for reactor_name and append it as a notebook tab."""
    # Create a new timeline panel
    tp = ReactorPanel()
    self.reactor_panels[reactor_name] = tp
    # Create a new label from the glade template
    tree = gtk.glade.XML(trex_glade_path("timeline_window.glade"),root="tab_label")
    label = tree.get_widget("tab_label")
    icon = tree.get_widget("tab_icon")
    text = tree.get_widget("tab_text")
    # Bind the color buttons for changing the reactor color
    tp.color_chooser_but = tree.get_widget("tab_color_button")
    tp.color_chooser_but.connect("color-set",tp.on_change_color)
    # Set the label text
    text.set_text(reactor_name)
    # Store a reference to the icon widget
    tp.icon = icon
    # Append the reactor to the notebook
    tp.tab_index = self.reactor_nb.insert_page(tp.w,label)
    self.reactor_nb.set_menu_label_text(tp.w,reactor_name)
    # Store the index
    self.reactor_indices[tp.tab_index] = tp
    # Update the colors
    self.update_icons()
    # Show all of the label sub-widgets
    label.show_all()
    # hide the color chooser button since it is not fully functional yet
    tp.color_chooser_but.hide()
def set_visible_reactor(self, reactor_name):
    """Switch the notebook to the tab of the named reactor."""
    panel = self.reactor_panels[reactor_name]
    self.reactor_nb.set_current_page(panel.tab_index)
# Remove a reactor (and delete its tab)
def rem_reactor(self,reactor_name):
    """Remove the named reactor's notebook tab and re-pack the tab indices.

    Panels that sat after the removed tab have their tab_index shifted
    down by one, and the tab-index -> panel map is rebuilt.
    """
    # Get page index
    tab_index = self.reactor_panels[reactor_name].tab_index
    # Remove notebook page
    self.reactor_nb.remove_page(tab_index)
    # Remove from reactor list
    del self.reactor_panels[reactor_name]
    # Rebuild the tab-index map from scratch.
    # BUG FIX: the original only overwrote the shifted entries, which left a
    # stale entry at the old highest tab index still pointing at a panel
    # that had moved down.
    self.reactor_indices = {}
    # Update tab indices for all other reactors
    for reactor_panel in self.reactor_panels.values():
        if reactor_panel.tab_index > tab_index:
            # Decrease tab index by one
            reactor_panel.tab_index = reactor_panel.tab_index - 1
        # Update the index
        self.reactor_indices[reactor_panel.tab_index] = reactor_panel
    # Update the colors
    self.update_icons()
# Draw the selected reactor
def draw_active_reactor(self):
    """Redraw the reactor panel on the currently selected notebook tab, if any."""
    pid = self.reactor_nb.get_current_page()
    # Use dict.get instead of the deprecated dict.has_key (removed in
    # Python 3; .get works identically on Python 2 and 3).
    panel = self.reactor_indices.get(pid)
    if panel is not None:
        panel.draw()
# Generate a random color that is some distance from all other colors in rgb space
def gen_rand_color(self):
    """Sample random RGB triples until one lies at squared distance >= 0.1
    from every color already recorded in self.colors."""
    while True:
        candidate = (random.random(), random.random(), random.random())
        # Squared euclidean distance to the nearest already-used color
        nearest = float("Inf")
        for used in self.colors:
            d = (candidate[0]-used[0])**2 + (candidate[1]-used[1])**2 + (candidate[2]-used[2])**2
            nearest = min(nearest, d)
        if nearest >= 0.1:
            return candidate
# Update the icon pixbufs with new colors based on the number of reactors
def update_icons(self):
    """Recolor every reactor's tab icon; hue is spread evenly across the tabs."""
    # Get total number of reactors
    n_reactors = len(self.reactor_panels)
    # Paint a 16x16 swatch for each reactor panel
    for reactor_panel in self.reactor_panels.values():
        icon = reactor_panel.icon
        canvas = gtk.gdk.Pixmap(None,16,16,24)
        vis = gtk.gdk.visual_get_system()
        cmap = gtk.gdk.Colormap(vis,True)
        canvas.set_colormap(cmap)
        cr = canvas.cairo_create()
        # set a clip region
        cr.rectangle(0,0,16,16)
        cr.clip()
        # Generate the color: hue from tab position, fixed saturation/value
        rgb = colorsys.hsv_to_rgb(float(reactor_panel.tab_index)/float(n_reactors),0.4,0.5)
        #rgb = self.gen_rand_color()
        reactor_panel.color = rgb
        cr.set_source_rgba(rgb[0], rgb[1], rgb[2], 1.0)
        cr.paint()
        # Get pixbuf from drawable and install it on the tab icon
        pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,False,8,16,16)
        pixbuf.get_from_drawable(canvas,cmap,0,0,0,0,16,16)
        icon.set_from_pixbuf(pixbuf)
#############################################################################
# Extension API
#############################################################################
def register_context_extension(self,label_str,cb):
    """Register cb under label_str in the token context menu (replaces any existing entry)."""
    # Add or replace callback in dictionary
    ReactorPanel.ContextCallbacks[label_str] = cb
def unregister_context_extension(self,label_str,cb):
    """Remove a previously registered context-menu callback, if present.

    BUG FIX: the original checked and deleted on the ReactorPanel class
    itself (ReactorPanel.has_key / del ReactorPanel[label_str]), which
    raises at runtime; the callbacks live in ReactorPanel.ContextCallbacks.
    """
    # Remove callback from dictionary
    if label_str in ReactorPanel.ContextCallbacks:
        del ReactorPanel.ContextCallbacks[label_str]
def register_double_click_callback(self,cb):
    """Add cb to the list invoked (with db_core, token) on token double-click."""
    ReactorPanel.DoubleClickCallbacks.append(cb)
def unregister_double_click_callback(self,cb):
    """Remove a previously registered double-click callback."""
    ReactorPanel.DoubleClickCallbacks.remove(cb)
# Unit tests
class TestTokenNetworkWindow(unittest.TestCase):
    """Interactive GTK smoke test: adds and removes reactors on a live window."""
    # Create the gtk thread and window structure
    def setUp(self):
        # Create a new timeline window
        self.timeline_window = TimelineWindow()
    # Destroy window and kill gtk
    def tearDown(self):
        print "Killing The window..."
        self.timeline_window.w.destroy()
        time.sleep(5)
    # Test the auto-redraw of a network
    def test_window(self):
        gtk.gdk.threads_init()
        self.timeline_window.w.connect("destroy",gtk.main_quit)
        # Drive the UI from a worker thread while gtk.main() runs here
        test_thread = threading.Thread(target=self.thread_test_window)
        test_thread.start()
        gtk.main()
    def thread_test_window(self):
        """Worker thread: add and remove reactors with delays so redraws are visible."""
        print "Started test thread..."
        time.sleep(2)
        print "Constructing test assembly..."
        from TREX.core.assembly import Assembly,construct_test_assembly
        # Create assemblies
        assemblies = {}
        assemblies["test"] = construct_test_assembly()
        print "Adding reactors..."
        # All GTK calls from this thread are wrapped in threads_enter/leave
        gtk.gdk.threads_enter()
        self.timeline_window.add_reactor("doorman")
        self.timeline_window.add_reactor("recharger")
        self.timeline_window.add_reactor("state_estimator")
        self.timeline_window.add_reactor("mechanism_control")
        self.timeline_window.add_reactor("master")
        self.timeline_window.add_reactor("navigator")
        self.timeline_window.add_reactor("driver")
        self.timeline_window.add_reactor("writer")
        gtk.gdk.threads_leave()
        time.sleep(1)
        gtk.gdk.threads_enter()
        self.timeline_window.rem_reactor("state_estimator")
        gtk.gdk.threads_leave()
        time.sleep(1)
        gtk.gdk.threads_enter()
        self.timeline_window.rem_reactor("writer")
        gtk.gdk.threads_leave()
        time.sleep(1)
        gtk.gdk.threads_enter()
        self.timeline_window.rem_reactor("doorman")
        gtk.gdk.threads_leave()
        self.tearDown()
# Run the GTK unit tests when this module is executed directly
if __name__ == '__main__':
    unittest.main()
|
filestatemanager.py | # Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' filestatemanager.py '''
import os
import threading
import time
from collections import defaultdict
from heron.statemgrs.src.python.statemanager import StateManager
from heron.proto.execution_state_pb2 import ExecutionState
from heron.proto.physical_plan_pb2 import PhysicalPlan
from heron.proto.scheduler_pb2 import SchedulerLocation
from heron.proto.tmaster_pb2 import TMasterLocation
from heron.proto.topology_pb2 import Topology
# pylint: disable=too-many-instance-attributes
class FileStateManager(StateManager):
  """
  State manager which reads states from local file system.
  This is not a production level state manager. The watches
  are based on polling the file system at regular intervals.

  NOTE: this module is Python 2 only as written (it uses
  dict.iteritems() in monitor()).
  """

  def __init__(self, name, rootpath):
    # Identifier for this manager and the root of the state tree it polls.
    self.name = name
    self.rootpath = rootpath
    # This is the cache of the state directories.  Each maps a topology
    # name to the raw file contents last seen, so a watch only fires
    # when the on-disk bytes actually change.
    self.topologies_directory = {}
    self.execution_state_directory = {}
    self.pplan_directory = {}
    self.tmaster_directory = {}
    self.scheduler_location_directory = {}
    # The watches are triggered when there
    # is a corresponding change.
    # The list contains the callbacks to be called
    # when topologies change.
    self.topologies_watchers = []
    # The dictionary is from the topology name
    # to the callback.
    self.topology_watchers = defaultdict(lambda: [])
    self.execution_state_watchers = defaultdict(lambda: [])
    self.pplan_watchers = defaultdict(lambda: [])
    self.tmaster_watchers = defaultdict(lambda: [])
    self.scheduler_location_watchers = defaultdict(lambda: [])
    # Instantiate the monitoring thread (not started until start()).
    self.monitoring_thread = threading.Thread(target=self.monitor)

  # pylint: disable=attribute-defined-outside-init
  def start(self):
    """ start monitoring thread """
    # NOTE(review): the stop flag is first created here, so calling
    # stop() or monitor() before start() raises AttributeError.
    self.monitoring_thread_stop_signal = False
    self.monitoring_thread.start()

  def stop(self):
    """ stop monitoring thread """
    # The monitor loop re-reads this flag once per poll cycle, so
    # shutdown can lag by up to one sleep interval (5 seconds).
    self.monitoring_thread_stop_signal = True

  def monitor(self):
    """
    Monitor the rootpath and call the callback
    corresponding to the change.
    This monitoring happens periodically. This function
    is called in a seperate thread from the main thread,
    because it sleeps for the intervals between each poll.
    """
    def trigger_watches_based_on_files(watchers, path, directory, ProtoClass):
      """
      For all the topologies in the watchers, check if the data
      in directory has changed. Trigger the callback if it has.
      """
      for topology, callbacks in watchers.iteritems():
        file_path = os.path.join(path, topology)
        data = ""
        if os.path.exists(file_path):
          with open(os.path.join(path, topology)) as f:
            data = f.read()
        if topology not in directory or data != directory[topology]:
          # Contents changed (or seen for the first time): parse the
          # protobuf and notify every registered callback.
          proto_object = ProtoClass()
          proto_object.ParseFromString(data)
          for callback in callbacks:
            callback(proto_object)
          # Cache the raw bytes so the next poll can compare cheaply.
          directory[topology] = data
    while not self.monitoring_thread_stop_signal:
      topologies_path = self.get_topologies_path()
      # Only regular files under the topologies directory count.
      topologies = filter(
          lambda f: os.path.isfile(os.path.join(topologies_path, f)), os.listdir(topologies_path))
      if set(topologies) != set(self.topologies_directory):
        # The set of topologies changed: notify the list watchers.
        for callback in self.topologies_watchers:
          callback(topologies)
        self.topologies_directory = topologies
      trigger_watches_based_on_files(
          self.topology_watchers, topologies_path, self.topologies_directory, Topology)
      # Get the directory name for execution state
      execution_state_path = os.path.dirname(self.get_execution_state_path(""))
      trigger_watches_based_on_files(
          self.execution_state_watchers, execution_state_path,
          self.execution_state_directory, ExecutionState)
      # Get the directory name for pplan
      pplan_path = os.path.dirname(self.get_pplan_path(""))
      trigger_watches_based_on_files(
          self.pplan_watchers, pplan_path,
          self.pplan_directory, PhysicalPlan)
      # Get the directory name for tmaster
      tmaster_path = os.path.dirname(self.get_tmaster_path(""))
      trigger_watches_based_on_files(
          self.tmaster_watchers, tmaster_path,
          self.tmaster_directory, TMasterLocation)
      # Get the directory name for scheduler location
      scheduler_location_path = os.path.dirname(self.get_scheduler_location_path(""))
      trigger_watches_based_on_files(
          self.scheduler_location_watchers, scheduler_location_path,
          self.scheduler_location_directory, SchedulerLocation)
      # Sleep for some time between polls.
      time.sleep(5)

  def get_topologies(self, callback=None):
    """get topologies

    With a callback: register it for topology-list changes (returns None).
    Without: return the current list of topology files.
    """
    if callback:
      self.topologies_watchers.append(callback)
    else:
      topologies_path = self.get_topologies_path()
      return filter(lambda f: os.path.isfile(os.path.join(topologies_path, f)),
                    os.listdir(topologies_path))

  def get_topology(self, topologyName, callback=None):
    """get topology

    With a callback: register it for changes to this topology.
    Without: read and parse the Topology protobuf from disk.
    """
    if callback:
      self.topology_watchers[topologyName].append(callback)
    else:
      topology_path = self.get_topology_path(topologyName)
      # NOTE: text-mode read feeds ParseFromString; fine on Python 2
      # where str is bytes.
      with open(topology_path) as f:
        data = f.read()
      topology = Topology()
      topology.ParseFromString(data)
      return topology

  def create_topology(self, topologyName, topology):
    """
    Create path is currently not supported in file based state manager.
    """
    pass

  def delete_topology(self, topologyName):
    """
    Delete path is currently not supported in file based state manager.
    """
    pass

  def get_pplan(self, topologyName, callback=None):
    """
    Get physical plan of a topology.

    With a callback: register it for changes; without: parse from disk.
    """
    if callback:
      self.pplan_watchers[topologyName].append(callback)
    else:
      pplan_path = self.get_pplan_path(topologyName)
      with open(pplan_path) as f:
        data = f.read()
      pplan = PhysicalPlan()
      pplan.ParseFromString(data)
      return pplan

  def create_pplan(self, topologyName, pplan):
    """
    Create path is currently not supported in file based state manager.
    """
    pass

  def delete_pplan(self, topologyName):
    """
    Delete path is currently not supported in file based state manager.
    """
    pass

  def get_execution_state(self, topologyName, callback=None):
    """
    Get execution state.

    With a callback: register it for changes; without: parse from disk.
    """
    if callback:
      self.execution_state_watchers[topologyName].append(callback)
    else:
      execution_state_path = self.get_execution_state_path(topologyName)
      with open(execution_state_path) as f:
        data = f.read()
      executionState = ExecutionState()
      executionState.ParseFromString(data)
      return executionState

  def create_execution_state(self, topologyName, executionState):
    """
    Create path is currently not supported in file based state manager.
    """
    pass

  def delete_execution_state(self, topologyName):
    """
    Delete path is currently not supported in file based state manager.
    """
    pass

  def get_tmaster(self, topologyName, callback=None):
    """
    Get tmaster location.

    With a callback: register it for changes; without: parse from disk.
    """
    if callback:
      self.tmaster_watchers[topologyName].append(callback)
    else:
      tmaster_path = self.get_tmaster_path(topologyName)
      with open(tmaster_path) as f:
        data = f.read()
      tmaster = TMasterLocation()
      tmaster.ParseFromString(data)
      return tmaster

  def get_scheduler_location(self, topologyName, callback=None):
    """
    Get scheduler location.

    With a callback: register it for changes; without: parse from disk.
    """
    if callback:
      self.scheduler_location_watchers[topologyName].append(callback)
    else:
      scheduler_location_path = self.get_scheduler_location_path(topologyName)
      with open(scheduler_location_path) as f:
        data = f.read()
      scheduler_location = SchedulerLocation()
      scheduler_location.ParseFromString(data)
      return scheduler_location
|
HiwinRA605_socket_ros_20190625104951.py | #!/usr/bin/env python3
# license removed for brevity
#接收策略端命令 用Socket傳輸至控制端電腦
import socket
##多執行序
import threading
import time
##
import sys
import os
import numpy as np
import rospy
import matplotlib as plot
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import enum
data = '0' #設定傳輸資料初始值
Arm_feedback = 1 #假設手臂忙碌
state_feedback = 0
NAME = 'socket_server'
client_response = 0 #回傳次數初始值
##------------class pos-------
class pos():
    """Shared pose container: position (x, y, z) plus orientation
    (pitch, roll, yaw). The module also assigns these as class-level
    attributes from the service callbacks."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        # Store the six pose components verbatim.
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw
##------------class socket_cmd---------
class socket_cmd():
    """Bag of arm-command settings (gripper, velocity, relative/absolute
    flag, delay, control mode, action and speed mode). The module also
    mutates these as class-level attributes from the service callbacks."""

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        fields = ('grip', 'setvel', 'ra', 'delay', 'setboth', 'action', 'Speedmode')
        values = (grip, setvel, ra, delay, setboth, action, Speedmode)
        for attr, value in zip(fields, values):
            setattr(self, attr, value)
##-----------switch define------------##
class switch(object):
    """Iterator-based switch/case helper (classic ActiveState recipe).

    Usage::

        for case in switch(value):
            if case(A): ...; break
            if case(B): ...; break

    Bug fix: the generator previously ended with ``raise StopIteration``,
    which under PEP 479 (Python 3.7+) is converted to ``RuntimeError`` —
    so any value that matched no case crashed the surrounding loop.
    Simply returning from the generator ends iteration cleanly.
    """

    def __init__(self, value):
        self.value = value
        # Once a case matches, subsequent case() calls return True,
        # emulating C-style fall-through.
        self.fall = False

    def __iter__(self):
        """Return the match method once, then stop."""
        yield self.match
        # Generator end == StopIteration; never raise it explicitly (PEP 479).

    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
def socket_client_arm_state(Arm_state):
    """Report the arm's state back over the 'arm_state' ROS service.

    Blocks until the service is available, stores the service response in
    the module-global ``state_feedback`` and returns it; on a service
    failure the error is printed and None is returned implicitly.
    """
    global state_feedback
    rospy.wait_for_service('arm_state')
    try:
        arm_state_proxy = rospy.ServiceProxy('arm_state', arm_state)
        state_feedback = arm_state_proxy(Arm_state)
        return state_feedback
    except rospy.ServiceException as e:
        print("Service call failed: %s" % e)
##-----------client feedback arm state end----------
##------------server 端-------
def point_data(req):
    """Service handler: cache an incoming pose request on the shared
    ``pos`` class (as strings) and return the running response count."""
    global client_response
    # Stash every pose component, stringified, as module-wide shared state.
    for field in ('x', 'y', 'z', 'pitch', 'roll', 'yaw'):
        setattr(pos, field, '%s' % getattr(req, field))
    client_response += 1
    return client_response
##----------Arm Mode-------------###
def Arm_Mode(req):
    """Service handler: cache the requested arm mode settings on the
    shared ``socket_cmd`` class. Always returns 1 (acknowledged)."""
    settings = (('action', req.action), ('grip', req.grip), ('ra', req.ra),
                ('setvel', req.vel), ('setboth', req.both))
    for attr, raw in settings:
        setattr(socket_cmd, attr, int('%s' % raw))
    return 1
##-------Arm Speed Mode------------###
def Speed_Mode(req):
    """Service handler: cache the requested arm speed mode on the shared
    ``socket_cmd`` class. Always returns 1 (acknowledged)."""
    socket_cmd.Speedmode = int(str(req.Speedmode))
    return 1
def Grip_Mode(req):
    """Service handler: cache the requested gripper action on the shared
    ``socket_cmd`` class. Always returns 1 (acknowledged)."""
    socket_cmd.grip = int(str(req.grip))
    return 1
def socket_server():
    """Create the ROS server node and expose the arm services.

    Registers 'arm_mode', 'arm_pos' and 'speed_mode', then blocks in
    rospy.spin() until shutdown.
    """
    rospy.init_node(NAME)
    # Keep references so the services stay alive for the node's lifetime.
    services = (
        rospy.Service('arm_mode', arm_mode, Arm_Mode),      # arm mode data
        rospy.Service('arm_pos', arm_data, point_data),     # arm point data
        rospy.Service('speed_mode', speed_mode, Speed_Mode),  # speed mode data
    )
    print("Ready to connect")
    rospy.spin()
##------------server 端 end-------
##----------socket 封包傳輸--------------##
##-----------socket client--------
def socket_client():
    """Forward cached arm commands to the controller PC over a TCP socket.

    Connects to the controller, then loops: translate the current
    ``socket_cmd`` state into a TCP command string (via the TCP/Taskcmd
    helpers), send it, and feed the controller's busy/ready/finished
    reply back to ROS through socket_client_arm_state(). Exits (and
    triggers rospy shutdown) when the controller reports the strategy
    is finished.
    """
    global Arm_feedback, data
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('192.168.0.1', 8080))  # iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))  # iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(s.recv(1024))
    # Originally an interactive prompt ("press 1 to start, 3 to quit");
    # now hard-wired to start immediately.
    #start_input=int(input('start: 1, quit: 3 : '))
    start_input = 1
    if start_input == 1:
        while 1:
            # --------------- send arm command over the socket ---------------
            # ------- select mode -------
            for case in switch(socket_cmd.action):
                # ------- PtP Mode -------
                if case(Taskcmd.Action_Type.PtoP):
                    # Inner switch picks the control representation.
                    for case in switch(socket_cmd.setboth):
                        if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                            data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                            data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                            data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                    break
                # ------- Line Mode -------
                if case(Taskcmd.Action_Type.Line):
                    for case in switch(socket_cmd.setboth):
                        if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                            data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                            data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                            break
                        if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                            data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                            break
                    break
                # ------- set arm speed -------
                if case(Taskcmd.Action_Type.SetVel):
                    data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                    break
                # ------- set arm delay time -------
                if case(Taskcmd.Action_Type.Delay):
                    data = TCP.SetDelay(socket_cmd.grip,0)
                    break
                # ------- set arm fast / safe speed mode -------
                if case(Taskcmd.Action_Type.Mode):
                    data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                    break
            socket_cmd.action = 5  # reset to the idle/default mode
            s.send(data.encode('utf-8'))  # send as UTF-8 bytes
            feedback_str = s.recv(1024)
            # The controller reports the arm state in byte 2 of the reply.
            if str(feedback_str[2]) == '70':  # 'F': arm Ready for the next motion command
                Arm_feedback = 0
                socket_client_arm_state(Arm_feedback)
                #print("isbusy false")
            if str(feedback_str[2]) == '84':  # 'T': arm busy, cannot run the next command
                Arm_feedback = 1
                socket_client_arm_state(Arm_feedback)
                #print("isbusy true")
            if str(feedback_str[2]) == '54':  # '6': strategy finished
                Arm_feedback = 6
                socket_client_arm_state(Arm_feedback)
                print("shutdown")
            # --------------- send arm command end ---------------
            if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
                rospy.on_shutdown(myhook)
                break
    if start_input == 3:
        pass
    s.close()
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
## 多執行緒
def thread_test():
    """Worker-thread entry point: run the blocking socket client loop."""
    socket_client()
## 多執行序 end
def myhook():
    """Shutdown hook handed to rospy.on_shutdown()."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  # reset to the idle/default mode before any commands arrive
    # Run the socket client in a worker thread while the ROS services
    # run in the main thread.
    t = threading.Thread(target=thread_test)
    t.start()  # start the worker thread
    socket_server()
    t.join()
# Editor shortcuts (VS Code):
#   Ctrl+K Ctrl+C   add line comment
#   Ctrl+K Ctrl+U   remove line comment
#   Ctrl+] / Ctrl+[ indent / outdent line
tapper.py | from multiprocessing.queues import Queue
import sys
from threading import Thread
import uuid
import time
from mc_bin_client import MemcachedClient
import memcacheConstants
from memcached.helper.data_helper import MemcachedClientHelper
from resourceparser import ServerInfo
from membase.api.tap import TapConnection
class TapListener(Thread):
    """Thread that opens a TAP connection to *server* and prints every
    non-mutation TAP message it receives (Python 2)."""

    def __init__(self, queue, server):
        Thread.__init__(self)
        # Stored for callers; not consumed inside this class.
        self.queue = queue
        self.server = server
        self.stats = []
        # Set to True by the owner to make tap() stop receiving.
        self.aborted = False

    def run(self):
        """Thread body: just pump the TAP connection."""
        self.tap()

    def callback(self, identifier, cmd, extra, key, vb, val, cas):
        """Per-message TAP callback: print everything except mutations."""
        # if key == 'farshid':
        # if cmd != 65 and cmd != 68:
        # print cmd
        command_names = memcacheConstants.COMMAND_NAMES[cmd]
        if command_names != "CMD_TAP_MUTATION":
            print "%s: ``%s'' (vb:%d) -> (%d bytes from %s)" % (
                memcacheConstants.COMMAND_NAMES[cmd],
                key, vb, len(val), identifier)
            print extra, cas

    def tap(self):
        """Open the TAP connection (with a full-backfill flag) and
        receive until self.aborted is set."""
        print "starting tap process"
        t = TapConnection(self.server, 11210, callback=self.callback, clientId=str(uuid.uuid4()),
                          # opts={})
                          opts={memcacheConstants.TAP_FLAG_BACKFILL: 0xffffffff})
        while True and not self.aborted:
            t.receive()
sys.path.append("lib")
sys.path.append("pytests")
def tap(server, queue):
    """Blocking helper: attach a TapListener to *server* (in the calling
    thread, not a new one) and pump it until aborted."""
    listener = TapListener(queue, server)
    listener.tap()
# --- Ad-hoc load script (Python 2): write/read keys against a memcached
# --- node while a TapListener prints the TAP stream in the background.
queue = Queue(maxsize=10000)
server = ServerInfo()
server.ip = "10.17.12.20"
bucket = {'name': 'default', 'port': 11220, 'password': ''}
#vam = VBucketAwareMemcached(RestConnection(server), bucket)
#print vam.memcacheds
#print vam.vBucketMap
# 10 KiB payload of '*' characters.
payload = MemcachedClientHelper.create_value('*', 10240)
keys = ["key_%d" % (i) for i in range(4000)]
#keys = ["key_%s_%d" % (str(uuid.uuid4()), i) for i in range(4)]
total_size = 0
#mc = MemcachedClientHelper.create_memcached_client("172.16.75.128","default",11210,"default")
mc = MemcachedClient("10.17.12.20", 11210)
#for key in keys:
#    vam.memcached(key).set(key, 1, 0, payload)
#    total_size += len(key) + len(payload) + 200
#time.sleep(10)
#for i in range(0,1023):
#    mc.set_vbucket_state(i, 'active')
# Start the TAP listener in its own thread before generating load.
new_thread = TapListener(queue, server)
new_thread.start()
# 4000 outer iterations x 4000 keys: set then best-effort get on vbucket 0.
i = 0
while i < 4000:
    for key in keys:
        # vam.memcached(key).get(key)
        mc.set(key, 10, 0, payload, vbucket=0)
        # for key in keys:
        #     vam.memcached(key).get(key)
        #     mc.set(key, 1, 0, payload, vbucket=0)
        try:
            a,b,c = mc.get(key, vbucket=0)
            # print c
        except:
            pass
    i += 1
    # print i
#for key in keys:
#    vam.memcached(key).get(key)
#    mc.set(key, 1, 0, payload, vbucket=0)
#    mc.get(key, vbucket=0)
#for key in keys:
#    vam.memcached(key).get(key)
#    mc.delete(key,vbucket=0)
time.sleep(10)
#    vam.memcached(key).delete(key)
#vam.done()
# Ask the listener to stop, give it time to drain, then join.
new_thread.aborted = True
time.sleep(30)
new_thread.join()
print "total_size", total_size
#reader = Process(target=tap, args=(server, queue))
#reader.start()
#time.sleep(10)
#keys = []
#keys_count = 0
#was_empty = 0
#while was_empty < 50:
#    try:
#        key = queue.get(False, 5)
#
#        keys_count += 1
#        print key
#        keys.append(key)
#    except Empty:
#        print "exception thrown"
#        print "how many keys ? {0}".format(keys_count)
#        was_empty += 1
#
#reader.terminate()
#
|
serving.py | from threading import Thread
def thread_run_simple(app, args=(), debug=False):
    """Lightly modified version of run_simple, used by serving.run_in_thread().

    Prints the same startup banner as run_simple plus a non-main-thread
    note, then calls ``app(args)``.

    Args:
        app (Union[function, ABC]): Executed automatically with ``args``
            as its single argument.
        args (tuple, optional): Args passed to app(). Defaults to ().
        debug (bool, optional): Only affects the banner. Defaults to False.
    """
    banner = (
        "* Restarting with Werkzeug",
        "* Running in non-main thread",
        "* (https://packages.zerotwo36.repl.co/lib/werkzeug/",
        f"* Serving app '{app.__name__}'...",
        f"* Debug Mode: '{'on' if debug else 'off'}'",
    )
    for line in banner:
        print(line)
    app(args)
class ThreadPoolExecutor:
    """Registry of loop callables; populate via the ``@pool.loop`` decorator
    and start entries with ``pool.loops[i].start()``."""

    def __init__(self):
        # Loop wrappers and the raw functions, in registration order.
        self.loops = []
        self.funcs = []

    def loop(self, func):
        """Decorator: register *func* as a loop and return it unchanged.

        Bug fix: the previous implementation returned an inner ``wrap``
        function without ever calling it, so ``@pool.loop`` registered
        nothing (``pool.loops`` stayed empty, making the documented
        ``pool.loops[0].start()`` usage raise IndexError) and silently
        replaced the decorated function with ``wrap``.
        """
        self.loops.append(Loop(func))
        self.funcs.append(func)
        return func
class Loop:
    """Wrapper around a registered loop callable that knows how to start it."""

    def __init__(self, method) -> None:
        self.method = method

    def start(self, *, in_thread: bool = False):
        """Run the wrapped callable, optionally in a background thread.

        Bug fix: ``run_simple`` declares keyword-only parameters
        (``def run_simple(*, app, ...)``), so the previous positional
        call ``run_simple(self.method)`` always raised TypeError.
        """
        if in_thread:
            run_in_thread(self.method)
        else:
            run_simple(app=self.method)
def run_simple(*, app, args=(), debug=False):
    """
    Run a minimal application: print the startup banner, then call
    ``app(args)``. Roughly a fancy wrapper around

    ```py
    pool = serving.ThreadPoolExecutor()
    @pool.loop
    def mainloop():
        foo('bar')
    pool.loops[0].start()
    ```

    Args:
        app (Union[function, ABC]): Executed automatically with ``args``
            as its single argument.
        args (tuple, optional): Args passed to app(). Defaults to ().
        debug (bool, optional): Only affects the banner. Defaults to False.
    """
    for line in (
        "* Restarting with Werkzeug",
        "* (https://packages.zerotwo36.repl.co/lib/werkzeug/",
        f"* Serving app '{app.__name__}'...",
        f"* Debug Mode: '{'on' if debug else 'off'}'",
    ):
        print(line)
    app(args)
def placeholder(*args):
    """No-op app: pass to serving.run_simple() when you only want the
    startup banner printed. Accepts and ignores any arguments."""
    return None
def run_in_thread(app, args=(), debug=False):
    """Run *app* via thread_run_simple in a background thread.

    Bug fix: the previous implementation created the Thread but never
    started it (and discarded it), so nothing ever ran. The thread is
    now started and returned so callers can join() it if they wish.

    Args:
        app: callable executed with ``args`` as its single argument.
        args (tuple, optional): Args passed to app(). Defaults to ().
        debug (bool, optional): Forwarded to thread_run_simple's banner.

    Returns:
        threading.Thread: the started worker thread.
    """
    t = Thread(target=thread_run_simple, args=(app, args, debug))
    t.start()
    return t
producer.py | import logging
import threading
import time
import json
import random
import pandas as pd
import os
from pykafka import KafkaClient
from config.configurator import Configurator
from src.spark.context import AppSparkContext
class Producer(threading.Thread):
    """Kafka producer thread: periodically publishes *data* as JSON to a
    topic from a small pool of worker threads."""

    def __init__(self, configurator: Configurator, sc: AppSparkContext):
        # ``sc`` is accepted for interface compatibility; it is not used here.
        host = configurator['clusters']['kafka']['host']
        port = configurator['clusters']['kafka']['port']
        threading.Thread.__init__(self)
        # Signalled by stop(); checked by the worker loops.
        self.stop_event = threading.Event()
        self.client = KafkaClient(hosts=host + ':' + port)

    def stop(self) -> None:
        """Ask the worker loops to finish their current cycle and exit."""
        self.stop_event.set()

    def run(self, topic: str, data) -> None:
        """Start ``num_user`` daemon workers that each publish *data* to
        *topic* every 20 seconds until stop() is called.

        NOTE: this overrides Thread.run with extra required arguments, so
        it must be called directly rather than via Thread.start().
        """
        logging.info('Run Producer')
        topic = self.client.topics[bytes(topic, encoding='utf-8')]
        producer = topic.get_producer()
        num_user = 2

        def work():
            # Bug fix: the loop previously ran ``while True`` and ignored
            # stop_event, making stop() a no-op.
            while not self.stop_event.is_set():
                msg = json.dumps(data)
                logging.info('msg from producer: %s', str(len(msg)))
                producer.produce(bytes(msg, encoding='utf-8'))
                time.sleep(20)

        thread_list = [threading.Thread(target=work) for _ in range(num_user)]
        for thread in thread_list:
            # setDaemon() is deprecated; assign the attribute instead.
            thread.daemon = True
            thread.start()
|
uploader.py | #!/usr/bin/env python
import os
import re
import time
import stat
import json
import random
import ctypes
import inspect
import requests
import traceback
import threading
import subprocess
from collections import Counter
from selfdrive.swaglog import cloudlog
from selfdrive.loggerd.config import ROOT
from common.params import Params
from common.api import api_get
fake_upload = os.getenv("FAKEUPLOAD") is not None
def raise_on_thread(t, exctype):
    """Asynchronously raise exception class *exctype* inside thread *t*.

    Uses the CPython C-API PyThreadState_SetAsyncExc. *exctype* must be
    an exception class (not an instance).

    Raises:
        Exception: if *t* is not found among the active threads.
        TypeError: if *exctype* is not a class.
        ValueError: if the resolved thread id is invalid.
        SystemError: if the C call affected more than one thread state
            (the call is then reverted with exc=NULL).

    Fix: the explanatory docstring previously sat in the middle of the
    function body (after the lookup loop), where it was a dead statement.
    """
    # Resolve the Thread object to its CPython thread id.
    for ctid, tobj in threading._active.items():
        if tobj is t:
            tid = ctid
            break
    else:
        raise Exception("Could not find thread")

    if not inspect.isclass(exctype):
        raise TypeError("Only types can be raised (not instances)")

    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid),
                                                     ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    elif res != 1:
        # "if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, 0)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def listdir_with_creation_date(d):
    """Yield ``(ctime, name)`` for every entry of directory *d*.

    Entries whose stat() fails are still yielded, with ctime ``None``.
    """
    for name in os.listdir(d):
        full_path = os.path.join(d, name)
        try:
            created = os.stat(full_path)[stat.ST_CTIME]
        except OSError:
            cloudlog.exception("listdir_with_creation_date: stat failed?")
            yield (None, name)
        else:
            yield (created, name)
def listdir_by_creation_date(d):
    """Return the entries of directory *d* sorted by creation time."""
    stamped = listdir_with_creation_date(d)
    return [name for _, name in sorted(stamped)]
def clear_locks(root):
    """Delete every ``*.lock`` file one level below *root*.

    Failures for a given subdirectory are logged and skipped.
    """
    for logname in os.listdir(root):
        log_dir = os.path.join(root, logname)
        try:
            lock_files = [f for f in os.listdir(log_dir) if f.endswith(".lock")]
            for lock_name in lock_files:
                os.unlink(os.path.join(log_dir, lock_name))
        except OSError:
            cloudlog.exception("clear_locks failed")
def is_on_wifi():
  """Return True if the device's active network is WIFI (Android only).

  Shells out to the Android 'service' binary to query
  ConnectivityManager.getActiveNetworkInfo() and scans the parcel dump
  for the string "WIFI". Returns False if the call fails.

  NOTE: Python 2 only — uses str.decode("hex") on the parcel words.
  """
  # ConnectivityManager.getActiveNetworkInfo()
  try:
    result = subprocess.check_output(["service", "call", "connectivity", "2"]).strip().split("\n")
  except subprocess.CalledProcessError:
    return False

  # Reassemble the UTF-16-ish payload from the hex words of the parcel dump.
  data = ''.join(''.join(w.decode("hex")[::-1] for w in l[14:49].split()) for l in result[1:])
  return "\x00".join("WIFI") in data
def is_on_hotspot():
  """Return True if wlan0 has an address in a known phone-hotspot range.

  Android hotspots hand out 192.168.43.x; iOS hotspots hand out
  172.20.10.x. Any failure (no wlan0, no address, parse error) is
  treated as "not on a hotspot".
  """
  try:
    result = subprocess.check_output(["ifconfig", "wlan0"])
    result = re.findall(r"inet addr:((\d+\.){3}\d+)", result)[0][0]

    is_android = result.startswith('192.168.43.')
    is_ios = result.startswith('172.20.10.')
    return (is_android or is_ios)
  except:
    # Deliberately best-effort: any error means "assume not hotspot".
    return False
class Uploader(object):
  """Finds, compresses and uploads log/camera files under *root*.

  Files live in per-log subdirectories of root; a '*.lock' file in a
  subdirectory marks it as still being written and excludes it from
  upload. Successful uploads delete the local file.
  """

  def __init__(self, dongle_id, access_token, root):
    self.dongle_id = dongle_id
    self.access_token = access_token
    self.root = root

    # Set while killable_upload() is in flight; abort_upload() uses it.
    self.upload_thread = None

    # Outcome of the most recent do_upload() attempt.
    self.last_resp = None
    self.last_exc = None

  def clean_dirs(self):
    """Remove empty per-log subdirectories under root (best effort)."""
    try:
      for logname in os.listdir(self.root):
        path = os.path.join(self.root, logname)
        # remove empty directories
        if not os.listdir(path):
          os.rmdir(path)
    except OSError:
      cloudlog.exception("clean_dirs failed")

  def gen_upload_files(self):
    """Yield (name, key, fn) for every uploadable file under root.

    key is '<logdir>/<name>' (the remote object key) and fn the local
    path. Directories containing a '.lock' file are skipped entirely.
    Files are visited in directory-creation order.
    """
    if not os.path.isdir(self.root):
      return
    for logname in listdir_by_creation_date(self.root):
      path = os.path.join(self.root, logname)
      names = os.listdir(path)
      if any(name.endswith(".lock") for name in names):
        continue

      for name in names:
        key = os.path.join(logname, name)
        fn = os.path.join(path, name)

        yield (name, key, fn)

  def get_data_stats(self):
    """Return ({file name: count}, total bytes) over all uploadable files."""
    name_counts = Counter()
    total_size = 0
    for name, key, fn in self.gen_upload_files():
      name_counts[name] += 1
      total_size += os.stat(fn).st_size
    return dict(name_counts), total_size

  def next_file_to_compress(self):
    """Return (key, fn, 0) for the first not-yet-compressed '*log' file,
    or None when there is nothing left to compress."""
    for name, key, fn in self.gen_upload_files():
      if name.endswith("log"):
        return (key, fn, 0)
    return None

  def next_file_to_upload(self, with_video):
    """Return (key, fn, priority) for the next file to upload, or None.

    Priority order: compressed rlogs first, then (if with_video) rear
    and front camera files, then everything else except lock/tmp files.
    """
    # try to upload log files first
    for name, key, fn in self.gen_upload_files():
      if name == "rlog.bz2":
        return (key, fn, 0)

    if with_video:
      # then upload compressed rear and front camera files
      for name, key, fn in self.gen_upload_files():
        if name == "fcamera.hevc":
          return (key, fn, 1)
        elif name == "dcamera.hevc":
          return (key, fn, 2)

      # then upload other files
      for name, key, fn in self.gen_upload_files():
        if not name.endswith('.lock') and not name.endswith(".tmp"):
          return (key, fn, 3)

    return None

  def do_upload(self, key, fn):
    """Fetch a presigned URL for *key* and PUT the file at *fn* to it.

    Stores the HTTP response in self.last_resp; on error records
    (exception, traceback) in self.last_exc and re-raises. When the
    FAKEUPLOAD env var is set, no data is sent and a fake 200 response
    is recorded instead.
    """
    try:
      url_resp = api_get("v1.2/"+self.dongle_id+"/upload_url/", timeout=2, path=key, access_token=self.access_token)
      url_resp_json = json.loads(url_resp.text)
      url = url_resp_json['url']
      headers = url_resp_json['headers']
      cloudlog.info("upload_url v1.2 %s %s", url, str(headers))

      if fake_upload:
        cloudlog.info("*** WARNING, THIS IS A FAKE UPLOAD TO %s ***" % url)

        class FakeResponse(object):
          def __init__(self):
            self.status_code = 200

        self.last_resp = FakeResponse()
      else:
        with open(fn, "rb") as f:
          self.last_resp = requests.put(url, data=f, headers=headers, timeout=10)
    except Exception as e:
      self.last_exc = (e, traceback.format_exc())
      raise

  def normal_upload(self, key, fn):
    """Run do_upload synchronously, swallowing errors; return the
    response (or None on failure)."""
    self.last_resp = None
    self.last_exc = None

    try:
      self.do_upload(key, fn)
    except Exception:
      pass

    return self.last_resp

  def killable_upload(self, key, fn):
    """Like normal_upload, but runs do_upload in a thread so
    abort_upload() can interrupt it; returns the response or None."""
    self.last_resp = None
    self.last_exc = None

    self.upload_thread = threading.Thread(target=lambda: self.do_upload(key, fn))
    self.upload_thread.start()
    self.upload_thread.join()
    self.upload_thread = None

    return self.last_resp

  def abort_upload(self):
    """Forcefully stop an in-flight killable_upload by raising
    SystemExit inside its thread, then wait for it to finish."""
    thread = self.upload_thread
    if thread is None:
      return
    if not thread.is_alive():
      return
    raise_on_thread(thread, SystemExit)
    thread.join()

  def compress(self, key, fn):
    """bzip2-compress a '*log' file in place (via an external process).

    Returns the updated (key, fn) with the '.bz2' extension appended,
    or False if compression failed. Non-log files pass through with
    key/fn unchanged.
    """
    # write out the bz2 compress
    if fn.endswith("log"):
      ext = ".bz2"
      cloudlog.info("compressing %r to %r", fn, fn+ext)
      if os.system("nice -n 19 bzip2 -c %s > %s.tmp && mv %s.tmp %s%s && rm %s" % (fn, fn, fn, fn, ext, fn)) != 0:
        cloudlog.exception("upload: bzip2 compression failed")
        return False

      # assuming file is named properly
      key += ext
      fn += ext

    return (key, fn)

  def upload(self, key, fn):
    """Upload one file and delete it locally on success.

    Zero-byte files are just deleted (can't be uploaded). Returns True
    on success, False otherwise; empty directories are cleaned either way.
    """
    try:
      sz = os.path.getsize(fn)
    except OSError:
      cloudlog.exception("upload: getsize failed")
      return False

    cloudlog.event("upload", key=key, fn=fn, sz=sz)

    cloudlog.info("checking %r with size %r", key, sz)

    if sz == 0:
      # can't upload files of 0 size
      os.unlink(fn) # delete the file
      success = True
    else:
      cloudlog.info("uploading %r", fn)
      # stat = self.killable_upload(key, fn)
      stat = self.normal_upload(key, fn)
      if stat is not None and stat.status_code in (200, 201):
        cloudlog.event("upload_success", key=key, fn=fn, sz=sz)
        os.unlink(fn) # delete the file
        success = True
      else:
        cloudlog.event("upload_failed", stat=stat, exc=self.last_exc, key=key, fn=fn, sz=sz)
        success = False

    self.clean_dirs()

    return success
def uploader_fn(exit_event):
  """Uploader main loop.

  Repeatedly: compress any finished raw logs, then (when the network
  policy allows it) upload the next pending file, backing off
  exponentially on failure. Returns when *exit_event* is set.

  Raises:
    Exception: if DongleId or AccessToken params are missing.
  """
  cloudlog.info("uploader_fn")

  params = Params()
  dongle_id, access_token = params.get("DongleId"), params.get("AccessToken")

  if dongle_id is None or access_token is None:
    cloudlog.info("uploader MISSING DONGLE_ID or ACCESS_TOKEN")
    raise Exception("uploader can't start without dongle id and access token")

  uploader = Uploader(dongle_id, access_token, ROOT)

  backoff = 0.1
  while True:
    # Upload over cellular only when explicitly enabled; otherwise
    # require wifi that is not a phone hotspot.
    allow_cellular = (params.get("IsUploadVideoOverCellularEnabled") != "0")
    on_hotspot = is_on_hotspot()
    on_wifi = is_on_wifi()
    should_upload = allow_cellular or (on_wifi and not on_hotspot)

    if exit_event.is_set():
      return

    # Compression happens regardless of network state.
    d = uploader.next_file_to_compress()
    if d is not None:
      key, fn, _ = d
      uploader.compress(key, fn)
      continue

    if not should_upload:
      time.sleep(5)
      continue

    d = uploader.next_file_to_upload(with_video=True)
    if d is None:
      time.sleep(5)
      continue

    key, fn, _ = d
    cloudlog.event("uploader_netcheck", allow_cellular=allow_cellular, is_on_hotspot=on_hotspot, is_on_wifi=on_wifi)
    cloudlog.info("to upload %r", d)
    success = uploader.upload(key, fn)
    if success:
      backoff = 0.1
    else:
      # Exponential backoff with jitter, capped at 120 s.
      cloudlog.info("backoff %r", backoff)
      time.sleep(backoff + random.uniform(0, backoff))
      backoff = min(backoff*2, 120)
    cloudlog.info("upload done, success=%r", success)
def main(gctx=None):
    """CLI entry point: run the uploader loop until the process is killed.

    ``gctx`` is accepted for caller compatibility but unused here. The
    exit event handed to uploader_fn is never set.
    """
    never_exit = threading.Event()
    uploader_fn(never_exit)


if __name__ == "__main__":
    main()
|
wsdump.py | #!C:\Users\VmAdm\PycharmProjects\ledmatrix\venv\Scripts\python.exe
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return sys.stdin's encoding, lowercased, defaulting to 'utf-8'."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action for '-v' style verbosity.

    Accepts a bare flag (level 1), a numeric value ('-v2'), or repeated
    v's ('-vv' -> level 2 + 1 per extra 'v' beyond the first in the value).
    """

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build the wsdump command-line interface and parse sys.argv."""
    parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    add = parser.add_argument
    add("url", metavar="ws_url",
        help="websocket url. ex. ws://echo.websocket.org/")
    add("-p", "--proxy",
        help="proxy url. ex. http://127.0.0.1:8080")
    add("-v", "--verbose", default=0, nargs='?', action=VAction,
        dest="verbose",
        help="set verbose mode. If set to 1, show opcode. "
             "If set to 2, enable to trace websocket module")
    add("-n", "--nocert", action='store_true',
        help="Ignore invalid SSL cert")
    add("-r", "--raw", action="store_true",
        help="raw output")
    add("-s", "--subprotocols", nargs='*',
        help="Set subprotocols")
    add("-o", "--origin",
        help="Set origin")
    add("--eof-wait", default=0, type=int,
        help="wait time(second) after 'EOF' received.")
    add("-t", "--text",
        help="Send initial text")
    add("--timings", action="store_true",
        help="Print timings in seconds")
    add("--headers",
        help="Set custom headers. Use ',' as separator")
    return parser.parse_args()
class RawInput:
    """Prompt helper normalizing console input across Python 2/3 (via six).

    Input is re-encoded to UTF-8 bytes when the console encoding differs
    or the line arrives as text.
    """

    def raw_input(self, prompt):
        line = input(prompt) if six.PY3 else raw_input(prompt)

        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")

        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """REPL-style console: received messages print in blue above a '> ' prompt."""

    def write(self, data):
        out = sys.stdout
        # Clear the current line, print the message in blue, restore the prompt.
        out.write("\033[2K\033[E")
        out.write("\033[34m< " + data + "\033[39m")
        out.write("\n> ")
        out.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Pipe-friendly console: messages go straight to stdout, one per line."""

    def write(self, data):
        stream = sys.stdout
        stream.write(data)
        stream.write("\n")
        stream.flush()

    def read(self):
        return self.raw_input("")
def main():
    """Entry point: connect to the websocket, start a daemon receiver
    thread, and forward console input until EOF or Ctrl+C."""
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # Disable certificate and hostname verification on request.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        options['header'] = list(map(str.strip, args.headers.split(',')))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")

    def recv():
        """Receive one frame; answer CLOSE/PING control frames and
        return (opcode, data) — data is None on close/error."""
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data

        return frame.opcode, frame.data

    def recv_ws():
        """Receiver loop: decode (and try to decompress) each message,
        print it to the console, and stop on a CLOSE frame."""
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            # Binary payloads: try gzip (magic 1f 8b) then raw-deflate.
            if isinstance(data, bytes) and len(data)>2 and data[:2] == b'\037\213':  # gzip magick
                try:
                    data = "[gzip] " + str(gzip.decompress(data), "utf-8")
                except:
                    pass
            elif isinstance(data, bytes):
                try:
                    data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
                except:
                    pass

            if isinstance(data, bytes):
                data = repr(data)

            if args.verbose:
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            else:
                msg = data

            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)

            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break

    # Receive in the background so the console read loop below stays responsive.
    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()

    if args.text:
        ws.send(args.text)

    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            # Optionally linger so late server messages still print.
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Show just the error message instead of a full traceback.
        print(e)
|
util.py | """Utility functions.
"""
import traceback
from queue import Queue
import os
import re
import sys
import base64
import time
import platform
import logging
import threading
import subprocess
import serial
import serial.tools.list_ports
import pkg_resources
from meshtastic.supported_device import supported_devices
"""Some devices such as a seger jlink we never want to accidentally open"""
blacklistVids = dict.fromkeys([0x1366])
def quoteBooleans(a_string):
    """Return a_string with every ": true" / ": false" wrapped in single quotes.

    Used to keep YAML/JSON-ish dumps from re-parsing booleans.
    """
    result = a_string
    for word in ("true", "false"):
        result = result.replace(f": {word}", f": '{word}'")
    return result
def genPSK256():
    """Generate a random 256-bit (32 byte) preshared key using the OS CSPRNG."""
    return os.urandom(32)
def fromPSK(valstr):
    """Parse a user-supplied PSK string.

    Accepts the keywords "random" (generate a fresh 256-bit key), "none"
    (the no-encryption PSK), "default" (the default channel PSK) and
    "simpleN" (single-byte keys); anything else is handed to fromStr().
    """
    if valstr == "random":
        return genPSK256()
    if valstr == "none":
        return bytes([0])  # the 'no encryption' PSK
    if valstr == "default":
        return bytes([1])  # the default channel PSK
    if valstr.startswith("simple"):
        # single-byte encodings: simple0 -> 1, simple1 -> 2, ...
        return bytes([int(valstr[6:]) + 1])
    return fromStr(valstr)
def fromStr(valstr):
    """Try to parse as int, float or bool (and fallback to a string as last resort)

    Returns: an int, bool, float, str or byte array (for strings of hex digits)

    Args:
        valstr (string): A user provided string
    """
    if not valstr:
        # An empty string is treated as empty bytes.
        return bytes()
    if valstr.startswith('0x'):
        # Hex digits after the 0x prefix become raw bytes
        # (convert back with asBytes.decode('utf-8') if needed).
        return bytes.fromhex(valstr[2:])
    lowered = valstr.lower()
    if lowered in {"t", "true", "yes"}:
        return True
    if lowered in {"f", "false", "no"}:
        return False
    # Fall back to numeric parses, then the raw string.
    for cast in (int, float):
        try:
            return cast(valstr)
        except ValueError:
            continue
    return valstr
def pskToString(psk: bytes):
    """Given an array of PSK bytes, decode them into a human readable (but privacy protecting) string"""
    length = len(psk)
    if length == 0:
        return "unencrypted"
    if length > 1:
        # Real key material is never shown.
        return "secret"
    # Single-byte PSKs encode well-known keys.
    b = psk[0]
    if b == 0:
        return "unencrypted"
    if b == 1:
        return "default"
    return f"simple{b - 1}"
def stripnl(s):
    """Collapse newlines and runs of whitespace in s into single spaces."""
    return ' '.join(str(s).replace("\n", " ").split())
def fixme(message):
    """Raise an exception for things that needs to be fixed"""
    # Deliberately generic: reaching this call is itself the bug being flagged.
    raise Exception(f"FIXME: {message}")
def catchAndIgnore(reason, closure):
    """Call a closure but if it throws an exception print it and continue

    NOTE(review): this catches BaseException, so KeyboardInterrupt and
    SystemExit raised inside `closure` are swallowed (only logged) too.
    """
    try:
        closure()
    except BaseException as ex:
        logging.error(f"Exception thrown in {reason}: {ex}")
def findPorts(eliminate_duplicates=False):
    """Return a sorted list of serial device paths that may be Meshtastic nodes.

    Ports without a USB vendor id, or with a blacklisted vendor id, are
    excluded.  When eliminate_duplicates is True the result is filtered
    through eliminate_duplicate_port().

    Returns:
        list -- a list of device paths
    """
    ports = [
        p.device
        for p in serial.tools.list_ports.comports()
        if p.vid is not None and p.vid not in blacklistVids
    ]
    ports.sort()
    if eliminate_duplicates:
        ports = eliminate_duplicate_port(ports)
    return ports
class dotdict(dict):
    """dot.notation access to dictionary attributes.

    NOTE(review): because __getattr__ is dict.get, reading a missing
    attribute returns None instead of raising AttributeError.
    """
    __getattr__ = dict.get
    __setattr__ = dict.__setitem__
    __delattr__ = dict.__delitem__
class Timeout:
    """Simple polling timeout helper."""

    def __init__(self, maxSecs=20):
        # expireTime is an absolute time.time() deadline, armed by reset().
        self.expireTime = 0
        self.sleepInterval = 0.1
        self.expireTimeout = maxSecs

    def reset(self):
        """Arm (or re-arm) the deadline maxSecs from now."""
        self.expireTime = time.time() + self.expireTimeout

    def waitForSet(self, target, attrs=()):
        """Poll until every name in attrs is a truthy attribute of target.

        Returns True once they are all set, False on timeout.
        """
        self.reset()
        while time.time() < self.expireTime:
            if all(getattr(target, name, None) for name in attrs):
                return True
            time.sleep(self.sleepInterval)
        return False
class DeferredExecution():
    """Runs queued closures, in order, on a dedicated daemon thread."""

    def __init__(self, name=None):
        self.queue = Queue()
        self.thread = threading.Thread(target=self._run, args=(), name=name)
        self.thread.daemon = True
        self.thread.start()

    def queueWork(self, runnable):
        """Schedule runnable to be called on the worker thread."""
        self.queue.put(runnable)

    def _run(self):
        # Service the queue forever; a failing closure must not kill the worker.
        while True:
            try:
                self.queue.get()()
            except:
                logging.error(f"Unexpected error in deferred execution {sys.exc_info()[0]}")
                print(traceback.format_exc())
def our_exit(message, return_value = 1):
    """Print the message and return a value.
    return_value defaults to 1 (non-successful)
    """
    print(message)
    # sys.exit raises SystemExit; execution never continues past this line.
    sys.exit(return_value)
def support_info():
    """Print environment details useful when filing a CLI/library issue."""
    for header in ('',
                   'If having issues with meshtastic cli or python library',
                   'or wish to make feature requests, visit:',
                   'https://github.com/meshtastic/Meshtastic-python/issues',
                   'When adding an issue, be sure to include the following info:'):
        print(header)
    print(f' System: {platform.system()}')
    print(f' Platform: {platform.platform()}')
    print(f' Release: {platform.uname().release}')
    print(f' Machine: {platform.uname().machine}')
    print(f' Encoding (stdin): {sys.stdin.encoding}')
    print(f' Encoding (stdout): {sys.stdout.encoding}')
    the_version = pkg_resources.get_distribution("meshtastic").version
    print(f' meshtastic: v{the_version}')
    print(f' Executable: {sys.argv[0]}')
    print(f' Python: {platform.python_version()} {platform.python_implementation()} {platform.python_compiler()}')
    print('')
    print('Please add the output from the command: meshtastic --info')
def remove_keys_from_dict(keys, adict):
    """Remove every key in keys from adict, recursing into nested dicts.

    The dict is modified in place and also returned for convenience.

    Args:
        keys: iterable of keys to delete
        adict: dictionary to prune
    """
    for key in keys:
        # pop() with a default replaces the broad try/except around `del`.
        adict.pop(key, None)
    for val in adict.values():
        if isinstance(val, dict):
            remove_keys_from_dict(keys, val)
    return adict
def hexstr(barray):
    """Format an iterable of byte values as colon-separated lowercase hex."""
    return ":".join("%02x" % byte for byte in barray)
def ipstr(barray):
    """Format an iterable of byte values as a dotted-decimal string."""
    return ".".join(str(octet) for octet in barray)
def readnet_u16(p, offset):
    """Read a big-endian (network byte order) unsigned 16-bit value from p at offset."""
    return (p[offset] << 8) | p[offset + 1]
def convert_mac_addr(val):
    """Convert the base 64 encoded value to a mac address

    val - base64 encoded value (ex: '/c0gFyhb')
    returns: a string formatted like a mac address (ex: 'fd:cd:20:17:28:5b');
    values that already look like a mac address are returned unchanged
    """
    mac_pattern = "[0-9a-f]{2}([-:]?)[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$"
    if re.match(mac_pattern, val):
        # Already formatted as a mac address; nothing to do.
        return val
    return hexstr(base64.b64decode(val))
def snake_to_camel(a_string):
    """convert snake_case to camelCase"""
    head, *tail = a_string.split('_')
    # First word keeps its case; the rest are title-cased and joined.
    return head + ''.join(part.title() for part in tail)
def camel_to_snake(a_string):
    """convert camelCase to snake_case"""
    pieces = []
    for ch in a_string:
        if ch.isupper():
            pieces.append('_' + ch.lower())
        else:
            pieces.append(ch)
    # lstrip removes the underscore a leading capital would introduce.
    return ''.join(pieces).lstrip('_')
def detect_supported_devices():
    """Detect supported devices attached to this host, based on USB vendor id.

    Returns: a set of supported_device entries that may be present.

    The three per-OS branches used to be near-identical copies; they now
    share the command-output/pattern-matching loop.
    """
    system = platform.system()
    # Per platform: the shell command that lists USB devices, and the regex
    # template (formatted with the vendor id) to search its output for.
    if system == "Linux":
        # Bus 001 Device 091: ID 10c4:ea60 Silicon Labs CP210x UART Bridge
        command = 'lsusb'
        pattern = ' {vid}:'
    elif system == "Windows":
        command = ('powershell.exe "[Console]::OutputEncoding = [Text.UTF8Encoding]::UTF8;'
                   'Get-PnpDevice -PresentOnly | Format-List"')
        pattern = 'DeviceID.*{vid_upper}&'
    elif system == "Darwin":
        # Note: If in boot mode, the 19003 reports same product ID as 5005.
        command = 'system_profiler SPUSBDataType'
        pattern = 'Vendor ID: 0x{vid}'
    else:
        return set()

    _, output = subprocess.getstatusoutput(command)
    possible_devices = set()
    for vid in get_unique_vendor_ids():
        search = pattern.format(vid=vid, vid_upper=vid.upper())
        if re.search(search, output, re.MULTILINE):
            possible_devices.update(get_devices_with_vendor_id(vid))
    return possible_devices
def detect_windows_needs_driver(sd, print_reason=False):
    """Return True when Windows reports a failed driver install for sd's vendor id.

    sd is a supported-device entry; on non-Windows hosts (or when sd is
    falsy) the answer is always False.  When print_reason is True the raw
    PowerShell output is printed for a positive detection.
    """
    if not sd or platform.system() != "Windows":
        return False
    # Get-PnpDevice | Where-Object{ ($_.DeviceId -like '*10C4*')} | Format-List
    command = ('powershell.exe "[Console]::OutputEncoding = [Text.UTF8Encoding]::UTF8; '
               'Get-PnpDevice | Where-Object{ ($_.DeviceId -like '
               f"'*{sd.usb_vendor_id_in_hex.upper()}*'"
               ')} | Format-List"')
    _, sp_output = subprocess.getstatusoutput(command)
    found = re.search('CM_PROB_FAILED_INSTALL', sp_output, re.MULTILINE) is not None
    if found and print_reason:
        print(sp_output)
    return found
def eliminate_duplicate_port(ports):
    """Collapse a known pair of duplicate serial ports to the single usable one.

    Some adapters appear as two ports; when exactly two ports form one of
    the known pairs, only the preferred port is kept.

    examples:
    Ports: ['/dev/cu.usbserial-1430', '/dev/cu.wchusbserial1430'] => ['/dev/cu.wchusbserial1430']
    Ports: ['/dev/cu.usbmodem11301', '/dev/cu.wchusbserial11301'] => ['/dev/cu.wchusbserial11301']
    Ports: ['/dev/cu.SLAB_USBtoUART', '/dev/cu.usbserial-0001'] => ['/dev/cu.usbserial-0001']
    """
    if len(ports) != 2:
        # Only exact pairs are candidates for de-duplication.
        return ports
    ports.sort()
    new_ports = []
    if 'usbserial' in ports[0] and 'wchusbserial' in ports[1]:
        # Same device if the numeric suffixes agree.
        if ports[0].replace("usbserial-", "") == ports[1].replace("wchusbserial", ""):
            new_ports.append(ports[1])
    elif 'usbmodem' in ports[0] and 'wchusbserial' in ports[1]:
        if ports[0].replace("usbmodem", "") == ports[1].replace("wchusbserial", ""):
            new_ports.append(ports[1])
    elif 'SLAB_USBtoUART' in ports[0] and 'usbserial' in ports[1]:
        new_ports.append(ports[1])
    else:
        new_ports = ports
    return new_ports
def is_windows11():
    """Return True when running on Windows 11 (build 22000 or later)."""
    if platform.system() != "Windows":
        return False
    if float(platform.release()) < 10.0:
        return False
    # Windows 11 still reports release "10"; the build number in the
    # version string (third dotted field) is what distinguishes it.
    patch = platform.version().split('.')[2]
    # in case they add some number suffix later, just use the first 5 chars
    patch = patch[:5]
    try:
        return int(patch) >= 22000
    except Exception as e:
        print(f'problem detecting win11 e:{e}')
        return False
def get_unique_vendor_ids():
    """Return the set of distinct USB vendor ids across all supported devices."""
    return {d.usb_vendor_id_in_hex for d in supported_devices if d.usb_vendor_id_in_hex}
def get_devices_with_vendor_id(vid):
    """Return the set of supported devices whose USB vendor id equals vid."""
    return {d for d in supported_devices if d.usb_vendor_id_in_hex == vid}
def _list_dev_ports(baseport):
    """List /dev/<baseport>* device paths (POSIX hosts only)."""
    # stderr is discarded so a non-matching pattern just yields no ports
    _, ls_output = subprocess.getstatusoutput(f'ls -al /dev/{baseport}* 2> /dev/null')
    if not ls_output:
        return []
    # the last space-separated field of each `ls -al` line is the path
    return [line.split(' ')[-1] for line in ls_output.split('\n')]


def active_ports_on_supported_devices(sds, eliminate_duplicates=False):
    """Return the set of active serial ports for the supplied supported devices.

    Args:
        sds: iterable of supported-device entries
        eliminate_duplicates: when True, collapse known duplicate port
            pairs via eliminate_duplicate_port()

    The Linux and Darwin branches used to be byte-identical copies; they
    now share _list_dev_ports().
    """
    system = platform.system()
    # Figure out what possible base ports there are for this OS.
    baseports = set()
    for d in sds:
        if system == "Linux":
            baseports.add(d.baseport_on_linux)
        elif system == "Darwin":
            baseports.add(d.baseport_on_mac)
        elif system == "Windows":
            baseports.add(d.baseport_on_windows)

    ports = set()
    if system in ("Linux", "Darwin"):
        for bp in baseports:
            ports.update(_list_dev_ports(bp))
    elif system == "Windows":
        # Base ports are not usable paths on Windows; query PnP per device.
        for d in sds:
            ports.update(detect_windows_port(d))

    if eliminate_duplicates:
        # eliminate_duplicate_port() wants a list; the extra sort the old
        # code performed was dead because the result went back into a set.
        ports = set(eliminate_duplicate_port(sorted(ports)))
    return ports
def detect_windows_port(sd):
    """Return the set of COM port names Windows reports for sd's vendor id.

    Empty on non-Windows hosts or when sd is falsy.
    """
    if not sd or platform.system() != "Windows":
        return set()
    command = ('powershell.exe "[Console]::OutputEncoding = [Text.UTF8Encoding]::UTF8;'
               'Get-PnpDevice -PresentOnly | Where-Object{ ($_.DeviceId -like '
               f"'*{sd.usb_vendor_id_in_hex.upper()}*'"
               ')} | Format-List"')
    _, sp_output = subprocess.getstatusoutput(command)
    # Port names appear in the output as "(COMn)".
    return {f'COM{num}' for num in re.compile(r'\(COM(.*)\)').findall(sp_output)}
|
autosave.py | #!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
import asyncio
import logging
import os
import time
from pathlib import Path
from subprocess import PIPE, Popen
from typing import Any
import aioprocessing
from aiohttp import web
from forest import fuse, mem, utils
_memfs_process = None
# this is the first thing that runs on aiohttp app startup, before datastore.download
async def start_memfs(app: web.Application) -> None:
    """
    mount a filesystem in userspace to store data
    the fs contents are stored in memory, so that our keys never touch a disk
    this means we can log signal-cli's interactions with fs,
    and store them in mem_queue.
    """
    logging.info("starting memfs")
    app["mem_queue"] = mem_queue = aioprocessing.AioQueue()
    if not os.path.exists("/dev/fuse"):
        # you *must* have fuse already loaded if running locally
        proc = Popen(
            ["/usr/sbin/insmod", "/app/fuse.ko"],
            stdout=PIPE,
            stderr=PIPE,
        )
        proc.wait()
        (stdout, stderr) = proc.communicate()  # pylint: disable=unused-variable
        if stderr:
            raise Exception(
                f"Could not load fuse module! You may need to recompile.\t\n{stderr.decode()}"
            )

    def memfs_proc(path: str = "data") -> Any:
        """Start the memfs process"""
        mountpath = Path(utils.ROOT_DIR) / path
        logging.info("Starting memfs with PID: %s on dir: %s", os.getpid(), mountpath)
        backend = mem.Memory(logqueue=mem_queue)  # type: ignore
        logging.info("mountpoint already exists: %s", mountpath.exists())
        Path(utils.ROOT_DIR).mkdir(exist_ok=True, parents=True)
        return fuse.FUSE(operations=backend, mountpoint=utils.ROOT_DIR + "/data")  # type: ignore

    async def launch() -> None:
        # BUG FIX: without the `global` declaration the assignment below
        # created a dead local and the module-level _memfs_process stayed None.
        global _memfs_process
        logging.info("about to launch memfs with aioprocessing")
        memfs = aioprocessing.AioProcess(target=memfs_proc)
        memfs.start()  # pylint: disable=no-member
        app["memfs"] = memfs
        _memfs_process = memfs

    await launch()
# input, operation, path, arguments, caller
# ["->", "fsync", "/+14703226669", "(1, 2)", "/app/signal-cli", ["/app/signal-cli", "--config", "/app", "--username=+14703226669", "--output=json", "stdio", ""], 0, 0, 523]
# ["<-", "fsync", "0"]
async def start_memfs_monitor(app: web.Application) -> None:
    """
    monitor the memfs activity queue for file saves, sync with supabase
    """

    async def upload_after_signalcli_writes() -> None:
        queue = app.get("mem_queue")
        if not queue:
            logging.info("no mem_queue, nothing to monitor")
            return
        logging.info("monitoring memfs")
        counter = 0
        # BUG FIX: the old periodic-log check was `time.time() % (60*3) == 0`,
        # which virtually never holds for a float timestamp, so the summary
        # line never fired.  Track an explicit deadline instead.
        next_report = time.time() + 60 * 3
        while True:
            queue_item = await queue.coro_get()
            # iff fsync triggered by signal-cli
            if (
                queue_item[0:2] == ["->", "fsync"]
                and queue_item[5][0] == utils.ROOT_DIR + "/signal-cli"
            ):
                # /+14703226669
                # file_to_sync = queue_item[2]
                maybe_session = app.get("session")
                if maybe_session:
                    counter += 1
                    if time.time() >= next_report:
                        logging.info("background syncs in the past ~3min: %s", counter)
                        counter = 0
                        next_report = time.time() + 60 * 3
                    await maybe_session.datastore.upload()

    app["mem_task"] = asyncio.create_task(upload_after_signalcli_writes())
|
utils.py | #!/usr/bin/env python
"""
General Utilities
(part of web.py)
"""
from __future__ import print_function
import datetime
import os
import re
import subprocess
import sys
import threading
import time
import traceback
from threading import local as threadlocal
from .py3helpers import (
PY2,
imap,
is_iter,
iteritems,
itervalues,
string_types,
text_type,
)
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
__all__ = [
"Storage",
"storage",
"storify",
"Counter",
"counter",
"iters",
"rstrips",
"lstrips",
"strips",
"safeunicode",
"safestr",
"timelimit",
"Memoize",
"memoize",
"re_compile",
"re_subm",
"group",
"uniq",
"iterview",
"IterBetter",
"iterbetter",
"safeiter",
"safewrite",
"dictreverse",
"dictfind",
"dictfindall",
"dictincr",
"dictadd",
"requeue",
"restack",
"listget",
"intget",
"datestr",
"numify",
"denumify",
"commify",
"dateify",
"nthstr",
"cond",
"CaptureStdout",
"capturestdout",
"Profile",
"profile",
"tryall",
"ThreadedDict",
"threadeddict",
"autoassign",
"to36",
"sendmail",
]
class Storage(dict):
    """
    A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`.
    >>> o = storage(a=1)
    >>> o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> o['a']
    2
    >>> del o.a
    >>> o.a
    Traceback (most recent call last):
    ...
    AttributeError: 'a'
    """

    def __getattr__(self, key):
        # Attribute access falls back to item access; a missing key becomes
        # AttributeError so Storage behaves like a normal object.
        try:
            return self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __repr__(self):
        return "<Storage " + dict.__repr__(self) + ">"

# Lowercase alias used throughout web.py code.
storage = Storage
def storify(mapping, *requireds, **defaults):
    """
    Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    d doesn't have all of the keys in `requireds` and using the default
    values for keys found in `defaults`.
    For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
    `storage({'a':1, 'b':2, 'c':3})`.
    If a `storify` value is a list (e.g. multiple values in a form submission),
    `storify` returns the last element of the list, unless the key appears in
    `defaults` as a list. Thus:
    >>> storify({'a':[1, 2]}).a
    2
    >>> storify({'a':[1, 2]}, a=[]).a
    [1, 2]
    >>> storify({'a':1}, a=[]).a
    [1]
    >>> storify({}, a=[]).a
    []
    Similarly, if the value has a `value` attribute, `storify will return _its_
    value, unless the key appears in `defaults` as a dictionary.
    >>> storify({'a':storage(value=1)}).a
    1
    >>> storify({'a':storage(value=1)}, a={}).a
    <Storage {'value': 1}>
    >>> storify({}, a={}).a
    {}
    """
    # `_unicode` is an out-of-band option, not a default value for a key.
    _unicode = defaults.pop("_unicode", False)

    # if _unicode is callable object, use it convert a string to unicode.
    to_unicode = safeunicode
    if _unicode is not False and hasattr(_unicode, "__call__"):
        to_unicode = _unicode

    def unicodify(s):
        # Only convert when unicode conversion was requested via _unicode.
        if _unicode and isinstance(s, str):
            return to_unicode(s)
        else:
            return s

    def getvalue(x):
        # File uploads (objects with both .file and .value) keep their raw value.
        if hasattr(x, "file") and hasattr(x, "value"):
            return x.value
        elif hasattr(x, "value"):
            return unicodify(x.value)
        else:
            return unicodify(x)

    stor = Storage()
    for key in requireds + tuple(mapping.keys()):
        # A missing required key raises KeyError here (documented contract).
        value = mapping[key]
        if isinstance(value, list):
            if isinstance(defaults.get(key), list):
                value = [getvalue(x) for x in value]
            else:
                # Multiple form submissions collapse to the last one.
                value = value[-1]
        if not isinstance(defaults.get(key), dict):
            value = getvalue(value)
        if isinstance(defaults.get(key), list) and not isinstance(value, list):
            value = [value]
        setattr(stor, key, value)

    for (key, value) in iteritems(defaults):
        result = value
        if hasattr(stor, key):
            result = stor[key]
        # A () default forces the stored value to be wrapped in a tuple.
        if value == () and not isinstance(result, tuple):
            result = (result,)
        setattr(stor, key, result)

    return stor
class Counter(storage):
    """Keeps count of how many times something is added.
    >>> c = counter()
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('y')
    >>> c['y']
    1
    >>> c['x']
    5
    >>> c.most()
    ['x']
    """

    def add(self, n):
        """Count one more occurrence of n."""
        self.setdefault(n, 0)
        self[n] += 1

    def most(self):
        """Returns the keys with maximum count."""
        m = max(itervalues(self))
        return [k for k, v in iteritems(self) if v == m]

    def least(self):
        """Returns the keys with minimum count."""
        # BUG FIX: `self.itervalues()` does not exist on Python 3 dicts and
        # raised AttributeError; use the py3helpers itervalues() like most().
        m = min(itervalues(self))
        return [k for k, v in iteritems(self) if v == m]

    def percent(self, key):
        """Returns what percentage a certain key is of all entries.
        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.percent('x')
        0.75
        >>> c.percent('y')
        0.25
        """
        return float(self[key]) / sum(self.values())

    def sorted_keys(self):
        """Returns keys sorted by value.
        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.sorted_keys()
        ['x', 'y']
        """
        return sorted(self.keys(), key=lambda k: self[k], reverse=True)

    def sorted_values(self):
        """Returns values sorted by value.
        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.sorted_values()
        [2, 1]
        """
        return [self[k] for k in self.sorted_keys()]

    def sorted_items(self):
        """Returns items sorted by value.
        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.sorted_items()
        [('x', 2), ('y', 1)]
        """
        return [(k, self[k]) for k in self.sorted_keys()]

    def __repr__(self):
        return "<Counter " + dict.__repr__(self) + ">"

counter = Counter
# `iters` is the tuple of built-in iterable container types, wrapped in a
# trivial tuple subclass purely so a docstring can be attached to it.  It is
# used with isinstance() checks (see _strips).
iters = [list, tuple, set, frozenset]

class _hack(tuple):
    pass

iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
def _strips(direction, text, remove):
    # Shared implementation for lstrips/rstrips/strips.  `remove` may be a
    # single string or any iterable of strings (see `iters`); iterables are
    # applied in order, each against the accumulated result.
    if isinstance(remove, iters):
        for subr in remove:
            text = _strips(direction, text, subr)
        return text

    if direction == "l":
        if text.startswith(remove):
            return text[len(remove) :]
    elif direction == "r":
        if text.endswith(remove):
            return text[: -len(remove)]
    else:
        raise ValueError("Direction needs to be r or l.")
    # No match on the chosen side: return the text unchanged.
    return text
def rstrips(text, remove):
    """
    removes the string `remove` from the right of `text`
    >>> rstrips("foobar", "bar")
    'foo'
    """
    # Thin wrapper over the shared _strips implementation.
    return _strips("r", text, remove)
def lstrips(text, remove):
    """
    removes the string `remove` from the left of `text`
    >>> lstrips("foobar", "foo")
    'bar'
    >>> lstrips('http://foo.org/', ['http://', 'https://'])
    'foo.org/'
    >>> lstrips('FOOBARBAZ', ['FOO', 'BAR'])
    'BAZ'
    >>> lstrips('FOOBARBAZ', ['BAR', 'FOO'])
    'BARBAZ'
    """
    # Thin wrapper over the shared _strips implementation; note that a list
    # of prefixes is applied in order (see the last two doctests).
    return _strips("l", text, remove)
def strips(text, remove):
    """
    removes the string `remove` from the both sides of `text`
    >>> strips("foobarfoo", "foo")
    'bar'
    """
    # Left strip runs first, then right strip on its result.
    return rstrips(lstrips(text, remove), remove)
def safeunicode(obj, encoding="utf-8"):
    r"""
    Converts any given object to unicode string.
    >>> safeunicode('hello')
    u'hello'
    >>> safeunicode(2)
    u'2'
    >>> safeunicode('\xe1\x88\xb4')
    u'\u1234'
    """
    kind = type(obj)
    if kind is text_type:
        return obj
    if kind is bytes:
        # Bytes are decoded with the requested encoding.
        return obj.decode(encoding)
    # Everything else (numbers, bools, arbitrary objects) goes through the
    # text constructor.
    return text_type(obj)
def safestr(obj, encoding="utf-8"):
    r"""
    Converts any given object to utf-8 encoded string.
    >>> safestr('hello')
    'hello'
    >>> safestr(2)
    '2'
    """
    if PY2 and isinstance(obj, text_type):
        return obj.encode(encoding)
    elif is_iter(obj):
        # Iterators are converted lazily, element by element.
        return imap(safestr, obj)
    else:
        return str(obj)

if not PY2:
    # Since Python3, utf-8 encoded strings and unicode strings are the same thing
    safeunicode = safestr
def timelimit(timeout):
    """
    A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
    if it takes longer.
    >>> import time
    >>> def meaningoflife():
    ...     time.sleep(.2)
    ...     return 42
    >>>
    >>> timelimit(.1)(meaningoflife)()
    Traceback (most recent call last):
        ...
    RuntimeError: took too long
    >>> timelimit(1)(meaningoflife)()
    42
    _Caveat:_ The function isn't stopped after `timeout` seconds but continues
    executing in a separate thread. (There seems to be no way to kill a thread.)
    inspired by <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/473878>
    """

    def _1(function):
        def _2(*args, **kw):
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None
                    # daemon=True so a runaway worker cannot block interpreter
                    # exit; Thread.setDaemon() is deprecated since Python 3.10.
                    self.daemon = True
                    self.start()

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except:
                        self.error = sys.exc_info()

            c = Dispatch()
            c.join(timeout)
            if c.is_alive():
                raise RuntimeError("took too long")
            if c.error:
                raise c.error[1]
            return c.result

        return _2

    return _1
class Memoize:
    """
    'Memoizes' a function, caching its return values for each input.
    If `expires` is specified, values are recalculated after `expires` seconds.
    If `background` is specified, values are recalculated in a separate thread.
    >>> calls = 0
    >>> def howmanytimeshaveibeencalled():
    ...     global calls
    ...     calls += 1
    ...     return calls
    >>> fastcalls = memoize(howmanytimeshaveibeencalled)
    >>> howmanytimeshaveibeencalled()
    1
    >>> howmanytimeshaveibeencalled()
    2
    >>> fastcalls()
    3
    >>> fastcalls()
    3
    >>> import time
    >>> fastcalls = memoize(howmanytimeshaveibeencalled, .1, background=False)
    >>> fastcalls()
    4
    >>> fastcalls()
    4
    >>> time.sleep(.2)
    >>> fastcalls()
    5
    >>> def slowfunc():
    ...     time.sleep(.1)
    ...     return howmanytimeshaveibeencalled()
    >>> fastcalls = memoize(slowfunc, .2, background=True)
    >>> fastcalls()
    6
    >>> timelimit(.05)(fastcalls)()
    6
    >>> time.sleep(.2)
    >>> timelimit(.05)(fastcalls)()
    6
    >>> timelimit(.05)(fastcalls)()
    6
    >>> time.sleep(.2)
    >>> timelimit(.05)(fastcalls)()
    7
    >>> fastcalls = memoize(slowfunc, None, background=True)
    >>> threading.Thread(target=fastcalls).start()
    >>> time.sleep(.01)
    >>> fastcalls()
    9
    """

    def __init__(self, func, expires=None, background=True):
        self.func = func
        self.cache = {}  # key -> (value, timestamp of computation)
        self.expires = expires
        self.background = background
        self.running = {}  # key -> lock guarding recomputation of that key
        self.running_lock = threading.Lock()

    def __call__(self, *args, **keywords):
        key = (args, tuple(keywords.items()))
        # Create the per-key lock exactly once, under the global lock.
        with self.running_lock:
            if not self.running.get(key):
                self.running[key] = threading.Lock()

        def update(block=False):
            # Recompute unless another thread already holds this key's lock
            # (i.e. is already recomputing it).
            if self.running[key].acquire(block):
                try:
                    self.cache[key] = (self.func(*args, **keywords), time.time())
                finally:
                    self.running[key].release()

        if key not in self.cache:
            # First request for this key: compute synchronously.
            update(block=True)
        elif self.expires and (time.time() - self.cache[key][1]) > self.expires:
            # Stale entry: refresh in the background or inline; either way
            # the (possibly stale) cached value below is what gets returned.
            if self.background:
                threading.Thread(target=update).start()
            else:
                update()
        return self.cache[key][0]

memoize = Memoize

# A memoized re.compile, so repeated pattern compiles are cached.
re_compile = memoize(re.compile)
re_compile.__doc__ = """
A memoized version of re.compile.
"""
class _re_subm_proxy:
def __init__(self):
self.match = None
def __call__(self, match):
self.match = match
return ""
def re_subm(pat, repl, string):
    """
    Like re.sub, but returns the replacement _and_ the match object.
    >>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
    >>> t
    'foooooolish'
    >>> m.groups()
    ('oooooo',)
    """
    compiled_pat = re_compile(pat)
    # First pass only records the match object; the second pass performs
    # the real substitution.
    proxy = _re_subm_proxy()
    compiled_pat.sub(proxy.__call__, string)
    return compiled_pat.sub(repl, string), proxy.match
def group(seq, size):
    """
    Returns an iterator over a series of lists of length size from iterable.
    >>> list(group([1,2,3,4], 2))
    [[1, 2], [3, 4]]
    >>> list(group([1,2,3,4,5], 2))
    [[1, 2], [3, 4], [5]]
    """
    # The final chunk may be shorter than `size`.
    return (seq[start : start + size] for start in range(0, len(seq), size))
def uniq(seq, key=None):
    """
    Removes duplicate elements from a list while preserving the order of the rest.
    >>> uniq([9,0,2,1,0])
    [9, 0, 2, 1]
    The value of the optional `key` parameter should be a function that
    takes a single argument and returns a key to test the uniqueness.
    >>> uniq(["Foo", "foo", "bar"], key=lambda s: s.lower())
    ['Foo', 'bar']
    """
    key = key or (lambda x: x)
    seen = set()
    kept = []
    for item in seq:
        marker = key(item)
        if marker not in seen:
            seen.add(marker)
            kept.append(item)
    return kept
def iterview(x):
    """
    Takes an iterable `x` and returns an iterator over it
    which prints its progress to stderr as it iterates through.

    NOTE(review): `x` must support len(), so this works for sequences,
    not arbitrary generators.
    """
    WIDTH = 70

    def plainformat(n, lenx):
        # e.g. " 42.0% ( 42/100)"; the count is right-aligned to len(str(lenx)).
        return "%5.1f%% (%*d/%d)" % ((float(n) / lenx) * 100, len(str(lenx)), n, lenx)

    def bars(size, n, lenx):
        # Progress bar like "[=====>    ]" scaled to `size` characters.
        val = int((float(n) * size) / lenx + 0.5)
        if size - val:
            spacing = ">" + (" " * (size - val))[1:]
        else:
            spacing = ""
        return "[%s%s]" % ("=" * val, spacing)

    def eta(elapsed, n, lenx):
        # Estimated time remaining (total elapsed once finished) as HH:MM:SS.
        if n == 0:
            return "--:--:--"
        if n == lenx:
            secs = int(elapsed)
        else:
            secs = int((elapsed / n) * (lenx - n))
        mins, secs = divmod(secs, 60)
        hrs, mins = divmod(mins, 60)
        return "%02d:%02d:%02d" % (hrs, mins, secs)

    def format(starttime, n, lenx):
        # One full status line: percentage, bar sized to fill WIDTH, ETA.
        out = plainformat(n, lenx) + " "
        if n == lenx:
            end = " "
        else:
            end = " ETA "
        end += eta(time.time() - starttime, n, lenx)
        out += bars(WIDTH - len(out) - len(end), n, lenx)
        out += end
        return out

    starttime = time.time()
    lenx = len(x)
    for n, y in enumerate(x):
        # \r rewrites the same terminal line on every step.
        sys.stderr.write("\r" + format(starttime, n, lenx))
        yield y
    sys.stderr.write("\r" + format(starttime, n + 1, lenx) + "\n")
class IterBetter:
    """
    Returns an object that can be used as an iterator
    but can also be used via __getitem__ (although it
    cannot go backwards -- that is, you cannot request
    `iterbetter[0]` after requesting `iterbetter[1]`).
    >>> import itertools
    >>> c = iterbetter(itertools.count())
    >>> c[1]
    1
    >>> c[5]
    5
    >>> c[3]
    Traceback (most recent call last):
        ...
    IndexError: already passed 3
    It is also possible to get the first value of the iterator or None.
    >>> c = iterbetter(iter([3, 4, 5]))
    >>> print(c.first())
    3
    >>> c = iterbetter(iter([]))
    >>> print(c.first())
    None
    For boolean test, IterBetter peeps at first value in the itertor without effecting the iteration.
    >>> c = iterbetter(iter(range(5)))
    >>> bool(c)
    True
    >>> list(c)
    [0, 1, 2, 3, 4]
    >>> c = iterbetter(iter([]))
    >>> bool(c)
    False
    >>> list(c)
    []
    """

    def __init__(self, iterator):
        # `c` counts items consumed so far (used by __getitem__ to forbid
        # going backwards).
        self.i, self.c = iterator, 0

    def first(self, default=None):
        """Returns the first element of the iterator or None when there are no
        elements.
        If the optional argument default is specified, that is returned instead
        of None when there are no elements.
        """
        try:
            return next(iter(self))
        except StopIteration:
            return default

    def __iter__(self):
        # `_head` is set by __nonzero__ when it peeks at the first element.
        if hasattr(self, "_head"):
            yield self._head
        while 1:
            try:
                yield next(self.i)
            except StopIteration:
                return
            # NOTE(review): `c` is only bumped after the consumer resumes the
            # generator, and the `_head` yield above is not counted at all.
            self.c += 1

    def __getitem__(self, i):
        # todo: slices
        if i < self.c:
            raise IndexError("already passed " + str(i))
        try:
            # Skip forward until the underlying iterator is at position i.
            while i > self.c:
                next(self.i)
                self.c += 1
            # now self.c == i
            self.c += 1
            return next(self.i)
        except StopIteration:
            raise IndexError(str(i))

    def __nonzero__(self):
        # Truthiness: prefer __len__ if present, otherwise peek one element
        # and stash it in `_head` so iteration still sees it.
        if hasattr(self, "__len__"):
            return self.__len__() != 0
        elif hasattr(self, "_head"):
            return True
        else:
            try:
                self._head = next(self.i)
            except StopIteration:
                return False
            else:
                return True

    __bool__ = __nonzero__

iterbetter = IterBetter
def safeiter(it, cleanup=None, ignore_errors=True):
    """Makes an iterator safe by ignoring the exceptions occurred during the iteration.

    Exhaustion ends the iteration normally; any other exception raised by
    the underlying iterator is printed and the iteration continues.
    (`cleanup` and `ignore_errors` are accepted for backward compatibility
    but were unused in the original implementation as well.)
    """
    # BUG FIX: the original defined a zero-argument inner function named
    # `next` whose body called next(it); that name resolved to the inner
    # function itself, so every call raised TypeError, which the bare
    # except swallowed -- an endless error-printing loop.  Also, letting
    # StopIteration escape into a generator body is a RuntimeError under
    # PEP 479, so it is caught and converted to a clean return here.
    it = iter(it)
    while True:
        try:
            value = next(it)
        except StopIteration:
            return
        except Exception:
            traceback.print_exc()
            continue
        yield value
def safewrite(filename, content):
    """Writes the content to a temp file and then moves the temp file to
    given filename to avoid overwriting the existing file in case of errors.
    """
    tmp_name = filename + ".tmp"
    with open(tmp_name, "w") as tmp:
        tmp.write(content)
    # Only after a successful write does the temp file take the real name.
    os.rename(tmp_name, filename)
def dictreverse(mapping):
    """
    Returns a new dictionary with keys and values swapped.
    >>> dictreverse({1: 2, 3: 4})
    {2: 1, 4: 3}
    """
    return {value: key for (key, value) in mapping.items()}
def dictfind(dictionary, element):
    """Return a key whose value in *dictionary* is *element* (compared by
    identity), or None when no such key exists.

    >>> d = {1: 2, 3: 4}
    >>> dictfind(d, 4)
    3
    >>> dictfind(d, 5)
    """
    matches = (key for (key, value) in iteritems(dictionary) if element is value)
    return next(matches, None)
def dictfindall(dictionary, element):
    """Return every key whose value in *dictionary* is *element* (compared
    by identity); [] when none match.

    >>> d = {1: 4, 3: 4}
    >>> dictfindall(d, 4)
    [1, 3]
    >>> dictfindall(d, 5)
    []
    """
    return [key for (key, value) in iteritems(dictionary) if element is value]
def dictincr(dictionary, element):
    """Increment *element*'s counter in *dictionary* (starting from zero
    when the key is missing) and return the new count.
    """
    dictionary[element] = dictionary.get(element, 0) + 1
    return dictionary[element]
def dictadd(*dicts):
    """Merge the argument dictionaries into one new dict.

    On key collisions the value from the last argument wins.

    >>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
    {1: 0, 2: 1, 3: 1}
    """
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
def requeue(queue, index=-1):
    """Move the element at *index* to the front of *queue* and return it.

    >>> x = [1, 2, 3, 4]
    >>> requeue(x)
    4
    >>> x
    [4, 1, 2, 3]
    """
    element = queue.pop(index)
    queue.insert(0, element)
    return element
def restack(stack, index=0):
    """Move the element at *index* to the top of *stack* and return it.

    >>> x = [1, 2, 3, 4]
    >>> restack(x)
    1
    >>> x
    [2, 3, 4, 1]
    """
    element = stack.pop(index)
    stack.append(element)
    return element
def listget(lst, ind, default=None):
    """Return ``lst[ind]`` if it exists, *default* otherwise.

    Bug fixed: the old ``len(lst) - 1 < ind`` check only caught positive
    out-of-range indices, so an out-of-range *negative* index (e.g.
    ``listget(['a'], -2)``) raised IndexError instead of returning the
    default.  The EAFP form handles both directions.

    >>> listget(['a'], 0)
    'a'
    >>> listget(['a'], 1)
    >>> listget(['a'], 1, 'b')
    'b'
    """
    try:
        return lst[ind]
    except IndexError:
        return default
def intget(integer, default=None):
    """Coerce *integer* to ``int``; return *default* when the value cannot
    be converted (non-numeric string, None, ...).

    >>> intget('3')
    3
    >>> intget('3a')
    >>> intget('3a', 0)
    0
    """
    try:
        return int(integer)
    except (TypeError, ValueError):
        return default
def datestr(then, now=None):
    """
    Converts a (UTC) datetime object to a nice string representation.
    >>> from datetime import datetime, timedelta
    >>> d = datetime(1970, 5, 1)
    >>> datestr(d, now=d)
    '0 microseconds ago'
    >>> for t, v in iteritems({
    ...     timedelta(microseconds=1): '1 microsecond ago',
    ...     timedelta(microseconds=2): '2 microseconds ago',
    ...     -timedelta(microseconds=1): '1 microsecond from now',
    ...     -timedelta(microseconds=2): '2 microseconds from now',
    ...     timedelta(microseconds=2000): '2 milliseconds ago',
    ...     timedelta(seconds=2): '2 seconds ago',
    ...     timedelta(seconds=2*60): '2 minutes ago',
    ...     timedelta(seconds=2*60*60): '2 hours ago',
    ...     timedelta(days=2): '2 days ago',
    ... }):
    ...     assert datestr(d, now=d+t) == v
    >>> datestr(datetime(1970, 1, 1), now=d)
    'January 1'
    >>> datestr(datetime(1969, 1, 1), now=d)
    'January 1, 1969'
    >>> datestr(datetime(1970, 6, 1), now=d)
    'June 1, 1970'
    >>> datestr(None)
    ''
    """
    def agohence(n, what, divisor=None):
        # Render a signed count as "<n> <unit>s ago" / "<n> <unit>s from now".
        if divisor:
            n = n // divisor
        out = str(abs(n)) + " " + what  # '2 days'
        if abs(n) != 1:
            out += "s"  # '2 days'
        out += " "  # '2 days '
        if n < 0:
            out += "from now"
        else:
            out += "ago"
        return out  # '2 days ago'

    oneday = 24 * 60 * 60

    if not then:
        return ""
    if not now:
        now = datetime.datetime.utcnow()

    # Coerce foreign date-like objects so the subtraction below is defined.
    # NOTE(review): fromtimestamp() on something whose type is literally
    # named "DateTime" assumes it behaves like a POSIX timestamp -- confirm
    # against the DateTime type this was written for.
    if type(now).__name__ == "DateTime":
        now = datetime.datetime.fromtimestamp(now)
    if type(then).__name__ == "DateTime":
        then = datetime.datetime.fromtimestamp(then)
    elif type(then).__name__ == "date":
        then = datetime.datetime(then.year, then.month, then.day)

    delta = now - then
    # Total signed seconds between the two moments (sub-second part rounded
    # toward zero by int()).
    deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
    deltadays = abs(deltaseconds) // oneday
    if deltaseconds < 0:
        deltadays *= -1  # fix for oddity of floor

    if deltadays:
        if abs(deltadays) < 4:
            return agohence(deltadays, "day")
        # Trick to display 'June 3' instead of 'June 03'
        # Even though the %e format in strftime does that, it doesn't work on Windows.
        out = then.strftime("%B %d").replace(" 0", " ")
        # Append the year for other years and for future dates.
        if then.year != now.year or deltadays < 0:
            out += ", %s" % then.year
        return out

    if int(deltaseconds):
        if abs(deltaseconds) > (60 * 60):
            return agohence(deltaseconds, "hour", 60 * 60)
        elif abs(deltaseconds) > 60:
            return agohence(deltaseconds, "minute", 60)
        else:
            return agohence(deltaseconds, "second")

    deltamicroseconds = delta.microseconds
    if delta.days:
        # timedelta normalizes negative durations as (days=-1, positive
        # microseconds); undo that so the sign is preserved.
        deltamicroseconds = int(delta.microseconds - 1e6)  # datetime oddity
    if abs(deltamicroseconds) > 1000:
        return agohence(deltamicroseconds, "millisecond", 1000)
    return agohence(deltamicroseconds, "microsecond")
def numify(string):
    """Strip every non-digit character from ``str(string)``.

    >>> numify('800-555-1212')
    '8005551212'
    >>> numify('800.555.1212')
    '8005551212'
    """
    return "".join(filter(str.isdigit, str(string)))
def denumify(string, pattern):
    """Format *string* according to *pattern*: every 'X' in the pattern is
    replaced by the next character of *string*; other pattern characters
    are copied verbatim.

    >>> denumify("8005551212", "(XXX) XXX-XXXX")
    '(800) 555-1212'
    """
    pieces = []
    cursor = 0
    for ch in pattern:
        if ch == "X":
            pieces.append(string[cursor])
            cursor += 1
        else:
            pieces.append(ch)
    return "".join(pieces)
def commify(n):
    """
    Add commas to an integer `n`.

    >>> commify(1)
    '1'
    >>> commify(1234)
    '1,234'
    >>> commify(1234567890)
    '1,234,567,890'
    >>> commify(1234.56789)
    '1,234.56789'
    >>> commify(' %.2f ' % -1234.5)
    '-1,234.50'
    >>> commify(None)
    >>>
    """
    if n is None:
        return None

    text = str(n).strip()
    if text.startswith("-"):
        sign, text = "-", text[1:].strip()
    else:
        sign = ""

    if "." in text:
        whole, frac = text.split(".")
    else:
        whole, frac = text, None

    # Group the integer part in threes, working from the right.
    pieces = []
    for idx, ch in enumerate(reversed(str(whole))):
        if idx and idx % 3 == 0:
            pieces.append(",")
        pieces.append(ch)
    grouped = "".join(reversed(pieces))

    if frac:
        grouped += "." + frac
    return sign + grouped
def dateify(datestring):
    """
    Formats a numified `datestring` properly.

    Expects the 14 digits of a timestamp (YYYYMMDDhhmmss, e.g. the output
    of `numify`) and rewrites them as 'YYYY-MM-DD hh:mm:ss' via `denumify`.
    """
    return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
def nthstr(n):
    """
    Formats an ordinal.
    Doesn't handle negative numbers.

    >>> nthstr(1)
    '1st'
    >>> nthstr(0)
    '0th'
    >>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
    ['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
    >>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
    ['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
    """
    assert n >= 0
    # The teens are always 'th' regardless of the final digit.
    if 11 <= n % 100 <= 13:
        return "%sth" % n
    suffix = {1: "st", 2: "nd", 3: "rd"}.get(n % 10, "th")
    return "%s%s" % (n, suffix)
def cond(predicate, consequence, alternative=None):
    """Expression-friendly if/else: return *consequence* when *predicate*
    is truthy, *alternative* (default None) otherwise.

    >>> cond(2 % 2 == 0, "even", "odd")
    'even'
    """
    return consequence if predicate else alternative
class CaptureStdout:
    """
    Captures everything `func` prints to stdout and returns it instead.

    >>> def idiot():
    ...     print("foo")
    >>> capturestdout(idiot)()
    'foo\\n'

    **WARNING:** Not threadsafe!
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **keywords):
        buffer = StringIO()
        saved_stdout = sys.stdout
        sys.stdout = buffer
        try:
            self.func(*args, **keywords)
        finally:
            # Restore stdout even if func raised.
            sys.stdout = saved_stdout
        return buffer.getvalue()


capturestdout = CaptureStdout
class Profile:
    """
    Profiles `func` and returns a tuple containing its output
    and a string with human-readable profiling information.

    >>> import time
    >>> out, inf = profile(time.sleep)(.001)
    >>> out
    >>> inf[:10].strip()
    'took 0.0'
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args):  # , **kw): kw unused
        import cProfile
        import os
        import pstats
        import tempfile

        handle, filename = tempfile.mkstemp()
        os.close(handle)

        profiler = cProfile.Profile()
        started = time.time()
        result = profiler.runcall(self.func, *args)
        elapsed = time.time() - started

        report = StringIO()
        stats = pstats.Stats(profiler, stream=report)
        stats.strip_dirs()
        stats.sort_stats("time", "calls")
        stats.print_stats(40)
        stats.print_callers()

        text = "\n\ntook " + str(elapsed) + " seconds\n"
        text += report.getvalue()

        # Remove the tempfile; best effort only.
        try:
            os.remove(filename)
        except IOError:
            pass

        return result, text


profile = Profile
def tryall(context, prefix=None):
    """
    Tries a series of functions and prints their results.
    `context` is a dictionary mapping names to values;
    the value will only be tried if it's callable.
    >>> tryall(dict(j=lambda: True))
    j: True
    ----------------------------------------
    results:
    True: 1
    For example, you might have a file `test/stuff.py`
    with a series of functions testing various things in it.
    At the bottom, have a line:
        if __name__ == "__main__": tryall(globals())
    Then you can run `python test/stuff.py` and get the results of
    all the tests.
    """
    context = context.copy()  # vars() would update
    results = {}
    for (key, value) in iteritems(context):
        # Only callables are test candidates; optionally filter by prefix.
        if not hasattr(value, "__call__"):
            continue
        if prefix and not key.startswith(prefix):
            continue
        print(key + ":", end=" ")
        try:
            r = value()
            dictincr(results, r)
            print(r)
        except:
            # Deliberately broad: a failing test function is reported and
            # tallied instead of aborting the whole run.
            print("ERROR")
            dictincr(results, "ERROR")
            print(" " + "\n ".join(traceback.format_exc().split("\n")))
    # Summary: how many functions produced each distinct result.
    print("-" * 40)
    print("results:")
    for (key, value) in iteritems(results):
        print(" " * 2, str(key) + ":", value)
class ThreadedDict(threadlocal):
    """
    Thread local storage.
    >>> d = ThreadedDict()
    >>> d.x = 1
    >>> d.x
    1
    >>> import threading
    >>> def f(): d.x = 2
    ...
    >>> t = threading.Thread(target=f)
    >>> t.start()
    >>> t.join()
    >>> d.x
    1
    """

    # Registry of live instances so clear_all() can reset every one
    # (e.g. between requests).  NOTE(review): holds strong references,
    # so instances are only dropped when __del__ runs.
    _instances = set()

    def __init__(self):
        ThreadedDict._instances.add(self)

    def __del__(self):
        ThreadedDict._instances.remove(self)

    def __hash__(self):
        # Identity hash so instances can live in the _instances set.
        return id(self)

    def clear_all():
        """Clears all ThreadedDict instances.
        """
        # Copy: t.clear() only touches per-thread state, but iterate over a
        # snapshot to be safe against concurrent registration.
        for t in list(ThreadedDict._instances):
            t.clear()
    clear_all = staticmethod(clear_all)

    # Define all these methods to more or less fully emulate dict -- attribute access
    # is built into threading.local.
    def __getitem__(self, key):
        return self.__dict__[key]

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __delitem__(self, key):
        del self.__dict__[key]

    def __contains__(self, key):
        return key in self.__dict__
    has_key = __contains__  # legacy Python 2 dict API

    def clear(self):
        self.__dict__.clear()

    def copy(self):
        return self.__dict__.copy()

    def get(self, key, default=None):
        return self.__dict__.get(key, default)

    def items(self):
        return self.__dict__.items()

    def iteritems(self):
        return iteritems(self.__dict__)

    def keys(self):
        return self.__dict__.keys()

    def iterkeys(self):
        # Fall back to keys() when the module-level iterkeys helper is
        # unavailable (Python 3).
        try:
            return iterkeys(self.__dict__)
        except NameError:
            return self.__dict__.keys()
    iter = iterkeys

    def values(self):
        return self.__dict__.values()

    def itervalues(self):
        return itervalues(self.__dict__)

    def pop(self, key, *args):
        return self.__dict__.pop(key, *args)

    def popitem(self):
        return self.__dict__.popitem()

    def setdefault(self, key, default=None):
        return self.__dict__.setdefault(key, default)

    def update(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)

    def __repr__(self):
        return "<ThreadedDict %r>" % self.__dict__
    __str__ = __repr__


# Lowercase alias kept for backwards compatibility.
threadeddict = ThreadedDict
def autoassign(self, locals):
    """
    Automatically assigns local variables to `self`.

    >>> self = storage()
    >>> autoassign(self, dict(a=1, b=2))
    >>> self.a
    1

    Generally used in `__init__` methods, as in:
        def __init__(self, foo, bar, baz=1): autoassign(self, locals())
    """
    for (name, value) in iteritems(locals):
        if name != "self":
            setattr(self, name, value)
def to36(q):
    """
    Converts an integer to base 36 (a useful scheme for human-sayable IDs).

    >>> to36(35)
    'z'
    >>> to36(119292)
    '2k1o'
    >>> to36(0)
    '0'
    >>> to36(-393)
    Traceback (most recent call last):
        ...
    ValueError: must supply a positive integer
    """
    if q < 0:
        raise ValueError("must supply a positive integer")
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    encoded = ""
    while q:
        q, remainder = divmod(q, 36)
        encoded = digits[remainder] + encoded
    return encoded or "0"
# Matches an http:// URL (scheme plus the following non-space run); the
# negative lookbehind skips URLs immediately preceded by '('.
r_url = re_compile(r"(?<!\()(http://(\S+))")
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
    """
    Sends the email message `message` with mail and envelope headers
    from `from_address` to `to_address` with `subject`.
    Additional email headers can be specified with the dictionary
    `headers`.

    Optionally cc, bcc and attachments can be specified as keyword arguments.
    Attachments must be an iterable and each attachment can be either a
    filename or a file object or a dictionary with filename, content and
    optionally content_type keys.

    If `web.config.smtp_server` is set, it will send the message
    to that SMTP server. Otherwise it will look for
    `/usr/sbin/sendmail`, the typical location for the sendmail-style
    binary. To use sendmail from a different path, set `web.config.sendmail_path`.
    """
    attachments = kw.pop("attachments", [])
    mail = _EmailMessage(from_address, to_address, subject, message, headers, **kw)

    for a in attachments:
        if isinstance(a, dict):
            mail.attach(a["filename"], a["content"], a.get("content_type"))
        elif hasattr(a, "read"):  # file-like object
            filename = os.path.basename(getattr(a, "name", ""))
            content_type = getattr(a, "content_type", None)
            mail.attach(filename, a.read(), content_type)
        elif isinstance(a, string_types):
            # Treat the string as a path.  Use a context manager so the
            # handle is closed even when reading or attaching raises
            # (the previous open/read/close sequence leaked on error).
            with open(a, "rb") as f:
                content = f.read()
            filename = os.path.basename(a)
            mail.attach(filename, content, None)
        else:
            raise ValueError("Invalid attachment: %s" % repr(a))

    mail.send()
class _EmailMessage:
    """Internal helper for sendmail(): builds a MIME message and delivers
    it.  The delivery backend is chosen at send() time from webapi.config:
    an SMTP server, Amazon SES (via boto), or a local sendmail binary.
    """

    def __init__(self, from_address, to_address, subject, message, headers=None, **kw):
        def listify(x):
            # Accept a single address or a list; normalize to a list of str.
            if not isinstance(x, list):
                return [safestr(x)]
            else:
                return [safestr(a) for a in x]

        subject = safestr(subject)
        message = safestr(message)

        from_address = safestr(from_address)
        to_address = listify(to_address)
        cc = listify(kw.get("cc", []))
        bcc = listify(kw.get("bcc", []))
        recipients = to_address + cc + bcc

        import email.utils

        # Envelope addresses: bare addr-spec only, display names stripped.
        self.from_address = email.utils.parseaddr(from_address)[1]
        self.recipients = [email.utils.parseaddr(r)[1] for r in recipients]

        # Bcc recipients are deliberately absent from the visible headers.
        self.headers = dictadd(
            {"From": from_address, "To": ", ".join(to_address), "Subject": subject},
            headers or {},
        )
        if cc:
            self.headers["Cc"] = ", ".join(cc)

        self.message = self.new_message()
        self.message.add_header("Content-Transfer-Encoding", "7bit")
        self.message.add_header("Content-Disposition", "inline")
        self.message.add_header("MIME-Version", "1.0")
        self.message.set_payload(message, "utf-8")
        # Becomes True once the first attachment promotes the message to
        # multipart/mixed.
        self.multipart = False

    def new_message(self):
        from email.message import Message

        return Message()

    def attach(self, filename, content, content_type=None):
        # Lazily wrap the body in a multipart/mixed container on the first
        # attachment.
        if not self.multipart:
            msg = self.new_message()
            msg.add_header("Content-Type", "multipart/mixed")
            msg.attach(self.message)
            self.message = msg
            self.multipart = True

        import mimetypes

        try:
            from email import encoders
        except:
            # Very old Python spelling of the encoders module.
            from email import Encoders as encoders

        # Fall back to a guess from the filename, then to a generic binary type.
        content_type = (
            content_type
            or mimetypes.guess_type(filename)[0]
            or "application/octet-stream"
        )

        msg = self.new_message()
        msg.set_payload(content)
        msg.add_header("Content-Type", content_type)
        msg.add_header("Content-Disposition", "attachment", filename=filename)

        # Non-text payloads must be base64-encoded for 7bit transport.
        if not content_type.startswith("text/"):
            encoders.encode_base64(msg)

        self.message.attach(msg)

    def prepare_message(self):
        # Flush the accumulated headers into the MIME message exactly once;
        # Content-Type must go through set_type() rather than add_header().
        for k, v in iteritems(self.headers):
            if k.lower() == "content-type":
                self.message.set_type(v)
            else:
                self.message.add_header(k, v)

        self.headers = {}

    def send(self):
        self.prepare_message()
        message_text = self.message.as_string()

        try:
            from . import webapi
        except ImportError:
            # Standalone use outside the package: behave as if config is empty.
            webapi = Storage(config=Storage())

        # Backend selection: SMTP wins, then AWS SES, then local sendmail.
        if webapi.config.get("smtp_server"):
            self.send_with_smtp(message_text)
        elif webapi.config.get("email_engine") == "aws":
            self.send_with_aws(message_text)
        else:
            self.default_email_sender(message_text)

    def send_with_aws(self, message_text):
        try:
            from . import webapi
        except ImportError:
            webapi = Storage(config=Storage())

        import boto.ses

        c = boto.ses.SESConnection(
            aws_access_key_id=webapi.config.get("aws_access_key_id"),
            aws_secret_access_key=webapi.config.get("aws_secret_access_key"),
        )
        c.send_raw_email(message_text, self.from_address, self.recipients)

    def send_with_smtp(self, message_text):
        try:
            from . import webapi
        except ImportError:
            webapi = Storage(config=Storage())

        server = webapi.config.get("smtp_server")
        port = webapi.config.get("smtp_port", 0)
        username = webapi.config.get("smtp_username")
        password = webapi.config.get("smtp_password")
        debug_level = webapi.config.get("smtp_debuglevel", None)
        starttls = webapi.config.get("smtp_starttls", False)

        import smtplib

        smtpserver = smtplib.SMTP(server, port)

        if debug_level:
            smtpserver.set_debuglevel(debug_level)

        if starttls:
            # EHLO is required both before and after upgrading to TLS.
            smtpserver.ehlo()
            smtpserver.starttls()
            smtpserver.ehlo()

        if username and password:
            smtpserver.login(username, password)

        smtpserver.sendmail(self.from_address, self.recipients, message_text)
        smtpserver.quit()

    def default_email_sender(self, message_text):
        try:
            from . import webapi
        except ImportError:
            webapi = Storage(config=Storage())

        sendmail = webapi.config.get("sendmail_path", "/usr/sbin/sendmail")

        # Refuse addresses that could be parsed as sendmail command options.
        assert not self.from_address.startswith("-"), "security"
        for r in self.recipients:
            assert not r.startswith("-"), "security"

        cmd = [sendmail, "-f", self.from_address] + self.recipients

        p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
        p.stdin.write(message_text.encode("utf-8"))
        p.stdin.close()
        p.wait()

    def __repr__(self):
        return "<EmailMessage>"

    def __str__(self):
        return self.message.as_string()
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
|
test.py | import json
import os.path as p
import random
import subprocess
import threading
import logging
import time
from random import randrange
import pika
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.test_tools import TSV
from . import rabbitmq_pb2
# Single ClickHouse instance plus a RabbitMQ side container; shared by every
# test in this module.
cluster = ClickHouseCluster(__file__)
instance = cluster.add_instance('instance',
                                main_configs=['configs/rabbitmq.xml'],
                                with_rabbitmq=True)
# Helpers
def rabbitmq_check_result(result, check=False, ref_file='test_rabbitmq_json.reference'):
    """Compare *result* against the reference TSV file next to this module.

    With check=False, return whether they match; with check=True, assert
    that they do.
    """
    fpath = p.join(p.dirname(__file__), ref_file)
    with open(fpath) as reference:
        matches = TSV(result) == TSV(reference)
    if check:
        assert matches
    else:
        return matches
def kill_rabbitmq(rabbitmq_id):
    """Stop the RabbitMQ docker container; return True on success.

    Uses subprocess.run (waits and populates returncode in one call) and a
    dedicated name -- the old local ``p`` shadowed the module-level
    ``os.path as p`` alias.
    """
    proc = subprocess.run(('docker', 'stop', rabbitmq_id), stdout=subprocess.PIPE)
    return proc.returncode == 0
def revive_rabbitmq(rabbitmq_id):
    """Start the RabbitMQ docker container again; return True on success.

    Uses subprocess.run (waits and populates returncode in one call) and a
    dedicated name -- the old local ``p`` shadowed the module-level
    ``os.path as p`` alias.
    """
    proc = subprocess.run(('docker', 'start', rabbitmq_id), stdout=subprocess.PIPE)
    return proc.returncode == 0
# Fixtures
@pytest.fixture(scope="module")
def rabbitmq_cluster():
    """Module-scoped fixture: start the cluster once, create the ``test``
    database, and always shut the cluster down afterwards."""
    try:
        cluster.start()
        logging.debug("rabbitmq_id is {}".format(instance.cluster.rabbitmq_docker_id))
        instance.query('CREATE DATABASE test')

        yield cluster

    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def rabbitmq_setup_teardown():
    """Autouse teardown: drop the engine table each test (re)creates."""
    print("RabbitMQ is available - running test")
    yield  # run test
    instance.query('DROP TABLE IF EXISTS test.rabbitmq')
# Tests
def test_rabbitmq_select(rabbitmq_cluster):
    """Rows published to the 'select' exchange are readable with a plain
    SELECT from the RabbitMQ engine table."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = '{}:5672',
                     rabbitmq_exchange_name = 'select',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        '''.format(rabbitmq_cluster.rabbitmq_host))

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='select', routing_key='', body=message)

    connection.close()
    # The order of messages in select * from test.rabbitmq is not guaranteed, so sleep to collect everything in one select
    time.sleep(1)

    result = ''
    while True:
        # Accumulate across polls: each SELECT may consume only part of the stream.
        result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
def test_rabbitmq_select_empty(rabbitmq_cluster):
    """A freshly created engine table with nothing published is empty."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = '{}:5672',
                     rabbitmq_exchange_name = 'empty',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        '''.format(rabbitmq_cluster.rabbitmq_host))

    assert int(instance.query('SELECT count() FROM test.rabbitmq')) == 0
def test_rabbitmq_json_without_delimiter(rabbitmq_cluster):
    """JSONEachRow input parses correctly even without an explicit
    rabbitmq_row_delimiter: two messages each carrying 25 newline-joined
    rows are consumed."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = '{}:5672',
                     rabbitmq_exchange_name = 'json',
                     rabbitmq_format = 'JSONEachRow'
        '''.format(rabbitmq_cluster.rabbitmq_host))

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    # First message: rows 0..24 joined by newlines.
    messages = ''
    for i in range(25):
        messages += json.dumps({'key': i, 'value': i}) + '\n'

    all_messages = [messages]
    for message in all_messages:
        channel.basic_publish(exchange='json', routing_key='', body=message)

    # Second message: rows 25..49.
    messages = ''
    for i in range(25, 50):
        messages += json.dumps({'key': i, 'value': i}) + '\n'

    all_messages = [messages]
    for message in all_messages:
        channel.basic_publish(exchange='json', routing_key='', body=message)

    connection.close()
    time.sleep(1)

    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
def test_rabbitmq_csv_with_delimiter(rabbitmq_cluster):
    """CSV-formatted messages (one row per message) are consumed via SELECT."""
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'csv',
                     rabbitmq_format = 'CSV',
                     rabbitmq_row_delimiter = '\\n';
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append('{i}, {i}'.format(i=i))
    for message in messages:
        channel.basic_publish(exchange='csv', routing_key='', body=message)

    connection.close()
    time.sleep(1)

    result = ''
    while True:
        result += instance.query('SELECT * FROM test.rabbitmq ORDER BY key', ignore_error=True)
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
def test_rabbitmq_tsv_with_delimiter(rabbitmq_cluster):
    """TSV rows flow through a materialized view into a MergeTree table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'tsv',
                     rabbitmq_format = 'TSV',
                     rabbitmq_queue_base = 'tsv',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append('{i}\t{i}'.format(i=i))
    for message in messages:
        channel.basic_publish(exchange='tsv', routing_key='', body=message)

    connection.close()

    # Poll the target table until the full reference result has arrived.
    result = ''
    while True:
        result = instance.query('SELECT * FROM test.view ORDER BY key')
        if rabbitmq_check_result(result):
            break

    rabbitmq_check_result(result, True)
def test_rabbitmq_materialized_view(rabbitmq_cluster):
    """JSONEachRow messages reach a MergeTree table through a materialized
    view reading from the engine table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'mv',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='mv', routing_key='', body=message)

    # Poll until the view contains the full reference data set.
    while True:
        result = instance.query('SELECT * FROM test.view ORDER BY key')
        if (rabbitmq_check_result(result)):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    connection.close()

    rabbitmq_check_result(result, True)
def test_rabbitmq_materialized_view_with_subquery(rabbitmq_cluster):
    """Same as test_rabbitmq_materialized_view, but the view selects from a
    subquery over the engine table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'mvsq',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM (SELECT * FROM test.rabbitmq);
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='mvsq', routing_key='', body=message)

    while True:
        result = instance.query('SELECT * FROM test.view ORDER BY key')
        if rabbitmq_check_result(result):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    connection.close()

    rabbitmq_check_result(result, True)
def test_rabbitmq_many_materialized_views(rabbitmq_cluster):
    """Two materialized views attached to one engine table both receive
    every published row."""
    instance.query('''
        DROP TABLE IF EXISTS test.view1;
        DROP TABLE IF EXISTS test.view2;
        DROP TABLE IF EXISTS test.consumer1;
        DROP TABLE IF EXISTS test.consumer2;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'mmv',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view1 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE TABLE test.view2 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
            SELECT * FROM test.rabbitmq;
        CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
            SELECT * FROM test.rabbitmq;
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    for message in messages:
        channel.basic_publish(exchange='mmv', routing_key='', body=message)

    # Both targets must independently converge on the reference data.
    while True:
        result1 = instance.query('SELECT * FROM test.view1 ORDER BY key')
        result2 = instance.query('SELECT * FROM test.view2 ORDER BY key')
        if rabbitmq_check_result(result1) and rabbitmq_check_result(result2):
            break

    instance.query('''
        DROP TABLE test.consumer1;
        DROP TABLE test.consumer2;
        DROP TABLE test.view1;
        DROP TABLE test.view2;
    ''')
    connection.close()

    rabbitmq_check_result(result1, True)
    rabbitmq_check_result(result2, True)
@pytest.mark.skip(reason="clichouse_path with rabbitmq.proto fails to be exported")
def test_rabbitmq_protobuf(rabbitmq_cluster):
    """Length-delimited Protobuf messages (varint size prefix per record)
    are decoded via the rabbitmq.proto schema.  Currently skipped; the
    skip reason string appears to contain a typo ('clichouse')."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value String)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'pb',
                     rabbitmq_format = 'Protobuf',
                     rabbitmq_schema = 'rabbitmq.proto:KeyValueProto';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    # Publish three AMQP messages containing 20, 1 and 29 records each; every
    # record is prefixed with its varint-encoded serialized length.
    data = ''
    for i in range(0, 20):
        msg = rabbitmq_pb2.KeyValueProto()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
    channel.basic_publish(exchange='pb', routing_key='', body=data)
    data = ''
    for i in range(20, 21):
        msg = rabbitmq_pb2.KeyValueProto()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
    channel.basic_publish(exchange='pb', routing_key='', body=data)
    data = ''
    for i in range(21, 50):
        msg = rabbitmq_pb2.KeyValueProto()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        data = data + _VarintBytes(len(serialized_msg)) + serialized_msg
    channel.basic_publish(exchange='pb', routing_key='', body=data)

    connection.close()

    result = ''
    while True:
        result = instance.query('SELECT * FROM test.view ORDER BY key')
        if rabbitmq_check_result(result):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    rabbitmq_check_result(result, True)
def test_rabbitmq_big_message(rabbitmq_cluster):
    """Large messages survive the pipeline: 1000 messages, each ~100Kb
    (1000 concatenated JSONEachRow rows), all land in the view."""
    # Create batchs of messages of size ~100Kb
    rabbitmq_messages = 1000
    batch_messages = 1000
    # Each "message" is one JSON row repeated batch_messages times.
    messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(rabbitmq_messages)]

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value String)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'big',
                     rabbitmq_format = 'JSONEachRow';
        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
        ''')

    for message in messages:
        channel.basic_publish(exchange='big', routing_key='', body=message)

    # Poll until every row (messages x rows-per-message) has arrived.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        if int(result) == batch_messages * rabbitmq_messages:
            break

    connection.close()
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    assert int(result) == rabbitmq_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_sharding_between_queues_publish(rabbitmq_cluster):
    """Rows published concurrently from many threads are sharded across 10
    consumers/queues and all reach the materialized view.

    Bug fixed: the failure message in the final assert referenced the
    undefined name ``result`` (a NameError would have masked the real
    failure); it now reports ``result1``, the count actually checked.
    """
    NUM_CONSUMERS = 10
    NUM_QUEUES = 10

    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'test_sharding',
                     rabbitmq_num_queues = 10,
                     rabbitmq_num_consumers = 10,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64, channel_id String)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT *, _channel_id AS channel_id FROM test.rabbitmq;
        ''')

    # Shared mutable counter so every produced row gets a distinct key.
    # NOTE(review): incremented from 20 threads without a lock -- relies on
    # the GIL making the read-modify-write appear atomic enough in practice.
    i = [0]
    messages_num = 10000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)

    def produce():
        # Each thread publishes messages_num rows over its own connection.
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()

        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1

        current = 0
        for message in messages:
            current += 1
            mes_id = str(current)
            channel.basic_publish(exchange='test_sharding', routing_key='',
                                  properties=pika.BasicProperties(message_id=mes_id), body=message)

        connection.close()

    threads = []
    threads_num = 20

    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    result1 = ''
    while True:
        result1 = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result1) == messages_num * threads_num:
            break

    result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.view")

    for thread in threads:
        thread.join()

    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
    # One channel per consumer is expected.
    assert int(result2) == 10
def test_rabbitmq_mv_combo(rabbitmq_cluster):
    """Check that several materialized views attached to the same RabbitMQ
    table each receive every published message (NUM_MV copies in total)."""
    NUM_MV = 5
    NUM_CONSUMERS = 4  # NOTE(review): unused; consumer count is hard-coded in SETTINGS below

    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'combo',
                     rabbitmq_queue_base = 'combo',
                     rabbitmq_num_consumers = 2,
                     rabbitmq_num_queues = 5,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    # One destination table + one MV per consumer id.
    for mv_id in range(NUM_MV):
        instance.query('''
            DROP TABLE IF EXISTS test.combo_{0};
            DROP TABLE IF EXISTS test.combo_{0}_mv;
            CREATE TABLE test.combo_{0} (key UInt64, value UInt64)
                ENGINE = MergeTree()
                ORDER BY key;
            CREATE MATERIALIZED VIEW test.combo_{0}_mv TO test.combo_{0} AS
                SELECT * FROM test.rabbitmq;
        '''.format(mv_id))

    time.sleep(2)  # give the MVs a moment to attach before publishing

    # Shared counter in a list so the nested produce() closure can mutate it.
    i = [0]
    messages_num = 10000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)

    def produce():
        # Each thread opens its own connection (pika is not thread-safe).
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        for msg_id in range(messages_num):
            channel.basic_publish(exchange='combo', routing_key='',
                                  properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
        connection.close()

    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    # Poll until every MV has received every message from every thread.
    while True:
        result = 0
        for mv_id in range(NUM_MV):
            result += int(instance.query('SELECT count() FROM test.combo_{0}'.format(mv_id)))
        if int(result) == messages_num * threads_num * NUM_MV:
            break
        time.sleep(1)

    for thread in threads:
        thread.join()

    for mv_id in range(NUM_MV):
        instance.query('''
            DROP TABLE test.combo_{0}_mv;
            DROP TABLE test.combo_{0};
        '''.format(mv_id))

    assert int(result) == messages_num * threads_num * NUM_MV, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_insert(rabbitmq_cluster):
    """INSERT into a RabbitMQ-engine table publishes to a direct exchange;
    consume the messages back with a plain pika consumer and compare.

    Fixes: removed a dead local assignment (``i = 0``) inside the consume
    callback — it was never read.
    """
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'insert',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'insert1',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    consumer_connection = pika.BlockingConnection(parameters)

    consumer = consumer_connection.channel()
    # Server-named exclusive queue, bound to the exchange the table publishes to.
    result = consumer.queue_declare(queue='')
    queue_name = result.method.queue
    consumer.queue_bind(exchange='insert', queue=queue_name, routing_key='insert1')

    values = []
    for i in range(50):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)

    # The broker may still be warming up; retry only on the timeout error.
    while True:
        try:
            instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise

    insert_messages = []

    def onReceived(channel, method, properties, body):
        insert_messages.append(body.decode())
        if (len(insert_messages) == 50):
            channel.stop_consuming()

    consumer.basic_consume(onReceived, queue_name)
    consumer.start_consuming()
    consumer_connection.close()

    result = '\n'.join(insert_messages)
    rabbitmq_check_result(result, True)
def test_rabbitmq_insert_headers_exchange(rabbitmq_cluster):
    """Same as test_rabbitmq_insert, but publishing through a headers
    exchange: the routing key list carries header key=value pairs and the
    consumer queue is bound with a matching x-match=all argument set.

    Fixes: removed a dead local assignment (``i = 0``) inside the consume
    callback — it was never read.
    """
    instance.query('''
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'insert_headers',
                     rabbitmq_exchange_type = 'headers',
                     rabbitmq_routing_key_list = 'test=insert,topic=headers',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    consumer_connection = pika.BlockingConnection(parameters)

    consumer = consumer_connection.channel()
    result = consumer.queue_declare(queue='')
    queue_name = result.method.queue
    # All listed headers must match for the binding to receive a message.
    consumer.queue_bind(exchange='insert_headers', queue=queue_name, routing_key="",
                        arguments={'x-match': 'all', 'test': 'insert', 'topic': 'headers'})

    values = []
    for i in range(50):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)

    # The broker may still be warming up; retry only on the timeout error.
    while True:
        try:
            instance.query("INSERT INTO test.rabbitmq VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise

    insert_messages = []

    def onReceived(channel, method, properties, body):
        insert_messages.append(body.decode())
        if (len(insert_messages) == 50):
            channel.stop_consuming()

    consumer.basic_consume(onReceived, queue_name)
    consumer.start_consuming()
    consumer_connection.close()

    result = '\n'.join(insert_messages)
    rabbitmq_check_result(result, True)
def test_rabbitmq_many_inserts(rabbitmq_cluster):
    """Run 10 concurrent INSERT threads into one RabbitMQ-engine table and
    verify that a second RabbitMQ table bound to the same exchange (read
    through an MV) receives every row."""
    instance.query('''
        DROP TABLE IF EXISTS test.rabbitmq_many;
        DROP TABLE IF EXISTS test.rabbitmq_consume;
        DROP TABLE IF EXISTS test.view_many;
        DROP TABLE IF EXISTS test.consumer_many;
        CREATE TABLE test.rabbitmq_many (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'many_inserts',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'insert2',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.rabbitmq_consume (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'many_inserts',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'insert2',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    messages_num = 10000
    values = []
    for i in range(messages_num):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)

    def insert():
        # The broker may still be warming up; retry only on the timeout error.
        while True:
            try:
                instance.query("INSERT INTO test.rabbitmq_many VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise

    threads = []
    threads_num = 10
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    # The consuming MV is created after the producers start; messages queue
    # up on the broker until it attaches.
    instance.query('''
        CREATE TABLE test.view_many (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer_many TO test.view_many AS
            SELECT * FROM test.rabbitmq_consume;
    ''')

    for thread in threads:
        thread.join()

    # Poll until every inserted row has been consumed back.
    while True:
        result = instance.query('SELECT count() FROM test.view_many')
        if int(result) == messages_num * threads_num:
            break
        time.sleep(1)

    instance.query('''
        DROP TABLE test.rabbitmq_consume;
        DROP TABLE test.rabbitmq_many;
        DROP TABLE test.consumer_many;
        DROP TABLE test.view_many;
    ''')

    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_overloaded_insert(rabbitmq_cluster):
    """Stress test: 5 threads each insert 100k rows through a producer
    table while a multi-consumer/multi-queue reader table (via an MV)
    drains the same exchange; verify no rows are lost under load."""
    instance.query('''
        DROP TABLE IF EXISTS test.view_overload;
        DROP TABLE IF EXISTS test.consumer_overload;
        DROP TABLE IF EXISTS test.rabbitmq_consume;
        CREATE TABLE test.rabbitmq_consume (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'over',
                     rabbitmq_queue_base = 'over',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_num_consumers = 5,
                     rabbitmq_num_queues = 10,
                     rabbitmq_max_block_size = 10000,
                     rabbitmq_routing_key_list = 'over',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.rabbitmq_overload (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'over',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'over',
                     rabbitmq_format = 'TSV',
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view_overload (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
        CREATE MATERIALIZED VIEW test.consumer_overload TO test.view_overload AS
            SELECT * FROM test.rabbitmq_consume;
    ''')

    messages_num = 100000

    def insert():
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)

        # The broker may still be warming up; retry only on the timeout error.
        while True:
            try:
                instance.query("INSERT INTO test.rabbitmq_overload VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise

    threads = []
    threads_num = 5
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    # Poll until every inserted row has been consumed back.
    while True:
        result = instance.query('SELECT count() FROM test.view_overload')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break

    instance.query('''
        DROP TABLE test.consumer_overload;
        DROP TABLE test.view_overload;
        DROP TABLE test.rabbitmq_consume;
        DROP TABLE test.rabbitmq_overload;
    ''')

    for thread in threads:
        thread.join()

    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_direct_exchange(rabbitmq_cluster):
    """Create 5 RabbitMQ tables bound to one direct exchange with distinct
    routing keys; publish the same batch once per key and verify each table
    receives exactly its share (messages_num * num_tables in total)."""
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key
            SETTINGS old_parts_lifetime=5, cleanup_delay_period=2, cleanup_delay_period_random_add=3;
    ''')

    num_tables = 5
    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.direct_exchange_{0};
            DROP TABLE IF EXISTS test.direct_exchange_{0}_mv;
            CREATE TABLE test.direct_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_name = 'direct_exchange_testing',
                         rabbitmq_exchange_type = 'direct',
                         rabbitmq_routing_key_list = 'direct_{0}',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.direct_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.direct_exchange_{0};
        '''.format(consumer_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # Publish the same batch once per routing key, so each table gets the
    # full batch exactly once.
    key_num = 0
    for num in range(num_tables):
        key = "direct_" + str(key_num)
        key_num += 1
        for message in messages:
            # NOTE(review): message ids deliberately collide (randrange(10))
            # — presumably to check dedup is keyed per-channel; confirm.
            mes_id = str(randrange(10))
            channel.basic_publish(
                exchange='direct_exchange_testing', routing_key=key,
                properties=pika.BasicProperties(message_id=mes_id), body=message)

    connection.close()

    # Poll until all copies have landed in the shared destination table.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables:
            break

    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE test.direct_exchange_{0}_mv;
            DROP TABLE test.direct_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_fanout_exchange(rabbitmq_cluster):
    """Create 5 RabbitMQ tables on one fanout exchange; every table must
    receive every message (routing keys are ignored by fanout)."""
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    num_tables = 5
    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.fanout_exchange_{0};
            DROP TABLE IF EXISTS test.fanout_exchange_{0}_mv;
            CREATE TABLE test.fanout_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_routing_key_list = 'key_{0}',
                         rabbitmq_exchange_name = 'fanout_exchange_testing',
                         rabbitmq_exchange_type = 'fanout',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.fanout_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.fanout_exchange_{0};
        '''.format(consumer_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    for msg_id in range(messages_num):
        channel.basic_publish(exchange='fanout_exchange_testing', routing_key='',
                              properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])

    connection.close()

    # Each of the num_tables fanout consumers receives the full batch.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables:
            break

    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE test.fanout_exchange_{0}_mv;
            DROP TABLE test.fanout_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    assert int(result) == messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_topic_exchange(rabbitmq_cluster):
    """Topic exchange routing: 5 tables bound to '*.N' patterns each get
    the batch published to 'topic.N'; 5 more bound to '*.logs' each get the
    batch published to 'random.logs'.  Total expected rows:
    2 * messages_num * num_tables.

    Fixes: removed a dead local (``current = 0``) that was never read, and
    collapsed the awkward three-line final assert into one expression.
    """
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    num_tables = 5
    # First group: one table per pattern '*.0' .. '*.4'.
    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.topic_exchange_{0};
            DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
            CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_name = 'topic_exchange_testing',
                         rabbitmq_exchange_type = 'topic',
                         rabbitmq_routing_key_list = '*.{0}',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.topic_exchange_{0};
        '''.format(consumer_id))

    # Second group: five tables all bound to the same '*.logs' pattern.
    for consumer_id in range(num_tables):
        print(("Setting up table {}".format(num_tables + consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.topic_exchange_{0};
            DROP TABLE IF EXISTS test.topic_exchange_{0}_mv;
            CREATE TABLE test.topic_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_name = 'topic_exchange_testing',
                         rabbitmq_exchange_type = 'topic',
                         rabbitmq_routing_key_list = '*.logs',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.topic_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.topic_exchange_{0};
        '''.format(num_tables + consumer_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # Publish the batch once per 'topic.N' key — matches exactly one table each.
    key_num = 0
    for num in range(num_tables):
        key = "topic." + str(key_num)
        key_num += 1
        for message in messages:
            channel.basic_publish(exchange='topic_exchange_testing', routing_key=key, body=message)

    # Publish the batch once to 'random.logs' — matched by all '*.logs' tables.
    key = "random.logs"
    for msg_id in range(messages_num):
        channel.basic_publish(exchange='topic_exchange_testing', routing_key=key,
                              properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])

    connection.close()

    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables + messages_num * num_tables:
            break

    for consumer_id in range(num_tables * 2):
        instance.query('''
            DROP TABLE test.topic_exchange_{0}_mv;
            DROP TABLE test.topic_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    assert int(result) == 2 * messages_num * num_tables, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_hash_exchange(rabbitmq_cluster):
    """Consistent-hash exchange: messages are distributed across 4 tables
    (4 consumers each), with no loss and all 16 channels used.

    Fixes: the assertion message previously formatted an undefined name
    ``result`` (only ``result1``/``result2`` exist here), which would raise
    NameError instead of showing the count on failure.
    """
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String)
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    num_tables = 4
    for consumer_id in range(num_tables):
        table_name = 'rabbitmq_consumer{}'.format(consumer_id)
        print(("Setting up {}".format(table_name)))
        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
            CREATE TABLE test.{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 4,
                         rabbitmq_num_queues = 2,
                         rabbitmq_exchange_type = 'consistent_hash',
                         rabbitmq_exchange_name = 'hash_exchange_testing',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
                SELECT key, value, _channel_id AS channel_id FROM test.{0};
        '''.format(table_name))

    i = [0]
    messages_num = 500

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)

    def produce():
        # init connection here because otherwise python rabbitmq client might fail
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        # Routing key is what the consistent-hash exchange hashes on.
        for msg_id in range(messages_num):
            channel.basic_publish(exchange='hash_exchange_testing', routing_key=str(msg_id),
                                  properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
        connection.close()

    threads = []
    threads_num = 10
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    result1 = ''
    while True:
        result1 = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result1) == messages_num * threads_num:
            break

    result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.destination")

    for consumer_id in range(num_tables):
        table_name = 'rabbitmq_consumer{}'.format(consumer_id)
        instance.query('''
            DROP TABLE test.{0}_mv;
            DROP TABLE test.{0};
        '''.format(table_name))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    for thread in threads:
        thread.join()

    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
    # 4 tables * 4 consumers per table => 16 distinct channel ids.
    assert int(result2) == 4 * num_tables
def test_rabbitmq_multiple_bindings(rabbitmq_cluster):
    """One RabbitMQ table bound to a direct exchange with 5 routing keys;
    publishing the same batch to each key should deliver 5 copies."""
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    instance.query('''
        DROP TABLE IF EXISTS test.bindings;
        DROP TABLE IF EXISTS test.bindings_mv;
        CREATE TABLE test.bindings (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'multiple_bindings_testing',
                     rabbitmq_exchange_type = 'direct',
                     rabbitmq_routing_key_list = 'key1,key2,key3,key4,key5',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
        CREATE MATERIALIZED VIEW test.bindings_mv TO test.destination AS
            SELECT * FROM test.bindings;
    ''')

    i = [0]
    messages_num = 500

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)

    def produce():
        # init connection here because otherwise python rabbitmq client might fail
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()

        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1

        keys = ['key1', 'key2', 'key3', 'key4', 'key5']
        for key in keys:
            for message in messages:
                channel.basic_publish(exchange='multiple_bindings_testing', routing_key=key, body=message)

        connection.close()

    threads = []
    threads_num = 10
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    # 5 bound keys => 5 copies of each thread's batch.
    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * threads_num * 5:
            break

    for thread in threads:
        thread.join()

    instance.query('''
        DROP TABLE test.bindings;
        DROP TABLE test.bindings_mv;
        DROP TABLE test.destination;
    ''')

    assert int(result) == messages_num * threads_num * 5, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_headers_exchange(rabbitmq_cluster):
    """Headers exchange routing: 2 tables whose bindings match the
    published headers (year=2020) receive the batch; 2 tables bound with
    year=2019 must receive nothing."""
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    num_tables_to_receive = 2
    for consumer_id in range(num_tables_to_receive):
        print(("Setting up table {}".format(consumer_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
            CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_num_consumers = 2,
                         rabbitmq_exchange_name = 'headers_exchange_testing',
                         rabbitmq_exchange_type = 'headers',
                         rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2020',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.headers_exchange_{0};
        '''.format(consumer_id))

    # These bindings differ only in year=2019, so x-match=all must reject
    # the published messages (which carry year=2020).
    num_tables_to_ignore = 2
    for consumer_id in range(num_tables_to_ignore):
        print(("Setting up table {}".format(consumer_id + num_tables_to_receive)))
        instance.query('''
            DROP TABLE IF EXISTS test.headers_exchange_{0};
            DROP TABLE IF EXISTS test.headers_exchange_{0}_mv;
            CREATE TABLE test.headers_exchange_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_exchange_name = 'headers_exchange_testing',
                         rabbitmq_exchange_type = 'headers',
                         rabbitmq_routing_key_list = 'x-match=all,format=logs,type=report,year=2019',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.headers_exchange_{0}_mv TO test.destination AS
                SELECT key, value FROM test.headers_exchange_{0};
        '''.format(consumer_id + num_tables_to_receive))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i[0], 'value': i[0]}))
        i[0] += 1

    # Headers attached to every published message; routing key is unused
    # by a headers exchange.
    fields = {}
    fields['format'] = 'logs'
    fields['type'] = 'report'
    fields['year'] = '2020'

    for msg_id in range(messages_num):
        channel.basic_publish(exchange='headers_exchange_testing', routing_key='',
                              properties=pika.BasicProperties(headers=fields, message_id=str(msg_id)),
                              body=messages[msg_id])

    connection.close()

    while True:
        result = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result) == messages_num * num_tables_to_receive:
            break

    for consumer_id in range(num_tables_to_receive + num_tables_to_ignore):
        instance.query('''
            DROP TABLE test.headers_exchange_{0}_mv;
            DROP TABLE test.headers_exchange_{0};
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    assert int(result) == messages_num * num_tables_to_receive, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_virtual_columns(rabbitmq_cluster):
    """Check the engine's virtual columns (_exchange_name, _channel_id,
    _delivery_tag, _redelivered) when selected directly in an MV query."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        CREATE TABLE test.rabbitmq_virtuals (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'virtuals',
                     rabbitmq_format = 'JSONEachRow';
        CREATE MATERIALIZED VIEW test.view Engine=Log AS
        SELECT value, key, _exchange_name, _channel_id, _delivery_tag, _redelivered FROM test.rabbitmq_virtuals;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    message_num = 10
    i = 0
    messages = []
    for _ in range(message_num):
        messages.append(json.dumps({'key': i, 'value': i}))
        i += 1

    for message in messages:
        channel.basic_publish(exchange='virtuals', routing_key='', body=message)

    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == message_num:
            break

    connection.close()

    # Only the channel-id prefix is stable, so compare SUBSTRING(..., 1, 3).
    result = instance.query('''
        SELECT key, value, _exchange_name, SUBSTRING(_channel_id, 1, 3), _delivery_tag, _redelivered
        FROM test.view ORDER BY key
    ''')

    # Delivery tags are 1-based and increase per message on one channel.
    expected = '''\
0	0	virtuals	1_0	1	0
1	1	virtuals	1_0	2	0
2	2	virtuals	1_0	3	0
3	3	virtuals	1_0	4	0
4	4	virtuals	1_0	5	0
5	5	virtuals	1_0	6	0
6	6	virtuals	1_0	7	0
7	7	virtuals	1_0	8	0
8	8	virtuals	1_0	9	0
9	9	virtuals	1_0	10	0
'''

    instance.query('''
        DROP TABLE test.rabbitmq_virtuals;
        DROP TABLE test.view;
    ''')

    assert TSV(result) == TSV(expected)
def test_rabbitmq_virtual_columns_with_materialized_view(rabbitmq_cluster):
    """Check virtual columns when they are aliased to ordinary columns of
    the MV's target MergeTree table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq_virtuals_mv (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'virtuals_mv',
                     rabbitmq_format = 'JSONEachRow';
        CREATE TABLE test.view (key UInt64, value UInt64,
            exchange_name String, channel_id String, delivery_tag UInt64, redelivered UInt8) ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
        SELECT *, _exchange_name as exchange_name, _channel_id as channel_id, _delivery_tag as delivery_tag, _redelivered as redelivered
        FROM test.rabbitmq_virtuals_mv;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    message_num = 10
    i = 0
    messages = []
    for _ in range(message_num):
        messages.append(json.dumps({'key': i, 'value': i}))
        i += 1

    for message in messages:
        channel.basic_publish(exchange='virtuals_mv', routing_key='', body=message)

    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == message_num:
            break

    connection.close()

    # Only the channel-id prefix is stable, so compare SUBSTRING(..., 1, 3).
    result = instance.query(
        "SELECT key, value, exchange_name, SUBSTRING(channel_id, 1, 3), delivery_tag, redelivered FROM test.view ORDER BY delivery_tag")
    expected = '''\
0	0	virtuals_mv	1_0	1	0
1	1	virtuals_mv	1_0	2	0
2	2	virtuals_mv	1_0	3	0
3	3	virtuals_mv	1_0	4	0
4	4	virtuals_mv	1_0	5	0
5	5	virtuals_mv	1_0	6	0
6	6	virtuals_mv	1_0	7	0
7	7	virtuals_mv	1_0	8	0
8	8	virtuals_mv	1_0	9	0
9	9	virtuals_mv	1_0	10	0
'''

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
        DROP TABLE test.rabbitmq_virtuals_mv
    ''')

    assert TSV(result) == TSV(expected)
def test_rabbitmq_many_consumers_to_each_queue(rabbitmq_cluster):
    """4 tables share one queue base with 2 consumers each; verify no loss
    and that all 8 consumer channels were actually used.

    Fixes: the assertion message previously formatted an undefined name
    ``result`` (only ``result1``/``result2`` exist here), which would raise
    NameError instead of showing the count on failure.
    """
    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination(key UInt64, value UInt64, channel_id String)
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    num_tables = 4
    for table_id in range(num_tables):
        print(("Setting up table {}".format(table_id)))
        instance.query('''
            DROP TABLE IF EXISTS test.many_consumers_{0};
            DROP TABLE IF EXISTS test.many_consumers_{0}_mv;
            CREATE TABLE test.many_consumers_{0} (key UInt64, value UInt64)
                ENGINE = RabbitMQ
                SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                         rabbitmq_exchange_name = 'many_consumers',
                         rabbitmq_num_queues = 2,
                         rabbitmq_num_consumers = 2,
                         rabbitmq_queue_base = 'many_consumers',
                         rabbitmq_format = 'JSONEachRow',
                         rabbitmq_row_delimiter = '\\n';
            CREATE MATERIALIZED VIEW test.many_consumers_{0}_mv TO test.destination AS
                SELECT key, value, _channel_id as channel_id FROM test.many_consumers_{0};
        '''.format(table_id))

    i = [0]
    messages_num = 1000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)

    def produce():
        # Each thread opens its own connection (pika is not thread-safe).
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        messages = []
        for _ in range(messages_num):
            messages.append(json.dumps({'key': i[0], 'value': i[0]}))
            i[0] += 1
        for msg_id in range(messages_num):
            channel.basic_publish(exchange='many_consumers', routing_key='',
                                  properties=pika.BasicProperties(message_id=str(msg_id)), body=messages[msg_id])
        connection.close()

    threads = []
    threads_num = 20
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))
        thread.start()

    result1 = ''
    while True:
        result1 = instance.query('SELECT count() FROM test.destination')
        time.sleep(1)
        if int(result1) == messages_num * threads_num:
            break

    result2 = instance.query("SELECT count(DISTINCT channel_id) FROM test.destination")

    for thread in threads:
        thread.join()

    for consumer_id in range(num_tables):
        instance.query('''
            DROP TABLE test.many_consumers_{0};
            DROP TABLE test.many_consumers_{0}_mv;
        '''.format(consumer_id))

    instance.query('''
        DROP TABLE test.destination;
    ''')

    assert int(result1) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result1)
    # 4 tables, 2 consumers for each table => 8 consumer tags
    assert int(result2) == 8
def test_rabbitmq_restore_failed_connection_without_losses_1(rabbitmq_cluster):
    """Kill the broker while a *producer* table is mid-publish (persistent
    delivery enabled), revive it, and verify no distinct keys are lost."""
    instance.query('''
        DROP TABLE IF EXISTS test.consume;
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE TABLE test.consume (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'producer_reconnect',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_num_consumers = 2,
                     rabbitmq_row_delimiter = '\\n';
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.consume;
        DROP TABLE IF EXISTS test.producer_reconnect;
        CREATE TABLE test.producer_reconnect (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'producer_reconnect',
                     rabbitmq_persistent = '1',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    # NOTE(review): this connection/channel pair is opened but never used
    # below; presumably left over from an earlier revision.
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages_num = 100000
    values = []
    for i in range(messages_num):
        values.append("({i}, {i})".format(i=i))
    values = ','.join(values)

    # The broker may still be warming up; retry only on the timeout error.
    while True:
        try:
            instance.query("INSERT INTO test.producer_reconnect VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise

    # Wait until at least one row has flowed through, then kill the broker
    # mid-transfer and bring it back.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(0.1)

    kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
    time.sleep(4)
    revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)

    # Count DISTINCT keys: duplicates after reconnect are acceptable,
    # losses are not.
    while True:
        result = instance.query('SELECT count(DISTINCT key) FROM test.view')
        time.sleep(1)
        if int(result) == messages_num:
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
        DROP TABLE test.consume;
        DROP TABLE test.producer_reconnect;
    ''')

    assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_restore_failed_connection_without_losses_2(rabbitmq_cluster):
    """Consumer-side broker-failure test.

    All messages are published (persistent, delivery_mode=2) before the
    materialized view starts consuming; the broker is killed mid-consumption
    and revived. Every distinct key must still end up in test.view.
    """
    instance.query('''
        CREATE TABLE test.consumer_reconnect (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'consumer_reconnect',
                     rabbitmq_num_consumers = 10,
                     rabbitmq_num_queues = 10,
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_row_delimiter = '\\n';
    ''')

    i = 0
    messages_num = 150000

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    messages = []
    for _ in range(messages_num):
        messages.append(json.dumps({'key': i, 'value': i}))
        i += 1
    # delivery_mode=2 marks messages persistent so they can survive the
    # broker restart below.
    for msg_id in range(messages_num):
        channel.basic_publish(exchange='consumer_reconnect', routing_key='', body=messages[msg_id],
                              properties=pika.BasicProperties(delivery_mode=2, message_id=str(msg_id)))
    connection.close()

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.consumer_reconnect;
    ''')

    # Wait until consumption has started, then bounce the broker.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(0.1)
    kill_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)
    time.sleep(8)
    revive_rabbitmq(rabbitmq_cluster.rabbitmq_docker_id)

    #while int(instance.query('SELECT count() FROM test.view')) == 0:
    #    time.sleep(0.1)
    #kill_rabbitmq()
    #time.sleep(2)
    #revive_rabbitmq()

    # Poll (no timeout) until every distinct key has arrived.
    while True:
        result = instance.query('SELECT count(DISTINCT key) FROM test.view')
        time.sleep(1)
        if int(result) == messages_num:
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.consumer_reconnect;
    ''')

    assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_commit_on_block_write(rabbitmq_cluster):
    """Offsets must be committed per written block: after a DETACH/ATTACH
    cycle of the engine table, no already-materialized message may be
    consumed again (count() must equal uniqExact(key) in test.view).
    """
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.rabbitmq (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'block',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_queue_base = 'block',
                     rabbitmq_max_block_size = 100,
                     rabbitmq_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq;
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    cancel = threading.Event()

    i = [0]  # one-element list so produce() can mutate the counter from its thread

    def produce():
        # Publish batches of 101 messages (one more than
        # rabbitmq_max_block_size) until the main thread sets `cancel`.
        while not cancel.is_set():
            messages = []
            for _ in range(101):
                messages.append(json.dumps({'key': i[0], 'value': i[0]}))
                i[0] += 1
            for message in messages:
                channel.basic_publish(exchange='block', routing_key='', body=message)

    rabbitmq_thread = threading.Thread(target=produce)
    rabbitmq_thread.start()

    # Let some data flow, then detach/attach the engine table mid-stream.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(1)

    cancel.set()

    instance.query('DETACH TABLE test.rabbitmq;')

    # Wait for the detach to actually take effect before re-attaching.
    while int(instance.query("SELECT count() FROM system.tables WHERE database='test' AND name='rabbitmq'")) == 1:
        time.sleep(1)

    instance.query('ATTACH TABLE test.rabbitmq;')

    # Wait until everything published so far has been materialized at least once.
    while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
        time.sleep(1)

    # 1 iff no key appears more than once, i.e. no message was re-consumed.
    result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    rabbitmq_thread.join()
    connection.close()

    assert result == 1, 'Messages from RabbitMQ get duplicated!'
def test_rabbitmq_no_connection_at_startup(rabbitmq_cluster):
    """Creating the engine table while the broker is unreachable must
    succeed, and consumption must recover with no losses once the broker
    container is unpaused."""
    # no connection when table is initialized
    rabbitmq_cluster.pause_container('rabbitmq1')
    instance.query('''
        CREATE TABLE test.cs (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'cs',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_num_consumers = '5',
                     rabbitmq_row_delimiter = '\\n';
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.cs;
    ''')
    # Give the table time to (fail to) connect, then restore the broker.
    time.sleep(5)
    rabbitmq_cluster.unpause_container('rabbitmq1')
    # need to make sure rabbit table made all rabbit setup
    time.sleep(10)

    messages_num = 1000
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    # delivery_mode=2 -> persistent messages.
    for i in range(messages_num):
        message = json.dumps({'key': i, 'value': i})
        channel.basic_publish(exchange='cs', routing_key='', body=message,
                              properties=pika.BasicProperties(delivery_mode=2, message_id=str(i)))
    connection.close()

    # Poll (no timeout) until all messages arrive.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == messages_num:
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.cs;
    ''')

    assert int(result) == messages_num, 'ClickHouse lost some messages: {}'.format(result)
def test_rabbitmq_format_factory_settings(rabbitmq_cluster):
    """Format-factory settings given in the engine's SETTINGS clause (here
    date_time_input_format = 'best_effort') must be applied when parsing
    incoming messages, both when reading the engine table directly and when
    consuming through a materialized view.

    Fixed: both polling loops busy-spun with no sleep (every other poll loop
    in this file sleeps between queries); also removed stray ``break;``
    semicolons.
    """
    instance.query('''
        CREATE TABLE test.format_settings (
            id String, date DateTime
        ) ENGINE = RabbitMQ
        SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                 rabbitmq_exchange_name = 'format_settings',
                 rabbitmq_format = 'JSONEachRow',
                 date_time_input_format = 'best_effort';
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    message = json.dumps({"id":"format_settings_test","date":"2021-01-19T14:42:33.1829214Z"})
    # The expected value is whatever ClickHouse itself produces for the same
    # timestamp under best-effort parsing.
    expected = instance.query('''SELECT parseDateTimeBestEffort(CAST('2021-01-19T14:42:33.1829214Z', 'String'))''')

    channel.basic_publish(exchange='format_settings', routing_key='', body=message)

    # Direct read from the engine table.
    result = ''
    while True:
        result = instance.query('SELECT date FROM test.format_settings')
        if result == expected:
            break
        time.sleep(0.5)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (
            id String, date DateTime
        ) ENGINE = MergeTree ORDER BY id;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.format_settings;
    ''')

    # Same check through the materialized-view path.
    channel.basic_publish(exchange='format_settings', routing_key='', body=message)

    result = ''
    while True:
        result = instance.query('SELECT date FROM test.view')
        if result == expected:
            break
        time.sleep(0.5)

    connection.close()
    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.format_settings;
    ''')

    assert(result == expected)
def test_rabbitmq_vhost(rabbitmq_cluster):
    """A table created with an explicit rabbitmq_vhost setting must receive
    messages published to that vhost.

    Fixed: the polling loop busy-spun with no sleep, hammering the server;
    it now sleeps briefly between queries like the other poll loops here.
    """
    instance.query('''
        CREATE TABLE test.rabbitmq_vhost (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'vhost',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_vhost = '/'
    ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    channel.basic_publish(exchange='vhost', routing_key='', body=json.dumps({'key': 1, 'value': 2}))
    connection.close()
    # Poll (no timeout) until the single published row shows up.
    while True:
        result = instance.query('SELECT * FROM test.rabbitmq_vhost ORDER BY key', ignore_error=True)
        if result == "1\t2\n":
            break
        time.sleep(0.5)
def test_rabbitmq_drop_table_properly(rabbitmq_cluster):
    """Dropping the engine table must also delete its queue on the broker.

    Bug fixed: the second queue_declare passed an undefined name
    ``callback`` as a positional argument, which raised NameError inside the
    try block; ``exists`` was therefore always False and the final assert
    could never fail, even if the queue was still present. Also added a
    sleep to the previously busy-spinning poll loop.
    """
    instance.query('''
        CREATE TABLE test.rabbitmq_drop (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'drop',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_queue_base = 'rabbit_queue_drop'
    ''')
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()

    channel.basic_publish(exchange='drop', routing_key='', body=json.dumps({'key': 1, 'value': 2}))
    # Wait until the message has been consumed by the table.
    while True:
        result = instance.query('SELECT * FROM test.rabbitmq_drop ORDER BY key', ignore_error=True)
        if result == "1\t2\n":
            break
        time.sleep(0.5)

    # passive=True only checks existence: it raises if the queue is absent.
    exists = channel.queue_declare(queue='rabbit_queue_drop', passive=True)
    assert(exists)

    instance.query("DROP TABLE test.rabbitmq_drop")
    time.sleep(30)

    try:
        exists = channel.queue_declare(queue='rabbit_queue_drop', passive=True)
    except Exception:
        exists = False
    assert(not exists)
def test_rabbitmq_queue_settings(rabbitmq_cluster):
    """rabbitmq_queue_settings_list must be applied to the declared queue:
    with x-max-length=10 and x-overflow=reject-publish, only 10 of the 50
    published messages may ever reach ClickHouse.

    Bug fixed: the initial count was taken from test.rabbitmq_settings (the
    engine table itself, which the materialized view is already consuming
    from) while the retry loop read test.view — the two reads targeted
    different tables. Both now read test.view.
    """
    instance.query('''
        CREATE TABLE test.rabbitmq_settings (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_exchange_name = 'rabbit_exchange',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_queue_base = 'rabbit_queue_settings',
                     rabbitmq_queue_settings_list = 'x-max-length=10,x-overflow=reject-publish'
    ''')

    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    for i in range(50):
        channel.basic_publish(exchange='rabbit_exchange', routing_key='', body=json.dumps({'key': 1, 'value': 2}))
    connection.close()

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq_settings;
    ''')

    time.sleep(5)

    result = instance.query('SELECT count() FROM test.view', ignore_error=True)
    while int(result) != 10:
        time.sleep(0.5)
        result = instance.query('SELECT count() FROM test.view', ignore_error=True)

    instance.query('DROP TABLE test.rabbitmq_settings')

    # queue size is 10, but 50 messages were sent, they will be dropped (setting x-overflow = reject-publish) and only 10 will remain.
    assert(int(result) == 10)
def test_rabbitmq_queue_consume(rabbitmq_cluster):
    """Consume from a pre-declared, externally fed queue
    (rabbitmq_queue_consume = 1): ten producer threads publish straight to
    the default exchange with the queue name as routing key, and ClickHouse
    must materialize every message."""
    credentials = pika.PlainCredentials('root', 'clickhouse')
    parameters = pika.ConnectionParameters(rabbitmq_cluster.rabbitmq_ip, rabbitmq_cluster.rabbitmq_port, '/', credentials)
    connection = pika.BlockingConnection(parameters)
    channel = connection.channel()
    # The queue exists before the table is created — the table only consumes.
    channel.queue_declare(queue='rabbit_queue', durable=True)

    # Shared counter as a one-element list so the threads can mutate it.
    # NOTE(review): it is incremented from 10 threads without a lock, so keys
    # may repeat — the test only checks the total row count, which stays
    # messages_num per thread regardless.
    i = [0]
    messages_num = 1000

    def produce():
        # Each producer thread opens its own connection and channel.
        connection = pika.BlockingConnection(parameters)
        channel = connection.channel()
        messages = []  # NOTE(review): unused local — presumably leftover.
        for _ in range(messages_num):
            message = json.dumps({'key': i[0], 'value': i[0]})
            channel.basic_publish(exchange='', routing_key='rabbit_queue', body=message)
            i[0] += 1

    threads = []
    threads_num = 10
    for _ in range(threads_num):
        threads.append(threading.Thread(target=produce))
    for thread in threads:
        time.sleep(random.uniform(0, 1))  # stagger thread start-up
        thread.start()

    instance.query('''
        CREATE TABLE test.rabbitmq_queue (key UInt64, value UInt64)
            ENGINE = RabbitMQ
            SETTINGS rabbitmq_host_port = 'rabbitmq1:5672',
                     rabbitmq_format = 'JSONEachRow',
                     rabbitmq_queue_base = 'rabbit_queue',
                     rabbitmq_queue_consume = 1;
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.rabbitmq_queue;
    ''')

    # Poll (no timeout) until every published message has been materialized.
    result = ''
    while True:
        result = instance.query('SELECT count() FROM test.view')
        if int(result) == messages_num * threads_num:
            break
        time.sleep(1)

    for thread in threads:
        thread.join()

    instance.query('DROP TABLE test.rabbitmq_queue')
if __name__ == '__main__':
    # Manual debugging entry point: bring the test cluster up and keep it
    # alive until a key is pressed, then tear everything down.
    cluster.start()
    input("Cluster created, press any key to destroy...")
    cluster.shutdown()
|
logging_test.py | import sys
sys.path.append('..')
import unittest
import threading
import socket
import logging
logging.basicConfig(level=logging.DEBUG)
from interfaces import get_log_interface, tflogging
from entities import Server, Pug
from tornado import ioloop
class RconConnection(object):
    """Stub standing in for the real RCON connection so Server commands are
    printed instead of being sent to an actual game server."""
    def __init__(self, *args):
        # Accept and ignore whatever arguments the real constructor takes.
        pass

    def send_cmd(self, cmd, cb):
        # Echo the command only; the callback `cb` is intentionally never invoked.
        print "SERVER: Command to send: %s" % cmd

    @property
    def closed(self):
        # Always report the connection as open.
        return False
class LoggingTestCase(unittest.TestCase):
    """Base fixture: builds a TF2 Server with a stubbed RCON connection and
    a 12-player Pug, then opens a UDP socket to the server's log port so
    subclasses can inject raw log lines."""
    def setUp(self):
        self.server = Server.Server("TF2")
        self.server.rcon_connection = RconConnection()  # stub; no real RCON traffic
        self.server.ip = "202.138.3.55"
        self.server.port = 27045
        self.server.rcon_password = "thanksobama"

        self.pug = Pug.Pug(pid = 1)
        # add some players to the pug for stats testing...
        self.pug.add_player(76561197960265729, "1", Pug.PlayerStats())
        self.pug.add_player(76561197960265730, "2", Pug.PlayerStats())
        self.pug.add_player(76561197960265731, "3", Pug.PlayerStats())
        # Pad the pug with dummy players using ids 0-8.
        for i in xrange(9):
            stat = Pug.PlayerStats()
            self.pug.add_player(i, str(i), stat)

        # Drive the pug through its normal pre-game lifecycle.
        self.pug.begin_map_vote()
        self.pug.end_map_vote()
        self.pug.shuffle_teams()

        self.server.reserve(self.pug)
        self.server.prepare()

        # UDP socket pointed at the port the server listens on for log lines.
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.socket.connect(('127.0.0.1', self.server.log_port))

    def tearDown(self):
        self.socket.close()
class VerifyTestCase(unittest.TestCase):
    """Exercises log-line verification via _verify_data: per these cases,
    after a secret-prefixed line ('S<id>L') is seen, plain 'RL' lines are
    rejected and the same secret keeps being accepted."""

    def setUp(self):
        self.iface = get_log_interface("TF2")(None)

    def tearDown(self):
        pass

    def _check_sequence(self, sequence):
        # Feed each payload in order and assert the expected verdict.
        for payload, accepted in sequence:
            if accepted:
                self.assertTrue(self.iface._verify_data(payload))
            else:
                self.assertFalse(self.iface._verify_data(payload))

    def test_normal_to_secret(self):
        self._check_sequence([("RL", True), ("S123L", True), ("RL", False)])

    def test_secret_to_normal(self):
        self._check_sequence([("S123L", True), ("RL", False), ("S123L", True)])

    def test_secret(self):
        self._check_sequence([("S123L", True), ("S456L", False), ("S123L", True)])
class RegexTestCase(unittest.TestCase):
    """Checks that sample TF2 log lines match the expected regex group in
    tflogging.

    Bug fixed: check_group_match unpacked the match result into a local
    named ``group``, shadowing its parameter, so the final assertEquals
    compared the matched group against itself and could never fail. The
    unpacked value now has its own name and is compared to the expected
    group.
    """
    def setUp(self):
        pass

    def tearDown(self):
        pass

    def check_group_match(self, strings, group):
        # Every sample line must match, and must match under the named group.
        for s in strings:
            re_match = tflogging.check_regex_match(s)
            self.assertNotEquals(re_match, None)

            matched_group, match, expr = re_match
            self.assertEquals(matched_group, group)

    def test_chat_regex(self):
        s1 = 'L 10/01/2012 - 21:58:09: "1<0><[U:1:2]><Red>" say "!teams"'
        s2 = 'L 10/01/2012 - 21:58:09: "1<0><[U:1:2]><Red>" say_team "!teams"'
        strings = [ s1, s2 ]
        self.check_group_match(strings, "chat")

    def test_round_regex(self):
        s1 = 'L 10/01/2012 - 22:02:34: World triggered "Round_Win" (winner "Blue")'
        s2 = 'L 10/01/2012 - 22:07:26: World triggered "Round_Overtime"'
        s3 = 'L 10/01/2012 - 22:07:27: World triggered "Round_Length" (seconds "288.70")'
        s4 = 'L 10/01/2012 - 22:07:32: World triggered "Round_Start"'
        s5 = 'L 10/01/2012 - 22:07:32: World triggered "Round_Setup_Begin"'
        s6 = 'L 10/01/2012 - 22:07:32: World triggered "Round_Setup_End"'
        strings = [ s1, s2, s3, s4, s5, s6 ]
        self.check_group_match(strings, "round")

    def test_connection_regex(self):
        s1 = 'L 10/01/2012 - 22:07:32: "1<0><[U:1:1]><>" connected, address "1.1.1.1:12345"'
        s2 = 'L 10/01/2012 - 22:07:32: "1<0><[U:1:1]><>" disconnected (reason "noob")'
        s3 = 'L 10/01/2012 - 22:07:32: "1<0><[U:1:1]><>" STEAM USERID validated'
        strings = [ s1, s2, s3 ]
        self.check_group_match(strings, "player_connection")

    def test_teamscore_regex(self):
        s1 = 'L 10/01/2012 - 22:07:27: Team "Red" current score "2" with "6" players'
        s2 = 'L 10/01/2012 - 22:07:27: Team "Blue" current score "3" with "6" players'
        s3 = 'L 10/01/2012 - 22:20:51: Team "Red" final score "3" with "6" players'
        s4 = 'L 10/01/2012 - 22:20:51: Team "Blue" final score "4" with "6" players'
        strings = [ s1, s2, s3, s4 ]
        self.check_group_match(strings, "team_score")

    def test_game_regex(self):
        s1 = 'L 10/01/2012 - 22:20:51: World triggered "Game_Over" reason "Reached Win Limit"'
        strings = [ s1 ]
        self.check_group_match(strings, "game_event")

    def test_stat_regex(self):
        s1 = 'L 10/01/2012 - 22:20:45: "1<0><[U:1:1]><Blue>" killed "2<2><[U:1:2]><Red>" with "scattergun" (attacker_position "-1803 129 236") (victim_position "-1767 278 218")'
        s2 = 'L 10/01/2012 - 22:20:45: "3<0><[U:1:3]><Blue>" triggered "kill assist" against "2<2><[U:1:2]><Red>" (assister_position "-1446 -200 236") (attacker_position "-1803 129 236") (victim_position "-1767 278 218")'
        s3 = 'L 10/01/2012 - 21:58:01: "1<0><[U:1:1]><Blue>" killed "2<2><[U:1:2]><Red>" with "knife" (customkill "backstab") (attacker_position "-1085 99 240") (victim_position "-1113 51 240")'
        strings = [ s1, s2, s3 ]
        self.check_group_match(strings, "player_stat")

    def test_unity_report_regex(self):
        s1 = 'L 10/01/2012 - 21:38:54: "{"token":"REPORT","data":{"reported":"STEAM_0:1:1","reporter":"STEAM_0:1:2","reason":"CHEATING","matchId":2}}"'
        strings = [ s1 ]
        self.check_group_match(strings, "report")
class ConnectTestCase(LoggingTestCase):
    """Sends raw connect lines over the log socket: 'valid' uses a SteamID
    present in the pug, 'invalid' one that was never added. These are
    send-only smoke tests — nothing is asserted about the outcome."""
    def test_valid_connect(self):
        msg = 'RL 10/01/2012 - 22:07:32: "1<0><[U:1:1]><>" connected, address "1.1.1.1:12345"'
        print "Sending message " + msg
        self.socket.send(msg)

    def test_invalid_connect(self):
        msg = 'RL 10/01/2012 - 22:07:32: "4<0><[U:1:4]><>" connected, address "1.1.1.1:12345"'
        print "Sending message " + msg
        self.socket.send(msg)
# 64-bit community SteamIDs for the bracketed [U:1:N] ids used in the
# sample log lines above; these match the players added in LoggingTestCase.
# CID of [U:1:1]: 76561197960265729
# CID of [U:1:2]: 76561197960265730
cid1 = 76561197960265729
cid2 = 76561197960265730
cid3 = 76561197960265731
class StatTestCase(LoggingTestCase):
    ### Test sending messages, make sure they're processed properly
    """Stat-line handling: the *_msg tests only push raw lines over the log
    socket (send-only smoke tests); the remaining tests call the log
    interface's parser directly and assert the pug's stat counters."""
    def setUp(self):
        super(StatTestCase, self).setUp()
        # Stats are only tracked once the game has begun.
        self.pug.begin_game()

    def test_player_kill_msg(self):
        msg = 'RL 10/01/2012 - 22:20:45: "1<0><[U:1:1]><Blue>" killed "2<2><[U:1:2]><Red>" with "scattergun" (attacker_position "-1803 129 236") (victim_position "-1767 278 218")'
        self.socket.send(msg)

    def test_player_assist_msg(self):
        msg = 'RL 10/01/2012 - 22:20:45: "3<0><[U:1:3]><Blue>" triggered "kill assist" against "2<2><[U:1:2]><Red>" (assister_position "-1446 -200 236") (attacker_position "-1803 129 236") (victim_position "-1767 278 218")'
        self.socket.send(msg)

    def test_player_custom_kill_msg(self):
        msg = 'RL 10/01/2012 - 21:58:01: "1<0><[U:1:1]><Blue>" killed "2<2><[U:1:2]><Red>" with "knife" (customkill "backstab") (attacker_position "-1085 99 240") (victim_position "-1113 51 240")'
        self.socket.send(msg)

    # Now we can actually test the parsing
    def test_player_kill(self):
        kill = 'L 10/01/2012 - 22:20:45: "1<0><[U:1:1]><Blue>" killed "2<2><[U:1:2]><Red>" with "scattergun" (attacker_position "-1803 129 236") (victim_position "-1767 278 218")'
        reverse_kill = 'L 10/01/2012 - 22:20:45: "2<2><[U:1:2]><Red>" killed "1<0><[U:1:1]><Blue>" with "scattergun" (attacker_position "-1803 129 236") (victim_position "-1767 278 218")'

        # Kills and deaths accumulate across parsed lines...
        self.server._log_interface._dispatch_parse(kill)
        self.assertEquals(self.pug.game_stats[cid1]["kills"], 1)
        self.assertEquals(self.pug.game_stats[cid2]["deaths"], 1)

        self.server._log_interface._dispatch_parse(kill)
        self.assertEquals(self.pug.game_stats[cid1]["kills"], 2)
        self.assertEquals(self.pug.game_stats[cid2]["deaths"], 2)

        # ...and the reverse kill credits the other side.
        self.server._log_interface._dispatch_parse(reverse_kill)
        self.assertEquals(self.pug.game_stats[cid1]["deaths"], 1)
        self.assertEquals(self.pug.game_stats[cid2]["kills"], 1)

    def test_player_assist(self):
        kill_assist = 'L 10/01/2012 - 22:20:45: "3<0><[U:1:3]><Blue>" triggered "kill assist" against "2<2><[U:1:2]><Red>" (assister_position "-1446 -200 236") (attacker_position "-1803 129 236") (victim_position "-1767 278 218")'
        self.server._log_interface._dispatch_parse(kill_assist)
        self.assertEquals(self.pug.game_stats[cid3]["assists"], 1)

    def test_player_custom_kill(self):
        # A customkill (backstab) line still counts as a normal kill/death.
        kill_special = 'L 10/01/2012 - 21:58:01: "1<0><[U:1:1]><Blue>" killed "2<2><[U:1:2]><Red>" with "knife" (customkill "backstab") (attacker_position "-1085 99 240") (victim_position "-1113 51 240")'
        self.server._log_interface._dispatch_parse(kill_special)
        self.assertEquals(self.pug.game_stats[cid1]["kills"], 1)
        self.assertEquals(self.pug.game_stats[cid2]["deaths"], 1)

        self.server._log_interface._dispatch_parse(kill_special)
        self.assertEquals(self.pug.game_stats[cid1]["kills"], 2)
        self.assertEquals(self.pug.game_stats[cid2]["deaths"], 2)
class ChatTestCase(LoggingTestCase):
    """Sends secret-prefixed chat command lines over the log socket.
    Send-only smoke tests — nothing is asserted about the outcome."""
    def test_team_command(self):
        msg = 'S123L 10/01/2012 - 21:58:09: "1<0><[U:1:1]><Red>" say "!teams"'
        print "Sending message " + msg
        self.socket.send(msg)

    def test_start_command(self):
        msg = 'S123L 10/01/2012 - 21:58:09: "1<0><[U:1:1]><Red>" say "!start"'
        print "Sending message " + msg
        self.socket.send(msg)

    @unittest.skip("NYI")
    def test_replace_command(self):
        pass
class ReportTestCase(LoggingTestCase):
    """Sends a Unity-style JSON report log line over the log socket.
    Send-only smoke test — nothing is asserted about the outcome."""
    def test_report(self):
        msg = 'L 10/01/2012 - 21:38:54: "{"token":"REPORT","data":{"reported":"STEAM_0:1:1","reporter":"STEAM_0:1:2","reason":"CHEATING","matchId":2}}"'
        print "Sending message " + msg
        self.socket.send(msg)
def test_suites():
    """Return one loaded suite per test-case class in this module."""
    loader = unittest.TestLoader()
    case_classes = [RegexTestCase, VerifyTestCase, ConnectTestCase,
                    StatTestCase, ChatTestCase, ReportTestCase]

    suites = []
    for cls in case_classes:
        suites.append(loader.loadTestsFromTestCase(cls))
    return suites
if __name__ == "__main__":
unittest.TestSuite(test_suites())
# get a tornado ioloop instance running in another thread so we can
# actually test this shiz
t = threading.Thread(target = unittest.main)
t.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
quit()
|
play.py | import chess
from .types import *
from .player import Player
from .game import Game, LocalGame, RemoteGame, MultiprocessingLocalGame
from .history import GameHistory
import multiprocessing as mp
def play_local_game(white_player: Player, black_player: Player, game: LocalGame = None,
                    seconds_per_player: float = 900) -> Tuple[Optional[Color], Optional[WinReason], GameHistory]:
    """
    Runs a full game between two players on a :class:`LocalGame`, calling
    :func:`play_turn` for whichever side is to move until the game ends: ::

        while not game.is_over():
            play_turn(game, player)

    :param white_player: The white :class:`Player`.
    :param black_player: The black :class:`Player`.
    :param game: The :class:`LocalGame` object to use.
    :param seconds_per_player: The time each player has to play. Only used if `game` is not passed in.
    :return: The results of the game, also passed to each player via :meth:`Player.handle_game_end`.
    """
    if game is None:
        game = LocalGame(seconds_per_player=seconds_per_player)

    name_of = {chess.WHITE: white_player.__class__.__name__,
               chess.BLACK: black_player.__class__.__name__}
    game.store_players(name_of[chess.WHITE], name_of[chess.BLACK])

    # Each side learns its color, the starting position, and the opponent's name.
    white_player.handle_game_start(chess.WHITE, game.board.copy(), name_of[chess.BLACK])
    black_player.handle_game_start(chess.BLACK, game.board.copy(), name_of[chess.WHITE])

    player_of = {chess.WHITE: white_player, chess.BLACK: black_player}

    game.start()
    while not game.is_over():
        play_turn(game, player_of[game.turn], end_turn_last=True)
    game.end()

    winner = game.get_winner_color()
    reason = game.get_win_reason()
    history = game.get_game_history()

    white_player.handle_game_end(winner, reason, history)
    black_player.handle_game_end(winner, reason, history)

    return winner, reason, history
def play_remote_game(server_url, game_id, auth, player: Player):
    """
    Plays `player`'s side of a server-hosted game via :class:`RemoteGame`.

    Mirrors :func:`play_local_game`, but only this player's turns are driven
    here — the opponent plays through the server.

    :param server_url: Base URL of the game server.
    :param game_id: Identifier of the game on the server.
    :param auth: Authentication credentials for the server.
    :param player: The :class:`Player` to play as.
    :return: ``(winner_color, win_reason, game_history)``, also passed to
        the player via :meth:`Player.handle_game_end`.
    """
    game = RemoteGame(server_url, game_id, auth)

    player.handle_game_start(game.get_player_color(), game.get_starting_board(), game.get_opponent_name())
    game.start()

    while not game.is_over():
        play_turn(game, player, end_turn_last=False)

    winner_color = game.get_winner_color()
    win_reason = game.get_win_reason()
    game_history = game.get_game_history()

    player.handle_game_end(winner_color, win_reason, game_history)

    return winner_color, win_reason, game_history
def play_turn(game: Game, player: Player, end_turn_last=False):
    """
    Plays one full turn for `player` in `game`, in order:

    #. :func:`notify_opponent_move_results`
    #. :func:`play_sense`
    #. :func:`play_move`

    See :func:`play_move` for more info on `end_turn_last`.

    :param game: The :class:`Game` that `player` is playing in.
    :param player: The :class:`Player` whose turn it is.
    :param end_turn_last: Flag indicating whether to call :meth:`Game.end_turn` before or after :meth:`Player.handle_move_result`
    """
    # Capture both legal-action sets up front, before any phase runs.
    sense_options = game.sense_actions()
    move_options = game.move_actions()

    notify_opponent_move_results(game, player)
    play_sense(game, player, sense_options, move_options)
    play_move(game, player, move_options, end_turn_last=end_turn_last)
def notify_opponent_move_results(game: Game, player: Player):
    """
    Relays the opponent's move result to the player:

    #. Fetch the capture square (if any) with :meth:`Game.opponent_move_results`.
    #. Report it via :meth:`Player.handle_opponent_move_result`.

    :param game: The :class:`Game` that `player` is playing in.
    :param player: The :class:`Player` whose turn it is.
    """
    capture_square = game.opponent_move_results()
    # A non-None square means one of this player's pieces was captured.
    was_captured = capture_square is not None
    player.handle_opponent_move_result(was_captured, capture_square)
def play_sense(game: Game, player: Player, sense_actions: List[Square], move_actions: List[chess.Move]):
    """
    Runs the sense phase for `player` in `game`:

    #. Ask the player for a sense square via :meth:`Player.choose_sense`.
    #. Apply it with :meth:`Game.sense`.
    #. Hand the result back via :meth:`Player.handle_sense_result`.

    :param game: The :class:`Game` that `player` is playing in.
    :param player: The :class:`Player` whose turn it is.
    :param sense_actions: The possible sense actions for `player`.
    :param move_actions: The possible move actions for `player`.
    """
    seconds_left = game.get_seconds_left()
    chosen_square = player.choose_sense(sense_actions, move_actions, seconds_left)
    player.handle_sense_result(game.sense(chosen_square))
def play_move(game: Game, player: Player, move_actions: List[chess.Move], end_turn_last=False):
    """
    Runs the move phase for `player` in `game`:

    #. Ask the player for a move via :meth:`Player.choose_move`.
    #. Apply it with :meth:`Game.move`.
    #. End the turn with :meth:`Game.end_turn` and report the outcome via
       :meth:`Player.handle_move_result` — in that order by default, or
       reversed when `end_turn_last` is True.

    :param game: The :class:`Game` that `player` is playing in.
    :param player: The :class:`Player` whose turn it is.
    :param move_actions: The possible move actions for `player`.
    :param end_turn_last: Flag indicating whether to call :meth:`Game.end_turn` before or after :meth:`Player.handle_move_result`
    """
    chosen_move = player.choose_move(move_actions, game.get_seconds_left())
    requested_move, taken_move, capture_square = game.move(chosen_move)

    if end_turn_last:
        player.handle_move_result(requested_move, taken_move,
                                  capture_square is not None, capture_square)
        game.end_turn()
    else:
        game.end_turn()
        player.handle_move_result(requested_move, taken_move,
                                  capture_square is not None, capture_square)
def play_multiprocessing_local_game(white_player_class, black_player_class,
                                    game: LocalGame = None, seconds_per_player: float = 900) \
        -> Tuple[Optional[Color], Optional[WinReason], GameHistory]:
    """
    Plays a game between the two players passed in. Uses :class:`LocalGame` to run the game, but enables behavior
    similar to :class:`RemoteGame` by multiprocessing. Unlike :func:`play_local_game` and :func:`play_remote_game`, the
    players here must be passed as un-initialized classes; TroutBot rather than TroutBot().

    The moderator (this process) owns the :class:`LocalGame`; each player
    runs :func:`_play_in_multiprocessing_local_game` in its own process and
    talks to the moderator through a pair of queues. The child processes
    call :meth:`Player.handle_game_start`/:meth:`Player.handle_game_end`
    themselves, so this function only returns the results.

    :param white_player_class: The white :class:`Player` un-initialized.
    :param black_player_class: The black :class:`Player` un-initialized.
    :param game: The :class:`LocalGame` object to use.
    :param seconds_per_player: The time each player has to play. Only used if `game` is not passed in.
    :return: The results of the game, also passed to each player via :meth:`Player.handle_game_end`.
    """
    if game is None:
        game = LocalGame(seconds_per_player=seconds_per_player)

    white_name = white_player_class.__name__
    black_name = black_player_class.__name__
    game.store_players(white_name, black_name)
    # Stored player names become inaccessible from game (except by cheating: game._LocalGame__game_history...), instead:
    game.player_names = {
        chess.WHITE: white_name,
        chess.BLACK: black_name
    }

    # One bidirectional queue pair per color.
    player_queues = {
        chess.WHITE: {'to player': mp.Queue(), 'to moderator': mp.Queue()},
        chess.BLACK: {'to player': mp.Queue(), 'to moderator': mp.Queue()}
    }
    player_processes = [
        mp.Process(target=_play_in_multiprocessing_local_game, args=(player_queues[chess.BLACK], black_player_class)),
        mp.Process(target=_play_in_multiprocessing_local_game, args=(player_queues[chess.WHITE], white_player_class))
    ]
    [process.start() for process in player_processes]

    game.start()
    # Service queue requests until both player processes have exited.
    while any([process.is_alive() for process in player_processes]):
        _respond_to_requests(game, player_queues)

    winner_color = game.get_winner_color()
    win_reason = game.get_win_reason()
    game_history = game.get_game_history()

    return winner_color, win_reason, game_history
def _play_in_multiprocessing_local_game(queues, player_class):
    """
    Entry point for one player process in a multiprocessing local game.
    Mimics :func:`play_remote_game`, with the server requests replaced by
    the moderator's multiprocessing queues.

    :param queues: This player's dictionary of multiprocessing queues keyed 'to player' and 'to moderator'
    :param player_class: The :class:`Player` un-initialized.
    """
    game = MultiprocessingLocalGame(queues)
    player = player_class()

    # The player is constructed inside this process, then follows the usual
    # remote-game lifecycle.
    player.handle_game_start(game.get_player_color(), game.get_starting_board(), game.get_opponent_name())
    game.start()

    while not game.is_over():
        play_turn(game, player, end_turn_last=False)

    player.handle_game_end(game.get_winner_color(), game.get_win_reason(),
                           game.get_game_history())
def _respond_to_requests(game: LocalGame, queues):
    """
    Pass information between the moderator which runs a :class:`LocalGame` and each player which run their own
    :class:`RemoteGame` sub-class, :class:`MultiprocessingLocalGame`.

    :param game: The :class:`LocalGame` object to reference for neutral game-state information.
    :param queues: Multiprocessing Queues for communicating with the players. Stored as a nested dictionary of player
        color and queue direction: queues[chess.WHITE | chess.BLACK]['to player' | 'to moderator'].
    """
    # Service the active player first, then the waiting player.
    for color in [game.turn, not game.turn]:
        inbox = queues[color]['to moderator']
        outbox = queues[color]['to player']
        if not inbox.empty():
            request = inbox.get()
            command = request[0]
            is_active = game.turn == color

            # --- Commands answerable regardless of whose turn it is ---
            if command == 'color':
                outbox.put({'color': color})
            elif command == 'starting_board':
                outbox.put({'board': chess.Board()})
            elif command == 'opponent_name':
                # player_names was attached to the game by the moderator, since the
                # stored names are otherwise hard to access from a LocalGame.
                outbox.put({'opponent_name': game.player_names[not color]})
            elif command == 'seconds_left':
                if is_active:
                    outbox.put({'seconds_left': game.get_seconds_left()})
                else:
                    # The inactive player's clock is not ticking; read the stored value.
                    outbox.put({'seconds_left': game.seconds_left_by_color[color]})
            elif command == 'ready':
                outbox.put({'ready': 'ready'})
            elif command == 'is_my_turn':
                outbox.put({'is_my_turn': is_active and not game.is_over()})
            elif command == 'game_status':
                outbox.put({'is_over': game.is_over(), 'is_my_turn': is_active})
            elif command == 'winner_color':
                outbox.put({'winner_color': game.get_winner_color()})
            elif command == 'win_reason':
                outbox.put({'win_reason': game.get_win_reason()})
            elif command == 'game_history':
                outbox.put({'game_history': game.get_game_history()})

            # --- Commands only legal on the requester's own turn ---
            elif command in ('sense_actions', 'move_actions', 'opponent_move_results',
                             'sense', 'move', 'end_turn'):
                if not is_active:
                    outbox.put('Request unavailable')
                elif command == 'sense_actions':
                    outbox.put({'sense_actions': game.sense_actions()})
                elif command == 'move_actions':
                    outbox.put({'move_actions': game.move_actions()})
                elif command == 'opponent_move_results':
                    outbox.put({'opponent_move_results': game.opponent_move_results()})
                elif command == 'sense':
                    # request[1] carries the payload dict for parameterized commands.
                    outbox.put({'sense_result': game.sense(request[1]['square'])})
                elif command == 'move':
                    outbox.put({'move_result': game.move(request[1]['requested_move'])})
                else:  # 'end_turn'
                    game.end_turn()
                    outbox.put({'end_turn': 'done'})
            else:
                raise KeyError(f'Requested command {command} is not implemented')
        # After each action, check if the game has ended
        if game.is_over():
            game.end()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.